diff --git a/.gitignore b/.gitignore
index fd693adc3be7b0be708a1a38deb6123383ec399d..d5c7f763cf41939b0e577fc0ce72a2d8bf2436b6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -46,6 +46,7 @@ psim/
pysim/
*.out
*DS_Store
+tests/script/api/batchprepare
# Doxygen Generated files
html/
@@ -108,4 +109,5 @@ TAGS
contrib/*
!contrib/CMakeLists.txt
!contrib/test
-sql
\ No newline at end of file
+sql
+debug*/
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 0436f5b25923927edaa7568ba57c7b948446f8b1..b78b89690858b91a95273b67272b72b9dd771b0f 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required(VERSION 3.16)
+cmake_minimum_required(VERSION 3.0)
project(
TDengine
@@ -35,7 +35,7 @@ endif(${BUILD_TEST})
add_subdirectory(source)
add_subdirectory(tools)
add_subdirectory(tests)
-add_subdirectory(example)
+add_subdirectory(examples/c)
# docs
add_subdirectory(docs)
diff --git a/Jenkinsfile2 b/Jenkinsfile2
index 14c03068d7a32745bb269d07d7903da12253694b..a2b55e3acca0c141a2d550ccabb5bb129adb3d7e 100644
--- a/Jenkinsfile2
+++ b/Jenkinsfile2
@@ -269,7 +269,7 @@ pipeline {
}
}
stage('linux test') {
- agent{label " slave3_0 || slave15 || slave16 || slave17 "}
+ agent{label " worker03 || slave215 || slave217 || slave219 "}
options { skipDefaultCheckout() }
when {
changeRequest()
@@ -289,7 +289,7 @@ pipeline {
cd ${WKC}/tests/parallel_test
export DEFAULT_RETRY_TIME=2
date
- timeout 2100 time ./run.sh -e -m /home/m.json -t /tmp/cases.task -b ${BRANCH_NAME} -l ${WKDIR}/log -o 480
+ timeout 2100 time ./run.sh -e -m /home/m.json -t /tmp/cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 480
'''
}
}
diff --git a/cmake/cmake.define b/cmake/cmake.define
index 0ae4f56f71db237ca08a7a0a10bcbfe99e58b2d6..8d71870e7d8ce3e554dd9c6810ea3829e5e9511a 100644
--- a/cmake/cmake.define
+++ b/cmake/cmake.define
@@ -1,4 +1,4 @@
-cmake_minimum_required(VERSION 3.16)
+cmake_minimum_required(VERSION 3.0)
set(CMAKE_VERBOSE_MAKEFILE OFF)
@@ -46,7 +46,7 @@ ENDIF ()
IF (TD_WINDOWS)
MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")
- SET(COMMON_FLAGS "/w /D_WIN32 /Zi")
+ SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi")
SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MANIFEST:NO")
# IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
# SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
@@ -71,8 +71,8 @@ ELSE ()
ENDIF ()
IF (${SANITIZER} MATCHES "true")
- SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment -g3")
- SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment -g3")
+ SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3")
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3")
MESSAGE(STATUS "Will compile with Address Sanitizer!")
ELSE ()
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3")
diff --git a/cmake/cmake.options b/cmake/cmake.options
index cb6fd1400d43b6073d81ab43e46140343b277512..ab3c5ac1ad08b98ee2dbe09692584be63e477d71 100644
--- a/cmake/cmake.options
+++ b/cmake/cmake.options
@@ -49,7 +49,7 @@ IF(${TD_WINDOWS})
option(
BUILD_TEST
"If build unit tests using googletest"
- OFF
+ ON
)
ELSE ()
diff --git a/cmake/cmake.platform b/cmake/cmake.platform
index 0312f92a5b4116cad03d4bb9c2e7556d7a35deb2..acf17e9427bc453e1ece67cca5cbfe45f8827337 100644
--- a/cmake/cmake.platform
+++ b/cmake/cmake.platform
@@ -1,4 +1,4 @@
-cmake_minimum_required(VERSION 3.16)
+cmake_minimum_required(VERSION 3.0)
MESSAGE("Current system is ${CMAKE_SYSTEM_NAME}")
diff --git a/contrib/test/craft/raftMain.c b/contrib/test/craft/raftMain.c
index 12be3deb2e33aba9be9b45acd1595a749ab1b2c5..e1c66422b3b90b23ff8c6f01cf07aa8adace5983 100644
--- a/contrib/test/craft/raftMain.c
+++ b/contrib/test/craft/raftMain.c
@@ -243,7 +243,7 @@ void console(SRaftServer *pRaftServer) {
} else if (strcmp(cmd, "dropnode") == 0) {
- char host[HOST_LEN];
+ char host[HOST_LEN] = {0};
uint32_t port;
parseAddr(param1, host, HOST_LEN, &port);
uint64_t rid = raftId(host, port);
@@ -258,7 +258,7 @@ void console(SRaftServer *pRaftServer) {
} else if (strcmp(cmd, "put") == 0) {
- char buf[256];
+ char buf[256] = {0};
snprintf(buf, sizeof(buf), "%s--%s", param1, param2);
putValue(&pRaftServer->raft, buf);
diff --git a/docs-cn/02-intro.md b/docs-cn/02-intro.md
index 2a56c5e9e667b511003b1ee08801ddcb54ff2ec4..673c2e96b65814fc1cd572d54f948793ed6fa521 100644
--- a/docs-cn/02-intro.md
+++ b/docs-cn/02-intro.md
@@ -62,7 +62,7 @@ TDengine的主要功能如下:
-
+
图 1. TDengine技术生态图
@@ -119,7 +119,6 @@ TDengine的主要功能如下:
- [用 InfluxDB 开源的性能测试工具对比 InfluxDB 和 TDengine](https://www.taosdata.com/blog/2020/01/13/1105.html)
- [TDengine 与 OpenTSDB 对比测试](https://www.taosdata.com/blog/2019/08/21/621.html)
- [TDengine 与 Cassandra 对比测试](https://www.taosdata.com/blog/2019/08/14/573.html)
-- [TDengine 与 InfluxDB 对比测试](https://www.taosdata.com/blog/2019/07/19/419.html)
- [TDengine VS InfluxDB ,写入性能大 PK !](https://www.taosdata.com/2021/11/05/3248.html)
- [TDengine 和 InfluxDB 查询性能对比测试报告](https://www.taosdata.com/2022/02/22/5969.html)
- [TDengine 与 InfluxDB、OpenTSDB、Cassandra、MySQL、ClickHouse 等数据库的对比测试报告](https://www.taosdata.com/downloads/TDengine_Testing_Report_cn.pdf)
diff --git a/docs-cn/04-concept/index.md b/docs-cn/04-concept/index.md
index ca25595260953f8d941ccaf367bdc45a8325488f..8e97d4a2f43537c1229c8e8ea092ddfc1257dde7 100644
--- a/docs-cn/04-concept/index.md
+++ b/docs-cn/04-concept/index.md
@@ -29,7 +29,7 @@ title: 数据模型和基本概念
10.3
219
0.31
-Beijing.Chaoyang
+California.SanFrancisco
2
@@ -38,7 +38,7 @@ title: 数据模型和基本概念
10.2
220
0.23
-Beijing.Chaoyang
+California.SanFrancisco
3
@@ -47,7 +47,7 @@ title: 数据模型和基本概念
11.5
221
0.35
-Beijing.Haidian
+California.LosAngeles
3
@@ -56,7 +56,7 @@ title: 数据模型和基本概念
13.4
223
0.29
-Beijing.Haidian
+California.LosAngeles
2
@@ -65,7 +65,7 @@ title: 数据模型和基本概念
12.6
218
0.33
-Beijing.Chaoyang
+California.SanFrancisco
2
@@ -74,7 +74,7 @@ title: 数据模型和基本概念
11.8
221
0.28
-Beijing.Haidian
+California.LosAngeles
2
@@ -83,7 +83,7 @@ title: 数据模型和基本概念
10.3
218
0.25
-Beijing.Chaoyang
+California.SanFrancisco
3
@@ -92,7 +92,7 @@ title: 数据模型和基本概念
12.3
221
0.31
-Beijing.Chaoyang
+California.SanFrancisco
2
diff --git a/docs-cn/05-get-started/index.md b/docs-cn/05-get-started/index.md
index 458df909166b9769af2052ba654699e869d2081c..878d7f020245fbff383308c281fbc3fa28ba5f6c 100644
--- a/docs-cn/05-get-started/index.md
+++ b/docs-cn/05-get-started/index.md
@@ -132,7 +132,7 @@ Query OK, 2 row(s) in set (0.003128s)
taosBenchmark
```
-该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "beijing" 或者 "shanghai"。
+该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "California.SanFrancisco" 或者 "California.LosAngeles"。
这条命令很快完成 1 亿条记录的插入。具体时间取决于硬件性能,即使在一台普通的 PC 服务器往往也仅需十几秒。
@@ -154,10 +154,10 @@ taos> select count(*) from test.meters;
taos> select avg(current), max(voltage), min(phase) from test.meters;
```
-查询 location="beijing" 的记录总条数:
+查询 location="California.SanFrancisco" 的记录总条数:
```sql
-taos> select count(*) from test.meters where location="beijing";
+taos> select count(*) from test.meters where location="California.SanFrancisco";
```
查询 groupId=10 的所有记录的平均值、最大值、最小值等:
diff --git a/docs-cn/07-develop/02-model/index.mdx b/docs-cn/07-develop/02-model/index.mdx
index a060e3c84b8c5b8e25714ce15fb2bc7afc7d49d2..7e2762b6e78393493c2c5b61959e9a6ff57a7b13 100644
--- a/docs-cn/07-develop/02-model/index.mdx
+++ b/docs-cn/07-develop/02-model/index.mdx
@@ -55,10 +55,10 @@ CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAG
TDengine 对每个数据采集点需要独立建表。与标准的关系型数据库一样,一张表有表名,Schema,但除此之外,还可以带有一到多个标签。创建时,需要使用超级表做模板,同时指定标签的具体值。以[表 1](/tdinternal/arch#model_table1)中的智能电表为例,可以使用如下的 SQL 命令建表:
```sql
-CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);
+CREATE TABLE d1001 USING meters TAGS ("California.SanFrancisco", 2);
```
-其中 d1001 是表名,meters 是超级表的表名,后面紧跟标签 Location 的具体标签值 ”Beijing.Chaoyang",标签 groupId 的具体标签值 2。虽然在创建表时,需要指定标签值,但可以事后修改。详细细则请见 [TAOS SQL 的表管理](/taos-sql/table) 章节。
+其中 d1001 是表名,meters 是超级表的表名,后面紧跟标签 Location 的具体标签值 "California.SanFrancisco",标签 groupId 的具体标签值 2。虽然在创建表时,需要指定标签值,但可以事后修改。详细细则请见 [TAOS SQL 的表管理](/taos-sql/table) 章节。
:::warning
目前 TDengine 没有从技术层面限制使用一个 database (db1) 的超级表作为模板建立另一个 database (db2) 的子表,后续会禁止这种用法,不建议使用这种方法建表。
@@ -72,10 +72,10 @@ TDengine 建议将数据采集点的全局唯一 ID 作为表名(比如设备序
在某些特殊场景中,用户在写数据时并不确定某个数据采集点的表是否存在,此时可在写入数据时使用自动建表语法来创建不存在的表,若该表已存在则不会建立新表且后面的 USING 语句被忽略。比如:
```sql
-INSERT INTO d1001 USING meters TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32);
+INSERT INTO d1001 USING meters TAGS ("California.SanFrancisco", 2) VALUES (now, 10.2, 219, 0.32);
```
-上述 SQL 语句将记录`(now, 10.2, 219, 0.32)`插入表 d1001。如果表 d1001 还未创建,则使用超级表 meters 做模板自动创建,同时打上标签值 `"Beijing.Chaoyang", 2`。
+上述 SQL 语句将记录`(now, 10.2, 219, 0.32)`插入表 d1001。如果表 d1001 还未创建,则使用超级表 meters 做模板自动创建,同时打上标签值 `"California.SanFrancisco", 2`。
关于自动建表的详细语法请参见 [插入记录时自动建表](/taos-sql/insert#插入记录时自动建表) 章节。
diff --git a/docs-cn/07-develop/03-insert-data/01-sql-writing.mdx b/docs-cn/07-develop/03-insert-data/01-sql-writing.mdx
index e63ffce6dd07366da99fe1f41d0a2a8d7a623f31..99a92573c87d0f90f699a8d1352619f4df4aef39 100644
--- a/docs-cn/07-develop/03-insert-data/01-sql-writing.mdx
+++ b/docs-cn/07-develop/03-insert-data/01-sql-writing.mdx
@@ -52,7 +52,7 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6,
:::info
-- 要提高写入效率,需要批量写入。一批写入的记录条数越多,插入效率就越高。但一条记录不能超过 16K,一条 SQL 语句总长度不能超过 1M 。
+- 要提高写入效率,需要批量写入。一批写入的记录条数越多,插入效率就越高。但一条记录不能超过 48K,一条 SQL 语句总长度不能超过 1M 。
- TDengine 支持多线程同时写入,要进一步提高写入速度,一个客户端需要打开 20 个以上的线程同时写。但线程数达到一定数量后,无法再提高,甚至还会下降,因为线程频繁切换,带来额外开销。
:::
diff --git a/docs-cn/07-develop/03-insert-data/02-influxdb-line.mdx b/docs-cn/07-develop/03-insert-data/02-influxdb-line.mdx
index dedd7f0e70834e21257bda78dd184f5ddc520160..54f02c91475bb5524e259a0aa890363603a86fba 100644
--- a/docs-cn/07-develop/03-insert-data/02-influxdb-line.mdx
+++ b/docs-cn/07-develop/03-insert-data/02-influxdb-line.mdx
@@ -29,7 +29,7 @@ measurement,tag_set field_set timestamp
例如:
```
-meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500
+meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500
```
:::note
@@ -42,7 +42,6 @@ meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 16
要了解更多可参考:[InfluxDB Line 协议官方文档](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) 和 [TDengine 无模式写入参考指南](/reference/schemaless/#无模式写入行协议)
-
## 示例代码
diff --git a/docs-cn/07-develop/03-insert-data/03-opentsdb-telnet.mdx b/docs-cn/07-develop/03-insert-data/03-opentsdb-telnet.mdx
index dfbe6efda67b6928999287900637e0a251b86562..2b397e1bdc7a4c76686cd4b6d457a25dbcc2c950 100644
--- a/docs-cn/07-develop/03-insert-data/03-opentsdb-telnet.mdx
+++ b/docs-cn/07-develop/03-insert-data/03-opentsdb-telnet.mdx
@@ -29,10 +29,10 @@ OpenTSDB 行协议同样采用一行字符串来表示一行数据。OpenTSDB
例如:
```txt
-meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3
+meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3
```
-参考[OpenTSDB Telnet API文档](http://opentsdb.net/docs/build/html/api_telnet/put.html)。
+参考[OpenTSDB Telnet API 文档](http://opentsdb.net/docs/build/html/api_telnet/put.html)。
## 示例代码
@@ -76,9 +76,9 @@ Query OK, 2 row(s) in set (0.002544s)
taos> select tbname, * from `meters.current`;
tbname | ts | value | groupid | location |
==================================================================================================================================
- t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.249 | 10.800000000 | 3 | Beijing.Haidian |
- t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.250 | 11.300000000 | 3 | Beijing.Haidian |
- t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.249 | 10.300000000 | 2 | Beijing.Chaoyang |
- t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.250 | 12.600000000 | 2 | Beijing.Chaoyang |
+ t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.249 | 10.800000000 | 3 | California.LosAngeles |
+ t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.250 | 11.300000000 | 3 | California.LosAngeles |
+ t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.249 | 10.300000000 | 2 | California.SanFrancisco |
+ t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.250 | 12.600000000 | 2 | California.SanFrancisco |
Query OK, 4 row(s) in set (0.005399s)
```
diff --git a/docs-cn/07-develop/03-insert-data/04-opentsdb-json.mdx b/docs-cn/07-develop/03-insert-data/04-opentsdb-json.mdx
index 5d445997d061ca052e4f3673b8e881ea4acf0ade..a15f80a5851ad29605e871f16aed60b68109038a 100644
--- a/docs-cn/07-develop/03-insert-data/04-opentsdb-json.mdx
+++ b/docs-cn/07-develop/03-insert-data/04-opentsdb-json.mdx
@@ -19,33 +19,33 @@ OpenTSDB JSON 格式协议采用一个 JSON 字符串表示一行或多行数据
```json
[
- {
- "metric": "sys.cpu.nice",
- "timestamp": 1346846400,
- "value": 18,
- "tags": {
- "host": "web01",
- "dc": "lga"
- }
- },
- {
- "metric": "sys.cpu.nice",
- "timestamp": 1346846400,
- "value": 9,
- "tags": {
- "host": "web02",
- "dc": "lga"
- }
+ {
+ "metric": "sys.cpu.nice",
+ "timestamp": 1346846400,
+ "value": 18,
+ "tags": {
+ "host": "web01",
+ "dc": "lga"
}
+ },
+ {
+ "metric": "sys.cpu.nice",
+ "timestamp": 1346846400,
+ "value": 9,
+ "tags": {
+ "host": "web02",
+ "dc": "lga"
+ }
+ }
]
```
与 OpenTSDB 行协议类似, metric 将作为超级表名, timestamp 表示时间戳,value 表示度量值, tags 表示标签集。
-
-参考[OpenTSDB HTTP API文档](http://opentsdb.net/docs/build/html/api_http/put.html)。
+参考[OpenTSDB HTTP API 文档](http://opentsdb.net/docs/build/html/api_http/put.html)。
:::note
+
- 对于 JSON 格式协议,TDengine 并不会自动把所有标签转成 nchar 类型, 字符串将将转为 nchar 类型, 数值将同样转换为 double 类型。
- TDengine 只接收 JSON **数组格式**的字符串,即使一行数据也需要转换成数组形式。
@@ -93,7 +93,7 @@ Query OK, 2 row(s) in set (0.001954s)
taos> select * from `meters.current`;
ts | value | groupid | location |
===================================================================================================================
- 2022-03-28 09:56:51.249 | 10.300000000 | 2.000000000 | Beijing.Chaoyang |
- 2022-03-28 09:56:51.250 | 12.600000000 | 2.000000000 | Beijing.Chaoyang |
+ 2022-03-28 09:56:51.249 | 10.300000000 | 2.000000000 | California.SanFrancisco |
+ 2022-03-28 09:56:51.250 | 12.600000000 | 2.000000000 | California.SanFrancisco |
Query OK, 2 row(s) in set (0.004076s)
```
diff --git a/docs-cn/07-develop/04-query-data/index.mdx b/docs-cn/07-develop/04-query-data/index.mdx
index b0a6bad3eaad174a97d8dce4e1ba0125cbf5dc03..824f36ef2f98aac227bdcaf2016d7be0a2e59328 100644
--- a/docs-cn/07-develop/04-query-data/index.mdx
+++ b/docs-cn/07-develop/04-query-data/index.mdx
@@ -50,14 +50,14 @@ Query OK, 2 row(s) in set (0.001100s)
### 示例一
-在 TAOS Shell,查找北京所有智能电表采集的电压平均值,并按照 location 分组。
+在 TAOS Shell,查找加利福尼亚州所有智能电表采集的电压平均值,并按照 location 分组。
```
taos> SELECT AVG(voltage) FROM meters GROUP BY location;
avg(voltage) | location |
=============================================================
- 222.000000000 | Beijing.Haidian |
- 219.200000000 | Beijing.Chaoyang |
+ 222.000000000 | California.LosAngeles |
+ 219.200000000 | California.SanFrancisco |
Query OK, 2 row(s) in set (0.002136s)
```
@@ -88,10 +88,10 @@ taos> SELECT sum(current) FROM d1001 INTERVAL(10s);
Query OK, 2 row(s) in set (0.000883s)
```
-降采样操作也适用于超级表,比如:将北京所有智能电表采集的电流值每秒钟求和
+降采样操作也适用于超级表,比如:将加利福尼亚州所有智能电表采集的电流值每秒钟求和
```
-taos> SELECT SUM(current) FROM meters where location like "Beijing%" INTERVAL(1s);
+taos> SELECT SUM(current) FROM meters where location like "California%" INTERVAL(1s);
ts | sum(current) |
======================================================
2018-10-03 14:38:04.000 | 10.199999809 |
diff --git a/docs-cn/07-develop/05-continuous-query.mdx b/docs-cn/07-develop/05-continuous-query.mdx
index 2fd1b3cc755188f513fe511541a84efa3558d3ea..b2223d15e33114d263b9833df51e4201bc01c772 100644
--- a/docs-cn/07-develop/05-continuous-query.mdx
+++ b/docs-cn/07-develop/05-continuous-query.mdx
@@ -34,8 +34,8 @@ SLIDING: 连续查询的时间窗口向前滑动的时间间隔
```sql
create table meters (ts timestamp, current float, voltage int, phase float) tags (location binary(64), groupId int);
-create table D1001 using meters tags ("Beijing.Chaoyang", 2);
-create table D1002 using meters tags ("Beijing.Haidian", 2);
+create table D1001 using meters tags ("California.SanFrancisco", 2);
+create table D1002 using meters tags ("California.LosAngeles", 2);
...
```
diff --git a/docs-cn/07-develop/06-subscribe.mdx b/docs-cn/07-develop/06-subscribe.mdx
index d471c114e827d7c4b40195c2c1b3c8f6a9d26ed4..0f531e07c9dce7dbb03bacebf8e5cbefae82671f 100644
--- a/docs-cn/07-develop/06-subscribe.mdx
+++ b/docs-cn/07-develop/06-subscribe.mdx
@@ -145,7 +145,7 @@ void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
taos_unsubscribe(tsub, keep);
```
-其第二个参数,用于决定是否在客户端保留订阅的进度信息。如果这个参数是**false**(**0**),那无论下次调用 `taos_subscribe` 时的 `restart` 参数是什么,订阅都只能重新开始。另外,进度信息的保存位置是 _{DataDir}/subscribe/_ 这个目录下,每个订阅有一个与其 `topic` 同名的文件,删掉某个文件,同样会导致下次创建其对应的订阅时只能重新开始。
+其第二个参数,用于决定是否在客户端保留订阅的进度信息。如果这个参数是**false**(**0**),那无论下次调用 `taos_subscribe` 时的 `restart` 参数是什么,订阅都只能重新开始。另外,进度信息的保存位置是 _{DataDir}/subscribe/_ 这个目录下(注:`taos.cfg` 配置文件中 `DataDir` 参数值默认为 **/var/lib/taos/**,但是 Windows 服务器上本身不存在该目录,所以需要在 Windows 的配置文件中修改 `DataDir` 参数值为相应的已存在目录"),每个订阅有一个与其 `topic` 同名的文件,删掉某个文件,同样会导致下次创建其对应的订阅时只能重新开始。
代码介绍完毕,我们来看一下实际的运行效果。假设:
@@ -184,8 +184,8 @@ taos> use power;
# create super table "meters"
taos> create table meters(ts timestamp, current float, voltage int, phase int) tags(location binary(64), groupId int);
# create tabes using the schema defined by super table "meters"
-taos> create table d1001 using meters tags ("Beijing.Chaoyang", 2);
-taos> create table d1002 using meters tags ("Beijing.Haidian", 2);
+taos> create table d1001 using meters tags ("California.SanFrancisco", 2);
+taos> create table d1002 using meters tags ("California.LosAngeles", 2);
# insert some rows
taos> insert into d1001 values("2020-08-15 12:00:00.000", 12, 220, 1),("2020-08-15 12:10:00.000", 12.3, 220, 2),("2020-08-15 12:20:00.000", 12.2, 220, 1);
taos> insert into d1002 values("2020-08-15 12:00:00.000", 9.9, 220, 1),("2020-08-15 12:10:00.000", 10.3, 220, 1),("2020-08-15 12:20:00.000", 11.2, 220, 1);
@@ -193,27 +193,28 @@ taos> insert into d1002 values("2020-08-15 12:00:00.000", 9.9, 220, 1),("2020-08
taos> select * from meters where current > 10;
ts | current | voltage | phase | location | groupid |
===========================================================================================================
- 2020-08-15 12:10:00.000 | 10.30000 | 220 | 1 | Beijing.Haidian | 2 |
- 2020-08-15 12:20:00.000 | 11.20000 | 220 | 1 | Beijing.Haidian | 2 |
- 2020-08-15 12:00:00.000 | 12.00000 | 220 | 1 | Beijing.Chaoyang | 2 |
- 2020-08-15 12:10:00.000 | 12.30000 | 220 | 2 | Beijing.Chaoyang | 2 |
- 2020-08-15 12:20:00.000 | 12.20000 | 220 | 1 | Beijing.Chaoyang | 2 |
+ 2020-08-15 12:10:00.000 | 10.30000 | 220 | 1 | California.LosAngeles | 2 |
+ 2020-08-15 12:20:00.000 | 11.20000 | 220 | 1 | California.LosAngeles | 2 |
+ 2020-08-15 12:00:00.000 | 12.00000 | 220 | 1 | California.SanFrancisco | 2 |
+ 2020-08-15 12:10:00.000 | 12.30000 | 220 | 2 | California.SanFrancisco | 2 |
+ 2020-08-15 12:20:00.000 | 12.20000 | 220 | 1 | California.SanFrancisco | 2 |
Query OK, 5 row(s) in set (0.004896s)
```
+
### 示例代码
-
+
-
+
{/*
*/}
-
+
{/*
@@ -222,20 +223,20 @@ Query OK, 5 row(s) in set (0.004896s)
*/}
-
-
+
+
### 运行示例程序
-
+
示例程序会先消费符合查询条件的所有历史数据:
```bash
-ts: 1597464000000 current: 12.0 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid : 2
-ts: 1597464600000 current: 12.3 voltage: 220 phase: 2 location: Beijing.Chaoyang groupid : 2
-ts: 1597465200000 current: 12.2 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid : 2
-ts: 1597464600000 current: 10.3 voltage: 220 phase: 1 location: Beijing.Haidian groupid : 2
-ts: 1597465200000 current: 11.2 voltage: 220 phase: 1 location: Beijing.Haidian groupid : 2
+ts: 1597464000000 current: 12.0 voltage: 220 phase: 1 location: California.SanFrancisco groupid : 2
+ts: 1597464600000 current: 12.3 voltage: 220 phase: 2 location: California.SanFrancisco groupid : 2
+ts: 1597465200000 current: 12.2 voltage: 220 phase: 1 location: California.SanFrancisco groupid : 2
+ts: 1597464600000 current: 10.3 voltage: 220 phase: 1 location: California.LosAngeles groupid : 2
+ts: 1597465200000 current: 11.2 voltage: 220 phase: 1 location: California.LosAngeles groupid : 2
```
接着,使用 TDengine CLI 向表中新增一条数据:
@@ -249,5 +250,5 @@ taos> insert into d1001 values(now, 12.4, 220, 1);
因为这条数据的电流大于 10A,示例程序会将其消费:
```
-ts: 1651146662805 current: 12.4 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid: 2
+ts: 1651146662805 current: 12.4 voltage: 220 phase: 1 location: California.SanFrancisco groupid: 2
```
diff --git a/docs-cn/07-develop/07-cache.md b/docs-cn/07-develop/07-cache.md
index fd31335310d62d792e5173e38a9aa778ee6c6c60..cc59c0353c0d12fb7a8f0f20254087d741361031 100644
--- a/docs-cn/07-develop/07-cache.md
+++ b/docs-cn/07-develop/07-cache.md
@@ -1,6 +1,6 @@
---
sidebar_label: 缓存
-title: 缓存
+title: 缓存
description: "提供写驱动的缓存管理机制,将每个表最近写入的一条记录持续保存在缓存中,可以提供高性能的最近状态查询。"
---
@@ -15,7 +15,7 @@ TDengine 将内存池按块划分进行管理,数据在内存块里是以行
你可以通过函数 last_row() 快速获取一张表或一张超级表的最后一条记录,这样很便于在大屏显示各设备的实时状态或采集值。例如:
```sql
-select last_row(voltage) from meters where location='Beijing.Chaoyang';
+select last_row(voltage) from meters where location='California.SanFrancisco';
```
-该 SQL 语句将获取所有位于北京朝阳区的电表最后记录的电压值。
+该 SQL 语句将获取所有位于加利福尼亚州旧金山市的电表最后记录的电压值。
diff --git a/docs-cn/10-cluster/01-deploy.md b/docs-cn/10-cluster/01-deploy.md
index cee140c0ec13bc9c8052a599a2147acc1aa15a8d..b44d2942f2e4672ef6060aa9d084db1d3342e1c8 100644
--- a/docs-cn/10-cluster/01-deploy.md
+++ b/docs-cn/10-cluster/01-deploy.md
@@ -22,7 +22,7 @@ title: 集群部署
### 第二步
-建议关闭所有物理节点的防火墙,至少保证端口:6030 - 6042 的 TCP 和 UDP 端口都是开放的。强烈建议先关闭防火墙,集群搭建完毕之后,再来配置端口;
+确保集群中所有主机在端口 6030-6042 上的 TCP/UDP 协议能够互通。
### 第三步
diff --git a/docs-cn/12-taos-sql/01-data-type.md b/docs-cn/12-taos-sql/01-data-type.md
index be5c9a8cb4ed7f4ed9f9c7e11faf1b0f8f6e51b8..8ac6ee3b872bd31f616ea0aea3fd4a093abb4402 100644
--- a/docs-cn/12-taos-sql/01-data-type.md
+++ b/docs-cn/12-taos-sql/01-data-type.md
@@ -4,6 +4,8 @@ title: 支持的数据类型
description: "TDengine 支持的数据类型: 时间戳、浮点型、JSON 类型等"
---
+## 时间戳
+
使用 TDengine,最重要的是时间戳。创建并插入记录、查询历史记录的时候,均需要指定时间戳。时间戳有如下规则:
- 时间格式为 `YYYY-MM-DD HH:mm:ss.MS`,默认时间分辨率为毫秒。比如:`2017-08-12 18:25:58.128`
@@ -12,39 +14,59 @@ description: "TDengine 支持的数据类型: 时间戳、浮点型、JSON 类
- Epoch Time:时间戳也可以是一个长整数,表示从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始的毫秒数(相应地,如果所在 Database 的时间精度设置为“微秒”,则长整型格式的时间戳含义也就对应于从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始的微秒数;纳秒精度逻辑类似。)
- 时间可以加减,比如 now-2h,表明查询时刻向前推 2 个小时(最近 2 小时)。数字后面的时间单位可以是 b(纳秒)、u(微秒)、a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。 比如 `select * from t1 where ts > now-2w and ts <= now-1w`,表示查询两周前整整一周的数据。在指定降采样操作(down sampling)的时间窗口(interval)时,时间单位还可以使用 n (自然月) 和 y (自然年)。
-TDengine 缺省的时间戳精度是毫秒,但通过在 `CREATE DATABASE` 时传递的 PRECISION 参数也可以支持微秒和纳秒。(从 2.1.5.0 版本开始支持纳秒精度)
+TDengine 缺省的时间戳精度是毫秒,但通过在 `CREATE DATABASE` 时传递的 PRECISION 参数也可以支持微秒和纳秒。
```sql
CREATE DATABASE db_name PRECISION 'ns';
```
+## 数据类型
-在 TDengine 中,普通表的数据模型中可使用以下 10 种数据类型。
+在 TDengine 中,普通表的数据模型中可使用以下数据类型。
| # | **类型** | **Bytes** | **说明** |
| --- | :-------: | --------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| 1 | TIMESTAMP | 8 | 时间戳。缺省精度毫秒,可支持微秒和纳秒。从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始,计时不能早于该时间。(从 2.0.18.0 版本开始,已经去除了这一时间范围限制)(从 2.1.5.0 版本开始支持纳秒精度) |
-| 2 | INT | 4 | 整型,范围 [-2^31+1, 2^31-1], -2^31 用作 NULL |
-| 3 | BIGINT | 8 | 长整型,范围 [-2^63+1, 2^63-1], -2^63 用作 NULL |
-| 4 | FLOAT | 4 | 浮点型,有效位数 6-7,范围 [-3.4E38, 3.4E38] |
-| 5 | DOUBLE | 8 | 双精度浮点型,有效位数 15-16,范围 [-1.7E308, 1.7E308] |
-| 6 | BINARY | 自定义 | 记录单字节字符串,建议只用于处理 ASCII 可见字符,中文等多字节字符需使用 nchar。理论上,最长可以有 16374 字节。binary 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 binary(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 byte 的存储空间,总共固定占用 20 bytes 的空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\’`。 |
-| 7 | SMALLINT | 2 | 短整型, 范围 [-32767, 32767], -32768 用作 NULL |
-| 8 | TINYINT | 1 | 单字节整型,范围 [-127, 127], -128 用作 NULL |
-| 9 | BOOL | 1 | 布尔型,{true, false} |
-| 10 | NCHAR | 自定义 | 记录包含多字节字符在内的字符串,如中文字符。每个 nchar 字符占用 4 bytes 的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 `\’`。nchar 使用时须指定字符串大小,类型为 nchar(10) 的列表示此列的字符串最多存储 10 个 nchar 字符,会固定占用 40 bytes 的空间。如果用户字符串长度超出声明长度,将会报错。 |
-| 11 | JSON | | json 数据类型, 只有 tag 可以是 json 格式 |
-
-:::tip
-TDengine 对 SQL 语句中的英文字符不区分大小写,自动转化为小写执行。因此用户大小写敏感的字符串及密码,需要使用单引号将字符串引起来。
+| 1 | TIMESTAMP | 8 | 时间戳。缺省精度毫秒,可支持微秒和纳秒,详细说明见上节。 |
+| 2 | INT | 4 | 整型,范围 [-2^31, 2^31-1] |
+| 3 | INT UNSIGNED| 4| 无符号整数,[0, 2^32-1]
+| 4 | BIGINT | 8 | 长整型,范围 [-2^63, 2^63-1] |
+| 5 | BIGINT UNSIGNED | 8 | 长整型,范围 [0, 2^64-1] |
+| 6 | FLOAT | 4 | 浮点型,有效位数 6-7,范围 [-3.4E38, 3.4E38] |
+| 7 | DOUBLE | 8 | 双精度浮点型,有效位数 15-16,范围 [-1.7E308, 1.7E308] |
+| 8 | BINARY | 自定义 | 记录单字节字符串,建议只用于处理 ASCII 可见字符,中文等多字节字符需使用 nchar。 |
+| 9 | SMALLINT | 2 | 短整型, 范围 [-32768, 32767] |
+| 10 | SMALLINT UNSIGNED | 2| 无符号短整型,范围 [0, 655357] |
+| 11 | TINYINT | 1 | 单字节整型,范围 [-128, 127] |
+| 12 | TINYINT UNSIGNED | 1 | 无符号单字节整型,范围 [0, 255] |
+| 13 | BOOL | 1 | 布尔型,{true, false} |
+| 14 | NCHAR | 自定义 | 记录包含多字节字符在内的字符串,如中文字符。每个 nchar 字符占用 4 bytes 的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 `\’`。nchar 使用时须指定字符串大小,类型为 nchar(10) 的列表示此列的字符串最多存储 10 个 nchar 字符,会固定占用 40 bytes 的空间。如果用户字符串长度超出声明长度,将会报错。 |
+| 15 | JSON | | json 数据类型, 只有 tag 可以是 json 格式 |
+| 16 | VARCHAR | 自定义 | BINARY类型的别名 |
-:::
:::note
-虽然 BINARY 类型在底层存储上支持字节型的二进制字符,但不同编程语言对二进制数据的处理方式并不保证一致,因此建议在 BINARY 类型中只存储 ASCII 可见字符,而避免存储不可见字符。多字节的数据,例如中文字符,则需要使用 NCHAR 类型进行保存。如果强行使用 BINARY 类型保存中文字符,虽然有时也能正常读写,但并不带有字符集信息,很容易出现数据乱码甚至数据损坏等情况。
+- TDengine 对 SQL 语句中的英文字符不区分大小写,自动转化为小写执行。因此用户大小写敏感的字符串及密码,需要使用单引号将字符串引起来。
+- 虽然 BINARY 类型在底层存储上支持字节型的二进制字符,但不同编程语言对二进制数据的处理方式并不保证一致,因此建议在 BINARY 类型中只存储 ASCII 可见字符,而避免存储不可见字符。多字节的数据,例如中文字符,则需要使用 NCHAR 类型进行保存。如果强行使用 BINARY 类型保存中文字符,虽然有时也能正常读写,但并不带有字符集信息,很容易出现数据乱码甚至数据损坏等情况。
+- BINARY 类型理论上最长可以有 16374 字节。binary 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 binary(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 byte 的存储空间,总共固定占用 20 bytes 的空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\’`。
+- SQL 语句中的数值类型将依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型,因此在使用时要注意相应类型越界的情况。例如,9999999999999999999 会认为超过长整型的上边界而溢出,而 9999999999999999999.0 会被认为是有效的浮点数。
:::
+
+## 常量
+TDengine支持多个类型的常量,细节如下表:
+
+| # | **语法** | **类型** | **说明** |
+| --- | :-------: | --------- | -------------------------------------- |
+| 1 | [{+ \| -}]123 | BIGINT | 整型数值的字面量的类型均为BIGINT。如果用户输入超过了BIGINT的表示范围,TDengine 按BIGINT对数值进行截断。|
+| 2 | 123.45 | DOUBLE | 浮点数值的字面量的类型均为DOUBLE。TDengine依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型。|
+| 3 | 1.2E3 | DOUBLE | 科学计数法的字面量的类型为DOUBLE。|
+| 4 | 'abc' | BINARY | 单引号括住的内容为字符串字面值,其类型为BINARY,BINARY的size为实际的字符个数。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 \'。|
+| 5 | "abc" | BINARY | 双引号括住的内容为字符串字面值,其类型为BINARY,BINARY的size为实际的字符个数。对于字符串内的双引号,可以用转义字符反斜线加单引号来表示,即 \"。 |
+| 6 | TIMESTAMP {'literal' \| "literal"} | TIMESTAMP | TIMESTAMP关键字表示后面的字符串字面量需要被解释为TIMESTAMP类型。字符串需要满足YYYY-MM-DD HH:mm:ss.MS格式,其时间分辨率为当前数据库的时间分辨率。 |
+| 7 | {TRUE \| FALSE} | BOOL | 布尔类型字面量。 |
+| 8 | {'' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL } | -- | 空值字面量。可以用于任意类型。|
+
:::note
-SQL 语句中的数值类型将依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型,因此在使用时要注意相应类型越界的情况。例如,9999999999999999999 会认为超过长整型的上边界而溢出,而 9999999999999999999.0 会被认为是有效的浮点数。
+- TDengine依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型,因此在使用时要注意相应类型越界的情况。例如,9999999999999999999会认为超过长整型的上边界而溢出,而9999999999999999999.0会被认为是有效的浮点数。
:::
diff --git a/docs-cn/12-taos-sql/03-table.md b/docs-cn/12-taos-sql/03-table.md
index 675c157b3def0d670f771f55b767f3ca4f2a28af..d7235f312933ec46ed427d5da7e2c5a229fa2926 100644
--- a/docs-cn/12-taos-sql/03-table.md
+++ b/docs-cn/12-taos-sql/03-table.md
@@ -12,7 +12,7 @@ CREATE TABLE [IF NOT EXISTS] tb_name (timestamp_field_name TIMESTAMP, field1_nam
1. 表的第一个字段必须是 TIMESTAMP,并且系统自动将其设为主键;
2. 表名最大长度为 192;
-3. 表的每行长度不能超过 16k 个字符;(注意:每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)
+3. 表的每行长度不能超过 48KB;(注意:每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)
4. 子表名只能由字母、数字和下划线组成,且不能以数字开头,不区分大小写
5. 使用数据类型 binary 或 nchar,需指定其最长的字节数,如 binary(20),表示 20 字节;
6. 为了兼容支持更多形式的表名,TDengine 引入新的转义符 "\`",可以让表名与关键词不冲突,同时不受限于上述表名称合法性约束检查。但是同样具有长度限制要求。使用转义字符以后,不再对转义字符中的内容进行大小写统一。
diff --git a/docs-cn/12-taos-sql/04-stable.md b/docs-cn/12-taos-sql/04-stable.md
index a3c227317c85917b64b2477994d335710610ec70..3901427736e80bc8dd0dd87b454947af6e586561 100644
--- a/docs-cn/12-taos-sql/04-stable.md
+++ b/docs-cn/12-taos-sql/04-stable.md
@@ -86,7 +86,7 @@ ALTER STABLE stb_name MODIFY COLUMN field_name data_type(length);
ALTER STABLE stb_name ADD TAG new_tag_name tag_type;
```
-为 STable 增加一个新的标签,并指定新标签的类型。标签总数不能超过 128 个,总长度不超过 16k 个字符。
+为 STable 增加一个新的标签,并指定新标签的类型。标签总数不能超过 128 个,总长度不超过 16KB 。
### 删除标签
diff --git a/docs-cn/12-taos-sql/05-insert.md b/docs-cn/12-taos-sql/05-insert.md
index e542e442b78c9033ae37196f4913a7c67fb19d8b..04118303f3f6517d65d8ecbbe9fdeb774a3177b7 100644
--- a/docs-cn/12-taos-sql/05-insert.md
+++ b/docs-cn/12-taos-sql/05-insert.md
@@ -67,7 +67,7 @@ INSERT INTO d1001 VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-
如果用户在写数据时并不确定某个表是否存在,此时可以在写入数据时使用自动建表语法来创建不存在的表,若该表已存在则不会建立新表。自动建表时,要求必须以超级表为模板,并写明数据表的 TAGS 取值。例如:
```
-INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32);
+INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32);
```
也可以在自动建表时,只是指定部分 TAGS 列的取值,未被指定的 TAGS 列将置为 NULL。例如:
@@ -79,7 +79,7 @@ INSERT INTO d21001 USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:33.
自动建表语法也支持在一条语句中向多个表插入记录。例如:
```
-INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33)
+INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33)
d21002 USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:34.255', 10.15, 217, 0.33)
d21003 USING meters (groupId) TAGS (2) (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31);
```
@@ -108,13 +108,13 @@ INSERT INTO d1001 FILE '/tmp/csvfile.csv';
从 2.1.5.0 版本开始,支持在插入来自 CSV 文件的数据时,以超级表为模板来自动创建不存在的数据表。例如:
```
-INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) FILE '/tmp/csvfile.csv';
+INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile.csv';
```
也可以在一条语句中向多个表以自动建表的方式插入记录。例如:
```
-INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) FILE '/tmp/csvfile_21001.csv'
+INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile_21001.csv'
d21002 USING meters (groupId) TAGS (2) FILE '/tmp/csvfile_21002.csv';
```
@@ -137,7 +137,7 @@ Query OK, 1 row(s) in set (0.001029s)
taos> SHOW TABLES;
Query OK, 0 row(s) in set (0.000946s)
-taos> INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2) VALUES('a');
+taos> INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('a');
DB error: invalid SQL: 'a' (invalid timestamp) (0.039494s)
diff --git a/docs-cn/12-taos-sql/06-select.md b/docs-cn/12-taos-sql/06-select.md
index 3a860119cfe664f9ac3b0ebd046b5f4f0a612118..92abc4344b7562842fae71a84fe0cb9a168596ed 100644
--- a/docs-cn/12-taos-sql/06-select.md
+++ b/docs-cn/12-taos-sql/06-select.md
@@ -40,15 +40,15 @@ Query OK, 3 row(s) in set (0.001165s)
taos> SELECT * FROM meters;
ts | current | voltage | phase | location | groupid |
=====================================================================================================================================
- 2018-10-03 14:38:05.500 | 11.80000 | 221 | 0.28000 | Beijing.Haidian | 2 |
- 2018-10-03 14:38:16.600 | 13.40000 | 223 | 0.29000 | Beijing.Haidian | 2 |
- 2018-10-03 14:38:05.000 | 10.80000 | 223 | 0.29000 | Beijing.Haidian | 3 |
- 2018-10-03 14:38:06.500 | 11.50000 | 221 | 0.35000 | Beijing.Haidian | 3 |
- 2018-10-03 14:38:04.000 | 10.20000 | 220 | 0.23000 | Beijing.Chaoyang | 3 |
- 2018-10-03 14:38:16.650 | 10.30000 | 218 | 0.25000 | Beijing.Chaoyang | 3 |
- 2018-10-03 14:38:05.000 | 10.30000 | 219 | 0.31000 | Beijing.Chaoyang | 2 |
- 2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 | Beijing.Chaoyang | 2 |
- 2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 | Beijing.Chaoyang | 2 |
+ 2018-10-03 14:38:05.500 | 11.80000 | 221 | 0.28000 | California.LosAngeles | 2 |
+ 2018-10-03 14:38:16.600 | 13.40000 | 223 | 0.29000 | California.LosAngeles | 2 |
+ 2018-10-03 14:38:05.000 | 10.80000 | 223 | 0.29000 | California.LosAngeles | 3 |
+ 2018-10-03 14:38:06.500 | 11.50000 | 221 | 0.35000 | California.LosAngeles | 3 |
+ 2018-10-03 14:38:04.000 | 10.20000 | 220 | 0.23000 | California.SanFrancisco | 3 |
+ 2018-10-03 14:38:16.650 | 10.30000 | 218 | 0.25000 | California.SanFrancisco | 3 |
+ 2018-10-03 14:38:05.000 | 10.30000 | 219 | 0.31000 | California.SanFrancisco | 2 |
+ 2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 | California.SanFrancisco | 2 |
+ 2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 | California.SanFrancisco | 2 |
Query OK, 9 row(s) in set (0.002022s)
```
@@ -104,8 +104,8 @@ Query OK, 1 row(s) in set (0.000849s)
taos> SELECT location, groupid, current FROM d1001 LIMIT 2;
location | groupid | current |
======================================================================
- Beijing.Chaoyang | 2 | 10.30000 |
- Beijing.Chaoyang | 2 | 12.60000 |
+ California.SanFrancisco | 2 | 10.30000 |
+ California.SanFrancisco | 2 | 12.60000 |
Query OK, 2 row(s) in set (0.003112s)
```
@@ -284,10 +284,10 @@ SELECT COUNT(TBNAME) FROM meters;
taos> SELECT TBNAME, location FROM meters;
tbname | location |
==================================================================
- d1004 | Beijing.Haidian |
- d1003 | Beijing.Haidian |
- d1002 | Beijing.Chaoyang |
- d1001 | Beijing.Chaoyang |
+ d1004 | California.LosAngeles |
+ d1003 | California.LosAngeles |
+ d1002 | California.SanFrancisco |
+ d1001 | California.SanFrancisco |
Query OK, 4 row(s) in set (0.000881s)
taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2;
@@ -327,15 +327,15 @@ Query OK, 1 row(s) in set (0.001091s)
- <\> 算子也可以写为 != ,请注意,这个算子不能用于数据表第一列的 timestamp 字段。
- like 算子使用通配符字符串进行匹配检查。
- - 在通配符字符串中:'%'(百分号)匹配 0 到任意个字符;'\_'(下划线)匹配单个任意 ASCII 字符。
- - 如果希望匹配字符串中原本就带有的 \_(下划线)字符,那么可以在通配符字符串中写作 `\_`,也即加一个反斜线来进行转义。(从 2.2.0.0 版本开始支持)
- - 通配符字符串最长不能超过 20 字节。(从 2.1.6.1 版本开始,通配符字符串的长度放宽到了 100 字节,并可以通过 taos.cfg 中的 maxWildCardsLength 参数来配置这一长度限制。但不建议使用太长的通配符字符串,将有可能严重影响 LIKE 操作的执行性能。)
+ - 在通配符字符串中:'%'(百分号)匹配 0 到任意个字符;'\_'(下划线)匹配单个任意 ASCII 字符。
+ - 如果希望匹配字符串中原本就带有的 \_(下划线)字符,那么可以在通配符字符串中写作 `\_`,也即加一个反斜线来进行转义。(从 2.2.0.0 版本开始支持)
+ - 通配符字符串最长不能超过 20 字节。(从 2.1.6.1 版本开始,通配符字符串的长度放宽到了 100 字节,并可以通过 taos.cfg 中的 maxWildCardsLength 参数来配置这一长度限制。但不建议使用太长的通配符字符串,将有可能严重影响 LIKE 操作的执行性能。)
- 同时进行多个字段的范围过滤,需要使用关键词 AND 来连接不同的查询条件,暂不支持 OR 连接的不同列之间的查询过滤条件。
- - 从 2.3.0.0 版本开始,已支持完整的同一列和/或不同列间的 AND/OR 运算。
+ - 从 2.3.0.0 版本开始,已支持完整的同一列和/或不同列间的 AND/OR 运算。
- 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如: `((value > 20 AND value < 30) OR (value < 12))`。
- - 从 2.3.0.0 版本开始,允许使用多个时间过滤条件,但首列时间戳的过滤运算结果只能包含一个区间。
+ - 从 2.3.0.0 版本开始,允许使用多个时间过滤条件,但首列时间戳的过滤运算结果只能包含一个区间。
- 从 2.0.17.0 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。
-- 从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('Beijing', 'Shanghai')`。说明:BOOL 类型写作 `{true, false}` 或 `{0, 1}` 均可,但不能写作 0、1 之外的整数;FLOAT 和 DOUBLE 类型会受到浮点数精度影响,集合内的值在精度范围内认为和数据行的值完全相等才能匹配成功;TIMESTAMP 类型支持非主键的列。
+- 从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('California.SanFrancisco', 'California.SanDieo')`。说明:BOOL 类型写作 `{true, false}` 或 `{0, 1}` 均可,但不能写作 0、1 之外的整数;FLOAT 和 DOUBLE 类型会受到浮点数精度影响,集合内的值在精度范围内认为和数据行的值完全相等才能匹配成功;TIMESTAMP 类型支持非主键的列。
- 从 2.3.0.0 版本开始,条件过滤开始支持正则表达式,关键字 match/nmatch,不区分大小写。
## 正则表达式过滤
@@ -380,7 +380,7 @@ WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;
:::note
-JOIN语句存在如下限制要求:
+JOIN 语句存在如下限制要求:
- 参与一条语句中 JOIN 操作的表/超级表最多可以有 10 个。
- 在包含 JOIN 操作的查询语句中不支持 FILL。
@@ -409,13 +409,13 @@ SELECT ... FROM (SELECT ... FROM ...) ...;
- 在内层和外层查询中,都支持普通的表间/超级表间 JOIN。内层查询的计算结果也可以再参与数据子表的 JOIN 操作。
- 目前内层查询、外层查询均不支持 UNION 操作。
- 内层查询支持的功能特性与非嵌套的查询语句能力是一致的。
- - 内层查询的 ORDER BY 子句一般没有意义,建议避免这样的写法以免无谓的资源消耗。
+ - 内层查询的 ORDER BY 子句一般没有意义,建议避免这样的写法以免无谓的资源消耗。
- 与非嵌套的查询语句相比,外层查询所能支持的功能特性存在如下限制:
- - 计算函数部分:
- - 如果内层查询的结果数据未提供时间戳,那么计算过程依赖时间戳的函数在外层会无法正常工作。例如:TOP, BOTTOM, FIRST, LAST, DIFF。
- - 计算过程需要两遍扫描的函数,在外层查询中无法正常工作。例如:此类函数包括:STDDEV, PERCENTILE。
- - 外层查询中不支持 IN 算子,但在内层中可以使用。
- - 外层查询不支持 GROUP BY。
+ - 计算函数部分:
+ - 如果内层查询的结果数据未提供时间戳,那么计算过程依赖时间戳的函数在外层会无法正常工作。例如:TOP, BOTTOM, FIRST, LAST, DIFF。
+ - 计算过程需要两遍扫描的函数,在外层查询中无法正常工作。例如:此类函数包括:STDDEV, PERCENTILE。
+ - 外层查询中不支持 IN 算子,但在内层中可以使用。
+ - 外层查询不支持 GROUP BY。
:::
diff --git a/docs-cn/12-taos-sql/07-function.md b/docs-cn/12-taos-sql/07-function.md
index f6e564419ddaa18931b0f0e0e4e7b5b3219a92f6..7674967f09fb0c9e3069097dbc2bf35e93256992 100644
--- a/docs-cn/12-taos-sql/07-function.md
+++ b/docs-cn/12-taos-sql/07-function.md
@@ -1,1794 +1,1217 @@
---
-sidebar_label: SQL 函数
-title: SQL 函数
+sidebar_label: 函数
+title: 函数
+toc_max_heading_level: 4
---
-## 聚合函数
+## 单行函数
-TDengine 支持针对数据的聚合查询。提供支持的聚合和选择函数如下:
+单行函数为查询结果中的每一行返回一个结果行。
-### COUNT
+### 数学函数
-```
-SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause];
-```
+#### ABS
-**功能说明**:统计表/超级表中记录行数或某列的非空值个数。
+```sql
+ SELECT ABS(field_name) FROM { tb_name | stb_name } [WHERE clause]
+```
-**返回数据类型**:长整型 INT64。
+**功能说明**:获得指定列的绝对值
-**应用字段**:应用全部字段。
+**返回结果类型**:如果输入值为整数,输出值是 UBIGINT 类型。如果输入值是 FLOAT/DOUBLE 数据类型,输出值是 DOUBLE 数据类型。
-**适用于**:表、超级表。
+**适用数据类型**:数值类型。
-**使用说明**:
+**嵌套子查询支持**:适用于内层查询和外层查询。
-- 可以使用星号(\*)来替代具体的字段,使用星号(\*)返回全部记录数量。
-- 针对同一表的(不包含 NULL 值)字段查询结果均相同。
-- 如果统计对象是具体的列,则返回该列中非 NULL 值的记录数量。
+**适用于**: 表和超级表
-**示例**:
+**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
-```
-taos> SELECT COUNT(*), COUNT(voltage) FROM meters;
- count(*) | count(voltage) |
-================================================
- 9 | 9 |
-Query OK, 1 row(s) in set (0.004475s)
+#### ACOS
-taos> SELECT COUNT(*), COUNT(voltage) FROM d1001;
- count(*) | count(voltage) |
-================================================
- 3 | 3 |
-Query OK, 1 row(s) in set (0.001075s)
+```sql
+ SELECT ACOS(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
-### AVG
-
-```
-SELECT AVG(field_name) FROM tb_name [WHERE clause];
-```
+**功能说明**:获得指定列的反余弦结果
-**功能说明**:统计表/超级表中某列的平均值。
+**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
-**返回数据类型**:双精度浮点数 Double。
+**适用数据类型**:数值类型。
-**应用字段**:不能应用在 timestamp、binary、nchar、bool 字段。
+**嵌套子查询支持**:适用于内层查询和外层查询。
-**适用于**:表、超级表。
+**适用于**: 表和超级表
-**示例**:
+**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
-```
-taos> SELECT AVG(current), AVG(voltage), AVG(phase) FROM meters;
- avg(current) | avg(voltage) | avg(phase) |
-====================================================================================
- 11.466666751 | 220.444444444 | 0.293333333 |
-Query OK, 1 row(s) in set (0.004135s)
+#### ASIN
-taos> SELECT AVG(current), AVG(voltage), AVG(phase) FROM d1001;
- avg(current) | avg(voltage) | avg(phase) |
-====================================================================================
- 11.733333588 | 219.333333333 | 0.316666673 |
-Query OK, 1 row(s) in set (0.000943s)
+```sql
+ SELECT ASIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
-### TWA
-
-```
-SELECT TWA(field_name) FROM tb_name WHERE clause;
-```
+**功能说明**:获得指定列的反正弦结果
-**功能说明**:时间加权平均函数。统计表中某列在一段时间内的时间加权平均。
+**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
-**返回数据类型**:双精度浮点数 Double。
+**适用数据类型**:数值类型。
-**应用字段**:不能应用在 timestamp、binary、nchar、bool 类型字段。
+**嵌套子查询支持**:适用于内层查询和外层查询。
-**适用于**:表、超级表。
+**适用于**: 表和超级表
-**使用说明**:
+**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
-- 从 2.1.3.0 版本开始,TWA 函数可以在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)。
-### IRATE
+#### ATAN
-```
-SELECT IRATE(field_name) FROM tb_name WHERE clause;
+```sql
+ SELECT ATAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
-**功能说明**:计算瞬时增长率。使用时间区间中最后两个样本数据来计算瞬时增长速率;如果这两个值呈递减关系,那么只取最后一个数用于计算,而不是使用二者差值。
+**功能说明**:获得指定列的反正切结果
-**返回数据类型**:双精度浮点数 Double。
+**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
-**应用字段**:不能应用在 timestamp、binary、nchar、bool 类型字段。
+**适用数据类型**:数值类型。
-**适用于**:表、超级表。
+**嵌套子查询支持**:适用于内层查询和外层查询。
-**使用说明**:
+**适用于**: 表和超级表
-- 从 2.1.3.0 版本开始此函数可用,IRATE 可以在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)。
+**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
-### SUM
+
+#### CEIL
```
-SELECT SUM(field_name) FROM tb_name [WHERE clause];
+SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
-**功能说明**:统计表/超级表中某列的和。
+**功能说明**:获得指定列的向上取整数的结果。
-**返回数据类型**:双精度浮点数 Double 和长整型 INT64。
+**返回结果类型**:与指定列的原始数据类型一致。例如,如果指定列的原始数据类型为 Float,那么返回的数据类型也为 Float;如果指定列的原始数据类型为 Double,那么返回的数据类型也为 Double。
-**应用字段**:不能应用在 timestamp、binary、nchar、bool 类型字段。
+**适用数据类型**:数值类型。
-**适用于**:表、超级表。
+**适用于**: 普通表、超级表。
-**示例**:
+**嵌套子查询支持**:适用于内层查询和外层查询。
-```
-taos> SELECT SUM(current), SUM(voltage), SUM(phase) FROM meters;
- sum(current) | sum(voltage) | sum(phase) |
-================================================================================
- 103.200000763 | 1984 | 2.640000001 |
-Query OK, 1 row(s) in set (0.001702s)
+**使用说明**:
-taos> SELECT SUM(current), SUM(voltage), SUM(phase) FROM d1001;
- sum(current) | sum(voltage) | sum(phase) |
-================================================================================
- 35.200000763 | 658 | 0.950000018 |
-Query OK, 1 row(s) in set (0.000980s)
-```
+- 支持 +、-、\*、/ 运算,如 ceil(col1) + ceil(col2)。
+- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
-### STDDEV
+#### COS
-```
-SELECT STDDEV(field_name) FROM tb_name [WHERE clause];
+```sql
+ SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
-**功能说明**:统计表中某列的均方差。
+**功能说明**:获得指定列的余弦结果
-**返回数据类型**:双精度浮点数 Double。
+**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
-**应用字段**:不能应用在 timestamp、binary、nchar、bool 类型字段。
+**适用数据类型**:数值类型。
-**适用于**:表、超级表(从 2.0.15.1 版本开始)
+**嵌套子查询支持**:适用于内层查询和外层查询。
-**示例**:
+**适用于**: 表和超级表
-```
-taos> SELECT STDDEV(current) FROM d1001;
- stddev(current) |
-============================
- 1.020892909 |
-Query OK, 1 row(s) in set (0.000915s)
-```
+**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
-### LEASTSQUARES
+#### FLOOR
```
-SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause];
+SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
-**功能说明**:统计表中某列的值是主键(时间戳)的拟合直线方程。start_val 是自变量初始值,step_val 是自变量的步长值。
-
-**返回数据类型**:字符串表达式(斜率, 截距)。
-
-**应用字段**:不能应用在 timestamp、binary、nchar、bool 类型字段。
-
-**适用于**:表。
+**功能说明**:获得指定列的向下取整数的结果。
+ 其他使用说明参见 CEIL 函数描述。
-**示例**:
+#### LOG
-```
-taos> SELECT LEASTSQUARES(current, 1, 1) FROM d1001;
- leastsquares(current, 1, 1) |
-=====================================================
-{slop:1.000000, intercept:9.733334} |
-Query OK, 1 row(s) in set (0.000921s)
+```sql
+ SELECT LOG(field_name, base) FROM { tb_name | stb_name } [WHERE clause]
```
-### MODE
+**功能说明**:获得指定列对于底数 base 的对数
-```
-SELECT MODE(field_name) FROM tb_name [WHERE clause];
-```
+**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
-**功能说明**:返回出现频率最高的值,若存在多个频率相同的最高值,输出空。不能匹配标签、时间戳输出。
+**适用数据类型**:数值类型。
-**返回数据类型**:同应用的字段。
+**嵌套子查询支持**:适用于内层查询和外层查询。
-**应用字段**:适合于除时间主列外的任何类型字段。
+**适用于**: 表和超级表
-**使用说明**:由于返回数据量未知,考虑到内存因素,为了函数可以正常返回结果,建议不重复的数据量在 10 万级别,否则会报错。
+**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
-**支持的版本**:2.6.0.0 及以后的版本。
-**示例**:
+#### POW
+```sql
+ SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause]
```
-taos> select voltage from d002;
- voltage |
-========================
- 1 |
- 1 |
- 2 |
- 19 |
-Query OK, 4 row(s) in set (0.003545s)
-taos> select mode(voltage) from d002;
- mode(voltage) |
-========================
- 1 |
-Query OK, 1 row(s) in set (0.019393s)
-```
+**功能说明**:获得指定列的指数为 power 的幂
-### HYPERLOGLOG
+**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
-```
-SELECT HYPERLOGLOG(field_name) FROM { tb_name | stb_name } [WHERE clause];
-```
+**适用数据类型**:数值类型。
-**功能说明**:
- - 采用 hyperloglog 算法,返回某列的基数。该算法在数据量很大的情况下,可以明显降低内存的占用,但是求出来的基数是个估算值,标准误差(标准误差是多次实验,每次的平均数的标准差,不是与真实结果的误差)为 0.81%。
- - 在数据量较少的时候该算法不是很准确,可以使用 select count(data) from (select unique(col) as data from table) 的方法。
+**嵌套子查询支持**:适用于内层查询和外层查询。
-**返回结果类型**:整形。
+**适用于**: 表和超级表
-**应用字段**:适合于任何类型字段。
+**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
-**支持的版本**:2.6.0.0 及以后的版本。
-**示例**:
+#### ROUND
```
-taos> select dbig from shll;
- dbig |
-========================
- 1 |
- 1 |
- 1 |
- NULL |
- 2 |
- 19 |
- NULL |
- 9 |
-Query OK, 8 row(s) in set (0.003755s)
-
-taos> select hyperloglog(dbig) from shll;
- hyperloglog(dbig)|
-========================
- 4 |
-Query OK, 1 row(s) in set (0.008388s)
+SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
-## 选择函数
+**功能说明**:获得指定列的四舍五入的结果。
+ 其他使用说明参见 CEIL 函数描述。
-在使用所有的选择函数的时候,可以同时指定输出 ts 列或标签列(包括 tbname),这样就可以方便地知道被选出的值是源于哪个数据行的。
-### MIN
+#### SIN
+```sql
+ SELECT SIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
-SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];
-```
-
-**功能说明**:统计表/超级表中某列的值最小值。
-**返回数据类型**:同应用的字段。
+**功能说明**:获得指定列的正弦结果
-**应用字段**:不能应用在 timestamp、binary、nchar、bool 类型字段。
+**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
-**适用于**:表、超级表。
+**适用数据类型**:数值类型。
-**示例**:
+**嵌套子查询支持**:适用于内层查询和外层查询。
-```
-taos> SELECT MIN(current), MIN(voltage) FROM meters;
- min(current) | min(voltage) |
-======================================
- 10.20000 | 218 |
-Query OK, 1 row(s) in set (0.001765s)
+**适用于**: 表和超级表
-taos> SELECT MIN(current), MIN(voltage) FROM d1001;
- min(current) | min(voltage) |
-======================================
- 10.30000 | 218 |
-Query OK, 1 row(s) in set (0.000950s)
-```
+**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
-### MAX
+#### SQRT
-```
-SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause];
+```sql
+ SELECT SQRT(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
-**功能说明**:统计表/超级表中某列的值最大值。
+**功能说明**:获得指定列的平方根
-**返回数据类型**:同应用的字段。
+**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
-**应用字段**:不能应用在 timestamp、binary、nchar、bool 类型字段。
+**适用数据类型**:数值类型。
-**适用于**:表、超级表。
+**嵌套子查询支持**:适用于内层查询和外层查询。
-**示例**:
+**适用于**: 表和超级表
-```
-taos> SELECT MAX(current), MAX(voltage) FROM meters;
- max(current) | max(voltage) |
-======================================
- 13.40000 | 223 |
-Query OK, 1 row(s) in set (0.001123s)
+**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
-taos> SELECT MAX(current), MAX(voltage) FROM d1001;
- max(current) | max(voltage) |
-======================================
- 12.60000 | 221 |
-Query OK, 1 row(s) in set (0.000987s)
+#### TAN
+
+```sql
+ SELECT TAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
-### FIRST
+**功能说明**:获得指定列的正切结果
-```
-SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause];
-```
+**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
-**功能说明**:统计表/超级表中某列的值最先写入的非 NULL 值。
+**适用数据类型**:数值类型。
-**返回数据类型**:同应用的字段。
+**嵌套子查询支持**:适用于内层查询和外层查询。
-**应用字段**:所有字段。
+**适用于**: 表和超级表
-**适用于**:表、超级表。
+**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
-**使用说明**:
+### 字符串函数
-- 如果要返回各个列的首个(时间戳最小)非 NULL 值,可以使用 FIRST(\*);
-- 如果结果集中的某列全部为 NULL 值,则该列的返回结果也是 NULL;
-- 如果结果集中所有列全部为 NULL 值,则不返回结果。
+字符串函数的输入参数为字符串类型,返回结果为数值类型或字符串类型。
-**示例**:
+#### CHAR_LENGTH
```
-taos> SELECT FIRST(*) FROM meters;
- first(ts) | first(current) | first(voltage) | first(phase) |
-=========================================================================================
-2018-10-03 14:38:04.000 | 10.20000 | 220 | 0.23000 |
-Query OK, 1 row(s) in set (0.004767s)
-
-taos> SELECT FIRST(current) FROM d1002;
- first(current) |
-=======================
- 10.20000 |
-Query OK, 1 row(s) in set (0.001023s)
+ SELECT CHAR_LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
```
-### LAST
+**功能说明**:以字符计数的字符串长度。
-```
-SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause];
-```
+**返回结果类型**:INT。如果输入值为NULL,输出值为NULL。
-**功能说明**:统计表/超级表中某列的值最后写入的非 NULL 值。
+**适用数据类型**:VARCHAR, NCHAR
-**返回数据类型**:同应用的字段。
+**嵌套子查询支持**:适用于内层查询和外层查询。
-**应用字段**:所有字段。
+**适用于**: 表和超级表
-**适用于**:表、超级表。
+#### CONCAT
-**使用说明**:
+```sql
+ SELECT CONCAT(str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause]
+```
-- 如果要返回各个列的最后(时间戳最大)一个非 NULL 值,可以使用 LAST(\*);
-- 如果结果集中的某列全部为 NULL 值,则该列的返回结果也是 NULL;如果结果集中所有列全部为 NULL 值,则不返回结果。
-- 在用于超级表时,时间戳完全一样且同为最大的数据行可能有多个,那么会从中随机返回一条,而并不保证多次运行所挑选的数据行必然一致。
+**功能说明**:字符串连接函数。
+**返回结果类型**:如果所有参数均为 VARCHAR 类型,则结果类型为 VARCHAR。如果参数包含NCHAR类型,则结果类型为NCHAR。如果输入值为NULL,输出值为NULL。
-**示例**:
+**适用数据类型**:VARCHAR, NCHAR。 该函数最小参数个数为2个,最大参数个数为8个。
-```
-taos> SELECT LAST(*) FROM meters;
- last(ts) | last(current) | last(voltage) | last(phase) |
-========================================================================================
-2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 |
-Query OK, 1 row(s) in set (0.001452s)
+**嵌套子查询支持**:适用于内层查询和外层查询。
-taos> SELECT LAST(current) FROM d1002;
- last(current) |
-=======================
- 10.30000 |
-Query OK, 1 row(s) in set (0.000843s)
-```
+**适用于**: 表和超级表
-### TOP
+
+#### CONCAT_WS
```
-SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
+ SELECT CONCAT_WS(separator, str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause]
```
-**功能说明**: 统计表/超级表中某列的值最大 _k_ 个非 NULL 值。如果多条数据取值一样,全部取用又会超出 k 条限制时,系统会从相同值中随机选取符合要求的数量返回。
+**功能说明**:带分隔符的字符串连接函数。
-**返回数据类型**:同应用的字段。
+**返回结果类型**:如果所有参数均为VARCHAR类型,则结果类型为VARCHAR。如果参数包含NCHAR类型,则结果类型为NCHAR。如果输入值为NULL,输出值为NULL。如果separator值不为NULL,其他输入为NULL,输出为空串。
-**应用字段**:不能应用在 timestamp、binary、nchar、bool 类型字段。
+**适用数据类型**:VARCHAR, NCHAR。 该函数最小参数个数为3个,最大参数个数为9个。
-**适用于**:表、超级表。
+**嵌套子查询支持**:适用于内层查询和外层查询。
-**使用说明**:
+**适用于**: 表和超级表
-- *k*值取值范围 1≤*k*≤100;
-- 系统同时返回该记录关联的时间戳列;
-- 限制:TOP 函数不支持 FILL 子句。
-**示例**:
+#### LENGTH
```
-taos> SELECT TOP(current, 3) FROM meters;
- ts | top(current, 3) |
-=================================================
-2018-10-03 14:38:15.000 | 12.60000 |
-2018-10-03 14:38:16.600 | 13.40000 |
-2018-10-03 14:38:16.800 | 12.30000 |
-Query OK, 3 row(s) in set (0.001548s)
-
-taos> SELECT TOP(current, 2) FROM d1001;
- ts | top(current, 2) |
-=================================================
-2018-10-03 14:38:15.000 | 12.60000 |
-2018-10-03 14:38:16.800 | 12.30000 |
-Query OK, 2 row(s) in set (0.000810s)
+ SELECT LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
```
-### BOTTOM
+**功能说明**:以字节计数的字符串长度。
-```
-SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
-```
+**返回结果类型**:INT。
-**功能说明**:统计表/超级表中某列的值最小 _k_ 个非 NULL 值。如果多条数据取值一样,全部取用又会超出 k 条限制时,系统会从相同值中随机选取符合要求的数量返回。
+**适用数据类型**:输入参数是 VARCHAR 类型或者 NCHAR 类型的字符串或者列。
-**返回数据类型**:同应用的字段。
+**嵌套子查询支持**:适用于内层查询和外层查询。
-**应用字段**:不能应用在 timestamp、binary、nchar、bool 类型字段。
+**适用于**: 表和超级表
-**适用于**:表、超级表。
-**使用说明**:
+#### LOWER
-- *k*值取值范围 1≤*k*≤100;
-- 系统同时返回该记录关联的时间戳列;
-- 限制:BOTTOM 函数不支持 FILL 子句。
+```
+ SELECT LOWER(str|column) FROM { tb_name | stb_name } [WHERE clause]
+```
-**示例**:
+**功能说明**:将字符串参数值转换为全小写字母。
-```
-taos> SELECT BOTTOM(voltage, 2) FROM meters;
- ts | bottom(voltage, 2) |
-===============================================
-2018-10-03 14:38:15.000 | 218 |
-2018-10-03 14:38:16.650 | 218 |
-Query OK, 2 row(s) in set (0.001332s)
+**返回结果类型**:同输入类型。如果输入值为NULL,输出值为NULL。
-taos> SELECT BOTTOM(current, 2) FROM d1001;
- ts | bottom(current, 2) |
-=================================================
-2018-10-03 14:38:05.000 | 10.30000 |
-2018-10-03 14:38:16.800 | 12.30000 |
-Query OK, 2 row(s) in set (0.000793s)
-```
+**适用数据类型**:输入参数是 VARCHAR 类型或者 NCHAR 类型的字符串或者列。
-### PERCENTILE
+**嵌套子查询支持**:适用于内层查询和外层查询。
+
+**适用于**: 表和超级表
+
+
+#### LTRIM
```
-SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause];
+ SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
```
-**功能说明**:统计表中某列的值百分比分位数。
-
-**返回数据类型**: 双精度浮点数 Double。
+**功能说明**:返回清除左边空格后的字符串。
-**应用字段**:不能应用在 timestamp、binary、nchar、bool 类型字段。
+**返回结果类型**:同输入类型。如果输入值为NULL,输出值为NULL。
-**适用于**:表。
+**适用数据类型**:输入参数是 VARCHAR 类型或者 NCHAR 类型的字符串或者列。
-**使用说明**:*P*值取值范围 0≤*P*≤100,为 0 的时候等同于 MIN,为 100 的时候等同于 MAX。
+**嵌套子查询支持**:适用于内层查询和外层查询。
-**示例**:
+**适用于**: 表和超级表
-```
-taos> SELECT PERCENTILE(current, 20) FROM d1001;
-percentile(current, 20) |
-============================
- 11.100000191 |
-Query OK, 1 row(s) in set (0.000787s)
-```
-### APERCENTILE
+#### RTRIM
```
-SELECT APERCENTILE(field_name, P[, algo_type])
-FROM { tb_name | stb_name } [WHERE clause]
+ SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
```
-**功能说明**:统计表/超级表中指定列的值百分比分位数,与 PERCENTILE 函数相似,但是返回近似结果。
+**功能说明**:返回清除右边空格后的字符串。
-**返回数据类型**: 双精度浮点数 Double。
+**返回结果类型**:同输入类型。如果输入值为NULL,输出值为NULL。
-**应用字段**:不能应用在 timestamp、binary、nchar、bool 类型字段。
+**适用数据类型**:输入参数是 VARCHAR 类型或者 NCHAR 类型的字符串或者列。
-**适用于**:表、超级表。
+**嵌套子查询支持**:适用于内层查询和外层查询。
-**使用说明**
+**适用于**: 表和超级表
-- **P**值有效取值范围 0≤P≤100,为 0 的时候等同于 MIN,为 100 的时候等同于 MAX;
-- **algo_type**的有效输入:**default** 和 **t-digest**
-- 用于指定计算近似分位数的算法。可不提供第三个参数的输入,此时将使用 default 的算法进行计算,即 apercentile(column_name, 50, "default") 与 apercentile(column_name, 50) 等价。
-- 当使用“t-digest”参数的时候,将使用 t-digest 方式采样计算近似分位数。但该参数指定计算算法的功能从 2.2.0.x 版本开始支持,2.2.0.0 之前的版本不支持指定使用算法的功能。
-**嵌套子查询支持**:适用于内层查询和外层查询。
+#### SUBSTR
```
-taos> SELECT APERCENTILE(current, 20) FROM d1001;
-apercentile(current, 20) |
-============================
- 10.300000191 |
-Query OK, 1 row(s) in set (0.000645s)
-
-taos> select apercentile (count, 80, 'default') from stb1;
- apercentile (c0, 80, 'default') |
-==================================
- 601920857.210056424 |
-Query OK, 1 row(s) in set (0.012363s)
-
-taos> select apercentile (count, 80, 't-digest') from stb1;
- apercentile (c0, 80, 't-digest') |
-===================================
- 605869120.966666579 |
-Query OK, 1 row(s) in set (0.011639s)
-```
-
-### LAST_ROW
-
-```
-SELECT LAST_ROW(field_name) FROM { tb_name | stb_name };
+ SELECT SUBSTR(str,pos[,len]) FROM { tb_name | stb_name } [WHERE clause]
```
-**功能说明**:返回表/超级表的最后一条记录。
-
-**返回数据类型**:同应用的字段。
-
-**应用字段**:所有字段。
-
-**适用于**:表、超级表。
+**功能说明**:从源字符串 str 中的指定位置 pos 开始取一个长度为 len 的子串并返回。
-**使用说明**:
+**返回结果类型**:同输入类型。如果输入值为NULL,输出值为NULL。
-- 在用于超级表时,时间戳完全一样且同为最大的数据行可能有多个,那么会从中随机返回一条,而并不保证多次运行所挑选的数据行必然一致。
-- 不能与 INTERVAL 一起使用。
+**适用数据类型**:输入参数是 VARCHAR 类型或者 NCHAR 类型的字符串或者列。输入参数pos可以为正数,也可以为负数。如果pos是正数,表示开始位置从字符串开头正数计算。如果pos为负数,表示开始位置从字符串结尾倒数计算。如果输入参数len被忽略,返回的子串包含从pos开始的整个字串。
-**示例**:
+**嵌套子查询支持**:适用于内层查询和外层查询。
-```
- taos> SELECT LAST_ROW(current) FROM meters;
- last_row(current) |
- =======================
- 12.30000 |
- Query OK, 1 row(s) in set (0.001238s)
+**适用于**: 表和超级表
- taos> SELECT LAST_ROW(current) FROM d1002;
- last_row(current) |
- =======================
- 10.30000 |
- Query OK, 1 row(s) in set (0.001042s)
-```
-### INTERP [2.3.1 及之后的版本]
+#### UPPER
```
-SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ RANGE(timestamp1,timestamp2) ] [EVERY(interval)] [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})];
+ SELECT UPPER(str|column) FROM { tb_name | stb_name } [WHERE clause]
```
-**功能说明**:返回表/超级表的指定时间截面指定列的记录值(插值)。
-
-**返回数据类型**:同字段类型。
-
-**应用字段**:数值型字段。
+**功能说明**:将字符串参数值转换为全大写字母。
-**适用于**:表、超级表、嵌套查询。
+**返回结果类型**:同输入类型。如果输入值为NULL,输出值为NULL。
+**适用数据类型**:输入参数是 VARCHAR 类型或者 NCHAR 类型的字符串或者列。
-**使用说明**
+**嵌套子查询支持**:适用于内层查询和外层查询。
-- INTERP 用于在指定时间断面获取指定列的记录值,如果该时间断面不存在符合条件的行数据,那么会根据 FILL 参数的设定进行插值。
-- INTERP 的输入数据为指定列的数据,可以通过条件语句(where 子句)来对原始列数据进行过滤,如果没有指定过滤条件则输入为全部数据。
-- INTERP 的输出时间范围根据 RANGE(timestamp1,timestamp2)字段来指定,需满足 timestamp1<=timestamp2。其中 timestamp1(必选值)为输出时间范围的起始值,即如果 timestamp1 时刻符合插值条件则 timestamp1 为输出的第一条记录,timestamp2(必选值)为输出时间范围的结束值,即输出的最后一条记录的 timestamp 不能大于 timestamp2。如果没有指定 RANGE,那么满足过滤条件的输入数据中第一条记录的 timestamp 即为 timestamp1,最后一条记录的 timestamp 即为 timestamp2,同样也满足 timestamp1 <= timestamp2。
-- INTERP 根据 EVERY 字段来确定输出时间范围内的结果条数,即从 timestamp1 开始每隔固定长度的时间(EVERY 值)进行插值。如果没有指定 EVERY,则默认窗口大小为无穷大,即从 timestamp1 开始只有一个窗口。
-- INTERP 根据 FILL 字段来决定在每个符合输出条件的时刻如何进行插值,如果没有 FILL 字段则默认不插值,即输出为原始记录值或不输出(原始记录不存在)。
-- INTERP 只能在一个时间序列内进行插值,因此当作用于超级表时必须跟 group by tbname 一起使用,当作用嵌套查询外层时内层子查询不能含 GROUP BY 信息。
-- INTERP 的插值结果不受 ORDER BY timestamp 的影响,ORDER BY timestamp 只影响输出结果的排序。
+**适用于**: 表和超级表
-**SQL示例(基于文档中广泛使用的电表 schema )**:
-- 单点线性插值
+### 转换函数
-```
- taos> SELECT INTERP(current) FROM t1 RANGE('2017-7-14 18:40:00','2017-7-14 18:40:00') FILL(LINEAR);
-```
+转换函数将值从一种数据类型转换为另一种数据类型。
-- 在2017-07-14 18:00:00到2017-07-14 19:00:00间每隔5秒钟进行取值(不插值)
+#### CAST
-```
- taos> SELECT INTERP(current) FROM t1 RANGE('2017-7-14 18:00:00','2017-7-14 19:00:00') EVERY(5s);
+```sql
+ SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause]
```
-- 在2017-07-14 18:00:00到2017-07-14 19:00:00间每隔5秒钟进行线性插值
+**功能说明**:数据类型转换函数,输入参数 expression 支持普通列、常量、标量函数及它们之间的四则运算,只适用于 select 子句中。
-```
- taos> SELECT INTERP(current) FROM t1 RANGE('2017-7-14 18:00:00','2017-7-14 19:00:00') EVERY(5s) FILL(LINEAR);
-```
+**返回结果类型**:CAST 中指定的类型(type_name),可以是 BIGINT、BIGINT UNSIGNED、BINARY、VARCHAR、NCHAR和TIMESTAMP。
-- 在所有时间范围内每隔 5 秒钟进行向后插值
+**适用数据类型**:输入参数 expression 的类型可以是BLOB、MEDIUMBLOB和JSON外的所有类型
-```
- taos> SELECT INTERP(current) FROM t1 EVERY(5s) FILL(NEXT);
-```
-
-- 根据 2017-07-14 17:00:00 到 2017-07-14 20:00:00 间的数据进行从 2017-07-14 18:00:00 到 2017-07-14 19:00:00 间每隔 5 秒钟进行线性插值
+**使用说明**:
-```
- taos> SELECT INTERP(current) FROM t1 where ts >= '2017-07-14 17:00:00' and ts <= '2017-07-14 20:00:00' RANGE('2017-7-14 18:00:00','2017-7-14 19:00:00') EVERY(5s) FILL(LINEAR);
-```
+- 对于不能支持的类型转换会直接报错。
+- 如果输入值为NULL则输出值也为NULL。
+- 对于类型支持但某些值无法正确转换的情况对应的转换后的值以转换函数输出为准。目前可能遇到的几种情况:
+ 1)字符串类型转换数值类型时可能出现的无效字符情况,例如"a"可能转为0,但不会报错。
+ 2)转换到数值类型时,数值大于type_name可表示的范围时,则会溢出,但不会报错。
+ 3)转换到字符串类型时,如果转换后长度超过type_name的长度,则会截断,但不会报错。
-### INTERP [2.3.1 之前的版本]
+#### TO_ISO8601
-```
-SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})];
+```sql
+SELECT TO_ISO8601(ts_val | ts_col) FROM { tb_name | stb_name } [WHERE clause];
```
-**功能说明**:返回表/超级表的指定时间截面、指定字段的记录。
+**功能说明**:将 UNIX 时间戳转换成为 ISO8601 标准的日期时间格式,并附加客户端时区信息。
-**返回数据类型**:同字段类型。
+**返回结果数据类型**:VARCHAR 类型。
-**应用字段**:数值型字段。
+**适用数据类型**:UNIX 时间戳常量或是 TIMESTAMP 类型的列
**适用于**:表、超级表。
-**使用说明**:
-
-- 从 2.0.15.0 及以后版本可用
-- INTERP 必须指定时间断面,如果该时间断面不存在直接对应的数据,那么会根据 FILL 参数的设定进行插值。此外,条件语句里面可附带筛选条件,例如标签、tbname。
-- INTERP 查询要求查询的时间区间必须位于数据集合(表)的所有记录的时间范围之内。如果给定的时间戳位于时间范围之外,即使有插值指令,仍然不返回结果。
-- 单个 INTERP 函数查询只能够针对一个时间点进行查询,如果需要返回等时间间隔的断面数据,可以通过 INTERP 配合 EVERY 的方式来进行查询处理(而不是使用 INTERVAL),其含义是每隔固定长度的时间进行插值
-
-**示例**:
-
-```
- taos> SELECT INTERP(*) FROM meters WHERE ts='2017-7-14 18:40:00.004';
- interp(ts) | interp(current) | interp(voltage) | interp(phase) |
- ==========================================================================================
- 2017-07-14 18:40:00.004 | 9.84020 | 216 | 0.32222 |
- Query OK, 1 row(s) in set (0.002652s)
-```
+**使用说明**:
-如果给定的时间戳无对应的数据,在不指定插值生成策略的情况下,不会返回结果,如果指定了插值策略,会根据插值策略返回结果。
+- 如果输入是 UNIX 时间戳常量,返回格式精度由时间戳的位数决定;
+- 如果输入是 TIMSTAMP 类型的列,返回格式的时间戳精度与当前 DATABASE 设置的时间精度一致。
-```
- taos> SELECT INTERP(*) FROM meters WHERE tbname IN ('d636') AND ts='2017-7-14 18:40:00.005';
- Query OK, 0 row(s) in set (0.004022s)
- taos> SELECT INTERP(*) FROM meters WHERE tbname IN ('d636') AND ts='2017-7-14 18:40:00.005' FILL(PREV);
- interp(ts) | interp(current) | interp(voltage) | interp(phase) |
- ==========================================================================================
- 2017-07-14 18:40:00.005 | 9.88150 | 217 | 0.32500 |
- Query OK, 1 row(s) in set (0.003056s)
-```
+#### TO_JSON
-如下所示代码表示在时间区间 `['2017-7-14 18:40:00', '2017-7-14 18:40:00.014']` 中每隔 5 毫秒 进行一次断面计算。
-
-```
- taos> SELECT INTERP(current) FROM d636 WHERE ts>='2017-7-14 18:40:00' AND ts<='2017-7-14 18:40:00.014' EVERY(5a);
- ts | interp(current) |
- =================================================
- 2017-07-14 18:40:00.000 | 10.04179 |
- 2017-07-14 18:40:00.010 | 10.16123 |
- Query OK, 2 row(s) in set (0.003487s)
+```sql
+SELECT TO_JSON(str_literal) FROM { tb_name | stb_name } [WHERE clause];
```
-### TAIL
+**功能说明**: 将字符串常量转换为 JSON 类型。
-```
-SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause];
-```
+**返回结果数据类型**: JSON
-**功能说明**:返回跳过最后 offset_value 个,然后取连续 k 个记录,不忽略 NULL 值。offset_val 可以不输入。此时返回最后的 k 个记录。当有 offset_val 输入的情况下,该函数功能等效于 `order by ts desc LIMIT k OFFSET offset_val`。
+**适用数据类型**: JSON 字符串,形如 '{ "literal" : literal }'。'{}'表示空值。键必须为字符串字面量,值可以为数值字面量、字符串字面量、布尔字面量或空值字面量。str_literal中不支持转义符。
-**参数范围**:k: [1,100] offset_val: [0,100]。
-
-**返回结果数据类型**:同应用的字段。
+**适用于**: 表和超级表
-**应用字段**:适合于除时间主列外的任何类型字段。
+**嵌套子查询支持**:适用于内层查询和外层查询。
-**支持版本**:2.6.0.0 及之后的版本。
-**示例**:
+#### TO_UNIXTIMESTAMP
+```sql
+SELECT TO_UNIXTIMESTAMP(datetime_string | ts_col) FROM { tb_name | stb_name } [WHERE clause];
```
-taos> select ts,dbig from tail2;
- ts | dbig |
-==================================================
-2021-10-15 00:31:33.000 | 1 |
-2021-10-17 00:31:31.000 | NULL |
-2021-12-24 00:31:34.000 | 2 |
-2022-01-01 08:00:05.000 | 19 |
-2022-01-01 08:00:06.000 | NULL |
-2022-01-01 08:00:07.000 | 9 |
-Query OK, 6 row(s) in set (0.001952s)
-taos> select tail(dbig,2,2) from tail2;
-ts | tail(dbig,2,2) |
-==================================================
-2021-12-24 00:31:34.000 | 2 |
-2022-01-01 08:00:05.000 | 19 |
-Query OK, 2 row(s) in set (0.002307s)
-```
+**功能说明**:将日期时间格式的字符串转换成为 UNIX 时间戳。
-### UNIQUE
+**返回结果数据类型**:长整型 INT64。
-```
-SELECT UNIQUE(field_name) FROM {tb_name | stb_name} [WHERE clause];
-```
+**应用字段**:字符串常量或是 VARCHAR/NCHAR 类型的列。
-**功能说明**:返回该列的数值首次出现的值。该函数功能与 distinct 相似,但是可以匹配标签和时间戳信息。可以针对除时间列以外的字段进行查询,可以匹配标签和时间戳,其中的标签和时间戳是第一次出现时刻的标签和时间戳。
+**适用于**:表、超级表。
-**返回结果数据类型**:同应用的字段。
+**使用说明**:
-**应用字段**:适合于除时间类型以外的字段。
+- 输入的日期时间字符串须符合 ISO8601/RFC3339 标准,无法转换的字符串格式将返回 0。
+- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。
-**支持版本**:2.6.0.0 及之后的版本。
-**使用说明**:
+### 时间和日期函数
-- 该函数可以应用在普通表和超级表上。不能和窗口操作一起使用,例如 interval/state_window/session_window 。
-- 由于返回数据量未知,考虑到内存因素,为了函数可以正常返回结果,建议不重复的数据量在 10 万级别,否则会报错。
+时间和日期函数对时间戳类型进行操作。
-**示例**:
+所有返回当前时间的函数,如NOW、TODAY和TIMEZONE,在一条SQL语句中不论出现多少次都只会被计算一次。
-```
-taos> select ts,voltage from unique1;
- ts | voltage |
-==================================================
-2021-10-17 00:31:31.000 | 1 |
-2022-01-24 00:31:31.000 | 1 |
-2021-10-17 00:31:31.000 | 1 |
-2021-12-24 00:31:31.000 | 2 |
-2022-01-01 08:00:01.000 | 19 |
-2021-10-17 00:31:31.000 | NULL |
-2022-01-01 08:00:02.000 | NULL |
-2022-01-01 08:00:03.000 | 9 |
-Query OK, 8 row(s) in set (0.003018s)
+#### NOW
-taos> select unique(voltage) from unique1;
-ts | unique(voltage) |
-==================================================
-2021-10-17 00:31:31.000 | 1 |
-2021-10-17 00:31:31.000 | NULL |
-2021-12-24 00:31:31.000 | 2 |
-2022-01-01 08:00:01.000 | 19 |
-2022-01-01 08:00:03.000 | 9 |
-Query OK, 5 row(s) in set (0.108458s)
+```sql
+SELECT NOW() FROM { tb_name | stb_name } [WHERE clause];
+SELECT select_expr FROM { tb_name | stb_name } WHERE ts_col cond_operatior NOW();
+INSERT INTO tb_name VALUES (NOW(), ...);
```
-## 计算函数
-
-### DIFF
-
- ```sql
- SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHERE clause];
- ```
-
-**功能说明**:统计表中某列的值与前一行对应值的差。 ignore_negative 取值为 0|1 , 可以不填,默认值为 0. 不忽略负值。ignore_negative 为 1 时表示忽略负数。
+**功能说明**:返回客户端当前系统时间。
-**返回结果数据类型**:同应用字段。
+**返回结果数据类型**:TIMESTAMP 时间戳类型。
-**应用字段**:不能应用在 timestamp、binary、nchar、bool 类型字段。
+**应用字段**:在 WHERE 或 INSERT 语句中使用时只能作用于 TIMESTAMP 类型的字段。
**适用于**:表、超级表。
-**使用说明**:
-
-- 输出结果行数是范围内总行数减一,第一行没有结果输出。
-- 从 2.1.3.0 版本开始,DIFF 函数可以在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)。
-- 从 2.6.0 开始,DIFF 函数支持 ignore_negative 参数
-
-**示例**:
-
- ```sql
- taos> SELECT DIFF(current) FROM d1001;
- ts | diff(current) |
- =================================================
- 2018-10-03 14:38:15.000 | 2.30000 |
- 2018-10-03 14:38:16.800 | -0.30000 |
- Query OK, 2 row(s) in set (0.001162s)
- ```
-
-### DERIVATIVE
-
-```
-SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHERE clause];
-```
-
-**功能说明**:统计表中某列数值的单位变化率。其中单位时间区间的长度可以通过 time_interval 参数指定,最小可以是 1 秒(1s);ignore_negative 参数的值可以是 0 或 1,为 1 时表示忽略负值。
-
-**返回数据类型**:双精度浮点数。
-
-**应用字段**:不能应用在 timestamp、binary、nchar、bool 类型字段。
-
-**适用于**:表、超级表
-
-**使用说明**:
+**使用说明**:
-- 从 2.1.3.0 及以后版本可用;输出结果行数是范围内总行数减一,第一行没有结果输出。
-- DERIVATIVE 函数可以在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)。
+- 支持时间加减操作,如 NOW() + 1s, 支持的时间单位如下:
+ b(纳秒)、u(微秒)、a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。
+- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。
-**示例**:
-```
-taos> select derivative(current, 10m, 0) from t1;
- ts | derivative(current, 10m, 0) |
-========================================================
- 2021-08-20 10:11:22.790 | 0.500000000 |
- 2021-08-20 11:11:22.791 | 0.166666620 |
- 2021-08-20 12:11:22.791 | 0.000000000 |
- 2021-08-20 13:11:22.792 | 0.166666620 |
- 2021-08-20 14:11:22.792 | -0.666666667 |
-Query OK, 5 row(s) in set (0.004883s)
-```
+#### TIMEDIFF
-### SPREAD
-
-```
-SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause];
+```sql
+SELECT TIMEDIFF(ts_val1 | datetime_string1 | ts_col1, ts_val2 | datetime_string2 | ts_col2 [, time_unit]) FROM { tb_name | stb_name } [WHERE clause];
```
-**功能说明**:统计表/超级表中某列的最大值和最小值之差。
+**功能说明**:计算两个时间戳之间的差值,并近似到时间单位 time_unit 指定的精度。
-**返回数据类型**:双精度浮点数。
+**返回结果数据类型**:长整型 INT64。
-**应用字段**:不能应用在 binary、nchar、bool 类型字段。
+**应用字段**:UNIX 时间戳,日期时间格式的字符串,或者 TIMESTAMP 类型的列。
**适用于**:表、超级表。
-**使用说明**:可用于 TIMESTAMP 字段,此时表示记录的时间覆盖范围。
-
-**示例**:
-
-```
-taos> SELECT SPREAD(voltage) FROM meters;
- spread(voltage) |
-============================
- 5.000000000 |
-Query OK, 1 row(s) in set (0.001792s)
+**使用说明**:
+- 支持的时间单位 time_unit 如下:
+ 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天)。
+- 如果时间单位 time_unit 未指定, 返回的时间差值精度与当前 DATABASE 设置的时间精度一致。
-taos> SELECT SPREAD(voltage) FROM d1001;
- spread(voltage) |
-============================
- 3.000000000 |
-Query OK, 1 row(s) in set (0.000836s)
-```
-### CEIL
+#### TIMETRUNCATE
+```sql
+SELECT TIMETRUNCATE(ts_val | datetime_string | ts_col, time_unit) FROM { tb_name | stb_name } [WHERE clause];
```
-SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause];
-```
-
-**功能说明**:获得指定列的向上取整数的结果。
-
-**返回结果类型**:与指定列的原始数据类型一致。例如,如果指定列的原始数据类型为 Float,那么返回的数据类型也为 Float;如果指定列的原始数据类型为 Double,那么返回的数据类型也为 Double。
-**适用数据类型**:不能应用在 timestamp、binary、nchar、bool 类型字段上;在超级表查询中使用时,不能应用在 tag 列,无论 tag 列的类型是什么类型。
-
-**适用于**: 普通表、超级表。
-
-**嵌套子查询支持**:适用于内层查询和外层查询。
-
-**使用说明**:
-
-- 支持 +、-、\*、/ 运算,如 ceil(col1) + ceil(col2)。
-- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
-
-### FLOOR
+**功能说明**:将时间戳按照指定时间单位 time_unit 进行截断。
-```
-SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];
-```
+**返回结果数据类型**:TIMESTAMP 时间戳类型。
-**功能说明**:获得指定列的向下取整数的结果。
- 其他使用说明参见 CEIL 函数描述。
+**应用字段**:UNIX 时间戳,日期时间格式的字符串,或者 TIMESTAMP 类型的列。
-### ROUND
+**适用于**:表、超级表。
-```
-SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
-```
+**使用说明**:
+- 支持的时间单位 time_unit 如下:
+ 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天)。
+- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。
-**功能说明**:获得指定列的四舍五入的结果。
- 其他使用说明参见 CEIL 函数描述。
-### CSUM
+#### TIMEZONE
```sql
- SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause]
+SELECT TIMEZONE() FROM { tb_name | stb_name } [WHERE clause];
```
- **功能说明**:累加和(Cumulative sum),输出行与输入行数相同。
+**功能说明**:返回客户端当前时区信息。
- **返回结果类型**: 输入列如果是整数类型返回值为长整型 (int64_t),浮点数返回值为双精度浮点数(Double)。无符号整数类型返回值为无符号长整型(uint64_t)。 返回结果中同时带有每行记录对应的时间戳。
+**返回结果数据类型**:VARCHAR 类型。
- **适用数据类型**:不能应用在 timestamp、binary、nchar、bool 类型字段上;在超级表查询中使用时,不能应用在标签之上。
+**应用字段**:无
- **嵌套子查询支持**: 适用于内层查询和外层查询。
+**适用于**:表、超级表。
- **使用说明**:
-
- - 不支持 +、-、*、/ 运算,如 csum(col1) + csum(col2)。
- - 只能与聚合(Aggregation)函数一起使用。 该函数可以应用在普通表和超级表上。
- - 使用在超级表上的时候,需要搭配 Group by tbname使用,将结果强制规约到单个时间线。
-**支持版本**: 从2.3.0.x开始支持
-
-### MAVG
+#### TODAY
```sql
- SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
+SELECT TODAY() FROM { tb_name | stb_name } [WHERE clause];
+SELECT select_expr FROM { tb_name | stb_name } WHERE ts_col cond_operatior TODAY()];
+INSERT INTO tb_name VALUES (TODAY(), ...);
```
- **功能说明**: 计算连续 k 个值的移动平均数(moving average)。如果输入行数小于 k,则无结果输出。参数 k 的合法输入范围是 1≤ k ≤ 1000。
-
- **返回结果类型**: 返回双精度浮点数类型。
-
- **适用数据类型**: 不能应用在 timestamp、binary、nchar、bool 类型上;在超级表查询中使用时,不能应用在标签之上。
-
- **嵌套子查询支持**: 适用于内层查询和外层查询。
-
- **使用说明**:
-
- - 不支持 +、-、*、/ 运算,如 mavg(col1, k1) + mavg(col2, k1);
- - 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用;
- - 该函数可以应用在普通表和超级表上;使用在超级表上的时候,需要搭配 Group by tbname使用,将结果强制规约到单个时间线。
+**功能说明**:返回客户端当日零时的系统时间。
-**支持版本**: 从2.3.0.x开始支持
+**返回结果数据类型**:TIMESTAMP 时间戳类型。
-### SAMPLE
+**应用字段**:在 WHERE 或 INSERT 语句中使用时只能作用于 TIMESTAMP 类型的字段。
-```sql
- SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
-```
+**适用于**:表、超级表。
- **功能说明**: 获取数据的 k 个采样值。参数 k 的合法输入范围是 1≤ k ≤ 1000。
+**使用说明**:
- **返回结果类型**: 同原始数据类型, 返回结果中带有该行记录的时间戳。
+- 支持时间加减操作,如 TODAY() + 1s, 支持的时间单位如下:
+ b(纳秒),u(微秒),a(毫秒),s(秒),m(分),h(小时),d(天),w(周)。
+- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。
- **适用数据类型**: 在超级表查询中使用时,不能应用在标签之上。
- **嵌套子查询支持**: 适用于内层查询和外层查询。
+## 聚合函数
- **使用说明**:
-
- - 不能参与表达式计算;该函数可以应用在普通表和超级表上;
- - 使用在超级表上的时候,需要搭配 Group by tbname 使用,将结果强制规约到单个时间线。
+聚合函数为查询结果集的每一个分组返回单个结果行。可以由 GROUP BY 或窗口切分子句指定分组,如果没有,则整个查询结果集视为一个分组。
-**支持版本**: 从2.3.0.x开始支持
+TDengine 支持针对数据的聚合查询。提供如下聚合函数。
-### ASIN
+### AVG
-```sql
- SELECT ASIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
+```
+SELECT AVG(field_name) FROM tb_name [WHERE clause];
```
-**功能说明**:获得指定列的反正弦结果
-
-**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
+**功能说明**:统计表/超级表中某列的平均值。
-**适用数据类型**:不能应用在 timestamp、binary、nchar、bool 类型字段上;在超级表查询中使用时,不能应用在 tag 列
+**返回数据类型**:双精度浮点数 Double。
-**嵌套子查询支持**:适用于内层查询和外层查询。
+**适用数据类型**:数值类型。
-**使用说明**:
+**适用于**:表、超级表。
-- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
-- 该函数可以应用在普通表和超级表上。
-- 版本2.6.0.x后支持
-### ACOS
+### COUNT
-```sql
- SELECT ACOS(field_name) FROM { tb_name | stb_name } [WHERE clause]
+```
+SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause];
```
-**功能说明**:获得指定列的反余弦结果
+**功能说明**:统计表/超级表中记录行数或某列的非空值个数。
-**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
+**返回数据类型**:长整型 INT64。
-**适用数据类型**:不能应用在 timestamp、binary、nchar、bool 类型字段上;在超级表查询中使用时,不能应用在 tag 列
+**适用数据类型**:应用全部字段。
-**嵌套子查询支持**:适用于内层查询和外层查询。
+**适用于**:表、超级表。
-**使用说明**:
+**使用说明**:
-- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
-- 该函数可以应用在普通表和超级表上。
-- 版本2.6.0.x后支持
+- 可以使用星号(\*)来替代具体的字段,使用星号(\*)返回全部记录数量。
+- 针对同一表的(不包含 NULL 值)字段查询结果均相同。
+- 如果统计对象是具体的列,则返回该列中非 NULL 值的记录数量。
-### ATAN
-```sql
- SELECT ATAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
+### ELAPSED
+
+```mysql
+SELECT ELAPSED(ts_primary_key [, time_unit]) FROM { tb_name | stb_name } [WHERE clause] [INTERVAL(interval [, offset]) [SLIDING sliding]];
```
-**功能说明**:获得指定列的反正切结果
+**功能说明**:elapsed函数表达了统计周期内连续的时间长度,和twa函数配合使用可以计算统计曲线下的面积。在通过INTERVAL子句指定窗口的情况下,统计在给定时间范围内的每个窗口内有数据覆盖的时间范围;如果没有INTERVAL子句,则返回整个给定时间范围内的有数据覆盖的时间范围。注意,ELAPSED返回的并不是时间范围的绝对值,而是绝对值除以time_unit所得到的单位个数。
-**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
+**返回结果类型**:Double
-**适用数据类型**:不能应用在 timestamp、binary、nchar、bool 类型字段上;在超级表查询中使用时,不能应用在 tag 列
+**适用数据类型**:Timestamp类型
-**嵌套子查询支持**:适用于内层查询和外层查询。
+**支持的版本**:2.6.0.0 及以后的版本。
-**使用说明**:
+**适用于**: 表,超级表,嵌套查询的外层查询
-- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
-- 该函数可以应用在普通表和超级表上。
-- 版本2.6.0.x后支持
+**说明**:
+- field_name参数只能是表的第一列,即timestamp主键列。
+- 按time_unit参数指定的时间单位返回,最小是数据库的时间分辨率。time_unit参数未指定时,以数据库的时间分辨率为时间单位。
+- 可以和interval组合使用,返回每个时间窗口的时间戳差值。需要特别注意的是,除第一个时间窗口和最后一个时间窗口外,中间窗口的时间戳差值均为窗口长度。
+- order by asc/desc不影响差值的计算结果。
+- 对于超级表,需要和group by tbname子句组合使用,不可以直接使用。
+- 对于普通表,不支持和group by子句组合使用。
+- 对于嵌套查询,仅当内层查询会输出隐式时间戳列时有效。例如select elapsed(ts) from (select diff(value) from sub1)语句,diff函数会让内层查询输出隐式时间戳列,此为主键列,可以用于elapsed函数的第一个参数。相反,例如select elapsed(ts) from (select * from sub1) 语句,ts列输出到外层时已经没有了主键列的含义,无法使用elapsed函数。此外,elapsed函数作为一个与时间线强依赖的函数,形如select elapsed(ts) from (select diff(value) from st group by tbname)尽管会返回一条计算结果,但并无实际意义,这种用法后续也将被限制。
+- 不支持与leastsquares、diff、derivative、top、bottom、last_row、interp等函数混合使用。
-### SIN
+### LEASTSQUARES
-```sql
- SELECT SIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
+```
+SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause];
```
-**功能说明**:获得指定列的正弦结果
-
-**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
+**功能说明**:统计表中某列的值是主键(时间戳)的拟合直线方程。start_val 是自变量初始值,step_val 是自变量的步长值。
-**适用数据类型**:不能应用在 timestamp、binary、nchar、bool 类型字段上;在超级表查询中使用时,不能应用在 tag 列
+**返回数据类型**:字符串表达式(斜率, 截距)。
-**嵌套子查询支持**:适用于内层查询和外层查询。
+**适用数据类型**:field_name 必须是数值类型。
-**使用说明**:
+**适用于**:表。
-- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
-- 该函数可以应用在普通表和超级表上。
-- 版本2.6.0.x后支持
-### COS
+### MODE
-```sql
- SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause]
+```
+SELECT MODE(field_name) FROM tb_name [WHERE clause];
```
-**功能说明**:获得指定列的余弦结果
-
-**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
+**功能说明**:返回出现频率最高的值,若存在多个频率相同的最高值,输出空。
-**适用数据类型**:不能应用在 timestamp、binary、nchar、bool 类型字段上;在超级表查询中使用时,不能应用在 tag 列
+**返回数据类型**:同应用的字段。
-**嵌套子查询支持**:适用于内层查询和外层查询。
+**适用数据类型**: 数值类型。
-**使用说明**:
+**适用于**:表和超级表。
-- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
-- 该函数可以应用在普通表和超级表上。
-- 版本2.6.0.x后支持
-### TAN
+### SPREAD
-```sql
- SELECT TAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
+```
+SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
-**功能说明**:获得指定列的正切结果
-
-**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
+**功能说明**:统计表/超级表中某列的最大值和最小值之差。
-**适用数据类型**:不能应用在 timestamp、binary、nchar、bool 类型字段上;在超级表查询中使用时,不能应用在 tag 列
+**返回数据类型**:双精度浮点数。
-**嵌套子查询支持**:适用于内层查询和外层查询。
+**适用数据类型**:数值类型或TIMESTAMP类型。
-**使用说明**:
+**适用于**:表和超级表。
-- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
-- 该函数可以应用在普通表和超级表上。
-- 版本2.6.0.x后支持
-### POW
+### STDDEV
-```sql
- SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause]
+```
+SELECT STDDEV(field_name) FROM tb_name [WHERE clause];
```
-**功能说明**:获得指定列的指数为 power 的幂
-
-**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
+**功能说明**:统计表中某列的均方差。
-**适用数据类型**:不能应用在 timestamp、binary、nchar、bool 类型字段上;在超级表查询中使用时,不能应用在 tag 列
+**返回数据类型**:双精度浮点数 Double。
-**嵌套子查询支持**:适用于内层查询和外层查询。
+**适用数据类型**:数值类型。
-**使用说明**:
+**适用于**:表和超级表。
-- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
-- 该函数可以应用在普通表和超级表上。
-- 版本2.6.0.x后支持
-### LOG
+### SUM
-```sql
- SELECT LOG(field_name, base) FROM { tb_name | stb_name } [WHERE clause]
+```
+SELECT SUM(field_name) FROM tb_name [WHERE clause];
```
-**功能说明**:获得指定列对于底数 base 的对数
-
-**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
+**功能说明**:统计表/超级表中某列的和。
-**适用数据类型**:不能应用在 timestamp、binary、nchar、bool 类型字段上;在超级表查询中使用时,不能应用在 tag 列
+**返回数据类型**:双精度浮点数 Double 和长整型 INT64。
-**嵌套子查询支持**:适用于内层查询和外层查询。
+**适用数据类型**:数值类型。
-**使用说明**:
+**适用于**:表和超级表。
-- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
-- 该函数可以应用在普通表和超级表上。
-- 版本2.6.0.x后支持
-### ABS
+### HYPERLOGLOG
-```sql
- SELECT ABS(field_name) FROM { tb_name | stb_name } [WHERE clause]
+```
+SELECT HYPERLOGLOG(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
-**功能说明**:获得指定列的绝对值
-
-**返回结果类型**:如果输入值为整数,输出值是 UBIGINT 类型。如果输入值是 FLOAT/DOUBLE 数据类型,输出值是 DOUBLE 数据类型。
+**功能说明**:
+ - 采用 hyperloglog 算法,返回某列的基数。该算法在数据量很大的情况下,可以明显降低内存的占用,但是求出来的基数是个估算值,标准误差(标准误差是多次实验,每次的平均数的标准差,不是与真实结果的误差)为 0.81%。
+ - 在数据量较少的时候该算法不是很准确,可以使用 select count(data) from (select unique(col) as data from table) 的方法。
-**适用数据类型**:不能应用在 timestamp、binary、nchar、bool 类型字段上;在超级表查询中使用时,不能应用在 tag 列
+**返回结果类型**:整形。
-**嵌套子查询支持**:适用于内层查询和外层查询。
+**适用数据类型**:任何类型。
-**使用说明**:
+**适用于**:表和超级表。
-- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
-- 该函数可以应用在普通表和超级表上。
-- 版本2.6.0.x后支持
-### SQRT
+### HISTOGRAM
-```sql
- SELECT SQRT(field_name) FROM { tb_name | stb_name } [WHERE clause]
+```
+SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_name [WHERE clause];
```
-**功能说明**:获得指定列的平方根
-
-**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
-
-**适用数据类型**:不能应用在 timestamp、binary、nchar、bool 类型字段上;在超级表查询中使用时,不能应用在 tag 列
-
-**嵌套子查询支持**:适用于内层查询和外层查询。
-
-**使用说明**:
-
-- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
-- 该函数可以应用在普通表和超级表上。
-- 版本2.6.0.x后支持
-
-### CAST
+**功能说明**:统计数据按照用户指定区间的分布。
-```sql
- SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause]
-```
+**返回结果类型**:如归一化参数 normalized 设置为 1,返回结果为双精度浮点类型 DOUBLE,否则为长整形 INT64。
-**功能说明**:数据类型转换函数,输入参数 expression 支持普通列、常量、标量函数及它们之间的四则运算,不支持 tag 列,只适用于 select 子句中。
+**适用数据类型**:数值型字段。
-**返回结果类型**:CAST 中指定的类型(type_name)。
+**适用于**: 表和超级表。
-**适用数据类型**:
+**详细说明**:
+1. bin_type 用户指定的分桶类型, 有效输入类型为"user_input“, ”linear_bin", "log_bin"。
+2. bin_description 描述如何生成分桶区间,针对三种桶类型,分别为以下描述格式(均为 JSON 格式字符串):
+ - "user_input": "[1, 3, 5, 7]"
+ 用户指定 bin 的具体数值。
+
+ - "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}"
+ "start" 表示数据起始点,"width" 表示每次 bin 偏移量, "count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点跟终点,
+ 生成区间为[-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf]。
+
+ - "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}"
+ "start" 表示数据起始点,"factor" 表示按指数递增的因子,"count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点跟终点,
+ 生成区间为[-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf]。
+3. normalized 是否将返回结果归一化到 0~1 之间 。有效输入为 0 和 1。
-- 输入参数 expression 的类型可以是除 JSON 外目前所有类型字段(BOOL/TINYINT/SMALLINT/INT/BIGINT/FLOAT/DOUBLE/BINARY(M)/TIMESTAMP/NCHAR(M)/TINYINT UNSIGNED/SMALLINT UNSIGNED/INT UNSIGNED/BIGINT UNSIGNED);
-- 输出目标类型只支持 BIGINT/BINARY(N)/TIMESTAMP/NCHAR(N)/BIGINT UNSIGNED。
-**使用说明**:
+## 选择函数
-- 对于不能支持的类型转换会直接报错。
-- 如果输入值为NULL则输出值也为NULL。
-- 对于类型支持但某些值无法正确转换的情况对应的转换后的值以转换函数输出为准。目前可能遇到的几种情况:
- 1)BINARY/NCHAR转BIGINT/BIGINT UNSIGNED时可能出现的无效字符情况,例如"a"可能转为0。
- 2)有符号数或TIMESTAMP转BIGINT UNSIGNED可能遇到的溢出问题。
- 3)BIGINT UNSIGNED转BIGINT可能遇到的溢出问题。
- 4)FLOAT/DOUBLE转BIGINT/BIGINT UNSIGNED可能遇到的溢出问题。
-- 版本2.6.0.x后支持
+选择函数根据语义在查询结果集中选择一行或多行结果返回。用户可以同时指定输出 ts 列或其他列(包括 tbname 和标签列),这样就可以方便地知道被选出的值是源于哪个数据行的。
-### CONCAT
+### APERCENTILE
-```sql
- SELECT CONCAT(str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause]
+```
+SELECT APERCENTILE(field_name, P[, algo_type])
+FROM { tb_name | stb_name } [WHERE clause]
```
-**功能说明**:字符串连接函数。
-
-**返回结果类型**:同输入参数类型,BINARY 或者 NCHAR。
+**功能说明**:统计表/超级表中指定列的值的近似百分比分位数,与 PERCENTILE 函数相似,但是返回近似结果。
-**适用数据类型**:输入参数或者全部是 BINARY 格式的字符串或者列,或者全部是 NCHAR 格式的字符串或者列。不能应用在 TAG 列。
+**返回数据类型**: 双精度浮点数 Double。
-**使用说明**:
+**适用数据类型**:数值类型。P值范围是[0,100],当为0时等同于MIN,为100时等同于MAX。如果不指定 algo_type 则使用默认算法 。
-- 如果输入值为NULL,输出值为NULL。
-- 该函数最小参数个数为2个,最大参数个数为8个。
-- 该函数可以应用在普通表和超级表上。
-- 该函数适用于内层查询和外层查询。
-- 版本2.6.0.x后支持
+**适用于**:表、超级表。
-### CONCAT_WS
+### BOTTOM
```
- SELECT CONCAT_WS(separator, str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause]
+SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
```
-**功能说明**:带分隔符的字符串连接函数。
+**功能说明**:统计表/超级表中某列的值最小 _k_ 个非 NULL 值。如果多条数据取值一样,全部取用又会超出 k 条限制时,系统会从相同值中随机选取符合要求的数量返回。
-**返回结果类型**:同输入参数类型,BINARY 或者 NCHAR。
+**返回数据类型**:同应用的字段。
-**适用数据类型**:输入参数或者全部是 BINARY 格式的字符串或者列,或者全部是 NCHAR 格式的字符串或者列。不能应用在 TAG 列。
+**适用数据类型**:数值类型。
-**使用说明**:
+**适用于**:表和超级表。
-- 如果separator值为NULL,输出值为NULL。如果separator值不为NULL,其他输入为NULL,输出为空串
-- 该函数最小参数个数为3个,最大参数个数为9个。
-- 该函数可以应用在普通表和超级表上。
-- 该函数适用于内层查询和外层查询。
-- 版本2.6.0.x后支持
+**使用说明**:
+
+- *k*值取值范围 1≤*k*≤100;
+- 系统同时返回该记录关联的时间戳列;
+- 限制:BOTTOM 函数不支持 FILL 子句。
-### LENGTH
+### FIRST
```
- SELECT LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
+SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
-**功能说明**:以字节计数的字符串长度。
+**功能说明**:统计表/超级表中某列的值最先写入的非 NULL 值。
-**返回结果类型**:INT。
+**返回数据类型**:同应用的字段。
-**适用数据类型**:输入参数是 BINARY 类型或者 NCHAR 类型的字符串或者列。不能应用在 TAG 列。
+**适用数据类型**:所有字段。
-**使用说明**
+**适用于**:表和超级表。
-- 如果输入值为NULL,输出值为NULL。
-- 该函数可以应用在普通表和超级表上。
-- 函数适用于内层查询和外层查询。
-- 版本2.6.0.x后支持
+**使用说明**:
-### CHAR_LENGTH
+- 如果要返回各个列的首个(时间戳最小)非 NULL 值,可以使用 FIRST(\*);
+- 如果结果集中的某列全部为 NULL 值,则该列的返回结果也是 NULL;
+- 如果结果集中所有列全部为 NULL 值,则不返回结果。
+
+### INTERP
```
- SELECT CHAR_LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
+SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ RANGE(timestamp1,timestamp2) ] [EVERY(interval)] [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})];
```
-**功能说明**:以字符计数的字符串长度。
+**功能说明**:返回指定时间截面指定列的记录值或插值。
-**返回结果类型**:INT。
+**返回数据类型**:同字段类型。
-**适用数据类型**:输入参数是 BINARY 类型或者 NCHAR 类型的字符串或者列。不能应用在 TAG 列。
+**适用数据类型**:数值类型。
+
+**适用于**:表、超级表。
**使用说明**
-- 如果输入值为NULL,输出值为NULL。
-- 该函数可以应用在普通表和超级表上。
-- 该函数适用于内层查询和外层查询。
-- 版本2.6.0.x后支持
+- INTERP 用于在指定时间断面获取指定列的记录值,如果该时间断面不存在符合条件的行数据,那么会根据 FILL 参数的设定进行插值。
+- INTERP 的输入数据为指定列的数据,可以通过条件语句(where 子句)来对原始列数据进行过滤,如果没有指定过滤条件则输入为全部数据。
+- INTERP 的输出时间范围根据 RANGE(timestamp1,timestamp2)字段来指定,需满足 timestamp1<=timestamp2。其中 timestamp1(必选值)为输出时间范围的起始值,即如果 timestamp1 时刻符合插值条件则 timestamp1 为输出的第一条记录,timestamp2(必选值)为输出时间范围的结束值,即输出的最后一条记录的 timestamp 不能大于 timestamp2。如果没有指定 RANGE,那么满足过滤条件的输入数据中第一条记录的 timestamp 即为 timestamp1,最后一条记录的 timestamp 即为 timestamp2,同样也满足 timestamp1 <= timestamp2。
+- INTERP 根据 EVERY 字段来确定输出时间范围内的结果条数,即从 timestamp1 开始每隔固定长度的时间(EVERY 值)进行插值。如果没有指定 EVERY,则默认窗口大小为无穷大,即从 timestamp1 开始只有一个窗口。
+- INTERP 根据 FILL 字段来决定在每个符合输出条件的时刻如何进行插值,如果没有 FILL 字段则默认不插值,即输出为原始记录值或不输出(原始记录不存在)。
+- INTERP 只能在一个时间序列内进行插值,因此当作用于超级表时必须跟 group by tbname 一起使用,当作用嵌套查询外层时内层子查询不能含 GROUP BY 信息。
+- INTERP 的插值结果不受 ORDER BY timestamp 的影响,ORDER BY timestamp 只影响输出结果的排序。
-### LOWER
+### LAST
```
- SELECT LOWER(str|column) FROM { tb_name | stb_name } [WHERE clause]
+SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
-**功能说明**:将字符串参数值转换为全小写字母。
+**功能说明**:统计表/超级表中某列的值最后写入的非 NULL 值。
-**返回结果类型**:同输入类型。
+**返回数据类型**:同应用的字段。
-**适用数据类型**:输入参数是 BINARY 类型或者 NCHAR 类型的字符串或者列。不能应用在 TAG 列。
+**适用数据类型**:所有字段。
-**使用说明**:
+**适用于**:表和超级表。
+
+**使用说明**:
+
+- 如果要返回各个列的最后(时间戳最大)一个非 NULL 值,可以使用 LAST(\*);
+- 如果结果集中的某列全部为 NULL 值,则该列的返回结果也是 NULL;如果结果集中所有列全部为 NULL 值,则不返回结果。
+- 在用于超级表时,时间戳完全一样且同为最大的数据行可能有多个,那么会从中随机返回一条,而并不保证多次运行所挑选的数据行必然一致。
-- 如果输入值为NULL,输出值为NULL。
-- 该函数可以应用在普通表和超级表上。
-- 该函数适用于内层查询和外层查询。
-- 版本2.6.0.x后支持
-### UPPER
+### LAST_ROW
```
- SELECT UPPER(str|column) FROM { tb_name | stb_name } [WHERE clause]
+SELECT LAST_ROW(field_name) FROM { tb_name | stb_name };
```
-**功能说明**:将字符串参数值转换为全大写字母。
+**功能说明**:返回表/超级表的最后一条记录。
+
+**返回数据类型**:同应用的字段。
-**返回结果类型**:同输入类型。
+**适用数据类型**:所有字段。
-**适用数据类型**:输入参数是 BINARY 类型或者 NCHAR 类型的字符串或者列。不能应用在 TAG 列。
+**适用于**:表和超级表。
**使用说明**:
-- 如果输入值为NULL,输出值为NULL。
-- 该函数可以应用在普通表和超级表上。
-- 该函数适用于内层查询和外层查询。
-- 版本2.6.0.x后支持
+- 在用于超级表时,时间戳完全一样且同为最大的数据行可能有多个,那么会从中随机返回一条,而并不保证多次运行所挑选的数据行必然一致。
+- 不能与 INTERVAL 一起使用。
-### LTRIM
+### MAX
```
- SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
+SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
-**功能说明**:返回清除左边空格后的字符串。
+**功能说明**:统计表/超级表中某列的值最大值。
-**返回结果类型**:同输入类型。
+**返回数据类型**:同应用的字段。
-**适用数据类型**:输入参数是 BINARY 类型或者 NCHAR 类型的字符串或者列。不能应用在 TAG 列。
+**适用数据类型**:数值类型。
-**使用说明**:
+**适用于**:表和超级表。
-- 如果输入值为NULL,输出值为NULL。
-- 该函数可以应用在普通表和超级表上。
-- 该函数适用于内层查询和外层查询。
-- 版本2.6.0.x后支持
-### RTRIM
+### MIN
```
- SELECT RTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
+SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];
```
-**功能说明**:返回清除右边空格后的字符串。
+**功能说明**:统计表/超级表中某列的值最小值。
-**返回结果类型**:同输入类型。
+**返回数据类型**:同应用的字段。
-**适用数据类型**:输入参数是 BINARY 类型或者 NCHAR 类型的字符串或者列。不能应用在 TAG 列。
+**适用数据类型**:数值类型。
-**使用说明**:
+**适用于**:表和超级表。
-- 如果输入值为NULL,输出值为NULL。
-- 该函数可以应用在普通表和超级表上。
-- 该函数适用于内层查询和外层查询。
-- 版本2.6.0.x后支持
-### SUBSTR
+### PERCENTILE
```
- SELECT SUBSTR(str,pos[,len]) FROM { tb_name | stb_name } [WHERE clause]
+SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause];
```
-**功能说明**:从源字符串 str 中的指定位置 pos 开始取一个长度为 len 的子串并返回。
+**功能说明**:统计表中某列的值百分比分位数。
+
+**返回数据类型**: 双精度浮点数 Double。
-**返回结果类型**:同输入类型。
+**应用字段**:数值类型。
-**适用数据类型**:输入参数是 BINARY 类型或者 NCHAR 类型的字符串或者列。不能应用在 TAG 列。
+**适用于**:表。
-**使用说明**:
+**使用说明**:*P*值取值范围 0≤*P*≤100,为 0 的时候等同于 MIN,为 100 的时候等同于 MAX。
-- 如果输入值为NULL,输出值为NULL。
-- 输入参数pos可以为正数,也可以为负数。如果pos是正数,表示开始位置从字符串开头正数计算。如果pos为负数,表示开始位置从字符串结尾倒数计算。如果输入参数len被忽略,返回的子串包含从pos开始的整个字串。
-- 该函数可以应用在普通表和超级表上。
-- 该函数适用于内层查询和外层查询。
-- 版本2.6.0.x后支持
-### 四则运算
+### TAIL
```
-SELECT field_name [+|-|*|/|%][Value|field_name] FROM { tb_name | stb_name } [WHERE clause];
+SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause];
```
-**功能说明**:统计表/超级表中某列或多列间的值加、减、乘、除、取余计算结果。
+**功能说明**:返回跳过最后 offset_val 个,然后取连续 k 个记录,不忽略 NULL 值。offset_val 可以不输入。此时返回最后的 k 个记录。当有 offset_val 输入的情况下,该函数功能等效于 `order by ts desc LIMIT k OFFSET offset_val`。
-**返回数据类型**:双精度浮点数。
+**参数范围**:k: [1,100] offset_val: [0,100]。
-**应用字段**:不能应用在 timestamp、binary、nchar、bool 类型字段。
+**返回数据类型**:同应用的字段。
+
+**适用数据类型**:适合于除时间主列外的任何类型。
**适用于**:表、超级表。
-**使用说明**:
-- 支持两列或多列之间进行计算,可使用括号控制计算优先级;
-- NULL 字段不参与计算,如果参与计算的某行中包含 NULL,该行的计算结果为 NULL。
+### TOP
```
-taos> SELECT current + voltage * phase FROM d1001;
-(current+(voltage*phase)) |
-============================
- 78.190000713 |
- 84.540003240 |
- 80.810000718 |
-Query OK, 3 row(s) in set (0.001046s)
+SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
```
-### STATECOUNT
+**功能说明**: 统计表/超级表中某列的值最大 _k_ 个非 NULL 值。如果多条数据取值一样,全部取用又会超出 k 条限制时,系统会从相同值中随机选取符合要求的数量返回。
-```
-SELECT STATECOUNT(field_name, oper, val) FROM { tb_name | stb_name } [WHERE clause];
-```
+**返回数据类型**:同应用的字段。
-**功能说明**:返回满足某个条件的连续记录的个数,结果作为新的一列追加在每行后面。条件根据参数计算,如果条件为 true 则加 1,条件为 false 则重置为-1,如果数据为 NULL,跳过该条数据。
+**适用数据类型**:数值类型。
-**参数范围**:
+**适用于**:表、超级表。
-- oper : LT (小于)、GT(大于)、LE(小于等于)、GE(大于等于)、NE(不等于)、EQ(等于),不区分大小写。
-- val : 数值型
+**使用说明**:
-**返回结果类型**:整形。
+- *k*值取值范围 1≤*k*≤100;
+- 系统同时返回该记录关联的时间戳列;
+- 限制:TOP 函数不支持 FILL 子句。
-**适用数据类型**:不能应用在 timestamp、binary、nchar、bool 类型字段上。
+### UNIQUE
-**嵌套子查询支持**:不支持应用在子查询上。
+```
+SELECT UNIQUE(field_name) FROM {tb_name | stb_name} [WHERE clause];
+```
-**支持的版本**:2.6 开始的版本。
+**功能说明**:返回该列的数值首次出现的值。该函数功能与 distinct 相似,但是可以匹配标签和时间戳信息。可以针对除时间列以外的字段进行查询,可以匹配标签和时间戳,其中的标签和时间戳是第一次出现时刻的标签和时间戳。
-**使用说明**:
+**返回数据类型**:同应用的字段。
-- 该函数可以应用在普通表上,在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)
-- 不能和窗口操作一起使用,例如 interval/state_window/session_window。
+**适用数据类型**:适合于除时间类型以外的字段。
-**示例**:
+**适用于**: 表和超级表。
-```
-taos> select ts,dbig from statef2;
- ts | dbig |
-========================================================
-2021-10-15 00:31:33.000000000 | 1 |
-2021-10-17 00:31:31.000000000 | NULL |
-2021-12-24 00:31:34.000000000 | 2 |
-2022-01-01 08:00:05.000000000 | 19 |
-2022-01-01 08:00:06.000000000 | NULL |
-2022-01-01 08:00:07.000000000 | 9 |
-Query OK, 6 row(s) in set (0.002977s)
-taos> select stateCount(dbig,GT,2) from statef2;
-ts | dbig | statecount(dbig,gt,2) |
-================================================================================
-2021-10-15 00:31:33.000000000 | 1 | -1 |
-2021-10-17 00:31:31.000000000 | NULL | NULL |
-2021-12-24 00:31:34.000000000 | 2 | -1 |
-2022-01-01 08:00:05.000000000 | 19 | 1 |
-2022-01-01 08:00:06.000000000 | NULL | NULL |
-2022-01-01 08:00:07.000000000 | 9 | 2 |
-Query OK, 6 row(s) in set (0.002791s)
-```
+## 时序数据特有函数
-### STATEDURATION
+时序数据特有函数是 TDengine 为了满足时序数据的查询场景而量身定做出来的。在通用数据库中,实现类似功能通常需要复杂的查询语法,且效率很低。TDengine 以函数的方式内置了这些功能,最大程度的减轻了用户的使用成本。
+
+### CSUM
```sql
-SELECT stateDuration(field_name, oper, val, unit) FROM { tb_name | stb_name } [WHERE clause];
+ SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
-**功能说明**:返回满足某个条件的连续记录的时间长度,结果作为新的一列追加在每行后面。条件根据参数计算,如果条件为 true 则加上两个记录之间的时间长度(第一个满足条件的记录时间长度记为 0),条件为 false 则重置为-1,如果数据为 NULL,跳过该条数据。
-
-**参数范围**:
-
-- oper : LT (小于)、GT(大于)、LE(小于等于)、GE(大于等于)、NE(不等于)、EQ(等于),不区分大小写。
-- val : 数值型
-- unit : 时间长度的单位,范围[1s、1m、1h ],不足一个单位舍去。默认为 1s。
+**功能说明**:累加和(Cumulative sum),输出行与输入行数相同。
-**返回结果类型**:整形。
+**返回结果类型**: 输入列如果是整数类型返回值为长整型 (int64_t),浮点数返回值为双精度浮点数(Double)。无符号整数类型返回值为无符号长整型(uint64_t)。 返回结果中同时带有每行记录对应的时间戳。
-**适用数据类型**:不能应用在 timestamp、binary、nchar、bool 类型字段上。
+**适用数据类型**:数值类型。
-**嵌套子查询支持**:不支持应用在子查询上。
+**嵌套子查询支持**: 适用于内层查询和外层查询。
-**支持的版本**:2.6 开始的版本。
+**适用于**:表和超级表
-**使用说明**:
+**使用说明**:
+
+ - 不支持 +、-、*、/ 运算,如 csum(col1) + csum(col2)。
+ - 只能与聚合(Aggregation)函数一起使用。 该函数可以应用在普通表和超级表上。
+ - 使用在超级表上的时候,需要搭配 Group by tbname使用,将结果强制规约到单个时间线。
-- 该函数可以应用在普通表上,在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)
-- 不能和窗口操作一起使用,例如 interval/state_window/session_window。
-**示例**:
+### DERIVATIVE
```
-taos> select ts,dbig from statef2;
- ts | dbig |
-========================================================
-2021-10-15 00:31:33.000000000 | 1 |
-2021-10-17 00:31:31.000000000 | NULL |
-2021-12-24 00:31:34.000000000 | 2 |
-2022-01-01 08:00:05.000000000 | 19 |
-2022-01-01 08:00:06.000000000 | NULL |
-2022-01-01 08:00:07.000000000 | 9 |
-Query OK, 6 row(s) in set (0.002407s)
-
-taos> select stateDuration(dbig,GT,2) from statef2;
-ts | dbig | stateduration(dbig,gt,2) |
-===================================================================================
-2021-10-15 00:31:33.000000000 | 1 | -1 |
-2021-10-17 00:31:31.000000000 | NULL | NULL |
-2021-12-24 00:31:34.000000000 | 2 | -1 |
-2022-01-01 08:00:05.000000000 | 19 | 0 |
-2022-01-01 08:00:06.000000000 | NULL | NULL |
-2022-01-01 08:00:07.000000000 | 9 | 2 |
-Query OK, 6 row(s) in set (0.002613s)
+SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHERE clause];
```
-## 时间函数
-
-从 2.6.0.0 版本开始,TDengine 查询引擎支持以下时间相关函数:
+**功能说明**:统计表中某列数值的单位变化率。其中单位时间区间的长度可以通过 time_interval 参数指定,最小可以是 1 秒(1s);ignore_negative 参数的值可以是 0 或 1,为 1 时表示忽略负值。
-### NOW
+**返回数据类型**:双精度浮点数。
-```sql
-SELECT NOW() FROM { tb_name | stb_name } [WHERE clause];
-SELECT select_expr FROM { tb_name | stb_name } WHERE ts_col cond_operatior NOW();
-INSERT INTO tb_name VALUES (NOW(), ...);
-```
+**适用数据类型**:数值类型。
-**功能说明**:返回客户端当前系统时间。
+**适用于**:表、超级表
-**返回结果数据类型**:TIMESTAMP 时间戳类型。
+**使用说明**: DERIVATIVE 函数可以在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)。
-**应用字段**:在 WHERE 或 INSERT 语句中使用时只能作用于 TIMESTAMP 类型的字段。
-**适用于**:表、超级表。
+### DIFF
-**使用说明**:
+ ```sql
+ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHERE clause];
+ ```
-- 支持时间加减操作,如 NOW() + 1s, 支持的时间单位如下:
- b(纳秒)、u(微秒)、a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。
-- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。
+**功能说明**:统计表中某列的值与前一行对应值的差。 ignore_negative 取值为 0|1 , 可以不填,默认值为 0. 不忽略负值。ignore_negative 为 1 时表示忽略负数。
-**示例**:
+**返回数据类型**:同应用字段。
-```sql
-taos> SELECT NOW() FROM meters;
- now() |
-==========================
- 2022-02-02 02:02:02.456 |
-Query OK, 1 row(s) in set (0.002093s)
+**适用数据类型**:数值类型。
-taos> SELECT NOW() + 1h FROM meters;
- now() + 1h |
-==========================
- 2022-02-02 03:02:02.456 |
-Query OK, 1 row(s) in set (0.002093s)
+**适用于**:表、超级表。
-taos> SELECT COUNT(voltage) FROM d1001 WHERE ts < NOW();
- count(voltage) |
-=============================
- 5 |
-Query OK, 5 row(s) in set (0.004475s)
+**使用说明**: 输出结果行数是范围内总行数减一,第一行没有结果输出。
-taos> INSERT INTO d1001 VALUES (NOW(), 10.2, 219, 0.32);
-Query OK, 1 of 1 row(s) in database (0.002210s)
-```
-### TODAY
+### IRATE
-```sql
-SELECT TODAY() FROM { tb_name | stb_name } [WHERE clause];
-SELECT select_expr FROM { tb_name | stb_name } WHERE ts_col cond_operatior TODAY()];
-INSERT INTO tb_name VALUES (TODAY(), ...);
+```
+SELECT IRATE(field_name) FROM tb_name WHERE clause;
```
-**功能说明**:返回客户端当日零时的系统时间。
+**功能说明**:计算瞬时增长率。使用时间区间中最后两个样本数据来计算瞬时增长速率;如果这两个值呈递减关系,那么只取最后一个数用于计算,而不是使用二者差值。
-**返回结果数据类型**:TIMESTAMP 时间戳类型。
+**返回数据类型**:双精度浮点数 Double。
-**应用字段**:在 WHERE 或 INSERT 语句中使用时只能作用于 TIMESTAMP 类型的字段。
+**适用数据类型**:数值类型。
**适用于**:表、超级表。
-**使用说明**:
+### MAVG
-- 支持时间加减操作,如 TODAY() + 1s, 支持的时间单位如下:
- b(纳秒),u(微秒),a(毫秒),s(秒),m(分),h(小时),d(天),w(周)。
-- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。
+```sql
+ SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
+```
-**示例**:
+ **功能说明**: 计算连续 k 个值的移动平均数(moving average)。如果输入行数小于 k,则无结果输出。参数 k 的合法输入范围是 1≤ k ≤ 1000。
-```sql
-taos> SELECT TODAY() FROM meters;
- today() |
-==========================
- 2022-02-02 00:00:00.000 |
-Query OK, 1 row(s) in set (0.002093s)
+ **返回结果类型**: 返回双精度浮点数类型。
+
+ **适用数据类型**: 数值类型。
-taos> SELECT TODAY() + 1h FROM meters;
- today() + 1h |
-==========================
- 2022-02-02 01:00:00.000 |
-Query OK, 1 row(s) in set (0.002093s)
+ **嵌套子查询支持**: 适用于内层查询和外层查询。
-taos> SELECT COUNT(voltage) FROM d1001 WHERE ts < TODAY();
- count(voltage) |
-=============================
- 5 |
-Query OK, 5 row(s) in set (0.004475s)
+ **适用于**:表和超级表
-taos> INSERT INTO d1001 VALUES (TODAY(), 10.2, 219, 0.32);
-Query OK, 1 of 1 row(s) in database (0.002210s)
-```
+ **使用说明**:
+
+ - 不支持 +、-、*、/ 运算,如 mavg(col1, k1) + mavg(col2, k1);
+ - 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用;
+ - 使用在超级表上的时候,需要搭配 Group by tbname使用,将结果强制规约到单个时间线。
-### TIMEZONE
+### SAMPLE
```sql
-SELECT TIMEZONE() FROM { tb_name | stb_name } [WHERE clause];
+ SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
```
-**功能说明**:返回客户端当前时区信息。
+ **功能说明**: 获取数据的 k 个采样值。参数 k 的合法输入范围是 1≤ k ≤ 1000。
-**返回结果数据类型**:BINARY 类型。
+ **返回结果类型**: 同原始数据类型, 返回结果中带有该行记录的时间戳。
-**应用字段**:无
+ **适用数据类型**: 在超级表查询中使用时,不能应用在标签之上。
-**适用于**:表、超级表。
+ **嵌套子查询支持**: 适用于内层查询和外层查询。
-**示例**:
+ **适用于**:表和超级表
-```sql
-taos> SELECT TIMEZONE() FROM meters;
- timezone() |
-=================================
- UTC (UTC, +0000) |
-Query OK, 1 row(s) in set (0.002093s)
-```
+ **使用说明**:
+
+ - 不能参与表达式计算;该函数可以应用在普通表和超级表上;
+ - 使用在超级表上的时候,需要搭配 Group by tbname 使用,将结果强制规约到单个时间线。
-### TO_ISO8601
+### STATECOUNT
-```sql
-SELECT TO_ISO8601(ts_val | ts_col) FROM { tb_name | stb_name } [WHERE clause];
+```
+SELECT STATECOUNT(field_name, oper, val) FROM { tb_name | stb_name } [WHERE clause];
```
-**功能说明**:将 UNIX 时间戳转换成为 ISO8601 标准的日期时间格式,并附加客户端时区信息。
+**功能说明**:返回满足某个条件的连续记录的个数,结果作为新的一列追加在每行后面。条件根据参数计算,如果条件为 true 则加 1,条件为 false 则重置为-1,如果数据为 NULL,跳过该条数据。
-**返回结果数据类型**:BINARY 类型。
+**参数范围**:
-**应用字段**:UNIX 时间戳常量或是 TIMESTAMP 类型的列
+- oper : LT (小于)、GT(大于)、LE(小于等于)、GE(大于等于)、NE(不等于)、EQ(等于),不区分大小写。
+- val : 数值型
-**适用于**:表、超级表。
+**返回结果类型**:整形。
-**使用说明**:
+**适用数据类型**:数值类型。
-- 如果输入是 UNIX 时间戳常量,返回格式精度由时间戳的位数决定;
-- 如果输入是 TIMSTAMP 类型的列,返回格式的时间戳精度与当前 DATABASE 设置的时间精度一致。
+**嵌套子查询支持**:不支持应用在子查询上。
-**示例**:
+**适用于**:表和超级表。
-```sql
-taos> SELECT TO_ISO8601(1643738400) FROM meters;
- to_iso8601(1643738400) |
-==============================
- 2022-02-02T02:00:00+0800 |
+**使用说明**:
-taos> SELECT TO_ISO8601(ts) FROM meters;
- to_iso8601(ts) |
-==============================
- 2022-02-02T02:00:00+0800 |
- 2022-02-02T02:00:00+0800 |
- 2022-02-02T02:00:00+0800 |
-```
+- 该函数可以应用在普通表上,在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)
+- 不能和窗口操作一起使用,例如 interval/state_window/session_window。
-### TO_UNIXTIMESTAMP
+
+### STATEDURATION
```sql
-SELECT TO_UNIXTIMESTAMP(datetime_string | ts_col) FROM { tb_name | stb_name } [WHERE clause];
+SELECT stateDuration(field_name, oper, val, unit) FROM { tb_name | stb_name } [WHERE clause];
```
-**功能说明**:将日期时间格式的字符串转换成为 UNIX 时间戳。
+**功能说明**:返回满足某个条件的连续记录的时间长度,结果作为新的一列追加在每行后面。条件根据参数计算,如果条件为 true 则加上两个记录之间的时间长度(第一个满足条件的记录时间长度记为 0),条件为 false 则重置为-1,如果数据为 NULL,跳过该条数据。
-**返回结果数据类型**:长整型 INT64。
+**参数范围**:
+
+- oper : LT (小于)、GT(大于)、LE(小于等于)、GE(大于等于)、NE(不等于)、EQ(等于),不区分大小写。
+- val : 数值型
+- unit : 时间长度的单位,范围[1s、1m、1h ],不足一个单位舍去。默认为 1s。
-**应用字段**:字符串常量或是 BINARY/NCHAR 类型的列。
+**返回结果类型**:整形。
-**适用于**:表、超级表。
+**适用数据类型**:数值类型。
-**使用说明**:
+**嵌套子查询支持**:不支持应用在子查询上。
-- 输入的日期时间字符串须符合 ISO8601/RFC3339 标准,无法转换的字符串格式将返回 0。
-- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。
+**适用于**:表和超级表。
-**示例**:
+**使用说明**:
-```sql
-taos> SELECT TO_UNIXTIMESTAMP("2022-02-02T02:00:00.000Z") FROM meters;
-to_unixtimestamp("2022-02-02T02:00:00.000Z") |
-==============================================
- 1643767200000 |
+- 该函数可以应用在普通表上,在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)
+- 不能和窗口操作一起使用,例如 interval/state_window/session_window。
-taos> SELECT TO_UNIXTIMESTAMP(col_binary) FROM meters;
- to_unixtimestamp(col_binary) |
-========================================
- 1643767200000 |
- 1643767200000 |
- 1643767200000 |
-```
-### TIMETRUNCATE
+### TWA
-```sql
-SELECT TIMETRUNCATE(ts_val | datetime_string | ts_col, time_unit) FROM { tb_name | stb_name } [WHERE clause];
+```
+SELECT TWA(field_name) FROM tb_name WHERE clause;
```
-**功能说明**:将时间戳按照指定时间单位 time_unit 进行截断。
+**功能说明**:时间加权平均函数。统计表中某列在一段时间内的时间加权平均。
-**返回结果数据类型**:TIMESTAMP 时间戳类型。
+**返回数据类型**:双精度浮点数 Double。
-**应用字段**:UNIX 时间戳,日期时间格式的字符串,或者 TIMESTAMP 类型的列。
+**适用数据类型**:数值类型。
**适用于**:表、超级表。
-**使用说明**:
-- 支持的时间单位 time_unit 如下:
- 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天)。
-- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。
+**使用说明**: TWA 函数可以在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)。
-**示例**:
-```sql
-taos> SELECT TIMETRUNCATE(1643738522000, 1h) FROM meters;
- timetruncate(1643738522000, 1h) |
-===================================
- 2022-02-02 02:00:00.000 |
-Query OK, 1 row(s) in set (0.001499s)
+## 系统信息函数
-taos> SELECT TIMETRUNCATE("2022-02-02 02:02:02", 1h) FROM meters;
- timetruncate("2022-02-02 02:02:02", 1h) |
-===========================================
- 2022-02-02 02:00:00.000 |
-Query OK, 1 row(s) in set (0.003903s)
+### DATABASE
-taos> SELECT TIMETRUNCATE(ts, 1h) FROM meters;
- timetruncate(ts, 1h) |
-==========================
- 2022-02-02 02:00:00.000 |
- 2022-02-02 02:00:00.000 |
- 2022-02-02 02:00:00.000 |
-Query OK, 3 row(s) in set (0.003903s)
+```
+SELECT DATABASE();
```
-### TIMEDIFF
+**说明**:返回当前登录的数据库。如果登录的时候没有指定默认数据库,且没有使用USE命令切换数据库,则返回NULL。
-```sql
-SELECT TIMEDIFF(ts_val1 | datetime_string1 | ts_col1, ts_val2 | datetime_string2 | ts_col2 [, time_unit]) FROM { tb_name | stb_name } [WHERE clause];
-```
-**功能说明**:计算两个时间戳之间的差值,并近似到时间单位 time_unit 指定的精度。
+### CLIENT_VERSION
-**返回结果数据类型**:长整型 INT64。
+```
+SELECT CLIENT_VERSION();
+```
-**应用字段**:UNIX 时间戳,日期时间格式的字符串,或者 TIMESTAMP 类型的列。
+**说明**:返回客户端版本。
-**适用于**:表、超级表。
+### SERVER_VERSION
-**使用说明**:
-- 支持的时间单位 time_unit 如下:
- 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天)。
-- 如果时间单位 time_unit 未指定, 返回的时间差值精度与当前 DATABASE 设置的时间精度一致。
+```
+SELECT SERVER_VERSION();
+```
-**示例**:
+**说明**:返回服务端版本。
+
+### SERVER_STATUS
-```sql
-taos> SELECT TIMEDIFF(1643738400000, 1643742000000) FROM meters;
- timediff(1643738400000, 1643742000000) |
-=========================================
- 3600000 |
-Query OK, 1 row(s) in set (0.002553s)
-taos> SELECT TIMEDIFF(1643738400000, 1643742000000, 1h) FROM meters;
- timediff(1643738400000, 1643742000000, 1h) |
-=============================================
- 1 |
-Query OK, 1 row(s) in set (0.003726s)
-
-taos> SELECT TIMEDIFF("2022-02-02 03:00:00", "2022-02-02 02:00:00", 1h) FROM meters;
- timediff("2022-02-02 03:00:00", "2022-02-02 02:00:00", 1h) |
-=============================================================
- 1 |
-Query OK, 1 row(s) in set (0.001937s)
-
-taos> SELECT TIMEDIFF(ts_col1, ts_col2, 1h) FROM meters;
- timediff(ts_col1, ts_col2, 1h) |
-===================================
- 1 |
-Query OK, 1 row(s) in set (0.001937s)
```
+SELECT SERVER_VERSION();
+```
+
+**说明**:返回服务端当前的状态。
diff --git a/docs-cn/12-taos-sql/08-interval.md b/docs-cn/12-taos-sql/08-interval.md
index d62e11b0dbd0ba49ceedb3807e05361f060969b3..b0619ea5ce3759e9bca1234b76e2a16176511547 100644
--- a/docs-cn/12-taos-sql/08-interval.md
+++ b/docs-cn/12-taos-sql/08-interval.md
@@ -11,7 +11,7 @@ TDengine 支持按时间段窗口切分方式进行聚合结果查询,比如
INTERVAL 子句用于产生相等时间周期的窗口,SLIDING 用以指定窗口向前滑动的时间。每次执行的查询是一个时间窗口,时间窗口随着时间流动向前滑动。在定义连续查询的时候需要指定时间窗口(time window )大小和每次前向增量时间(forward sliding times)。如图,[t0s, t0e] ,[t1s , t1e], [t2s, t2e] 是分别是执行三次连续查询的时间窗口范围,窗口的前向滑动的时间范围 sliding time 标识 。查询过滤、聚合等操作按照每个时间窗口为独立的单位执行。当 SLIDING 与 INTERVAL 相等的时候,滑动窗口即为翻转窗口。
-
+
INTERVAL 和 SLIDING 子句需要配合聚合和选择函数来使用。以下 SQL 语句非法:
@@ -33,7 +33,7 @@ _ 从 2.1.5.0 版本开始,INTERVAL 语句允许的最短时间间隔调整为
使用整数(布尔值)或字符串来标识产生记录时候设备的状态量。产生的记录如果具有相同的状态量数值则归属于同一个状态窗口,数值改变后该窗口关闭。如下图所示,根据状态量确定的状态窗口分别是[2019-04-28 14:22:07,2019-04-28 14:22:10]和[2019-04-28 14:22:11,2019-04-28 14:22:12]两个。(状态窗口暂不支持对超级表使用)
-
+
使用 STATE_WINDOW 来确定状态窗口划分的列。例如:
@@ -45,7 +45,7 @@ SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status);
会话窗口根据记录的时间戳主键的值来确定是否属于同一个会话。如下图所示,如果设置时间戳的连续的间隔小于等于 12 秒,则以下 6 条记录构成 2 个会话窗口,分别是:[2019-04-28 14:22:10,2019-04-28 14:22:30]和[2019-04-28 14:23:10,2019-04-28 14:23:30]。因为 2019-04-28 14:22:30 与 2019-04-28 14:23:10 之间的时间间隔是 40 秒,超过了连续时间间隔(12 秒)。
-
+
在 tol_value 时间间隔范围内的结果都认为归属于同一个窗口,如果连续的两条记录的时间超过 tol_val,则自动开启下一个窗口。(会话窗口暂不支持对超级表使用)
diff --git a/docs-cn/12-taos-sql/09-limit.md b/docs-cn/12-taos-sql/09-limit.md
index 3c86a3862174377e6a00d046fb69627c773fe76e..7673e24a83cc1ba5335b11f29803cf9f3eae26e5 100644
--- a/docs-cn/12-taos-sql/09-limit.md
+++ b/docs-cn/12-taos-sql/09-limit.md
@@ -7,9 +7,9 @@ title: 边界限制
- 数据库名最大长度为 32。
- 表名最大长度为 192,不包括数据库名前缀和分隔符
-- 每行数据最大长度 16k 个字符, 从 2.1.7.0 版本开始,每行数据最大长度 48k 个字符(注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)。
+- 每行数据最大长度 48KB (注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)。
- 列名最大长度为 64,最多允许 4096 列,最少需要 2 列,第一列必须是时间戳。注:从 2.1.7.0 版本(不含)以前最多允许 4096 列
-- 标签名最大长度为 64,最多允许 128 个,至少要有 1 个标签,一个表中标签值的总长度不超过 16k 个字符。
+- 标签名最大长度为 64,最多允许 128 个,至少要有 1 个标签,一个表中标签值的总长度不超过 16KB 。
- SQL 语句最大长度 1048576 个字符,也可通过客户端配置参数 maxSQLLength 修改,取值范围 65480 ~ 1048576。
- SELECT 语句的查询结果,最多允许返回 4096 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。注: 2.1.7.0 版本(不含)之前为最多允许 1024 列
- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制。
diff --git a/docs-cn/12-taos-sql/12-keywords/index.md b/docs-cn/12-taos-sql/12-keywords.md
similarity index 71%
rename from docs-cn/12-taos-sql/12-keywords/index.md
rename to docs-cn/12-taos-sql/12-keywords.md
index 608d4e080967cfd97072706cf0963ae669960be6..5c68e5da7e8c537e7514c5f9cfba43084d72189b 100644
--- a/docs-cn/12-taos-sql/12-keywords/index.md
+++ b/docs-cn/12-taos-sql/12-keywords.md
@@ -23,17 +23,17 @@ title: TDengine 参数限制与保留关键字
去掉了 `` ‘“`\ `` (单双引号、撇号、反斜杠、空格)
- 数据库名:不能包含“.”以及特殊字符,不能超过 32 个字符
-- 表名:不能包含“.”以及特殊字符,与所属数据库名一起,不能超过 192 个字符,每行数据最大长度 16k 个字符
-- 表的列名:不能包含特殊字符,不能超过 64 个字符
+- 表名:不能包含“.”以及特殊字符,与所属数据库名一起,不能超过 192 个字节 ,每行数据最大长度 48KB
+- 表的列名:不能包含特殊字符,不能超过 64 个字节
- 数据库名、表名、列名,都不能以数字开头,合法的可用字符集是“英文字符、数字和下划线”
- 表的列数:不能超过 1024 列,最少需要 2 列,第一列必须是时间戳(从 2.1.7.0 版本开始,改为最多支持 4096 列)
-- 记录的最大长度:包括时间戳 8 byte,不能超过 16KB(每个 BINARY/NCHAR 类型的列还会额外占用 2 个 byte 的存储位置)
-- 单条 SQL 语句默认最大字符串长度:1048576 byte,但可通过系统配置参数 maxSQLLength 修改,取值范围 65480 ~ 1048576 byte
+- 记录的最大长度:包括时间戳 8 字节,不能超过 48KB(每个 BINARY/NCHAR 类型的列还会额外占用 2 个 字节 的存储位置)
+- 单条 SQL 语句默认最大字符串长度:1048576 字节,但可通过系统配置参数 maxSQLLength 修改,取值范围 65480 ~ 1048576 字节
- 数据库副本数:不能超过 3
-- 用户名:不能超过 23 个 byte
-- 用户密码:不能超过 15 个 byte
+- 用户名:不能超过 23 个 字节
+- 用户密码:不能超过 15 个 字节
- 标签(Tags)数量:不能超过 128 个,可以 0 个
-- 标签的总长度:不能超过 16K byte
+- 标签的总长度:不能超过 16KB
- 记录条数:仅受存储空间限制
- 表的个数:仅受节点个数限制
- 库的个数:仅受节点个数限制
@@ -85,3 +85,47 @@ title: TDengine 参数限制与保留关键字
| CONNECTIONS | HAVING | NOT | SOFFSET | VNODES |
| CONNS | ID | NOTNULL | STABLE | WAL |
| COPY | IF | NOW | STABLES | WHERE |
+| _C0 | _QSTART | _QSTOP | _QDURATION | _WSTART |
+| _WSTOP | _WDURATION | _ROWTS |
+
+## 特殊说明
+### TBNAME
+`TBNAME` 可以视为超级表中一个特殊的标签,代表子表的表名。
+
+获取一个超级表所有的子表名及相关的标签信息:
+
+```mysql
+SELECT TBNAME, location FROM meters;
+```
+
+统计超级表下辖子表数量:
+
+```mysql
+SELECT COUNT(TBNAME) FROM meters;
+```
+
+以上两个查询均只支持在WHERE条件子句中添加针对标签(TAGS)的过滤条件。例如:
+```mysql
+taos> SELECT TBNAME, location FROM meters;
+ tbname | location |
+==================================================================
+ d1004 | California.SanFrancisco |
+ d1003 | California.SanFrancisco |
+ d1002 | California.LosAngeles |
+ d1001 | California.LosAngeles |
+Query OK, 4 row(s) in set (0.000881s)
+
+taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2;
+ count(tbname) |
+========================
+ 2 |
+Query OK, 1 row(s) in set (0.001091s)
+```
+### _QSTART/_QSTOP/_QDURATION
+表示查询过滤窗口的起始,结束以及持续时间。
+
+### _WSTART/_WSTOP/_WDURATION
+窗口切分聚合查询(例如 interval/session window/state window)中表示每个切分窗口的起始,结束以及持续时间。
+
+### _c0/_ROWTS
+_c0 _ROWTS 等价,表示表或超级表的第一列
diff --git a/docs-cn/12-taos-sql/12-keywords/_category_.yml b/docs-cn/12-taos-sql/12-keywords/_category_.yml
deleted file mode 100644
index 67738650a4564477f017542aea81767b3de72922..0000000000000000000000000000000000000000
--- a/docs-cn/12-taos-sql/12-keywords/_category_.yml
+++ /dev/null
@@ -1 +0,0 @@
-label: 参数限制与保留关键字
\ No newline at end of file
diff --git a/docs-cn/12-taos-sql/13-operators.md b/docs-cn/12-taos-sql/13-operators.md
new file mode 100644
index 0000000000000000000000000000000000000000..22b78455fb35e9ebe5978b30505819e1a2b678c8
--- /dev/null
+++ b/docs-cn/12-taos-sql/13-operators.md
@@ -0,0 +1,66 @@
+---
+sidebar_label: 运算符
+title: 运算符
+---
+
+## 算术运算符
+
+| # | **运算符** | **支持的类型** | **说明** |
+| --- | :--------: | -------------- | -------------------------- |
+| 1 | +, - | 数值类型 | 表达正数和负数,一元运算符 |
+| 2 | +, - | 数值类型 | 表示加法和减法,二元运算符 |
+| 3 | \*, / | 数值类型 | 表示乘法和除法,二元运算符 |
+| 4 | % | 数值类型 | 表示取余运算,二元运算符 |
+
+## 位运算符
+
+| # | **运算符** | **支持的类型** | **说明** |
+| --- | :--------: | -------------- | ------------------ |
+| 1 | & | 数值类型 | 按位与,二元运算符 |
+| 2 | \| | 数值类型 | 按位或,二元运算符 |
+
+## JSON 运算符
+
+`->` 运算符可以对 JSON 类型的列按键取值。`->` 左侧是列标识符,右侧是键的字符串常量,如 `col->'name'`,返回键 `'name'` 的值。
+
+## 集合运算符
+
+集合运算符将两个查询的结果合并为一个结果。包含集合运算符的查询称之为复合查询。复合查询中每条查询的选择列表中的相应表达式在数量上必须匹配,且结果类型以第一条查询为准,后续查询的结果类型必须可转换到第一条查询的结果类型,转换规则同 CAST 函数。
+
+TDengine 支持 `UNION ALL` 和 `UNION` 操作符。UNION ALL 将查询返回的结果集合并返回,并不去重。UNION 将查询返回的结果集合并并去重后返回。在同一个 SQL 语句中,集合操作符最多支持 100 个。
+
+## 比较运算符
+
+| # | **运算符** | **支持的类型** | **说明** |
+| --- | :---------------: | -------------------------------------------------------------------- | -------------------- |
+| 1 | = | 除 BLOB、MEDIUMBLOB 和 JSON 外的所有类型 | 相等 |
+| 2 | <\>, != | 除 BLOB、MEDIUMBLOB 和 JSON 外的所有类型,且不可以为表的时间戳主键列 | 不相等 |
+| 3 | \>, < | 除 BLOB、MEDIUMBLOB 和 JSON 外的所有类型 | 大于,小于 |
+| 4 | \>=, <= | 除 BLOB、MEDIUMBLOB 和 JSON 外的所有类型 | 大于等于,小于等于 |
+| 5 | IS [NOT] NULL | 所有类型 | 是否为空值 |
+| 6 | [NOT] BETWEEN AND | 除 BOOL、BLOB、MEDIUMBLOB 和 JSON 外的所有类型 | 闭区间比较 |
+| 7 | IN | 除 BLOB、MEDIUMBLOB 和 JSON 外的所有类型,且不可以为表的时间戳主键列 | 与列表内的任意值相等 |
+| 8 | LIKE | BINARY、NCHAR 和 VARCHAR | 通配符匹配 |
+| 9 | MATCH, NMATCH | BINARY、NCHAR 和 VARCHAR | 正则表达式匹配 |
+| 10 | CONTAINS | JSON | JSON 中是否存在某键 |
+
+LIKE 条件使用通配符字符串进行匹配检查,规则如下:
+
+- '%'(百分号)匹配 0 到任意个字符;'\_'(下划线)匹配单个任意 ASCII 字符。
+- 如果希望匹配字符串中原本就带有的 \_(下划线)字符,那么可以在通配符字符串中写作 \_,即加一个反斜线来进行转义。
+- 通配符字符串最长不能超过 100 字节。不建议使用太长的通配符字符串,否则将有可能严重影响 LIKE 操作的执行性能。
+
+MATCH 条件和 NMATCH 条件使用正则表达式进行匹配,规则如下:
+
+- 支持符合 POSIX 规范的正则表达式,具体规范内容可参见 Regular Expressions。
+- 只能针对子表名(即 tbname)、字符串类型的标签值进行正则表达式过滤,不支持普通列的过滤。
+- 正则匹配字符串长度不能超过 128 字节。可以通过参数 maxRegexStringLen 设置和调整最大允许的正则匹配字符串,该参数是客户端配置参数,需要重启客户端才能生效
+
+## 逻辑运算符
+
+| # | **运算符** | **支持的类型** | **说明** |
+| --- | :--------: | -------------- | --------------------------------------------------------------------------- |
+| 1 | AND | BOOL | 逻辑与,如果两个条件均为 TRUE, 则返回 TRUE。如果任一为 FALSE,则返回 FALSE |
+| 2 | OR | BOOL | 逻辑或,如果任一条件为 TRUE, 则返回 TRUE。如果两者都是 FALSE,则返回 FALSE |
+
+TDengine 在计算逻辑条件时,会进行短路径优化,即对于 AND,第一个条件为 FALSE,则不再计算第二个条件,直接返回 FALSE;对于 OR,第一个条件为 TRUE,则不再计算第二个条件,直接返回 TRUE。
diff --git a/docs-cn/12-taos-sql/index.md b/docs-cn/12-taos-sql/index.md
index 269bc1d2b5ddfa25c42652d8f639bfe2fb1d42e5..cb01b3a918778abc6c7891c1ff185f1db32d3d36 100644
--- a/docs-cn/12-taos-sql/index.md
+++ b/docs-cn/12-taos-sql/index.md
@@ -7,8 +7,6 @@ description: "TAOS SQL 支持的语法规则、主要查询功能、支持的 SQ
TAOS SQL 是用户对 TDengine 进行数据写入和查询的主要工具。TAOS SQL 为了便于用户快速上手,在一定程度上提供与标准 SQL 类似的风格和模式。严格意义上,TAOS SQL 并不是也不试图提供标准的 SQL 语法。此外,由于 TDengine 针对的时序性结构化数据不提供删除功能,因此在 TAO SQL 中不提供数据删除的相关功能。
-TAOS SQL 不支持关键字的缩写,例如 DESCRIBE 不能缩写为 DESC。
-
本章节 SQL 语法遵循如下约定:
- <\> 里的内容是用户需要输入的,但不要输入 <\> 本身
@@ -37,4 +35,4 @@ import DocCardList from '@theme/DocCardList';
import {useCurrentSidebarCategory} from '@docusaurus/theme-common';
-```
\ No newline at end of file
+```
diff --git a/docs-cn/12-taos-sql/timewindow-1.webp b/docs-cn/12-taos-sql/timewindow-1.webp
new file mode 100644
index 0000000000000000000000000000000000000000..82747558e96df752a0010d85be79a4af07e4a1df
Binary files /dev/null and b/docs-cn/12-taos-sql/timewindow-1.webp differ
diff --git a/docs-cn/12-taos-sql/timewindow-2.webp b/docs-cn/12-taos-sql/timewindow-2.webp
new file mode 100644
index 0000000000000000000000000000000000000000..8f1314ae34f7f5c5cca1d3cb80455f555fad38c3
Binary files /dev/null and b/docs-cn/12-taos-sql/timewindow-2.webp differ
diff --git a/docs-cn/12-taos-sql/timewindow-3.webp b/docs-cn/12-taos-sql/timewindow-3.webp
new file mode 100644
index 0000000000000000000000000000000000000000..5bd16e68e7fd5da6805551e9765975277cd5d4d9
Binary files /dev/null and b/docs-cn/12-taos-sql/timewindow-3.webp differ
diff --git a/docs-cn/13-operation/11-optimize.md b/docs-cn/13-operation/11-optimize.md
deleted file mode 100644
index 1ca9e8c44492a5882613a0b55d959d7abca8b5f6..0000000000000000000000000000000000000000
--- a/docs-cn/13-operation/11-optimize.md
+++ /dev/null
@@ -1,100 +0,0 @@
----
-title: 性能优化
----
-
-因数据行 [update](/train-faq/faq/#update)、表删除、数据过期等原因,TDengine 的磁盘存储文件有可能出现数据碎片,影响查询操作的性能表现。从 2.1.3.0 版本开始,新增 SQL 指令 COMPACT 来启动碎片重整过程:
-
-```sql
-COMPACT VNODES IN (vg_id1, vg_id2, ...)
-```
-
-COMPACT 命令对指定的一个或多个 VGroup 启动碎片重整,系统会通过任务队列尽快安排重整操作的具体执行。COMPACT 指令所需的 VGroup id,可以通过 `SHOW VGROUPS;` 指令的输出结果获取;而且在 `SHOW VGROUPS;` 中会有一个 compacting 列,值为 2 时表示对应的 VGroup 处于排队等待进行重整的状态,值为 1 时表示正在进行碎片重整,为 0 时则表示并没有处于重整状态(未要求进行重整或已经完成重整)。
-
-需要注意的是,碎片重整操作会大幅消耗磁盘 I/O。因此在重整进行期间,有可能会影响节点的写入和查询性能,甚至在极端情况下导致短时间的阻写。
-
-## 存储参数优化
-
-不同应用场景的数据往往具有不同的数据特征,比如保留天数、副本数、采集频次、记录大小、采集点的数量、压缩等都可完全不同。为获得在存储上的最高效率,TDengine 提供如下存储相关的系统配置参数(既可以作为 create database 指令的参数,也可以写在 taos.cfg 配置文件中用来设定创建新数据库时所采用的默认值):
-
-| # | 配置参数名称 | 单位 | 含义 | **取值范围** | **缺省值** |
-| --- | ------------ | ---- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | ---------- |
-| 1 | days | 天 | 一个数据文件存储数据的时间跨度 | 1-3650 | 10 |
-| 2 | keep | 天 | (可通过 alter database 修改)数据库中数据保留的天数。 | 1-36500 | 3650 |
-| 3 | cache | MB | 内存块的大小 | 1-128 | 16 |
-| 4 | blocks | | (可通过 alter database 修改)每个 VNODE(TSDB)中有多少个 cache 大小的内存块。因此一个 VNODE 使用的内存大小粗略为(cache \* blocks)。 | 3-10000 | 6 |
-| 5 | quorum | | (可通过 alter database 修改)多副本环境下指令执行的确认数要求 | 1-2 | 1 |
-| 6 | minRows | | 文件块中记录的最小条数 | 10-1000 | 100 |
-| 7 | maxRows | | 文件块中记录的最大条数 | 200-10000 | 4096 |
-| 8 | comp | | (可通过 alter database 修改)文件压缩标志位 | 0:关闭,1:一阶段压缩,2:两阶段压缩 | 2 |
-| 9 | walLevel | | (作为 database 的参数时名为 wal;在 taos.cfg 中作为参数时需要写作 walLevel)WAL 级别 | 1:写 WAL,但不执行 fsync;2:写 WAL, 而且执行 fsync | 1 |
-| 10 | fsync | 毫秒 | 当 wal 设置为 2 时,执行 fsync 的周期。设置为 0,表示每次写入,立即执行 fsync。 | | 3000 |
-| 11 | replica | | (可通过 alter database 修改)副本个数 | 1-3 | 1 |
-| 12 | precision | | 时间戳精度标识(2.1.2.0 版本之前、2.0.20.7 版本之前在 taos.cfg 文件中不支持此参数。)(从 2.1.5.0 版本开始,新增对纳秒时间精度的支持) | ms 表示毫秒,us 表示微秒,ns 表示纳秒 | ms |
-| 13 | update | | 是否允许数据更新(从 2.1.7.0 版本开始此参数支持 0 ~ 2 的取值范围,在此之前取值只能是 [0, 1];而 2.0.8.0 之前的版本在 SQL 指令中不支持此参数。) | 0:不允许;1:允许更新整行;2:允许部分列更新。 | 0 |
-| 14 | cacheLast | | (可通过 alter database 修改)是否在内存中缓存子表的最近数据(从 2.1.2.0 版本开始此参数支持 0 ~ 3 的取值范围,在此之前取值只能是 [0, 1];而 2.0.11.0 之前的版本在 SQL 指令中不支持此参数。)(2.1.2.0 版本之前、2.0.20.7 版本之前在 taos.cfg 文件中不支持此参数。) | 0:关闭;1:缓存子表最近一行数据;2:缓存子表每一列的最近的非 NULL 值;3:同时打开缓存最近行和列功能 | 0 |
-
-对于一个应用场景,可能有多种数据特征的数据并存,最佳的设计是将具有相同数据特征的表放在一个库里,这样一个应用有多个库,而每个库可以配置不同的存储参数,从而保证系统有最优的性能。TDengine 允许应用在创建库时指定上述存储参数,如果指定,该参数就将覆盖对应的系统配置参数。举例,有下述 SQL:
-
-```sql
- CREATE DATABASE demo DAYS 10 CACHE 32 BLOCKS 8 REPLICA 3 UPDATE 1;
-```
-
-该 SQL 创建了一个库 demo, 每个数据文件存储 10 天数据,内存块为 32 兆字节,每个 VNODE 占用 8 个内存块,副本数为 3,允许更新,而其他参数与系统配置完全一致。
-
-一个数据库创建成功后,仅部分参数可以修改并实时生效,其余参数不能修改:
-
-| **参数名** | **能否修改** | **范围** | **修改语法示例** |
-| ----------- | ------------ | ---------------------------------------------------------- | -------------------------------------- |
-| name | | | |
-| create time | | | |
-| ntables | | | |
-| vgroups | | | |
-| replica | **YES** | 在线 dnode 数目为: 1:1-1; 2:1-2; \>=3:1-3 | ALTER DATABASE REPLICA _n_ |
-| quorum | **YES** | 1-2 | ALTER DATABASE QUORUM _n_ |
-| days | | | |
-| keep | **YES** | days-365000 | ALTER DATABASE KEEP _n_ |
-| cache | | | |
-| blocks | **YES** | 3-1000 | ALTER DATABASE BLOCKS _n_ |
-| minrows | | | |
-| maxrows | | | |
-| wal | | | |
-| fsync | | | |
-| comp | **YES** | 0-2 | ALTER DATABASE COMP _n_ |
-| precision | | | |
-| status | | | |
-| update | | | |
-| cachelast | **YES** | 0 \| 1 \| 2 \| 3 | ALTER DATABASE CACHELAST _n_ |
-
-**说明:**在 2.1.3.0 版本之前,通过 ALTER DATABASE 语句修改这些参数后,需要重启服务器才能生效。
-
-TDengine 集群中加入一个新的 dnode 时,涉及集群相关的一些参数必须与已有集群的配置相同,否则不能成功加入到集群中。会进行校验的参数如下:
-
-- numOfMnodes:系统中管理节点个数。默认值:3。(2.0 版本从 2.0.20.11 开始、2.1 及以上版本从 2.1.6.0 开始,numOfMnodes 默认值改为 1。)
-- mnodeEqualVnodeNum: 一个 mnode 等同于 vnode 消耗的个数。默认值:4。
-- offlineThreshold: dnode 离线阈值,超过该时间将导致该 dnode 从集群中删除。单位为秒,默认值:86400\*10(即 10 天)。
-- statusInterval: dnode 向 mnode 报告状态时长。单位为秒,默认值:1。
-- maxTablesPerVnode: 每个 vnode 中能够创建的最大表个数。默认值:1000000。
-- maxVgroupsPerDb: 每个数据库中能够使用的最大 vgroup 个数。
-- arbitrator: 系统中裁决器的 endpoint,缺省为空。
-- timezone、locale、charset 的配置见客户端配置。(2.0.20.0 及以上的版本里,集群中加入新节点已不要求 locale 和 charset 参数取值一致)
-- balance:是否启用负载均衡。0:否,1:是。默认值:1。
-- flowctrl:是否启用非阻塞流控。0:否,1:是。默认值:1。
-- slaveQuery:是否启用 slave vnode 参与查询。0:否,1:是。默认值:1。
-- adjustMaster:是否启用 vnode master 负载均衡。0:否,1:是。默认值:1。
-
-为方便调试,可通过 SQL 语句临时调整每个 dnode 的日志配置,系统重启后会失效:
-
-```sql
-ALTER DNODE
-```
-
-- dnode_id: 可以通过 SQL 语句"SHOW DNODES"命令获取
-- config: 要调整的日志参数,在如下列表中取值
- > resetlog 截断旧日志文件,创建一个新日志文件
- > debugFlag < 131 | 135 | 143 > 设置 debugFlag 为 131、135 或者 143
-
-例如:
-
-```
-alter dnode 1 debugFlag 135;
-```
diff --git a/docs-cn/14-reference/02-rest-api/02-rest-api.mdx b/docs-cn/14-reference/02-rest-api/02-rest-api.mdx
index c7680ab3e9e109dbb328711f62881283241444fb..43099319b9c5bb1420c199cfa9f7def0b2c44d3d 100644
--- a/docs-cn/14-reference/02-rest-api/02-rest-api.mdx
+++ b/docs-cn/14-reference/02-rest-api/02-rest-api.mdx
@@ -16,7 +16,7 @@ RESTful 接口不依赖于任何 TDengine 的库,因此客户端不需要安
在已经安装 TDengine 服务器端的情况下,可以按照如下方式进行验证。
-下面以 Ubuntu 环境中使用 curl 工具(确认已经安装)来验证 RESTful 接口的正常。
+下面以 Ubuntu 环境中使用 curl 工具(确认已经安装)来验证 RESTful 接口的正常,验证前请确认 taosAdapter 服务已开启,在 Linux 系统上此服务默认由 systemd 管理,使用命令 `systemctl start taosadapter` 启动。
下面示例是列出所有的数据库,请把 h1.taosdata.com 和 6041(缺省值)替换为实际运行的 TDengine 服务 FQDN 和端口号:
diff --git a/docs-cn/14-reference/03-connector/03-connector.mdx b/docs-cn/14-reference/03-connector/03-connector.mdx
index c0e714f148a7821e070be38a5484484fdd747e9a..7a4a85276ef4bb4ab829250fcf67076962dbb871 100644
--- a/docs-cn/14-reference/03-connector/03-connector.mdx
+++ b/docs-cn/14-reference/03-connector/03-connector.mdx
@@ -4,7 +4,7 @@ title: 连接器
TDengine 提供了丰富的应用程序开发接口,为了便于用户快速开发自己的应用,TDengine 支持了多种编程语言的连接器,其中官方连接器包括支持 C/C++、Java、Python、Go、Node.js、C# 和 Rust 的连接器。这些连接器支持使用原生接口(taosc)和 REST 接口(部分语言暂不支持)连接 TDengine 集群。社区开发者也贡献了多个非官方连接器,例如 ADO.NET 连接器、Lua 连接器和 PHP 连接器。
-
+
## 支持的平台
diff --git a/docs-cn/14-reference/03-connector/connector.webp b/docs-cn/14-reference/03-connector/connector.webp
new file mode 100644
index 0000000000000000000000000000000000000000..040cf5c26c726b345b2e0e5363dd3c677bec61be
Binary files /dev/null and b/docs-cn/14-reference/03-connector/connector.webp differ
diff --git a/docs-cn/14-reference/03-connector/cpp.mdx b/docs-cn/14-reference/03-connector/cpp.mdx
index aba1d6c717dfec9228f38e89f90cbf1be0021045..aecf9fde12dfae8026d5f838d6467340a891f372 100644
--- a/docs-cn/14-reference/03-connector/cpp.mdx
+++ b/docs-cn/14-reference/03-connector/cpp.mdx
@@ -114,7 +114,6 @@ TDengine 客户端驱动的安装请参考 [安装指南](/reference/connector#
订阅和消费
```c
-{{#include examples/c/subscribe.c}}
```
diff --git a/docs-cn/14-reference/03-connector/java.mdx b/docs-cn/14-reference/03-connector/java.mdx
index 55abf84fd50fe1c4b5b6a07b28731a00d4534a05..267757160634b28ab198ae0fd759188cf4ccc5cc 100644
--- a/docs-cn/14-reference/03-connector/java.mdx
+++ b/docs-cn/14-reference/03-connector/java.mdx
@@ -11,7 +11,7 @@ import TabItem from '@theme/TabItem';
`taos-jdbcdriver` 是 TDengine 的官方 Java 语言连接器,Java 开发人员可以通过它开发存取 TDengine 数据库的应用软件。`taos-jdbcdriver` 实现了 JDBC driver 标准的接口,并提供两种形式的连接器。一种是通过 TDengine 客户端驱动程序(taosc)原生连接 TDengine 实例,支持数据写入、查询、订阅、schemaless 接口和参数绑定接口等功能,一种是通过 taosAdapter 提供的 REST 接口连接 TDengine 实例(2.4.0.0 及更高版本)。REST 连接实现的功能集合和原生连接有少量不同。
-
+
上图显示了两种 Java 应用使用连接器访问 TDengine 的两种方式:
@@ -208,10 +208,10 @@ url 中的配置参数如下:
- 与原生连接方式不同,REST 接口是无状态的。在使用 JDBC REST 连接时,需要在 SQL 中指定表、超级表的数据库名称。例如:
```sql
-INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('beijing') VALUES(now, 24.6);
+INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFrancisco') VALUES(now, 24.6);
```
-- 从 taos-jdbcdriver-2.0.36 和 TDengine 2.2.0.0 版本开始,如果在 url 中指定了 dbname,那么,JDBC REST 连接会默认使用/rest/sql/dbname 作为 restful 请求的 url,在 SQL 中不需要指定 dbname。例如:url 为 jdbc:TAOS-RS://127.0.0.1:6041/test,那么,可以执行 sql:insert into t1 using weather(ts, temperature) tags('beijing') values(now, 24.6);
+- 从 taos-jdbcdriver-2.0.36 和 TDengine 2.2.0.0 版本开始,如果在 url 中指定了 dbname,那么,JDBC REST 连接会默认使用/rest/sql/dbname 作为 restful 请求的 url,在 SQL 中不需要指定 dbname。例如:url 为 jdbc:TAOS-RS://127.0.0.1:6041/test,那么,可以执行 sql:insert into t1 using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6);
:::
@@ -563,7 +563,7 @@ public class ParameterBindingDemo {
// set table name
pstmt.setTableName("t5_" + i);
// set tags
- pstmt.setTagNString(0, "北京-abc");
+ pstmt.setTagNString(0, "California.SanFrancisco");
// set columns
ArrayList tsList = new ArrayList<>();
@@ -574,7 +574,7 @@ public class ParameterBindingDemo {
ArrayList f1List = new ArrayList<>();
for (int j = 0; j < numOfRow; j++) {
- f1List.add("北京-abc");
+ f1List.add("California.LosAngeles");
}
pstmt.setNString(1, f1List, BINARY_COLUMN_SIZE);
@@ -633,7 +633,7 @@ public class SchemalessInsertTest {
private static final String host = "127.0.0.1";
private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000";
private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0";
- private static final String jsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1346846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"Beijing\", \"id\": \"d1001\"}}";
+ private static final String jsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1346846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}";
public static void main(String[] args) throws SQLException {
final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
diff --git a/docs-cn/14-reference/03-connector/node.mdx b/docs-cn/14-reference/03-connector/node.mdx
index 12345fa9fe995c41828df07703f0efb61a2e029d..9f2bed9e97cb33aeabfce3d69dc3774931b426c0 100644
--- a/docs-cn/14-reference/03-connector/node.mdx
+++ b/docs-cn/14-reference/03-connector/node.mdx
@@ -14,7 +14,6 @@ import NodeInfluxLine from "../../07-develop/03-insert-data/_js_line.mdx";
import NodeOpenTSDBTelnet from "../../07-develop/03-insert-data/_js_opts_telnet.mdx";
import NodeOpenTSDBJson from "../../07-develop/03-insert-data/_js_opts_json.mdx";
import NodeQuery from "../../07-develop/04-query-data/_js.mdx";
-import NodeAsyncQuery from "../../07-develop/04-query-data/_js_async.mdx";
`td2.0-connector` 和 `td2.0-rest-connector` 是 TDengine 的官方 Node.js 语言连接器。Node.js 开发人员可以通过它开发可以存取 TDengine 集群数据的应用软件。
@@ -189,14 +188,8 @@ let cursor = conn.cursor();
### 查询数据
-#### 同步查询
-
-#### 异步查询
-
-
-
## 更多示例程序
| 示例程序 | 示例程序描述 |
diff --git a/docs-cn/14-reference/03-connector/tdengine-jdbc-connector.png b/docs-cn/14-reference/03-connector/tdengine-jdbc-connector.png
deleted file mode 100644
index 1cb8401ea30b01d8db652ed4ea70ecc511de7461..0000000000000000000000000000000000000000
Binary files a/docs-cn/14-reference/03-connector/tdengine-jdbc-connector.png and /dev/null differ
diff --git a/docs-cn/14-reference/03-connector/tdengine-jdbc-connector.webp b/docs-cn/14-reference/03-connector/tdengine-jdbc-connector.webp
new file mode 100644
index 0000000000000000000000000000000000000000..0956d6005ffc5e90727d49d7566158affdda09c2
Binary files /dev/null and b/docs-cn/14-reference/03-connector/tdengine-jdbc-connector.webp differ
diff --git a/docs-cn/14-reference/04-taosadapter.md b/docs-cn/14-reference/04-taosadapter.md
index 90a31ec94c94559311e2c91cd34f75af7e87e9a0..6e259391d40acfd48d8db8db3246ad2196ce0520 100644
--- a/docs-cn/14-reference/04-taosadapter.md
+++ b/docs-cn/14-reference/04-taosadapter.md
@@ -24,7 +24,7 @@ taosAdapter 提供以下功能:
## taosAdapter 架构图
-
+
## taosAdapter 部署方法
diff --git a/docs-cn/14-reference/06-taosdump.md b/docs-cn/14-reference/06-taosdump.md
index 7131493ec9439225d8047288ed86026c887f0aac..3a9f2e9acd215be102991a1d91fba285ef6315bb 100644
--- a/docs-cn/14-reference/06-taosdump.md
+++ b/docs-cn/14-reference/06-taosdump.md
@@ -38,7 +38,7 @@ taosdump 有两种安装方式:
:::tip
- taosdump 1.4.1 之后的版本提供 `-I` 参数,用于解析 avro 文件 schema 和数据,如果指定 `-s` 参数将只解析 schema。
-- taosdump 1.4.2 之后的备份使用 `-B` 参数指定的批次数,默认值为 16384,如果在某些环境下由于网络速度或磁盘性能不足导致 "Error actual dump .. batch .." 可以通过 `-B` 参数挑战为更小的值进行尝试。
+- taosdump 1.4.2 之后的备份使用 `-B` 参数指定的批次数,默认值为 16384,如果在某些环境下由于网络速度或磁盘性能不足导致 "Error actual dump .. batch .." 可以通过 `-B` 参数调整为更小的值进行尝试。
:::
diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.png b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.png
deleted file mode 100644
index 4708f836feb21980f2db7fed4a55f799b23a6ec1..0000000000000000000000000000000000000000
Binary files a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.png and /dev/null differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp
new file mode 100644
index 0000000000000000000000000000000000000000..a78e18028a94c2f6a783b08d992a25c791527407
Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.png b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.png
deleted file mode 100644
index f2684e6eed70e8f56697eae42b495d6bd62815e8..0000000000000000000000000000000000000000
Binary files a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.png and /dev/null differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp
new file mode 100644
index 0000000000000000000000000000000000000000..b152418d0902b8ebdf62ebce6705c10dd5ab4fbf
Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.png b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.png
deleted file mode 100644
index 74686691e4106b8646c3deee1e0ce73b2f53f1ea..0000000000000000000000000000000000000000
Binary files a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.png and /dev/null differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp
new file mode 100644
index 0000000000000000000000000000000000000000..f58f48b7f17375cb8e62e7c0126ca3aea56a13f6
Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-4-requests.png b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-4-requests.png
deleted file mode 100644
index 27964215567f9f961c0aeaf1b863188437008fb7..0000000000000000000000000000000000000000
Binary files a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-4-requests.png and /dev/null differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp
new file mode 100644
index 0000000000000000000000000000000000000000..00afcce013602dce0da17bfd033f65aaa8e43bb7
Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-5-database.png b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-5-database.png
deleted file mode 100644
index b0d3abbf21ec4d4bd7bfb95fcc03a5f936b22665..0000000000000000000000000000000000000000
Binary files a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-5-database.png and /dev/null differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-5-database.webp b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-5-database.webp
new file mode 100644
index 0000000000000000000000000000000000000000..567e5694f9d7a035a3eb354493d3df8ed64db251
Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-5-database.webp differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.png b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.png
deleted file mode 100644
index 2b54cbeb83bcff12f20461a4f57f882e2073f231..0000000000000000000000000000000000000000
Binary files a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.png and /dev/null differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.webp b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.webp
new file mode 100644
index 0000000000000000000000000000000000000000..cc8a912810f35e53a6e5fa96ea0c81e334ffc0df
Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.webp differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-7-login-history.png b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-7-login-history.png
deleted file mode 100644
index eb3848657f13900c856ac595c20766465157e9c4..0000000000000000000000000000000000000000
Binary files a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-7-login-history.png and /dev/null differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-7-login-history.webp b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-7-login-history.webp
new file mode 100644
index 0000000000000000000000000000000000000000..651b716bc511ba2ed5db5e6fc6b0591ef150cbf6
Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-7-login-history.webp differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.png b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.png
deleted file mode 100644
index d94b2e02ac9855bb3d2f77d8902e068839db364f..0000000000000000000000000000000000000000
Binary files a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.png and /dev/null differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp
new file mode 100644
index 0000000000000000000000000000000000000000..8666193f59497180574fd2786266e5baabbe9761
Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-full.png b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-full.png
deleted file mode 100644
index 654df2934597ce600a1dc2dcd0cab7e29de7076d..0000000000000000000000000000000000000000
Binary files a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-full.png and /dev/null differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-full.webp b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-full.webp
new file mode 100644
index 0000000000000000000000000000000000000000..7f38a76a2b899ffebc7aecd39c8ec4fd0b2da778
Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-full.webp differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-manager-status.png b/docs-cn/14-reference/07-tdinsight/assets/alert-manager-status.png
deleted file mode 100644
index e3afa22c0326d70567ec4529c83101c746daac87..0000000000000000000000000000000000000000
Binary files a/docs-cn/14-reference/07-tdinsight/assets/alert-manager-status.png and /dev/null differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-manager-status.webp b/docs-cn/14-reference/07-tdinsight/assets/alert-manager-status.webp
new file mode 100644
index 0000000000000000000000000000000000000000..3d7fe932a23f3720e76e4217a7b5d1868d81fac8
Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/alert-manager-status.webp differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-notification-channel.png b/docs-cn/14-reference/07-tdinsight/assets/alert-notification-channel.png
deleted file mode 100644
index 198bf37141c86a66cdd91b47a331bcdeb83daaf8..0000000000000000000000000000000000000000
Binary files a/docs-cn/14-reference/07-tdinsight/assets/alert-notification-channel.png and /dev/null differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-notification-channel.webp b/docs-cn/14-reference/07-tdinsight/assets/alert-notification-channel.webp
new file mode 100644
index 0000000000000000000000000000000000000000..517123954efe4b94485fdab2e07be0d765f5daa2
Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/alert-notification-channel.webp differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-query-demo.png b/docs-cn/14-reference/07-tdinsight/assets/alert-query-demo.png
deleted file mode 100644
index ace3aa3c2f8f14fabdac54bc25ae2d9449445b69..0000000000000000000000000000000000000000
Binary files a/docs-cn/14-reference/07-tdinsight/assets/alert-query-demo.png and /dev/null differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-query-demo.webp b/docs-cn/14-reference/07-tdinsight/assets/alert-query-demo.webp
new file mode 100644
index 0000000000000000000000000000000000000000..6666296ac16e7a0c0ab3db23f0517f2089d09035
Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/alert-query-demo.webp differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.png b/docs-cn/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.png
deleted file mode 100644
index 7082e49f6beb8690c36f98a3f4ff2befdb8fd014..0000000000000000000000000000000000000000
Binary files a/docs-cn/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.png and /dev/null differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.webp b/docs-cn/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.webp
new file mode 100644
index 0000000000000000000000000000000000000000..6f74bc3a47a32de661ef25f787a947d823715810
Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.webp differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-rule-test.png b/docs-cn/14-reference/07-tdinsight/assets/alert-rule-test.png
deleted file mode 100644
index ffd4911b53854c42dbf0ff11838cb604fa694138..0000000000000000000000000000000000000000
Binary files a/docs-cn/14-reference/07-tdinsight/assets/alert-rule-test.png and /dev/null differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-rule-test.webp b/docs-cn/14-reference/07-tdinsight/assets/alert-rule-test.webp
new file mode 100644
index 0000000000000000000000000000000000000000..acda3b24a6263815ac8b658709d2172300ca3b00
Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/alert-rule-test.webp differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-button.png b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-button.png
deleted file mode 100644
index 802c7366f921301bd7fbc62458e56b2d1eaf195c..0000000000000000000000000000000000000000
Binary files a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-button.png and /dev/null differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-button.webp b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-button.webp
new file mode 100644
index 0000000000000000000000000000000000000000..903e236e2a776dfef7f85c014662e8913a9033a5
Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-button.webp differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.png b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.png
deleted file mode 100644
index 019ec921b6f808671f4f864ddf3380159d4a0dcc..0000000000000000000000000000000000000000
Binary files a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.png and /dev/null differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.webp b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.webp
new file mode 100644
index 0000000000000000000000000000000000000000..14fcfe9d183e8804199708ae4492d0904a7c9d62
Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.webp differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-test.png b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-test.png
deleted file mode 100644
index 3963abb4ea8ae0e6f5557466f7a5b746c2d2ea3c..0000000000000000000000000000000000000000
Binary files a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-test.png and /dev/null differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-test.webp b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-test.webp
new file mode 100644
index 0000000000000000000000000000000000000000..00b50cc619b030d1fb2be3a367183901d5c833e8
Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-test.webp differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource.png b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource.png
deleted file mode 100644
index 837100464b35a5cafac474723aef603f91945ebc..0000000000000000000000000000000000000000
Binary files a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource.png and /dev/null differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource.webp b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource.webp
new file mode 100644
index 0000000000000000000000000000000000000000..06d0ff6ed50091a6340508bc5b2b3f78b65dcb18
Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource.webp differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-display.png b/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-display.png
deleted file mode 100644
index 98223df25499effac343ff5723544a3c289f18fa..0000000000000000000000000000000000000000
Binary files a/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-display.png and /dev/null differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-display.webp b/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-display.webp
new file mode 100644
index 0000000000000000000000000000000000000000..e2ec052b91e439a817f6e88b8afd0fcb4dcb7ef8
Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-display.webp differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-import-options.png b/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-import-options.png
deleted file mode 100644
index 07aba348f02b4fb8ef68e79664920c119b842d4c..0000000000000000000000000000000000000000
Binary files a/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-import-options.png and /dev/null differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-import-options.webp b/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-import-options.webp
new file mode 100644
index 0000000000000000000000000000000000000000..665c035f9755b9472aee33cd61d3ab52831194b5
Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-import-options.webp differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-import-dashboard.png b/docs-cn/14-reference/07-tdinsight/assets/howto-import-dashboard.png
deleted file mode 100644
index 7e28939ead8bf3b6e2b4330e4f9b59c2e39b5c1c..0000000000000000000000000000000000000000
Binary files a/docs-cn/14-reference/07-tdinsight/assets/howto-import-dashboard.png and /dev/null differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-import-dashboard.webp b/docs-cn/14-reference/07-tdinsight/assets/howto-import-dashboard.webp
new file mode 100644
index 0000000000000000000000000000000000000000..7dc42eeba919fee7b438a453c00bb9fd0ac2d274
Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/howto-import-dashboard.webp differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-15167.png b/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-15167.png
deleted file mode 100644
index 981f640b14d18aa6f0682768d8405a232df500f6..0000000000000000000000000000000000000000
Binary files a/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-15167.png and /dev/null differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-15167.webp b/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-15167.webp
new file mode 100644
index 0000000000000000000000000000000000000000..7ef081900f8de99c859193b69d49b3d6bc187909
Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-15167.webp differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.png b/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.png
deleted file mode 100644
index 94ef4fa5fe63e535118a81707b413c028ce01f70..0000000000000000000000000000000000000000
Binary files a/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.png and /dev/null differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.webp b/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.webp
new file mode 100644
index 0000000000000000000000000000000000000000..602452fc4c89424d8e17d46d74949b69be84dbe8
Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.webp differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.png b/docs-cn/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.png
deleted file mode 100644
index 670cacc377c2801fa9437c3c132c5c7fbc361b0f..0000000000000000000000000000000000000000
Binary files a/docs-cn/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.png and /dev/null differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.webp b/docs-cn/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.webp
new file mode 100644
index 0000000000000000000000000000000000000000..35a3ebba781f24dbb0066993d1ca2f02659997d2
Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.webp differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/import_dashboard.png b/docs-cn/14-reference/07-tdinsight/assets/import_dashboard.png
deleted file mode 100644
index d74cd36c96ee0fd24ddc6feae2da07824816f745..0000000000000000000000000000000000000000
Binary files a/docs-cn/14-reference/07-tdinsight/assets/import_dashboard.png and /dev/null differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/import_dashboard.webp b/docs-cn/14-reference/07-tdinsight/assets/import_dashboard.webp
new file mode 100644
index 0000000000000000000000000000000000000000..fb7958f1b9fbd43c8f63136024842790e711c490
Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/import_dashboard.webp differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/tdengine_dashboard.png b/docs-cn/14-reference/07-tdinsight/assets/tdengine_dashboard.png
deleted file mode 100644
index 0101e7430cb2ef673818de8bd3af53d0d082ad3f..0000000000000000000000000000000000000000
Binary files a/docs-cn/14-reference/07-tdinsight/assets/tdengine_dashboard.png and /dev/null differ
diff --git a/docs-cn/14-reference/07-tdinsight/assets/tdengine_dashboard.webp b/docs-cn/14-reference/07-tdinsight/assets/tdengine_dashboard.webp
new file mode 100644
index 0000000000000000000000000000000000000000..49f1d88f4ad93286cd8582536e82b4dcc4ff271b
Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/tdengine_dashboard.webp differ
diff --git a/docs-cn/14-reference/07-tdinsight/index.md b/docs-cn/14-reference/07-tdinsight/index.md
index a554d7ee6b36797940282fa8401df2f22c4cf579..5990a831b8bc1788deaddfb38f717f2723969362 100644
--- a/docs-cn/14-reference/07-tdinsight/index.md
+++ b/docs-cn/14-reference/07-tdinsight/index.md
@@ -233,33 +233,33 @@ sudo systemctl enable grafana-server
指向 **Configurations** -> **Data Sources** 菜单,然后点击 **Add data source** 按钮。
-
+
搜索并选择**TDengine**。
-
+
配置 TDengine 数据源。
-
+
保存并测试,正常情况下会报告 'TDengine Data source is working'。
-
+
### 导入仪表盘
指向 **+** / **Create** - **import**(或 `/dashboard/import` url)。
-
+
在 **Import via grafana.com** 位置键入仪表盘 ID `15167` 并 **Load**。
-
+
导入完成后,TDinsight 的完整页面视图如下所示。
-
+
## TDinsight 仪表盘详细信息
@@ -269,7 +269,7 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes
### 集群状态
-
+
这部分包括集群当前信息和状态,告警信息也在此处(从左到右,从上到下)。
@@ -289,7 +289,7 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes
### DNodes 状态
-
+
- **DNodes Status**:`show dnodes` 的简单表格视图。
- **DNodes Lifetime**:从创建 dnode 开始经过的时间。
@@ -298,14 +298,14 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes
### MNode 概述
-
+
1. **MNodes Status**:`show mnodes` 的简单表格视图。
2. **MNodes Number**:类似于`DNodes Number`,MNodes 数量变化。
### 请求
-
+
1. **Requests Rate(Inserts per Second)**:平均每秒插入次数。
2. **Requests (Selects)**:查询请求数及变化率(count of second)。
@@ -313,7 +313,7 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes
### 数据库
-
+
数据库使用情况,对变量 `$database` 的每个值即每个数据库进行重复多行展示。
@@ -325,7 +325,7 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes
### DNode 资源使用情况
-
+
数据节点资源使用情况展示,对变量 `$fqdn` 即每个数据节点进行重复多行展示。包括:
@@ -346,13 +346,13 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes
### 登录历史
-
+
目前只报告每分钟登录次数。
### 监控 taosAdapter
-
+
支持监控 taosAdapter 请求统计和状态详情。包括:
diff --git a/docs-cn/14-reference/12-config/index.md b/docs-cn/14-reference/12-config/index.md
index cbb3833b5bb170720c2aa7bea6687a50feeae7d5..89c414a5b8479d8253b2a1fa1e3ab3b684f75e78 100644
--- a/docs-cn/14-reference/12-config/index.md
+++ b/docs-cn/14-reference/12-config/index.md
@@ -80,7 +80,7 @@ taos --dump-config
| 补充说明 | RESTful 服务在 2.4.0.0 之前(不含)由 taosd 提供,默认端口为 6041; 在 2.4.0.0 及后续版本由 taosAdapter,默认端口为 6041 |
:::note
-对于端口,TDengine 会使用从 serverPort 起 13 个连续的 TCP 和 UDP 端口号,请务必在防火墙打开。因此如果是缺省配置,需要打开从 6030 到 6042 共 13 个端口,而且必须 TCP 和 UDP 都打开。(详细的端口情况请参见下表)
+确保集群中所有主机在端口 6030-6042 上的 TCP/UDP 协议能够互通。(详细的端口情况请参见下表)
:::
| 协议 | 默认端口 | 用途说明 | 修改方法 |
| :--- | :-------- | :---------------------------------- | :--------------------------------------------------------------------------------------------------------------------------------- |
@@ -590,7 +590,7 @@ charset 的有效值是 UTF-8。
| 适用范围 | 仅服务端适用 |
| 含义 | 每个 DB 中 能够使用的最大 vnode 个数 |
| 取值范围 | 0-8192 |
-| 缺省值 | |
+| 缺省值 | 0 |
### maxTablesPerVnode
diff --git a/docs-cn/14-reference/13-schemaless/13-schemaless.md b/docs-cn/14-reference/13-schemaless/13-schemaless.md
index 4de310c248d7763690acef80cdca1c50f609d63b..f2712f2814593bddd65401cb129c8c58ee55a316 100644
--- a/docs-cn/14-reference/13-schemaless/13-schemaless.md
+++ b/docs-cn/14-reference/13-schemaless/13-schemaless.md
@@ -82,7 +82,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
:::tip
无模式所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过
-16k 字节。这方面的具体限制约束请参见 [TAOS SQL 边界限制](/taos-sql/limit)
+48KB。这方面的具体限制约束请参见 [TAOS SQL 边界限制](/taos-sql/limit)
:::
diff --git a/docs-cn/14-reference/taosAdapter-architecture.png b/docs-cn/14-reference/taosAdapter-architecture.png
deleted file mode 100644
index 08a9018553aae6f86b42d127b372d0cecfa9bdf8..0000000000000000000000000000000000000000
Binary files a/docs-cn/14-reference/taosAdapter-architecture.png and /dev/null differ
diff --git a/docs-cn/14-reference/taosAdapter-architecture.webp b/docs-cn/14-reference/taosAdapter-architecture.webp
new file mode 100644
index 0000000000000000000000000000000000000000..a4162b0a037c06d34191784716c51080b9f8a570
Binary files /dev/null and b/docs-cn/14-reference/taosAdapter-architecture.webp differ
diff --git a/docs-cn/20-third-party/01-grafana.mdx b/docs-cn/20-third-party/01-grafana.mdx
index 9a4c33d8aceb086ff8ba8dca0f38b1bcbf762005..40b5c0ff4f2de8ff9eeb3afa61728ca7a899f5ea 100644
--- a/docs-cn/20-third-party/01-grafana.mdx
+++ b/docs-cn/20-third-party/01-grafana.mdx
@@ -18,21 +18,22 @@ TDengine 能够与开源数据可视化系统 [Grafana](https://www.grafana.com/
## 配置 Grafana
-TDengine 的 Grafana 插件托管在 GitHub,可从 下载,当前最新版本为 3.1.4。
-
-推荐使用 [`grafana-cli` 命令行工具](https://grafana.com/docs/grafana/latest/administration/cli/) 进行插件安装。
+使用 [`grafana-cli` 命令行工具](https://grafana.com/docs/grafana/latest/administration/cli/) 进行插件[安装](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation)。
```bash
-sudo -u grafana grafana-cli \
- --pluginUrl https://github.com/taosdata/grafanaplugin/releases/download/v3.1.7/tdengine-datasource-3.1.7.zip \
- plugins install tdengine-datasource
+grafana-cli plugins install tdengine-datasource
+# with sudo
+sudo -u grafana grafana-cli plugins install tdengine-datasource
```
-或者下载到本地并解压到 Grafana 插件目录。
+或者从 [GitHub](https://github.com/taosdata/grafanaplugin/releases/tag/latest) 或 [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) 下载 .zip 文件到本地并解压到 Grafana 插件目录。命令行下载示例如下:
```bash
-GF_VERSION=3.1.7
+GF_VERSION=3.2.2
+# from GitHub
wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
+# from Grafana
+wget -O tdengine-datasource-$GF_VERSION.zip https://grafana.com/api/plugins/tdengine-datasource/versions/$GF_VERSION/download
```
以 CentOS 7.2 操作系统为例,将插件包解压到 /var/lib/grafana/plugins 目录下,重新启动 grafana 即可。
@@ -41,52 +42,41 @@ wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/td
sudo unzip tdengine-datasource-$GF_VERSION.zip -d /var/lib/grafana/plugins/
```
-:::note
-3.1.6 和更早版本未签名,会在 Grafana 7.3+ / 8.x 版本签名检查时失败导致无法加载插件,需要在 grafana.ini 文件中修改配置如下:
-
-```ini
-[plugins]
-allow_loading_unsigned_plugins = tdengine-datasource
-```
-
-:::
-
-在 Docker 环境下,可以使用如下的环境变量设置自动安装并设置 TDengine 插件:
+如果 Grafana 在 Docker 环境下运行,可以使用如下的环境变量设置自动安装 TDengine 数据源插件:
```bash
-GF_INSTALL_PLUGINS=https://github.com/taosdata/grafanaplugin/releases/download/v3.1.4/tdengine-datasource-3.1.4.zip;tdengine-datasource
-GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=tdengine-datasource
+GF_INSTALL_PLUGINS=tdengine-datasource
```
## 使用 Grafana
### 配置数据源
-用户可以直接通过 http://localhost:3000 的网址,登录 Grafana 服务器(用户名/密码:admin/admin),通过左侧 `Configuration -> Data Sources` 可以添加数据源,如下图所示:
+用户可以直接通过 的网址,登录 Grafana 服务器(用户名/密码:admin/admin),通过左侧 `Configuration -> Data Sources` 可以添加数据源,如下图所示:
-
+
点击 `Add data source` 可进入新增数据源页面,在查询框中输入 TDengine 可选择添加,如下图所示:
-
+
进入数据源配置页面,按照默认提示修改相应配置即可:
-
+
-- Host: TDengine 集群中提供 REST 服务 (在 2.4 之前由 taosd 提供, 从 2.4 开始由 taosAdapter 提供)的组件所在服务器的 IP 地址与 TDengine REST 服务的端口号(6041),默认 http://localhost:6041。
+- Host: TDengine 集群中提供 REST 服务 (在 2.4 之前由 taosd 提供, 从 2.4 开始由 taosAdapter 提供)的组件所在服务器的 IP 地址与 TDengine REST 服务的端口号(6041),默认 。
- User:TDengine 用户名。
- Password:TDengine 用户密码。
点击 `Save & Test` 进行测试,成功会有如下提示:
-
+
### 创建 Dashboard
回到主界面创建 Dashboard,点击 Add Query 进入面板查询页面:
-
+
如上图所示,在 Query 中选中 `TDengine` 数据源,在下方查询框可输入相应 SQL 进行查询,具体说明如下:
@@ -96,7 +86,7 @@ GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=tdengine-datasource
按照默认提示查询当前 TDengine 部署所在服务器指定间隔系统内存平均使用量如下:
-
+
> 关于如何使用 Grafana 创建相应的监测界面以及更多有关使用 Grafana 的信息,请参考 Grafana 官方的[文档](https://grafana.com/docs/)。
diff --git a/docs-cn/20-third-party/09-emq-broker.md b/docs-cn/20-third-party/09-emq-broker.md
index f57ccb20e6517c51b55093d11fa767bef7d0c9a8..833fa97e2e5f9f138718e18bb16aa3e65abca8cc 100644
--- a/docs-cn/20-third-party/09-emq-broker.md
+++ b/docs-cn/20-third-party/09-emq-broker.md
@@ -45,25 +45,25 @@ MQTT 是流行的物联网数据传输协议,[EMQX](https://github.com/emqx/em
使用浏览器打开网址 http://IP:18083 并登录 EMQX Dashboard。初次安装用户名为 `admin` 密码为:`public`
-
+
### 创建规则(Rule)
选择左侧“规则引擎(Rule Engine)”中的“规则(Rule)”并点击“创建(Create)”按钮:
-
+
### 编辑 SQL 字段
-
+
### 新增“动作(action handler)”
-
+
### 新增“资源(Resource)”
-
+
选择“发送数据到 Web 服务“并点击“新建资源”按钮:
@@ -71,13 +71,13 @@ MQTT 是流行的物联网数据传输协议,[EMQX](https://github.com/emqx/em
选择“发送数据到 Web 服务“并填写 请求 URL 为 运行 taosAdapter 的服务器地址和端口(默认为 6041)。其他属性请保持默认值。
-
+
### 编辑“动作(action)”
编辑资源配置,增加 Authorization 认证的键/值配对项,相关文档请参考[ TDengine REST API 文档](https://docs.taosdata.com/reference/rest-api/)。在消息体中输入规则引擎替换模板。
-
+
## 编写模拟测试程序
@@ -164,7 +164,7 @@ MQTT 是流行的物联网数据传输协议,[EMQX](https://github.com/emqx/em
注意:代码中 CLIENT_NUM 在开始测试中可以先设置一个较小的值,避免硬件性能不能完全处理较大并发客户端数量。
-
+
## 执行测试模拟发送 MQTT 数据
@@ -173,19 +173,19 @@ npm install mqtt mockjs --save --registry=https://registry.npm.taobao.org
node mock.js
```
-
+
## 验证 EMQX 接收到数据
在 EMQX Dashboard 规则引擎界面进行刷新,可以看到有多少条记录被正确接收到:
-
+
## 验证数据写入到 TDengine
使用 TDengine CLI 程序登录并查询相应数据库和表,验证数据是否被正确写入到 TDengine 中:
-
+
TDengine 详细使用方法请参考 [TDengine 官方文档](https://docs.taosdata.com/)。
EMQX 详细使用方法请参考 [EMQX 官方文档](https://www.emqx.io/docs/zh/v4.4/rule/rule-engine.html)。
diff --git a/docs-cn/20-third-party/11-kafka.md b/docs-cn/20-third-party/11-kafka.md
index d12d5fab75671d8a1e7356e766d0e8979c6519c2..8369806adcfe1b195348e7d60160609cde9150e8 100644
--- a/docs-cn/20-third-party/11-kafka.md
+++ b/docs-cn/20-third-party/11-kafka.md
@@ -7,17 +7,17 @@ TDengine Kafka Connector 包含两个插件: TDengine Source Connector 和 TDeng
## 什么是 Kafka Connect?
-Kafka Connect 是 Apache Kafka 的一个组件,用于使其它系统,比如数据库、云服务、文件系统等能方便地连接到 Kafka。数据既可以通过 Kafka Connect 从其它系统流向 Kafka, 也可以通过 Kafka Connect 从 Kafka 流向其它系统。从其它系统读数据的插件称为 Source Connector, 写数据到其它系统的插件称为 Sink Connector。Source Connector 和 Sink Connector 都不会直接连接 Kafka Broker,Source Connector 把数据转交给 Kafka Connect。Sink Connector 从 Kafka Connect 接收数据。
+Kafka Connect 是 [Apache Kafka](https://kafka.apache.org/) 的一个组件,用于使其它系统,比如数据库、云服务、文件系统等能方便地连接到 Kafka。数据既可以通过 Kafka Connect 从其它系统流向 Kafka, 也可以通过 Kafka Connect 从 Kafka 流向其它系统。从其它系统读数据的插件称为 Source Connector, 写数据到其它系统的插件称为 Sink Connector。Source Connector 和 Sink Connector 都不会直接连接 Kafka Broker,Source Connector 把数据转交给 Kafka Connect。Sink Connector 从 Kafka Connect 接收数据。
-
+
TDengine Source Connector 用于把数据实时地从 TDengine 读出来发送给 Kafka Connect。TDengine Sink Connector 用于 从 Kafka Connect 接收数据并写入 TDengine。
-
+
## 什么是 Confluent?
-Confluent 在 Kafka 的基础上增加很多扩展功能。包括:
+[Confluent](https://www.confluent.io/) 在 Kafka 的基础上增加很多扩展功能。包括:
1. Schema Registry
2. REST 代理
@@ -26,7 +26,7 @@ Confluent 在 Kafka 的基础上增加很多扩展功能。包括:
5. 管理和监控 Kafka 的 GUI —— Confluent 控制中心
这些扩展功能有的包含在社区版本的 Confluent 中,有的只有企业版能用。
-
+
Confluent 企业版提供了 `confluent` 命令行工具管理各个组件。
@@ -81,10 +81,10 @@ Development: false
git clone https://github.com:taosdata/kafka-connect-tdengine.git
cd kafka-connect-tdengine
mvn clean package
-unzip -d $CONFLUENT_HOME/share/confluent-hub-components/ target/components/packages/taosdata-kafka-connect-tdengine-0.1.0.zip
+unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
```
-以上脚本先 clone 项目源码,然后用 Maven 编译打包。打包完成后在 `target/components/packages/` 目录生成了插件的 zip 包。把这个 zip 包解压到安装插件的路径即可。安装插件的路径在配置文件 `$CONFLUENT_HOME/etc/kafka/connect-standalone.properties` 中。默认的路径为 `$CONFLUENT_HOME/share/confluent-hub-components/`。
+以上脚本先 clone 项目源码,然后用 Maven 编译打包。打包完成后在 `target/components/packages/` 目录生成了插件的 zip 包。把这个 zip 包解压到安装插件的路径即可。上面的示例中使用了内置的插件安装路径: `$CONFLUENT_HOME/share/java/`。
### 用 confluent-hub 安装
@@ -98,7 +98,7 @@ confluent local services start
```
:::note
-一定要先安装插件再启动 Confluent, 否则会出现找不到类的错误。Kafka Connect 的日志(默认路径: /tmp/confluent.xxxx/connect/logs/connect.log)中会输出成功安装的插件,据此可判断插件是否安装成功。
+一定要先安装插件再启动 Confluent, 否则加载插件会失败。
:::
:::tip
@@ -125,6 +125,61 @@ Control Center is [UP]
清空数据可执行 `rm -rf /tmp/confluent.106668`。
:::
+### 验证各个组件是否启动成功
+
+输入命令:
+
+```
+confluent local services status
+```
+
+如果各组件都启动成功,会得到如下输出:
+
+```
+Connect is [UP]
+Control Center is [UP]
+Kafka is [UP]
+Kafka REST is [UP]
+ksqlDB Server is [UP]
+Schema Registry is [UP]
+ZooKeeper is [UP]
+```
+
+### 验证插件是否安装成功
+
+在 Kafka Connect 组件完全启动后,可用以下命令列出成功加载的插件:
+
+```
+confluent local services connect plugin list
+```
+
+如果成功安装,会输出如下:
+
+```txt {4,9}
+Available Connect Plugins:
+[
+ {
+ "class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector",
+ "type": "sink",
+ "version": "1.0.0"
+ },
+ {
+ "class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
+ "type": "source",
+ "version": "1.0.0"
+ },
+......
+```
+
+如果插件安装失败,请检查 Kafka Connect 的启动日志是否有异常信息,用以下命令输出日志路径:
+```
+echo `cat /tmp/confluent.current`/connect/connect.stdout
+```
+该命令的输出类似: `/tmp/confluent.104086/connect/connect.stdout`。
+
+与日志文件 `connect.stdout` 同一目录,还有一个文件名为: `connect.properties`。在这个文件的末尾,可以看到最终生效的 `plugin.path`, 它是一系列用逗号分割的路径。如果插件安装失败,很可能是因为实际的安装路径不包含在 `plugin.path` 中。
+
+
## TDengine Sink Connector 的使用
TDengine Sink Connector 的作用是同步指定 topic 的数据到 TDengine。用户无需提前创建数据库和超级表。可手动指定目标数据库的名字(见配置参数 connection.database), 也可按一定规则生成(见配置参数 connection.database.prefix)。
@@ -144,7 +199,7 @@ vi sink-demo.properties
sink-demo.properties 内容如下:
```ini title="sink-demo.properties"
-name=tdengine-sink-demo
+name=TDengineSinkConnector
connector.class=com.taosdata.kafka.connect.sink.TDengineSinkConnector
tasks.max=1
topics=meters
@@ -153,6 +208,7 @@ connection.user=root
connection.password=taosdata
connection.database=power
db.schemaless=line
+data.precision=ns
key.converter=org.apache.kafka.connect.storage.StringConverter
value.converter=org.apache.kafka.connect.storage.StringConverter
```
@@ -179,6 +235,7 @@ confluent local services connect connector load TDengineSinkConnector --config .
"connection.url": "jdbc:TAOS://127.0.0.1:6030",
"connection.user": "root",
"connector.class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector",
+ "data.precision": "ns",
"db.schemaless": "line",
"key.converter": "org.apache.kafka.connect.storage.StringConverter",
"tasks.max": "1",
@@ -196,10 +253,10 @@ confluent local services connect connector load TDengineSinkConnector --config .
准备测试数据的文本文件,内容如下:
```txt title="test-data.txt"
-meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000000
-meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250000000
-meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249000000
-meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250000000
+meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000000
+meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250000000
+meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249000000
+meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250000000
```
使用 kafka-console-producer 向主题 meters 添加测试数据。
@@ -223,10 +280,10 @@ Database changed.
taos> select * from meters;
ts | current | voltage | phase | groupid | location |
===============================================================================================================================================================
- 2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | Beijing.Haidian |
- 2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | Beijing.Haidian |
- 2022-03-28 09:56:51.249000000 | 10.800000000 | 223.000000000 | 0.290000000 | 3 | Beijing.Haidian |
- 2022-03-28 09:56:51.250000000 | 11.300000000 | 221.000000000 | 0.350000000 | 3 | Beijing.Haidian |
+ 2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles |
+ 2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles |
+ 2022-03-28 09:56:51.249000000 | 10.800000000 | 223.000000000 | 0.290000000 | 3 | California.LosAngeles |
+ 2022-03-28 09:56:51.250000000 | 11.300000000 | 221.000000000 | 0.350000000 | 3 | California.LosAngeles |
Query OK, 4 row(s) in set (0.004208s)
```
@@ -275,7 +332,7 @@ DROP DATABASE IF EXISTS test;
CREATE DATABASE test;
USE test;
CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
-INSERT INTO d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) d1002 USING meters TAGS(Beijing.Chaoyang, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) d1003 USING meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) d1003 USING meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) d1004 USING meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) d1004 USING meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000);
+INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) d1002 USING meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) d1003 USING meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) d1003 USING meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) d1004 USING meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) d1004 USING meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000);
```
使用 TDengine CLI, 执行 SQL 文件。
@@ -302,8 +359,8 @@ kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topi
```
......
-meters,location="beijing.chaoyang",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000
-meters,location="beijing.chaoyang",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000
+meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000
+meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000
......
```
@@ -356,21 +413,33 @@ confluent local services connect connector unload TDengineSourceConnector
2. `connection.database.prefix`: 当 connection.database 为 null 时, 目标数据库的前缀。可以包含占位符 '${topic}'。 比如 kafka_${topic}, 对于主题 'orders' 将写入数据库 'kafka_orders'。 默认 null。当为 null 时,目标数据库的名字和主题的名字是一致的。
3. `batch.size`: 分批写入每批记录数。当 Sink Connector 一次接收到的数据大于这个值时将分批写入。
4. `max.retries`: 发生错误时的最大重试次数。默认为 1。
-5. `retry.backoff.ms`: 发送错误时重试的时间间隔。单位毫秒,默认 3000。
-6. `db.schemaless`: 数据格式,必须指定为: line、json、telnet 中的一个。分别代表 InfluxDB 行协议格式、 OpenTSDB JSON 格式、 OpenTSDB Telnet 行协议格式。
+5. `retry.backoff.ms`: 发送错误时重试的时间间隔。单位毫秒,默认为 3000。
+6. `db.schemaless`: 数据格式,可选值为:
+ 1. line :代表 InfluxDB 行协议格式
+ 2. json : 代表 OpenTSDB JSON 格式
+ 3. telnet :代表 OpenTSDB Telnet 行协议格式
+7. `data.precision`: 使用 InfluxDB 行协议格式时,时间戳的精度。可选值为:
+ 1. ms : 表示毫秒
+ 2. us : 表示微秒
+ 3. ns : 表示纳秒。默认为纳秒。
### TDengine Source Connector 特有的配置
1. `connection.database`: 源数据库名称,无缺省值。
2. `topic.prefix`: 数据导入 kafka 后 topic 名称前缀。 使用 `topic.prefix` + `connection.database` 名称作为完整 topic 名。默认为空字符串 ""。
-3. `timestamp.initial`: 数据同步起始时间。格式为'yyyy-MM-dd HH:mm:ss'。默认 "1970-01-01 00:00:00"。
-4. `poll.interval.ms`: 拉取数据间隔,单位为 ms。默认 1000。
+3. `timestamp.initial`: 数据同步起始时间。格式为'yyyy-MM-dd HH:mm:ss'。默认为 "1970-01-01 00:00:00"。
+4. `poll.interval.ms`: 拉取数据间隔,单位为 ms。默认为 1000。
5. `fetch.max.rows` : 检索数据库时最大检索条数。 默认为 100。
-6. `out.format`: 数据格式。取值 line 或 json。line 表示 InfluxDB Line 协议格式, json 表示 OpenTSDB JSON 格式。默认 line。
+6. `out.format`: 数据格式。取值 line 或 json。line 表示 InfluxDB Line 协议格式, json 表示 OpenTSDB JSON 格式。默认为 line。
+
+## 其他说明
+
+1. 插件的安装位置可以自定义,请参考官方文档:https://docs.confluent.io/home/connect/self-managed/install.html#install-connector-manually。
+2. 本教程的示例程序使用了 Confluent 平台,但是 TDengine Kafka Connector 本身同样适用于独立安装的 Kafka, 且配置方法相同。关于如何在独立安装的 Kafka 环境使用 Kafka Connect 插件, 请参考官方文档: https://kafka.apache.org/documentation/#connect。
## 问题反馈
-https://github.com/taosdata/kafka-connect-tdengine/issues
+无论遇到任何问题,都欢迎在本项目的 Github 仓库反馈: https://github.com/taosdata/kafka-connect-tdengine/issues。
## 参考
diff --git a/docs-cn/20-third-party/add_datasource1.webp b/docs-cn/20-third-party/add_datasource1.webp
new file mode 100644
index 0000000000000000000000000000000000000000..211edc4457abd0db6b0ef64636d61d65b5f43db6
Binary files /dev/null and b/docs-cn/20-third-party/add_datasource1.webp differ
diff --git a/docs-cn/20-third-party/add_datasource2.webp b/docs-cn/20-third-party/add_datasource2.webp
new file mode 100644
index 0000000000000000000000000000000000000000..8ab547231fee4d3b0874fcfe08c0ce152b0c53a1
Binary files /dev/null and b/docs-cn/20-third-party/add_datasource2.webp differ
diff --git a/docs-cn/20-third-party/add_datasource3.webp b/docs-cn/20-third-party/add_datasource3.webp
new file mode 100644
index 0000000000000000000000000000000000000000..d8a733360a09b4425c571f254a9ecb298c04b72f
Binary files /dev/null and b/docs-cn/20-third-party/add_datasource3.webp differ
diff --git a/docs-cn/20-third-party/add_datasource4.webp b/docs-cn/20-third-party/add_datasource4.webp
new file mode 100644
index 0000000000000000000000000000000000000000..b1e0fc6e2b27df4af1bb5ad92756bcb5d4fda63e
Binary files /dev/null and b/docs-cn/20-third-party/add_datasource4.webp differ
diff --git a/docs-cn/20-third-party/create_dashboard1.webp b/docs-cn/20-third-party/create_dashboard1.webp
new file mode 100644
index 0000000000000000000000000000000000000000..55eb388833e4df2a46f4d1cf6d346aa11429385d
Binary files /dev/null and b/docs-cn/20-third-party/create_dashboard1.webp differ
diff --git a/docs-cn/20-third-party/create_dashboard2.webp b/docs-cn/20-third-party/create_dashboard2.webp
new file mode 100644
index 0000000000000000000000000000000000000000..bb40e407187718c52e9f617d8ebd3d25fd14b56b
Binary files /dev/null and b/docs-cn/20-third-party/create_dashboard2.webp differ
diff --git a/docs-cn/20-third-party/dashboard-15146.webp b/docs-cn/20-third-party/dashboard-15146.webp
new file mode 100644
index 0000000000000000000000000000000000000000..fae586f5c74317621002416b2824830a7bdf3982
Binary files /dev/null and b/docs-cn/20-third-party/dashboard-15146.webp differ
diff --git a/docs-cn/20-third-party/emqx/add-action-handler.png b/docs-cn/20-third-party/emqx/add-action-handler.png
deleted file mode 100644
index 97a1f933ecfadfcab399938806d73c5a5ecc6427..0000000000000000000000000000000000000000
Binary files a/docs-cn/20-third-party/emqx/add-action-handler.png and /dev/null differ
diff --git a/docs-cn/20-third-party/emqx/add-action-handler.webp b/docs-cn/20-third-party/emqx/add-action-handler.webp
new file mode 100644
index 0000000000000000000000000000000000000000..4a8d105f711991226cfbd43b6e9ab07d7ccc686a
Binary files /dev/null and b/docs-cn/20-third-party/emqx/add-action-handler.webp differ
diff --git a/docs-cn/20-third-party/emqx/check-result-in-taos.png b/docs-cn/20-third-party/emqx/check-result-in-taos.png
deleted file mode 100644
index c17a5c1ea2b9bbd49263056c8bf09c9aabab07d5..0000000000000000000000000000000000000000
Binary files a/docs-cn/20-third-party/emqx/check-result-in-taos.png and /dev/null differ
diff --git a/docs-cn/20-third-party/emqx/check-result-in-taos.webp b/docs-cn/20-third-party/emqx/check-result-in-taos.webp
new file mode 100644
index 0000000000000000000000000000000000000000..8fa040a86104fece02ddaf8986f0a67de316143d
Binary files /dev/null and b/docs-cn/20-third-party/emqx/check-result-in-taos.webp differ
diff --git a/docs-cn/20-third-party/emqx/check-rule-matched.png b/docs-cn/20-third-party/emqx/check-rule-matched.png
deleted file mode 100644
index 9e9a466946a1afa857e2bbc07b14956dd0f984b6..0000000000000000000000000000000000000000
Binary files a/docs-cn/20-third-party/emqx/check-rule-matched.png and /dev/null differ
diff --git a/docs-cn/20-third-party/emqx/check-rule-matched.webp b/docs-cn/20-third-party/emqx/check-rule-matched.webp
new file mode 100644
index 0000000000000000000000000000000000000000..e5a614035739df859b27c817f3b9f41be444b513
Binary files /dev/null and b/docs-cn/20-third-party/emqx/check-rule-matched.webp differ
diff --git a/docs-cn/20-third-party/emqx/client-num.png b/docs-cn/20-third-party/emqx/client-num.png
deleted file mode 100644
index fff48cbf3b271c367079ddde425b3f9b014062f7..0000000000000000000000000000000000000000
Binary files a/docs-cn/20-third-party/emqx/client-num.png and /dev/null differ
diff --git a/docs-cn/20-third-party/emqx/client-num.webp b/docs-cn/20-third-party/emqx/client-num.webp
new file mode 100644
index 0000000000000000000000000000000000000000..a151b184843607d67b649babb3145bfb3e329cda
Binary files /dev/null and b/docs-cn/20-third-party/emqx/client-num.webp differ
diff --git a/docs-cn/20-third-party/emqx/create-resource.png b/docs-cn/20-third-party/emqx/create-resource.png
deleted file mode 100644
index 58da4c391a3692b9f5fa348d952701eab8bcb746..0000000000000000000000000000000000000000
Binary files a/docs-cn/20-third-party/emqx/create-resource.png and /dev/null differ
diff --git a/docs-cn/20-third-party/emqx/create-resource.webp b/docs-cn/20-third-party/emqx/create-resource.webp
new file mode 100644
index 0000000000000000000000000000000000000000..bf9cccbe49c57f925c5e6b094a4c0d88a64242cb
Binary files /dev/null and b/docs-cn/20-third-party/emqx/create-resource.webp differ
diff --git a/docs-cn/20-third-party/emqx/create-rule.png b/docs-cn/20-third-party/emqx/create-rule.png
deleted file mode 100644
index 73b0b6ee3e6065a142df98abe8c0dbb32b34f89d..0000000000000000000000000000000000000000
Binary files a/docs-cn/20-third-party/emqx/create-rule.png and /dev/null differ
diff --git a/docs-cn/20-third-party/emqx/create-rule.webp b/docs-cn/20-third-party/emqx/create-rule.webp
new file mode 100644
index 0000000000000000000000000000000000000000..13e8fc83d48d2fd9d0a303c707ef3024d3ee5203
Binary files /dev/null and b/docs-cn/20-third-party/emqx/create-rule.webp differ
diff --git a/docs-cn/20-third-party/emqx/edit-action.png b/docs-cn/20-third-party/emqx/edit-action.png
deleted file mode 100644
index 2a43ee369a439cf11cee23c11f25d6a84b26d7dc..0000000000000000000000000000000000000000
Binary files a/docs-cn/20-third-party/emqx/edit-action.png and /dev/null differ
diff --git a/docs-cn/20-third-party/emqx/edit-action.webp b/docs-cn/20-third-party/emqx/edit-action.webp
new file mode 100644
index 0000000000000000000000000000000000000000..7f6d2e36a82b1917930e5d3969115db9359674a0
Binary files /dev/null and b/docs-cn/20-third-party/emqx/edit-action.webp differ
diff --git a/docs-cn/20-third-party/emqx/edit-resource.png b/docs-cn/20-third-party/emqx/edit-resource.png
deleted file mode 100644
index 0a0b3560044f4ed6e0a8f040b74085a7e8948b1f..0000000000000000000000000000000000000000
Binary files a/docs-cn/20-third-party/emqx/edit-resource.png and /dev/null differ
diff --git a/docs-cn/20-third-party/emqx/edit-resource.webp b/docs-cn/20-third-party/emqx/edit-resource.webp
new file mode 100644
index 0000000000000000000000000000000000000000..fd5d278fab16bba4e04e1c348d4086dce77abb98
Binary files /dev/null and b/docs-cn/20-third-party/emqx/edit-resource.webp differ
diff --git a/docs-cn/20-third-party/emqx/login-dashboard.png b/docs-cn/20-third-party/emqx/login-dashboard.png
deleted file mode 100644
index d6c5035c98d860faf639ef6611c6719adf80c47b..0000000000000000000000000000000000000000
Binary files a/docs-cn/20-third-party/emqx/login-dashboard.png and /dev/null differ
diff --git a/docs-cn/20-third-party/emqx/login-dashboard.webp b/docs-cn/20-third-party/emqx/login-dashboard.webp
new file mode 100644
index 0000000000000000000000000000000000000000..f84cee668fb6efe1586515ba0dee3ae2f10a5b30
Binary files /dev/null and b/docs-cn/20-third-party/emqx/login-dashboard.webp differ
diff --git a/docs-cn/20-third-party/emqx/rule-engine.png b/docs-cn/20-third-party/emqx/rule-engine.png
deleted file mode 100644
index db110a837b024c82ee9d22f02dcd3a9d06abdd55..0000000000000000000000000000000000000000
Binary files a/docs-cn/20-third-party/emqx/rule-engine.png and /dev/null differ
diff --git a/docs-cn/20-third-party/emqx/rule-engine.webp b/docs-cn/20-third-party/emqx/rule-engine.webp
new file mode 100644
index 0000000000000000000000000000000000000000..c1711c8cc757cd73fef5cb941a1818756241f7f0
Binary files /dev/null and b/docs-cn/20-third-party/emqx/rule-engine.webp differ
diff --git a/docs-cn/20-third-party/emqx/rule-header-key-value.png b/docs-cn/20-third-party/emqx/rule-header-key-value.png
deleted file mode 100644
index b81b9a9684aa2f98d00b7ec21e5de411fb450312..0000000000000000000000000000000000000000
Binary files a/docs-cn/20-third-party/emqx/rule-header-key-value.png and /dev/null differ
diff --git a/docs-cn/20-third-party/emqx/rule-header-key-value.webp b/docs-cn/20-third-party/emqx/rule-header-key-value.webp
new file mode 100644
index 0000000000000000000000000000000000000000..e645b3822dffec86f4926e78a57eaffa1e7f4d8d
Binary files /dev/null and b/docs-cn/20-third-party/emqx/rule-header-key-value.webp differ
diff --git a/docs-cn/20-third-party/emqx/run-mock.png b/docs-cn/20-third-party/emqx/run-mock.png
deleted file mode 100644
index 0da25818575247732d5d3a783aa52cf7ce24662c..0000000000000000000000000000000000000000
Binary files a/docs-cn/20-third-party/emqx/run-mock.png and /dev/null differ
diff --git a/docs-cn/20-third-party/emqx/run-mock.webp b/docs-cn/20-third-party/emqx/run-mock.webp
new file mode 100644
index 0000000000000000000000000000000000000000..ed33f1666d456f1ab40ed6830af4550d4c7ca037
Binary files /dev/null and b/docs-cn/20-third-party/emqx/run-mock.webp differ
diff --git a/docs-cn/20-third-party/import_dashboard1.webp b/docs-cn/20-third-party/import_dashboard1.webp
new file mode 100644
index 0000000000000000000000000000000000000000..d4fb374ce8bb75c8a0fbdbb9cab5b30eb29ab06d
Binary files /dev/null and b/docs-cn/20-third-party/import_dashboard1.webp differ
diff --git a/docs-cn/20-third-party/import_dashboard2.webp b/docs-cn/20-third-party/import_dashboard2.webp
new file mode 100644
index 0000000000000000000000000000000000000000..9f74dc96be20ab64b5fb555aaccdaa1c1139b35c
Binary files /dev/null and b/docs-cn/20-third-party/import_dashboard2.webp differ
diff --git a/docs-cn/20-third-party/kafka/Kafka_Connect.png b/docs-cn/20-third-party/kafka/Kafka_Connect.png
deleted file mode 100644
index f3dc02ea2a743c6e1ae5531e14f820e3adeca29a..0000000000000000000000000000000000000000
Binary files a/docs-cn/20-third-party/kafka/Kafka_Connect.png and /dev/null differ
diff --git a/docs-cn/20-third-party/kafka/Kafka_Connect.webp b/docs-cn/20-third-party/kafka/Kafka_Connect.webp
new file mode 100644
index 0000000000000000000000000000000000000000..8f2000a749b0a2ccec9939abd144c53c44fbe171
Binary files /dev/null and b/docs-cn/20-third-party/kafka/Kafka_Connect.webp differ
diff --git a/docs-cn/20-third-party/kafka/confluentPlatform.png b/docs-cn/20-third-party/kafka/confluentPlatform.png
deleted file mode 100644
index f8e69f2c7f64d809996b2d1bf1370b67b8030850..0000000000000000000000000000000000000000
Binary files a/docs-cn/20-third-party/kafka/confluentPlatform.png and /dev/null differ
diff --git a/docs-cn/20-third-party/kafka/confluentPlatform.webp b/docs-cn/20-third-party/kafka/confluentPlatform.webp
new file mode 100644
index 0000000000000000000000000000000000000000..ff03d4e51aaaec85f07ff41ecda0fb9bd6cb2847
Binary files /dev/null and b/docs-cn/20-third-party/kafka/confluentPlatform.webp differ
diff --git a/docs-cn/20-third-party/kafka/streaming-integration-with-kafka-connect.png b/docs-cn/20-third-party/kafka/streaming-integration-with-kafka-connect.png
deleted file mode 100644
index 26d8a866d706180c900d69bb6f57ca2dff0047dd..0000000000000000000000000000000000000000
Binary files a/docs-cn/20-third-party/kafka/streaming-integration-with-kafka-connect.png and /dev/null differ
diff --git a/docs-cn/20-third-party/kafka/streaming-integration-with-kafka-connect.webp b/docs-cn/20-third-party/kafka/streaming-integration-with-kafka-connect.webp
new file mode 100644
index 0000000000000000000000000000000000000000..120d534ec132cea2ccef6cf87a3ce680a5ac6e9c
Binary files /dev/null and b/docs-cn/20-third-party/kafka/streaming-integration-with-kafka-connect.webp differ
diff --git a/docs-cn/21-tdinternal/01-arch.md b/docs-cn/21-tdinternal/01-arch.md
index 6f479efc1ad13e27899e7819d194a2df59ed3ad1..433cb4808b60ce73c639a23beef45fb8e1afb7dd 100644
--- a/docs-cn/21-tdinternal/01-arch.md
+++ b/docs-cn/21-tdinternal/01-arch.md
@@ -11,7 +11,7 @@ TDengine 的设计是基于单个硬件、软件系统不可靠,基于任何
TDengine 分布式架构的逻辑结构图如下:
-
+
图 1 TDengine架构示意图
@@ -41,7 +41,7 @@ TDengine 分布式架构的逻辑结构图如下:
- 集群数据节点对外提供 RESTful 服务占用一个 TCP 端口,是 serverPort+11。
- 集群内数据节点与 Arbitrator 节点之间通讯占用一个 TCP 端口,是 serverPort+12。
-因此一个数据节点总的端口范围为 serverPort 到 serverPort+12,总共 13 个 TCP/UDP 端口。使用时,需要确保防火墙将这些端口打开。每个数据节点可以配置不同的 serverPort。详细的端口情况请参见 [TDengine 2.0 端口说明](/train-faq/faq#port)
+因此一个数据节点总的端口范围为 serverPort 到 serverPort+12,总共 13 个 TCP/UDP 端口。确保集群中所有主机在端口 6030-6042 上的 TCP/UDP 协议能够互通。详细的端口情况请参见 [TDengine 2.0 端口说明](/train-faq/faq#port)
**集群对外连接:**TDengine 集群可以容纳单个、多个甚至几千个数据节点。应用只需要向集群中任何一个数据节点发起连接即可,连接需要提供的网络参数是一数据节点的 End Point(FQDN 加配置的端口号)。通过命令行 CLI 启动应用 taos 时,可以通过选项-h 来指定数据节点的 FQDN,-P 来指定其配置的端口号,如果端口不配置,将采用 TDengine 的系统配置参数 serverPort。
@@ -63,7 +63,7 @@ TDengine 分布式架构的逻辑结构图如下:
为解释 vnode、mnode、taosc 和应用之间的关系以及各自扮演的角色,下面对写入数据这个典型操作的流程进行剖析。
-
+
图 2 TDengine 典型的操作流程
@@ -135,7 +135,7 @@ TDengine 除 vnode 分片之外,还对时序数据按照时间段进行分区
Master Vnode 遵循下面的写入流程:
-
+
图 3 TDengine Master 写入流程
@@ -150,7 +150,7 @@ Master Vnode 遵循下面的写入流程:
对于 slave vnode,写入流程是:
-
+
图 4 TDengine Slave 写入流程
@@ -284,7 +284,7 @@ SELECT COUNT(*) FROM d1001 WHERE ts >= '2017-7-14 00:00:00' AND ts < '2017-7-14
TDengine 对每个数据采集点单独建表,但在实际应用中经常需要对不同的采集点数据进行聚合。为高效的进行聚合操作,TDengine 引入超级表(STable)的概念。超级表用来代表一特定类型的数据采集点,它是包含多张表的表集合,集合里每张表的模式(schema)完全一致,但每张表都带有自己的静态标签,标签可以有多个,可以随时增加、删除和修改。应用可通过指定标签的过滤条件,对一个 STable 下的全部或部分表进行聚合或统计操作,这样大大简化应用的开发。其具体流程如下图所示:
-
+
图 5 多表聚合查询原理图
diff --git a/docs-cn/21-tdinternal/02-replica.md b/docs-cn/21-tdinternal/02-replica.md
deleted file mode 100644
index 6a384b982d22956dd514d8df05dc827ca6f8b729..0000000000000000000000000000000000000000
--- a/docs-cn/21-tdinternal/02-replica.md
+++ /dev/null
@@ -1,256 +0,0 @@
----
-sidebar_label: 数据复制模块设计
-title: 数据复制模块设计
----
-
-## 数据复制概述
-
-数据复制(Replication)是指同一份数据在多个物理地点保存。它的目的是防止数据丢失,提高系统的高可用性(High Availability),而且通过应用访问多个副本,提升数据查询性能。
-
-在高可靠的大数据系统里,数据复制是必不可少的一大功能。数据复制又分为实时复制与非实时复制。实时复制是指任何数据的更新(包括数据的增加、删除、修改)操作,会被实时的复制到所有副本,这样任何一台机器宕机或网络出现故障,整个系统还能提供最新的数据,保证系统的正常工作。而非实时复制,是指传统的数据备份操作,按照固定的时间周期,将一份数据全量或增量复制到其他地方。如果主节点宕机,副本是很大可能没有最新数据,因此在有些场景是无法满足要求的。
-
-TDengine面向的是物联网场景,需要支持数据的实时复制,来最大程度保证系统的可靠性。实时复制有两种方式,一种是异步复制,一种是同步复制。异步复制(Asynchronous Replication)是指数据由Master转发给Slave后,Master并不需要等待Slave回复确认,这种方式效率高,但有极小的概率会丢失数据。同步复制是指Master将数据转发给Slave后,需要等待Slave的回复确认,才会通知应用写入成功,这种方式效率偏低,但能保证数据绝不丢失。
-
-数据复制是与数据存储(写入、读取)密切相关的,但两者又是相对独立,可以完全脱耦的。在TDengine系统中,有两种不同类型的数据,一种是时序数据,由TSDB模块负责;一种是元数据(Meta Data), 由MNODE负责。这两种性质不同的数据都需要同步功能。数据复制模块通过不同的实例启动配置参数,为这两种类型数据都提供同步功能。
-
-在阅读本文之前,请先阅读《[TDengine 2.0 整体架构](/tdinternal/arch/)》,了解TDengine的集群设计和基本概念
-
-特别注明:本文中提到数据更新操作包括数据的增加、删除与修改。
-
-## 基本概念和定义
-
-TDengine里存在vnode, mnode, vnode用来存储时序数据,mnode用来存储元数据。但从同步数据复制的模块来看,两者没有本质的区别,因此本文里的虚拟节点不仅包括vnode, 也包括mnode, vgroup也指mnode group, 除非特别注明。
-
-**版本(version)**:
-
-一个虚拟节点组里多个虚拟节点互为备份,来保证数据的有效与可靠,是依靠虚拟节点组的数据版本号来维持的。TDengine2.0设计里,对于版本的定义如下:客户端发起增加、删除、修改的流程,无论是一条记录还是多条,只要是在一个请求里,这个数据更新请求被TDengine的一个虚拟节点收到后,经过合法性检查后,可以被写入系统时,就会被分配一个版本号。这个版本号在一个虚拟节点里从1开始,是单调连续递增的。无论这条记录是采集的时序数据还是meta data, 一样处理。当Master转发一个写入请求到slave时,必须带上版本号。一个虚拟节点将一数据更新请求写入WAL时,需要带上版本号。
-
-不同虚拟节点组的数据版本号是完全独立的,互不相干的。版本号本质上是数据更新记录的transaction ID,但用来标识数据集的版本。
-
-**角色(role):**
-
-一个虚拟节点可以是master, slave, unsynced或offline状态。
-
-- master: 具有最新的数据,容许客户端往里写入数据,一个虚拟节点组,至多一个master.
-- slave:与master是同步的,但不容许客户端往里写入数据,根据配置,可以容许客户端对其进行查询。
-- unsynced: 节点处于非同步状态,比如虚拟节点刚启动、或与其他虚拟节点的连接出现故障等。处于该状态时,该虚拟节点既不能提供写入,也不能提供查询服务。
-- offline: 由于宕机或网络原因,无法访问到某虚拟节点时,其他虚拟节点将该虚拟节点标为离线。但请注意,该虚拟节点本身的状态可能是unsynced或其他,但不会是离线。
-
-**Quorum:**
-
-指数据写入成功所需要的确认数。对于异步复制,quorum设为1,具有master角色的虚拟节点自己确认即可。对于同步复制,需要至少大于等于2。原则上,Quorum >=1 并且 Quorum <= replication(副本数)。这个参数在启动一个同步模块实例时需要提供。
-
-**WAL:**
-
-TDengine的WAL(Write Ahead Log)与cassandra的commit log, mySQL的bin log, Postgres的WAL没本质区别。没有写入数据库文件,还保存在内存的数据都会先存在WAL。当数据已经成功写入数据库数据文件,相应的WAL会被删除。但需要特别指明的是,在TDengine系统里,有几点:
-
-- 每个虚拟节点有自己独立的wal
-- WAL里包含而且仅仅包含来自客户端的数据更新操作,每个更新操作都会被打上一个版本号
-
-**复制实例:**
-
-复制模块只是一可执行的代码,复制实例是指正在运行的复制模块的一个实例,一个节点里,可以存在多个实例。原则上,一个节点有多少虚拟节点,就可以启动多少实例。对于副本数为1的场景,应用可以决定是否需要启动同步实例。应用启动一个同步模块的实例时,需要提供的就是虚拟节点组的配置信息,包括:
-
-- 虚拟节点个数,即replication number
-- 各虚拟节点所在节点的信息,包括node的end point
-- quorum, 需要的数据写入成功的确认数
-- 虚拟节点的初始版本号
-
-## 数据复制模块的基本工作原理
-
-TDengine采取的是Master-Slave模式进行同步,与流行的RAFT一致性算法比较一致。总结下来,有几点:
-
-1. 一个vgroup里有一到多个虚拟节点,每个虚拟节点都有自己的角色
-2. 客户端只能向角色是master的虚拟节点发起数据更新操作,因为master具有最新版本的数据,如果向非Master发起数据更新操作,会直接收到错误
-3. 客户端可以向master, 也可以向角色是Slave的虚拟节点发起查询操作,但不能对unsynced的虚拟节点发起任何操作
-4. 如果master不存在,这个vgroup是不能对外提供数据更新和查询服务的
-5. master收到客户端的数据更新操作时,会将其转发给slave节点
-6. 一个虚拟节点的版本号比master低的时候,会发起数据恢复流程,成功后,才会成为slave
-
-数据实时复制有三个主要流程:选主、数据转发、数据恢复。后续做详细讨论。
-
-## 虚拟节点之间的网络连接
-
-虚拟节点之间通过TCP进行连接,节点之间的状态交换、数据包的转发都是通过这个TCP连接(peerFd)进行。为避免竞争,两个虚拟节点之间的TCP连接,总是由IP地址(UINT32)小的节点作为TCP客户端发起。一旦TCP连接被中断,虚拟节点能通过TCP socket自动检测到,将对方标为offline。如果监测到任何错误(比如数据恢复流程),虚拟节点将主动重置该连接。
-
-一旦作为客户端的节点连接不成或中断,它将周期性的每隔一秒钟去试图去连接一次。因为TCP本身有心跳机制,虚拟节点之间不再另行提供心跳。
-
-如果一个unsynced节点要发起数据恢复流程,它与Master将建立起专有的TCP连接(syncFd)。数据恢复完成后,该连接会被关闭。而且为限制资源的使用,系统只容许一定数量(配置参数tsMaxSyncNum)的数据恢复的socket存在。如果超过这个数字,系统会将新的数据恢复请求延后处理。
-
-任意一个节点,无论有多少虚拟节点,都会启动而且只会启动一个TCP server, 来接受来自其他虚拟节点的上述两类TCP的连接请求。当TCP socket建立起来,客户端侧发送的消息体里会带有vgId(全局唯一的vgroup ID), TCP 服务器侧会检查该vgId是否已经在该节点启动运行。如果已经启动运行,就接受其请求。如果不存在,就直接将连接请求关闭。在TDengine代码里,mnode group的vgId设置为1。
-
-## 选主流程
-
-当同一组的两个虚拟节点之间(vnode A, vnode B)建立连接后,他们互换status消息。status消息里包含本地存储的同一虚拟节点组内所有虚拟节点的role和version。
-
-如果一个虚拟节点(vnode A)检测到与同一虚拟节点组内另外一虚拟节点(vnode B)的连接中断,vnode A将立即把vnode B的role设置为offline。无论是接收到另外一虚拟节点发来的status消息,还是检测与另外一虚拟节点的连接中断,该虚拟节点都将进入状态处理流程。状态处理流程的规则如下:
-
-1. 如果检测到在线的节点数没有超过一半,则将自己的状态设置为unsynced.
-2. 如果在线的虚拟节点数超过一半,会检查master节点是否存在,如果存在,则会决定是否将自己状态改为slave或启动数据恢复流程。
-3. 如果master不存在,则会检查自己保存的各虚拟节点的状态信息与从另一节点接收到的是否一致,如果一致,说明节点组里状态已经稳定一致,则会触发选举流程。如果不一致,说明状态还没趋于一致,即使master不存在,也不进行选主。由于要求状态信息一致才进行选举,每个虚拟节点根据同样的信息,会选出同一个虚拟节点做master,无需投票表决。
-4. 自己的状态是根据规则自己决定并修改的,并不需要其他节点同意,包括成为master。一个节点无权修改其他节点的状态。
-5. 如果一个虚拟节点检测到自己或其他虚拟节点的role发生改变,该节点会广播它自己保存的各个虚拟节点的状态信息(role和version)。
-
-具体的流程图如下:
-
-
-
-选择Master的具体规则如下:
-
-1. 如果只有一个副本,该副本永远就是master
-2. 所有副本都在线时,版本最高的被选为master
-3. 在线的虚拟节点数过半,而且有虚拟节点是slave的话,该虚拟节点自动成为master
-4. 对于2和3,如果多个虚拟节点满足成为master的要求,那么虚拟节点组的节点列表里,最前面的选为master
-
-按照上面的规则,如果所有虚拟节点都是unsynced(比如全部重启),只有所有虚拟节点上线,才能选出master,该虚拟节点组才能开始对外提供服务。当一个虚拟节点的role发生改变时,sync模块回通过回调函数notifyRole通知应用。
-
-## 数据转发流程
-
-如果vnode A是master, vnode B是slave, vnode A能接受客户端的写请求,而vnode B不能。当vnode A收到写的请求后,遵循下面的流程:
-
-
-
-1. 应用对写请求做基本的合法性检查,通过,则给该请求包打上一个版本号(version, 单调递增)
-2. 应用将打上版本号的写请求封装一个WAL Head, 写入WAL(Write Ahead Log)
-3. 应用调用API syncForwardToPeer,如果vnode B是slave状态,sync模块将包含WAL Head的数据包通过Forward消息发送给vnode B,否则就不转发。
-4. vnode B收到Forward消息后,调用回调函数writeToCache, 交给应用处理
-5. vnode B应用在写入成功后,都需要调用syncConfirmForward通知sync模块已经写入成功。
-6. 如果quorum大于1,vnode B需要等待应用的回复确认,收到确认后,vnode B发送Forward Response消息给node A。
-7. 如果quorum大于1,vnode A需要等待vnode B或其他副本对Forward消息的确认。
-8. 如果quorum大于1,vnode A收到quorum-1条确认消息后,调用回调函数confirmForward,通知应用写入成功。
-9. 如果quorum为1,上述6,7,8步不会发生。
-10. 如果要等待slave的确认,master会启动2秒的定时器(可配置),如果超时,则认为失败。
-
-对于回复确认,sync模块提供的是异步回调函数,因此APP在调用syncForwardToPeer之后,无需等待,可以处理下一个操作。在Master与Slave的TCP连接管道里,可能有多个Forward消息,这些消息是严格按照应用提供的顺序排好的。对于Forward Response也是一样,TCP管道里存在多个,但都是排序好的。这个顺序,SYNC模块并没有做特别的事情,是由APP单线程顺序写来保证的(TDengine里每个vnode的写数据,都是单线程)。
-
-## 数据恢复流程
-
-如果一虚拟节点(vnode B) 处于unsynced状态,master存在(vnode A),而且其版本号比master的低,它将立即启动数据恢复流程。在理解恢复流程时,需要澄清几个关于文件的概念和处理规则。
-
-1. 每个文件(无论是archived data的file还是wal)都有一个index, 这需要应用来维护(vnode里,该index就是fileId*3 + 0/1/2, 对应data, head与last三个文件)。如果index为0,表示系统里最老的数据文件。对于mode里的文件,数量是固定的,对应于acct, user, db, table等文件。
-2. 任何一个数据文件(file)有名字、大小,还有一个magic number。只有文件名、大小与magic number一致时,两个文件才判断是一样的,无需同步。Magic number可以是checksum, 也可以是简单的文件大小。怎么计算magic,换句话说,如何检测数据文件是否有效,完全由应用决定。
-3. 文件名的处理有点复杂,因为每台服务器的路径可能不一致。比如node A的TDengine的数据文件存放在 /etc/taos目录下,而node B的数据存放在 /home/jhtao目录下。因此同步模块需要应用在启动一个同步实例时提供一个path,这样两台服务器的绝对路径可以不一样,但仍然可以做对比,做同步。
-4. 当sync模块调用回调函数getFileInfo获得数据文件信息时,有如下的规则
- * index 为0,表示获取最老的文件,同时修改index返回给sync模块。如果index不为0,表示获取指定位置的文件。
- * 如果name为空,表示sync想获取位于index位置的文件信息,包括magic, size。Master节点会这么调用
- * 如果name不为空,表示sync想获取指定文件名和index的信息,slave节点会这么调用
- * 如果某个index的文件不存在,magic返回0,表示文件已经是最后一个。因此整个系统里,文件的index必须是连续的一段整数。
-5. 当sync模块调用回调函数getWalInfo获得wal信息时,有如下规则
- * index为0,表示获得最老的WAL文件, 返回时,index更新为具体的数字
- * 如果返回0,表示这是最新的一个WAL文件,如果返回值是1,表示后面还有更新的WAL文件
- * 返回的文件名为空,那表示没有WAL文件
-6. 无论是getFileInfo, 还是getWalInfo, 只要获取出错(不是文件不存在),返回-1即可,系统会报错,停止同步
-
-整个数据恢复流程分为两大步骤,第一步,先恢复archived data(file), 然后恢复wal。具体流程如下:
-
-
-
-1. 通过已经建立的TCP连接,发送sync req给master节点
-2. master收到sync req后,以client的身份,向vnode B主动建立一新的专用于同步的TCP连接(syncFd)
-3. 新的TCP连接建立成功后,master将开始retrieve流程,对应的,vnode B将同步启动restore流程
-4. Retrieve/Restore流程里,先处理所有archived data (vnode里的data, head, last文件),后处理WAL data。
-5. 对于archived data,master将通过回调函数getFileInfo获取数据文件的基本信息,包括文件名、magic以及文件大小。
-6. master 将获得的文件名、magic以及文件大小发给vnode B
-7. vnode B将回调函数getFile获得magic和文件大小,如果两者一致,就认为无需同步,如果两者不一致 ,就认为需要同步。vnode B将结果通过消息FileAck发回master
-8. 如果文件需要同步,master就调用sendfile把整个文件发往vnode B
-9. 如果文件不需要同步,master(vnode A)就重复5,6,7,8,直到所有文件被处理完
-
-对于WAL同步,流程如下:
-
-1. master节点调用回调函数getWalInfo,获取WAL的文件名。
-2. 如果getWalInfo返回值大于0,表示该文件还不是最后一个WAL,因此master调用sendfile一下把该文件发送给vnode B
-3. 如果getWalInfo返回时为0,表示该文件是最后一个WAL,因为文件可能还处于写的状态中,sync模块要根据WAL Head的定义逐条读出记录,然后发往vnode B。
-4. vnode A读取TCP连接传来的数据,按照WAL Head,逐条读取,如果版本号比现有的大,调用回调函数writeToCache,交给应用处理。如果小,直接扔掉。
-5. 上述流程循环,直到所有WAL文件都被处理完。处理完后,master就会将新来的数据包通过Forward消息转发给slave。
-
-从同步文件启动起,sync模块会通过inotify监控所有处理过的file以及wal。一旦发现被处理过的文件有更新变化,同步流程将中止,会重新启动。因为有可能落盘操作正在进行(比如历史数据导入,内存数据落盘),把已经处理过的文件进行了修改,需要重新同步才行。
-
-对于最后一个WAL (LastWal)的处理逻辑有点复杂,因为这个文件往往是打开写的状态,有很多场景需要考虑,比如:
-
-- LastWal文件size在增长,需要重新读;
-- LastWal文件虽然已经打开写,但内容为空;
-- LastWal文件已经被关闭,应用生成了新的Last WAL文件;
-- LastWal文件没有被关闭,但数据落盘的原因,没有读到完整的一条记录;
-- LastWal文件没有被关闭,但数据落盘的原因,还有部分记录暂时读取不到;
-
-sync模块通过inotify监控LastWal文件的更新和关闭操作。而且在确认已经尽可能读完LastWal的数据后,会将对方同步状态设置为SYNC_CACHE。该状态下,master节点会将新的记录转发给vnode B,而此时vnode B并没有完成同步,需要把这些转发包先存在recv buffer里,等WAL处理完后,vnode A再把recv buffer里的数据包通过回调writeToCache交给应用处理。
-
-等vnode B把这些buffered forwards处理完,同步流程才算结束,vnode B正式变为slave。
-
-## Master分布均匀性问题
-
-因为Master负责写、转发,消耗的资源会更多,因此Master在整个集群里分布均匀比较理想。
-
-但在TDengine的设计里,如果多个虚拟节点都符合master条件,TDengine选在列表中最前面的做Master, 这样是否导致在集群里,Master数量的分布不均匀问题呢?这取决于应用的设计。
-
-给一个具体例子,系统里仅仅有三个节点,IP地址分别为IP1, IP2, IP3. 在各个节点上,TDengine创建了多个虚拟节点组,每个虚拟节点组都有三个副本。如果三个副本的顺序在所有虚拟节点组里都是IP1, IP2, IP3, 那毫无疑问,master将集中在IP1这个节点,这是我们不想看到的。
-
-但是,如果在创建虚拟节点组时,增加随机性,这个问题就不存在了。比如在vgroup 1, 顺序是IP1, IP2, IP3, 在vgroup 2里,顺序是IP2, IP3, IP1, 在vgroup 3里,顺序是IP3, IP1, IP2。最后master的分布会是均匀的。
-
-因此在创建一个虚拟节点组时,应用需要保证节点的顺序是round robin或完全随机。
-
-## 少数虚拟节点写入成功的问题
-
-在某种情况下,写入成功的确认数大于0,但小于配置的Quorum, 虽然有虚拟节点数据更新成功,master仍然会认为数据更新失败,并通知客户端写入失败。
-
-这个时候,系统存在数据不一致的问题,因为有的虚拟节点已经写入成功,而有的写入失败。一个处理方式是,Master重置(reset)与其他虚拟节点的连接,该虚拟节点组将自动进入选举流程。按照规则,已经成功写入数据的虚拟节点将成为新的master,组内的其他虚拟节点将从master那里恢复数据。
-
-因为写入失败,客户端会重新写入数据。但对于TDengine而言,是OK的。因为时序数据都是有时间戳的,时间戳相同的数据更新操作,第一次会执行,但第二次会自动扔掉。对于Meta Data(增加、删除库、表等等)的操作,也是OK的。一张表、库已经被创建或删除,再创建或删除,不会被执行的。
-
-在TDengine的设计里,虚拟节点与虚拟节点之间,是一个TCP连接,是一个pipeline,数据块一个接一个按顺序在这个pipeline里等待处理。一旦某个数据块的处理失败,这个连接会被重置,后续的数据块的处理都会失败。因此不会存在Pipeline里一个数据块更新失败,但下一个数据块成功的可能。
-
-## Split Brain的问题
-
-选举流程中,有个强制要求,那就是一定有超过半数的虚拟节点在线。但是如果replication正好是偶数,这个时候,完全可能存在splt brain问题。
-
-为解决这个问题,TDengine提供Arbitrator的解决方法。Arbitrator是一个节点,它的任务就是接受任何虚拟节点的连接请求,并保持它。
-
-在启动复制模块实例时,在配置参数中,应用可以提供Arbitrator的IP地址。如果是奇数个副本,复制模块不会与这个arbitrator去建立连接,但如果是偶数个副本,就会主动去建立连接。
-
-Arbitrator的程序tarbitrator.c在复制模块的同一目录, 编译整个系统时,会在bin目录生成。命令行参数“-?”查看可以配置的参数,比如绑定的IP地址,监听的端口号。
-
-## 与RAFT相比的异同
-
-数据一致性协议流行的有两种,Paxos与Raft. 本设计的实现与Raft有很多类同之处,下面做一些比较
-
-相同之处:
-
-- 三大流程一致:Raft里有Leader election, replication, safety,完全对应TDengine的选举、数据转发、数据恢复三个流程。
-- 节点状态定义一致:Raft里每个节点有Leader, Follower, Candidate三个状态,TDengine里是Master, Slave, Unsynced, Offline。多了一个offlince, 但本质上是一样的,因为offline是外界看一个节点的状态,但该节点本身是处于master, slave 或unsynced的。
-- 数据转发流程完全一样,Master(leader)需要等待回复确认。
-- 数据恢复流程几乎一样,Raft没有涉及历史数据同步问题,只考虑了WAL数据同步。
-
-不同之处:
-
-- 选举流程不一样:Raft里任何一个节点是candidate时,主动向其他节点发出vote request,如果超过半数回答Yes,这个candidate就成为Leader,开始一个新的term。而TDengine的实现里,节点上线、离线或角色改变都会触发状态消息在节点组内传播,等节点组里状态稳定一致之后才触发选举流程,因为状态稳定一致,基于同样的状态信息,每个节点做出的决定会是一致的,一旦某个节点符合成为master的条件,无需其他节点认可,它会自动将自己设为master。TDengine里,任何一个节点检测到其他节点或自己的角色发生改变,就会向节点组内其他节点进行广播。Raft里不存在这样的机制,因此需要投票来解决。
-- 对WAL的一条记录,Raft用term + index来做唯一标识。但TDengine只用version(类似index),在TDengine实现里,仅仅用version是完全可行的, 因为TDengine的选举机制,没有term的概念。
-
-如果整个虚拟节点组全部宕机,重启,但不是所有虚拟节点都上线,这个时候TDengine是不会选出master的,因为未上线的节点有可能有最高version的数据。而RAFT协议,只要超过半数上线,就会选出Leader。
-
-## Meta Data的数据复制
-
-TDengine里存在时序数据,也存在Meta Data。Meta Data对数据的可靠性要求更高,那么TDengine设计能否满足要求呢?下面做个仔细分析。
-
-TDengine里Meta Data包括以下:
-
-- account 信息
-- 一个account下面,可以有多个user, 多个DB
-- 一个DB下面有多个vgroup
-- 一个DB下面有多个stable
-- 一个vgroup下面有多个table
-- 整个系统有多个mnode, dnode
-- 一个dnode可以有多个vnode
-
-上述的account, user, DB, vgroup, table, stable, mnode, dnode都有自己的属性,这些属性是TDengine自己定义的,不会开放给用户进行修改。这些Meta Data的查询都比较简单,都可以采用key-value模型进行存储。这些Meta Data还具有几个特点:
-
-1. 上述的Meta Data之间有一定的层级关系,比如必须先创建DB,才能创建table, stable。只有先创建dnode,才可能创建vnode, 才可能创建vgroup。因此他们创建的顺序是绝对不能错的。
-2. 在客户端应用的数据更新操作得到TDengine服务器侧确认后,所执行的数据更新操作绝对不能丢失。否则会造成客户端应用与服务器的数据不一致。
-3. 上述的Meta Data是容许重复操作的。比如插入新记录后,再插入一次,删除一次后,再删除一次,更新一次后,再更新一次,不会对系统产生任何影响,不会改变系统任何状态。
-
-对于特点1,本设计里,数据的写入是单线程的,按照到达的先后顺序,给每个数据更新操作打上版本号,版本号大的记录一定是晚于版本号小的写入系统,数据写入顺序是100%保证的,绝对不会让版本号大的记录先写入。复制过程中,数据块的转发也是严格按照顺序进行的,因此TDengine的数据复制设计是能保证Meta Data的创建顺序的。
-
-对于特点2,只要Quorum数设置等于replica,那么一定能保证回复确认过的数据更新操作不会在服务器侧丢失。即使某节点永不起来,只要超过一半的节点还是online, 查询服务不会受到任何影响。这时,如果某个节点离线超过一定时长,系统可以自动补充新的节点,以保证在线的节点数在绝大部分时间是100%的。
-
-对于特点3,完全可能发生,服务器确实持久化存储了某一数据更新操作,但客户端应用出了问题,认为操作不成功,它会重新发起操作。但对于Meta Data而言,没有关系,客户端可以再次发起同样的操作,不会有任何影响。
-
-总结来看,只要quorum设置大于一,本数据复制的设计是能满足Meta Data的需求的。目前,还没有发现漏洞。
diff --git a/docs-cn/21-tdinternal/03-taosd.md b/docs-cn/21-tdinternal/03-taosd.md
deleted file mode 100644
index 6a5734102c85db291339ce93a2231cb8196053f6..0000000000000000000000000000000000000000
--- a/docs-cn/21-tdinternal/03-taosd.md
+++ /dev/null
@@ -1,119 +0,0 @@
----
-sidebar_label: taosd 的设计
-title: taosd的设计
----
-
-逻辑上,TDengine 系统包含 dnode,taosc 和 App,dnode 是服务器侧执行代码 taosd 的一个运行实例,因此 taosd 是 TDengine 的核心,本文对 taosd 的设计做一简单的介绍,模块内的实现细节请见其他文档。
-
-## 系统模块图
-
-taosd 包含 rpc,dnode,vnode,tsdb,query,cq,sync,wal,mnode,http,monitor 等模块,具体如下图:
-
-
-
-taosd 的启动入口是 dnode 模块,dnode 然后启动其他模块,包括可选配置的 http,monitor 模块。taosc 或 dnode 之间交互的消息都是通过 rpc 模块进行,dnode 模块根据接收到的消息类型,将消息分发到 vnode 或 mnode 的消息队列,或由 dnode 模块自己消费。dnode 的工作线程(worker)消费消息队列里的消息,交给 mnode 或 vnode 进行处理。下面对各个模块做简要说明。
-
-## RPC 模块
-
-该模块负责 taosd 与 taosc,以及其他数据节点之间的通讯。TDengine 没有采取标准的 HTTP 或 gRPC 等第三方工具,而是实现了自己的通讯模块 RPC。
-
-考虑到物联网场景下,数据写入的包一般不大,因此除支持 TCP 连接之外,RPC 还支持 UDP 连接。当数据包小于 15K 时,RPC 将采用 UDP 方式进行连接,否则将采用 TCP 连接。对于查询类的消息,RPC 不管包的大小,总是采取 TCP 连接。对于 UDP 连接,RPC 实现了自己的超时、重传、顺序检查等机制,以保证数据可靠传输。
-
-RPC 模块还提供数据压缩功能,如果数据包的字节数超过系统配置参数 compressMsgSize,RPC 在传输中将自动压缩数据,以节省带宽。
-
-为保证数据的安全和数据的 integrity,RPC 模块采用 MD5 做数字签名,对数据的真实性和完整性进行认证。
-
-## DNODE 模块
-
-该模块是整个 taosd 的入口,它具体负责如下任务:
-
-- 系统的初始化,包括
- - 从文件 taos.cfg 读取系统配置参数,从文件 dnodeCfg.json 读取数据节点的配置参数;
- - 启动 RPC 模块,并建立起与 taosc 通讯的 server 连接,与其他数据节点通讯的 server 连接;
- - 启动并初始化 dnode 的内部管理,该模块将扫描该数据节点已有的 vnode ,并打开它们;
- - 初始化可配置的模块,如 mnode,http,monitor 等。
-- 数据节点的管理,包括
- - 定时的向 mnode 发送 status 消息,报告自己的状态;
- - 根据 mnode 的指示,创建、改变、删除 vnode;
- - 根据 mnode 的指示,修改自己的配置参数;
-- 消息的分发、消费,包括
- - 为每一个 vnode 和 mnode 的创建并维护一个读队列、一个写队列;
- - 将从 taosc 或其他数据节点来的消息,根据消息类型,将其直接分发到不同的消息队列,或由自己的管理模块直接消费;
- - 维护一个读的线程池,消费读队列的消息,交给 vnode 或 mnode 处理。为支持高并发,一个读线程(worker)可以消费多个队列的消息,一个读队列可以由多个 worker 消费;
- - 维护一个写的线程池,消费写队列的消息,交给 vnode 或 mnode 处理。为保证写操作的序列化,一个写队列只能由一个写线程负责,但一个写线程可以负责多个写队列。
-
-taosd 的消息消费由 dnode 通过读写线程池进行控制,是系统的中枢。该模块内的结构体图如下:
-
-
-
-## VNODE 模块
-
-vnode 是一独立的数据存储查询逻辑单元,但因为一个 vnode 只能容许一个 DB ,因此 vnode 内部没有 account,DB,user 等概念。为实现更好的模块化、封装以及未来的扩展,它有很多子模块,包括负责存储的 TSDB,负责查询的 query,负责数据复制的 sync,负责数据库日志的的 WAL,负责连续查询的 cq(continuous query),负责事件触发的流计算的 event 等模块,这些子模块只与 vnode 模块发生关系,与其他模块没有任何调用关系。模块图如下:
-
-
-
-vnode 模块向下,与 dnodeVRead,dnodeVWrite 发生互动,向上,与子模块发生互动。它主要的功能有:
-
-- 协调各个子模块的互动。各个子模块之间都不直接调用,都需要通过 vnode 模块进行;
-- 对于来自 taosc 或 mnode 的写操作,vnode 模块将其分解为写日志(WAL),转发(sync),本地存储(TSDB)子模块的操作;
-- 对于查询操作,分发到 query 模块进行。
-
-一个数据节点里有多个 vnode,因此 vnode 模块是有多个运行实例的。每个运行实例是完全独立的。
-
-vnode 与其子模块是通过 API 直接调用,而不是通过消息队列传递。而且各个子模块只与 vnode 模块有交互,不与 dnode,rpc 等模块发生任何直接关联。
-
-## MNODE 模块
-
-mnode 是整个系统的大脑,负责整个系统的资源调度,负责 meta data 的管理与存储。
-
-一个运行的系统里,只有一个 mnode,但它有多个副本(由系统配置参数 numOfMnodes 控制)。这些副本分布在不同的 dnode 里,目的是保证系统的高可靠运行。副本之间的数据复制是采用同步而非异步的方式,以确保数据的一致性,确保数据不会丢失。这些副本会自动选举一个 Master,其他副本是 slave。所有数据更新类的操作,都只能在 master 上进行,而查询类的可以在 slave 节点上进行。代码实现上,同步模块与 vnode 共享,但 mnode 被分配一个特殊的 vgroup ID: 1,而且 quorum 大于 1。整个集群系统是由多个 dnode 组成的,运行的 mnode 的副本数不可能超过 dnode 的个数,但不会超过配置的副本数。如果某个 mnode 副本宕机一段时间,只要超过半数的 mnode 副本仍在运行,运行的 mnode 会自动根据整个系统的资源情况,在其他 dnode 里再启动一个 mnode,以保证运行的副本数。
-
-各个 dnode 通过信息交换,保存有 mnode 各个副本的 End Point 列表,并向其中的 master 节点定时(间隔由系统配置参数 statusInterval 控制)发送 status 消息,消息体里包含该 dnode 的 CPU、内存、剩余存储空间、vnode 个数,以及各个 vnode 的状态(存储空间、原始数据大小、记录条数、角色等)。这样 mnode 就了解整个系统的资源情况,如果用户创建新的表,就可以决定需要在哪个 dnode 创建;如果增加或删除 dnode,或者监测到某 dnode 数据过热、或离线太长,就可以决定需要挪动那些 vnode,以实现负载均衡。
-
-mnode 里还负责 account,user,DB,stable,table,vgroup,dnode 的创建、删除与更新。mnode 不仅把这些 entity 的 meta data 保存在内存,还做持久化存储。但为节省内存,各个表的标签值不保存在 mnode(保存在 vnode),而且子表不维护自己的 schema,而是与 stable 共享。为减小 mnode 的查询压力,taosc 会缓存 table、stable 的 schema。对于查询类的操作,各个 slave mnode 也可以提供,以减轻 master 压力。
-
-## TSDB 模块
-
-TSDB 模块是 vnode 中的负责快速高并发地存储和读取属于该 vnode 的表的元数据及采集的时序数据的引擎。除此之外,TSDB 还提供了表结构的修改、表标签值的修改等功能。TSDB 提供 API 供 vnode 和 query 等模块调用。TSDB 中存储了两类数据,1:元数据信息;2:时序数据
-
-### 元数据信息
-
-TSDB 中存储的元数据包含属于其所在的 vnode 中表的类型,schema 的定义等。对于超级表和超级表下的子表而言,又包含了 tag 的 schema 定义以及子表的 tag 值等。对于元数据信息而言,TSDB 就相当于一个全内存的 KV 型数据库,属于该 vnode 的表对象全部在内存中,方便快速查询表的信息。除此之外,TSDB 还对其中的子表,按照 tag 的第一列取值做了全内存的索引,大大加快了对于标签的过滤查询。TSDB 中的元数据的最新状态在落盘时,会以追加(append-only)的形式,写入到 meta 文件中。meta 文件只进行追加操作,即便是元数据的删除,也会以一条记录的形式写入到文件末尾。TSDB 也提供了对于元数据的修改操作,如表 schema 的修改,tag schema 的修改以及 tag 值的修改等。
-
-### 时序数据
-
-每个 TSDB 在创建时,都会事先分配一定量的内存缓冲区,且内存缓冲区的大小可配可修改。表采集的时序数据,在写入 TSDB 时,首先以追加的方式写入到分配的内存缓冲区中,同时建立基于时间戳的内存索引,方便快速查询。当内存缓冲区的数据积累到一定的程度时(达到内存缓冲区总大小的 1/3),则会触发落盘操作,将缓冲区中的数据持久化到硬盘文件上。时序数据在内存缓冲区中是以行(row)的形式存储的。
-
-而时序数据在写入到 TSDB 的数据文件时,是以列(column)的形式存储的。TSDB 中的数据文件包含多个数据文件组,每个数据文件组中又包含 .head、.data 和 .last 三个文件,如(v2f1801.head、v2f1801.data、v2f1801.last)数据文件组。TSDB 中的数据文件组是按照时间跨度进行分片的,默认是 10 天一个文件组,且可通过配置文件及建库选项进行配置。分片的数据文件组又按照编号递增排列,方便快速定位某一时间段的时序数据,高效定位数据文件组。时序数据在 TSDB 的数据文件中是以块的形式进行列式存储的,每个块中只包含一张表的数据,且数据在一个块中是按照时间顺序递增排列的。在一个数据文件组中,.head 文件负责存储数据块的索引及统计信息,如每个块的位置,压缩算法,时间戳范围等。存储在 .head 文件中一张表的索引信息是按照数据块中存储的数据的时间递增排列的,方便进行折半查找等工作。.head 和 .last 文件是存储真实数据块的文件,若数据块中的数据累计到一定程度,则会写入 .data 文件中,否则,会写入 .last 文件中,等待下次落盘时合并数据写入 .data 文件中,从而大大减少文件中块的个数,避免数据的过度碎片化。
-
-## Query 模块
-
-该模块负责整体系统的查询处理。客户端调用该该模块进行 SQL 语法解析,并将查询或写入请求发送到 vnode ,同时负责针对超级表的查询进行二阶段的聚合操作。在 vnode 端,该模块调用 TSDB 模块读取系统中存储的数据进行查询处理。query 模块还定义了系统能够支持的全部查询函数,查询函数的实现机制与查询框架无耦合,可以在不修改查询流程的情况下动态增加查询函数。详细的设计请参见《TDengine 2.0 查询模块设计》。
-
-## SYNC 模块
-
-该模块实现数据的多副本复制,包括 vnode 与 mnode 的数据复制,支持异步和同步两种复制方式,以满足 meta data 与时序数据不同复制的需求。因为它为 mnode 与 vnode 共享,系统为 mnode 副本预留了一个特殊的 vgroup ID:1。因此 vnode group 的 ID 是从 2 开始的。
-
-每个 vnode/mnode 模块实例会有一对应的 sync 模块实例,他们是一一对应的。详细设计请见[TDengine 2.0 数据复制模块设计](/tdinternal/replica/)
-
-## WAL 模块
-
-该模块负责将新插入的数据写入 write ahead log(WAL),为 vnode,mnode 共享。以保证服务器 crash 或其他故障,能从 WAL 中恢复数据。
-
-每个 vnode/mnode 模块实例会有一对应的 WAL 模块实例,是完全一一对应的。WAL 的落盘操作由两个参数 walLevel,fsync 控制。看具体场景,如果要 100% 保证数据不会丢失,需要将 walLevel 配置为 2,fsync 设置为 0,每条数据插入请求,都会实时落盘后,才会给应用确认
-
-## HTTP 模块
-
-该模块负责处理系统对外的 RESTful 接口,可以通过配置,由 dnode 启动或停止 。(仅 2.2 及之前的版本中存在)
-
-该模块将接收到的 RESTful 请求,做了各种合法性检查后,将其变成标准的 SQL 语句,通过 taosc 的异步接口,将请求发往整个系统中的任一 dnode 。收到处理后的结果后,再翻译成 HTTP 协议,返回给应用。
-
-如果 HTTP 模块启动,就意味着启动了一个 taosc 的实例。任一一个 dnode 都可以启动该模块,以实现对 RESTful 请求的分布式处理。
-
-## Monitor 模块
-
-该模块负责检测一个 dnode 的运行状态,可以通过配置,由 dnode 启动或停止。原则上,每个 dnode 都应该启动一个 monitor 实例。
-
-Monitor 采集 TDengine 里的关键操作,比如创建、删除、更新账号、表、库等,而且周期性的收集 CPU、内存、网络等资源的使用情况(采集周期由系统配置参数 monitorInterval 控制)。获得这些数据后,monitor 模块将采集的数据写入系统的日志库(DB 名字由系统配置参数 monitorDbName 控制)。
-
-Monitor 模块使用 taosc 来将采集的数据写入系统,因此每个 monitor 实例,都有一个 taosc 运行实例。
diff --git a/docs-cn/21-tdinternal/12-tsz-compress.md b/docs-cn/21-tdinternal/12-tsz-compress.md
deleted file mode 100644
index baf5df15db3b44edc9e0bd6909e46fa84b676a0b..0000000000000000000000000000000000000000
--- a/docs-cn/21-tdinternal/12-tsz-compress.md
+++ /dev/null
@@ -1,44 +0,0 @@
----
-title: TSZ 压缩算法
----
-
-TSZ 压缩算法是 TDengine 为浮点数据类型提供更加丰富的压缩功能,可以实现浮点数的有损至无损全状态压缩,相比原来在 TDengine 中原有压缩算法,TSZ 压缩算法压缩选项更丰富,压缩率更高,即使切到无损状态下对浮点数压缩,压缩率也会比原来的压缩算法高一倍。
-
-## 适合场景
-
-TSZ 压缩算法压缩率比原来的要高,但压缩时间会更长,即开启 TSZ 压缩算法写入速度会有一些下降,通常情况下会有 20% 左右的下降。影响写入速度是因为需要更多的 CPU 计算,所以从原始数据到压缩好数据的交付时间变长,导致写入速度变慢。如果您的服务器 CPU 配置很高的话,这个影响会变小甚至没有。
-
-另外如果设备产生了大量的高精度浮点数,存储占用的空间非常庞大,但实际使用并不需要那么高的精度时,可以通过 TSZ 压缩的有损压缩功能,把精度压缩至指定的长度,节约存储空间。
-
-总结:采集到了大量浮点数,存储时占用空间过大或出有存储空间不足,需要超高压缩率的场景。
-
-## 使用步骤
-
-- 检查版本支持,2.4.0.10 及之后 TDengine 的版本都支持此功能
-
-- 配置选项开启功能,在 TDengine 的配置文件 taos.cfg 增加一行以下内容,打开 TSZ 功能
-
-```TSZ
-lossyColumns float|double
-```
-
-- 根据自己需要配置其它选项,如果不配置都会按默认值处理。
-
-- 重启服务,配置生效。
-- 确认功能已开启,在服务启动过程中输出的信息如果有前面配置的内容,表明功能已生效:
-
-```TSZ Test
-02/22 10:49:27.607990 00002933 UTL lossyColumns float|double
-```
-
-## 注意事项
-
-- 确认版本是否支持
-
-- 除了服务器启动时的输出的配置成功信息外,不再会有其它的信息输出是使用的哪种压缩算法,可以通过配置前后数据库文件大小来比较效果
-
-- 如果浮点数类型列较少,看整体数据文件大小效果会不太明显
-
-- 此压缩产生的数据文件中浮点数据部分将不能被 2.4.0.10 以下的版本解析,即不向下兼容,使用时避免更换回旧版本,以免数据不能被读取出来。
-
-- 在使用过程中允许反复开启和关闭 TSZ 压缩选项的操作,前后两种压缩算法产生的数据都能正常读取。
diff --git a/docs-cn/21-tdinternal/30-iot-big-data.md b/docs-cn/21-tdinternal/30-iot-big-data.md
deleted file mode 100644
index a234713f883056e3d1a0dcbfe8e2e47a82865f81..0000000000000000000000000000000000000000
--- a/docs-cn/21-tdinternal/30-iot-big-data.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-title: 物联网大数据
-description: "物联网、工业互联网大数据的特点;物联网大数据平台应具备的功能和特点;通用大数据架构为什么不适合处理物联网数据;物联网、车联网、工业互联网大数据平台,为什么推荐使用 TDengine"
----
-
-- [物联网、工业互联网大数据的特点](https://www.taosdata.com/blog/2019/07/09/105.html)
-- [物联网大数据平台应具备的功能和特点](https://www.taosdata.com/blog/2019/07/29/542.html)
-- [通用大数据架构为什么不适合处理物联网数据?](https://www.taosdata.com/blog/2019/07/09/107.html)
-- [物联网、车联网、工业互联网大数据平台,为什么推荐使用 TDengine?](https://www.taosdata.com/blog/2019/07/09/109.html)
diff --git a/docs-cn/21-tdinternal/dnode.webp b/docs-cn/21-tdinternal/dnode.webp
new file mode 100644
index 0000000000000000000000000000000000000000..a56c7e4594df00a721cb48381d68ca3bc813cdc8
Binary files /dev/null and b/docs-cn/21-tdinternal/dnode.webp differ
diff --git a/docs-cn/21-tdinternal/message.webp b/docs-cn/21-tdinternal/message.webp
new file mode 100644
index 0000000000000000000000000000000000000000..a2a42abff3d6e932b41a3abe9feae4a5cc13c9e5
Binary files /dev/null and b/docs-cn/21-tdinternal/message.webp differ
diff --git a/docs-cn/21-tdinternal/modules.webp b/docs-cn/21-tdinternal/modules.webp
new file mode 100644
index 0000000000000000000000000000000000000000..718a6abccdbe40d4a0df5e3812fe0ab943a7c523
Binary files /dev/null and b/docs-cn/21-tdinternal/modules.webp differ
diff --git a/docs-cn/21-tdinternal/multi_tables.webp b/docs-cn/21-tdinternal/multi_tables.webp
new file mode 100644
index 0000000000000000000000000000000000000000..8f649e34a3a62d1b11b4403b2e743ff6b5e47be2
Binary files /dev/null and b/docs-cn/21-tdinternal/multi_tables.webp differ
diff --git a/docs-cn/21-tdinternal/replica-forward.webp b/docs-cn/21-tdinternal/replica-forward.webp
new file mode 100644
index 0000000000000000000000000000000000000000..512efd4eba8f23ad0f8607eaaf5525f51ecdcf0e
Binary files /dev/null and b/docs-cn/21-tdinternal/replica-forward.webp differ
diff --git a/docs-cn/21-tdinternal/replica-master.webp b/docs-cn/21-tdinternal/replica-master.webp
new file mode 100644
index 0000000000000000000000000000000000000000..57030a11f563af2689dbcfd206183f410b121aee
Binary files /dev/null and b/docs-cn/21-tdinternal/replica-master.webp differ
diff --git a/docs-cn/21-tdinternal/replica-restore.webp b/docs-cn/21-tdinternal/replica-restore.webp
new file mode 100644
index 0000000000000000000000000000000000000000..f282c2d4d23f517e3ef08e906cea7e9c5edc0b2a
Binary files /dev/null and b/docs-cn/21-tdinternal/replica-restore.webp differ
diff --git a/docs-cn/21-tdinternal/structure.webp b/docs-cn/21-tdinternal/structure.webp
new file mode 100644
index 0000000000000000000000000000000000000000..b77a42c074b15302b5c3ab889fb550a46dd549b3
Binary files /dev/null and b/docs-cn/21-tdinternal/structure.webp differ
diff --git a/docs-cn/21-tdinternal/vnode.webp b/docs-cn/21-tdinternal/vnode.webp
new file mode 100644
index 0000000000000000000000000000000000000000..fae3104c89c542c26790b509d12ad56661082c32
Binary files /dev/null and b/docs-cn/21-tdinternal/vnode.webp differ
diff --git a/docs-cn/21-tdinternal/write_master.webp b/docs-cn/21-tdinternal/write_master.webp
new file mode 100644
index 0000000000000000000000000000000000000000..9624036ed3d46ed60924ead9ce5c61acee0f4652
Binary files /dev/null and b/docs-cn/21-tdinternal/write_master.webp differ
diff --git a/docs-cn/21-tdinternal/write_slave.webp b/docs-cn/21-tdinternal/write_slave.webp
new file mode 100644
index 0000000000000000000000000000000000000000..7c45dec11b00e6a738de458f9e1bedacfad75a96
Binary files /dev/null and b/docs-cn/21-tdinternal/write_slave.webp differ
diff --git a/docs-cn/25-application/01-telegraf.md b/docs-cn/25-application/01-telegraf.md
index f63a6701eed2b4c5b98f577d5b2867ae6dada387..95df8699ef85b02d6e9dba398c787644fc9089b2 100644
--- a/docs-cn/25-application/01-telegraf.md
+++ b/docs-cn/25-application/01-telegraf.md
@@ -16,7 +16,7 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如
本文介绍不需要写一行代码,通过简单修改几行配置文件,就可以快速搭建一个基于 TDengine + Telegraf + Grafana 的 IT 运维系统。架构如下图:
-
+
## 安装步骤
@@ -75,7 +75,7 @@ sudo systemctl start telegraf
点击左侧齿轮图标并选择 `Plugins`,应该可以找到 TDengine data source 插件图标。
点击左侧加号图标并选择 `Import`,从 `https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json` 下载 dashboard JSON 文件后导入。之后可以看到如下界面的仪表盘:
-
+
## 总结
diff --git a/docs-cn/25-application/02-collectd.md b/docs-cn/25-application/02-collectd.md
index 5e6bc6577b2f4c8564e4533ced745d0b214ec748..78c61bb969092d7040ddcb3d02ce7bd29a784858 100644
--- a/docs-cn/25-application/02-collectd.md
+++ b/docs-cn/25-application/02-collectd.md
@@ -16,7 +16,7 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如
本文介绍不需要写一行代码,通过简单修改几行配置文件,就可以快速搭建一个基于 TDengine + collectd / statsD + Grafana 的 IT 运维系统。架构如下图:
-
+
## 安装步骤
@@ -81,12 +81,12 @@ repeater 部分添加 { host:'', port:
-### 18. go 语言编写组件编译失败怎样解决?
+### 19. go 语言编写组件编译失败怎样解决?
TDengine 2.3.0.0 及之后的版本包含一个使用 go 语言开发的 taosAdapter 独立组件,需要单独运行,取代之前 taosd 内置的 httpd ,提供包含原 httpd 功能以及支持多种其他软件(Prometheus、Telegraf、collectd、StatsD 等)的数据接入功能。
使用最新 develop 分支代码编译需要先 `git submodule update --init --recursive` 下载 taosAdapter 仓库代码后再编译。
@@ -184,7 +195,7 @@ go env -w GOPROXY=https://goproxy.cn,direct
如果希望继续使用之前的内置 httpd,可以关闭 taosAdapter 编译,使用
`cmake .. -DBUILD_HTTP=true` 使用原来内置的 httpd。
-### 19. 如何查询数据占用的存储空间大小?
+### 20. 如何查询数据占用的存储空间大小?
默认情况下,TDengine 的数据文件存储在 /var/lib/taos ,日志文件存储在 /var/log/taos 。
@@ -193,3 +204,38 @@ go env -w GOPROXY=https://goproxy.cn,direct
若想查看单个数据库占用的大小,可在命令行程序 taos 内指定要查看的数据库后执行 `show vgroups;` ,通过得到的 VGroup id 去 /var/lib/taos/vnode 下查看包含的文件夹大小。
若仅仅想查看指定(超级)表的数据块分布及大小,可查看[_block_dist 函数](https://docs.taosdata.com/taos-sql/select/#_block_dist-%E5%87%BD%E6%95%B0)
+
+### 21. 客户端连接串如何保证高可用?
+
+请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2021/04/16/2287.html)
+
+### 22. 时间戳的时区信息是怎样处理的?
+
+TDengine 中时间戳的时区总是由客户端进行处理,而与服务端无关。具体来说,客户端会对 SQL 语句中的时间戳进行时区转换,转为 UTC 时区(即 Unix 时间戳——Unix Timestamp)再交由服务端进行写入和查询;在读取数据时,服务端也是采用 UTC 时区提供原始数据,客户端收到后再根据本地设置,把时间戳转换为本地系统所要求的时区进行显示。
+
+客户端在处理时间戳字符串时,会采取如下逻辑:
+
+1. 在未做特殊设置的情况下,客户端默认使用所在操作系统的时区设置。
+2. 如果在 taos.cfg 中设置了 timezone 参数,则客户端会以这个配置文件中的设置为准。
+3. 如果在 C/C++/Java/Python 等各种编程语言的 Connector Driver 中,在建立数据库连接时显式指定了 timezone,那么会以这个指定的时区设置为准。例如 Java Connector 的 JDBC URL 中就有 timezone 参数。
+4. 在书写 SQL 语句时,也可以直接使用 Unix 时间戳(例如 `1554984068000`)或带有时区的时间戳字符串,也即以 RFC 3339 格式(例如 `2013-04-12T15:52:01.123+08:00`)或 ISO-8601 格式(例如 `2013-04-12T15:52:01.123+0800`)来书写时间戳,此时这些时间戳的取值将不再受其他时区设置的影响。
+
+### 23. TDengine 2.0 都会用到哪些网络端口?
+
+使用到的网络端口请看文档:[serverport](/reference/config/#serverport)
+
+需要注意,文档上列举的端口号都是以默认端口 6030 为前提进行说明,如果修改了配置文件中的设置,那么列举的端口都会随之出现变化,管理员可以参考上述的信息调整防火墙设置。
+
+### 24. 为什么 RESTful 接口无响应、Grafana 无法添加 TDengine 为数据源、TDengineGUI 选了 6041 端口还是无法连接成功??
+
+taosAdapter 从 TDengine 2.4.0.0 版本开始成为 TDengine 服务端软件的组成部分,是 TDengine 集群和应用程序之间的桥梁和适配器。在此之前 RESTful 接口等功能是由 taosd 内置的 HTTP 服务提供的,而如今要实现上述功能需要执行:```systemctl start taosadapter``` 命令来启动 taosAdapter 服务。
+
+需要说明的是,taosAdapter 的日志路径 path 需要单独配置,默认路径是 /var/log/taos ;日志等级 logLevel 有 8 个等级,默认等级是 info ,配置成 panic 可关闭日志输出。请注意操作系统 / 目录的空间大小,可通过命令行参数、环境变量或配置文件来修改配置,默认配置文件是 /etc/taos/taosadapter.toml 。
+
+有关 taosAdapter 组件的详细介绍请看文档:[taosAdapter](https://docs.taosdata.com/reference/taosadapter/)
+
+### 25. 发生了 OOM 怎么办?
+
+OOM 是操作系统的保护机制,当操作系统内存(包括 SWAP )不足时,会杀掉某些进程,从而保证操作系统的稳定运行。通常内存不足主要是如下两个原因导致,一是剩余内存小于 vm.min_free_kbytes ;二是程序请求的内存大于剩余内存。还有一种情况是内存充足但程序占用了特殊的内存地址,也会触发 OOM 。
+
+TDengine 会预先为每个 VNode 分配好内存,每个 Database 的 VNode 个数受 maxVgroupsPerDb 影响,每个 VNode 占用的内存大小受 Blocks 和 Cache 影响。要防止 OOM,需要在项目建设之初合理规划内存,并合理设置 SWAP ,除此之外查询过量的数据也有可能导致内存暴涨,这取决于具体的查询语句。TDengine 企业版对内存管理做了优化,采用了新的内存分配器,对稳定性有更高要求的用户可以考虑选择企业版。
diff --git a/docs-cn/27-train-faq/02-video.mdx b/docs-cn/27-train-faq/02-video.mdx
deleted file mode 100644
index b644412332fe817ea7fdc2c9ddc176ecc9858c56..0000000000000000000000000000000000000000
--- a/docs-cn/27-train-faq/02-video.mdx
+++ /dev/null
@@ -1,25 +0,0 @@
----
-title: 视频教程
----
-
-## 技术公开课
-
-- [技术公开课:开源、高效的物联网大数据平台,TDengine 内核技术剖析](https://www.taosdata.com/blog/2020/12/25/2126.html)
-
-## 视频教程
-
-- [TDengine 视频教程 - 快速上手](https://www.taosdata.com/blog/2020/11/11/1941.html)
-- [TDengine 视频教程 - 数据建模](https://www.taosdata.com/blog/2020/11/11/1945.html)
-- [TDengine 视频教程 - 集群搭建](https://www.taosdata.com/blog/2020/11/11/1961.html)
-- [TDengine 视频教程 - Go Connector](https://www.taosdata.com/blog/2020/11/11/1951.html)
-- [TDengine 视频教程 - JDBC Connector](https://www.taosdata.com/blog/2020/11/11/1955.html)
-- [TDengine 视频教程 - Node.js Connector](https://www.taosdata.com/blog/2020/11/11/1957.html)
-- [TDengine 视频教程 - Python Connector](https://www.taosdata.com/blog/2020/11/11/1963.html)
-- [TDengine 视频教程 - RESTful Connector](https://www.taosdata.com/blog/2020/11/11/1965.html)
-- [TDengine 视频教程 - “零”代码运维监控](https://www.taosdata.com/blog/2020/11/11/1959.html)
-
-## 微课堂
-
-关注 TDengine 视频号, 有精心制作的微课堂。
-
-
diff --git a/docs-cn/27-train-faq/03-docker.md b/docs-cn/27-train-faq/03-docker.md
index 845a8751846c0995a43fb1c01e6ace3080176838..7791569b25e102b4634f0fb899fc0973cacc0aa1 100644
--- a/docs-cn/27-train-faq/03-docker.md
+++ b/docs-cn/27-train-faq/03-docker.md
@@ -209,7 +209,7 @@ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' 127.0.0
Press enter key to continue or Ctrl-C to stop
```
- 回车后,该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "beijing" 或者 "shanghai"。
+ 回车后,该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "California.SanFrancisco" 或者 "California.SanDieo"。
最后共插入 1 亿条记录。
@@ -279,7 +279,7 @@ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' 127.0.0
$ taos> select groupid, location from test.d0;
groupid | location |
=================================
- 0 | shanghai |
+ 0 | California.SanDieo |
Query OK, 1 row(s) in set (0.003490s)
```
diff --git a/docs-cn/eco_system.png b/docs-cn/eco_system.png
deleted file mode 100644
index bf8bf8f1e0a2311fc12202d712a8a2f9b8ce419b..0000000000000000000000000000000000000000
Binary files a/docs-cn/eco_system.png and /dev/null differ
diff --git a/docs-cn/eco_system.webp b/docs-cn/eco_system.webp
new file mode 100644
index 0000000000000000000000000000000000000000..d60c38e97c67fa7b2acc703b2ba777d19ae5be13
Binary files /dev/null and b/docs-cn/eco_system.webp differ
diff --git a/docs-en/02-intro/eco_system.png b/docs-en/02-intro/eco_system.png
deleted file mode 100644
index bf8bf8f1e0a2311fc12202d712a8a2f9b8ce419b..0000000000000000000000000000000000000000
Binary files a/docs-en/02-intro/eco_system.png and /dev/null differ
diff --git a/docs-en/02-intro/eco_system.webp b/docs-en/02-intro/eco_system.webp
new file mode 100644
index 0000000000000000000000000000000000000000..d60c38e97c67fa7b2acc703b2ba777d19ae5be13
Binary files /dev/null and b/docs-en/02-intro/eco_system.webp differ
diff --git a/docs-en/02-intro/index.md b/docs-en/02-intro/index.md
index e2309943f3983dcbf7957ef6d478aefa64d7a902..f6766f910f4d7560b782bf02ffa97922523e6167 100644
--- a/docs-en/02-intro/index.md
+++ b/docs-en/02-intro/index.md
@@ -5,39 +5,39 @@ toc_max_heading_level: 2
TDengine is a high-performance, scalable time-series database with SQL support. Its code, including its cluster feature is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](/develop/cache), [stream processing](/develop/continuous-query), [data subscription](/develop/subscribe) and other functionalities to reduce the complexity and cost of development and operation.
-This section introduces the major features, competitive advantages, suited scenarios and benchmarks to help you get a high level picture for TDengine.
+This section introduces the major features, competitive advantages, typical use-cases and benchmarks to help you get a high level overview of TDengine.
## Major Features
The major features are listed below:
-1. Besides [using SQL to insert](/develop/insert-data/sql-writing),it supports [Schemaless writing](/reference/schemaless/),and it supports [InfluxDB LINE](/develop/insert-data/influxdb-line),[OpenTSDB Telnet](/develop/insert-data/opentsdb-telnet), [OpenTSDB JSON ](/develop/insert-data/opentsdb-json) and other protocols.
-2. Support for seamless integration with third-party data collection agents like [Telegraf](/third-party/telegraf),[Prometheus](/third-party/prometheus),[StatsD](/third-party/statsd),[collectd](/third-party/collectd),[icinga2](/third-party/icinga2), [TCollector](/third-party/tcollector), [EMQX](/third-party/emq-broker), [HiveMQ](/third-party/hive-mq-broker). Without a line of code, those agents can write data points into TDengine just by configuration.
-3. Support for [all kinds of queries](/develop/query-data), including aggregation, nested query, downsampling, interpolation, etc.
-4. Support for [user defined functions](/develop/udf)
+1. While TDengine supports [using SQL to insert](/develop/insert-data/sql-writing), it also supports [Schemaless writing](/reference/schemaless/) just like NoSQL databases. TDengine also supports standard protocols like [InfluxDB LINE](/develop/insert-data/influxdb-line),[OpenTSDB Telnet](/develop/insert-data/opentsdb-telnet), [OpenTSDB JSON ](/develop/insert-data/opentsdb-json) among others.
+2. TDengine supports seamless integration with third-party data collection agents like [Telegraf](/third-party/telegraf),[Prometheus](/third-party/prometheus),[StatsD](/third-party/statsd),[collectd](/third-party/collectd),[icinga2](/third-party/icinga2), [TCollector](/third-party/tcollector), [EMQX](/third-party/emq-broker), [HiveMQ](/third-party/hive-mq-broker). These agents can write data into TDengine with simple configuration and without a single line of code.
+3. Support for [all kinds of queries](/develop/query-data), including aggregation, nested query, downsampling, interpolation and others.
+4. Support for [user defined functions](/develop/udf).
5. Support for [caching](/develop/cache). TDengine always saves the last data point in cache, so Redis is not needed in some scenarios.
6. Support for [continuous query](/develop/continuous-query).
7. Support for [data subscription](/develop/subscribe) with the capability to specify filter conditions.
8. Support for [cluster](/cluster/), with the capability of increasing processing power by adding more nodes. High availability is supported by replication.
-9. Provides interactive [command-line interface](/reference/taos-shell) for management, maintenance and ad-hoc query.
+9. Provides an interactive [command-line interface](/reference/taos-shell) for management, maintenance and ad-hoc queries.
10. Provides many ways to [import](/operation/import) and [export](/operation/export) data.
-11. Provides [monitoring](/operation/monitor) on TDengine running instances.
+11. Provides [monitoring](/operation/monitor) on running instances of TDengine.
12. Provides [connectors](/reference/connector/) for [C/C++](/reference/connector/cpp), [Java](/reference/connector/java), [Python](/reference/connector/python), [Go](/reference/connector/go), [Rust](/reference/connector/rust), [Node.js](/reference/connector/node) and other programming languages.
13. Provides a [REST API](/reference/rest-api/).
-14. Supports the seamless integration with [Grafana](/third-party/grafana) for visualization.
+14. Supports seamless integration with [Grafana](/third-party/grafana) for visualization.
15. Supports seamless integration with Google Data Studio.
-For more detail on features, please read through the whole documentation.
+For more details on features, please read through the entire documentation.
## Competitive Advantages
-TDengine makes full use of [the characteristics of time series data](https://tdengine.com/2019/07/09/86.html), such as structured, no transaction, rarely delete or update, etc., and builds its own innovative storage engine and computing engine to differentiate itself from other time series databases with the following advantages.
+Time-series data is structured, not transactional, and is rarely deleted or updated. TDengine makes full use of [these characteristics of time series data](https://tdengine.com/2019/07/09/86.html) to build its own innovative storage engine and computing engine to differentiate itself from other time series databases, with the following advantages.
-- **[High Performance](https://tdengine.com/fast)**: TDengine outperforms other time series databases in data ingestion and querying while significantly reducing storage cost and compute costs, with an innovatively designed and purpose-built storage engine.
+- **[High Performance](https://tdengine.com/fast)**: With an innovatively designed and purpose-built storage engine, TDengine outperforms other time series databases in data ingestion and querying while significantly reducing storage costs and compute costs.
- **[Scalable](https://tdengine.com/scalable)**: TDengine provides out-of-box scalability and high-availability through its native distributed design. Nodes can be added through simple configuration to achieve greater data processing power. In addition, this feature is open source.
-- **[SQL Support](https://tdengine.com/sql-support)**: TDengine uses SQL as the query language, thereby reducing learning and migration costs, while adding SQL extensions to handle time-series data better, and supporting convenient and flexible schemaless data ingestion.
+- **[SQL Support](https://tdengine.com/sql-support)**: TDengine uses SQL as the query language, thereby reducing learning and migration costs, while adding SQL extensions to better handle time-series. Keeping NoSQL developers in mind, TDengine also supports convenient and flexible, schemaless data ingestion.
- **All in One**: TDengine has built-in caching, stream processing and data subscription functions. It is no longer necessary to integrate Kafka/Redis/HBase/Spark or other software in some scenarios. It makes the system architecture much simpler, cost-effective and easier to maintain.
@@ -45,24 +45,24 @@ TDengine makes full use of [the characteristics of time series data](https://tde
- **Zero Management**: Installation and cluster setup can be done in seconds. Data partitioning and sharding are executed automatically. TDengine’s running status can be monitored via Grafana or other DevOps tools.
-- **Zero Learning Costs**: With SQL as the query language and support for ubiquitous tools like Python, Java, C/C++, Go, Rust, and Node.js connectors, there are zero learning costs.
+- **Zero Learning Costs**: With SQL as the query language and support for ubiquitous tools like Python, Java, C/C++, Go, Rust, and Node.js connectors, and a REST API, there are zero learning costs.
-- **Interactive Console**: TDengine provides convenient console access to the database to run ad hoc queries, maintain the database, or manage the cluster without any programming.
+- **Interactive Console**: TDengine provides convenient console access to the database, through a CLI, to run ad hoc queries, maintain the database, or manage the cluster, without any programming.
-With TDengine, the total cost of ownership of time-series data platform can be greatly reduced. Because 1: with its superior performance, the computing and storage resources are reduced significantly; 2:with SQL support, it can be seamlessly integrated with many third party tools, and learning costs/migration costs are reduced significantly; 3: with its simple architecture and zero management, the operation and maintenance costs are reduced.
+With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced. 1: With its superior performance, the computing and storage resources are reduced significantly 2: With SQL support, it can be seamlessly integrated with many third party tools, and learning costs/migration costs are reduced significantly 3: With its simple architecture and zero management, the operation and maintenance costs are reduced.
## Technical Ecosystem
-In the time-series data processing platform, TDengine stands in a role like this diagram below:
+This is how TDengine would be situated, in a typical time-series data processing platform:
-
+
Figure 1. TDengine Technical Ecosystem
-On the left side, there are data collection agents like OPC-UA, MQTT, Telegraf and Kafka. On the right side, visualization/BI tools, HMI, Python/R, and IoT Apps can be connected. TDengine itself provides interactive command-line interface and web interface for management and maintenance.
+On the left-hand side, there are data collection agents like OPC-UA, MQTT, Telegraf and Kafka. On the right-hand side, visualization/BI tools, HMI, Python/R, and IoT Apps can be connected. TDengine itself provides an interactive command-line interface and a web interface for management and maintenance.
-## Suited Scenarios
+## Typical Use Cases
-As a high-performance, scalable and SQL supported time-series database, TDengine's typical application scenarios include but are not limited to IoT, Industrial Internet, Connected Vehicles, IT operation and maintenance, energy, financial markets and other fields. TDengine is a purpose-built database optimized for the characteristics of time series data, it cannot be used to process data from web crawlers, social media, e-commerce, ERP, CRM, etc. This section makes a more detailed analysis of the applicable scenarios.
+As a high-performance, scalable and SQL supported time-series database, TDengine's typical use case include but are not limited to IoT, Industrial Internet, Connected Vehicles, IT operation and maintenance, energy, financial markets and other fields. TDengine is a purpose-built database optimized for the characteristics of time series data. As such, it cannot be used to process data from web crawlers, social media, e-commerce, ERP, CRM and so on. More generally TDengine is not a suitable storage engine for non-time-series data. This section makes a more detailed analysis of the applicable scenarios.
### Characteristics and Requirements of Data Sources
diff --git a/docs-en/04-concept/index.md b/docs-en/04-concept/index.md
index abc553ab6d90042cb2389ba0b71d3b5395dcebfd..850f705146c4829db579f14be1a686ef9052f678 100644
--- a/docs-en/04-concept/index.md
+++ b/docs-en/04-concept/index.md
@@ -2,7 +2,7 @@
title: Concepts
---
-In order to explain the basic concepts and provide some sample code, the TDengine documentation takes smart meters as a typical time series data scenario. Assuming that each smart meter collects three metrics of current, voltage, and phase, there are multiple smart meters, and each meter has static attributes like location and group ID, the collected data will be similar to the following table:
+In order to explain the basic concepts and provide some sample code, the TDengine documentation smart meters as a typical time series use case. We assume the following: 1. Each smart meter collects three metrics i.e. current, voltage, and phase 2. There are multiple smart meters, and 3. Each meter has static attributes like location and group ID. Based on this, collected data will look similar to the following table:
@@ -29,7 +29,7 @@ In order to explain the basic concepts and provide some sample code, the TDengin
10.3
219
0.31
-Beijing.Chaoyang
+California.SanFrancisco
2
@@ -38,7 +38,7 @@ In order to explain the basic concepts and provide some sample code, the TDengin
10.2
220
0.23
-Beijing.Chaoyang
+California.SanFrancisco
3
@@ -47,7 +47,7 @@ In order to explain the basic concepts and provide some sample code, the TDengin
11.5
221
0.35
-Beijing.Haidian
+California.LosAngeles
3
@@ -56,7 +56,7 @@ In order to explain the basic concepts and provide some sample code, the TDengin
13.4
223
0.29
-Beijing.Haidian
+California.LosAngeles
2
@@ -65,7 +65,7 @@ In order to explain the basic concepts and provide some sample code, the TDengin
12.6
218
0.33
-Beijing.Chaoyang
+California.SanFrancisco
2
@@ -74,7 +74,7 @@ In order to explain the basic concepts and provide some sample code, the TDengin
11.8
221
0.28
-Beijing.Haidian
+California.LosAngeles
2
@@ -83,7 +83,7 @@ In order to explain the basic concepts and provide some sample code, the TDengin
10.3
218
0.25
-Beijing.Chaoyang
+California.SanFrancisco
3
@@ -92,7 +92,7 @@ In order to explain the basic concepts and provide some sample code, the TDengin
12.3
221
0.31
-Beijing.Chaoyang
+California.SanFrancisco
2
@@ -112,7 +112,7 @@ Label/Tag refers to the static properties of sensors, equipment or other types o
## Data Collection Point
-Data Collection Point (DCP) refers to hardware or software that collects metrics based on preset time periods or triggered by events. A data collection point can collect one or multiple metrics, but these metrics are collected at the same time and have the same time stamp. For some complex equipments, there are often multiple data collection points, and the sampling rate of each collection point may be different, and fully independent. For example, for a car, there could be a data collection point to collect GPS position metrics, a data collection point to collect engine status metrics, and a data collection point to collect the environment metrics inside the car, so in this example the car would have three data collection points.
+Data Collection Point (DCP) refers to hardware or software that collects metrics based on preset time periods or triggered by events. A data collection point can collect one or multiple metrics, but these metrics are collected at the same time and have the same time stamp. For some complex equipment, there are often multiple data collection points, and the sampling rate of each collection point may be different, and fully independent. For example, for a car, there could be a data collection point to collect GPS position metrics, a data collection point to collect engine status metrics, and a data collection point to collect the environment metrics inside the car. So in this example the car would have three data collection points.
## Table
@@ -122,10 +122,10 @@ To make full use of time-series data characteristics, TDengine adopts a strategy
1. Since the metric data from different DCP are fully independent, the data source of each DCP is unique, and a table has only one writer. In this way, data points can be written in a lock-free manner, and the writing speed can be greatly improved.
2. For a DCP, the metric data generated by DCP is ordered by timestamp, so the write operation can be implemented by simple appending, which further greatly improves the data writing speed.
-3. The metric data from a DCP is continuously stored in block by block. If you read data for a period of time, it can greatly reduce random read operations and improve read and query performance by orders of magnitude.
-4. Inside a data block for a DCP, columnar storage is used, and different compression algorithms are used for different data types. Metrics generally don't vary as significantly between themselves over a time range as compared to other metrics, this allows for a higher compression rate.
+3. The metric data from a DCP is continuously stored, block by block. If you read data for a period of time, it can greatly reduce random read operations and improve read and query performance by orders of magnitude.
+4. Inside a data block for a DCP, columnar storage is used, and different compression algorithms are used for different data types. Metrics generally don't vary as significantly between themselves over a time range as compared to other metrics, which allows for a higher compression rate.
-If the metric data of multiple DCPs are traditionally written into a single table, due to the uncontrollable network delay, the timing of the data from different DCPs arriving at the server cannot be guaranteed, the writing operation must be protected by locks, and the metric data from one DCP cannot be guaranteed to be continuously stored together. **One table for one data collection point can ensure the best performance of insert and query of a single data collection point to the greatest extent.**
+If the metric data of multiple DCPs are traditionally written into a single table, due to uncontrollable network delays, the timing of the data from different DCPs arriving at the server cannot be guaranteed, write operations must be protected by locks, and metric data from one DCP cannot be guaranteed to be continuously stored together. **One table for one data collection point can ensure the best performance of insert and query of a single data collection point to the greatest possible extent.**
TDengine suggests using DCP ID as the table name (like D1001 in the above table). Each DCP may collect one or multiple metrics (like the current, voltage, phase as above). Each metric has a corresponding column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the time stamp as the index, and won’t build the index on any metrics stored. Column wise storage is used.
@@ -139,7 +139,7 @@ In the design of TDengine, **a table is used to represent a specific data collec
## Subtable
-When creating a table for a specific data collection point, the user can use a STable as a template and specifies the tag values of this specific DCP to create it. **The table created by using a STable as the template is called subtable** in TDengine. The difference between regular table and subtable is:
+When creating a table for a specific data collection point, the user can use a STable as a template and specify the tag values of this specific DCP to create it. **The table created by using a STable as the template is called subtable** in TDengine. The difference between regular table and subtable is:
1. Subtable is a table, all SQL commands applied on a regular table can be applied on subtable.
2. Subtable is a table with extensions, it has static tags (labels), and these tags can be added, deleted, and updated after it is created. But a regular table does not have tags.
3. A subtable belongs to only one STable, but a STable may have many subtables. Regular tables do not belong to a STable.
@@ -151,7 +151,7 @@ The relationship between a STable and the subtables created based on this STable
2. The schema of metrics or labels cannot be adjusted through subtables, and it can only be changed via STable. Changes to the schema of a STable takes effect immediately for all associated subtables.
3. STable defines only one template and does not store any data or label information by itself. Therefore, data cannot be written to a STable, only to subtables.
-Queries can be executed on both a table (subtable) and a STable. For a query on a STable, TDengine will treat the data in all its subtables as a whole data set for processing. TDengine will first find the subtables that meet the tag filter conditions, then scan the time-series data of these subtables to perform aggregation operation, which can greatly reduce the data sets to be scanned, thus greatly improving the performance of data aggregation across multiple DCPs.
+Queries can be executed on both a table (subtable) and a STable. For a query on a STable, TDengine will treat the data in all its subtables as a whole data set for processing. TDengine will first find the subtables that meet the tag filter conditions, then scan the time-series data of these subtables to perform aggregation operation, which reduces the number of data sets to be scanned which in turn greatly improves the performance of data aggregation across multiple DCPs.
In TDengine, it is recommended to use a subtable instead of a regular table for a DCP.
@@ -167,4 +167,4 @@ FQDN (Fully Qualified Domain Name) is the full domain name of a specific compute
Each node of a TDengine cluster is uniquely identified by an End Point, which consists of an FQDN and a Port, such as h1.tdengine.com:6030. In this way, when the IP changes, we can still use the FQDN to dynamically find the node without changing any configuration of the cluster. In addition, FQDN is used to facilitate unified access to the same cluster from the Intranet and the Internet.
-TDengine does not recommend using an IP address to access the cluster, FQDN is recommended for cluster management.
+TDengine does not recommend using an IP address to access the cluster. FQDN is recommended for cluster management.
diff --git a/docs-en/05-get-started/_pkg_install.mdx b/docs-en/05-get-started/_pkg_install.mdx
index af04d2b70bda7575e57cc49a5aa60f19689113e6..cf10497c96ba1d777e45340b0312d97c127b6fcb 100644
--- a/docs-en/05-get-started/_pkg_install.mdx
+++ b/docs-en/05-get-started/_pkg_install.mdx
@@ -12,6 +12,6 @@ Between two major release versions, some beta versions may be delivered for user
For the details please refer to [Install and Uninstall](/operation/pkg-install)。
-To see the details of versions, please refer to [Download List](https://www.taosdata.com/all-downloads) and [Release Notes](https://github.com/taosdata/TDengine/releases).
+To see the details of versions, please refer to [Download List](https://tdengine.com/all-downloads) and [Release Notes](https://github.com/taosdata/TDengine/releases).
diff --git a/docs-en/05-get-started/index.md b/docs-en/05-get-started/index.md
index 39b2d02eca3c15aebd5715ee64e455781c8236e5..56958ef3ec1c206ee0cff45c67fd3c3a6fa6753a 100644
--- a/docs-en/05-get-started/index.md
+++ b/docs-en/05-get-started/index.md
@@ -10,7 +10,7 @@ import AptGetInstall from "./\_apt_get_install.mdx";
## Quick Install
-The full package of TDengine includes the server(taosd), taosAdapter for connecting with third-party systems and providing a RESTful interface, client driver(taosc), command-line program(CLI, taos) and some tools. For the current version, the server taosd and taosAdapter can only be installed and run on Linux systems. In the future taosd and taosAdapter will also be supported on Windows, macOS and other systems. The client driver taosc and TDengine CLI can be installed and run on Windows or Linux. In addition to the connectors of multiple languages, [RESTful interface](/reference/rest-api) is also provided by [taosAdapter](/reference/taosadapter) in TDengine. Prior to version 2.4.0.0, however, there is no taosAdapter, the RESTful interface is provided by the built-in HTTP service of taosd.
+The full package of TDengine includes the server(taosd), taosAdapter for connecting with third-party systems and providing a RESTful interface, client driver(taosc), command-line program(CLI, taos) and some tools. For the current version, the server taosd and taosAdapter can only be installed and run on Linux systems. In the future taosd and taosAdapter will also be supported on Windows, macOS and other systems. The client driver taosc and TDengine CLI can be installed and run on Windows or Linux. In addition to connectors for multiple languages, TDengine also provides a [RESTful interface](/reference/rest-api) through [taosAdapter](/reference/taosadapter). Prior to version 2.4.0.0, taosAdapter did not exist and the RESTful interface was provided by the built-in HTTP service of taosd.
TDengine supports X64/ARM64/MIPS64/Alpha64 hardware platforms, and will support ARM32, RISC-V and other CPU architectures in the future.
@@ -130,7 +130,7 @@ After TDengine server is running,execute `taosBenchmark` (previously named tao
taosBenchmark
```
-This command will create a super table "meters" under database "test". Under "meters", 10000 tables are created with names from "d0" to "d9999". Each table has 10000 rows and each row has four columns (ts, current, voltage, phase). Time stamp is starting from "2017-07-14 10:40:00 000" to "2017-07-14 10:40:09 999". Each table has tags "location" and "groupId". groupId is set 1 to 10 randomly, and location is set to "beijing" or "shanghai".
+This command will create a super table "meters" under database "test". Under "meters", 10000 tables are created with names from "d0" to "d9999". Each table has 10000 rows and each row has four columns (ts, current, voltage, phase). Time stamp is starting from "2017-07-14 10:40:00 000" to "2017-07-14 10:40:09 999". Each table has tags "location" and "groupId". groupId is set 1 to 10 randomly, and location is set to "California.SanFrancisco" or "California.SanDiego".
This command will insert 100 million rows into the database quickly. Time to insert depends on the hardware configuration, it only takes a dozen seconds for a regular PC server.
@@ -152,10 +152,10 @@ query the average, maximum, minimum of 100 million rows:
taos> select avg(current), max(voltage), min(phase) from test.meters;
```
-query the total number of rows with location="beijing":
+query the total number of rows with location="California.SanFrancisco":
```sql
-taos> select count(*) from test.meters where location="beijing";
+taos> select count(*) from test.meters where location="California.SanFrancisco";
```
query the average, maximum, minimum of all rows with groupId=10:
diff --git a/docs-en/07-develop/01-connect/index.md b/docs-en/07-develop/01-connect/index.md
index ee11c8f5445233abe44e3bc006e1f15846b54ada..b9217b828d0d08c4ff1eacd27406d4e3bfba8eac 100644
--- a/docs-en/07-develop/01-connect/index.md
+++ b/docs-en/07-develop/01-connect/index.md
@@ -1,7 +1,7 @@
---
-sidebar_label: Connection
-title: Connect to TDengine
-description: "This document explains how to establish connection to TDengine, and briefly introduce how to install and use TDengine connectors."
+sidebar_label: Connect
+title: Connect
+description: "This document explains how to establish connections to TDengine, and briefly introduces how to install and use TDengine connectors."
---
import Tabs from "@theme/Tabs";
@@ -19,25 +19,24 @@ import InstallOnLinux from "../../14-reference/03-connector/\_windows_install.md
import VerifyLinux from "../../14-reference/03-connector/\_verify_linux.mdx";
import VerifyWindows from "../../14-reference/03-connector/\_verify_windows.mdx";
-Any application programs running on any kind of platforms can access TDengine through the REST API provided by TDengine. For the details, please refer to [REST API](/reference/rest-api/). Besides, application programs can use the connectors of multiple programming languages to access TDengine, including C/C++, Java, Python, Go, Node.js, C#, and Rust. This chapter describes how to establish connection to TDengine and briefly introduce how to install and use connectors. For details about the connectors, please refer to [Connectors](/reference/connector/)
+Any application programs running on any kind of platform can access TDengine through the REST API provided by TDengine. For details, please refer to [REST API](/reference/rest-api/). Additionally, application programs can use the connectors of multiple programming languages including C/C++, Java, Python, Go, Node.js, C#, and Rust to access TDengine. This chapter describes how to establish a connection to TDengine and briefly introduces how to install and use connectors. For details about the connectors, please refer to [Connectors](/reference/connector/)
## Establish Connection
There are two ways for a connector to establish connections to TDengine:
-1. Connection through the REST API provided by taosAdapter component, this way is called "REST connection" hereinafter.
+1. Connection through the REST API provided by the taosAdapter component, this way is called "REST connection" hereinafter.
2. Connection through the TDengine client driver (taosc), this way is called "Native connection" hereinafter.
-Either way, same or similar APIs are provided by connectors to access database or execute SQL statements, no obvious difference can be observed.
-
Key differences:
-1. With REST connection, it's not necessary to install TDengine client driver (taosc), it's more friendly for cross-platform with the cost of 30% performance downgrade. When taosc has an upgrade, application does not need to make changes.
-2. With native connection, full compatibility of TDengine can be utilized, like [Parameter Binding](/reference/connector/cpp#parameter-binding-api), [Subscription](/reference/connector/cpp#subscription-and-consumption-api), etc. But taosc has to be installed, some platforms may not be supported.
+1. The TDengine client driver (taosc) has the highest performance with all the features of TDengine like [Parameter Binding](/reference/connector/cpp#parameter-binding-api), [Subscription](/reference/connector/cpp#subscription-and-consumption-api), etc.
+2. The TDengine client driver (taosc) is not supported across all platforms, and applications built on taosc may need to be modified when updating taosc to newer versions.
+3. The REST connection is more accessible with cross-platform support, however it results in a 30% performance downgrade.
## Install Client Driver taosc
-If choosing to use native connection and the application is not on the same host as TDengine server, TDengine client driver taosc needs to be installed on the host where the application is. If choosing to use REST connection or the application is on the same host as server side, this step can be skipped. It's better to use same version of taosc as the server.
+If you are choosing to use the native connection and the the application is not on the same host as TDengine server, the TDengine client driver taosc needs to be installed on the application host. If choosing to use the REST connection or the application is on the same host as TDengine server, this step can be skipped. It's better to use same version of taosc as the TDengine server.
### Install
diff --git a/docs-en/07-develop/02-model/index.mdx b/docs-en/07-develop/02-model/index.mdx
index 962a75338f0384ee8facb4682342e25e536e4ecb..86853aaaa3f7285fe042a892e2ec903d57894111 100644
--- a/docs-en/07-develop/02-model/index.mdx
+++ b/docs-en/07-develop/02-model/index.mdx
@@ -2,19 +2,26 @@
title: Data Model
---
-The data model employed by TDengine is similar to relational database, you need to create databases and tables. For a specific application, the design of databases, STables (abbreviated for super table), and tables need to be considered. This chapter will explain the big picture without syntax details.
+The data model employed by TDengine is similar to that of a relational database. You have to create databases and tables. You must design the data model based on your own business and application requirements. You should design the STable (an abbreviation for super table) schema to fit your data. This chapter will explain the big picture without getting into syntactical details.
## Create Database
-The characteristics of data from different data collection points may be different, such as collection frequency, days to keep, number of replicas, data block size, whether it's allowed to update data, etc. For TDengine to operate with the best performance, it's strongly suggested to put the data with different characteristics into different databases because different storage policy can be set for each database. When creating a database, there are a lot of parameters that can be configured, such as the days to keep data, the number of replicas, the number of memory blocks, time precision, the minimum and maximum number of rows in each data block, compress or not, the time range of the data in single data file, etc. Below is an example of the SQL statement for creating a database.
+The [characteristics of time-series data](https://www.taosdata.com/blog/2019/07/09/86.html) from different data collection points may be different. Characteristics include collection frequency, retention policy and others which determine how you create and configure the database. For e.g. days to keep, number of replicas, data block size, whether data updates are allowed and other configurable parameters would be determined by the characteristics of your data and your business requirements. For TDengine to operate with the best performance, we strongly recommend that you create and configure different databases for data with different characteristics. This allows you, for example, to set up different storage and retention policies. When creating a database, there are a lot of parameters that can be configured such as, the days to keep data, the number of replicas, the number of memory blocks, time precision, the minimum and maximum number of rows in each data block, whether compression is enabled, the time range of the data in single data file and so on. Below is an example of the SQL statement to create a database.
```sql
CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 6 UPDATE 1;
```
-In the above SQL statement, a database named "power" will be created, the data in it will be kept for 365 days, which means the data older than 365 days will be deleted automatically, a new data file will be created every 10 days, the number of memory blocks is 6, data is allowed to be updated. For more details please refer to [Database](/taos-sql/database).
+In the above SQL statement:
+- a database named "power" will be created
+- the data in it will be kept for 365 days, which means that data older than 365 days will be deleted automatically
+- a new data file will be created every 10 days
+- the number of memory blocks is 6
+- data is allowed to be updated
-After creating a database, the current database in use can be switched using SQL command `USE`, for example below SQL statement switches the current database to `power`. Without current database specified, table name must be preceded with the corresponding database name.
+For more details please refer to [Database](/taos-sql/database).
+
+After creating a database, the current database in use can be switched using SQL command `USE`. For example the SQL statement below switches the current database to `power`. Without the current database specified, table name must be preceded with the corresponding database name.
```sql
USE power;
@@ -23,14 +30,14 @@ USE power;
:::note
- Any table or STable must belong to a database. To create a table or STable, the database it belongs to must be ready.
-- JOIN operation can't be performed tables from two different databases.
+- JOIN operations can't be performed on tables from two different databases.
- Timestamp needs to be specified when inserting rows or querying historical rows.
:::
## Create STable
-In a time-series application, there may be multiple kinds of data collection points. For example, in the electrical power system there are meters, transformers, bus bars, switches, etc. For easy and efficient aggregation of multiple tables, one STable needs to be created for each kind of data collection point. For example, for the meters in [table 1](/tdinternal/arch#model_table1), below SQL statement can be used to create the super table.
+In a time-series application, there may be multiple kinds of data collection points. For example, in the electrical power system there are meters, transformers, bus bars, switches, etc. For easy and efficient aggregation of multiple tables, one STable needs to be created for each kind of data collection point. For example, for the meters in [table 1](/tdinternal/arch#model_table1), the SQL statement below can be used to create the super table.
```sql
CREATE STable meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);
@@ -41,44 +48,46 @@ If you are using versions prior to 2.0.15, the `STable` keyword needs to be repl
:::
-Similar to creating a regular table, when creating a STable, name and schema need to be provided too. In the STable schema, the first column must be timestamp (like ts in the example), and other columns (like current, voltage and phase in the example) are the data collected. The type of a column can be integer, float, double, string ,etc. Besides, the schema for tags need to be provided, like location and groupId in the example. The type of a tag can be integer, float, string, etc. The static properties of a data collection point can be defined as tags, like the location, device type, device group ID, manager ID, etc. Tags in the schema can be added, removed or updated. Please refer to [STable](/taos-sql/stable) for more details.
+Similar to creating a regular table, when creating a STable, the name and schema need to be provided. In the STable schema, the first column must always be a timestamp (like ts in the example), and the other columns (like current, voltage and phase in the example) are the data collected. The remaining columns can [contain data of type](/taos-sql/data-type/) integer, float, double, string etc. In addition, the schema for tags, like location and groupId in the example, must be provided. The tag type can be integer, float, string, etc. Tags are essentially the static properties of a data collection point. For example, properties like the location, device type, device group ID, manager ID are tags. Tags in the schema can be added, removed or updated. Please refer to [STable](/taos-sql/stable) for more details.
-For each kind of data collection points, a corresponding STable must be created. There may be many STables in an application. For electrical power system, we need to create a STable respectively for meters, transformers, busbars, switches. There may be multiple kinds of data collection points on a single device, for example there may be one data collection point for electrical data like current and voltage and another point for environmental data like temperature, humidity and wind direction, multiple STables are required for such kind of device.
+For each kind of data collection point, a corresponding STable must be created. There may be many STables in an application. For electrical power system, we need to create a STable respectively for meters, transformers, busbars, switches. There may be multiple kinds of data collection points on a single device, for example there may be one data collection point for electrical data like current and voltage and another data collection point for environmental data like temperature, humidity and wind direction. Multiple STables are required for these kinds of devices.
-At most 4096 (or 1024 prior to version 2.1.7.0) columns are allowed in a STable. If there are more than 4096 of metrics to bo collected for a data collection point, multiple STables are required for such kind of data collection point. There can be multiple databases in system, while one or more STables can exist in a database.
+At most 4096 (or 1024 prior to version 2.1.7.0) columns are allowed in a STable. If there are more than 4096 of metrics to be collected for a data collection point, multiple STables are required. There can be multiple databases in a system, while one or more STables can exist in a database.
## Create Table
-A specific table needs to be created for each data collection point. Similar to RDBMS, table name and schema are required to create a table. Beside, one or more tags can be created for each table. To create a table, a STable needs to be used as template and the values need to be specified for the tags. For example, for the meters in [Table 1](/tdinternal/arch#model_table1), the table can be created using below SQL statement.
+A specific table needs to be created for each data collection point. Similar to RDBMS, table name and schema are required to create a table. Additionally, one or more tags can be created for each table. To create a table, a STable needs to be used as template and the values need to be specified for the tags. For example, for the meters in [Table 1](/tdinternal/arch#model_table1), the table can be created using below SQL statement.
```sql
-CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);
+CREATE TABLE d1001 USING meters TAGS ("California.SanFrancisco", 2);
```
-In the above SQL statement, "d1001" is the table name, "meters" is the STable name, followed by the value of tag "Location" and the value of tag "groupId", which are "Beijing.Chaoyang" and "2" respectively in the example. The tag values can be updated after the table is created. Please refer to [Tables](/taos-sql/table) for details.
+In the above SQL statement, "d1001" is the table name, "meters" is the STable name, followed by the value of tag "Location" and the value of tag "groupId", which are "California.SanFrancisco" and "2" respectively in the example. The tag values can be updated after the table is created. Please refer to [Tables](/taos-sql/table) for details.
-In TDengine system, it's recommended to create a table for a data collection point via STable. Table created via STable is called subtable in some parts of TDengine document. All SQL commands applied on regular table can be applied on subtable.
+In the TDengine system, it's recommended to create a table for a data collection point via STable. A table created via STable is called subtable in some parts of the TDengine documentation. All SQL commands applied on regular tables can be applied on subtables.
:::warning
It's not recommended to create a table in a database while using a STable from another database as template.
:::tip
-It's suggested to use the global unique ID of a data collection point as the table name, for example the device serial number. If there isn't such a unique ID, multiple IDs that are not global unique can be combined to form a global unique ID. It's not recommended to use a global unique ID as tag value.
+It's suggested to use the globally unique ID of a data collection point as the table name. For example the device serial number could be used as a unique ID. If a unique ID doesn't exist, multiple IDs that are not globally unique can be combined to form a globally unique ID. It's not recommended to use a globally unique ID as tag value.
## Create Table Automatically
-In some circumstances, it's not sure whether the table already exists when inserting rows. The table can be created automatically using the SQL statement below, and nothing will happen if the table already exist.
+In some circumstances, it's unknown whether the table already exists when inserting rows. The table can be created automatically using the SQL statement below, and nothing will happen if the table already exists.
```sql
-INSERT INTO d1001 USING meters TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32);
+INSERT INTO d1001 USING meters TAGS ("California.SanFrancisco", 2) VALUES (now, 10.2, 219, 0.32);
```
-In the above SQL statement, a row with value `(now, 10.2, 219, 0.32)` will be inserted into table "d1001". If table "d1001" doesn't exist, it will be created automatically using STable "meters" as template with tag value `"Beijing.Chaoyang", 2`.
+In the above SQL statement, a row with value `(now, 10.2, 219, 0.32)` will be inserted into table "d1001". If table "d1001" doesn't exist, it will be created automatically using STable "meters" as template with tag value `"California.SanFrancisco", 2`.
For more details please refer to [Create Table Automatically](/taos-sql/insert#automatically-create-table-when-inserting).
## Single Column vs Multiple Column
-Multiple columns data model is supported in TDengine. As long as multiple metrics are collected by same data collection point at same time, i.e. the timestamp are identical, these metrics can be put in single stable as columns. However, there is another kind of design, i.e. single column data model, a table is created for each metric, which means a STable is required for each kind of metric. For example, 3 STables are required for current, voltage and phase.
+A multiple columns data model is supported in TDengine. As long as multiple metrics are collected by the same data collection point at the same time, i.e. the timestamps are identical, these metrics can be put in a single STable as columns.
+
+However, there is another kind of design, i.e. single column data model in which a table is created for each metric. This means that a STable is required for each kind of metric. For example in a single column model, 3 STables would be required for current, voltage and phase.
-It's recommended to use multiple column data model as much as possible because it's better in the performance of inserting or querying rows. In some cases, however, the metrics to be collected vary frequently and correspondingly the STable schema needs to be changed frequently too. In such case, it's more convenient to use single column data model.
+It's recommended to use a multiple column data model as much as possible because insert and query performance is higher. In some cases, however, the collected metrics may vary frequently and so the corresponding STable schema needs to be changed frequently too. In such cases, it's more convenient to use single column data model.
diff --git a/docs-en/07-develop/03-insert-data/01-sql-writing.mdx b/docs-en/07-develop/03-insert-data/01-sql-writing.mdx
index 9f66992d3de755389c3a0722ebb09097177742f1..397b1a14fd76c1372c79eb88575f2bf21cb62050 100644
--- a/docs-en/07-develop/03-insert-data/01-sql-writing.mdx
+++ b/docs-en/07-develop/03-insert-data/01-sql-writing.mdx
@@ -1,5 +1,5 @@
---
-sidebar_label: SQL
+sidebar_label: Insert Using SQL
title: Insert Using SQL
---
@@ -22,11 +22,11 @@ import CStmt from "./_c_stmt.mdx";
## Introduction
-Application program can execute `INSERT` statement through connectors to insert rows. TAOS CLI can be launched manually to insert data too.
+Application programs can execute `INSERT` statement through connectors to insert rows. The TAOS CLI can also be used to manually insert data.
### Insert Single Row
-Below SQL statement is used to insert one row into table "d1001".
+The below SQL statement is used to insert one row into table "d1001".
```sql
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31);
@@ -34,7 +34,7 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31);
### Insert Multiple Rows
-Multiple rows can be inserted in single SQL statement. Below example inserts 2 rows into table "d1001".
+Multiple rows can be inserted in a single SQL statement. The example below inserts 2 rows into table "d1001".
```sql
INSERT INTO d1001 VALUES (1538548684000, 10.2, 220, 0.23) (1538548696650, 10.3, 218, 0.25);
@@ -42,7 +42,7 @@ INSERT INTO d1001 VALUES (1538548684000, 10.2, 220, 0.23) (1538548696650, 10.3,
### Insert into Multiple Tables
-Data can be inserted into multiple tables in same SQL statement. Below example inserts 2 rows into table "d1001" and 1 row into table "d1002".
+Data can be inserted into multiple tables in the same SQL statement. The example below inserts 2 rows into table "d1001" and 1 row into table "d1002".
```sql
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) d1002 VALUES (1538548696800, 12.3, 221, 0.31);
@@ -52,14 +52,14 @@ For more details about `INSERT` please refer to [INSERT](/taos-sql/insert).
:::info
-- Inserting in batch can gain better performance. Normally, the higher the batch size, the better the performance. Please be noted each single row can't exceed 16K bytes and each single SQL statement can't exceed 1M bytes.
-- Inserting with multiple threads can gain better performance too. However, depending on the system resources on the application side and the server side, with the number of inserting threads grows to a specific point, the performance may drop instead of growing. The proper number of threads need to be tested in a specific environment to find the best number.
+- Inserting in batches can improve performance. Normally, the higher the batch size, the better the performance. Please note that a single row can't exceed 48K bytes and each SQL statement can't exceed 1MB.
+- Inserting with multiple threads can also improve performance. However, depending on the system resources on the application side and the server side, when the number of inserting threads grows beyond a specific point the performance may drop instead of improving. The proper number of threads needs to be tested in a specific environment to find the best number.
:::
:::warning
-- If the timestamp for the row to be inserted already exists in the table, the behavior depends on the value of parameter `UPDATE`. If it's set to 0 (also the default value), the row will be discarded. If it's set to 1, the new values will override the old values for the same row.
+- If the timestamp for the row to be inserted already exists in the table, the behavior depends on the value of parameter `UPDATE`. If it's set to 0 (the default value), the row will be discarded. If it's set to 1, the new values will override the old values for the same row.
- The timestamp to be inserted must be newer than the timestamp of subtracting current time by the parameter `KEEP`. If `KEEP` is set to 3650 days, then the data older than 3650 days ago can't be inserted. The timestamp to be inserted can't be newer than the timestamp of current time plus parameter `DAYS`. If `DAYS` is set to 2, the data newer than 2 days later can't be inserted.
:::
@@ -95,13 +95,13 @@ For more details about `INSERT` please refer to [INSERT](/taos-sql/insert).
:::note
1. With either native connection or REST connection, the above samples can work well.
-2. Please be noted that `use db` can't be used with REST connection because REST connection is stateless, so in the samples `dbName.tbName` is used to specify the table name.
+2. Please note that `use db` can't be used with a REST connection because REST connections are stateless, so in the samples `dbName.tbName` is used to specify the table name.
:::
### Insert with Parameter Binding
-TDengine also provides Prepare API that support parameter binding. Similar to MySQL, only `?` can be used in these APIs to represent the parameters to bind. From version 2.1.1.0 and 2.1.2.0, parameter binding support for inserting data has been improved significantly to improve the insert performance by avoiding the cost of parsing SQL statements.
+TDengine also provides API support for parameter binding. Similar to MySQL, only `?` can be used in these APIs to represent the parameters to bind. From version 2.1.1.0 and 2.1.2.0, parameter binding support for inserting data has improved significantly to improve the insert performance by avoiding the cost of parsing SQL statements.
Parameter binding is available only with native connection.
diff --git a/docs-en/07-develop/03-insert-data/02-influxdb-line.mdx b/docs-en/07-develop/03-insert-data/02-influxdb-line.mdx
index 172003d203fa309ce51b3ecae9a7490a59f513d7..be46ebf0c97a29b57c1b57eb8ea5c9394f85b93a 100644
--- a/docs-en/07-develop/03-insert-data/02-influxdb-line.mdx
+++ b/docs-en/07-develop/03-insert-data/02-influxdb-line.mdx
@@ -15,13 +15,13 @@ import CLine from "./_c_line.mdx";
## Introduction
-A single line of text is used in InfluxDB Line protocol format represents one row of data, each line contains 4 parts as shown below.
+In the InfluxDB Line protocol format, a single line of text is used to represent one row of data. Each line contains 4 parts as shown below.
```
measurement,tag_set field_set timestamp
```
-- `measurement` will be used as the STable name
+- `measurement` will be used as the name of the STable
- `tag_set` will be used as tags, with format like `=,=`
- `field_set`will be used as data columns, with format like `=,=`
- `timestamp` is the primary key timestamp corresponding to this row of data
@@ -29,13 +29,13 @@ measurement,tag_set field_set timestamp
For example:
```
-meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500
+meters,location=California.LoSangeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500
```
:::note
-- All the data in `tag_set` will be converted to ncahr type automatically .
-- Each data in `field_set` must be self-description for its data type. For example 1.2f32 means a value 1.2 of float type, it will be treated as double without the "f" type suffix.
+- All the data in `tag_set` will be converted to nchar type automatically .
+- Each data in `field_set` must be self-descriptive for its data type. For example 1.2f32 means a value 1.2 of float type. Without the "f" type suffix, it will be treated as type double.
- Multiple kinds of precision can be used for the `timestamp` field. Time precision can be from nanosecond (ns) to hour (h).
:::
diff --git a/docs-en/07-develop/03-insert-data/03-opentsdb-telnet.mdx b/docs-en/07-develop/03-insert-data/03-opentsdb-telnet.mdx
index 66bb67c25669b906183526377f60b969ea3d1e85..18a695cda8efbef075451ff53e542d9e69c58e0b 100644
--- a/docs-en/07-develop/03-insert-data/03-opentsdb-telnet.mdx
+++ b/docs-en/07-develop/03-insert-data/03-opentsdb-telnet.mdx
@@ -15,21 +15,21 @@ import CTelnet from "./_c_opts_telnet.mdx";
## Introduction
-A single line of text is used in OpenTSDB line protocol to represent one row of data. OpenTSDB employs single column data model, so one line can only contains single data column. There can be multiple tags. Each line contains 4 parts as below:
+A single line of text is used in OpenTSDB line protocol to represent one row of data. OpenTSDB employs a single column data model, so each line can only contain a single data column. There can be multiple tags. Each line contains 4 parts as below:
```
=[ =]
```
-- `metric` will be used as STable name.
-- `timestamp` is the timestamp of current row of data. The time precision will be determined automatically based on the length of the timestamp. second and millisecond time precision are supported.\
+- `metric` will be used as the STable name.
+- `timestamp` is the timestamp of current row of data. The time precision will be determined automatically based on the length of the timestamp. Second and millisecond time precision are supported.
- `value` is a metric which must be a numeric value, the corresponding column name is "value".
-- The last part is tag sets separated by space, all tags will be converted to nchar type automatically.
+- The last part is the tag set separated by spaces, all tags will be converted to nchar type automatically.
For example:
```txt
-meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3
+meters.current 1648432611250 11.3 location=California.LoSangeles groupid=3
```
Please refer to [OpenTSDB Telnet API](http://opentsdb.net/docs/build/html/api_telnet/put.html) for more details.
@@ -60,7 +60,7 @@ Please refer to [OpenTSDB Telnet API](http://opentsdb.net/docs/build/html/api_te
-2 STables will be crated automatically while each STable has 4 rows of data in the above sample code.
+2 STables will be created automatically and each STable has 4 rows of data in the above sample code.
```cmd
taos> use test;
@@ -76,9 +76,9 @@ Query OK, 2 row(s) in set (0.002544s)
taos> select tbname, * from `meters.current`;
tbname | ts | value | groupid | location |
==================================================================================================================================
- t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.249 | 10.800000000 | 3 | Beijing.Haidian |
- t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.250 | 11.300000000 | 3 | Beijing.Haidian |
- t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.249 | 10.300000000 | 2 | Beijing.Chaoyang |
- t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.250 | 12.600000000 | 2 | Beijing.Chaoyang |
+ t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.249 | 10.800000000 | 3 | California.LoSangeles |
+ t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.250 | 11.300000000 | 3 | California.LoSangeles |
+ t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.249 | 10.300000000 | 2 | California.SanFrancisco |
+ t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.250 | 12.600000000 | 2 | California.SanFrancisco |
Query OK, 4 row(s) in set (0.005399s)
```
diff --git a/docs-en/07-develop/03-insert-data/04-opentsdb-json.mdx b/docs-en/07-develop/03-insert-data/04-opentsdb-json.mdx
index d4f723dcdeb78c54ba31fd4f6aa2528a90376c5f..3a239440311c736159d6060db5e730c5e5665bcb 100644
--- a/docs-en/07-develop/03-insert-data/04-opentsdb-json.mdx
+++ b/docs-en/07-develop/03-insert-data/04-opentsdb-json.mdx
@@ -47,7 +47,7 @@ Please refer to [OpenTSDB HTTP API](http://opentsdb.net/docs/build/html/api_http
:::note
- In JSON protocol, strings will be converted to nchar type and numeric values will be converted to double type.
-- Only data in array format is accepted, array must be used even there is only one row.
+- Only data in array format is accepted and so an array must be used even if there is only one row.
:::
@@ -93,7 +93,7 @@ Query OK, 2 row(s) in set (0.001954s)
taos> select * from `meters.current`;
ts | value | groupid | location |
===================================================================================================================
- 2022-03-28 09:56:51.249 | 10.300000000 | 2.000000000 | Beijing.Chaoyang |
- 2022-03-28 09:56:51.250 | 12.600000000 | 2.000000000 | Beijing.Chaoyang |
+ 2022-03-28 09:56:51.249 | 10.300000000 | 2.000000000 | California.SanFrancisco |
+ 2022-03-28 09:56:51.250 | 12.600000000 | 2.000000000 | California.SanFrancisco |
Query OK, 2 row(s) in set (0.004076s)
```
diff --git a/docs-en/07-develop/03-insert-data/index.md b/docs-en/07-develop/03-insert-data/index.md
index ee80d436f11f19b422df261845f1c209620251f2..1a71e719a56448e4b535632e570ce8a04d2282bb 100644
--- a/docs-en/07-develop/03-insert-data/index.md
+++ b/docs-en/07-develop/03-insert-data/index.md
@@ -1,12 +1,12 @@
---
-title: Insert
+title: Insert Data
---
-TDengine supports multiple protocols of inserting data, including SQL, InfluxDB Line protocol, OpenTSDB Telnet protocol, OpenTSDB JSON protocol. Data can be inserted row by row, or in batch. Data from one or more collecting points can be inserted simultaneously. In the meantime, data can be inserted with multiple threads, out of order data and historical data can be inserted too. InfluxDB Line protocol, OpenTSDB Telnet protocol and OpenTSDB JSON protocol are the 3 kinds of schemaless insert protocols supported by TDengine. It's not necessary to create stable and table in advance if using schemaless protocols, and the schemas can be adjusted automatically according to the data to be inserted.
+TDengine supports multiple protocols of inserting data, including SQL, InfluxDB Line protocol, OpenTSDB Telnet protocol, and OpenTSDB JSON protocol. Data can be inserted row by row, or in batches. Data from one or more collection points can be inserted simultaneously. Data can be inserted with multiple threads, and out of order data and historical data can be inserted as well. InfluxDB Line protocol, OpenTSDB Telnet protocol and OpenTSDB JSON protocol are the 3 kinds of schemaless insert protocols supported by TDengine. It's not necessary to create STables and tables in advance if using schemaless protocols, and the schemas can be adjusted automatically based on the data being inserted.
```mdx-code-block
import DocCardList from '@theme/DocCardList';
import {useCurrentSidebarCategory} from '@docusaurus/theme-common';
-```
\ No newline at end of file
+```
diff --git a/docs-en/07-develop/04-query-data/_category_.yml b/docs-en/07-develop/04-query-data/_category_.yml
index 5912a48fc31ed36235c0d34d8b0909bf3b518aaa..809db34621a63505ceace7ba182e07c698bdbddb 100644
--- a/docs-en/07-develop/04-query-data/_category_.yml
+++ b/docs-en/07-develop/04-query-data/_category_.yml
@@ -1 +1 @@
-label: Select Data
+label: Query Data
diff --git a/docs-en/07-develop/04-query-data/index.mdx b/docs-en/07-develop/04-query-data/index.mdx
index 4016f8453ba9e0679a2798b92cd40efcb926343b..a212fa9529215fc24c55c95a166cfc1a407359b2 100644
--- a/docs-en/07-develop/04-query-data/index.mdx
+++ b/docs-en/07-develop/04-query-data/index.mdx
@@ -1,6 +1,6 @@
---
-Sidebar_label: Select
-title: Select
+Sidebar_label: Query data
+title: Query data
description: "This chapter introduces major query functionalities and how to perform sync and async query using connectors."
---
@@ -20,7 +20,7 @@ import CAsync from "./_c_async.mdx";
## Introduction
-SQL is used by TDengine as the query language. Application programs can send SQL statements to TDengine through REST API or connectors. TDengine CLI `taos` can also be used to execute SQL Ad-Hoc query. Here is the list of major query functionalities supported by TDengine:
+SQL is used by TDengine as its query language. Application programs can send SQL statements to TDengine through REST API or connectors. TDengine's CLI `taos` can also be used to execute ad hoc SQL queries. Here is the list of major query functionalities supported by TDengine:
- Query on single column or multiple columns
- Filter on tags or data columns:>, <, =, <\>, like
@@ -31,7 +31,7 @@ SQL is used by TDengine as the query language. Application programs can send SQL
- Join query with timestamp alignment
- Aggregate functions: count, max, min, avg, sum, twa, stddev, leastsquares, top, bottom, first, last, percentile, apercentile, last_row, spread, diff
-For example, below SQL statement can be executed in TDengine CLI `taos` to select the rows whose voltage column is bigger than 215 and limit the output to only 2 rows.
+For example, the SQL statement below can be executed in TDengine CLI `taos` to select records with voltage greater than 215 and limit the output to only 2 rows.
```sql
select * from d1001 where voltage > 215 order by ts desc limit 2;
@@ -46,46 +46,46 @@ taos> select * from d1001 where voltage > 215 order by ts desc limit 2;
Query OK, 2 row(s) in set (0.001100s)
```
-To meet the requirements in many use cases, some special functions have been added in TDengine, for example `twa` (Time Weighted Average), `spared` (The difference between the maximum and the minimum), `last_row` (the last row), more and more functions will be added to better perform in many use cases. Furthermore, continuous query is also supported in TDengine.
+To meet the requirements of varied use cases, some special functions have been added in TDengine. Some examples are `twa` (Time Weighted Average), `spread` (The difference between the maximum and the minimum), and `last_row` (the last row). Furthermore, continuous query is also supported in TDengine.
For detailed query syntax please refer to [Select](/taos-sql/select).
## Aggregation among Tables
-In many use cases, there are always multiple kinds of data collection points. A new concept, called STable (abbreviated for super table), is used in TDengine to represent a kind of data collection points, and a table is used to represent a specific data collection point. Tags are used by TDengine to represent the static properties of data collection points. A specific data collection point has its own values for static properties. By specifying filter conditions on tags, aggregation can be performed efficiently among all the subtables created via the same STable, i.e. same kind of data collection points, can be. Aggregate functions applicable for tables can be used directly on STables, syntax is exactly same.
+In most use cases, there are always multiple kinds of data collection points. A new concept, called STable (abbreviation for super table), is used in TDengine to represent one type of data collection point, and a subtable is used to represent a specific data collection point of that type. Tags are used by TDengine to represent the static properties of data collection points. A specific data collection point has its own values for static properties. By specifying filter conditions on tags, aggregation can be performed efficiently among all the subtables created via the same STable, i.e. same type of data collection points. Aggregate functions applicable for tables can be used directly on STables; the syntax is exactly the same.
-In summary, for a STable, its subtables can be aggregated by a simple query on STable, it's kind of join operation. But tables belong to different STables could not be aggregated.
+In summary, records across subtables can be aggregated by a simple query on their STable. It is like a join operation. However, tables belonging to different STables can not be aggregated.
### Example 1
-In TDengine CLI `taos`, use below SQL to get the average voltage of all the meters in BeiJing grouped by location.
+In TDengine CLI `taos`, use the SQL below to get the average voltage of all the meters in California grouped by location.
```
taos> SELECT AVG(voltage) FROM meters GROUP BY location;
avg(voltage) | location |
=============================================================
- 222.000000000 | Beijing.Haidian |
- 219.200000000 | Beijing.Chaoyang |
+ 222.000000000 | California.LosAngeles |
+ 219.200000000 | California.SanFrancisco |
Query OK, 2 row(s) in set (0.002136s)
```
### Example 2
-In TDengine CLI `taos`, use below SQL to get the number of rows and the maximum current in the past 24 hours from meters whose groupId is 2.
+In TDengine CLI `taos`, use the SQL below to get the number of rows and the maximum current in the past 24 hours from meters whose groupId is 2.
```
taos> SELECT count(*), max(current) FROM meters where groupId = 2 and ts > now - 24h;
- cunt(*) | max(current) |
+ count(*) | max(current) |
==================================
5 | 13.4 |
Query OK, 1 row(s) in set (0.002136s)
```
-Join query is allowed between only the tables of same STable. In [Select](/taos-sql/select), all query operations are marked as whether it supports STable or not.
+Join queries are only allowed between subtables of the same STable. In [Select](/taos-sql/select), all query operations are marked as to whether they support STables or not.
## Down Sampling and Interpolation
-In IoT use cases, down sampling is widely used to aggregate the data by time range. `INTERVAL` keyword in TDengine can be used to simplify the query by time window. For example, below SQL statement can be used to get the sum of current every 10 seconds from meters table d1001.
+In IoT use cases, down sampling is widely used to aggregate data by time range. The `INTERVAL` keyword in TDengine can be used to simplify the query by time window. For example, the SQL statement below can be used to get the sum of current every 10 seconds from meters table d1001.
```
taos> SELECT sum(current) FROM d1001 INTERVAL(10s);
@@ -96,10 +96,10 @@ taos> SELECT sum(current) FROM d1001 INTERVAL(10s);
Query OK, 2 row(s) in set (0.000883s)
```
-Down sampling can also be used for STable. For example, below SQL statement can be used to get the sum of current from all meters in BeiJing.
+Down sampling can also be used for STable. For example, the below SQL statement can be used to get the sum of current from all meters in California.
```
-taos> SELECT SUM(current) FROM meters where location like "Beijing%" INTERVAL(1s);
+taos> SELECT SUM(current) FROM meters where location like "California%" INTERVAL(1s);
ts | sum(current) |
======================================================
2018-10-03 14:38:04.000 | 10.199999809 |
@@ -110,7 +110,7 @@ taos> SELECT SUM(current) FROM meters where location like "Beijing%" INTERVAL(1s
Query OK, 5 row(s) in set (0.001538s)
```
-Down sampling also supports time offset. For example, below SQL statement can be used to get the sum of current from all meters but each time window must start at the boundary of 500 milliseconds.
+Down sampling also supports time offset. For example, the below SQL statement can be used to get the sum of current from all meters but each time window must start at the boundary of 500 milliseconds.
```
taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a);
@@ -124,7 +124,7 @@ taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a);
Query OK, 5 row(s) in set (0.001521s)
```
-In many use cases, it's hard to align the timestamp of the data collected by each collection point. However, a lot of algorithms like FFT require the data to be aligned with same time interval and application programs have to handle by themselves in many systems. In TDengine, it's easy to achieve the alignment using down sampling.
+In many use cases, it's hard to align the timestamp of the data collected by each collection point. However, a lot of algorithms like FFT require the data to be aligned with same time interval and application programs have to handle this by themselves. In TDengine, it's easy to achieve the alignment using down sampling.
Interpolation can be performed in TDengine if there is no data in a time range.
@@ -162,16 +162,16 @@ In the section describing [Insert](/develop/insert-data/sql-writing), a database
:::note
-1. With either REST connection or native connection, the above sample code work well.
-2. Please be noted that `use db` can't be used in case of REST connection because it's stateless.
+1. With either REST connection or native connection, the above sample code works well.
+2. Please note that `use db` can't be used in case of REST connection because it's stateless.
:::
### Asynchronous Query
-Besides synchronous query, asynchronous query API is also provided by TDengine to insert or query data more efficiently. With similar hardware and software environment, async API is 2~4 times faster than sync APIs. Async API works in non-blocking mode, which means an operation can be returned without finishing so that the calling thread can switch to other works to improve the performance of the whole application system. Async APIs perform especially better in case of poor network.
+Besides synchronous queries, an asynchronous query API is also provided by TDengine to insert or query data more efficiently. With a similar hardware and software environment, the async API is 2~4 times faster than sync APIs. Async API works in non-blocking mode, which means an operation can be returned without finishing so that the calling thread can switch to other work to improve the performance of the whole application system. Async APIs perform especially better in the case of poor networks.
-Please be noted that async query can only be used with native connection.
+Please note that async query can only be used with a native connection.
diff --git a/docs-en/07-develop/05-continuous-query.mdx b/docs-en/07-develop/05-continuous-query.mdx
index 97e32a17ff325a9f67ac0a732be3dd72ccca8888..1aea5783fc8116a4e02a4b5345d341707cd399ea 100644
--- a/docs-en/07-develop/05-continuous-query.mdx
+++ b/docs-en/07-develop/05-continuous-query.mdx
@@ -1,18 +1,18 @@
---
sidebar_label: Continuous Query
-description: "Continuous query is a query that's executed automatically according to predefined frequency to provide aggregate query capability by time window, it's actually a simplified time driven stream computing."
+description: "Continuous query is a query that's executed automatically at a predefined frequency to provide aggregate query capability by time window. It is essentially simplified, time driven, stream computing."
title: "Continuous Query"
---
-Continuous query is a query that's executed automatically according to predefined frequency to provide aggregate query capability by time window, it's actually a simplified time driven stream computing. Continuous query can be performed on a table or STable in TDengine. The result of continuous query can be pushed to client or written back to TDengine. Each query is executed on a time window, which moves forward with time. The size of time window and the forward sliding time need to be specified with parameter `INTERVAL` and `SLIDING` respectively.
+A continuous query is a query that's executed automatically at a predefined frequency to provide aggregate query capability by time window. It is essentially simplified, time driven, stream computing. A continuous query can be performed on a table or STable in TDengine. The results of a continuous query can be pushed to clients or written back to TDengine. Each query is executed on a time window, which moves forward with time. The size of time window and the forward sliding time need to be specified with parameter `INTERVAL` and `SLIDING` respectively.
-Continuous query in TDengine is time driven, and can be defined using TAOS SQL directly without any extra operations. With continuous query, the result can be generated according to time window to achieve down sampling of original data. Once a continuous query is defined using TAOS SQL, the query is automatically executed at the end of each time window and the result is pushed back to client or written to TDengine.
+A continuous query in TDengine is time driven, and can be defined using TAOS SQL directly without any extra operations. With a continuous query, the result can be generated based on a time window to achieve down sampling of the original data. Once a continuous query is defined using TAOS SQL, the query is automatically executed at the end of each time window and the result is pushed back to clients or written to TDengine.
There are some differences between continuous query in TDengine and time window computation in stream computing:
- The computation is performed and the result is returned in real time in stream computing, but the computation in continuous query is only started when a time window closes. For example, if the time window is 1 day, then the result will only be generated at 23:59:59.
-- If a historical data row is written in to a time widow for which the computation has been finished, the computation will not be performed again and the result will not be pushed to client again either. If the result has been written into TDengine, there will be no update for the result.
-- In continuous query, if the result is pushed to client, the client status is not cached on the server side and Exactly-once is not guaranteed by the server either. If the client program crashes, a new time window will be generated from the time where the continuous query is restarted. If the result is written into TDengine, the data written into TDengine can be guaranteed as valid and continuous.
+- If a historical data row is written in to a time window for which the computation has already finished, the computation will not be performed again and the result will not be pushed to client applications again. If the results have already been written into TDengine, they will not be updated.
+- In continuous query, if the result is pushed to a client, the client status is not cached on the server side and Exactly-once is not guaranteed by the server. If the client program crashes, a new time window will be generated from the time where the continuous query is restarted. If the result is written into TDengine, the data written into TDengine can be guaranteed as valid and continuous.
## Syntax
@@ -30,15 +30,15 @@ SLIDING: The time step for which the time window moves forward each time
## How to Use
-In this section the use case of meters will be used to introduce how to use continuous query. Assume the STable and sub tables have been created using below SQL statement.
+In this section the use case of meters will be used to introduce how to use continuous query. Assume the STable and subtables have been created using the SQL statements below.
```sql
create table meters (ts timestamp, current float, voltage int, phase float) tags (location binary(64), groupId int);
-create table D1001 using meters tags ("Beijing.Chaoyang", 2);
-create table D1002 using meters tags ("Beijing.Haidian", 2);
+create table D1001 using meters tags ("California.SanFrancisco", 2);
+create table D1002 using meters tags ("California.LosAngeles", 2);
```
-The average voltage for each time window of one minute with 30 seconds as the length of moving forward can be retrieved using below SQL statement.
+The SQL statement below retrieves the average voltage for a one minute time window, with each time window moving forward by 30 seconds.
```sql
select avg(voltage) from meters interval(1m) sliding(30s);
@@ -50,13 +50,13 @@ Whenever the above SQL statement is executed, all the existing data will be comp
select avg(voltage) from meters where ts > {startTime} interval(1m) sliding(30s);
```
-Another easier way for same purpose is prepend `create table {tableName} as` before the `select`.
+An easier way to achieve this is to prepend `create table {tableName} as` before the `select`.
```sql
create table avg_vol as select avg(voltage) from meters interval(1m) sliding(30s);
```
-A table named as `avg_vol` will be created automatically, then every 30 seconds the `select` statement will be executed automatically on the data in the past 1 minutes, i.e. the latest time window, and the result is written into table `avg_vol`. The client program just needs to query from table `avg_vol`. For example:
+A table named as `avg_vol` will be created automatically, then every 30 seconds the `select` statement will be executed automatically on the data in the past 1 minute, i.e. the latest time window, and the result is written into table `avg_vol`. The client program just needs to query from table `avg_vol`. For example:
```sql
taos> select * from avg_vol;
@@ -68,16 +68,16 @@ taos> select * from avg_vol;
2020-07-29 13:39:00.000 | 223.0800000 |
```
-Please be noted that the minimum allowed time window is 10 milliseconds, and no upper limit.
+Please note that the minimum allowed time window is 10 milliseconds, and there is no upper limit.
-Besides, it's allowed to specify the start and end time of continuous query. If the start time is not specified, the timestamp of the first original row will be considered as the start time; if the end time is not specified, the continuous will be performed infinitely, otherwise it will be terminated once the end time is reached. For example, the continuous query in below SQL statement will be started from now and terminated one hour later.
+It's possible to specify the start and end time of a continuous query. If the start time is not specified, the timestamp of the first row will be considered as the start time; if the end time is not specified, the continuous query will be performed indefinitely, otherwise it will be terminated once the end time is reached. For example, the continuous query in the SQL statement below will be started from now and terminated one hour later.
```sql
create table avg_vol as select avg(voltage) from meters where ts > now and ts <= now + 1h interval(1m) sliding(30s);
```
-`now` in above SQL statement stands for the time when the continuous query is created, not the time when the computation is actually performed. Besides, to avoid the trouble caused by the delay of original data as much as possible, the actual computation in continuous query is also started with a little delay. That means, once a time window closes, the computation is not started immediately. Normally, the result can only be available a little time later, normally within one minute, after the time window closes.
+`now` in the above SQL statement stands for the time when the continuous query is created, not the time when the computation is actually performed. To avoid the trouble caused by a delay in receiving data as much as possible, the actual computation in a continuous query is started after a little delay. That means, once a time window closes, the computation is not started immediately. Normally, the result are available after a little time, normally within one minute, after the time window closes.
## How to Manage
-`show streams` command can be used in TDengine CLI `taos` to show all the continuous queries in the system, and `kill stream` can be used to terminate a continuous query.
+`show streams` command can be used in the TDengine CLI `taos` to show all the continuous queries in the system, and `kill stream` can be used to terminate a continuous query.
diff --git a/docs-en/07-develop/06-subscribe.mdx b/docs-en/07-develop/06-subscribe.mdx
index 56f4ed83d8ebc6f21afbdd2eca2e01f11b313883..782fcdbaf221419dd231bd10958e26b8f4f856e5 100644
--- a/docs-en/07-develop/06-subscribe.mdx
+++ b/docs-en/07-develop/06-subscribe.mdx
@@ -1,6 +1,6 @@
---
-sidebar_label: Subscription
-description: "Lightweight service for data subscription and pushing, the time series data inserted into TDengine continuously can be pushed automatically to the subscribing clients."
+sidebar_label: Data Subscription
+description: "Lightweight service for data subscription and publishing. Time series data inserted into TDengine continuously can be pushed automatically to subscribing clients."
title: Data Subscription
---
@@ -16,9 +16,9 @@ import CDemo from "./_sub_c.mdx";
## Introduction
-According to the time series nature of the data, data inserting in TDengine is similar to data publishing in message queues, they both can be considered as a new data record with timestamp is inserted into the system. Data is stored in ascending order of timestamp inside TDengine, so essentially each table in TDengine can be considered as a message queue.
+Due to the nature of time series data, data insertion into TDengine is similar to data publishing in message queues. Data is stored in ascending order of timestamp inside TDengine, and so each table in TDengine can essentially be considered as a message queue.
-Lightweight service for data subscription and pushing is built in TDengine. With the API provided by TDengine, client programs can used `select` statement to subscribe the data from one or more tables. The subscription and and state maintenance is performed on the client side, the client programs polls the server to check whether there is new data, and if so the new data will be pushed back to the client side. If the client program is restarted, where to start for retrieving new data is up to the client side.
+A lightweight service for data subscription and publishing is built into TDengine. With the API provided by TDengine, client programs can use `select` statements to subscribe to data from one or more tables. The subscription and state maintenance is performed on the client side. The client programs poll the server to check whether there is new data, and if so the new data will be pushed back to the client side. If the client program is restarted, where to start retrieving new data is up to the client side.
There are 3 major APIs related to subscription provided in the TDengine client driver.
@@ -28,11 +28,11 @@ taos_consume
taos_unsubscribe
```
-For more details about these API please refer to [C/C++ Connector](/reference/connector/cpp). Their usage will be introduced below using the use case of meters, in which the schema of STable and sub tables please refer to the previous section "continuous query". Full sample code can be found [here](https://github.com/taosdata/TDengine/blob/master/examples/c/subscribe.c).
+For more details about these APIs please refer to [C/C++ Connector](/reference/connector/cpp). Their usage will be introduced below using the use case of meters, in which the schema of STable and subtables from the previous section [Continuous Query](/develop/continuous-query) are used. Full sample code can be found [here](https://github.com/taosdata/TDengine/blob/master/examples/c/subscribe.c).
-If we want to get notification and take some actions if the current exceeds a threshold, like 10A, from some meters, there are two ways:
+If we want to get a notification and take some actions if the current exceeds a threshold, like 10A, from some meters, there are two ways:
-The first way is to query on each sub table and record the last timestamp matching the criteria, then after some time query on the data later than recorded timestamp and repeat this process. The SQL statements for this way are as below.
+The first way is to query each sub table and record the last timestamp matching the criteria. Then after some time, query the data later than the recorded timestamp, and repeat this process. The SQL statements for this way are as below.
```sql
select * from D1001 where ts > {last_timestamp1} and current > 10;
@@ -40,7 +40,7 @@ select * from D1002 where ts > {last_timestamp2} and current > 10;
...
```
-The above way works, but the problem is that the number of `select` statements increases with the number of meters grows. Finally the performance of both client side and server side will be unacceptable once the number of meters grows to a big enough number.
+The above way works, but the problem is that the number of `select` statements increases with the number of meters. Additionally, the performance of both client side and server side will be unacceptable once the number of meters grows to a big enough number.
A better way is to query on the STable, only one `select` is enough regardless of the number of meters, like below:
@@ -48,9 +48,9 @@ A better way is to query on the STable, only one `select` is enough regardless o
select * from meters where ts > {last_timestamp} and current > 10;
```
-However, how to choose `last_timestamp` becomes a new problem if using this way. Firstly, the timestamp when the data is generated is different from the timestamp when the data is inserted into the database, sometimes the difference between them may be very big. Secondly, the time when the data from different meters may arrives at the database may be different too. If the timestamp of the "slowest" meter is used as `last_timestamp` in the query, the data from other meters may be selected repeatedly; but if the timestamp of the "fasted" meters is used as `last_timestamp`, some data from other meters may be missed.
+However, this presents a new problem in how to choose `last_timestamp`. First, the timestamp when the data is generated is different from the timestamp when the data is inserted into the database, sometimes the difference between them may be very big. Second, the time when the data from different meters arrives at the database may be different too. If the timestamp of the "slowest" meter is used as `last_timestamp` in the query, the data from other meters may be selected repeatedly; but if the timestamp of the "fastest" meter is used as `last_timestamp`, some data from other meters may be missed.
-All the problems mentioned above can be resolved thoroughly using subscription provided by TDengine.
+All the problems mentioned above can be resolved easily using the subscription functionality provided by TDengine.
The first step is to create subscription using `taos_subscribe`.
@@ -65,31 +65,33 @@ if (async) {
}
```
-The subscription in TDengine can be either synchronous or asynchronous. In the above sample code, the value of variable `async` is determined from the CLI input, then it's used to create either an async or sync subscription. Sync subscription means the client program needs to invoke `taos_consume` to retrieve data, and async subscription means another thread created by `taos_subscribe` internally invokes `taos_consume` to retrieve data and pass the data to `subscribe_callback` for processing, `subscribe_callback` is a call back function provided by the client program and it's suggested not to do time consuming operation in the call back function.
+The subscription in TDengine can be either synchronous or asynchronous. In the above sample code, the value of variable `async` is determined from the CLI input, then it's used to create either an async or sync subscription. Sync subscription means the client program needs to invoke `taos_consume` to retrieve data, and async subscription means another thread created by `taos_subscribe` internally invokes `taos_consume` to retrieve data and pass the data to `subscribe_callback` for processing. `subscribe_callback` is a callback function provided by the client program. You should not perform time consuming operations in the callback function.
-The parameter `taos` is an established connection. There is nothing special in sync subscription mode. In async subscription, it should be exclusively by current thread, otherwise unpredictable error may occur.
+The parameter `taos` is an established connection. Nothing special needs to be done for thread safety for synchronous subscription. For asynchronous subscription, the taos_subscribe function should be called exclusively by the current thread, to avoid unpredictable errors.
-The parameter `sql` is a `select` statement in which `where` clause can be used to specify filter conditions. In our example, the data whose current exceeds 10A needs to be subscribed like below SQL statement:
+The parameter `sql` is a `select` statement in which the `where` clause can be used to specify filter conditions. In our example, we can subscribe to the records in which the current exceeds 10A, with the following SQL statement:
```sql
select * from meters where current > 10;
```
-Please be noted that, all the data will be processed because no start time is specified. If only the data from one day ago needs to be processed, a time related condition can be added:
+Please note that, all the data will be processed because no start time is specified. If we only want to process data for the past day, a time related condition can be added:
```sql
select * from meters where ts > now - 1d and current > 10;
```
-The parameter `topic` is the name of the subscription, it needs to be guaranteed unique in the client program, but it's not necessary to be globally unique because subscription is implemented in the APIs on client side.
+The parameter `topic` is the name of the subscription. The client application must guarantee that the name is unique. However, it doesn't have to be globally unique because subscription is implemented in the APIs on the client side.
-If the subscription named as `topic` doesn't exist, parameter `restart` would be ignored. If the subscription named as `topic` has been created before by the client program which then exited, when the client program is restarted to use this `topic`, parameter `restart` is used to determine retrieving data from beginning or from the last point where the subscription was broken. If the value of `restart` is **true** (i.e. a non-zero value), the data will be retrieved from beginning, or if it is **false** (i.e. zero), the data already consumed before will not be processed again.
+If the subscription named as `topic` doesn't exist, the parameter `restart` will be ignored. If the subscription named as `topic` has been created before by the client program, when the client program is restarted with the subscription named `topic`, parameter `restart` is used to determine whether to retrieve data from the beginning or from the last point where the subscription was broken.
-The last parameter of `taos_subscribe` is the polling interval in unit of millisecond. In sync mode, if the time difference between two continuous invocations to `taos_consume` is smaller than the interval specified by `taos_subscribe`, `taos_consume` would be blocked until the interval is reached. In async mode, this interval is the minimum interval between two invocations to the call back function.
+If the value of `restart` is **true** (i.e. a non-zero value), data will be retrieved from the beginning. If it is **false** (i.e. zero), the data already consumed before will not be processed again.
-The last second parameter of `taos_subscribe` is used to pass arguments to the call back function. `taos_subscribe` doesn't process this parameter and simply passes it to the call back function. This parameter is simply ignored in sync mode.
+The last parameter of `taos_subscribe` is the polling interval in units of millisecond. In sync mode, if the time difference between two continuous invocations to `taos_consume` is smaller than the interval specified by `taos_subscribe`, `taos_consume` will be blocked until the interval is reached. In async mode, this interval is the minimum interval between two invocations to the call back function.
-After a subscription is created, its data can be consumed and processed, below is the sample code of how to consume data in sync mode, in the else part if `if (async)`.
+The second to last parameter of `taos_subscribe` is used to pass arguments to the call back function. `taos_subscribe` doesn't process this parameter and simply passes it to the call back function. This parameter is simply ignored in sync mode.
+
+After a subscription is created, its data can be consumed and processed. Shown below is the sample code to consume data in sync mode, in the else condition of `if (async)`.
```c
if (async) {
@@ -106,7 +108,7 @@ if (async) {
}
```
-In the above sample code, there is an infinite loop, each time carriage return is entered `taos_consume` is invoked, the return value of `taos_consume` is the selected result set, exactly as the input of `taos_use_result`, in the above sample `print_result` is used instead to simplify the sample. Below is the implementation of `print_result`.
+In the above sample code in the else condition, there is an infinite loop. Each time carriage return is entered `taos_consume` is invoked. The return value of `taos_consume` is the selected result set. In the above sample, `print_result` is used to simplify the printing of the result set. It is similar to `taos_use_result`. Below is the implementation of `print_result`.
```c
void print_result(TAOS_RES* res, int blockFetch) {
@@ -133,9 +135,9 @@ void print_result(TAOS_RES* res, int blockFetch) {
}
```
-In the above code `taos_print_row` is used to process the data consumed. All the matching rows will be printed.
+In the above code `taos_print_row` is used to process the data consumed. All matching rows are printed.
-In async mode, the data consuming is simpler as below.
+In async mode, consuming data is simpler as shown below.
```c
void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
@@ -149,22 +151,22 @@ void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
taos_unsubscribe(tsub, keep);
```
-The second parameter `keep` is used to specify whether to keep the subscription progress on the client sde. If it is **false**, i.e. **0**, then subscription will be restarted from beginning regardless of the `restart` parameter's value in when `taos_subscribe` is invoked again. The subscription progress information is stored in _{DataDir}/subscribe/_ , under which there is a file with same name as `topic` for each subscription, the subscription will be restarted from beginning if the corresponding progress file is removed.
+The second parameter `keep` is used to specify whether to keep the subscription progress on the client sde. If it is **false**, i.e. **0**, then subscription will be restarted from beginning regardless of the `restart` parameter's value when `taos_subscribe` is invoked again. The subscription progress information is stored in _{DataDir}/subscribe/_ , under which there is a file with the same name as `topic` for each subscription(Note: The default value of `DataDir` in the `taos.cfg` file is **/var/lib/taos/**. However, **/var/lib/taos/** does not exist on the Windows server. So you need to change the `DataDir` value to the corresponding existing directory."), the subscription will be restarted from the beginning if the corresponding progress file is removed.
Now let's see the effect of the above sample code, assuming below prerequisites have been done.
- The sample code has been downloaded to local system
- TDengine has been installed and launched properly on same system
-- The database, STable, sub tables required in the sample code have been ready
+- The database, STable, and subtables required in the sample code are ready
-It's ready to launch below command in the directory where the sample code resides to compile and start the program.
+Launch the command below in the directory where the sample code resides to compile and start the program.
```bash
make
./subscribe -sql='select * from meters where current > 10;'
```
-After the program is started, open another terminal and launch TDengine CLI `taos`, then use below SQL commands to insert a row whose current is 12A into table **D1001**.
+After the program is started, open another terminal and launch TDengine CLI `taos`, then use the below SQL commands to insert a row whose current is 12A into table **D1001**.
```sql
use test;
@@ -175,7 +177,7 @@ Then, this row of data will be shown by the example program on the first termina
## Examples
-Below example program demonstrates how to subscribe the data rows whose current exceeds 10A using connectors.
+The example program below demonstrates how to subscribe, using connectors, to data rows in which current exceeds 10A.
### Prepare Data
@@ -187,8 +189,8 @@ taos> use power;
# create super table "meters"
taos> create table meters(ts timestamp, current float, voltage int, phase int) tags(location binary(64), groupId int);
# create tabes using the schema defined by super table "meters"
-taos> create table d1001 using meters tags ("Beijing.Chaoyang", 2);
-taos> create table d1002 using meters tags ("Beijing.Haidian", 2);
+taos> create table d1001 using meters tags ("California.SanFrancisco", 2);
+taos> create table d1002 using meters tags ("California.LoSangeles", 2);
# insert some rows
taos> insert into d1001 values("2020-08-15 12:00:00.000", 12, 220, 1),("2020-08-15 12:10:00.000", 12.3, 220, 2),("2020-08-15 12:20:00.000", 12.2, 220, 1);
taos> insert into d1002 values("2020-08-15 12:00:00.000", 9.9, 220, 1),("2020-08-15 12:10:00.000", 10.3, 220, 1),("2020-08-15 12:20:00.000", 11.2, 220, 1);
@@ -196,11 +198,11 @@ taos> insert into d1002 values("2020-08-15 12:00:00.000", 9.9, 220, 1),("2020-08
taos> select * from meters where current > 10;
ts | current | voltage | phase | location | groupid |
===========================================================================================================
- 2020-08-15 12:10:00.000 | 10.30000 | 220 | 1 | Beijing.Haidian | 2 |
- 2020-08-15 12:20:00.000 | 11.20000 | 220 | 1 | Beijing.Haidian | 2 |
- 2020-08-15 12:00:00.000 | 12.00000 | 220 | 1 | Beijing.Chaoyang | 2 |
- 2020-08-15 12:10:00.000 | 12.30000 | 220 | 2 | Beijing.Chaoyang | 2 |
- 2020-08-15 12:20:00.000 | 12.20000 | 220 | 1 | Beijing.Chaoyang | 2 |
+ 2020-08-15 12:10:00.000 | 10.30000 | 220 | 1 | California.LoSangeles | 2 |
+ 2020-08-15 12:20:00.000 | 11.20000 | 220 | 1 | California.LoSangeles | 2 |
+ 2020-08-15 12:00:00.000 | 12.00000 | 220 | 1 | California.SanFrancisco | 2 |
+ 2020-08-15 12:10:00.000 | 12.30000 | 220 | 2 | California.SanFrancisco | 2 |
+ 2020-08-15 12:20:00.000 | 12.20000 | 220 | 1 | California.SanFrancisco | 2 |
Query OK, 5 row(s) in set (0.004896s)
```
@@ -232,14 +234,14 @@ Query OK, 5 row(s) in set (0.004896s)
### Run the Examples
-The example programs firstly consume all historical data matching the criteria.
+The example programs first consume all historical data matching the criteria.
```bash
-ts: 1597464000000 current: 12.0 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid : 2
-ts: 1597464600000 current: 12.3 voltage: 220 phase: 2 location: Beijing.Chaoyang groupid : 2
-ts: 1597465200000 current: 12.2 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid : 2
-ts: 1597464600000 current: 10.3 voltage: 220 phase: 1 location: Beijing.Haidian groupid : 2
-ts: 1597465200000 current: 11.2 voltage: 220 phase: 1 location: Beijing.Haidian groupid : 2
+ts: 1597464000000 current: 12.0 voltage: 220 phase: 1 location: California.SanFrancisco groupid : 2
+ts: 1597464600000 current: 12.3 voltage: 220 phase: 2 location: California.SanFrancisco groupid : 2
+ts: 1597465200000 current: 12.2 voltage: 220 phase: 1 location: California.SanFrancisco groupid : 2
+ts: 1597464600000 current: 10.3 voltage: 220 phase: 1 location: California.LoSangeles groupid : 2
+ts: 1597465200000 current: 11.2 voltage: 220 phase: 1 location: California.LoSangeles groupid : 2
```
Next, use TDengine CLI to insert a new row.
@@ -250,8 +252,8 @@ taos> use power;
taos> insert into d1001 values(now, 12.4, 220, 1);
```
-Because the current in inserted row exceeds 10A, it will be consumed by the example program.
+Because the current in the inserted row exceeds 10A, it will be consumed by the example program.
```
-ts: 1651146662805 current: 12.4 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid: 2
+ts: 1651146662805 current: 12.4 voltage: 220 phase: 1 location: California.SanFrancisco groupid: 2
```
diff --git a/docs-en/07-develop/07-cache.md b/docs-en/07-develop/07-cache.md
index 13db6c363802abed290cfc4d4466d40e48852f3d..743452faff6a2be8466318a7dab61a44e33c3664 100644
--- a/docs-en/07-develop/07-cache.md
+++ b/docs-en/07-develop/07-cache.md
@@ -4,16 +4,16 @@ title: Cache
description: "The latest row of each table is kept in cache to provide high performance query of latest state."
---
-The cache management policy in TDengine is First-In-First-Out (FIFO), which is also known as insert driven cache management policy and different from read driven cache management, i.e. Least-Recent-Used (LRU). It simply stores the latest data in cache and flushes the oldest data in cache to disk when the cache usage reaches a threshold. In IoT use cases, the most cared about data is the latest data, i.e. current state. The cache policy in TDengine is based the nature of IoT data.
+The cache management policy in TDengine is First-In-First-Out (FIFO). FIFO is also known as insert driven cache management policy and it is different from read driven cache management, which is more commonly known as Least-Recently-Used (LRU). FIFO simply stores the latest data in cache and flushes the oldest data in cache to disk, when the cache usage reaches a threshold. In IoT use cases, it is the current state i.e. the latest or most recent data that is important. The cache policy in TDengine, like much of the design and architecture of TDengine, is based on the nature of IoT data.
-Caching the latest data provides the capability of retrieving data in milliseconds. With this capability, TDengine can be configured properly to be used as caching system without deploying another separate caching system to simplify the system architecture and minimize the operation cost. The cache will be emptied after TDengine is restarted, TDengine doesn't reload data from disk into cache like a real key-value caching system.
+Caching the latest data provides the capability of retrieving data in milliseconds. With this capability, TDengine can be configured properly to be used as a caching system without deploying another separate caching system. This simplifies the system architecture and minimizes operational costs. The cache is emptied after TDengine is restarted. TDengine does not reload data from disk into cache, like a key-value caching system.
-The memory space used by TDengine cache is fixed in size, according to the configuration based on application requirement and system resources. Independent memory pool is allocated for and managed by each vnode (virtual node) in TDengine, there is no sharing of memory pools between vnodes. All the tables belonging to a vnode share all the cache memory of the vnode.
+The memory space used by the TDengine cache is fixed in size and configurable. It should be allocated based on application requirements and system resources. An independent memory pool is allocated for and managed by each vnode (virtual node) in TDengine. There is no sharing of memory pools between vnodes. All the tables belonging to a vnode share all the cache memory of the vnode.
-Memory pool is divided into blocks and data is stored in row format in memory and each block follows FIFO policy. The size of each block is determined by configuration parameter `cache`, the number of blocks for each vnode is determined by `blocks`. For each vnode, the total cache size is `cache * blocks`. It's better to set the size of each block to hold at least tends of rows.
+The memory pool is divided into blocks and data is stored in row format in memory and each block follows FIFO policy. The size of each block is determined by configuration parameter `cache` and the number of blocks for each vnode is determined by the parameter `blocks`. For each vnode, the total cache size is `cache * blocks`. A cache block needs to ensure that each table can store at least dozens of records, to be efficient.
-`last_row` function can be used to retrieve the last row of a table or a STable to quickly show the current state of devices on monitoring screen. For example below SQL statement retrieves the latest voltage of all meters in Chaoyang district of Beijing.
+`last_row` function can be used to retrieve the last row of a table or a STable to quickly show the current state of devices on monitoring screen. For example the below SQL statement retrieves the latest voltage of all meters in San Francisco, California.
```sql
-select last_row(voltage) from meters where location='Beijing.Chaoyang';
+select last_row(voltage) from meters where location='California.SanFrancisco';
```
diff --git a/docs-en/07-develop/08-udf.md b/docs-en/07-develop/08-udf.md
index 61639e34404477d3bb5785da129a1d922a4d020e..49bc95bd91a4c31d42d2b21ef05d69225f1bd963 100644
--- a/docs-en/07-develop/08-udf.md
+++ b/docs-en/07-develop/08-udf.md
@@ -1,24 +1,31 @@
---
sidebar_label: UDF
-title: User Defined Functions
-description: "Scalar functions and aggregate functions developed by users can be utilized by the query framework to expand the query capability"
+title: User Defined Functions(UDF)
+description: "Scalar functions and aggregate functions developed by users can be utilized by the query framework to expand query capability"
---
-In some use cases, the query capability required by application programs can't be achieved directly by builtin functions. With UDF, the functions developed by users can be utilized by query framework to meet some special requirements. UDF normally takes one column of data as input, but can also support the result of sub query as input.
+In some use cases, built-in functions are not adequate for the query capability required by application programs. With UDF, the functions developed by users can be utilized by the query framework to meet business and application requirements. UDF normally takes one column of data as input, but can also support the result of a sub-query as input.
-From version 2.2.0.0, UDF programmed in C/C++ language can be supported by TDengine.
+From version 2.2.0.0, UDF written in C/C++ are supported by TDengine.
-Two kinds of functions can be implemented by UDF: scalar function and aggregate function.
-## Define UDF
+## Types of UDF
+
+Two kinds of functions can be implemented by UDF: scalar functions and aggregate functions.
+
+Scalar functions return multiple rows and aggregate functions return either 0 or 1 row.
+
+In the case of a scalar function you only have to implement the "normal" function template.
+
+In the case of an aggregate function, in addition to the "normal" function, you also need to implement the "merge" and "finalize" function templates even if the implementation is empty. This will become clear in the sections below.
### Scalar Function
-Below function template can be used to define your own scalar function.
+As mentioned earlier, a scalar UDF only has to implement the "normal" function template. The function template below can be used to define your own scalar function.
`void udfNormalFunc(char* data, short itype, short ibytes, int numOfRows, long long* ts, char* dataOutput, char* interBuf, char* tsOutput, int* numOfOutput, short otype, short obytes, SUdfInit* buf)`
-`udfNormalFunc` is the place holder of function name, a function implemented based on the above template can be used to perform scalar computation on data rows. The parameters are fixed to control the data exchange between UDF and TDengine.
+`udfNormalFunc` is the place holder for a function name. A function implemented based on the above template can be used to perform scalar computation on data rows. The parameters are fixed to control the data exchange between UDF and TDengine.
- Definitions of the parameters:
@@ -30,20 +37,24 @@ Below function template can be used to define your own scalar function.
- numOfRows:the number of rows in the input data
- ts: the column of timestamp corresponding to the input data
- dataOutput:the buffer for output data, total size is `oBytes * numberOfRows`
- - interBuf:the buffer for intermediate result, its size is specified by `BUFSIZE` parameter when creating a UDF. It's normally used when the intermediate result is not same as the final result, it's allocated and freed by TDengine.
+ - interBuf:the buffer for an intermediate result. Its size is specified by the `BUFSIZE` parameter when creating a UDF. It's normally used when the intermediate result is not same as the final result. This buffer is allocated and freed by TDengine.
- tsOutput:the column of timestamps corresponding to the output data; it can be used to output timestamp together with the output data if it's not NULL
- numOfOutput:the number of rows in output data
- buf:for the state exchange between UDF and TDengine
- [add_one.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/add_one.c) is one example of the simplest UDF implementations, i.e. one instance of the above `udfNormalFunc` template. It adds one to each value of a column passed in which can be filtered using `where` clause and outputs the result.
+ [add_one.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/add_one.c) is one example of a very simple UDF implementation, i.e. one instance of the above `udfNormalFunc` template. It adds one to each value of a passed in column, which can be filtered using the `where` clause, and outputs the result.
### Aggregate Function
-Below function template can be used to define your own aggregate function.
+For aggregate UDF, as mentioned earlier you must implement a "normal" function template (described above) and also implement the "merge" and "finalize" templates.
-`void abs_max_merge(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf)`
+#### Merge Function Template
-`udfMergeFunc` is the place holder of function name, the function implemented with the above template is used to aggregate the intermediate result, only can be used in the aggregate query for STable.
+The function template below can be used to define your own merge function for an aggregate UDF.
+
+`void udfMergeFunc(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf)`
+
+`udfMergeFunc` is the place holder for a function name. The function implemented with the above template is used to aggregate intermediate results and can only be used in the aggregate query for STable.
Definitions of the parameters:
@@ -53,17 +64,11 @@ Definitions of the parameters:
- numOfOutput:number of rows in the output data
- buf:for the state exchange between UDF and TDengine
-[abs_max.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/abs_max.c) is an user defined aggregate function to get the maximum from the absolute value of a column.
-
-The internal processing is that the data affected by the select statement will be divided into multiple row blocks and `udfNormalFunc`, i.e. `abs_max` in this case, is performed on each row block to generate the intermediate of each sub table, then `udfMergeFunc`, i.e. `abs_max_merge` in this case, is performed on the intermediate result of sub tables to aggregate to generate the final or intermediate result of STable. The intermediate result of STable is finally processed by `udfFinalizeFunc` to generate the final result, which contain either 0 or 1 row.
-
-Other typical scenarios, like covariance, can also be achieved by aggregate UDF.
+#### Finalize Function Template
-### Finalize
+The function template below can be used to finalize the result of your own UDF, normally used when interBuf is used.
-Below function template can be used to finalize the result of your own UDF, normally used when interBuf is used.
-
-`void abs_max_finalize(char* dataOutput, char* interBuf, int* numOfOutput, SUdfInit* buf)`
+`void udfFinalizeFunc(char* dataOutput, char* interBuf, int* numOfOutput, SUdfInit* buf)`
`udfFinalizeFunc` is the place holder of function name, definitions of the parameter are as below:
@@ -72,47 +77,64 @@ Below function template can be used to finalize the result of your own UDF, norm
- numOfOutput:number of output data, can only be 0 or 1 for aggregate function
- buf:for state exchange between UDF and TDengine
-## UDF Conventions
+### Example abs_max.c
+
+[abs_max.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/abs_max.c) is an example of a user defined aggregate function to get the maximum from the absolute values of a column.
+
+The internal processing happens as follows. The results of the select statement are divided into multiple row blocks and `udfNormalFunc`, i.e. `abs_max` in this case, is performed on each row block to generate the intermediate results for each sub table. Then `udfMergeFunc`, i.e. `abs_max_merge` in this case, is performed on the intermediate result of sub tables to aggregate and generate the final or intermediate result of STable. The intermediate result of STable is finally processed by `udfFinalizeFunc`, i.e. `abs_max_finalize` in this example, to generate the final result, which contains either 0 or 1 row.
+
+Other typical aggregation functions such as covariance, can also be implemented using aggregate UDF.
-The naming of 3 kinds of UDF, i.e. udfNormalFunc, udfMergeFunc, and udfFinalizeFunc is required to have same prefix, i.e. the actual name of udfNormalFunc, which means udfNormalFunc doesn't need a suffix following the function name. While udfMergeFunc should be udfNormalFunc followed by `_merge`, udfFinalizeFunc should be udfNormalFunc followed by `_finalize`. The naming convention is part of UDF framework, TDengine follows this convention to invoke corresponding actual functions.\
+## UDF Naming Conventions
-According to the kind of UDF to implement, the functions that need to be implemented are different.
+The naming convention for the 3 kinds of function templates required by UDF is as follows:
+ - udfNormalFunc, udfMergeFunc, and udfFinalizeFunc are required to have same prefix, i.e. the actual name of udfNormalFunc. The udfNormalFunc doesn't need a suffix following the function name.
+ - udfMergeFunc should be udfNormalFunc followed by `_merge`
+ - udfFinalizeFunc should be udfNormalFunc followed by `_finalize`.
+
+The naming convention is part of TDengine's UDF framework. TDengine follows this convention to invoke the corresponding actual functions.
-- Scalar function:udfNormalFunc is required
-- Aggregate function:udfNormalFunc, udfMergeFunc (if query on STable) and udfFinalizeFunc are required
+Depending on whether you are creating a scalar UDF or aggregate UDF, the functions that you need to implement are different.
-To be more accurate, assuming we want to implement a UDF named "foo". If the function is a scalar function, what we really need to implement is `foo`; if the function is aggregate function, we need to implement `foo`, `foo_merge`, and `foo_finalize`. For aggregate UDF, even though one of the three functions is not necessary, there must be an empty implementation.
+- Scalar function:udfNormalFunc is required.
+- Aggregate function:udfNormalFunc, udfMergeFunc (if query on STable) and udfFinalizeFunc are required.
+
+For clarity, assuming we want to implement a UDF named "foo":
+- If the function is a scalar function, we only need to implement the "normal" function template and it should be named simply `foo`.
+- If the function is an aggregate function, we need to implement `foo`, `foo_merge`, and `foo_finalize`. Note that for aggregate UDF, even though one of the three functions is not necessary, there must be an empty implementation.
## Compile UDF
-The source code of UDF in C can't be utilized by TDengine directly. UDF can only be loaded into TDengine after compiling to dynamically linked library.
+The source code of UDF in C can't be utilized by TDengine directly. UDF can only be loaded into TDengine after compiling to dynamically linked library (DLL).
-For example, the example UDF `add_one.c` mentioned in previous sections need to be compiled into DLL using below command on Linux Shell.
+For example, the example UDF `add_one.c` mentioned earlier, can be compiled into DLL using the command below, in a Linux Shell.
```bash
gcc -g -O0 -fPIC -shared add_one.c -o add_one.so
```
-The generated DLL file `dd_one.so` can be used later when creating UDF. It's recommended to use GCC not older than 7.5.
+The generated DLL file `add_one.so` can be used later when creating a UDF. It's recommended to use GCC not older than 7.5.
## Create and Use UDF
+When a UDF is created in a TDengine instance, it is available across the databases in that instance.
+
### Create UDF
-SQL command can be executed on the same hos where the generated UDF DLL resides to load the UDF DLL into TDengine, this operation can't be done through REST interface or web console. Once created, all the clients of the current TDengine can use these UDF functions in their SQL commands. UDF are stored in the management node of TDengine. The UDFs loaded in TDengine would be still available after TDengine is restarted.
+SQL command can be executed on the host where the generated UDF DLL resides to load the UDF DLL into TDengine. This operation cannot be done through REST interface or web console. Once created, any client of the current TDengine can use these UDF functions in their SQL commands. UDF are stored in the management node of TDengine. The UDFs loaded in TDengine would be still available after TDengine is restarted.
-When creating UDF, it needs to be clarified as either scalar function or aggregate function. If the specified type is wrong, the SQL statements using the function would fail with error. Besides, the input type and output type don't need to be same in UDF, but the input data type and output data type need to be consistent with the UDF definition.
+When creating UDF, the type of UDF, i.e. a scalar function or aggregate function must be specified. If the specified type is wrong, the SQL statements using the function would fail with errors. The input type and output type don't need to be the same in UDF, but the input data type and output data type must be consistent with the UDF definition.
- Create Scalar Function
```sql
-CREATE FUNCTION ids(X) AS ids(Y) OUTPUTTYPE typename(Z) [ BUFSIZE B ];
+CREATE FUNCTION userDefinedFunctionName AS "/absolute/path/to/userDefinedFunctionName.so" OUTPUTTYPE [BUFSIZE B];
```
-- ids(X):the function name to be sued in SQL statement, must be consistent with the function name defined by `udfNormalFunc`
-- ids(Y):the absolute path of the DLL file including the implementation of the UDF, the path needs to be quoted by single or double quotes
-- typename(Z):the output data type, the value is the literal string of the type
-- B:the size of intermediate buffer, in bytes; it's an optional parameter and the range is [0,512]
+- userDefinedFunctionName:The function name to be used in SQL statement which must be consistent with the function name defined by `udfNormalFunc` and is also the name of the compiled DLL (.so file).
+- path:The absolute path of the DLL file including the name of the shared object file (.so). The path must be quoted with single or double quotes.
+- outputtype:The output data type, the value is the literal string of the supported TDengine data type.
+- B:the size of intermediate buffer, in bytes; it is an optional parameter and the range is [0,512].
For example, below SQL statement can be used to create a UDF from `add_one.so`.
@@ -123,17 +145,17 @@ CREATE FUNCTION add_one AS "/home/taos/udf_example/add_one.so" OUTPUTTYPE INT;
- Create Aggregate Function
```sql
-CREATE AGGREGATE FUNCTION ids(X) AS ids(Y) OUTPUTTYPE typename(Z) [ BUFSIZE B ];
+CREATE AGGREGATE FUNCTION userDefinedFunctionName AS "/absolute/path/to/userDefinedFunctionName.so" OUTPUTTYPE [ BUFSIZE B ];
```
-- ids(X):the function name to be sued in SQL statement, must be consistent with the function name defined by `udfNormalFunc`
-- ids(Y):the absolute path of the DLL file including the implementation of the UDF, the path needs to be quoted by single or double quotes
-- typename(Z):the output data type, the value is the literal string of the type
+- userDefinedFunctionName:the function name to be used in SQL statement which must be consistent with the function name defined by `udfNormalFunc` and is also the name of the compiled DLL (.so file).
+- path:the absolute path of the DLL file including the name of the shared object file (.so). The path needs to be quoted by single or double quotes.
+- OUTPUTTYPE:the output data type, the value is the literal string of the type
- B:the size of intermediate buffer, in bytes; it's an optional parameter and the range is [0,512]
For details about how to use intermediate result, please refer to example program [demo.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/demo.c).
-For example, below SQL statement can be used to create a UDF rom `demo.so`.
+For example, below SQL statement can be used to create a UDF from `demo.so`.
```sql
CREATE AGGREGATE FUNCTION demo AS "/home/taos/udf_example/demo.so" OUTPUTTYPE DOUBLE bufsize 14;
@@ -176,11 +198,11 @@ In current version there are some restrictions for UDF
1. Only Linux is supported when creating and invoking UDF for both client side and server side
2. UDF can't be mixed with builtin functions
3. Only one UDF can be used in a SQL statement
-4. Single column is supported as input for UDF
+4. Only a single column is supported as input for UDF
5. Once created successfully, UDF is persisted in MNode of TDengineUDF
6. UDF can't be created through REST interface
7. The function name used when creating UDF in SQL must be consistent with the function name defined in the DLL, i.e. the name defined by `udfNormalFunc`
-8. The name name of UDF name should not conflict with any of builtin functions
+8. The name of a UDF should not conflict with any of TDengine's built-in functions
## Examples
diff --git a/docs-en/07-develop/index.md b/docs-en/07-develop/index.md
index 122dd0d870ac42b62c4f9e694cf79eec3ca122a5..e3f55f290753f79ac1708337082ce90bb050b21f 100644
--- a/docs-en/07-develop/index.md
+++ b/docs-en/07-develop/index.md
@@ -2,15 +2,15 @@
title: Developer Guide
---
-To develop an application using TDengine to process time-series data, we recommend taking the following steps:
+To develop an application to process time-series data using TDengine, we recommend taking the following steps:
-1. Choose the way for connection to TDengine. No matter what programming language you use, you can always use the REST interface to access TDengine, but you can also use connectors unique to each programming language.
-2. Design the data model based on your own application scenarios. Learn the [concepts](/concept/) of TDengine including "one table for one data collection point" and the "super table" concept; learn about static labels, collected metrics, and subtables. According to the data characteristics, you may decide to create one or more databases, and you should design the STable schema to fit your data.
-3. Decide how to insert data. TDengine supports writing using standard SQL, but also supports schemaless writing, so that data can be written directly without creating tables manually.
-4. Based on business requirements, find out what SQL query statements need to be written.
+1. Choose the method to connect to TDengine. No matter what programming language you use, you can always use the REST interface to access TDengine, but you can also use connectors unique to each programming language.
+2. Design the data model based on your own use cases. Learn the [concepts](/concept/) of TDengine including "one table for one data collection point" and the "super table" (STable) concept; learn about static labels, collected metrics, and subtables. Depending on the characteristics of your data and your requirements, you may decide to create one or more databases, and you should design the STable schema to fit your data.
+3. Decide how you will insert data. TDengine supports writing using standard SQL, but also supports schemaless writing, so that data can be written directly without creating tables manually.
+4. Based on business requirements, find out what SQL query statements need to be written. You may be able to repurpose any existing SQL.
5. If you want to run real-time analysis based on time series data, including various dashboards, it is recommended that you use the TDengine continuous query feature instead of deploying complex streaming processing systems such as Spark or Flink.
6. If your application has modules that need to consume inserted data, and they need to be notified when new data is inserted, it is recommended that you use the data subscription function provided by TDengine without the need to deploy Kafka.
-7. In many scenarios (such as fleet management), the application needs to obtain the latest status of each data collection point. It is recommended that you use the cache function of TDengine instead of deploying Redis separately.
+7. In many use cases (such as fleet management), the application needs to obtain the latest status of each data collection point. It is recommended that you use the cache function of TDengine instead of deploying Redis separately.
8. If you find that the SQL functions of TDengine cannot meet your requirements, then you can use user-defined functions to solve the problem.
This section is organized in the order described above. For ease of understanding, TDengine provides sample code for each supported programming language for each function. If you want to learn more about the use of SQL, please read the [SQL manual](/taos-sql/). For a more in-depth understanding of the use of each connector, please read the [Connector Reference Guide](/reference/connector/). If you also want to integrate TDengine with third-party systems, such as Grafana, please refer to the [third-party tools](/third-party/).
diff --git a/docs-en/10-cluster/01-deploy.md b/docs-en/10-cluster/01-deploy.md
index 8c921797ec038fb8afbf382a980b8f7a197fa898..200da1be3f8185818bd21dd3fcdc78c124a36831 100644
--- a/docs-en/10-cluster/01-deploy.md
+++ b/docs-en/10-cluster/01-deploy.md
@@ -6,29 +6,35 @@ title: Deployment
### Step 1
-The FQDN of all hosts need to be setup properly, all the FQDNs need to be configured in the /etc/hosts of each host. It must be guaranteed that each FQDN can be accessed (by ping, for example) from any other hosts.
+The FQDN of all hosts must be setup properly. For e.g. FQDNs may have to be configured in the /etc/hosts file on each host. You must confirm that each FQDN can be accessed from any other host. For e.g. you can do this by using the `ping` command.
-On each host command `hostname -f` can be executed to get the hostname. `ping` command can be executed on each host to check whether any other host is accessible from it. If any host is not accessible, the network configuration, like /etc/hosts or DNS configuration, need to be checked and revised to make any two hosts accessible to each other.
+To get the hostname on any host, the command `hostname -f` can be executed. `ping ` command can be executed on each host to check whether any other host is accessible from it. If any host is not accessible, the network configuration, like /etc/hosts or DNS configuration, needs to be checked and revised, to make any two hosts accessible to each other.
:::note
-- The host where the client program runs also needs to configured properly for FQDN, to make sure all hosts for client or server can be accessed from any other. In other words, the hosts where the client is running are also considered as a part of the cluster.
+- The host where the client program runs also needs to be configured properly for FQDN, to make sure all hosts for client or server can be accessed from any other. In other words, the hosts where the client is running are also considered as a part of the cluster.
-- It's suggested to disable the firewall for all hosts in the cluster. At least TCP/UDP for port 6030~6042 need to be open if firewall is enabled.
+- Please ensure that your firewall rules do not block TCP/UDP on ports 6030-6042 on all hosts in the cluster.
:::
### Step 2
-If any previous version of TDengine has been installed and configured on any host, the installation needs to be removed and the data needs to be cleaned up. For details about uninstalling please refer to [Install and Uninstall](/operation/pkg-install). To clean up the data, please use `rm -rf /var/lib/taos/\*` assuming the `dataDir` is configured as `/var/lib/taos`.
+If any previous version of TDengine has been installed and configured on any host, the installation needs to be removed and the data needs to be cleaned up. For details about uninstalling please refer to [Install and Uninstall](/operation/pkg-install). To clean up the data, please use `rm -rf /var/lib/taos/\*` assuming the `dataDir` is configured as `/var/lib/taos`.
+
+:::note
+
+As a best practice, before cleaning up any data files or directories, please ensure that your data has been backed up correctly, if required by your data integrity, backup, security, or other standard operating protocols (SOP).
+
+:::
### Step 3
-Now it's time to install TDengine on all hosts without starting `taosd`, the versions on all hosts should be same. If it's prompted to input the existing TDengine cluster, simply press carriage return to ignore it. `install.sh -e no` can also be used to disable this prompt. For details please refer to [Install and Uninstall](/operation/pkg-install).
+Now it's time to install TDengine on all hosts but without starting `taosd`. Note that the versions on all hosts should be same. If you are prompted to input the existing TDengine cluster, simply press carriage return to ignore the prompt. `install.sh -e no` can also be used to disable this prompt. For details please refer to [Install and Uninstall](/operation/pkg-install).
### Step 4
-Now each physical node (referred to as `dnode` hereinafter, it's abbreviation for "data node") of TDengine need to be configured properly. Please be noted that one dnode doesn't stand for one host, multiple TDengine nodes can be started on single host as long as they are configured properly without conflicting. More specifically each instance of the configuration file `taos.cfg` stands for a dnode. Assuming the first dnode of TDengine cluster is "h1.taosdata.com:6030", its `taos.cfg` is configured as following.
+Now each physical node (referred to, hereinafter, as `dnode` which is an abbreviation for "data node") of TDengine needs to be configured properly. Please note that one dnode doesn't stand for one host. Multiple TDengine dnodes can be started on a single host as long as they are configured properly without conflicting. More specifically each instance of the configuration file `taos.cfg` stands for a dnode. Assuming the first dnode of TDengine cluster is "h1.taosdata.com:6030", its `taos.cfg` is configured as following.
```c
// firstEp is the end point to connect to when any dnode starts
@@ -44,9 +50,9 @@ serverPort 6030
#arbitrator ha.taosdata.com:6042
```
-`firstEp` and `fqdn` must be configured properly. In `taos.cfg` of all dnodes in TDengine cluster, `firstEp` must be configured to point to same address, i.e. the first dnode of the cluster. `fqdn` and `serverPort` compose the address of each node itself. If you want to start multiple TDengine dnodes on a single host, please also make sure all other configurations like `dataDir`, `logDir`, and other resources related parameters are not conflicting.
+`firstEp` and `fqdn` must be configured properly. In `taos.cfg` of all dnodes in TDengine cluster, `firstEp` must be configured to point to same address, i.e. the first dnode of the cluster. `fqdn` and `serverPort` compose the address of each node itself. If you want to start multiple TDengine dnodes on a single host, please make sure all other configurations like `dataDir`, `logDir`, and other resources related parameters are not conflicting.
-For all the dnodes in a TDengine cluster, below parameters must be configured as exactly same, any node whose configuration is different from dnodes already in the cluster can't join the cluster.
+For all the dnodes in a TDengine cluster, the below parameters must be configured exactly the same, any node whose configuration is different from dnodes already in the cluster can't join the cluster.
| **#** | **Parameter** | **Definition** |
| ----- | ------------------ | --------------------------------------------------------------------------------- |
@@ -61,15 +67,17 @@ For all the dnodes in a TDengine cluster, below parameters must be configured as
| 9 | maxVgroupsPerDb | Maximum number vgroups that can be used by each DB |
:::note
-Prior to version 2.0.19.0, besides the above parameters, `locale` and `charset` must be configured as same too for each dnode.
+Prior to version 2.0.19.0, besides the above parameters, `locale` and `charset` must also be configured the same for each dnode.
:::
## Start Cluster
+In the following example we assume that first dnode has FQDN h1.taosdata.com and the second dnode has FQDN h2.taosdata.com.
+
### Start The First DNODE
-The first dnode can be started following the instructions in [Get Started](/get-started/), for example h1.taosdata.com. Then TDengine CLI `taos` can be launched to execute command `show dnodes`, the output is as following for example:
+The first dnode can be started following the instructions in [Get Started](/get-started/). Then TDengine CLI `taos` can be launched to execute command `show dnodes`, the output is as following for example:
```
Welcome to the TDengine shell from Linux, Client Version:2.0.0.0
@@ -80,27 +88,41 @@ Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.
taos> show dnodes;
id | end_point | vnodes | cores | status | role | create_time |
=====================================================================================
- 1 | h1.taos.com:6030 | 0 | 2 | ready | any | 2020-07-31 03:49:29.202 |
+ 1 | h1.taosdata.com:6030 | 0 | 2 | ready | any | 2020-07-31 03:49:29.202 |
Query OK, 1 row(s) in set (0.006385s)
taos>
```
-From the above output, it is shown that the end point of the started dnode is "h1.taos.com:6030", which is the `firstEp` of the cluster.
+From the above output, it is shown that the end point of the started dnode is "h1.taosdata.com:6030", which is the `firstEp` of the cluster.
### Start Other DNODEs
There are a few steps necessary to add other dnodes in the cluster.
-Firstly, start `taosd` as instructed in [Get Started](/get-started/), assuming it's for the second dnode. Before starting `taosd`, please making sure the configuration is correct, especially `firstEp`, `FQDN` and `serverPort`, `firstEp` must be same as the dnode shown in the section "Start First DNODE", i.e. "h1.taosdata.com" in this example.
+Let's assume we are starting the second dnode with FQDN, h2.taosdata.com. First we make sure the configuration is correct.
+
+```c
+// firstEp is the end point to connect to when any dnode starts
+firstEp h1.taosdata.com:6030
+
+// must be configured to the FQDN of the host where the dnode is launched
+fqdn h2.taosdata.com
+
+// the port used by the dnode, default is 6030
+serverPort 6030
+
+```
+
+Second, we can start `taosd` as instructed in [Get Started](/get-started/).
-Then, on the first dnode, use TDengine CLI `taos` to execute below command to add the end point of the dnode in the cluster. In the command "fqdn:port" should be quoted using double quotes.
+Then, on the first dnode i.e. h1.taosdata.com in our example, use TDengine CLI `taos` to execute the following command to add the end point of the dnode in the cluster. In the command "fqdn:port" should be quoted using double quotes.
```sql
CREATE DNODE "h2.taos.com:6030";
```
-Then on the first dnode, execute `show dnodes` in `taos` to show whether the second dnode has been added in the cluster successfully or not.
+Then on the first dnode h1.taosdata.com, execute `show dnodes` in `taos` to show whether the second dnode has been added in the cluster successfully or not.
```sql
SHOW DNODES;
@@ -109,6 +131,6 @@ SHOW DNODES;
If the status of the newly added dnode is offline, please check:
- Whether the `taosd` process is running properly or not
-- In the log file `taosdlog.0` to see whether the fqdn and port are correct or not
+- In the log file `taosdlog.0` to see whether the fqdn and port are correct
The above process can be repeated to add more dnodes in the cluster.
diff --git a/docs-en/10-cluster/02-cluster-mgmt.md b/docs-en/10-cluster/02-cluster-mgmt.md
index 3fcd68b29ce08519af9a0cde11d5361c6b4cd312..674c92e2766a4eb304079140af19c8efea72d55e 100644
--- a/docs-en/10-cluster/02-cluster-mgmt.md
+++ b/docs-en/10-cluster/02-cluster-mgmt.md
@@ -3,16 +3,16 @@ sidebar_label: Operation
title: Manage DNODEs
---
-It has been introduced that how to deploy and start a cluster from scratch. Once a cluster is ready, the dnode status in the cluster can be shown at any time, new dnode can be added to scale out the cluster, an existing dnode can be removed, even load balance can be performed manually.\
+The previous section, [Deployment],(/cluster/deploy) showed you how to deploy and start a cluster from scratch. Once a cluster is ready, the status of dnode(s) in the cluster can be shown at any time. Dnodes can be managed from the TDengine CLI. New dnode(s) can be added to scale out the cluster, an existing dnode can be removed and you can even perform load balancing manually, if necessary.
:::note
-All the commands to be introduced in this chapter need to be run through TDengine CLI, sometimes it's necessary to use root privilege.
+All the commands introduced in this chapter must be run in the TDengine CLI - `taos`. Note that sometimes it is necessary to use root privilege.
:::
## Show DNODEs
-below command can be executed in TDengine CLI `taos` to list all dnodes in the cluster, including ID, end point (fqdn:port), status (ready, offline), number of vnodes, number of free vnodes, etc. It's suggested to execute this command to check after adding or removing a dnode.
+The below command can be executed in TDengine CLI `taos` to list all dnodes in the cluster, including ID, end point (fqdn:port), status (ready, offline), number of vnodes, number of free vnodes and so on. We recommend executing this command after adding or removing a dnode.
```sql
SHOW DNODES;
@@ -30,7 +30,7 @@ Query OK, 1 row(s) in set (0.008298s)
## Show VGROUPs
-To utilize system resources efficiently and provide scalability, data sharding is required. The data of each database is divided into multiple shards and stored in multiple vnodes. These vnodes may be located in different dnodes, scaling out can be achieved by adding more vnodes from more dnodes. Each vnode can only be used for a single DB, but one DB can have multiple vnodes. The allocation of vnode is scheduled automatically by mnode according to system resources of the dnodes.
+To utilize system resources efficiently and provide scalability, data sharding is required. The data of each database is divided into multiple shards and stored in multiple vnodes. These vnodes may be located on different dnodes. One way of scaling out is to add more vnodes on dnodes. Each vnode can only be used for a single DB, but one DB can have multiple vnodes. The allocation of vnode is scheduled automatically by mnode based on system resources of the dnodes.
Launch TDengine CLI `taos` and execute below command:
@@ -39,7 +39,7 @@ USE SOME_DATABASE;
SHOW VGROUPS;
```
-The example output is as below:
+The example output is below:
```
taos> show dnodes;
@@ -87,7 +87,7 @@ taos> show dnodes;
Query OK, 2 row(s) in set (0.001017s)
```
-It can be seen that the status of the new dnode is "offline", once the dnode is started and connects the firstEp of the cluster, execute the command again and get below example output, from which it can be seen that two dnodes are both in "ready" status.
+It can be seen that the status of the new dnode is "offline". Once the dnode is started and connects to the firstEp of the cluster, you can execute the command again and get the example output below. As can be seen, both dnodes are in "ready" status.
```
taos> show dnodes;
@@ -100,7 +100,7 @@ Query OK, 2 row(s) in set (0.001316s)
## Drop DNODE
-Launch TDengine CLI `taos` and execute the command below to drop or remove a dnode from the cluster. In the command, `dnodeId` can be gotten from `show dnodes`.
+Launch TDengine CLI `taos` and execute the command below to drop or remove a dnode from the cluster. In the command, you can get `dnodeId` from `show dnodes`.
```sql
DROP DNODE "fqdn:port";
@@ -112,7 +112,7 @@ or
DROP DNODE dnodeId;
```
-The example output is as below:
+The example output is below:
```
taos> show dnodes;
@@ -132,14 +132,14 @@ taos> show dnodes;
Query OK, 1 row(s) in set (0.001137s)
```
-In the above example, when `show dnodes` is executed the first time, two dnodes are shown. Then `drop dnode 2` is executed, after that from the output of executing `show dnodes` again it can be seen that only the dnode with ID 1 is still in the cluster.
+In the above example, when `show dnodes` is executed the first time, two dnodes are shown. After `drop dnode 2` is executed, you can execute `show dnodes` again and it can be seen that only the dnode with ID 1 is still in the cluster.
:::note
-- Once a dnode is dropped, it can't rejoin the cluster. To rejoin, the dnode needs to deployed again after cleaning up the data directory. Normally, before dropping a dnode, the data belonging to the dnode needs to be migrated to other place.
-- Please be noted that `drop dnode` is different from stopping `taosd` process. `drop dnode` just removes the dnode out of TDengine cluster. Only after a dnode is dropped, can the corresponding `taosd` process be stopped.
+- Once a dnode is dropped, it can't rejoin the cluster. To rejoin, the dnode needs to deployed again after cleaning up the data directory. Before dropping a dnode, the data belonging to the dnode MUST be migrated/backed up according to your data retention, data security or other SOPs.
+- Please note that `drop dnode` is different from stopping `taosd` process. `drop dnode` just removes the dnode out of TDengine cluster. Only after a dnode is dropped, can the corresponding `taosd` process be stopped.
- Once a dnode is dropped, other dnodes in the cluster will be notified of the drop and will not accept the request from the dropped dnode.
-- dnodeID is allocated automatically and can't be interfered manually. dnodeID is generated in ascending order without duplication.
+- dnodeID is allocated automatically and can't be manually modified. dnodeID is generated in ascending order without duplication.
:::
@@ -155,7 +155,7 @@ ALTER DNODE BALANCE "VNODE:-DNODE:";
In the above command, `source-dnodeId` is the original dnodeId where the vnode resides, `dest-dnodeId` specifies the target dnode. vgId (vgroup ID) can be shown by `SHOW VGROUPS `.
-Firstly `show vgroups` is executed to show the vgroup distribution.
+First `show vgroups` is executed to show the vgroup distribution.
```
taos> show vgroups;
@@ -172,7 +172,7 @@ taos> show vgroups;
Query OK, 8 row(s) in set (0.001314s)
```
-It can be seen that there are 5 vgroups in dnode 3 and 3 vgroups in node 1, now we want to move vgId 18 from dnode 3 to dnode 1. Execute below command in `taos`
+It can be seen that there are 5 vgroups in dnode 3 and 3 vgroups in node 1, now we want to move vgId 18 from dnode 3 to dnode 1. Execute the below command in `taos`
```
taos> alter dnode 3 balance "vnode:18-dnode:1";
@@ -207,7 +207,7 @@ It can be seen from above output that vgId 18 has been moved from dnode 3 to dno
:::note
- Manual load balancing can only be performed when the automatic load balancing is disabled, i.e. `balance` is set to 0.
-- Only vnode in normal state, i.e. master or slave, can be moved. vnode can't moved when its in status offline, unsynced or syncing.
+- Only a vnode in normal state, i.e. master or slave, can be moved. vnode can't be moved when its in status offline, unsynced or syncing.
- Before moving a vnode, it's necessary to make sure the target dnode has enough resources: CPU, memory and disk.
:::
diff --git a/docs-en/10-cluster/03-ha-and-lb.md b/docs-en/10-cluster/03-ha-and-lb.md
index 53c95be9e995a728b2b4053e4f204df58271716e..bd718eef9f8dc181628132de831dbca2af59d158 100644
--- a/docs-en/10-cluster/03-ha-and-lb.md
+++ b/docs-en/10-cluster/03-ha-and-lb.md
@@ -7,44 +7,45 @@ title: High Availability and Load Balancing
High availability of vnode and mnode can be achieved through replicas in TDengine.
-The number of vnodes is associated with each DB, there can be multiple DBs in a TDengine cluster. For the purpose of operation, different number of replicas can be configured properly for each DB. When creating a database, the parameter `replica` is used to specify the number of replicas, the default value is 1. With single replica, the high availability of the system can't be guaranteed. Whenever one node is down, data service would be unavailable. The number of dnodes in the cluster must NOT be lower than the number of replicas set for any DB, otherwise the `create table` operation would fail with error "more dnodes are needed". Below SQL statement is used to create a database named as "demo" with 3 replicas.
+A TDengine cluster can have multiple databases. Each database has a number of vnodes associated with it. A different number of replicas can be configured for each DB. When creating a database, the parameter `replica` is used to specify the number of replicas. The default value for `replica` is 1. Naturally, a single replica cannot guarantee high availability since if one node is down, the data service is unavailable. Note that the number of dnodes in the cluster must NOT be lower than the number of replicas set for any DB, otherwise the `create table` operation will fail with error "more dnodes are needed". The SQL statement below is used to create a database named "demo" with 3 replicas.
```sql
CREATE DATABASE demo replica 3;
```
-The data in a DB is divided into multiple shards and stored in multiple vgroups. The number of vnodes in each group is determined by the number of replicas set for the DB. The vnodes in each vgroups store exactly same data. For the purpose of high availability, the vnodes in a vgroup must be located in different dnodes on different hosts. As long as over half of the vnodes in a vgroup are in online state, the vgroup is able to serve data access. Otherwise the vgroup can't handle any data access for reading or inserting data.
+The data in a DB is divided into multiple shards and stored in multiple vgroups. The number of vnodes in each vgroup is determined by the number of replicas set for the DB. The vnodes in each vgroup store exactly the same data. For the purpose of high availability, the vnodes in a vgroup must be located in different dnodes on different hosts. As long as over half of the vnodes in a vgroup are in an online state, the vgroup is able to provide data access. Otherwise the vgroup can't provide data access for reading or inserting data.
-There may be data for multiple DBs in a dnode. Once a dnode is down, multiple DBs may be affected. However, it's hard to say the cluster is guaranteed to work properly as long as over half of dnodes are online because vnodes are introduced and there may be complex mapping between vnodes and dnodes.
+There may be data for multiple DBs in a dnode. When a dnode is down, multiple DBs may be affected. While in theory, the cluster will provide data access for reading or inserting data if over half the vnodes in vgroups are online, because of the possibly complex mapping between vnodes and dnodes, it is difficult to guarantee that the cluster will work properly if over half of the dnodes are online.
## High Availability of Mnode
-Each TDengine cluster is managed by `mnode`, which is a module of `taosd`. For the high availability of mnode, multiple mnodes can be configured using system parameter `numOfMNodes`, the valid time range is [1,3]. To make sure the data consistency between mnodes, the data replication between mnodes is performed in synchronous way.
+Each TDengine cluster is managed by `mnode`, which is a module of `taosd`. For the high availability of mnode, multiple mnodes can be configured using system parameter `numOfMNodes`. The valid range for `numOfMnodes` is [1,3]. To ensure data consistency between mnodes, data replication between mnodes is performed synchronously.
-There may be multiple dnodes in a cluster, but only one mnode can be started in each dnode. Which one or ones of the dnodes will be designated as mnodes is automatically determined by TDengine according to the cluster configuration and system resources. Command `show mnodes` can be executed in TDengine `taos` to show the mnodes in the cluster.
+There may be multiple dnodes in a cluster, but only one mnode can be started in each dnode. Which one or ones of the dnodes will be designated as mnodes is automatically determined by TDengine according to the cluster configuration and system resources. The command `show mnodes` can be executed in TDengine `taos` to show the mnodes in the cluster.
```sql
SHOW MNODES;
```
-The end point and role/status (master, slave, unsynced, or offline) of all mnodes can be shown by the above command. When the first dnode is started in a cluster, there must be one mnode in this dnode, because there must be at least one mnode otherwise the cluster doesn't work. If `numOfMNodes` is configured to 2, another mnode will be started when the second dnode is launched.
+The end point and role/status (master, slave, unsynced, or offline) of all mnodes can be shown by the above command. When the first dnode is started in a cluster, there must be one mnode in this dnode. Without at least one mnode, the cluster cannot work. If `numOfMNodes` is configured to 2, another mnode will be started when the second dnode is launched.
For the high availability of mnode, `numOfMnodes` needs to be configured to 2 or a higher value. Because the data consistency between mnodes must be guaranteed, the replica confirmation parameter `quorum` is set to 2 automatically if `numOfMNodes` is set to 2 or higher.
:::note
-If high availability is important for your system, both vnode and mnode must be configured to have multiple replicas. How to configure for them are different and have been described.
+If high availability is important for your system, both vnode and mnode must be configured to have multiple replicas.
:::
-## Load Balance
+## Load Balancing
-Load balance will be triggered in 3 cades without manual intervention.
+Load balancing will be triggered in 3 cases without manual intervention.
-- When a new dnode is joined in the cluster, automatic load balancing may be triggered, some data from some dnodes may be transferred to the new dnode automatically.
+- When a new dnode joins the cluster, automatic load balancing may be triggered. Some data from other dnodes may be transferred to the new dnode automatically.
- When a dnode is removed from the cluster, the data from this dnode will be transferred to other dnodes automatically.
- When a dnode is too hot, i.e. too much data has been stored in it, automatic load balancing may be triggered to migrate some vnodes from this dnode to other dnodes.
-- :::tip
- Automatic load balancing is controlled by parameter `balance`, 0 means disabled and 1 means enabled.
+
+:::tip
+Automatic load balancing is controlled by the parameter `balance`, 0 means disabled and 1 means enabled. This is set in the file [taos.cfg](https://docs.tdengine.com/reference/config/#balance).
:::
@@ -52,26 +53,26 @@ Load balance will be triggered in 3 cades without manual intervention.
When a dnode is offline, it can be detected by the TDengine cluster. There are two cases:
-- The dnode becomes online again before the threshold configured in `offlineThreshold` is reached, it is still in the cluster and data replication is started automatically. The dnode can work properly after the data syncup is finished.
+- The dnode comes online before the threshold configured in `offlineThreshold` is reached. The dnode is still in the cluster and data replication is started automatically. The dnode can work properly after the data sync is finished.
-- If the dnode has been offline over the threshold configured in `offlineThreshold` in `taos.cfg`, the dnode will be removed from the cluster automatically. System alert will be generated and automatic load balancing will be triggered too if `balance` is set to 1. When the removed dnode is restarted and becomes online, it will not be joined in the cluster automatically, it can only be joined manually by the system operator.
+- If the dnode has been offline over the threshold configured in `offlineThreshold` in `taos.cfg`, the dnode will be removed from the cluster automatically. A system alert will be generated and automatic load balancing will be triggered if `balance` is set to 1. When the removed dnode is restarted and becomes online, it will not join the cluster automatically. The system administrator has to manually join the dnode to the cluster.
:::note
-If all the vnodes in a vgroup (or mnodes in mnode group) are in offline or unsynced status, the master node can only be voted after all the vnodes or mnodes in the group become online and can exchange status, then the vgroup (or mnode group) is able to provide service.
+If all the vnodes in a vgroup (or mnodes in mnode group) are in offline or unsynced status, the master node can only be voted on, after all the vnodes or mnodes in the group become online and can exchange status. Following this, the vgroup (or mnode group) is able to provide service.
:::
## Arbitrator
-If the number of replicas is set to an even number like 2, when half of the vnodes in a vgroup don't work master node can't be voted. Similar case is also applicable to mnode if the number of mnodes is set to an even number like 2.
+The "arbitrator" component is used to address the special case when the number of replicas is set to an even number like 2,4 etc. If half of the vnodes in a vgroup don't work, it is impossible to vote and select a master node. This situation also applies to mnodes if the number of mnodes is set to an even number like 2,4 etc.
-To resolve this problem, a new arbitrator component named `tarbitrator`, abbreviated for TDengine Arbitrator, was introduced. Arbitrator simulates a vnode or mnode but it's only responsible for network communication and doesn't handle any actual data access. With Arbitrator, any vgroup or mnode group can be considered as having number of member nodes and master node can be selected.
+To resolve this problem, a new arbitrator component named `tarbitrator`, an abbreviation of TDengine Arbitrator, was introduced. The `tarbitrator` simulates a vnode or mnode but it's only responsible for network communication and doesn't handle any actual data access. As long as more than half of the vnode or mnode, including Arbitrator, are available the vnode group or mnode group can provide data insertion or query services normally.
-Normally, it's suggested to configure replica number of each DB or system parameter `numOfMNodes` to an odd number. However, if a user is very sensitive to storage space, replica number of 2 plus arbitrator component can be used to achieve both lower cost of storage space and high availability.
+Normally, it's prudent to configure the replica number for each DB or system parameter `numOfMNodes` to be an odd number. However, if a user is very sensitive to storage space, a replica number of 2 plus arbitrator component can be used to achieve both lower cost of storage space and high availability.
Arbitrator component is installed with the server package. For details about how to install, please refer to [Install](/operation/pkg-install). The `-p` parameter of `tarbitrator` can be used to specify the port on which it provides service.
-In the configuration file `taos.cfg` of each dnode, parameter `arbitrator` needs to be configured to the end point of the `tarbitrator` process. arbitrator component will be used automatically if the replica is configured to an even number and will be ignored if the replica is configured to an odd number.
+In the configuration file `taos.cfg` of each dnode, parameter `arbitrator` needs to be configured to the end point of the `tarbitrator` process. Arbitrator component will be used automatically if the replica is configured to an even number and will be ignored if the replica is configured to an odd number.
Arbitrator can be shown by executing command in TDengine CLI `taos` with its role shown as "arb".
diff --git a/docs-en/10-cluster/index.md b/docs-en/10-cluster/index.md
index a19a54e01d5a6429e95958c2544072961b0cb66a..5a45a2ce7b08c67322265cf1bbd54ef66cbfc027 100644
--- a/docs-en/10-cluster/index.md
+++ b/docs-en/10-cluster/index.md
@@ -3,7 +3,7 @@ title: Cluster
keywords: ["cluster", "high availability", "load balance", "scale out"]
---
-TDengine has a native distributed design and provides the ability to scale out. A few of nodes can form a TDengine cluster. If you need to get higher processing power, you just need to add more nodes into the cluster. TDengine uses virtual node technology to virtualize a node into multiple virtual nodes to achieve load balancing. At the same time, TDengine can group virtual nodes on different nodes into virtual node groups, and use the replication mechanism to ensure the high availability of the system. The cluster feature of TDengine is completely open source.
+TDengine has a native distributed design and provides the ability to scale out. A few nodes can form a TDengine cluster. If you need higher processing power, you just need to add more nodes into the cluster. TDengine uses virtual node technology to virtualize a node into multiple virtual nodes to achieve load balancing. At the same time, TDengine can group virtual nodes on different nodes into virtual node groups, and use the replication mechanism to ensure the high availability of the system. The cluster feature of TDengine is completely open source.
This chapter mainly introduces cluster deployment, maintenance, and how to achieve high availability and load balancing.
diff --git a/docs-en/12-taos-sql/01-data-type.md b/docs-en/12-taos-sql/01-data-type.md
index 931e3bbac7f0601a9de79d0dfa04ffc94ecced96..d038219c8ac66db52416001f7a79c71018e2ca33 100644
--- a/docs-en/12-taos-sql/01-data-type.md
+++ b/docs-en/12-taos-sql/01-data-type.md
@@ -1,49 +1,69 @@
---
title: Data Types
-description: "The data types supported by TDengine include timestamp, float, JSON, etc"
+description: "TDengine supports a variety of data types including timestamp, float, JSON and many others."
---
-When using TDengine to store and query data, the most important part of the data is timestamp. Timestamp must be specified when creating and inserting data rows or querying data, timestamp must follow below rules:
+## TIMESTAMP
-- the format must be `YYYY-MM-DD HH:mm:ss.MS`, the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128`
-- internal function `now` can be used to get the current timestamp of the client side
-- the current timestamp of the client side is applied when `now` is used to insert data
+When using TDengine to store and query data, the most important part of the data is timestamp. Timestamp must be specified when creating and inserting data rows. Timestamp must follow the rules below:
+
+- The format must be `YYYY-MM-DD HH:mm:ss.MS`, the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128`
+- Internal function `now` can be used to get the current timestamp on the client side
+- The current timestamp of the client side is applied when `now` is used to insert data
- Epoch Time:timestamp can also be a long integer number, which means the number of seconds, milliseconds or nanoseconds, depending on the time precision, from 1970-01-01 00:00:00.000 (UTC/GMT)
-- timestamp can be applied with add/subtract operation, for example `now-2h` means 2 hours back from the time at which query is executed,the unit can be b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), w(week.。 So `select * from t1 where ts > now-2w and ts <= now-1w` means the data between two weeks ago and one week ago. The time unit can also be n (calendar month) or y (calendar year) when specifying the time window for down sampling operation.
+- Add/subtract operations can be carried out on timestamps. For example `now-2h` means 2 hours prior to the time at which query is executed. The units of time in operations can be b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). So `select * from t1 where ts > now-2w and ts <= now-1w` means the data between two weeks ago and one week ago. The time unit can also be n (calendar month) or y (calendar year) when specifying the time window for down sampling operations.
-Time precision in TDengine can be set by the `PRECISION` parameter when executing `CREATE DATABASE`, like below, the default time precision is millisecond.
+Time precision in TDengine can be set by the `PRECISION` parameter when executing `CREATE DATABASE`. The default time precision is millisecond. In the statement below, the precision is set to nanonseconds.
```sql
CREATE DATABASE db_name PRECISION 'ns';
```
-In TDengine, below data types can be used when specifying a column or tag.
+## Data Types
+
+In TDengine, the data types below can be used when specifying a column or tag.
| # | **type** | **Bytes** | **Description** |
| --- | :-------: | --------- | ------------------------- |
| 1 | TIMESTAMP | 8 | Default precision is millisecond, microsecond and nanosecond are also supported |
-| 2 | INT | 4 | Integer, the value range is [-2^31+1, 2^31-1], while -2^31 is treated as NULL |
-| 3 | BIGINT | 8 | Long integer, the value range is [-2^63+1, 2^63-1], while -2^63 is treated as NULL |
-| 4 | FLOAT | 4 | Floating point number, the effective number of digits is 6-7, the value range is [-3.4E38, 3.4E38] |
-| 5 | DOUBLE | 8 | double precision floating point number, the effective number of digits is 15-16, the value range is [-1.7E308, 1.7E308] |
-| 6 | BINARY | User Defined | Single-byte string for ASCII visible characters. Length must be specified when defining a column or tag of binary type. The string length can be up to 16374 bytes. The string value must be quoted with single quotes. The literal single quote inside the string must be preceded with back slash like `\'` |
-| 7 | SMALLINT | 2 | Short integer, the value range is [-32767, 32767], while -32768 is treated as NULL |
-| 8 | TINYINT | 1 | Single-byte integer, the value range is [-127, 127], while -128 is treated as NULL |
-| 9 | BOOL | 1 | Bool, the value range is {true, false} |
-| 10 | NCHAR | User Defined| Multiple-Byte string that can include like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\’`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. Error will be reported the string value exceeds the length defined. |
-| 11 | JSON | | json type can only be used on tag, a tag of json type is excluded with any other tags of any other type |
-
-:::tip
-TDengine is case insensitive and treats any characters in the sql command as lower case by default, case sensitive strings must be quoted with single quotes.
-
-:::
+| 2 | INT | 4 | Integer, the value range is [-2^31, 2^31-1] |
+| 3 |INT UNSIGNED|4 | Unsigned integer, the value range is [0, 2^31-1] |
+| 4 | BIGINT | 8 | Long integer, the value range is [-2^63, 2^63-1] |
+| 5 | BIGINT UNSIGNED | 8 | Unsigned long integer, the value range is [0, 2^63-1] |
+| 6 | FLOAT | 4 | Floating point number, the effective number of digits is 6-7, the value range is [-3.4E38, 3.4E38] |
+| 7 | DOUBLE | 8 | Double precision floating point number, the effective number of digits is 15-16, the value range is [-1.7E308, 1.7E308] |
+| 8 | BINARY | User Defined | Single-byte string for ASCII visible characters. Length must be specified when defining a column or tag of binary type. The string length can be up to 16374 bytes. The string value must be quoted with single quotes. The literal single quote inside the string must be preceded with back slash like `\'` |
+| 9 | SMALLINT | 2 | Short integer, the value range is [-32768, 32767] |
+| 10 | SMALLINT UNSIGNED | 2 | Unsigned short integer, the value range is [0, 32767] |
+| 11 | TINYINT | 1 | Single-byte integer, the value range is [-128, 127] |
+| 12 | TINYINT UNSIGNED | 1 | Unsigned single-byte integer, the value range is [0, 127] |
+| 13 | BOOL | 1 | Bool, the value range is {true, false} |
+| 14 | NCHAR | User Defined| Multi-Byte string that can include multi byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\’`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. |
+| 15 | JSON | | JSON type can only be used on tags. A tag of json type is excluded with any other tags of any other type |
+| 16 | VARCHAR | User Defined| Alias of BINARY type |
:::note
-Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multiple-byte characters must be stored in NCHAR type.
+- TDengine is case insensitive and treats any characters in the sql command as lower case by default, case sensitive strings must be quoted with single quotes.
+- Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multi-byte characters must be stored in NCHAR type.
+- Numeric values in SQL statements will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number.
:::
+## Constants
+TDengine supports constants of multiple data type.
+
+| # | **Syntax** | **Type** | **Description** |
+| --- | :-------: | --------- | -------------------------------------- |
+| 1 | [{+ \| -}]123 | BIGINT | Numeric constants are treated as BIGINT type. The value will be truncated if it exceeds the range of BIGINT type. |
+| 2 | 123.45 | DOUBLE | Floating number constants are treated as DOUBLE type. TDengine determines whether it's a floating number based on if decimal point or scientific notation is used. |
+| 3 | 1.2E3 | DOUBLE | Constants in scientific notation are treated ad DOUBLE type. |
+| 4 | 'abc' | BINARY | String constants enclosed by single quotes are treated as BINARY type. Its size is determined as the acutal length. Single quote itself can be included by preceding backslash, i.e. `\'`, in a string constant. |
+| 5 | "abc" | BINARY | String constants enclosed by double quotes are treated as BINARY type. Its size is determined as the acutal length. Double quote itself can be included by preceding backslash, i.e. `\"`, in a string constant. |
+| 6 | TIMESTAMP {'literal' \| "literal"} | TIMESTAMP | A string constant following `TIMESTAMP` keyword is treated as TIMESTAMP type. The string should be in the format of "YYYY-MM-DD HH:mm:ss.MS". Its time precision is same as that of the current database being used. |
+| 7 | {TRUE \| FALSE} | BOOL | BOOL type contant. |
+| 8 | {'' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL } | -- | NULL constant, it can be used for any type.|
+
:::note
-Numeric values in SQL statements will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number.
+- TDengine determines whether it's a floating number based on if decimal point or scientific notation is used. So whether the value is determined as overflow depends on both the value and the determined type. For example, 9999999999999999999 is determined as overflow because it exceeds the upper limit of BIGINT type, while 9999999999999999999.0 is considered as a valid floating number because it is within the range of DOUBLE type.
:::
diff --git a/docs-en/12-taos-sql/02-database.md b/docs-en/12-taos-sql/02-database.md
index 85b71bbde727ea1ff84080d3770e641d59b88c7b..80581b2f1bc7ce9cd046c18873d3f22b6804d8cf 100644
--- a/docs-en/12-taos-sql/02-database.md
+++ b/docs-en/12-taos-sql/02-database.md
@@ -4,7 +4,7 @@ title: Database
description: "create and drop database, show or change database parameters"
---
-## Create Datable
+## Create Database
```
CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1];
@@ -12,11 +12,11 @@ CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1];
:::info
-1. KEEP specifies the number of days for which the data in the database to be created will be kept, the default value is 3650 days, i.e. 10 years. The data will be deleted automatically once its age exceeds this threshold.
+1. KEEP specifies the number of days for which the data in the database will be retained. The default value is 3650 days, i.e. 10 years. The data will be deleted automatically once its age exceeds this threshold.
2. UPDATE specifies whether the data can be updated and how the data can be updated.
- 1. UPDATE set to 0 means update operation is not allowed, the data with an existing timestamp will be dropped silently.
- 2. UPDATE set to 1 means the whole row will be updated, the columns for which no value is specified will be set to NULL
- 3. UPDATE set to 2 means updating a part of columns for a row is allowed, the columns for which no value is specified will be kept as no change
+ 1. UPDATE set to 0 means update operation is not allowed. The update for data with an existing timestamp will be discarded silently and the original record in the database will be preserved as is.
+ 2. UPDATE set to 1 means the whole row will be updated. The columns for which no value is specified will be set to NULL.
+ 3. UPDATE set to 2 means updating a subset of columns for a row is allowed. The columns for which no value is specified will be kept unchanged.
3. The maximum length of database name is 33 bytes.
4. The maximum length of a SQL statement is 65,480 bytes.
5. Below are the parameters that can be used when creating a database
@@ -35,7 +35,7 @@ CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1];
- maxVgroupsPerDb: [Description](/reference/config/#maxvgroupsperdb)
- comp: [Description](/reference/config/#comp)
- precision: [Description](/reference/config/#precision)
-6. Please be noted that all of the parameters mentioned in this section can be configured in configuration file `taosd.cfg` at server side and used by default, can be override if they are specified in `create database` statement.
+6. Please note that all of the parameters mentioned in this section are configured in configuration file `taos.cfg` on the TDengine server. If not specified in the `create database` statement, the values from taos.cfg are used by default. To override default parameters, they must be specified in the `create database` statement.
:::
@@ -52,7 +52,7 @@ USE db_name;
```
:::note
-This way is not applicable when using a REST connection
+This way is not applicable when using a REST connection. In a REST connection the database name must be specified before a table or stable name. For e.g. to query the stable "meters" in database "test" the query would be "SELECT count(*) from test.meters"
:::
@@ -63,13 +63,13 @@ DROP DATABASE [IF EXISTS] db_name;
```
:::note
-All data in the database will be deleted too. This command must be used with caution.
+All data in the database will be deleted too. This command must be used with extreme caution. Please follow your organization's data integrity, data backup, data security or any other applicable SOPs before using this command.
:::
## Change Database Configuration
-Some examples are shown below to demonstrate how to change the configuration of a database. Please be noted that some configuration parameters can be changed after the database is created, but some others can't, for details of the configuration parameters of database please refer to [Configuration Parameters](/reference/config/).
+Some examples are shown below to demonstrate how to change the configuration of a database. Please note that some configuration parameters can be changed after the database is created, but some cannot. For details of the configuration parameters of database please refer to [Configuration Parameters](/reference/config/).
```
ALTER DATABASE db_name COMP 2;
@@ -81,7 +81,7 @@ COMP parameter specifies whether the data is compressed and how the data is comp
ALTER DATABASE db_name REPLICA 2;
```
-REPLICA parameter specifies the number of replications of the database.
+REPLICA parameter specifies the number of replicas of the database.
```
ALTER DATABASE db_name KEEP 365;
@@ -124,4 +124,4 @@ SHOW DATABASES;
SHOW CREATE DATABASE db_name;
```
-This command is useful when migrating the data from one TDengine cluster to another one. Firstly this command can be used to get the CREATE statement, which in turn can be used in another TDengine to create an exactly same database.
+This command is useful when migrating the data from one TDengine cluster to another. This command can be used to get the CREATE statement, which can be used in another TDengine instance to create the exact same database.
diff --git a/docs-en/12-taos-sql/03-table.md b/docs-en/12-taos-sql/03-table.md
index a1524f45f98e8435425a9a937b7f6dc4431b6e06..f065a8e2396583bb7a512446b513ed60056ad55e 100644
--- a/docs-en/12-taos-sql/03-table.md
+++ b/docs-en/12-taos-sql/03-table.md
@@ -12,12 +12,12 @@ CREATE TABLE [IF NOT EXISTS] tb_name (timestamp_field_name TIMESTAMP, field1_nam
:::info
-1. The first column of a table must be in TIMESTAMP type, and it will be set as primary key automatically
-2. The maximum length of table name is 192 bytes.
-3. The maximum length of each row is 16k bytes, please be notes that the extra 2 bytes used by each BINARY/NCHAR column are also counted in.
-4. The name of sub-table can only be consisted of English characters, digits and underscore, and can't be started with digit. Table names are case insensitive.
-5. The maximum length in bytes must be specified when using BINARY or NCHAR type.
-6. Escape character "\`" can be used to avoid the conflict between table names and reserved keywords, above rules will be bypassed when using escape character on table names, but the upper limit for name length is still valid. The table names specified using escape character are case sensitive. Only ASCII visible characters can be used with escape character.
+1. The first column of a table MUST be of type TIMESTAMP. It is automatically set as the primary key.
+2. The maximum length of the table name is 192 bytes.
+3. The maximum length of each row is 48k bytes, please note that the extra 2 bytes used by each BINARY/NCHAR column are also counted.
+4. The name of the subtable can only consist of characters from the English alphabet, digits and underscore. Table names can't start with a digit. Table names are case insensitive.
+5. The maximum length in bytes must be specified when using BINARY or NCHAR types.
+6. Escape character "\`" can be used to avoid the conflict between table names and reserved keywords, above rules will be bypassed when using escape character on table names, but the upper limit for the name length is still valid. The table names specified using escape character are case sensitive. Only ASCII visible characters can be used with escape character.
For example \`aBc\` and \`abc\` are different table names but `abc` and `aBc` are same table names because they are both converted to `abc` internally.
:::
@@ -28,9 +28,9 @@ CREATE TABLE [IF NOT EXISTS] tb_name (timestamp_field_name TIMESTAMP, field1_nam
CREATE TABLE [IF NOT EXISTS] tb_name USING stb_name TAGS (tag_value1, ...);
```
-The above command creates a subtable using the specified super table as template and the specified tab values.
+The above command creates a subtable using the specified super table as a template and the specified tag values.
-### Create Subtable Using STable As Template With A Part of Tags
+### Create Subtable Using STable As Template With A Subset of Tags
```
CREATE TABLE [IF NOT EXISTS] tb_name USING stb_name (tag_name1, ...) TAGS (tag_value1, ...);
@@ -44,11 +44,11 @@ The tags for which no value is specified will be set to NULL.
CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) [IF NOT EXISTS] tb_name2 USING stb_name TAGS (tag_value2, ...) ...;
```
-This way can be used to create a lot of tables in a single SQL statement to accelerate the speed of the creating tables.
+This can be used to create a lot of tables in a single SQL statement while making table creation much faster.
:::info
-- Creating tables in batch must use super table as template.
+- Creating tables in batch must use a super table as a template.
- The length of single statement is suggested to be between 1,000 and 3,000 bytes for best performance.
:::
@@ -71,7 +71,7 @@ SHOW TABLES [LIKE tb_name_wildcard];
SHOW CREATE TABLE tb_name;
```
-This way is useful when migrating the data in one TDengine cluster to another one because it can be used to create exactly same tables in the target database.
+This is useful when migrating the data in one TDengine cluster to another one because it can be used to create the exact same tables in the target database.
## Show Table Definition
@@ -90,7 +90,7 @@ ALTER TABLE tb_name ADD COLUMN field_name data_type;
:::info
1. The maximum number of columns is 4096, the minimum number of columns is 2.
-2. The maximum length of column name is 64 bytes.
+2. The maximum length of a column name is 64 bytes.
:::
@@ -101,7 +101,7 @@ ALTER TABLE tb_name DROP COLUMN field_name;
```
:::note
-If a table is created using a super table as template, the table definition can only be changed on the corresponding super table, but the change will be automatically applied to all the sub tables created using this super table as template. For tables created in normal way, the table definition can be changed directly on the table.
+If a table is created using a super table as template, the table definition can only be changed on the corresponding super table, and the change will be automatically applied to all the subtables created using this super table as template. For tables created in the normal way, the table definition can be changed directly on the table.
:::
@@ -111,10 +111,10 @@ If a table is created using a super table as template, the table definition can
ALTER TABLE tb_name MODIFY COLUMN field_name data_type(length);
```
-The the type of a column is variable length, like BINARY or NCHAR, this way can be used to change (or increase) the length of the column.
+If the type of a column is variable length, like BINARY or NCHAR, this command can be used to change the length of the column.
:::note
-If a table is created using a super table as template, the table definition can only be changed on the corresponding super table, but the change will be automatically applied to all the sub tables created using this super table as template. For tables created in normal way, the table definition can be changed directly on the table.
+If a table is created using a super table as template, the table definition can only be changed on the corresponding super table, and the change will be automatically applied to all the subtables created using this super table as template. For tables created in the normal way, the table definition can be changed directly on the table.
:::
diff --git a/docs-en/12-taos-sql/04-stable.md b/docs-en/12-taos-sql/04-stable.md
index b7817f90287a6415bee020fb5adc8e6239cc6da4..b8a608792ab327a81129d29ddd0ff44d7af6e6c5 100644
--- a/docs-en/12-taos-sql/04-stable.md
+++ b/docs-en/12-taos-sql/04-stable.md
@@ -9,20 +9,20 @@ Keyword `STable`, abbreviated for super table, is supported since version 2.0.15
:::
-## Crate STable
+## Create STable
```
CREATE STable [IF NOT EXISTS] stb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]) TAGS (tag1_name tag_type1, tag2_name tag_type2 [, tag3_name tag_type3]);
```
-The SQL statement of creating STable is similar to that of creating table, but a special column named as `TAGS` must be specified with the names and types of the tags.
+The SQL statement of creating a STable is similar to that of creating a table, but a special column set named `TAGS` must be specified with the names and types of the tags.
:::info
-1. The tag types specified in TAGS should NOT be timestamp. Since 2.1.3.0 timestamp type can be used in TAGS column, but its value must be fixed and arithmetic operation can't be applied on it.
-2. The tag names specified in TAGS should NOT be same as other columns.
-3. The tag names specified in TAGS should NOT be same as any reserved keywords.(Please refer to [keywords](/taos-sql/keywords/)
-4. The maximum number of tags specified in TAGS is 128, but there must be at least one tag, and the total length of all tag columns should NOT exceed 16KB.
+1. A tag can be of type timestamp, since version 2.1.3.0, but its value must be fixed and arithmetic operations cannot be performed on it. Prior to version 2.1.3.0, tag types specified in TAGS could not be of type timestamp.
+2. The tag names specified in TAGS should NOT be the same as other columns.
+3. The tag names specified in TAGS should NOT be the same as any reserved keywords.(Please refer to [keywords](/taos-sql/keywords/)
+4. The maximum number of tags specified in TAGS is 128, there must be at least one tag, and the total length of all tag columns should NOT exceed 16KB.
:::
@@ -32,7 +32,7 @@ The SQL statement of creating STable is similar to that of creating table, but a
DROP STable [IF EXISTS] stb_name;
```
-All the sub-tables created using the deleted STable will be deleted automatically.
+All the subtables created using the deleted STable will be deleted automatically.
## Show All STables
@@ -40,7 +40,7 @@ All the sub-tables created using the deleted STable will be deleted automaticall
SHOW STableS [LIKE tb_name_wildcard];
```
-This command can be used to display the information of all STables in the current database, including name, creation time, number of columns, number of tags, number of tables created using this STable.
+This command can be used to display the information of all STables in the current database, including name, creation time, number of columns, number of tags, and number of tables created using this STable.
## Show The Create Statement of A STable
@@ -48,7 +48,7 @@ This command can be used to display the information of all STables in the curren
SHOW CREATE STable stb_name;
```
-This command is useful in migrating data from one TDengine cluster to another one because it can be used to create an exactly same STable in the target database.
+This command is useful in migrating data from one TDengine cluster to another because it can be used to create the exact same STable in the target database.
## Get STable Definition
@@ -76,7 +76,7 @@ ALTER STable stb_name DROP COLUMN field_name;
ALTER STable stb_name MODIFY COLUMN field_name data_type(length);
```
-This command can be used to change (or increase, more specifically) the length of a column of variable length types, like BINARY or NCHAR.
+This command can be used to change (or more specifically, increase) the length of a column of variable length types, like BINARY or NCHAR.
## Change Tags of A STable
@@ -94,7 +94,7 @@ This command is used to add a new tag for a STable and specify the tag type.
ALTER STable stb_name DROP TAG tag_name;
```
-The tag will be removed automatically from all the sub tables crated using the super table as template once a tag is removed from a super table.
+The tag will be removed automatically from all the subtables, created using the super table as template, once a tag is removed from a super table.
### Change A Tag
@@ -102,7 +102,7 @@ The tag will be removed automatically from all the sub tables crated using the s
ALTER STable stb_name CHANGE TAG old_tag_name new_tag_name;
```
-The tag name will be changed automatically from all the sub tables crated using the super table as template once a tag name is changed for a super table.
+The tag name will be changed automatically for all the subtables, created using the super table as template, once a tag name is changed for a super table.
### Change Tag Length
@@ -110,9 +110,9 @@ The tag name will be changed automatically from all the sub tables crated using
ALTER STable stb_name MODIFY TAG tag_name data_type(length);
```
-This command can be used to change (or increase, more specifically) the length of a tag of variable length types, like BINARY or NCHAR.
+This command can be used to change (or more specifically, increase) the length of a tag of variable length types, like BINARY or NCHAR.
:::note
-Changing tag value can be applied to only sub tables. All other tag operations, like add tag, remove tag, however, can be applied to only STable. If a new tag is added for a STable, the tag will be added with NULL value for all its sub tables.
+Changing tag values can be applied to only subtables. All other tag operations, like add tag, remove tag, however, can be applied to only STable. If a new tag is added for a STable, the tag will be added with NULL value for all its subtables.
:::
diff --git a/docs-en/12-taos-sql/05-insert.md b/docs-en/12-taos-sql/05-insert.md
index 96e6a08ee17e0c72b15a35efc487a78ae4673017..1336cd7238a19190583ea9d268a64df242ffd3c9 100644
--- a/docs-en/12-taos-sql/05-insert.md
+++ b/docs-en/12-taos-sql/05-insert.md
@@ -19,15 +19,15 @@ INSERT INTO
## Insert Single or Multiple Rows
-Single row or multiple rows specified with VALUES can be inserted into a specific table. For example
+Single row or multiple rows specified with VALUES can be inserted into a specific table. For example:
-Single row is inserted using below statement.
+A single row is inserted using the below statement.
```sq;
INSERT INTO d1001 VALUES (NOW, 10.2, 219, 0.32);
```
-Double rows can be inserted using below statement.
+Double rows are inserted using the below statement.
```sql
INSERT INTO d1001 VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32) (1626164208000, 10.15, 217, 0.33);
@@ -36,7 +36,7 @@ INSERT INTO d1001 VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32) (162616420
:::note
1. In the second example above, different formats are used in the two rows to be inserted. In the first row, the timestamp format is a date and time string, which is interpreted from the string value only. In the second row, the timestamp format is a long integer, which will be interpreted based on the database time precision.
-2. When trying to insert multiple rows in single statement, only the timestamp of one row can be set as NOW, otherwise there will be duplicate timestamps among the rows and the result may be out of expectation because NOW will be interpreted as the time when the statement is executed.
+2. When trying to insert multiple rows in a single statement, only the timestamp of one row can be set as NOW, otherwise there will be duplicate timestamps among the rows and the result may be out of expectation because NOW will be interpreted as the time when the statement is executed.
3. The oldest timestamp that is allowed is subtracting the KEEP parameter from current time.
4. The newest timestamp that is allowed is adding the DAYS parameter to current time.
@@ -51,13 +51,13 @@ INSERT INTO d1001 (ts, current, phase) VALUES ('2021-07-13 14:06:33.196', 10.27,
```
:::info
-If no columns are explicitly specified, all the columns must be provided with values, this is called "all column mode". The insert performance of all column mode is much better than specifying a part of columns, so it's encouraged to use "all column mode" while providing NULL value explicitly for the columns for which no actual value can be provided.
+If no columns are explicitly specified, all the columns must be provided with values, this is called "all column mode". The insert performance of all column mode is much better than specifying a subset of columns, so it's encouraged to use "all column mode" while providing NULL value explicitly for the columns for which no actual value can be provided.
:::
## Insert Into Multiple Tables
-One or multiple rows can be inserted into multiple tables in single SQL statement, with or without specifying specific columns.
+One or multiple rows can be inserted into multiple tables in a single SQL statement, with or without specifying specific columns.
```sql
INSERT INTO d1001 VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33)
@@ -66,40 +66,40 @@ INSERT INTO d1001 VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-
## Automatically Create Table When Inserting
-If it's not sure whether the table already exists, the table can be created automatically while inserting using below SQL statement. To use this functionality, a STable must be used as template and tag values must be provided.
+If it's unknown whether the table already exists, the table can be created automatically while inserting using the SQL statement below. To use this functionality, a STable must be used as template and tag values must be provided.
```sql
-INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32);
+INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32);
```
-It's not necessary to provide values for all tag when creating tables automatically, the tags without values provided will be set to NULL.
+It's not necessary to provide values for all tags when creating tables automatically, the tags without values provided will be set to NULL.
```sql
INSERT INTO d21001 USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:33.196', 10.15, 217, 0.33);
```
-Multiple rows can also be inserted into same table in single SQL statement using this way.
+Multiple rows can also be inserted into the same table in a single SQL statement.
```sql
-INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33)
+INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33)
d21002 USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:34.255', 10.15, 217, 0.33)
d21003 USING meters (groupId) TAGS (2) (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31);
```
:::info
-Prior to version 2.0.20.5, when using `INSERT` to create table automatically and specify the columns, the column names must follow the table name immediately. From version 2.0.20.5, the column names can follow the table name immediately, also can be put between `TAGS` and `VALUES`. In same SQL statement, however, these two ways of specifying column names can't be mixed.
+Prior to version 2.0.20.5, when using `INSERT` to create tables automatically and specifying the columns, the column names must follow the table name immediately. From version 2.0.20.5, the column names can follow the table name immediately, also can be put between `TAGS` and `VALUES`. In the same SQL statement, however, these two ways of specifying column names can't be mixed.
:::
## Insert Rows From A File
-Besides using `VALUES` to insert one or multiple rows, the data to be inserted can also be prepared in a CSV file with comma as separator and each field value quoted by single quotes. Table definition is not required in the CSV file. For example, if file "/tmp/csvfile.csv" contains below data:
+Besides using `VALUES` to insert one or multiple rows, the data to be inserted can also be prepared in a CSV file with comma as separator and each field value quoted by single quotes. Table definition is not required in the CSV file. For example, if file "/tmp/csvfile.csv" contains the below data:
```
'2021-07-13 14:07:34.630', '10.2', '219', '0.32'
'2021-07-13 14:07:35.779', '10.15', '217', '0.33'
```
-Then data in this file can be inserted by below SQL statement:
+Then data in this file can be inserted by the SQL statement below:
```sql
INSERT INTO d1001 FILE '/tmp/csvfile.csv';
@@ -107,30 +107,30 @@ INSERT INTO d1001 FILE '/tmp/csvfile.csv';
## Create Tables Automatically and Insert Rows From File
-From version 2.1.5.0, tables can be automatically created using a super table as template when inserting data from a CSV file, Like below:
+From version 2.1.5.0, tables can be automatically created using a super table as template when inserting data from a CSV file, like below:
```sql
-INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) FILE '/tmp/csvfile.csv';
+INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile.csv';
```
-Multiple tables can be automatically created and inserted in single SQL statement, like below:
+Multiple tables can be automatically created and inserted in a single SQL statement, like below:
```sql
-INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) FILE '/tmp/csvfile_21001.csv'
+INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile_21001.csv'
d21002 USING meters (groupId) TAGS (2) FILE '/tmp/csvfile_21002.csv';
```
## More About Insert
-For SQL statement like `insert`, stream parsing strategy is applied. That means before an error is found and the execution is aborted, the part prior to the error point has already been executed. Below is an experiment to help understand the behavior.
+For SQL statement like `insert`, a stream parsing strategy is applied. That means before an error is found and the execution is aborted, the part prior to the error point has already been executed. Below is an experiment to help understand the behavior.
-Firstly, a super table is created.
+First, a super table is created.
```sql
CREATE TABLE meters(ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS(location BINARY(30), groupId INT);
```
-It can be proved that the super table has been created by `SHOW STableS`, but no table exists by `SHOW TABLES`.
+It can be proven that the super table has been created by `SHOW STableS`, but no table exists using `SHOW TABLES`.
```
taos> SHOW STableS;
@@ -146,7 +146,7 @@ Query OK, 0 row(s) in set (0.000946s)
Then, try to create table d1001 automatically when inserting data into it.
```sql
-INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2) VALUES('a');
+INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('a');
```
The output shows the value to be inserted is invalid. But `SHOW TABLES` proves that the table has been created automatically by the `INSERT` statement.
@@ -161,4 +161,4 @@ taos> SHOW TABLES;
Query OK, 1 row(s) in set (0.001091s)
```
-From the above experiment, we can see that even though the value to be inserted is invalid but the table is still created.
+From the above experiment, we can see that while the value to be inserted is invalid the table is still created.
diff --git a/docs-en/12-taos-sql/06-select.md b/docs-en/12-taos-sql/06-select.md
index 11b181f65d4e7e0e7d47d04986b144ff362c879f..8a017cf92e40aa4a854dcd531b7df291a9243515 100644
--- a/docs-en/12-taos-sql/06-select.md
+++ b/docs-en/12-taos-sql/06-select.md
@@ -21,7 +21,7 @@ SELECT select_expr [, select_expr ...]
## Wildcard
-Wilcard \* can be used to specify all columns. The result includes only data columns for normal tables.
+Wildcard \* can be used to specify all columns. The result includes only data columns for normal tables.
```
taos> SELECT * FROM d1001;
@@ -39,26 +39,26 @@ The result includes both data columns and tag columns for super table.
taos> SELECT * FROM meters;
ts | current | voltage | phase | location | groupid |
=====================================================================================================================================
- 2018-10-03 14:38:05.500 | 11.80000 | 221 | 0.28000 | Beijing.Haidian | 2 |
- 2018-10-03 14:38:16.600 | 13.40000 | 223 | 0.29000 | Beijing.Haidian | 2 |
- 2018-10-03 14:38:05.000 | 10.80000 | 223 | 0.29000 | Beijing.Haidian | 3 |
- 2018-10-03 14:38:06.500 | 11.50000 | 221 | 0.35000 | Beijing.Haidian | 3 |
- 2018-10-03 14:38:04.000 | 10.20000 | 220 | 0.23000 | Beijing.Chaoyang | 3 |
- 2018-10-03 14:38:16.650 | 10.30000 | 218 | 0.25000 | Beijing.Chaoyang | 3 |
- 2018-10-03 14:38:05.000 | 10.30000 | 219 | 0.31000 | Beijing.Chaoyang | 2 |
- 2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 | Beijing.Chaoyang | 2 |
- 2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 | Beijing.Chaoyang | 2 |
+ 2018-10-03 14:38:05.500 | 11.80000 | 221 | 0.28000 | California.LoSangeles | 2 |
+ 2018-10-03 14:38:16.600 | 13.40000 | 223 | 0.29000 | California.LoSangeles | 2 |
+ 2018-10-03 14:38:05.000 | 10.80000 | 223 | 0.29000 | California.LoSangeles | 3 |
+ 2018-10-03 14:38:06.500 | 11.50000 | 221 | 0.35000 | California.LoSangeles | 3 |
+ 2018-10-03 14:38:04.000 | 10.20000 | 220 | 0.23000 | California.SanFrancisco | 3 |
+ 2018-10-03 14:38:16.650 | 10.30000 | 218 | 0.25000 | California.SanFrancisco | 3 |
+ 2018-10-03 14:38:05.000 | 10.30000 | 219 | 0.31000 | California.SanFrancisco | 2 |
+ 2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 | California.SanFrancisco | 2 |
+ 2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 | California.SanFrancisco | 2 |
Query OK, 9 row(s) in set (0.002022s)
```
-Wildcard can be used with table name as prefix, both below SQL statements have same effects and return all columns.
+Wildcard can be used with table name as prefix. Both SQL statements below have the same effect and return all columns.
```SQL
SELECT * FROM d1001;
SELECT d1001.* FROM d1001;
```
-In JOIN query, however, with or without table name prefix will return different results. \* without table prefix will return all the columns of both tables, but \* with table name as prefix will return only the columns of that table.
+In a JOIN query, however, the results are different with or without a table name prefix. \* without table prefix will return all the columns of both tables, but \* with table name as prefix will return only the columns of that table.
```
taos> SELECT * FROM d1001, d1003 WHERE d1001.ts=d1003.ts;
@@ -76,7 +76,7 @@ taos> SELECT d1001.* FROM d1001,d1003 WHERE d1001.ts = d1003.ts;
Query OK, 1 row(s) in set (0.020443s)
```
-Wilcard \* can be used with some functions, but the result may be different depending on the function being used. For example, `count(*)` returns only one column, i.e. the number of rows; `first`, `last` and `last_row` return all columns of the selected row.
+Wildcard \* can be used with some functions, but the result may be different depending on the function being used. For example, `count(*)` returns only one column, i.e. the number of rows; `first`, `last` and `last_row` return all columns of the selected row.
```
taos> SELECT COUNT(*) FROM d1001;
@@ -96,20 +96,20 @@ Query OK, 1 row(s) in set (0.000849s)
## Tags
-Starting from version 2.0.14, tag columns can be selected together with data columns when querying sub tables. Please be noted that, however, wildcard \* doesn't represent any tag column, that means tag columns must be specified explicitly like below example.
+Starting from version 2.0.14, tag columns can be selected together with data columns when querying sub tables. Please note however, that, wildcard \* cannot be used to represent any tag column. This means that tag columns must be specified explicitly like the example below.
```
taos> SELECT location, groupid, current FROM d1001 LIMIT 2;
location | groupid | current |
======================================================================
- Beijing.Chaoyang | 2 | 10.30000 |
- Beijing.Chaoyang | 2 | 12.60000 |
+ California.SanFrancisco | 2 | 10.30000 |
+ California.SanFrancisco | 2 | 12.60000 |
Query OK, 2 row(s) in set (0.003112s)
```
## Get distinct values
-`DISTINCT` keyword can be used to get all the unique values of tag columns from a super table, it can also be used to get all the unique values of data columns from a table or sub table.
+`DISTINCT` keyword can be used to get all the unique values of tag columns from a super table. It can also be used to get all the unique values of data columns from a table or subtable.
```sql
SELECT DISTINCT tag_name [, tag_name ...] FROM stb_name;
@@ -118,15 +118,15 @@ SELECT DISTINCT col_name [, col_name ...] FROM tb_name;
:::info
-1. Configuration parameter `maxNumOfDistinctRes` in `taos.cfg` is used to control the number of rows to output. The minimum configurable value is 100,000, the maximum configurable value is 100,000,000, the default value is 1000,000. If the actual number of rows exceeds the value of this parameter, only the number of rows specified by this parameter will be output.
-2. It can't be guaranteed that the results selected by using `DISTINCT` on columns of `FLOAT` or `DOUBLE` are exactly unique because of the precision nature of floating numbers.
-3. `DISTINCT` can't be used in the sub-query of a nested query statement, and can't be used together with aggregate functions, `GROUP BY` or `JOIN` in same SQL statement.
+1. Configuration parameter `maxNumOfDistinctRes` in `taos.cfg` is used to control the number of rows to output. The minimum configurable value is 100,000, the maximum configurable value is 100,000,000, the default value is 1,000,000. If the actual number of rows exceeds the value of this parameter, only the number of rows specified by this parameter will be output.
+2. It can't be guaranteed that the results selected by using `DISTINCT` on columns of `FLOAT` or `DOUBLE` are exactly unique because of the precision errors in floating point numbers.
+3. `DISTINCT` can't be used in the sub-query of a nested query statement, and can't be used together with aggregate functions, `GROUP BY` or `JOIN` in the same SQL statement.
:::
## Columns Names of Result Set
-When using `SELECT`, the column names in the result set will be same as that in the select clause if `AS` is not used. `AS` can be used to rename the column names in the result set. For example
+When using `SELECT`, the column names in the result set will be the same as that in the select clause if `AS` is not used. `AS` can be used to rename the column names in the result set. For example
```
taos> SELECT ts, ts AS primary_key_ts FROM d1001;
@@ -161,7 +161,7 @@ SELECT * FROM d1001;
## Special Query
-Some special query functionalities can be performed without `FORM` sub-clause. For example, below statement can be used to get the current database in use.
+Some special query functions can be invoked without `FROM` sub-clause. For example, the statement below can be used to get the current database in use.
```
taos> SELECT DATABASE();
@@ -181,7 +181,7 @@ taos> SELECT DATABASE();
Query OK, 1 row(s) in set (0.000184s)
```
-Below statement can be used to get the version of client or server.
+The statement below can be used to get the version of client or server.
```
taos> SELECT CLIENT_VERSION();
@@ -197,7 +197,7 @@ taos> SELECT SERVER_VERSION();
Query OK, 1 row(s) in set (0.000077s)
```
-Below statement is used to check the server status. One integer, like `1`, is returned if the server status is OK, otherwise an error code is returned. This way is compatible with the status check for TDengine from connection pool or 3rd party tools, and can avoid the problem of losing connection from connection pool when using wrong heartbeat checking SQL statement.
+The statement below is used to check the server status. An integer, like `1`, is returned if the server status is OK, otherwise an error code is returned. This is compatible with the status check for TDengine from connection pool or 3rd party tools, and can avoid the problem of losing the connection from a connection pool when using the wrong heartbeat checking SQL statement.
```
taos> SELECT SERVER_STATUS();
@@ -248,12 +248,12 @@ summary:
## Special Keywords in TAOS SQL
-- `TBNAME`: it is treated as a special tag when selecting on a super table, representing the name of sub-tables in that super table.
+- `TBNAME`: it is treated as a special tag when selecting on a super table, representing the name of subtables in that super table.
- `_c0`: represents the first column of a table or super table.
## Tips
-To get all the sub tables and corresponding tag values from a super table:
+To get all the subtables and corresponding tag values from a super table:
```SQL
SELECT TBNAME, location FROM meters;
@@ -271,10 +271,10 @@ Only filter on `TAGS` are allowed in the `where` clause for above two query stat
taos> SELECT TBNAME, location FROM meters;
tbname | location |
==================================================================
- d1004 | Beijing.Haidian |
- d1003 | Beijing.Haidian |
- d1002 | Beijing.Chaoyang |
- d1001 | Beijing.Chaoyang |
+ d1004 | California.LosAngeles |
+ d1003 | California.LosAngeles |
+ d1002 | California.SanFrancisco |
+ d1001 | California.SanFrancisco |
Query OK, 4 row(s) in set (0.000881s)
taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2;
@@ -284,11 +284,11 @@ taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2;
Query OK, 1 row(s) in set (0.001091s)
```
-- Wildcard \* can be used to get all columns, or specific column names can be specified. Arithmetic operation can be performed on columns of number types, columns can be renamed in the result set.
-- Arithmetic operation on columns can't be used in where clause. For example, `where a*2>6;` is not allowed but `where a>6/2;` can be used instead for same purpose.
+- Wildcard \* can be used to get all columns, or specific column names can be specified. Arithmetic operation can be performed on columns of numerical types, columns can be renamed in the result set.
+- Arithmetic operation on columns can't be used in where clause. For example, `where a*2>6;` is not allowed but `where a>6/2;` can be used instead for the same purpose.
- Arithmetic operation on columns can't be used as the objectives of select statement. For example, `select min(2*a) from t;` is not allowed but `select 2*min(a) from t;` can be used instead.
- Logical operation can be used in `WHERE` clause to filter numeric values, wildcard can be used to filter string values.
-- Result set are arranged in ascending order of the first column, i.e. timestamp, but it can be controlled to output as descending order of timestamp. If `order by` is used on other columns, the result may be not as expected. By the way, \_c0 is used to represent the first column, i.e. timestamp.
+- Result sets are arranged in ascending order of the first column, i.e. timestamp, but it can be controlled to output as descending order of timestamp. If `order by` is used on other columns, the result may not be as expected. By the way, \_c0 is used to represent the first column, i.e. timestamp.
- `LIMIT` parameter is used to control the number of rows to output. `OFFSET` parameter is used to specify from which row to output. `LIMIT` and `OFFSET` are executed after `ORDER BY` in the query execution. A simple tip is that `LIMIT 5 OFFSET 2` can be abbreviated as `LIMIT 2, 5`.
- What is controlled by `LIMIT` is the number of rows in each group when `GROUP BY` is used.
- `SLIMIT` parameter is used to control the number of groups when `GROUP BY` is used. Similar to `LIMIT`, `SLIMIT 5 OFFSET 2` can be abbreviated as `SLIMIT 2, 5`.
@@ -296,7 +296,7 @@ Query OK, 1 row(s) in set (0.001091s)
## Where
-Logical operations in below table can be used in `where` clause to filter the resulting rows.
+Logical operations in below table can be used in the `where` clause to filter the resulting rows.
| **Operation** | **Note** | **Applicable Data Types** |
| ------------- | ------------------------ | ----------------------------------------- |
@@ -314,17 +314,17 @@ Logical operations in below table can be used in `where` clause to filter the re
**Explanations**:
-- Operator `<\>` is equal to `!=`, please be noted that this operator can't be used on the first column of any table, i.e.timestamp column.
+- Operator `<\>` is equal to `!=`, please note that this operator can't be used on the first column of any table, i.e.timestamp column.
- Operator `like` is used together with wildcards to match strings
- '%' matches 0 or any number of characters, '\_' matches any single ASCII character.
- `\_` is used to match the \_ in the string.
- - The maximum length of wildcard string is 100 bytes from version 2.1.6.1 (before that the maximum length is 20 bytes). `maxWildCardsLength` in `taos.cfg` can be used to control this threshold. Too long wildcard string may slowdown the execution performance of `LIKE` operator.
+ - The maximum length of wildcard string is 100 bytes from version 2.1.6.1 (before that the maximum length is 20 bytes). `maxWildCardsLength` in `taos.cfg` can be used to control this threshold. A very long wildcard string may slowdown the execution performance of `LIKE` operator.
- `AND` keyword can be used to filter multiple columns simultaneously. AND/OR operation can be performed on single or multiple columns from version 2.3.0.0. However, before 2.3.0.0 `OR` can't be used on multiple columns.
- For timestamp column, only one condition can be used; for other columns or tags, `OR` keyword can be used to combine multiple logical operators. For example, `((value > 20 AND value < 30) OR (value < 12))`.
- From version 2.3.0.0, multiple conditions can be used on timestamp column, but the result set can only contain single time range.
- From version 2.0.17.0, operator `BETWEEN AND` can be used in where clause, for example `WHERE col2 BETWEEN 1.5 AND 3.25` means the filter condition is equal to "1.5 ≤ col2 ≤ 3.25".
-- From version 2.1.4.0, operator `IN` can be used in where clause. For example, `WHERE city IN ('Beijing', 'Shanghai')`. For bool type, both `{true, false}` and `{0, 1}` are allowed, but integers other than 0 or 1 are not allowed. FLOAT and DOUBLE types are impacted by floating precision, only values that match the condition within the tolerance will be selected. Non-primary key column of timestamp type can be used with `IN`.
-- From version 2.3.0.0, regular expression is supported in where clause with keyword `match` or `nmatch`, the regular expression is case insensitive.
+- From version 2.1.4.0, operator `IN` can be used in the where clause. For example, `WHERE city IN ('California.SanFrancisco', 'California.SanDiego')`. For bool type, both `{true, false}` and `{0, 1}` are allowed, but integers other than 0 or 1 are not allowed. FLOAT and DOUBLE types are impacted by floating point precision errors. Only values that match the condition within the tolerance will be selected. Non-primary key column of timestamp type can be used with `IN`.
+- From version 2.3.0.0, regular expression is supported in the where clause with keyword `match` or `nmatch`. The regular expression is case insensitive.
## Regular Expression
@@ -342,11 +342,11 @@ The regular expression being used must be compliant with POSIX specification, pl
Regular expression can be used against only table names, i.e. `tbname`, and tags of binary/nchar types, but can't be used against data columns.
-The maximum length of regular expression string is 128 bytes. Configuration parameter `maxRegexStringLen` can be used to set the maximum allowed regular expression. It's a configuration parameter on client side, and will take in effect after restarting the client.
+The maximum length of regular expression string is 128 bytes. Configuration parameter `maxRegexStringLen` can be used to set the maximum allowed regular expression. It's a configuration parameter on the client side, and will take effect after restarting the client.
## JOIN
-From version 2.2.0.0, inner join is fully supported in TDengine. More specifically, the inner join between table and table, that between STable and STable, and that between sub query and sub query are supported.
+From version 2.2.0.0, inner join is fully supported in TDengine. More specifically, the inner join between table and table, between STable and STable, and between sub query and sub query are supported.
Only primary key, i.e. timestamp, can be used in the join operation between table and table. For example:
@@ -364,12 +364,12 @@ FROM temp_STable t1, temp_STable t2
WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;
```
-Similary, join operation can be performed on the result set of multiple sub queries.
+Similarly, join operations can be performed on the result set of multiple sub queries.
:::note
Restrictions on join operation:
-- The number of tables or STables in single join operation can't exceed 10.
+- The number of tables or STables in a single join operation can't exceed 10.
- `FILL` is not allowed in the query statement that includes JOIN operation.
- Arithmetic operation is not allowed on the result set of join operation.
- `GROUP BY` is not allowed on a part of tables that participate in join operation.
@@ -380,9 +380,9 @@ Restrictions on join operation:
## Nested Query
-Nested query is also called sub query, that means in a single SQL statement the result of inner query can be used as the data source of the outer query.
+Nested query is also called sub query. This means that in a single SQL statement the result of inner query can be used as the data source of the outer query.
-From 2.2.0.0, unassociated sub query can be used in the `FROM` clause. unassociated means the sub query doesn't use the parameters in the parent query. More specifically, in the `tb_name_list` of `SELECT` statement, an independent SELECT statement can be used. So a complete nested query looks like:
+From 2.2.0.0, unassociated sub query can be used in the `FROM` clause. Unassociated means the sub query doesn't use the parameters in the parent query. More specifically, in the `tb_name_list` of `SELECT` statement, an independent SELECT statement can be used. So a complete nested query looks like:
```SQL
SELECT ... FROM (SELECT ... FROM ...) ...;
@@ -390,14 +390,14 @@ SELECT ... FROM (SELECT ... FROM ...) ...;
:::info
-- Only one layer of nesting is allowed, that means no sub query is allowed in a sub query
-- The result set returned by the inner query will be used as a "virtual table" by the outer query, the "virtual table" can be renamed using `AS` keyword for easy reference in the outer query.
+- Only one layer of nesting is allowed, that means no sub query is allowed within a sub query
+- The result set returned by the inner query will be used as a "virtual table" by the outer query. The "virtual table" can be renamed using `AS` keyword for easy reference in the outer query.
- Sub query is not allowed in continuous query.
- JOIN operation is allowed between tables/STables inside both inner and outer queries. Join operation can be performed on the result set of the inner query.
- UNION operation is not allowed in either inner query or outer query.
-- The functionalities that can be used in the inner query is same as non-nested query.
- - `ORDER BY` inside the inner query doesn't make any sense but will slow down the query performance significantly, so please avoid such usage.
-- Compared to the non-nested query, the functionalities that can be used in the outer query have such restrictions as:
+- The functions that can be used in the inner query are the same as those that can be used in a non-nested query.
+ - `ORDER BY` inside the inner query is unnecessary and will slow down the query performance significantly. It is best to avoid the use of `ORDER BY` inside the inner query.
+- Compared to the non-nested query, the functionality that can be used in the outer query has the following restrictions:
- Functions
- If the result set returned by the inner query doesn't contain timestamp column, then functions relying on timestamp can't be used in the outer query, like `TOP`, `BOTTOM`, `FIRST`, `LAST`, `DIFF`.
- Functions that need to scan the data twice can't be used in the outer query, like `STDDEV`, `PERCENTILE`.
@@ -414,7 +414,7 @@ UNION ALL SELECT ...
[UNION ALL SELECT ...]
```
-`UNION ALL` operator can be used to combine the result set from multiple select statements as long as the result set of these select statements have exactly same columns. `UNION ALL` doesn't remove redundant rows from multiple result sets. In single SQL statement, at most 100 `UNION ALL` can be supported.
+`UNION ALL` operator can be used to combine the result set from multiple select statements as long as the result set of these select statements have exactly the same columns. `UNION ALL` doesn't remove redundant rows from multiple result sets. In a single SQL statement, at most 100 `UNION ALL` can be supported.
### Examples
@@ -442,8 +442,8 @@ The sum of col1 and col2 for rows later than 2018-06-01 08:00:00.000 and whose c
SELECT (col1 + col2) AS 'complex' FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' AND col2 > 1.2 LIMIT 10 OFFSET 5;
```
-The rows in the past 10 minutes and whose col2 is bigger than 3.14 are selected and output to the result file `/home/testoutpu.csv` with below SQL statement:
+The rows in the past 10 minutes and whose col2 is bigger than 3.14 are selected and output to the result file `/home/testoutput.csv` with below SQL statement:
```SQL
-SELECT COUNT(*) FROM tb1 WHERE ts >= NOW - 10m AND col2 > 3.14 >> /home/testoutpu.csv;
+SELECT COUNT(*) FROM tb1 WHERE ts >= NOW - 10m AND col2 > 3.14 >> /home/testoutput.csv;
```
diff --git a/docs-en/12-taos-sql/07-function.md b/docs-en/12-taos-sql/07-function.md
index 9db5f36f92735c659a3bfae84c67089c62d577a6..129b7eb0c35b4409e8003855fb4facacb8e0c830 100644
--- a/docs-en/12-taos-sql/07-function.md
+++ b/docs-en/12-taos-sql/07-function.md
@@ -1,1479 +1,1151 @@
---
title: Functions
+toc_max_heading_level: 4
---
-## Aggregate Functions
+## Single-Row Functions
-Aggregate query is supported in TDengine by following aggregate functions and selection functions.
+Single-Row functions return a result row for each row in the query result.
-### COUNT
+### Numeric Functions
-```
-SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause];
-```
+#### ABS
-**Description**:Get the number of rows or the number of non-null values in a table or a super table.
+```sql
+SELECT ABS(field_name) FROM { tb_name | stb_name } [WHERE clause]
+```
-**Return value type**:Long integer INT64
+**Description**: The absolute of a specific column.
-**Applicable column types**:All
+**Return value type**: UBIGINT if the input value is integer; DOUBLE if the input value is FLOAT/DOUBLE.
-**Applicable table types**: table, super table, sub table
+**Applicable data types**: Numeric types.
-**More explanation**:
+**Applicable table types**: table, STable.
-- Wildcard (\*) can be used to represent all columns, it's used to get the number of all rows
-- The number of non-NULL values will be returned if this function is used on a specific column
+**Applicable nested query**: Inner query and Outer query.
-**Examples**:
+**More explanations**:
+- Can't be used with aggregate functions.
-```
-taos> SELECT COUNT(*), COUNT(voltage) FROM meters;
- count(*) | count(voltage) |
-================================================
- 9 | 9 |
-Query OK, 1 row(s) in set (0.004475s)
+#### ACOS
-taos> SELECT COUNT(*), COUNT(voltage) FROM d1001;
- count(*) | count(voltage) |
-================================================
- 3 | 3 |
-Query OK, 1 row(s) in set (0.001075s)
+```sql
+SELECT ACOS(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
-### AVG
-
-```
-SELECT AVG(field_name) FROM tb_name [WHERE clause];
-```
+**Description**: The anti-cosine of a specific column
-**Description**:Get the average value of a column in a table or STable
+**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL
-**Return value type**:Double precision floating number
+**Applicable data types**: Numeric types.
-**Applicable column types**:Data types except for timestamp, binary, nchar and bool
+**Applicable table types**: table, STable
-**Applicable table types**:table, STable
+**Applicable nested query**: Inner query and Outer query
-**Examples**:
+**More explanations**:
+- Can't be used with aggregate functions
-```
-taos> SELECT AVG(current), AVG(voltage), AVG(phase) FROM meters;
- avg(current) | avg(voltage) | avg(phase) |
-====================================================================================
- 11.466666751 | 220.444444444 | 0.293333333 |
-Query OK, 1 row(s) in set (0.004135s)
+#### ASIN
-taos> SELECT AVG(current), AVG(voltage), AVG(phase) FROM d1001;
- avg(current) | avg(voltage) | avg(phase) |
-====================================================================================
- 11.733333588 | 219.333333333 | 0.316666673 |
-Query OK, 1 row(s) in set (0.000943s)
+```sql
+SELECT ASIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
-### TWA
-
-```
-SELECT TWA(field_name) FROM tb_name WHERE clause;
-```
+**Description**: The anti-sine of a specific column
-**Description**:Time weighted average on a specific column within a time range
+**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL
-**Return value type**:Double precision floating number
+**Applicable data types**: Numeric types.
-**Applicable column types**:Data types except for timestamp, binary, nchar and bool
+**Applicable table types**: table, STable
-**Applicable table types**:table, STable
+**Applicable nested query**: Inner query and Outer query
**More explanations**:
+- Can't be used with aggregate functions
-- From version 2.1.3.0, function TWA can be used on stable with `GROUP BY`, i.e. timelines generated by `GROUP BY tbname` on a STable.
-
-### IRATE
+#### ATAN
+```sql
+SELECT ATAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
-SELECT IRATE(field_name) FROM tb_name WHERE clause;
-```
-**Description**:instantaneous rate on a specific column. The last two samples in the specified time range are used to calculate instantaneous rate. If the last sample value is smaller, then only the last sample value is used instead of the difference between the last two sample values.
+**Description**: anti-tangent of a specific column
+
+**Description**: The anti-cosine of a specific column
+
+**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL
-**Return value type**:Double precision floating number
+**Applicable data types**: Numeric types.
-**Applicable column types**:Data types except for timestamp, binary, nchar and bool
+**Applicable table types**: table, STable
-**Applicable table types**:table, STable
+**Applicable nested query**: Inner query and Outer query
**More explanations**:
+- Can't be used with aggregate functions
-- From version 2.1.3.0, function IRATE can be used on stble with `GROUP BY`, i.e. timelines generated by `GROUP BY tbname` on a STable.
-
-### SUM
+#### CEIL
```
-SELECT SUM(field_name) FROM tb_name [WHERE clause];
+SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
-**Description**:The sum of a specific column in a table or STable
-
-**Return value type**:Double precision floating number or long integer
+**Description**: The rounded up value of a specific column
-**Applicable column types**:Data types except for timestamp, binary, nchar and bool
+**Return value type**: Same as the column being used
-**Applicable table types**:table, STable
+**Applicable data types**: Numeric types.
-**Examples**:
+**Applicable table types**: table, STable
-```
-taos> SELECT SUM(current), SUM(voltage), SUM(phase) FROM meters;
- sum(current) | sum(voltage) | sum(phase) |
-================================================================================
- 103.200000763 | 1984 | 2.640000001 |
-Query OK, 1 row(s) in set (0.001702s)
+**Applicable nested query**: Inner query and outer query
-taos> SELECT SUM(current), SUM(voltage), SUM(phase) FROM d1001;
- sum(current) | sum(voltage) | sum(phase) |
-================================================================================
- 35.200000763 | 658 | 0.950000018 |
-Query OK, 1 row(s) in set (0.000980s)
-```
+**More explanations**:
+- Arithmetic operation can be performed on the result of `ceil` function
+- Can't be used with aggregate functions
-### STDDEV
+#### COS
-```
-SELECT STDDEV(field_name) FROM tb_name [WHERE clause];
+```sql
+SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
-**Description**:Standard deviation of a specific column in a table or STable
+**Description**: The cosine of a specific column
-**Return value type**:Double precision floating number
+**Description**: The anti-cosine of a specific column
-**Applicable column types**:Data types except for timestamp, binary, nchar and bool
+**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL
-**Applicable table types**:table, STable (starting from version 2.0.15.1)
+**Applicable data types**: Numeric types.
-**Examples**:
+**Applicable table types**: table, STable
-```
-taos> SELECT STDDEV(current) FROM d1001;
- stddev(current) |
-============================
- 1.020892909 |
-Query OK, 1 row(s) in set (0.000915s)
-```
+**Applicable nested query**: Inner query and Outer query
-### LEASTSQUARES
+**More explanations**:
+- Can't be used with aggregate functions
+
+#### FLOOR
```
-SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause];
+SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
-**Description**: The linear regression function of the specified column and the timestamp column (primary key), `start_val` is the initial value and `step_val` is the step value.
-
-**Return value type**: A string in the format of "(slope, intercept)"
-
-**Applicable column types**: Data types except for timestamp, binary, nchar and bool
-
-**Applicable table types**: table only
-
-**Examples**:
+**Description**: The rounded down value of a specific column
-```
-taos> SELECT LEASTSQUARES(current, 1, 1) FROM d1001;
- leastsquares(current, 1, 1) |
-=====================================================
-{slop:1.000000, intercept:9.733334} |
-Query OK, 1 row(s) in set (0.000921s)
-```
+**More explanations**: The restrictions are same as those of the `CEIL` function.
-### MODE
+#### LOG
-```
-SELECT MODE(field_name) FROM tb_name [WHERE clause];
+```sql
+SELECT LOG(field_name, base) FROM { tb_name | stb_name } [WHERE clause]
```
-**Description**:The value which has the highest frequency of occurrence. NULL is returned if there are multiple values which have highest frequency of occurrence. It can't be used on timestamp column or tags.
+**Description**: The log of a specific with `base` as the radix
-**Return value type**:Same as the data type of the column being operated
+**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL
-**Applicable column types**:Data types except for timestamp
+**Applicable data types**: Numeric types.
-**More explanations**:Considering the number of returned result set is unpredictable, it's suggested to limit the number of unique values to 100,000, otherwise error will be returned.
+**Applicable table types**: table, STable
-**Applicable version**:From version 2.6.0.0
+**Applicable nested query**: Inner query and Outer query
-**Examples**:
+**More explanations**:
+- Can't be used with aggregate functions
-```
-taos> select voltage from d002;
- voltage |
-========================
- 1 |
- 1 |
- 2 |
- 19 |
-Query OK, 4 row(s) in set (0.003545s)
+#### POW
-taos> select mode(voltage) from d002;
- mode(voltage) |
-========================
- 1 |
-Query OK, 1 row(s) in set (0.019393s)
+```sql
+SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause]
```
-### HYPERLOGLOG
-
-```
-SELECT HYPERLOGLOG(field_name) FROM { tb_name | stb_name } [WHERE clause];
-```
+**Description**: The power of a specific column with `power` as the index
-**Description**:The cardinal number of a specific column is returned by using hyperloglog algorithm.
+**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL
-**Return value type**:Integer
+**Applicable data types**: Numeric types.
-**Applicable column types**:Any data type
+**Applicable table types**: table, STable
-**More explanations**: The benefit of using hyperloglog algorithm is that the memory usage is under control when the data volume is huge. However, when the data volume is very small, the result may be not accurate, it's recommented to use `select count(data) from (select unique(col) as data from table)` in this case.
+**Applicable nested query**: Inner query and Outer query
-**Applicable versions**:From version 2.6.0.0
+**More explanations**:
+- Can't be used with aggregate functions
-**Examples**:
+#### ROUND
```
-taos> select dbig from shll;
- dbig |
-========================
- 1 |
- 1 |
- 1 |
- NULL |
- 2 |
- 19 |
- NULL |
- 9 |
-Query OK, 8 row(s) in set (0.003755s)
-
-taos> select hyperloglog(dbig) from shll;
- hyperloglog(dbig)|
-========================
- 4 |
-Query OK, 1 row(s) in set (0.008388s)
+SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
-## Selection Functions
+**Description**: The rounded value of a specific column.
-When any selective function is used, timestamp column or tag columns including `tbname` can be specified to show that the selected value are from which rows.
+**More explanations**: The restrictions are same as `CEIL` function.
-### MIN
+#### SIN
-```
-SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];
+```sql
+SELECT SIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
-**Description**:The minimum value of a specific column in a table or STable
+**Description**: The sine of a specific column
-**Return value type**:Same as the data type of the column being operated
+**Description**: The anti-cosine of a specific column
-**Applicable column types**:Data types except for timestamp, binary, nchar and bool
+**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL
-**Applicable table types**:table, STable
+**Applicable data types**: Numeric types.
-**Examples**:
+**Applicable table types**: table, STable
-```
-taos> SELECT MIN(current), MIN(voltage) FROM meters;
- min(current) | min(voltage) |
-======================================
- 10.20000 | 218 |
-Query OK, 1 row(s) in set (0.001765s)
+**Applicable nested query**: Inner query and Outer query
-taos> SELECT MIN(current), MIN(voltage) FROM d1001;
- min(current) | min(voltage) |
-======================================
- 10.30000 | 218 |
-Query OK, 1 row(s) in set (0.000950s)
-```
+**More explanations**:
+- Can't be used with aggregate functions
-### MAX
+#### SQRT
+```sql
+SELECT SQRT(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
-SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause];
-```
-
-**Description**:The maximum value of a specific column of a table or STable
-**Return value type**:Same as the data type of the column being operated
+**Description**: The square root of a specific column
-**Applicable column types**:Data types except for timestamp, binary, nchar and bool
+**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL
-**Applicable table types**:table, STable
+**Applicable data types**: Numeric types.
-**Examples**:
+**Applicable table types**: table, STable
-```
-taos> SELECT MAX(current), MAX(voltage) FROM meters;
- max(current) | max(voltage) |
-======================================
- 13.40000 | 223 |
-Query OK, 1 row(s) in set (0.001123s)
+**Applicable nested query**: Inner query and Outer query
-taos> SELECT MAX(current), MAX(voltage) FROM d1001;
- max(current) | max(voltage) |
-======================================
- 12.60000 | 221 |
-Query OK, 1 row(s) in set (0.000987s)
-```
+**More explanations**:
+- Can't be used with aggregate functions
-### FIRST
+#### TAN
-```
-SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause];
+```sql
+SELECT TAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
-**Description**:The first non-null value of a specific column in a table or STable
+**Description**: The tangent of a specific column
-**Return value type**:Same as the column being operated
+**Description**: The anti-cosine of a specific column
-**Applicable column types**:Any data type
+**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL
-**Applicable table types**:table, STable
+**Applicable data types**: Numeric types.
-**More explanations**:
+**Applicable table types**: table, STable
-- FIRST(\*) can be used to get the first non-null value of all columns
-- NULL will be returned if all the values of the specified column are all NULL
-- No result will NOT be returned if all the columns in the result set are all NULL
+**Applicable nested query**: Inner query and Outer query
-**Examples**:
+**More explanations**:
+- Can't be used with aggregate functions
-```
-taos> SELECT FIRST(*) FROM meters;
- first(ts) | first(current) | first(voltage) | first(phase) |
-=========================================================================================
-2018-10-03 14:38:04.000 | 10.20000 | 220 | 0.23000 |
-Query OK, 1 row(s) in set (0.004767s)
+### String Functions
-taos> SELECT FIRST(current) FROM d1002;
- first(current) |
-=======================
- 10.20000 |
-Query OK, 1 row(s) in set (0.001023s)
-```
+String functiosn take strings as input and output numbers or strings.
-### LAST
+#### CHAR_LENGTH
```
-SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause];
+SELECT CHAR_LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
```
-**Description**:The last non-NULL value of a specific column in a table or STable
+**Description**: The length in number of characters of a string
-**Return value type**:Same as the column being operated
+**Return value type**: Integer
-**Applicable column types**:Any data type
+**Applicable data types**: VARCHAR or NCHAR
-**Applicable table types**:table, STable
+**Applicable table types**: table, STable
-**More explanations**:
+**Applicable nested query**: Inner query and Outer query
-- LAST(\*) can be used to get the last non-NULL value of all columns
-- If the values of a column in the result set are all NULL, NULL is returned for that column; if all columns in the result are all NULL, no result will be returned.
-- When it's used on a STable, if there are multiple values with the timestamp in the result set, one of them will be returned randomly and it's not guaranteed that the same value is returned if the same query is run multiple times.
+**More explanations**
-**Examples**:
+- If the input value is NULL, the output is NULL too
-```
-taos> SELECT LAST(*) FROM meters;
- last(ts) | last(current) | last(voltage) | last(phase) |
-========================================================================================
-2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 |
-Query OK, 1 row(s) in set (0.001452s)
+#### CONCAT
-taos> SELECT LAST(current) FROM d1002;
- last(current) |
-=======================
- 10.30000 |
-Query OK, 1 row(s) in set (0.000843s)
+```sql
+SELECT CONCAT(str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause]
```
-### TOP
+**Description**: The concatenation result of two or more strings, the number of strings to be concatenated is at least 2 and at most 8
-```
-SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
-```
+**Return value type**: If all input strings are VARCHAR type, the result is VARCHAR type too. If any one of input strings is NCHAR type, then the result is NCHAR.
-**Description**: The greatest _k_ values of a specific column in a table or STable. If a value has multiple occurrences in the column but counting all of them in will exceed the upper limit _k_, then a part of them will be returned randomly.
+**Applicable data types**: VARCHAR, NCHAR. At least 2 input strings are requird, and at most 8 input strings are allowed.
-**Return value type**:Same as the column being operated
+**Applicable table types**: table, STable
-**Applicable column types**:Data types except for timestamp, binary, nchar and bool
+**Applicable nested query**: Inner query and Outer query
-**Applicable table types**:table, STable
+#### CONCAT_WS
-**More explanations**:
+```
+SELECT CONCAT_WS(separator, str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause]
+```
-- _k_ must be in range [1,100]
-- The timestamp associated with the selected values are returned too
-- Can't be used with `FILL`
+**Description**: The concatenation result of two or more strings with separator, the number of strings to be concatenated is at least 3 and at most 9
-**Examples**:
+**Return value type**: If all input strings are VARCHAR type, the result is VARCHAR type too. If any one of input strings is NCHAR type, then the result is NCHAR.
-```
-taos> SELECT TOP(current, 3) FROM meters;
- ts | top(current, 3) |
-=================================================
-2018-10-03 14:38:15.000 | 12.60000 |
-2018-10-03 14:38:16.600 | 13.40000 |
-2018-10-03 14:38:16.800 | 12.30000 |
-Query OK, 3 row(s) in set (0.001548s)
+**Applicable data types**: VARCHAR, NCHAR. At least 3 input strings are requird, and at most 9 input strings are allowed.
-taos> SELECT TOP(current, 2) FROM d1001;
- ts | top(current, 2) |
-=================================================
-2018-10-03 14:38:15.000 | 12.60000 |
-2018-10-03 14:38:16.800 | 12.30000 |
-Query OK, 2 row(s) in set (0.000810s)
-```
+**Applicable table types**: table, STable
-### BOTTOM
+**Applicable nested query**: Inner query and Outer query
-```
-SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
-```
+**More explanations**:
-**Description**:The least _k_ values of a specific column in a table or STable. If a value has multiple occurrences in the column but counting all of them in will exceed the upper limit _k_, then a part of them will be returned randomly.
+- If the value of `separator` is NULL, the output is NULL. If the value of `separator` is not NULL but other input are all NULL, the output is empty string.
-**Return value type**:Same as the column being operated
+#### LENGTH
-**Applicable column types**: Data types except for timestamp, binary, nchar and bool
+```
+SELECT LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
+```
-**Applicable table types**: table, STable
+**Description**: The length in bytes of a string
-**More explanations**:
+**Return value type**: Integer
-- _k_ must be in range [1,100]
-- The timestamp associated with the selected values are returned too
-- Can't be used with `FILL`
+**Applicable data types**: VARCHAR or NCHAR
+**Applicable table types**: table, STable
-**Examples**:
+**Applicable nested query**: Inner query and Outer query
-```
-taos> SELECT BOTTOM(voltage, 2) FROM meters;
- ts | bottom(voltage, 2) |
-===============================================
-2018-10-03 14:38:15.000 | 218 |
-2018-10-03 14:38:16.650 | 218 |
-Query OK, 2 row(s) in set (0.001332s)
+**More explanations**
-taos> SELECT BOTTOM(current, 2) FROM d1001;
- ts | bottom(current, 2) |
-=================================================
-2018-10-03 14:38:05.000 | 10.30000 |
-2018-10-03 14:38:16.800 | 12.30000 |
-Query OK, 2 row(s) in set (0.000793s)
-```
+- If the input value is NULL, the output is NULL too
-### PERCENTILE
+#### LOWER
```
-SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause];
+SELECT LOWER(str|column) FROM { tb_name | stb_name } [WHERE clause]
```
-**Description**: The value whose rank in a specific column matches the specified percentage. If such a value matching the specified percentage doesn't exist in the column, an interpolation value will be returned.
+**Description**: Convert the input string to lower case
-**Return value type**: Double precision floating point
+**Return value type**: Same as input
-**Applicable column types**: Data types except for timestamp, binary, nchar and bool
+**Applicable data types**: VARCHAR or NCHAR
-**Applicable table types**: table
+**Applicable table types**: table, STable
-**More explanations**: _P_ is in range [0,100], when _P_ is 0, the result is same as using function MIN; when _P_ is 100, the result is same as function MAX.
+**Applicable nested query**: Inner query and Outer query
-**Examples**:
+**More explanations**
-```
-taos> SELECT PERCENTILE(current, 20) FROM d1001;
-percentile(current, 20) |
-============================
- 11.100000191 |
-Query OK, 1 row(s) in set (0.000787s)
-```
+- If the input value is NULL, the output is NULL too
-### APERCENTILE
+#### LTRIM
```
-SELECT APERCENTILE(field_name, P[, algo_type])
-FROM { tb_name | stb_name } [WHERE clause]
+SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
```
-**Description**: Similar to `PERCENTILE`, but a simulated result is returned
+**Description**: Remove the left leading blanks of a string
-**Return value type**: Double precision floating point
+**Return value type**: Same as input
-**Applicable column types**: Data types except for timestamp, binary, nchar and bool
+**Applicable data types**: VARCHAR or NCHAR
**Applicable table types**: table, STable
+**Applicable nested query**: Inner query and Outer query
+
**More explanations**
-- _P_ is in range [0,100], when _P_ is 0, the result is same as using function MIN; when _P_ is 100, the result is same as function MAX.
-- **algo_type** can only be input as `default` or `t-digest`, if it's not specified `default` will be used, i.e. `apercentile(column_name, 50)` is same as `apercentile(column_name, 50, "default")`.
-- When `t-digest` is used, `t-digest` sampling is used to calculate. It can be used from version 2.2.0.0.
+- If the input value is NULL, the output is NULL too
-**Nested query**: It can be used in both the outer query and inner query in a nested query.
+#### RTRIM
```
-taos> SELECT APERCENTILE(current, 20) FROM d1001;
-apercentile(current, 20) |
-============================
- 10.300000191 |
-Query OK, 1 row(s) in set (0.000645s)
+SELECT RTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
+```
-taos> select apercentile (count, 80, 'default') from stb1;
- apercentile (c0, 80, 'default') |
-==================================
- 601920857.210056424 |
-Query OK, 1 row(s) in set (0.012363s)
+**Description**: Remove the right tailing blanks of a string
-taos> select apercentile (count, 80, 't-digest') from stb1;
- apercentile (c0, 80, 't-digest') |
-===================================
- 605869120.966666579 |
-Query OK, 1 row(s) in set (0.011639s)
-```
+**Return value type**: Same as input
-### LAST_ROW
+**Applicable data types**: VARCHAR or NCHAR
+
+**Applicable table types**: table, STable
+
+**Applicable nested query**: Inner query and Outer query
+
+**More explanations**
+
+- If the input value is NULL, the output is NULL too
+
+#### SUBSTR
```
-SELECT LAST_ROW(field_name) FROM { tb_name | stb_name };
+SELECT SUBSTR(str,pos[,len]) FROM { tb_name | stb_name } [WHERE clause]
```
-**Description**: The last row of a table or STable
+**Description**: The sub-string starting from `pos` with length of `len` from the original string `str`
-**Return value type**: Same as the column being operated
+**Return value type**: Same as input
-**Applicable column types**: Any data type
+**Applicable data types**: VARCHAR or NCHAR
**Applicable table types**: table, STable
+**Applicable nested query**: Inner query and Outer query
+
**More explanations**:
-- When it's used against a STable, multiple rows with the same and largest timestamp may exist, in this case one of them is returned randomly and it's not guaranteed that the result is same if the query is run multiple times.
-- Can't be used with `INTERVAL`.
+- If the input is NULL, the output is NULL
+- Parameter `pos` can be an positive or negative integer; If it's positive, the starting position will be counted from the beginning of the string; if it's negative, the starting position will be counted from the end of the string.
+- If `len` is not specified, it means from `pos` to the end.
-**Examples**:
+#### UPPER
```
- taos> SELECT LAST_ROW(current) FROM meters;
- last_row(current) |
- =======================
- 12.30000 |
- Query OK, 1 row(s) in set (0.001238s)
-
- taos> SELECT LAST_ROW(current) FROM d1002;
- last_row(current) |
- =======================
- 10.30000 |
- Query OK, 1 row(s) in set (0.001042s)
+SELECT UPPER(str|column) FROM { tb_name | stb_name } [WHERE clause]
```
-### INTERP [From version 2.3.1]
-
-```
-SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ RANGE(timestamp1,timestamp2) ] [EVERY(interval)] [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})];
-```
+**Description**: Convert the input string to upper case
-**Description**: The value that matches the specified timestamp range is returned, if existing; or an interpolation value is returned.
+**Return value type**: Same as input
-**Return value type**: same as the column being operated
+**Applicable data types**: VARCHAR or NCHAR
-**Applicable column types**: Numeric data types
+**Applicable table types**: table, STable
-**Applicable table types**: table, STable, nested query
+**Applicable nested query**: Inner query and Outer query
**More explanations**
-- `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists an interpolation value will be returned based on `FILL` parameter.
-- The input data of `INTERP` is the value of the specified column, `where` can be used to filter the original data. If no `where` condition is specified then all original data is the input.
-- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1<=timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified. If `RANGE` is not specified, then the timestamp of the first row that matches the filter condition is treated as timestamp1, the timestamp of the last row that matches the filter condition is treated as timestamp2.
-- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified `EVERY` parameter. If `EVERY` parameter is not used, the time windows will be considered as no ending timestamp, i.e. there is only one time window from timestamp1.
-- Interpolation is performed based on `FILL` parameter. No interpolation is performed if `FILL` is not used, that means either the original data that matches is returned or nothing is returned.
-- `INTERP` can only be used to interpolate in single timeline. So it must be used with `group by tbname` when it's used on a STable. It can't be used with `GROUP BY` when it's used in the inner query of a nested query.
-- The result of `INTERP` is not influenced by `ORDER BY TIMESTAMP`, which impacts the output order only..
-
-**Examples**: Based on the `meters` schema used throughout the documents
+- If the input value is NULL, the output is NULL too
-- Single point linear interpolation between "2017-07-14 18:40:00" and "2017-07-14 18:40:00:
+### Conversion Functions
-```
- taos> SELECT INTERP(current) FROM t1 RANGE('2017-7-14 18:40:00','2017-7-14 18:40:00') FILL(LINEAR);
-```
+This kind of functions convert from one data type to another one.
-- Get an original data every 5 seconds, no interpolation, between "2017-07-14 18:00:00" and "2017-07-14 19:00:00:
+#### CAST
-```
- taos> SELECT INTERP(current) FROM t1 RANGE('2017-7-14 18:00:00','2017-7-14 19:00:00') EVERY(5s);
+```sql
+SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause]
```
-- Linear interpolation every 5 seconds between "2017-07-14 18:00:00" and "2017-07-14 19:00:00:
+**Description**: It's used for type casting. The input parameter `expression` can be data columns, constants, scalar functions or arithmetic between them.
-```
- taos> SELECT INTERP(current) FROM t1 RANGE('2017-7-14 18:00:00','2017-7-14 19:00:00') EVERY(5s) FILL(LINEAR);
-```
+**Return value type**: The type specified by parameter `type_name`
-- Backward interpolation every 5 seconds
+**Applicable data types**:
-```
- taos> SELECT INTERP(current) FROM t1 EVERY(5s) FILL(NEXT);
-```
+- Parameter `expression` can be any data type except for JSON
+- The output data type specified by `type_name` can only be one of BIGINT/VARCHAR(N)/TIMESTAMP/NCHAR(N)/BIGINT UNSIGNED
-- Linear interpolation every 5 seconds between "2017-07-14 17:00:00" and "2017-07-14 20:00:00"
+**More explanations**:
-```
- taos> SELECT INTERP(current) FROM t1 where ts >= '2017-07-14 17:00:00' and ts <= '2017-07-14 20:00:00' RANGE('2017-7-14 18:00:00','2017-7-14 19:00:00') EVERY(5s) FILL(LINEAR);
-```
+- Error will be reported for unsupported type casting
+- NULL will be returned if the input value is NULL
+- Some values of some supported data types may not be casted, below are known issues:
+ 1)When casting VARCHAR/NCHAR to BIGINT/BIGINT UNSIGNED, some characters may be treated as illegal, for example "a" may be converted to 0.
+ 2)There may be overflow when casting singed integer or TIMESTAMP to unsigned BIGINT
+ 3)There may be overflow when casting unsigned BIGINT to BIGINT
+ 4)There may be overflow when casting FLOAT/DOUBLE to BIGINT or UNSIGNED BIGINT
-### INTERP [Prior to version 2.3.1]
+#### TO_ISO8601
-```
-SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})];
+```sql
+SELECT TO_ISO8601(ts_val | ts_col) FROM { tb_name | stb_name } [WHERE clause];
```
-**Description**: The value of a specific column that matches the specified time slice
+**Description**: The ISO8601 date/time format converted from a UNIX timestamp, plus the timezone of the client side system
-**Return value type**: Same as the column being operated
+**Return value type**: VARCHAR
-**Applicable column types**: Numeric data type
+**Applicable column types**: TIMESTAMP, constant or a column
**Applicable table types**: table, STable
**More explanations**:
-- It can be used from version 2.0.15.0
-- Time slice must be specified. If there is no data matching the specified time slice, interpolation is performed based on `FILL` parameter. Conditions such as tags or `tbname` can be used `Where` clause can be used to filter data.
-- The timestamp specified must be within the time range of the data rows of the table or STable. If it is beyond the valid time range, nothing is returned even with `FILL` parameter.
-- `INTERP` can be used to query only single time point once. `INTERP` can be used with `EVERY` to get the interpolation value every time interval.
-- **Examples**:
+- If the input is UNIX timestamp constant, the precision of the returned value is determined by the digits of the input timestamp
+- If the input is a column of TIMESTAMP type, The precision of the returned value is same as the precision set for the current data base in use
+
+#### TO_JSON
-```
- taos> SELECT INTERP(*) FROM meters WHERE ts='2017-7-14 18:40:00.004';
- interp(ts) | interp(current) | interp(voltage) | interp(phase) |
- ==========================================================================================
- 2017-07-14 18:40:00.004 | 9.84020 | 216 | 0.32222 |
- Query OK, 1 row(s) in set (0.002652s)
+```sql
+SELECT TO_JSON(str_literal) FROM { tb_name | stb_name } [WHERE clause];
```
-If there is not any data corresponding to the specified timestamp, an interpolation value is returned if interpolation policy is specified by `FILL` parameter; or nothing is returned\
+**Description**: Convert a JSON string to a JSON body。
-```
- taos> SELECT INTERP(*) FROM meters WHERE tbname IN ('d636') AND ts='2017-7-14 18:40:00.005';
- Query OK, 0 row(s) in set (0.004022s)
+**Return value type**: JSON
- taos> SELECT INTERP(*) FROM meters WHERE tbname IN ('d636') AND ts='2017-7-14 18:40:00.005' FILL(PREV);
- interp(ts) | interp(current) | interp(voltage) | interp(phase) |
- ==========================================================================================
- 2017-07-14 18:40:00.005 | 9.88150 | 217 | 0.32500 |
- Query OK, 1 row(s) in set (0.003056s)
-```
+**Applicable column types**: JSON string, in the format like '{ "literal" : literal }'. '{}' is NULL value. keys in the string must be string constants, values can be constants of numeric types, bool, string or NULL. Escaping characters are not allowed in the JSON string.
-Interpolation is performed every 5 milliseconds between `['2017-7-14 18:40:00', '2017-7-14 18:40:00.014']`
+**Applicable table types**: table, STable
-```
- taos> SELECT INTERP(current) FROM d636 WHERE ts>='2017-7-14 18:40:00' AND ts<='2017-7-14 18:40:00.014' EVERY(5a);
- ts | interp(current) |
- =================================================
- 2017-07-14 18:40:00.000 | 10.04179 |
- 2017-07-14 18:40:00.010 | 10.16123 |
- Query OK, 2 row(s) in set (0.003487s)
-```
+**Applicable nested query**: Inner query and Outer query.
-### TAIL
+#### TO_UNIXTIMESTAMP
-```
-SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause];
+```sql
+SELECT TO_UNIXTIMESTAMP(datetime_string | ts_col) FROM { tb_name | stb_name } [WHERE clause];
```
-**Description**: The next _k_ rows are returned after skipping the last `offset_val` rows, NULL values are not ignored. `offset_val` is optional parameter. When it's not specified, the last _k_ rows are returned. When `offset_val` is used, the effect is same as `order by ts desc LIMIT k OFFSET offset_val`.
+**Description**: UNIX timestamp converted from a string of date/time format
-**Parameter value range**: k: [1,100] offset_val: [0,100]
+**Return value type**: Long integer
-**Return value type**: Same as the column being operated
+**Applicable column types**: Constant or column of VARCHAR/NCHAR
-**Applicable column types**: Any data type except form timestamp, i.e. the primary key
+**Applicable table types**: table, STable
-**Applicable versions**: From version 2.6.0.0
+**More explanations**:
-**Examples**:
+- The input string must be compatible with ISO8601/RFC3339 standard, 0 will be returned if the string can't be converted
+- The precision of the returned timestamp is same as the precision set for the current data base in use
-```
-taos> select ts,dbig from tail2;
- ts | dbig |
-==================================================
-2021-10-15 00:31:33.000 | 1 |
-2021-10-17 00:31:31.000 | NULL |
-2021-12-24 00:31:34.000 | 2 |
-2022-01-01 08:00:05.000 | 19 |
-2022-01-01 08:00:06.000 | NULL |
-2022-01-01 08:00:07.000 | 9 |
-Query OK, 6 row(s) in set (0.001952s)
+### DateTime Functions
-taos> select tail(dbig,2,2) from tail2;
-ts | tail(dbig,2,2) |
-==================================================
-2021-12-24 00:31:34.000 | 2 |
-2022-01-01 08:00:05.000 | 19 |
-Query OK, 2 row(s) in set (0.002307s)
-```
+This kind of functiosn oeprate on timestamp data. NOW(), TODAY() and TIMEZONE() are executed only once even though they may occurr multiple times in a single SQL statement.
-### UNIQUE
+#### NOW
-```
-SELECT UNIQUE(field_name) FROM {tb_name | stb_name} [WHERE clause];
+```sql
+SELECT NOW() FROM { tb_name | stb_name } [WHERE clause];
+SELECT select_expr FROM { tb_name | stb_name } WHERE ts_col cond_operatior NOW();
+INSERT INTO tb_name VALUES (NOW(), ...);
```
-**Description**: The values that occur the first time in the specified column. The effect is similar to `distinct` keyword, but it can also be used to match tags or timestamp.
+**Description**: The current time of the client side system
-**Return value type**: Same as the column or tag being operated
+**Return value type**: TIMESTAMP
-**Applicable column types**: Any data types except for timestamp
+**Applicable column types**: TIMESTAMP only
-**Applicable versions**: From version 2.6.0.0
+**Applicable table types**: table, STable
**More explanations**:
-- It can be used against table or STable, but can't be used together with time window, like `interval`, `state_window` or `session_window` .
-- Considering the number of result sets is unpredictable, it's suggested to limit the distinct values under 100,000 to control the memory usage, otherwise error will be returned.
+- Add and Subtract operation can be performed, for example NOW() + 1s, the time unit can be:
+ b(nanosecond), u(microsecond), a(millisecond)), s(second), m(minute), h(hour), d(day), w(week)
+- The precision of the returned timestamp is same as the precision set for the current data base in use
-**Examples**:
+#### TIMEDIFF
+```sql
+SELECT TIMEDIFF(ts_val1 | datetime_string1 | ts_col1, ts_val2 | datetime_string2 | ts_col2 [, time_unit]) FROM { tb_name | stb_name } [WHERE clause];
```
-taos> select ts,voltage from unique1;
- ts | voltage |
-==================================================
-2021-10-17 00:31:31.000 | 1 |
-2022-01-24 00:31:31.000 | 1 |
-2021-10-17 00:31:31.000 | 1 |
-2021-12-24 00:31:31.000 | 2 |
-2022-01-01 08:00:01.000 | 19 |
-2021-10-17 00:31:31.000 | NULL |
-2022-01-01 08:00:02.000 | NULL |
-2022-01-01 08:00:03.000 | 9 |
-Query OK, 8 row(s) in set (0.003018s)
-taos> select unique(voltage) from unique1;
-ts | unique(voltage) |
-==================================================
-2021-10-17 00:31:31.000 | 1 |
-2021-10-17 00:31:31.000 | NULL |
-2021-12-24 00:31:31.000 | 2 |
-2022-01-01 08:00:01.000 | 19 |
-2022-01-01 08:00:03.000 | 9 |
-Query OK, 5 row(s) in set (0.108458s)
-```
+**Description**: The difference between two timestamps, and rounded to the time unit specified by `time_unit`
-## Scalar functions
+**Return value type**: Long Integer
-### DIFF
+**Applicable column types**: UNIX timestamp constant, string constant of date/time format, or a column of TIMESTAMP type
+
+**Applicable table types**: table, STable
+
+**More explanations**:
+
+- Time unit specified by `time_unit` can be:
+ 1u(microsecond),1a(millisecond),1s(second),1m(minute),1h(hour),1d(day).
+- The precision of the returned timestamp is same as the precision set for the current data base in use
+
+#### TIMETRUNCATE
```sql
-SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHERE clause];
+SELECT TIMETRUNCATE(ts_val | datetime_string | ts_col, time_unit) FROM { tb_name | stb_name } [WHERE clause];
```
-**Description**: The different of each row with its previous row for a specific column. `ignore_negative` can be specified as 0 or 1, the default value is 1 if it's not specified. `1` means negative values are ignored.
+**Description**: Truncate the input timestamp with unit specified by `time_unit`
-**Return value type**: Same as the column being operated
+**Return value type**: TIMESTAMP
-**Applicable column types**: Data types except for timestamp, binary, nchar and bool
+**Applicable column types**: UNIX timestamp constant, string constant of date/time format, or a column of timestamp
**Applicable table types**: table, STable
**More explanations**:
-- The number of result rows is the number of rows subtracted by one, no output for the first row
-- From version 2.1.30, `DIFF` can be used on STable with `GROUP by tbname`
-- From version 2.6.0, `ignore_negative` parameter is supported
+- Time unit specified by `time_unit` can be:
+ 1u(microsecond),1a(millisecond),1s(second),1m(minute),1h(hour),1d(day).
+- The precision of the returned timestamp is same as the precision set for the current data base in use
-**Examples**:
+#### TIMEZONE
```sql
-taos> SELECT DIFF(current) FROM d1001;
- ts | diff(current) |
-=================================================
-2018-10-03 14:38:15.000 | 2.30000 |
-2018-10-03 14:38:16.800 | -0.30000 |
-Query OK, 2 row(s) in set (0.001162s)
+SELECT TIMEZONE() FROM { tb_name | stb_name } [WHERE clause];
```
-### DERIVATIVE
+**Description**: The timezone of the client side system
-```
-SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHERE clause];
+**Return value type**: VARCHAR
+
+**Applicable column types**: None
+
+**Applicable table types**: table, STable
+
+#### TODAY
+
+```sql
+SELECT TODAY() FROM { tb_name | stb_name } [WHERE clause];
+SELECT select_expr FROM { tb_name | stb_name } WHERE ts_col cond_operatior TODAY()];
+INSERT INTO tb_name VALUES (TODAY(), ...);
```
-**Description**: The derivative of a specific column. The time rage can be specified by parameter `time_interval`, the minimum allowed time range is 1 second (1s); the value of `ignore_negative` can be 0 or 1, 1 means negative values are ignored.
+**Description**: The timestamp of 00:00:00 of the client side system
-**Return value type**: Double precision floating point
+**Return value type**: TIMESTAMP
-**Applicable column types**: Data types except for timestamp, binary, nchar and bool
+**Applicable column types**: TIMESTAMP only
**Applicable table types**: table, STable
**More explanations**:
-- It is available from version 2.1.3.0, the number of result rows is the number of total rows in the time range subtracted by one, no output for the first row.\
-- It can be used together with `GROUP BY tbname` against a STable.
+- Add and Subtract operation can be performed, for example NOW() + 1s, the time unit can be:
+ b(nanosecond), u(microsecond), a(millisecond)), s(second), m(minute), h(hour), d(day), w(week)
+- The precision of the returned timestamp is same as the precision set for the current data base in use
-**Examples**:
+## Aggregate Functions
-```
-taos> select derivative(current, 10m, 0) from t1;
- ts | derivative(current, 10m, 0) |
-========================================================
- 2021-08-20 10:11:22.790 | 0.500000000 |
- 2021-08-20 11:11:22.791 | 0.166666620 |
- 2021-08-20 12:11:22.791 | 0.000000000 |
- 2021-08-20 13:11:22.792 | 0.166666620 |
- 2021-08-20 14:11:22.792 | -0.666666667 |
-Query OK, 5 row(s) in set (0.004883s)
-```
+Aggregate functions return single result row for each group in the query result set. Groups are determined by `GROUP BY` clause or time window clause if they are used; or the whole result is considered a group if neither of them is used.
-### SPREAD
+### AVG
```
-SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause];
+SELECT AVG(field_name) FROM tb_name [WHERE clause];
```
-**Description**: The difference between the max and the min of a specific column
+**Description**: Get the average value of a column in a table or STable
-**Return value type**: Double precision floating point
+**Return value type**: Double precision floating number
-**Applicable column types**: Data types except for binary, nchar, and bool
+**Applicable column types**: Numeric type
**Applicable table types**: table, STable
-**More explanations**: Can be used on a column of TIMESTAMP type, the result is the time range size.
-
-**Examples**:
+### COUNT
```
-taos> SELECT SPREAD(voltage) FROM meters;
- spread(voltage) |
-============================
- 5.000000000 |
-Query OK, 1 row(s) in set (0.001792s)
-
-taos> SELECT SPREAD(voltage) FROM d1001;
- spread(voltage) |
-============================
- 3.000000000 |
-Query OK, 1 row(s) in set (0.000836s)
+SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause];
```
-### CEIL
+**Description**: Get the number of rows or the number of non-null values in a table or a super table.
-```
-SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause];
-```
+**Return value type**: Long integer INT64
-**Description**: The round up value of a specific column
+**Applicable column types**: All
-**Return value type**: Same as the column being used
+**Applicable table types**: table, super table, sub table
-**Applicable data types**: Data types except for timestamp, binary, nchar, bool
+**More explanation**:
-**Applicable table types**: table, STable
+- Wildcard (\*) is used to represent all columns. The `COUNT` function is used to get the total number of all rows.
+- The number of non-NULL values will be returned if this function is used on a specific column.
-**Applicable nested query**: inner query and outer query
+### ELAPSED
-**More explanations**:
+```mysql
+SELECT ELAPSED(field_name[, time_unit]) FROM { tb_name | stb_name } [WHERE clause] [INTERVAL(interval [, offset]) [SLIDING sliding]];
+```
-- Can't be used on any tags of any type
-- Arithmetic operation can be performed on the result of `ceil` function
-- Can't be used with aggregate functions
+**Description**:`elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with `INTERVAL` clause, the returned result is the calcualted time length within each time window. If it's used without `INTERVAL` caluse, the returned result is the calculated time length within the specified time range. Please be noted that the return value of `elapsed` is the number of `time_unit` in the calculated time length.
-### FLOOR
+**Return value type**:Double
-```
-SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];
-```
+**Applicable Column type**:Timestamp
-**Description**: The round down value of a specific column
+**Applicable tables**: table, STable, outter in nested query
-**More explanations**: The restrictions are same as `CEIL` function.
+**Explanations**:
-### ROUND
+- `field_name` parameter can only be the first column of a table, i.e. timestamp primary key.
+- The minimum value of `time_unit` is the time precision of the database. If `time_unit` is not specified, the time precision of the database is used as the default ime unit.
+- It can be used with `INTERVAL` to get the time valid time length of each time window. Please be noted that the return value is same as the time window for all time windows except for the first and the last time window.
+- `order by asc/desc` has no effect on the result.
+- `group by tbname` must be used together when `elapsed` is used against a STable.
+- `group by` must NOT be used together when `elapsed` is used against a table or sub table.
+- When used in nested query, it's only applicable when the inner query outputs an implicit timestamp column as the primary key. For example, `select elapsed(ts) from (select diff(value) from sub1)` is legal usage while `select elapsed(ts) from (select * from sub1)` is not.
+- It can't be used with `leastsquares`, `diff`, `derivative`, `top`, `bottom`, `last_row`, `interp`.
+
+### LEASTSQUARES
```
-SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
+SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause];
```
-**Description**: The round value of a specific column.
-
-**More explanations**: The restrictions are same as `CEIL` function.
-
-### CSUM
+**Description**: The linear regression function of the specified column and the timestamp column (primary key), `start_val` is the initial value and `step_val` is the step value.
-```sql
- SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause]
-```
+**Return value type**: A string in the format of "(slope, intercept)"
-**Description**: The cumulative sum of each row for a specific column. The number of output rows is same as that of the input rows.
+**Applicable column types**: Numeric types
-**Return value type**: Long integer for integers; Double for floating points. Timestamp is returned for each row.
+**Applicable table types**: table only
-**Applicable data types**: Data types except for timestamp, binary, nchar, and bool
+### MODE
-**Applicable table types**: table, STable
+```
+SELECT MODE(field_name) FROM tb_name [WHERE clause];
+```
-**Applicable nested query**: Inner query and Outer query
+**Description**:The value which has the highest frequency of occurrence. NULL is returned if there are multiple values which have highest frequency of occurrence. It can't be used on timestamp column.
-**More explanations**:
+**Return value type**:Same as the data type of the column being operated upon
-- Can't be used on tags when it's used on STable
-- Arithmetic operation can't be performed on the result of `csum` function
-- Can only be used with aggregate functions
-- `Group by tbname` must be used together on a STable to force the result on a single timeline
+**Applicable column types**:Data types except for timestamp
-**Applicable versions**: From 2.3.0.x
+**More explanations**:Considering the number of returned result set is unpredictable, it's suggested to limit the number of unique values to 100,000, otherwise error will be returned.
-### MAVG
+### SPREAD
-```sql
- SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
+```
+SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
-**Description**: The moving average of continuous _k_ values of a specific column. If the number of input rows is less than _k_, nothing is returned. The applicable range is _k_ is [1,1000].
+**Description**: The difference between the max and the min of a specific column
**Return value type**: Double precision floating point
-**Applicable data types**: Data types except for timestamp, binary, nchar, and bool
-
-**Applicable nested query**: Inner query and Outer query
+**Applicable column types**: Numeric types
**Applicable table types**: table, STable
-**More explanations**:
-
-- Arithmetic operation can't be performed on the result of `MAVG`.
-- Can only be used with data columns, can't be used with tags.
-- Can't be used with aggregate functions.
-- Must be used with `GROUP BY tbname` when it's used on a STable to force the result on each single timeline.
-
-**Applicable versions**: From 2.3.0.x
+**More explanations**: Can be used on a column of TIMESTAMP type, the result is the time range size.
-### SAMPLE
+### STDDEV
-```sql
- SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
+```
+SELECT STDDEV(field_name) FROM tb_name [WHERE clause];
```
-**Description**: _k_ sampling values of a specific column. The applicable range of _k_ is [1,10000]
+**Description**: Standard deviation of a specific column in a table or STable
-**Return value type**: Same as the column being operated plus the associated timestamp
+**Return value type**: Double precision floating number
-**Applicable data types**: Any data type except for tags of STable
+**Applicable column types**: Numeric types
**Applicable table types**: table, STable
-**Applicable nested query**: Inner query and Outer query
+### SUM
-**More explanations**:
-
-- Arithmetic operation can't be operated on the result of `SAMPLE` function
-- Must be used with `Group by tbname` when it's used on a STable to force the result on each single timeline
-
-**Applicable versions**: From 2.3.0.x
-
-### ASIN
-
-```sql
-SELECT ASIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
+```
+SELECT SUM(field_name) FROM tb_name [WHERE clause];
```
-**Description**: The anti-sine of a specific column
+**Description**: The sum of a specific column in a table or STable
-**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL
+**Return value type**: Double precision floating number or long integer
-**Applicable data types**: Data types except for timestamp, binary, nchar, bool
+**Applicable column types**: Numeric types
**Applicable table types**: table, STable
-**Applicable nested query**: Inner query and Outer query
-
-**Applicable versions**: From 2.6.0.0
-
-**More explanations**:
-
-- Can't be used with tags
-- Can't be used with aggregate functions
-
-### ACOS
+### HYPERLOGLOG
-```sql
-SELECT ACOS(field_name) FROM { tb_name | stb_name } [WHERE clause]
+```
+SELECT HYPERLOGLOG(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
-**Description**: The anti-cosine of a specific column
-
-**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL
+**Description**:The cardinal number of a specific column is returned by using hyperloglog algorithm.
-**Applicable data types**: Data types except for timestamp, binary, nchar, bool
+**Return value type**:Integer
-**Applicable table types**: table, STable
+**Applicable column types**:Any data type
-**Applicable nested query**: Inner query and Outer query
+**More explanations**: The benefit of using hyperloglog algorithm is that the memory usage is under control when the data volume is huge. However, when the data volume is very small, the result may be not accurate, it's recommented to use `select count(data) from (select unique(col) as data from table)` in this case.
-**Applicable versions**: From 2.6.0.0
+### HISTOGRAM
-**More explanations**:
+```
+SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_name [WHERE clause];
+```
-- Can't be used with tags
-- Can't be used with aggregate functions
+**Description**:Returns count of data points in user-specified ranges.
-### ATAN
+**Return value type**:Double or INT64, depends on normalized parameter settings.
-```sql
-SELECT ATAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
-```
+**Applicable column type**:Numerical types.
-**Description**: anti-tangent of a specific column
+**Applicable table types**: table, STable
-**Description**: The anti-cosine of a specific column
+**Explanations**:
-**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL
+1. bin_type: parameter to indicate the bucket type, valid inputs are: "user_input", "linear_bin", "log_bin"。
+2. bin_description: parameter to describe how to generate buckets,can be in the following JSON formats for each bin_type respectively:
-**Applicable data types**: Data types except for timestamp, binary, nchar, bool
+ - "user_input": "[1, 3, 5, 7]": User specified bin values.
-**Applicable table types**: table, STable
+ - "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}"
+ "start" - bin starting point.
+ "width" - bin offset.
+ "count" - number of bins generated.
+ "infinity" - whether to add(-inf, inf)as start/end point in generated set of bins.
+ The above "linear_bin" descriptor generates a set of bins: [-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf].
-**Applicable nested query**: Inner query and Outer query
+ - "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}"
+ "start" - bin starting point.
+ "factor" - exponential factor of bin offset.
+ "count" - number of bins generated.
+ "infinity" - whether to add(-inf, inf)as start/end point in generated range of bins.
+ The above "log_bin" descriptor generates a set of bins:[-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf].
-**Applicable versions**: From 2.6.0.0
+3. normalized: setting to 1/0 to turn on/off result normalization.
-**More explanations**:
+## Selector Functions
-- Can't be used with tags
-- Can't be used with aggregate functions
+Selector functiosn choose one or more rows in the query result set to retrun according toe the semantics. You can specify to output ts column and other columns including tbname and tags so that you can easily know which rows the selected values belong to.
-### SIN
+### APERCENTILE
-```sql
-SELECT SIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
+```
+SELECT APERCENTILE(field_name, P[, algo_type])
+FROM { tb_name | stb_name } [WHERE clause]
```
-**Description**: The sine of a specific column
-
-**Description**: The anti-cosine of a specific column
+**Description**: Similar to `PERCENTILE`, but a simulated result is returned
-**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL
+**Return value type**: Double precision floating point
-**Applicable data types**: Data types except for timestamp, binary, nchar, bool
+**Applicable column types**: Numeric types
**Applicable table types**: table, STable
-**Applicable nested query**: Inner query and Outer query
-
-**Applicable versions**: From 2.6.0.0
+**More explanations**
-**More explanations**:
+- _P_ is in range [0,100], when _P_ is 0, the result is same as using function MIN; when _P_ is 100, the result is same as function MAX.
+- **algo_type** can only be input as `default` or `t-digest`, if it's not specified `default` will be used, i.e. `apercentile(column_name, 50)` is same as `apercentile(column_name, 50, "default")`.
+- When `t-digest` is used, `t-digest` sampling is used to calculate.
-- Can't be used with tags
-- Can't be used with aggregate functions
+**Nested query**: It can be used in both the outer query and inner query in a nested query.
-### COS
+### BOTTOM
-```sql
-SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause]
+```
+SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
```
-**Description**: The cosine of a specific column
-
-**Description**: The anti-cosine of a specific column
+**Description**: The least _k_ values of a specific column in a table or STable. If a value has multiple occurrences in the column but counting all of them in will exceed the upper limit _k_, then a part of them will be returned randomly.
-**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL
+**Return value type**: Same as the column being operated upon
-**Applicable data types**: Data types except for timestamp, binary, nchar, bool
+**Applicable column types**: Numeric types
**Applicable table types**: table, STable
-**Applicable nested query**: Inner query and Outer query
-
-**Applicable versions**: From 2.6.0.0
-
**More explanations**:
-- Can't be used with tags
-- Can't be used with aggregate functions
+- _k_ must be in range [1,100]
+- The timestamp associated with the selected values are returned too
+- Can't be used with `FILL`
-### TAN
+### FIRST
-```sql
-SELECT TAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
+```
+SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
-**Description**: The tangent of a specific column
-
-**Description**: The anti-cosine of a specific column
+**Description**: The first non-null value of a specific column in a table or STable
-**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL
+**Return value type**: Same as the column being operated upon
-**Applicable data types**: Data types except for timestamp, binary, nchar, bool
+**Applicable column types**: Any data type
**Applicable table types**: table, STable
-**Applicable nested query**: Inner query and Outer query
-
-**Applicable versions**: From 2.6.0.0
-
**More explanations**:
-- Can't be used with tags
-- Can't be used with aggregate functions
+- FIRST(\*) can be used to get the first non-null value of all columns
+- NULL will be returned if all the values of the specified column are all NULL
+- A result will NOT be returned if all the columns in the result set are all NULL
-### POW
+### INTERP
-```sql
-SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause]
+```
+SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ RANGE(timestamp1,timestamp2) ] [EVERY(interval)] [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})];
```
-**Description**: The power of a specific column with `power` as the index
-
-**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL
-
-**Applicable data types**: Data types except for timestamp, binary, nchar, bool
+**Description**: The value that matches the specified timestamp range is returned, if existing; or an interpolation value is returned.
-**Applicable table types**: table, STable
+**Return value type**: Same as the column being operated upon
-**Applicable nested query**: Inner query and Outer query
+**Applicable column types**: Numeric data types
-**Applicable versions**: From 2.6.0.0
+**Applicable table types**: table, STable, nested query
-**More explanations**:
+**More explanations**
-- Can't be used with tags
-- Can't be used with aggregate functions
+- `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists an interpolation value will be returned based on `FILL` parameter.
+- The input data of `INTERP` is the value of the specified column and a `where` clause can be used to filter the original data. If no `where` condition is specified then all original data is the input.
+- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1<=timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified. If `RANGE` is not specified, then the timestamp of the first row that matches the filter condition is treated as timestamp1, the timestamp of the last row that matches the filter condition is treated as timestamp2.
+- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified `EVERY` parameter. If `EVERY` parameter is not used, the time windows will be considered as no ending timestamp, i.e. there is only one time window from timestamp1.
+- Interpolation is performed based on `FILL` parameter. No interpolation is performed if `FILL` is not used, that means either the original data that matches is returned or nothing is returned.
+- `INTERP` can only be used to interpolate in single timeline. So it must be used with `group by tbname` when it's used on a STable. It can't be used with `GROUP BY` when it's used in the inner query of a nested query.
+- The result of `INTERP` is not influenced by `ORDER BY TIMESTAMP`, which impacts the output order only..
-### LOG
+### LAST
-```sql
-SELECT LOG(field_name, base) FROM { tb_name | stb_name } [WHERE clause]
+```
+SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
-**Description**: The log of a specific with `base` as the radix
+**Description**: The last non-NULL value of a specific column in a table or STable
-**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL
+**Return value type**: Same as the column being operated upon
-**Applicable data types**: Data types except for timestamp, binary, nchar, bool
+**Applicable column types**: Any data type
**Applicable table types**: table, STable
-**Applicable nested query**: Inner query and Outer query
-
-**Applicable versions**: From 2.6.0.0
-
**More explanations**:
-- Can't be used with tags
-- Can't be used with aggregate functions
+- LAST(\*) can be used to get the last non-NULL value of all columns
+- If the values of a column in the result set are all NULL, NULL is returned for that column; if all columns in the result are all NULL, no result will be returned.
+- When it's used on a STable, if there are multiple values with the timestamp in the result set, one of them will be returned randomly and it's not guaranteed that the same value is returned if the same query is run multiple times.
-### ABS
+### LAST_ROW
-```sql
-SELECT ABS(field_name) FROM { tb_name | stb_name } [WHERE clause]
+```
+SELECT LAST_ROW(field_name) FROM { tb_name | stb_name };
```
-**Description**: The absolute of a specific column
+**Description**: The last row of a table or STable
-**Return value type**: UBIGINT if the input value is integer; DOUBLE if the input value is FLOAT/DOUBLE
+**Return value type**: Same as the column being operated upon
-**Applicable data types**: Data types except for timestamp, binary, nchar, bool
+**Applicable column types**: Any data type
**Applicable table types**: table, STable
-**Applicable nested query**: Inner query and Outer query
-
-**Applicable versions**: From 2.6.0.0
-
**More explanations**:
-- Can't be used with tags
-- Can't be used with aggregate functions
+- When it's used against a STable, multiple rows with the same and largest timestamp may exist, in this case one of them is returned randomly and it's not guaranteed that the result is same if the query is run multiple times.
+- Can't be used with `INTERVAL`.
-### SQRT
+### MAX
-```sql
-SELECT SQRT(field_name) FROM { tb_name | stb_name } [WHERE clause]
+```
+SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
-**Description**: The square root of a specific column
+**Description**: The maximum value of a specific column of a table or STable
-**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL
+**Return value type**: Same as the data type of the column being operated upon
-**Applicable data types**: Data types except for timestamp, binary, nchar, bool
+**Applicable column types**: Numeric types
**Applicable table types**: table, STable
-**Applicable nested query**: Inner query and Outer query
-
-**Applicable versions**: From 2.6.0.0
-
-**More explanations**:
-
-- Can't be used with tags
-- Can't be used with aggregate functions
-
-### CAST
+### MIN
-```sql
-SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause]
+```
+SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];
```
-**Description**: It's used for type casting. The input parameter `expression` can be data columns, constants, scalar functions or arithmetic between them. Can't be used with tags, and can only be used in `select` clause.
-
-**Return value type**: The type specified by parameter `type_name`
-
-**Applicable data types**:
-
-- Parameter `expression` can be any data type except for JSON, more specifically it can be any of BOOL/TINYINT/SMALLINT/INT/BIGINT/FLOAT/DOUBLE/BINARY(M)/TIMESTAMP/NCHAR(M)/TINYINT UNSIGNED/SMALLINT UNSIGNED/INT UNSIGNED/BIGINT UNSIGNED
-- The output data type specified by `type_name` can only be one of BIGINT/BINARY(N)/TIMESTAMP/NCHAR(N)/BIGINT UNSIGNED
+**Description**: The minimum value of a specific column in a table or STable
-**Applicable versions**: From 2.6.0.0
+**Return value type**: Same as the data type of the column being operated upon
-**More explanations**:
+**Applicable column types**: Numeric types
-- Error will be reported for unsupported type casting
-- NULL will be returned if the input value is NULL
-- Some values of some supported data types may not be casted, below are known issues:
- 1)When casting BINARY/NCHAR to BIGINT/BIGINT UNSIGNED, some characters may be treated as illegal, for example "a" may be converted to 0.
- 2)There may be overflow when casting singed integer or TIMESTAMP to unsigned BIGINT
- 3)There may be overflow when casting unsigned BIGINT to BIGINT
- 4)There may be overflow when casting FLOAT/DOUBLE to BIGINT or UNSIGNED BIGINT
+**Applicable table types**: table, STable
-### CONCAT
+### PERCENTILE
-```sql
-SELECT CONCAT(str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause]
+```
+SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause];
```
-**Description**: The concatenation result of two or more strings, the number of strings to be concatenated is at least 2 and at most 8
-
-**Return value type**: Same as the columns being operated, BINARY or NCHAR; or NULL if all the input are NULL
+**Description**: The value whose rank in a specific column matches the specified percentage. If such a value matching the specified percentage doesn't exist in the column, an interpolation value will be returned.
-**Applicable data types**: The input data must be in either all BINARY or in all NCHAR; can't be used on tag columns
+**Return value type**: Double precision floating point
-**Applicable table types**: table, STable
+**Applicable column types**: Numeric types
-**Applicable nested query**: Inner query and Outer query
+**Applicable table types**: table
-**Applicable versions**: From 2.6.0.0
+**More explanations**: _P_ is in range [0,100], when _P_ is 0, the result is same as using function MIN; when _P_ is 100, the result is same as function MAX.
-### CONCAT_WS
+### TAIL
```
-SELECT CONCAT_WS(separator, str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause]
+SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause];
```
-**Description**: The concatenation result of two or more strings with separator, the number of strings to be concatenated is at least 3 and at most 9
-
-**Return value type**: Same as the columns being operated, BINARY or NCHAR; or NULL if all the input are NULL
-
-**Applicable data types**: The input data must be in either all BINARY or in all NCHAR; can't be used on tag columns
-
-**Applicable table types**: table, STable
-
-**Applicable nested query**: Inner query and Outer query
+**Description**: The next _k_ rows are returned after skipping the last `offset_val` rows, NULL values are not ignored. `offset_val` is optional parameter. When it's not specified, the last _k_ rows are returned. When `offset_val` is used, the effect is same as `order by ts desc LIMIT k OFFSET offset_val`.
-**Applicable versions**: From 2.6.0.0
+**Parameter value range**: k: [1,100] offset_val: [0,100]
-**More explanations**:
+**Return value type**: Same as the column being operated upon
-- If the value of `separator` is NULL, the output is NULL. If the value of `separator` is not NULL but other input are all NULL, the output is empty string.
+**Applicable column types**: Any data type except form timestamp, i.e. the primary key
-### LENGTH
+### TOP
```
-SELECT LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
+SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
```
-**Description**: The length in bytes of a string
+**Description**: The greatest _k_ values of a specific column in a table or STable. If a value has multiple occurrences in the column but counting all of them in will exceed the upper limit _k_, then a part of them will be returned randomly.
-**Return value type**: Integer
+**Return value type**: Same as the column being operated upon
-**Applicable data types**: BINARY or NCHAR, can't be used on tags
+**Applicable column types**: Numeric types
**Applicable table types**: table, STable
-**Applicable nested query**: Inner query and Outer query
-
-**Applicable versions**: From 2.6.0.0
-
-**More explanations**
+**More explanations**:
-- If the input value is NULL, the output is NULL too
+- _k_ must be in range [1,100]
+- The timestamp associated with the selected values are returned too
+- Can't be used with `FILL`
-### CHAR_LENGTH
+### UNIQUE
```
-SELECT CHAR_LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
+SELECT UNIQUE(field_name) FROM {tb_name | stb_name} [WHERE clause];
```
-**Description**: The length in number of characters of a string
+**Description**: The values that occur the first time in the specified column. The effect is similar to `distinct` keyword, but it can also be used to match tags or timestamp.
-**Return value type**: Integer
+**Return value type**: Same as the column or tag being operated upon
-**Applicable data types**: BINARY or NCHAR, can't be used on tags
+**Applicable column types**: Any data types except for timestamp
-**Applicable table types**: table, STable
+**More explanations**:
-**Applicable nested query**: Inner query and Outer query
+- It can be used against table or STable, but can't be used together with time window, like `interval`, `state_window` or `session_window` .
+- Considering the number of result sets is unpredictable, it's suggested to limit the distinct values under 100,000 to control the memory usage, otherwise error will be returned.
-**Applicable versions**: From 2.6.0.0
+## Time-Series Specific Functions
-**More explanations**
+TDengine provides a set of time-series specific functions to better meet the requirements in querying time-series data. In general databases, similar functionalities can only be achieved with much more complex syntax and much worse performance. TDengine provides these functionalities in builtin functions so that the burden on user side is minimized.
-- If the input value is NULL, the output is NULL too
-
-### LOWER
+### CSUM
-```
-SELECT LOWER(str|column) FROM { tb_name | stb_name } [WHERE clause]
+```sql
+ SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
-**Description**: Convert the input string to lower case
+**Description**: The cumulative sum of each row for a specific column. The number of output rows is same as that of the input rows.
-**Return value type**: Same as input
+**Return value type**: Long integer for integers; Double for floating points. Timestamp is returned for each row.
-**Applicable data types**: BINARY or NCHAR, can't be used on tags
+**Applicable data types**: Numeric types
**Applicable table types**: table, STable
**Applicable nested query**: Inner query and Outer query
-**Applicable versions**: From 2.6.0.0
-
-**More explanations**
-
-- If the input value is NULL, the output is NULL too
+**More explanations**:
+- Arithmetic operation can't be performed on the result of `csum` function
+- Can only be used with aggregate functions
+- `Group by tbname` must be used together on a STable to force the result on a single timeline
-### UPPER
+### DERIVATIVE
```
-SELECT UPPER(str|column) FROM { tb_name | stb_name } [WHERE clause]
+SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHERE clause];
```
-**Description**: Convert the input string to upper case
+**Description**: The derivative of a specific column. The time rage can be specified by parameter `time_interval`, the minimum allowed time range is 1 second (1s); the value of `ignore_negative` can be 0 or 1, 1 means negative values are ignored.
-**Return value type**: Same as input
+**Return value type**: Double precision floating point
-**Applicable data types**: BINARY or NCHAR, can't be used on tags
+**Applicable column types**: Numeric types
**Applicable table types**: table, STable
-**Applicable nested query**: Inner query and Outer query
-
-**Applicable versions**: From 2.6.0.0
-
-**More explanations**
+**More explanations**:
-- If the input value is NULL, the output is NULL too
+- The number of result rows is the number of total rows in the time range subtracted by one, no output for the first row.
+- It can be used together with `GROUP BY tbname` against a STable.
-### LTRIM
+### DIFF
-```
-SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
+```sql
+SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHERE clause];
```
-**Description**: Remove the left leading blanks of a string
+**Description**: The different of each row with its previous row for a specific column. `ignore_negative` can be specified as 0 or 1, the default value is 1 if it's not specified. `1` means negative values are ignored.
-**Return value type**: Same as input
+**Return value type**: Same as the column being operated upon
-**Applicable data types**: BINARY or NCHAR, can't be used on tags
+**Applicable column types**: Numeric types
**Applicable table types**: table, STable
-**Applicable nested query**: Inner query and Outer query
-
-**Applicable versions**: From 2.6.0.0
-
-**More explanations**
+**More explanations**:
-- If the input value is NULL, the output is NULL too
+- The number of result rows is the number of rows subtracted by one, no output for the first row
+- It can be used on STable with `GROUP by tbname`
-### RTRIM
+### IRATE
```
-SELECT RTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
+SELECT IRATE(field_name) FROM tb_name WHERE clause;
```
-**Description**: Remove the right tailing blanks of a string
+**Description**: instantaneous rate on a specific column. The last two samples in the specified time range are used to calculate instantaneous rate. If the last sample value is smaller, then only the last sample value is used instead of the difference between the last two sample values.
-**Return value type**: Same as input
+**Return value type**: Double precision floating number
-**Applicable data types**: BINARY or NCHAR, can't be used on tags
+**Applicable column types**: Numeric types
**Applicable table types**: table, STable
-**Applicable nested query**: Inner query and Outer query
-
-**Applicable versions**: From 2.6.0.0
-
-**More explanations**
+**More explanations**:
-- If the input value is NULL, the output is NULL too
+- It can be used on stble with `GROUP BY`, i.e. timelines generated by `GROUP BY tbname` on a STable.
-### SUBSTR
+### MAVG
-```
-SELECT SUBSTR(str,pos[,len]) FROM { tb_name | stb_name } [WHERE clause]
+```sql
+ SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
```
-**Description**: The sub-string starting from `pos` with length of `len` from the original string `str`
+**Description**: The moving average of continuous _k_ values of a specific column. If the number of input rows is less than _k_, nothing is returned. The applicable range of _k_ is [1,1000].
-**Return value type**: Same as input
+**Return value type**: Double precision floating point
-**Applicable data types**: BINARY or NCHAR, can't be used on tags
-
-**Applicable table types**: table, STable
+**Applicable data types**: Numeric types
**Applicable nested query**: Inner query and Outer query
-**Applicable versions**: From 2.6.0.0
+**Applicable table types**: table, STable
**More explanations**:
-- If the input is NULL, the output is NULL
-- Parameter `pos` can be an positive or negative integer; If it's positive, the starting position will be counted from the beginning of the string; if it's negative, the starting position will be counted from the end of the string.
-- If `len` is not specified, it means from `pos` to the end.
+- Arithmetic operation can't be performed on the result of `MAVG`.
+- Can't be used with aggregate functions.
+- Must be used with `GROUP BY tbname` when it's used on a STable to force the result on each single timeline.
-### Arithmetic Operations
+### SAMPLE
-```
-SELECT field_name [+|-|*|/|%][Value|field_name] FROM { tb_name | stb_name } [WHERE clause];
+```sql
+ SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
```
-**Description**: The sum, difference, product, quotient, or remainder between one or more columns
+**Description**: _k_ sampling values of a specific column. The applicable range of _k_ is [1,10000]
-**Return value type**: Double precision floating point
+**Return value type**: Same as the column being operated plus the associated timestamp
-**Applicable column types**: Data types except for timestamp, binary, nchar, bool
+**Applicable data types**: Any data type except for tags of STable
**Applicable table types**: table, STable
-**More explanations**:
-
-- Arithmetic operations can be performed on two or more columns, `()` can be used to control the precedence
-- NULL doesn't participate the operation, if one of the operands is NULL then result is NULL
+**Applicable nested query**: Inner query and Outer query
-**Examples**:
+**More explanations**:
-```
-taos> SELECT current + voltage * phase FROM d1001;
-(current+(voltage*phase)) |
-============================
- 78.190000713 |
- 84.540003240 |
- 80.810000718 |
-Query OK, 3 row(s) in set (0.001046s)
-```
+- Arithmetic operation can't be operated on the result of `SAMPLE` function
+- Must be used with `Group by tbname` when it's used on a STable to force the result on each single timeline
### STATECOUNT
@@ -1490,45 +1162,17 @@ SELECT STATECOUNT(field_name, oper, val) FROM { tb_name | stb_name } [WHERE clau
**Return value type**: Integer
-**Applicable data types**: Data types excpet for timestamp, binary, nchar, bool
+**Applicable data types**: Numeric types
**Applicable table types**: table, STable
**Applicable nested query**: Outer query only
-**Applicable versions**: From 2.6.0.0
-
**More explanations**:
- Must be used together with `GROUP BY tbname` when it's used on a STable to force the result into each single timeline]
- Can't be used with window operation, like interval/state_window/session_window
-**Examples**:
-
-```
-taos> select ts,dbig from statef2;
- ts | dbig |
-========================================================
-2021-10-15 00:31:33.000000000 | 1 |
-2021-10-17 00:31:31.000000000 | NULL |
-2021-12-24 00:31:34.000000000 | 2 |
-2022-01-01 08:00:05.000000000 | 19 |
-2022-01-01 08:00:06.000000000 | NULL |
-2022-01-01 08:00:07.000000000 | 9 |
-Query OK, 6 row(s) in set (0.002977s)
-
-taos> select stateCount(dbig,GT,2) from statef2;
-ts | dbig | statecount(dbig,gt,2) |
-================================================================================
-2021-10-15 00:31:33.000000000 | 1 | -1 |
-2021-10-17 00:31:31.000000000 | NULL | NULL |
-2021-12-24 00:31:34.000000000 | 2 | -1 |
-2022-01-01 08:00:05.000000000 | 19 | 1 |
-2022-01-01 08:00:06.000000000 | NULL | NULL |
-2022-01-01 08:00:07.000000000 | 9 | 2 |
-Query OK, 6 row(s) in set (0.002791s)
-```
-
### STATEDURATION
```
@@ -1545,324 +1189,65 @@ SELECT stateDuration(field_name, oper, val, unit) FROM { tb_name | stb_name } [W
**Return value type**: Integer
-**Applicable data types**: Data types excpet for timestamp, binary, nchar, bool
+**Applicable data types**: Numeric types
**Applicable table types**: table, STable
**Applicable nested query**: Outer query only
-**Applicable versions**: From 2.6.0.0
-
**More explanations**:
- Must be used together with `GROUP BY tbname` when it's used on a STable to force the result into each single timeline]
- Can't be used with window operation, like interval/state_window/session_window
-**Examples**:
-
-```
-taos> select ts,dbig from statef2;
- ts | dbig |
-========================================================
-2021-10-15 00:31:33.000000000 | 1 |
-2021-10-17 00:31:31.000000000 | NULL |
-2021-12-24 00:31:34.000000000 | 2 |
-2022-01-01 08:00:05.000000000 | 19 |
-2022-01-01 08:00:06.000000000 | NULL |
-2022-01-01 08:00:07.000000000 | 9 |
-Query OK, 6 row(s) in set (0.002407s)
-
-taos> select stateDuration(dbig,GT,2) from statef2;
-ts | dbig | stateduration(dbig,gt,2) |
-===================================================================================
-2021-10-15 00:31:33.000000000 | 1 | -1 |
-2021-10-17 00:31:31.000000000 | NULL | NULL |
-2021-12-24 00:31:34.000000000 | 2 | -1 |
-2022-01-01 08:00:05.000000000 | 19 | 0 |
-2022-01-01 08:00:06.000000000 | NULL | NULL |
-2022-01-01 08:00:07.000000000 | 9 | 2 |
-Query OK, 6 row(s) in set (0.002613s)
-```
-
-## Time Functions
-
-From version 2.6.0.0, below time related functions can be used in TDengine.
-
-### NOW
-
-```sql
-SELECT NOW() FROM { tb_name | stb_name } [WHERE clause];
-SELECT select_expr FROM { tb_name | stb_name } WHERE ts_col cond_operatior NOW();
-INSERT INTO tb_name VALUES (NOW(), ...);
-```
-
-**Description**: The current time of the client side system
-
-**Return value type**: TIMESTAMP
-
-**Applicable column types**: TIMESTAMP only
-
-**Applicable table types**: table, STable
-
-**More explanations**:
-
-- Add and Subtract operation can be performed, for example NOW() + 1s, the time unit can be:
- b(nanosecond), u(microsecond), a(millisecond)), s(second), m(minute), h(hour), d(day), w(week)
-- The precision of the returned timestamp is same as the precision set for the current data base in use
-
-**Examples**:
-
-```sql
-taos> SELECT NOW() FROM meters;
- now() |
-==========================
- 2022-02-02 02:02:02.456 |
-Query OK, 1 row(s) in set (0.002093s)
-
-taos> SELECT NOW() + 1h FROM meters;
- now() + 1h |
-==========================
- 2022-02-02 03:02:02.456 |
-Query OK, 1 row(s) in set (0.002093s)
-
-taos> SELECT COUNT(voltage) FROM d1001 WHERE ts < NOW();
- count(voltage) |
-=============================
- 5 |
-Query OK, 5 row(s) in set (0.004475s)
+### TWA
-taos> INSERT INTO d1001 VALUES (NOW(), 10.2, 219, 0.32);
-Query OK, 1 of 1 row(s) in database (0.002210s)
```
-
-### TODAY
-
-```sql
-SELECT TODAY() FROM { tb_name | stb_name } [WHERE clause];
-SELECT select_expr FROM { tb_name | stb_name } WHERE ts_col cond_operatior TODAY()];
-INSERT INTO tb_name VALUES (TODAY(), ...);
+SELECT TWA(field_name) FROM tb_name WHERE clause;
```
-**Description**: The timestamp of 00:00:00 of the client side system
+**Description**: Time weighted average on a specific column within a time range
-**Return value type**: TIMESTAMP
+**Return value type**: Double precision floating number
-**Applicable column types**: TIMESTAMP only
+**Applicable column types**: Numeric types
**Applicable table types**: table, STable
**More explanations**:
-- Add and Subtract operation can be performed, for example NOW() + 1s, the time unit can be:
- b(nanosecond), u(microsecond), a(millisecond)), s(second), m(minute), h(hour), d(day), w(week)
-- The precision of the returned timestamp is same as the precision set for the current data base in use
+- It can be used on stable with `GROUP BY`, i.e. timelines generated by `GROUP BY tbname` on a STable.
-**Examples**:
+## System Information Functions
-```sql
-taos> SELECT TODAY() FROM meters;
- today() |
-==========================
- 2022-02-02 00:00:00.000 |
-Query OK, 1 row(s) in set (0.002093s)
+### DATABASE
-taos> SELECT TODAY() + 1h FROM meters;
- today() + 1h |
-==========================
- 2022-02-02 01:00:00.000 |
-Query OK, 1 row(s) in set (0.002093s)
-
-taos> SELECT COUNT(voltage) FROM d1001 WHERE ts < TODAY();
- count(voltage) |
-=============================
- 5 |
-Query OK, 5 row(s) in set (0.004475s)
-
-taos> INSERT INTO d1001 VALUES (TODAY(), 10.2, 219, 0.32);
-Query OK, 1 of 1 row(s) in database (0.002210s)
```
-
-### TIMEZONE
-
-```sql
-SELECT TIMEZONE() FROM { tb_name | stb_name } [WHERE clause];
+SELECT DATABASE();
```
-**Description**: The timezone of the client side system
-
-**Return value type**: BINARY
-
-**Applicable column types**: None
-
-**Applicable table types**: table, STable
-
-**Examples**:
-
-```sql
-taos> SELECT TIMEZONE() FROM meters;
- timezone() |
-=================================
- UTC (UTC, +0000) |
-Query OK, 1 row(s) in set (0.002093s)
-```
-
-### TO_ISO8601
-
-```sql
-SELECT TO_ISO8601(ts_val | ts_col) FROM { tb_name | stb_name } [WHERE clause];
-```
-
-**Description**: The ISO8601 date/time format converted from a UNIX timestamp, plus the timezone of the client side system
-
-**Return value type**: BINARY
-
-**Applicable column types**: TIMESTAMP, constant or a column
-
-**Applicable table types**: table, STable
-
-**More explanations**:
-
-- If the input is UNIX timestamp constant, the precision of the returned value is determined by the digits of the input timestamp
-- If the input is a column of TIMESTAMP type, The precision of the returned value is same as the precision set for the current data base in use
-
-**Examples**:
+**Description**:Return the current database being used. If the user doesn't specify database when logon and doesn't use `USE` SQL command to switch the datbase, this function returns NULL.
-```sql
-taos> SELECT TO_ISO8601(1643738400) FROM meters;
- to_iso8601(1643738400) |
-==============================
- 2022-02-02T02:00:00+0800 |
+### CLIENT_VERSION
-taos> SELECT TO_ISO8601(ts) FROM meters;
- to_iso8601(ts) |
-==============================
- 2022-02-02T02:00:00+0800 |
- 2022-02-02T02:00:00+0800 |
- 2022-02-02T02:00:00+0800 |
```
-
-### TO_UNIXTIMESTAMP
-
-```sql
-SELECT TO_UNIXTIMESTAMP(datetime_string | ts_col) FROM { tb_name | stb_name } [WHERE clause];
+SELECT CLIENT_VERSION();
```
-**Description**: UNIX timestamp converted from a string of date/time format
-
-**Return value type**: Long integer
-
-**Applicable column types**: Constant or column of BINARY/NCHAR
-
-**Applicable table types**: table, STable
-
-**More explanations**:
-
-- The input string must be compatible with ISO8601/RFC3339 standard, 0 will be returned if the string can't be converted
-- The precision of the returned timestamp is same as the precision set for the current data base in use
-
-**Examples**:
+**Description**:Return the client version.
-```sql
-taos> SELECT TO_UNIXTIMESTAMP("2022-02-02T02:00:00.000Z") FROM meters;
-to_unixtimestamp("2022-02-02T02:00:00.000Z") |
-==============================================
- 1643767200000 |
+### SERVER_VERSION
-taos> SELECT TO_UNIXTIMESTAMP(col_binary) FROM meters;
- to_unixtimestamp(col_binary) |
-========================================
- 1643767200000 |
- 1643767200000 |
- 1643767200000 |
```
-
-### TIMETRUNCATE
-
-```sql
-SELECT TIMETRUNCATE(ts_val | datetime_string | ts_col, time_unit) FROM { tb_name | stb_name } [WHERE clause];
+SELECT SERVER_VERSION();
```
-**Description**: Truncate the input timestamp with unit specified by `time_unit`\
-
-**Return value type**: TIMESTAMP\
-
-**Applicable column types**: UNIX timestamp constant, string constant of date/time format, or a column of timestamp
+**Description**:Returns the server version.
-**Applicable table types**: table, STable
-
-**More explanations**:
+### SERVER_STATUS
-- Time unit specified by `time_unit` can be:
- 1u(microsecond),1a(millisecond),1s(second),1m(minute),1h(hour),1d(day).
-- The precision of the returned timestamp is same as the precision set for the current data base in use
-
-**Examples**:
-
-```sql
-taos> SELECT TIMETRUNCATE(1643738522000, 1h) FROM meters;
- timetruncate(1643738522000, 1h) |
-===================================
- 2022-02-02 02:00:00.000 |
-Query OK, 1 row(s) in set (0.001499s)
-
-taos> SELECT TIMETRUNCATE("2022-02-02 02:02:02", 1h) FROM meters;
- timetruncate("2022-02-02 02:02:02", 1h) |
-===========================================
- 2022-02-02 02:00:00.000 |
-Query OK, 1 row(s) in set (0.003903s)
-
-taos> SELECT TIMETRUNCATE(ts, 1h) FROM meters;
- timetruncate(ts, 1h) |
-==========================
- 2022-02-02 02:00:00.000 |
- 2022-02-02 02:00:00.000 |
- 2022-02-02 02:00:00.000 |
-Query OK, 3 row(s) in set (0.003903s)
```
-
-### TIMEDIFF
-
-```sql
-SELECT TIMEDIFF(ts_val1 | datetime_string1 | ts_col1, ts_val2 | datetime_string2 | ts_col2 [, time_unit]) FROM { tb_name | stb_name } [WHERE clause];
+SELECT SERVER_VERSION();
```
-**Description**: The difference between two timestamps, and rounded to the time unit specified by `time_unit`
-
-**Return value type**: Long Integer
-
-**Applicable column types**: UNIX timestamp constant, string constant of date/time format, or a column of TIMESTAMP type
-
-**Applicable table types**: table, STable
-
-**More explanations**:
-
-- Time unit specified by `time_unit` can be:
- 1u(microsecond),1a(millisecond),1s(second),1m(minute),1h(hour),1d(day).
-- The precision of the returned timestamp is same as the precision set for the current data base in use
-
-**Examples**:
-
-```sql
-taos> SELECT TIMEDIFF(1643738400000, 1643742000000) FROM meters;
- timediff(1643738400000, 1643742000000) |
-=========================================
- 3600000 |
-Query OK, 1 row(s) in set (0.002553s)
-taos> SELECT TIMEDIFF(1643738400000, 1643742000000, 1h) FROM meters;
- timediff(1643738400000, 1643742000000, 1h) |
-=============================================
- 1 |
-Query OK, 1 row(s) in set (0.003726s)
-
-taos> SELECT TIMEDIFF("2022-02-02 03:00:00", "2022-02-02 02:00:00", 1h) FROM meters;
- timediff("2022-02-02 03:00:00", "2022-02-02 02:00:00", 1h) |
-=============================================================
- 1 |
-Query OK, 1 row(s) in set (0.001937s)
-
-taos> SELECT TIMEDIFF(ts_col1, ts_col2, 1h) FROM meters;
- timediff(ts_col1, ts_col2, 1h) |
-===================================
- 1 |
-Query OK, 1 row(s) in set (0.001937s)
-```
+**Description**:Returns the server's status.
diff --git a/docs-en/12-taos-sql/08-interval.md b/docs-en/12-taos-sql/08-interval.md
index 5cc3fa8cb43749fd40b808699f82a8761525cc6a..acfb0de0e1521fd8c6a068497a3df7a17941524c 100644
--- a/docs-en/12-taos-sql/08-interval.md
+++ b/docs-en/12-taos-sql/08-interval.md
@@ -3,36 +3,36 @@ sidebar_label: Interval
title: Aggregate by Time Window
---
-Aggregate by time window is supported in TDengine. For example, each temperature sensor reports the temperature every second, the average temperature every 10 minutes can be retrieved by query with time window.
-Window related clauses are used to divide the data set to be queried into subsets and then aggregate. There are three kinds of windows, time window, status window, and session window. There are two kinds of time windows, sliding window and flip time window.
+Aggregation by time window is supported in TDengine. For example, in the case where temperature sensors report the temperature every seconds, the average temperature for every 10 minutes can be retrieved by performing a query with a time window.
+Window related clauses are used to divide the data set to be queried into subsets and then aggregation is performed across the subsets. There are three kinds of windows: time window, status window, and session window. There are two kinds of time windows: sliding window and flip time/tumbling window.
## Time Window
-`INTERVAL` clause is used to generate time windows of same time interval, `SLIDING` is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining continuous query both the size of time window and the step of forward sliding time need to be specified. As shown in the figure blow, [t0s, t0e] ,[t1s , t1e], [t2s, t2e] are respectively the time range of three time windows on which continuous queries are executed. The time step for which time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time window.
+The `INTERVAL` clause is used to generate time windows of the same time interval. The `SLIDING` parameter is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining a continuous query, both the size of the time window and the step of forward sliding time need to be specified. As shown in the figure blow, [t0s, t0e] ,[t1s , t1e], [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. The time step for which time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time/tumbling window.
-
+
-`INTERVAL` and `SLIDING` should be used with aggregate functions and selection functions. Below SQL statement is illegal because no aggregate or selection function is used with `INTERVAL`.
+`INTERVAL` and `SLIDING` should be used with aggregate functions and select functions. The SQL statement below is illegal because no aggregate or selection function is used with `INTERVAL`.
```
SELECT * FROM temp_tb_1 INTERVAL(1m);
```
-The time step specified by `SLIDING` can't exceed the time interval specified by `INTERVAL`. Below SQL statement is illegal because the time length specified by `SLIDING` exceeds that specified by `INTERVAL`.
+The time step specified by `SLIDING` cannot exceed the time interval specified by `INTERVAL`. The SQL statement below is illegal because the time length specified by `SLIDING` exceeds that specified by `INTERVAL`.
```
SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(2m);
```
-When the time length specified by `SLIDING` is same as that specified by `INTERVAL`, sliding window is actually flip window. The minimum time range specified by `INTERVAL` is 10 milliseconds (10a) prior to version 2.1.5.0. From version 2.1.5.0, the minimum time range by `INTERVAL` can be 1 microsecond (1u). However, if the DB precision is millisecond, the minimum time range is 1 millisecond (1a). Please be noted that the `timezone` parameter should be configured to same value in the `taos.cfg` configuration file on client side and server side.
+When the time length specified by `SLIDING` is the same as that specified by `INTERVAL`, the sliding window is actually a flip/tumbling window. The minimum time range specified by `INTERVAL` is 10 milliseconds (10a) prior to version 2.1.5.0. Since version 2.1.5.0, the minimum time range by `INTERVAL` can be 1 microsecond (1u). However, if the DB precision is millisecond, the minimum time range is 1 millisecond (1a). Please note that the `timezone` parameter should be configured to be the same value in the `taos.cfg` configuration file on client side and server side.
## Status Window
-In case of using integer, bool, or string to represent the device status at a moment, the continuous rows with same status belong to same status window. Once the status changes, the status window closes. As shown in the following figure,there are two status windows according to status, [2019-04-28 14:22:07,2019-04-28 14:22:10] and [2019-04-28 14:22:11,2019-04-28 14:22:12]. Status window is not applicable to STable for now.
+In case of using integer, bool, or string to represent the status of a device at any given moment, continuous rows with the same status belong to a status window. Once the status changes, the status window closes. As shown in the following figure, there are two status windows according to status, [2019-04-28 14:22:07,2019-04-28 14:22:10] and [2019-04-28 14:22:11,2019-04-28 14:22:12]. Status window is not applicable to STable for now.
-
+
-`STATE_WINDOW` is used to specify the column based on which to define status window, for example:
+`STATE_WINDOW` is used to specify the column on which the status window will be based. For example:
```
SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status);
@@ -44,9 +44,9 @@ SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status);
SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION(ts, tol_val);
```
-The primary key, i.e. timestamp, is used to determine which session window the row belongs to. If the time interval between two adjacent rows is within the time range specified by `tol_val`, they belong to same session window; otherwise they belong to two different time windows. As shown in the figure below, if the limit of time interval for session window is specified as 12 seconds, then the 6 rows in the figure constitutes 2 time windows, [2019-04-28 14:22:10,2019-04-28 14:22:30] and [2019-04-28 14:23:10,2019-04-28 14:23:30], because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds.
+The primary key, i.e. timestamp, is used to determine which session window a row belongs to. If the time interval between two adjacent rows is within the time range specified by `tol_val`, they belong to the same session window; otherwise they belong to two different session windows. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitutes 2 time windows, [2019-04-28 14:22:10,2019-04-28 14:22:30] and [2019-04-28 14:23:10,2019-04-28 14:23:30], because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds.
-
+
If the time interval between two continuous rows are within the time interval specified by `tol_value` they belong to the same session window; otherwise a new session window is started automatically. Session window is not supported on STable for now.
@@ -54,7 +54,7 @@ If the time interval between two continuous rows are within the time interval sp
### Syntax
-The full syntax of aggregate by window is as following:
+The full syntax of aggregate by window is as follows:
```sql
SELECT function_list FROM tb_name
@@ -73,11 +73,11 @@ SELECT function_list FROM stb_name
### Restrictions
-- Aggregate functions and selection functions can be used in `function_list`, with each function having only one output, for example COUNT, AVG, SUM, STDDEV, LEASTSQUARES, PERCENTILE, MIN, MAX, FIRST, LAST. Functions having multiple output can't be used, for example DIFF or arithmetic operations.
+- Aggregate functions and select functions can be used in `function_list`, with each function having only one output. For example COUNT, AVG, SUM, STDDEV, LEASTSQUARES, PERCENTILE, MIN, MAX, FIRST, LAST. Functions having multiple outputs, such as DIFF or arithmetic operations can't be used.
- `LAST_ROW` can't be used together with window aggregate.
- Scalar functions, like CEIL/FLOOR, can't be used with window aggregate.
- `WHERE` clause can be used to specify the starting and ending time and other filter conditions
-- `FILL` clause is used to specify how to fill when there is data missing in any window, including: \
+- `FILL` clause is used to specify how to fill when there is data missing in any window, including:
1. NONE: No fill (the default fill mode)
2. VALUE:Fill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)`
3. PREV:Fill with the previous non-NULL value, `FILL(PREV)`
@@ -87,22 +87,23 @@ SELECT function_list FROM stb_name
:::info
-1. Huge volume of interpolation output may be returned using `FILL`, so it's recommended to specify the time range when using `FILL`. The maximum interpolation values that can be returned in single query is 10,000,000.
-2. The result set is in the ascending order of timestamp in aggregate by time window aggregate.
+1. A huge volume of interpolation output may be returned using `FILL`, so it's recommended to specify the time range when using `FILL`. The maximum number of interpolation values that can be returned in a single query is 10,000,000.
+2. The result set is in ascending order of timestamp when you aggregate by time window.
3. If aggregate by window is used on STable, the aggregate function is performed on all the rows matching the filter conditions. If `GROUP BY` is not used in the query, the result set will be returned in ascending order of timestamp; otherwise the result set is not exactly in the order of ascending timestamp in each group.
- :::
+
+:::
Aggregate by time window is also used in continuous query, please refer to [Continuous Query](/develop/continuous-query).
## Examples
-The table of intelligent meters can be created like below SQL statement:
+A table of intelligent meters can be created by the SQL statement below:
```sql
CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
```
-The average current, maximum current and median of current in every 10 minutes of the past 24 hours can be calculated using below SQL statement, with missing value filled with the previous non-NULL value.
+The average current, maximum current and median of current in every 10 minutes for the past 24 hours can be calculated using the SQL statement below, with missing values filled with the previous non-NULL values.
```
SELECT AVG(current), MAX(current), APERCENTILE(current, 50) FROM meters
diff --git a/docs-en/12-taos-sql/09-limit.md b/docs-en/12-taos-sql/09-limit.md
index 873e484fbb4731294d00df323f8e0d2cbc6b1d30..db55cdd69e7bd29ca66ee15b61f28991568d9556 100644
--- a/docs-en/12-taos-sql/09-limit.md
+++ b/docs-en/12-taos-sql/09-limit.md
@@ -4,9 +4,9 @@ title: Limits & Restrictions
## Naming Rules
-1. Only English characters, digits and underscore are allowed
-2. Can't be started with digits
-3. Case Insensitive without escape character "\`"
+1. Only characters from the English alphabet, digits and underscore are allowed
+2. Names cannot start with a digit
+3. Case insensitive without escape character "\`"
4. Identifier with escape character "\`"
To support more flexible table or column names, a new escape character "\`" is introduced. For more details please refer to [escape](/taos-sql/escape).
@@ -16,38 +16,38 @@ The legal character set is `[a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/]`.
## General Limits
-- Maximum length of database name is 32 bytes
-- Maximum length of table name is 192 bytes, excluding the database name prefix and the separator
-- Maximum length of each data row is 48K bytes from version 2.1.7.0 , before which the limit is 16K bytes. Please be noted that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type.
-- Maximum of column name is 64.
+- Maximum length of database name is 32 bytes.
+- Maximum length of table name is 192 bytes, excluding the database name prefix and the separator.
+- Maximum length of each data row is 48K bytes since version 2.1.7.0 , before which the limit was 16K bytes. Please note that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type.
+- Maximum length of column name is 64.
- Maximum number of columns is 4096. There must be at least 2 columns, and the first column must be timestamp.
- Maximum length of tag name is 64.
- Maximum number of tags is 128. There must be at least 1 tag. The total length of tag values should not exceed 16K bytes.
-- Maximum length of singe SQL statement is 1048576, i.e. 1 MB bytes. It can be configured in the parameter `maxSQLLength` in the client side, the applicable range is [65480, 1048576].
-- At most 4096 columns (or 1024 prior to 2.1.7.0) can be returned by `SELECT`, functions in the query statement may constitute columns. Error will be returned if the limit is exceeded.
-- Maximum numbers of databases, STables, tables are only depending on the system resources.
-- Maximum of database name is 32 bytes, can't include "." and special characters.
-- Maximum replica number of database is 3
-- Maximum length of user name is 23 bytes
-- Maximum length of password is 15 bytes
-- Maximum number of rows depends on the storage space only.
-- Maximum number of tables depends on the number of nodes only.
-- Maximum number of databases depends on the number of nodes only.
-- Maximum number of vnodes for single database is 64.
+- Maximum length of singe SQL statement is 1048576, i.e. 1 MB. It can be configured in the parameter `maxSQLLength` in the client side, the applicable range is [65480, 1048576].
+- At most 4096 columns (or 1024 prior to 2.1.7.0) can be returned by `SELECT`. Functions in the query statement constitute columns. An error is returned if the limit is exceeded.
+- Maximum numbers of databases, STables, tables are dependent only on the system resources.
+- Maximum of database name is 32 bytes, and it can't include "." or special characters.
+- Maximum number of replicas for a database is 3.
+- Maximum length of user name is 23 bytes.
+- Maximum length of password is 15 bytes.
+- Maximum number of rows depends only on the storage space.
+- Maximum number of tables depends only on the number of nodes.
+- Maximum number of databases depends only on the number of nodes.
+- Maximum number of vnodes for a single database is 64.
## Restrictions of `GROUP BY`
-`GROUP BY` can be performed on tags and `TBNAME`. It can be performed on data columns too, with one restriction that only one column and the number of unique values on that column is lower than 100,000. Please be noted that `GROUP BY` can't be performed on float or double type.
+`GROUP BY` can be performed on tags and `TBNAME`. It can be performed on data columns too, with the only restriction being it can only be performed on one data column and the number of unique values in that column is lower than 100,000. Please note that `GROUP BY` cannot be performed on float or double types.
## Restrictions of `IS NOT NULL`
-`IS NOT NULL` can be used on any data type of columns. The non-empty string evaluation expression, i.e. `<\>""` can only be used on non-numeric data types.
+`IS NOT NULL` can be used on any data type of columns. The non-empty string evaluation expression, i.e. `< > ""` can only be used on non-numeric data types.
## Restrictions of `ORDER BY`
-- Only one `order by` is allowed for normal table and sub table.
+- Only one `order by` is allowed for normal table and subtable.
- At most two `order by` are allowed for STable, and the second one must be `ts`.
-- `order by tag` must be used with `group by tag` on same tag, this rule is also applicable to `tbname`.
+- `order by tag` must be used with `group by tag` on same tag. This rule is also applicable to `tbname`.
- `order by column` must be used with `group by column` or `top/bottom` on same column. This rule is applicable to table and STable.
- `order by ts` is applicable to table and STable.
- If `order by ts` is used with `group by`, the result set is sorted using `ts` in each group.
@@ -56,11 +56,11 @@ The legal character set is `[a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/]`.
### Name Restrictions of Table/Column
-The name of a table or column can only be composed of ASCII characters, digits and underscore, while digit can't be used as the beginning. The maximum length is 192 bytes. Names are case insensitive. The name mentioned in this rule doesn't include the database name prefix and the separator.
+The name of a table or column can only be composed of ASCII characters, digits and underscore and it cannot start with a digit. The maximum length is 192 bytes. Names are case insensitive. The name mentioned in this rule doesn't include the database name prefix and the separator.
### Name Restrictions After Escaping
-To support more flexible table or column names, new escape character "`" is introduced in TDengine to avoid the conflict between table name and keywords and break the above restrictions for table name. The escape character is not counted in the length of table name.
+To support more flexible table or column names, new escape character "\`" is introduced in TDengine to avoid the conflict between table name and keywords and break the above restrictions for table names. The escape character is not counted in the length of table name.
With escaping, the string inside escape characters are case sensitive, i.e. will not be converted to lower case internally.
diff --git a/docs-en/12-taos-sql/10-json.md b/docs-en/12-taos-sql/10-json.md
index 60468f1e0fd75cc04cae8a91b0a1a22b9bd3600b..7460a5e0ba3ce78ee7744569cda460c477cac19c 100644
--- a/docs-en/12-taos-sql/10-json.md
+++ b/docs-en/12-taos-sql/10-json.md
@@ -4,7 +4,7 @@ title: JSON Type
## Syntax
-1. Tag of JSON type
+1. Tag of type JSON
```sql
create STable s1 (ts timestamp, v1 int) tags (info json);
@@ -12,7 +12,7 @@ title: JSON Type
create table s1_1 using s1 tags ('{"k1": "v1"}');
```
-2. -> Operator of JSON
+2. "->" Operator of JSON
```sql
select * from s1 where info->'k1' = 'v1';
@@ -20,7 +20,7 @@ title: JSON Type
select info->'k1' from s1;
```
-3. contains Operator of JSON
+3. "contains" Operator of JSON
```sql
select * from s1 where info contains 'k2';
@@ -30,7 +30,7 @@ title: JSON Type
## Applicable Operations
-1. When JSON data type is used in `where`, `match/nmatch/between and/like/and/or/is null/is no null` can be used but `in` can't be used.
+1. When a JSON data type is used in `where`, `match/nmatch/between and/like/and/or/is null/is no null` can be used but `in` can't be used.
```sql
select * from s1 where info->'k1' match 'v*';
@@ -42,9 +42,9 @@ title: JSON Type
select * from s1 where info->'k1' is not null;
```
-2. Tag of JSON type can be used in `group by`, `order by`, `join`, `union all` and sub query, for example `group by json->'key'`
+2. A tag of JSON type can be used in `group by`, `order by`, `join`, `union all` and sub query; for example `group by json->'key'`
-3. `Distinct` can be used with tag of JSON type
+3. `Distinct` can be used with a tag of type JSON
```sql
select distinct info->'k1' from s1;
@@ -52,29 +52,29 @@ title: JSON Type
4. Tag Operations
- The value of JSON tag can be altered. Please be noted that the full JSON will be override when doing this.
+ The value of a JSON tag can be altered. Please note that the full JSON will be overriden when doing this.
- The name of JSON tag can be altered. A tag of JSON type can't be added or removed. The column length of a JSON tag can't be changed.
+ The name of a JSON tag can be altered. A tag of JSON type can't be added or removed. The column length of a JSON tag can't be changed.
## Other Restrictions
-- JSON type can only be used for tag. There can be only one tag of JSON type, and it's exclusive to any other types of tag.
+- JSON type can only be used for a tag. There can be only one tag of JSON type, and it's exclusive to any other types of tags.
- The maximum length of keys in JSON is 256 bytes, and key must be printable ASCII characters. The maximum total length of a JSON is 4,096 bytes.
- JSON format:
- - The input string for JSON can be empty, i.e. "", "\t", or NULL, but can't be non-NULL string, bool or array.
- - object can be {}, and the whole JSON is empty if so. Key can be "", and it's ignored if so.
- - value can be int, double, string, boll or NULL, can't be array. Nesting is not allowed, that means value can't be another JSON.
+ - The input string for JSON can be empty, i.e. "", "\t", or NULL, but it can't be non-NULL string, bool or array.
+ - object can be {}, and the entire JSON is empty if so. Key can be "", and it's ignored if so.
+ - value can be int, double, string, bool or NULL, and it can't be an array. Nesting is not allowed which means that the value of a key can't be JSON.
- If one key occurs twice in JSON, only the first one is valid.
- Escape characters are not allowed in JSON.
-- NULL is returned if querying a key that doesn't exist in JSON.
+- NULL is returned when querying a key that doesn't exist in JSON.
- If a tag of JSON is the result of inner query, it can't be parsed and queried in the outer query.
-For example, below SQL statements are not supported.
+For example, the SQL statements below are not supported.
```sql;
select jtag->'key' from (select jtag from STable);
diff --git a/docs-en/12-taos-sql/12-keywords.md b/docs-en/12-taos-sql/12-keywords.md
index fa750300b71251e1172dba13f91d05822f9ac1f4..ed0c96b4e4d94dd70da1c3778f4129bd34daed62 100644
--- a/docs-en/12-taos-sql/12-keywords.md
+++ b/docs-en/12-taos-sql/12-keywords.md
@@ -46,3 +46,45 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam
| CONNECTIONS | HAVING | NOT | SOFFSET | VNODES |
| CONNS | ID | NOTNULL | STable | WAL |
| COPY | IF | NOW | STableS | WHERE |
+| _C0 | _QSTART | _QSTOP | _QDURATION | _WSTART |
+| _WSTOP | _WDURATION | _ROWTS |
+
+## Explanations
+### TBNAME
+`TBNAME` can be considered as a special tag, which represents the name of the subtable, in a STable.
+
+Get the table name and tag values of all subtables in a STable.
+```mysql
+SELECT TBNAME, location FROM meters;
+```
+
+Count the number of subtables in a STable.
+```mysql
+SELECT COUNT(TBNAME) FROM meters;
+```
+
+Only filter on TAGS can be used in WHERE clause in the above two query statements.
+```mysql
+taos> SELECT TBNAME, location FROM meters;
+ tbname | location |
+==================================================================
+ d1004 | California.SanFrancisco |
+ d1003 | California.SanFrancisco |
+ d1002 | California.LosAngeles |
+ d1001 | California.LosAngeles |
+Query OK, 4 row(s) in set (0.000881s)
+
+taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2;
+ count(tbname) |
+========================
+ 2 |
+Query OK, 1 row(s) in set (0.001091s)
+```
+### _QSTART/_QSTOP/_QDURATION
+The start, stop and duration of a query time window.
+
+### _WSTART/_WSTOP/_WDURATION
+The start, stop and duration of aggegate query by time window, like interval, session window, state window.
+
+### _c0/_ROWTS
+_c0 is equal to _ROWTS, it means the first column of a table or STable.
diff --git a/docs-en/12-taos-sql/13-operators.md b/docs-en/12-taos-sql/13-operators.md
new file mode 100644
index 0000000000000000000000000000000000000000..0ca9ec49430a66384400bc41cd08562b3d5d28c7
--- /dev/null
+++ b/docs-en/12-taos-sql/13-operators.md
@@ -0,0 +1,66 @@
+---
+sidebar_label: Operators
+title: Operators
+---
+
+## Arithmetic Operators
+
+| # | **Operator** | **Data Types** | **Description** |
+| --- | :----------: | -------------- | --------------------------------------------------------- |
+| 1 | +, - | Numeric Types | Representing positive or negative numbers, unary operator |
+| 2 | +, - | Numeric Types | Addition and substraction, binary operator |
+| 3 | \*, / | Numeric Types | Multiplication and division, binary oeprator |
+| 4 | % | Numeric Types | Taking the remainder, binary operator |
+
+## Bitwise Operators
+
+| # | **Operator** | **Data Types** | **Description** |
+| --- | :----------: | -------------- | ----------------------------- |
+| 1 | & | Numeric Types | Bitewise AND, binary operator |
+| 2 | \| | Numeric Types | Bitewise OR, binary operator |
+
+## JSON Operator
+
+`->` operator can be used to get the value of a key in a column of JSON type, the left oeprand is the column name, the right operand is a string constant. For example, `col->'name'` returns the value of key `'name'`.
+
+## Set Operator
+
+Set operators are used to combine the results of two queries into single result. A query including set operators is called a combined query. The number of rows in each result in a combined query must be same, and the type is determined by the first query's result, the type of the following queriess result must be able to be converted to the type of the first query's result, the conversion rule is same as `CAST` function.
+
+TDengine provides 2 set operators: `UNION ALL` and `UNION`. `UNION ALL` combines the results without removing duplicate data. `UNION` combines the results and remove duplicate data rows. In single SQL statement, at most 100 set operators can be used.
+
+## Comparsion Operator
+
+| # | **Operator** | **Data Types** | **Description** |
+| --- | :---------------: | ------------------------------------------------------------------- | ----------------------------------------------- |
+| 1 | = | Except for BLOB, MEDIUMBLOB and JSON | Equal |
+| 2 | <\>, != | Except for BLOB, MEDIUMBLOB, JSON and primary key of timestamp type | Not equal |
+| 3 | \>, < | Except for BLOB, MEDIUMBLOB and JSON | Greater than, less than |
+| 4 | \>=, <= | Except for BLOB, MEDIUMBLOB and JSON | Greater than or equal to, less than or equal to |
+| 5 | IS [NOT] NULL | Any types | Is NULL or NOT |
+| 6 | [NOT] BETWEEN AND | Except for BLOB, MEDIUMBLOB and JSON | In a value range or not |
+| 7 | IN | Except for BLOB, MEDIUMBLOB, JSON and primary key of timestamp type | In a list of values or not |
+| 8 | LIKE | BINARY, NCHAR and VARCHAR | Wildcard matching |
+| 9 | MATCH, NMATCH | BINARY, NCHAR and VARCHAR | Regular expression matching |
+| 10 | CONTAINS | JSON | If A key exists in JSON |
+
+`LIKE` operator uses wildcard to match a string, the rules are:
+
+- '%' matches 0 to any number of characters; '\_' matches any single ASCII character.
+- \_ can be used to match a `_` in the string, i.e. using escape character backslash `\`
+- Wildcard string is 100 bytes at most. Longer a wildcard string is, worse the performance of LIKE operator is.
+
+`MATCH` and `NMATCH` operators use regular expressions to match a string, the rules are:
+
+- Regular expressions of POSIX standard are supported.
+- Only `tbname`, i.e. table name of sub tables, and tag columns of string types can be matched with regular expression, data columns are not supported.
+- Regular expression string is 128 bytes at most, and can be adjusted by setting parameter `maxRegexStringLen`, which is a client side configuration and needs to restart the client to take effect.
+
+## Logical Operators
+
+| # | **Operator** | **Data Types** | **Description** |
+| --- | :----------: | -------------- | ---------------------------------------------------------------------------------------- |
+| 1 | AND | BOOL | Logical AND, return TRUE if both conditions are TRUE; return FALSE if any one is FALSE. |
+| 2 | OR | BOOL | Logical OR, return TRUE if any condition is TRUE; return FALSE if both are FALSE |
+
+TDengine uses shortcircut optimization when performing logical operations. For AND operator, if the first condition is evaluated to FALSE, then the second one is not evaluated. For OR operator, if the first condition is evaluated to TRUE, then the second one is not evaluated.
diff --git a/docs-en/12-taos-sql/index.md b/docs-en/12-taos-sql/index.md
index 611f2bf75eb2a234ae139ce65f2e78d356483bb7..33656338a7bba38dc55cf536bdba8e95309c5acf 100644
--- a/docs-en/12-taos-sql/index.md
+++ b/docs-en/12-taos-sql/index.md
@@ -3,11 +3,9 @@ title: TDengine SQL
description: "The syntax supported by TDengine SQL "
---
-This section explains the syntax about operating database, table, STable, inserting data, selecting data, functions and some tips that can be used in TDengine SQL. It would be easier to understand with some fundamental knowledge of SQL.
+This section explains the syntax of SQL to perform operations on databases, tables and STables, insert data, select data and use functions. We also provide some tips that can be used in TDengine SQL. If you have previous experience with SQL this section will be fairly easy to understand. If you do not have previous experience with SQL, you'll come to appreciate the simplicity and power of SQL.
-TDengine SQL is the major interface for users to write data into or query from TDengine. For users to easily use, syntax similar to standard SQL is provided. However, please be noted that TDengine SQL is not standard SQL. Besides, because TDengine doesn't provide the functionality of deleting time series data, corresponding statements are not provided in TDengine SQL.
-
-TDengine SQL doesn't support abbreviation for keywords, for example `DESCRIBE` can't be abbreviated as `DESC`.
+TDengine SQL is the major interface for users to write data into or query from TDengine. For ease of use, the syntax is similar to that of standard SQL. However, please note that TDengine SQL is not standard SQL. For instance, TDengine doesn't provide a delete function for time series data and so corresponding statements are not provided in TDengine SQL.
Syntax Specifications used in this chapter:
@@ -16,7 +14,7 @@ Syntax Specifications used in this chapter:
- | means one of a few options, excluding | itself.
- … means the item prior to it can be repeated multiple times.
-To better demonstrate the syntax, usage and rules of TAOS SQL, hereinafter it's assumed that there is a data set of meters. Assuming each meter collects 3 data: current, voltage, phase. The data model is as below:
+To better demonstrate the syntax, usage and rules of TAOS SQL, hereinafter it's assumed that there is a data set of data from electric meters. Each meter collects 3 data measurements: current, voltage, phase. The data model is shown below:
```sql
taos> DESCRIBE meters;
@@ -30,4 +28,4 @@ taos> DESCRIBE meters;
groupid | INT | 4 | TAG |
```
-The data set includes the data collected by 4 meters, the corresponding table name is d1001, d1002, d1003, d1004 respectively based on the data model of TDengine.
+The data set includes the data collected by 4 meters, the corresponding table name is d1001, d1002, d1003 and d1004 based on the data model of TDengine.
diff --git a/docs-en/12-taos-sql/timewindow-1.webp b/docs-en/12-taos-sql/timewindow-1.webp
new file mode 100644
index 0000000000000000000000000000000000000000..82747558e96df752a0010d85be79a4af07e4a1df
Binary files /dev/null and b/docs-en/12-taos-sql/timewindow-1.webp differ
diff --git a/docs-en/12-taos-sql/timewindow-2.webp b/docs-en/12-taos-sql/timewindow-2.webp
new file mode 100644
index 0000000000000000000000000000000000000000..8f1314ae34f7f5c5cca1d3cb80455f555fad38c3
Binary files /dev/null and b/docs-en/12-taos-sql/timewindow-2.webp differ
diff --git a/docs-en/12-taos-sql/timewindow-3.webp b/docs-en/12-taos-sql/timewindow-3.webp
new file mode 100644
index 0000000000000000000000000000000000000000..5bd16e68e7fd5da6805551e9765975277cd5d4d9
Binary files /dev/null and b/docs-en/12-taos-sql/timewindow-3.webp differ
diff --git a/docs-en/13-operation/01-pkg-install.md b/docs-en/13-operation/01-pkg-install.md
index a1aad1c3c96c52689e9f68509c27ccce574d2082..c098002962d62aa0acc7a94462c052303cb2ed90 100644
--- a/docs-en/13-operation/01-pkg-install.md
+++ b/docs-en/13-operation/01-pkg-install.md
@@ -6,7 +6,7 @@ description: Install, Uninstall, Start, Stop and Upgrade
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
-TDengine community version provides dev and rpm package for users to choose based on the system environment. deb supports Debian, Ubuntu and systems derived from them. rpm supports CentOS, RHEL, SUSE and systems derived from them. Furthermore, tar.gz package is provided for enterprise customers.
+TDengine community version provides deb and rpm packages for users to choose from, based on their system environment. The deb package supports Debian, Ubuntu and derivative systems. The rpm package supports CentOS, RHEL, SUSE and derivative systems. Furthermore, a tar.gz package is provided for TDengine Enterprise customers.
## Install
@@ -14,7 +14,7 @@ TDengine community version provides dev and rpm package for users to choose base
1. Download deb package from official website, for example TDengine-server-2.4.0.7-Linux-x64.deb
-2. In the directory where the package is located, execute below command
+2. In the directory where the package is located, execute the command below
```bash
$ sudo dpkg -i TDengine-server-2.4.0.7-Linux-x64.deb
@@ -46,7 +46,7 @@ TDengine is installed successfully!
1. Download rpm package from official website, for example TDengine-server-2.4.0.7-Linux-x64.rpm;
-2. In the directory where the package is located, execute below command
+2. In the directory where the package is located, execute the command below
```
$ sudo rpm -ivh TDengine-server-2.4.0.7-Linux-x64.rpm
@@ -77,7 +77,7 @@ TDengine is installed successfully!
1. Download the tar.gz package, for example TDengine-server-2.4.0.7-Linux-x64.tar.gz;
- 2、In the directory where the package is located, firstly decompress the file, then switch to the sub-directory generated in decompressing, i.e. "TDengine-enterprise-server-2.4.0.7/" in this example, and execute the `install.sh` script.
+2. In the directory where the package is located, first decompress the file, then switch to the sub-directory generated in decompressing, i.e. "TDengine-enterprise-server-2.4.0.7/" in this example, and execute the `install.sh` script.
```bash
$ tar xvzf TDengine-enterprise-server-2.4.0.7-Linux-x64.tar.gz
@@ -124,7 +124,7 @@ taoskeeper is installed, enable it by `systemctl enable taoskeeper`
```
:::info
-Some configuration will be prompted for users to provide when install.sh is executing, the interactive mode can be disabled by executing `./install.sh -e no`. `./install -h` can show all parameters and detailed explanation.
+Users will be prompted to enter some configuration information when install.sh is executing. The interactive mode can be disabled by executing `./install.sh -e no`. `./install.sh -h` can show all parameters with detailed explanation.
:::
@@ -132,7 +132,7 @@ Some configuration will be prompted for users to provide when install.sh is exec
:::note
-When installing on the first node in the cluster, when "Enter FQDN:" is prompted, nothing needs to be provided. When installing on following nodes, when "Enter FQDN:" is prompted, the end point of the first dnode in the cluster can be input if it has been already up; or just ignore it and configure later after installation is done.
+When installing on the first node in the cluster, at the "Enter FQDN:" prompt, nothing needs to be provided. When installing on subsequent nodes, at the "Enter FQDN:" prompt, you must enter the end point of the first dnode in the cluster if it is already up. You can also just ignore it and configure it later after installation is finished.
:::
@@ -181,14 +181,14 @@ taosKeeper is removed successfully!
:::note
-- It's strongly suggested not to use multiple kinds of installation packages on single host TDengine
-- After deb package is installed, if the installation directory is removed manually so that uninstall or reinstall can't succeed, it can be resolved by cleaning up TDengine package information as below command and then reinstalling.
+- We strongly recommend not to use multiple kinds of installation packages on a single host TDengine.
+- After deb package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information. You can then reinstall if needed.
```bash
$ sudo rm -f /var/lib/dpkg/info/tdengine*
```
-- After rpm package is installed, if the installation directory is removed manually so that uninstall or reinstall can't succeed, it can be resolved by cleaning up TDengine package information as below command and then reinstalling.
+- After rpm package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information. You can then reinstall if needed.
```bash
$ sudo rpm -e --noscripts tdengine
@@ -219,7 +219,7 @@ lrwxrwxrwx 1 root root 13 Feb 22 09:34 log -> /var/log/taos/
During the installation process:
- Configuration directory, data directory, and log directory are created automatically if they don't exist
-- The default configuration file is located at /etc/taos/taos.cfg, which is a copy of /usr/local/taos/cfg/taos.cfg if not existing
+- The default configuration file is located at /etc/taos/taos.cfg, which is a copy of /usr/local/taos/cfg/taos.cfg
- The default data directory is /var/lib/taos, which is a soft link to /usr/local/taos/data
- The default log directory is /var/log/taos, which is a soft link to /usr/local/taos/log
- The executables at /usr/local/taos/bin are linked to /usr/bin
@@ -228,14 +228,14 @@ During the installation process:
:::note
-- When TDengine is uninstalled, the configuration /etc/taos/taos.cfg, data directory /var/lib/taos, log directory /var/log/taos are kept. They can be deleted manually with caution because data can't be recovered once
+- When TDengine is uninstalled, the configuration /etc/taos/taos.cfg, data directory /var/lib/taos, log directory /var/log/taos are kept. They can be deleted manually with caution, because data can't be recovered. Please follow data integrity, security, backup or relevant SOPs before deleting any data.
- When reinstalling TDengine, if the default configuration file /etc/taos/taos.cfg exists, it will be kept and the configuration file in the installation package will be renamed to taos.cfg.orig and stored at /usr/local/taos/cfg to be used as configuration sample. Otherwise the configuration file in the installation package will be installed to /etc/taos/taos.cfg and used.
## Start and Stop
-Linux system services `systemd`, `systemctl` or `service` is used to start, stop and restart TDengine. The server process of TDengine is `taosd`, which is started automatically after the Linux system is started. System operator can use `systemd`, `systemctl` or `service` to start, stop or restart TDengine server.
+Linux system services `systemd`, `systemctl` or `service` are used to start, stop and restart TDengine. The server process of TDengine is `taosd`, which is started automatically after the Linux system is started. System operators can use `systemd`, `systemctl` or `service` to start, stop or restart TDengine server.
-For example, if using `systemctl` , the commands to start, stop, restart and check TDengine server are as below:
+For example, if using `systemctl` , the commands to start, stop, restart and check TDengine server are below:
- Start server:`systemctl start taosd`
@@ -263,20 +263,22 @@ Active: inactive (dead)
There are two aspects in upgrade operation: upgrade installation package and upgrade a running server.
-Upgrading package should follow the steps mentioned previously to firstly uninstall old version then install new version.
+To upgrade a package, follow the steps mentioned previously to first uninstall the old version then install the new version.
-Upgrading a running server is much more complex. Firstly please check the version number of old version and new version. The version number of TDengine consists of 4 sections, only the first 3 section match can the old version be upgraded to the new version. The steps of upgrading a running server are as below:
+Upgrading a running server is much more complex. First please check the version number of the old version and the new version. The version number of TDengine consists of 4 sections, only if the first 3 sections match can the old version be upgraded to the new version. The steps of upgrading a running server are as below:
- Stop inserting data
-- Make sure all data persisted into disk
+- Make sure all data is persisted to disk
+- Make some simple queries (Such as total rows in stables, tables and so on. Note down the values. Follow best practices and relevant SOPs.)
- Stop the cluster of TDengine
- Uninstall old version and install new version
- Start the cluster of TDengine
-- Make some simple queries to make sure no data loss
-- Make some simple data insertion to make sure the cluster works well
-- Restore business data
+- Execute simple queries, such as the ones executed prior to installing the new package, to make sure there is no data loss
+- Run some simple data insertion statements to make sure the cluster works well
+- Restore business services
:::warning
+
TDengine doesn't guarantee any lower version is compatible with the data generated by a higher version, so it's never recommended to downgrade the version.
:::
diff --git a/docs-en/13-operation/02-planning.mdx b/docs-en/13-operation/02-planning.mdx
index 35a34aebc088c233ed9fc39723e8890ebc56e124..c1baf92dbfa8d93f83174c05c2ea631d1a469739 100644
--- a/docs-en/13-operation/02-planning.mdx
+++ b/docs-en/13-operation/02-planning.mdx
@@ -2,19 +2,19 @@
title: Resource Planning
---
-The computing and storage resources need to be planned if using TDengine to build an IoT platform. How to plan the CPU, memory and disk required will be described in this chapter.
+It is important to plan computing and storage resources if using TDengine to build an IoT, time-series or Big Data platform. How to plan the CPU, memory and disk resources required, will be described in this chapter.
## Memory Requirement of Server Side
-The number of vgroups created for each database is same as the number of CPU cores by default and can be configured by parameter `maxVgroupsPerDb`, each vnode in a vgroup stores one replica. Each vnode consumes fixed size of memory, i.e. `blocks` \* `cache`. Besides, some memory is required for tag values associated with each table. A fixed amount of memory is required for each cluster. So, the memory required for each DB can be calculated using below formula:
+By default, the number of vgroups created for each database is the same as the number of CPU cores. This can be configured by the parameter `maxVgroupsPerDb`. Each vnode in a vgroup stores one replica. Each vnode consumes a fixed amount of memory, i.e. `blocks` \* `cache`. In addition, some memory is required for tag values associated with each table. A fixed amount of memory is required for each cluster. So, the memory required for each DB can be calculated using the formula below:
```
Database Memory Size = maxVgroupsPerDb * replica * (blocks * cache + 10MB) + numOfTables * (tagSizePerTable + 0.5KB)
```
-For example, assuming the default value of `maxVgroupPerDB` is 64, the default value of `cache` 16M, the default value of `blocks` is 6, there are 100,000 tables in a DB, the replica number is 1, total length of tag values is 256 bytes, the total memory required for this DB is: 64 \* 1 \* (16 \* 6 + 10) + 100000 \* (0.25 + 0.5) / 1000 = 6792M.
+For example, assuming the default value of `maxVgroupPerDB` is 64, the default value of `cache` is 16M, the default value of `blocks` is 6, there are 100,000 tables in a DB, the replica number is 1, total length of tag values is 256 bytes, the total memory required for this DB is: 64 \* 1 \* (16 \* 6 + 10) + 100000 \* (0.25 + 0.5) / 1000 = 6792M.
-In real operation of TDengine, we are more concerned about the memory used by each TDengine server process `taosd`.
+In the real operation of TDengine, we are more concerned about the memory used by each TDengine server process `taosd`.
```
taosd_memory = vnode_memory + mnode_memory + query_memory
@@ -22,29 +22,29 @@ In real operation of TDengine, we are more concerned about the memory used by ea
In the above formula:
-1. "vnode_memory" of a `taosd` process is the memory used by all vnodes hosted by this `taosd` process. It can be roughly calculated by firstly adding up the total memory of all DBs whose memory usage can be derived according to the formula mentioned previously then dividing by number of dnodes and multiplying the number of replicas.
+1. "vnode_memory" of a `taosd` process is the memory used by all vnodes hosted by this `taosd` process. It can be roughly calculated by firstly adding up the total memory of all DBs whose memory usage can be derived according to the formula for Database Memory Size, mentioned above, then dividing by number of dnodes and multiplying the number of replicas.
```
- vnode_memory = sum(Database memory) / number_of_dnodes \* replica
+ vnode_memory = (sum(Database Memory Size) / number_of_dnodes) * replica
```
2. "mnode_memory" of a `taosd` process is the memory consumed by a mnode. If there is one (and only one) mnode hosted in a `taosd` process, the memory consumed by "mnode" is "0.2KB \* the total number of tables in the cluster".
3. "query_memory" is the memory used when processing query requests. Each ongoing query consumes at least "0.2 KB \* total number of involved tables".
-Please be noted that the above formulas can only be used to estimate the minimum memory requirement, instead of maximum memory usage. In a real production environment, it's better to preserve some redundance beyond the estimated minimum memory requirement. If memory is abundant, it's suggested to increase the value of parameter `blocks` to speed up data insertion and data query.
+Please note that the above formulas can only be used to estimate the minimum memory requirement, instead of maximum memory usage. In a real production environment, it's better to reserve some redundance beyond the estimated minimum memory requirement. If memory is abundant, it's suggested to increase the value of parameter `blocks` to speed up data insertion and data query.
## Memory Requirement of Client Side
-The client programs use TDengine client driver `taosc` to connect to the server side, there is also memory requirement for a client program.
+For the client programs using TDengine client driver `taosc` to connect to the server side there is a memory requirement as well.
-The memory consumed by a client program is mainly about the SQL statements for data insertion, caching of table metadata, and some internal use. Assuming maximum number of tables is N (the memory consumed by the metadata of each table is 256 bytes), maximum number of threads for parallel insertion is T, maximum length of a SQL statement is S (normally 1 MB), the memory required by a client program can be estimated using below formula:
+The memory consumed by a client program is mainly about the SQL statements for data insertion, caching of table metadata, and some internal use. Assuming maximum number of tables is N (the memory consumed by the metadata of each table is 256 bytes), maximum number of threads for parallel insertion is T, maximum length of a SQL statement is S (normally 1 MB), the memory required by a client program can be estimated using the below formula:
```
M = (T * S * 3 + (N / 4096) + 100)
```
-For example, if the number of parallel data insertion threads is 100, total number of tables is 10,000,000, then minimum memory requirement of a client program is:
+For example, if the number of parallel data insertion threads is 100, total number of tables is 10,000,000, then the minimum memory requirement of a client program is:
```
100 * 3 + (10000000 / 4096) + 100 = 2741 (MBytes)
@@ -56,10 +56,10 @@ So, at least 3GB needs to be reserved for such a client.
The CPU resources required depend on two aspects:
-- **Data Insertion** Each dnode of TDengine can process at least 10,000 insertion requests in one second, while each insertion request can have multiple rows. The computing resource consumed between inserting 1 row one time and inserting 10 rows one time is very small. So, the more the rows to insert one time, the higher the efficiency. Inserting in bach also exposes requirement for the client side which needs to cache rows and insert in batch once the cached rows reaches a threshold.
-- **Data Query** High efficiency query is provided in TDengine, but it's hard to estimate the CPU resource required because the queries used in different use cases and the frequency of queries vary significantly. It can only be verified with the query statements, query frequency, data size to be queried, etc provided by user.
+- **Data Insertion** Each dnode of TDengine can process at least 10,000 insertion requests in one second, while each insertion request can have multiple rows. The difference in computing resource consumed, between inserting 1 row at a time, and inserting 10 rows at a time is very small. So, the more the number of rows that can be inserted one time, the higher the efficiency. Inserting in batch also imposes requirements on the client side which needs to cache rows to insert in batch once the number of cached rows reaches a threshold.
+- **Data Query** High efficiency query is provided in TDengine, but it's hard to estimate the CPU resource required because the queries used in different use cases and the frequency of queries vary significantly. It can only be verified with the query statements, query frequency, data size to be queried, and other requirements provided by users.
-In short words, the CPU resource required for data insertion can be estimated but it's hard to do so for query use cases. In real operation, it's suggested to control CPU usage below 50%. If this threshold is exceeded, it's a reminder for system operator to add more nodes in the cluster to expand resources.
+In short, the CPU resource required for data insertion can be estimated but it's hard to do so for query use cases. In real operation, it's suggested to control CPU usage below 50%. If this threshold is exceeded, it's a reminder for system operator to add more nodes in the cluster to expand resources.
## Disk Requirement
@@ -69,14 +69,14 @@ The compression ratio in TDengine is much higher than that in RDBMS. In most cas
Raw DataSize = numOfTables * rowSizePerTable * rowsPerTable
```
-For example, there are 10,000,000 meters, while each meter collects data every 15 minutes and the data size of each collection si 128 bytes, so the raw data size of one year is: 10000000 \* 128 \* 24 \* 60 / 15 \* 365 = 44.8512(TB). Assuming compression ratio is 5, the actual disk size is: 44.851 / 5 = 8.97024(TB).
+For example, there are 10,000,000 meters, while each meter collects data every 15 minutes and the data size of each collection is 128 bytes, so the raw data size of one year is: 10000000 \* 128 \* 24 \* 60 / 15 \* 365 = 44.8512(TB). Assuming compression ratio is 5, the actual disk size is: 44.851 / 5 = 8.97024(TB).
-Parameter `keep` can be used to set how long the data will be kept on disk. To further reduce storage cost, multiple storage levels can be enabled in TDengine, with the coldest data stored on the cheapest storage device, and this is transparent to application programs.
+Parameter `keep` can be used to set how long the data will be kept on disk. To further reduce storage cost, multiple storage levels can be enabled in TDengine, with the coldest data stored on the cheapest storage device. This is completely transparent to application programs.
-To increase the performance, multiple disks can be setup for parallel data reading or data inserting. Please be noted that expensive disk array is not necessary because replications are used in TDengine to provide high availability.
+To increase performance, multiple disks can be setup for parallel data reading or data inserting. Please note that an expensive disk array is not necessary because replications are used in TDengine to provide high availability.
## Number of Hosts
-A host can be either physical or virtual. The total memory, total CPU, total disk required can be estimated according to the formulas mentioned previously. Then, according to the system resources that a single host can provide, assuming all hosts are same in resources, the number of hosts can be derived easily.
+A host can be either physical or virtual. The total memory, total CPU, total disk required can be estimated according to the formulae mentioned previously. Then, according to the system resources that a single host can provide, assuming all hosts have the same resources, the number of hosts can be derived easily.
**Quick Estimation for CPU, Memory and Disk** Please refer to [Resource Estimate](https://www.taosdata.com/config/config.html).
diff --git a/docs-en/13-operation/03-tolerance.md b/docs-en/13-operation/03-tolerance.md
index 367474cddb7395ea84a4a33623d1643e487f9d09..d4d48d7fcdc2c990b6ea0821e2347c70a809ed79 100644
--- a/docs-en/13-operation/03-tolerance.md
+++ b/docs-en/13-operation/03-tolerance.md
@@ -7,23 +7,26 @@ title: Fault Tolerance & Disaster Recovery
TDengine uses **WAL**, i.e. Write Ahead Log, to achieve fault tolerance and high reliability.
-When a data block is received by TDengine, the original data block is firstly written into WAL. The log in WAL will be deleted only after the data has been written into data files in the database. Data can be recovered from WAL in case the server is stopped abnormally due to any reason and then restarted.
+When a data block is received by TDengine, the original data block is first written into WAL. The log in WAL will be deleted only after the data has been written into data files in the database. Data can be recovered from WAL in case the server is stopped abnormally for any reason and then restarted.
There are 2 configuration parameters related to WAL:
-- walLevel:0:wal is disabled; 1:wal is enabled without fsync; 2:wal is enabled with fsync.
-- fsync:only valid when walLevel is set to 2, it specified the interval of invoking fsync. If set to 0, it means fsync is invoked immediately once WAL is written.
+- walLevel:
+ - 0:wal is disabled
+ - 1:wal is enabled without fsync
+ - 2:wal is enabled with fsync
+- fsync:This parameter is only valid when walLevel is set to 2. It specifies the interval, in milliseconds, of invoking fsync. If set to 0, it means fsync is invoked immediately once WAL is written.
-To achieve absolutely no data loss, walLevel needs to be set to 2 and fsync needs to be set to 1. The penalty is the performance of data ingestion downgrades. However, if the concurrent threads of data insertion on the client side can reach a big enough number, for example 50, the data ingestion performance would be still good enough, our verification shows that the drop is only 30% compared to fsync is set to 3,000 milliseconds.
+To achieve absolutely no data loss, walLevel should be set to 2 and fsync should be set to 1. There is a performance penalty to the data ingestion rate. However, if the concurrent data insertion threads on the client side can reach a big enough number, for example 50, the data ingestion performance will be still good enough. Our verification shows that the drop is only 30% when fsync is set to 3,000 milliseconds.
## Disaster Recovery
-TDengine uses replications to provide high availability and disaster recovery capability.
+TDengine uses replication to provide high availability and disaster recovery capability.
-TDengine cluster is managed by mnode. To make sure the high availability of mnode, multiple replicas can be configured by system parameter `numOfMnodes`. The data replication between mnode replicas is in synchronous way to guarantee the metadata consistency.
+A TDengine cluster is managed by mnode. To ensure the high availability of mnode, multiple replicas can be configured by the system parameter `numOfMnodes`. The data replication between mnode replicas is performed in a synchronous way to guarantee metadata consistency.
-The number of replicas for time series data in TDengine is associated with each database, there can be a lot of databases in a cluster while each database can be configured with a different number of replicas. When creating a database, parameter `replica` is used to configure the number of replications. To achieve high availability, `replica` needs to be higher than 1.
+The number of replicas for time series data in TDengine is associated with each database. There can be many databases in a cluster and each database can be configured with a different number of replicas. When creating a database, parameter `replica` is used to configure the number of replications. To achieve high availability, `replica` needs to be higher than 1.
-The number of dnodes in a TDengine cluster must NOT be lower than the number of replicas for any database, otherwise it would fail when trying to create table.
+The number of dnodes in a TDengine cluster must NOT be lower than the number of replicas for any database, otherwise it would fail when trying to create a table.
-As long as the dnodes of a TDengine cluster are deployed on different physical machines and replica number is set to bigger than 1, high availability can be achieved without any other assistance. If dnodes of TDengine cluster are deployed in geographically different data centers, disaster recovery can be achieved too.
+As long as the dnodes of a TDengine cluster are deployed on different physical machines and the replica number is higher than 1, high availability can be achieved without any other assistance. For disaster recovery, dnodes of a TDengine cluster should be deployed in geographically different data centers.
diff --git a/docs-en/13-operation/06-admin.md b/docs-en/13-operation/06-admin.md
index 1ca0dfeaf4a4b0b4c597e1a5ec6ece20224e2dba..458a91b88c6d8319fe8b84c2b34d8ff968957910 100644
--- a/docs-en/13-operation/06-admin.md
+++ b/docs-en/13-operation/06-admin.md
@@ -2,7 +2,7 @@
title: User Management
---
-System operator can use TDengine CLI `taos` to create or remove user or change password. The SQL command is as low:
+A system operator can use TDengine CLI `taos` to create or remove users or change passwords. The SQL commands are documented below:
## Create User
@@ -10,7 +10,7 @@ System operator can use TDengine CLI `taos` to create or remove user or change p
CREATE USER PASS <'password'>;
```
-When creating a user and specifying the user name and password, password needs to be quoted using single quotes.
+When creating a user and specifying the user name and password, the password needs to be quoted using single quotes.
## Drop User
@@ -18,7 +18,7 @@ When creating a user and specifying the user name and password, password needs t
DROP USER ;
```
-Drop a user can only be performed by root.
+Dropping a user can only be performed by root.
## Change Password
@@ -26,7 +26,7 @@ Drop a user can only be performed by root.
ALTER USER PASS <'password'>;
```
-To keep the case of the password when changing password, password needs to be quoted using single quotes.
+To keep the case of the password when changing password, the password needs to be quoted using single quotes.
## Change Privilege
@@ -36,7 +36,7 @@ ALTER USER PRIVILEGE ;
The privileges that can be changed to are `read` or `write` without single quotes.
-Note:there is another privilege `super`, which not allowed to be authorized to any user.
+Note:there is another privilege `super`, which is not allowed to be authorized to any user.
## Show Users
@@ -45,6 +45,6 @@ SHOW USERS;
```
:::note
-In SQL syntax, `< >` means the part that needs to be input by user, excluding the `< >` itself.
+In SQL syntax, `< >` means the part that needs to be input by the user, excluding the `< >` itself.
:::
diff --git a/docs-en/13-operation/07-import.md b/docs-en/13-operation/07-import.md
index befca38652abadca60b62721754de7ab718f65ea..8362cec1ab3072866018678b42a679d0c19b49de 100644
--- a/docs-en/13-operation/07-import.md
+++ b/docs-en/13-operation/07-import.md
@@ -2,26 +2,26 @@
title: Data Import
---
-There are multiple ways of importing data provided byTDengine: import with script, import from data file, import using `taosdump`.
+There are multiple ways of importing data provided by TDengine: import with script, import from data file, import using `taosdump`.
## Import Using Script
-TDengine CLI `taos` supports `source ` command for executing the SQL statements in the file in batch. The SQL statements for creating databases, creating tables, and inserting rows can be written in single file with one statement on each line, then the file can be executed using `source` command in TDengine CLI `taos` to execute the SQL statements in order and in batch. In the script file, any line beginning with "#" is treated as comments and ignored silently.
+TDengine CLI `taos` supports `source ` command for executing the SQL statements in the file in batch. The SQL statements for creating databases, creating tables, and inserting rows can be written in a single file with one statement on each line, then the file can be executed using the `source` command in TDengine CLI `taos` to execute the SQL statements in order and in batch. In the script file, any line beginning with "#" is treated as comments and ignored silently.
## Import from Data File
-In TDengine CLI, data can be imported from a CSV file into an existing table. The data in single CSV must belong to same table and must be consistent with the schema of that table. The SQL statement is as below:
+In TDengine CLI, data can be imported from a CSV file into an existing table. The data in a single CSV must belong to the same table and must be consistent with the schema of that table. The SQL statement is as below:
```sql
insert into tb1 file 'path/data.csv';
```
:::note
-If there is description in the first line of a CSV file, please remove it before importing. If there is no value for a column, please use `NULL` without quotes.
+If there is a description in the first line of the CSV file, please remove it before importing. If there is no value for a column, please use `NULL` without quotes.
:::
-For example, there is a sub table d1001 whose schema is as below:
+For example, there is a subtable d1001 whose schema is as below:
```sql
taos> DESCRIBE d1001
@@ -49,7 +49,7 @@ The format of the CSV file to be imported, data.csv, is as below:
'2018-10-12 06:38:05.000',18.30000,219,0.31000
```
-Then, below SQL statement can be used to import data from file "data.csv", assuming the file is located under the home directory of current Linux user.
+Then, the below SQL statement can be used to import data from file "data.csv", assuming the file is located under the home directory of the current Linux user.
```sql
taos> insert into d1001 file '~/data.csv';
@@ -58,4 +58,4 @@ Query OK, 9 row(s) affected (0.004763s)
## Import using taosdump
-A convenient tool for importing and exporting data is provided by TDengine, `taosdump`, which can used to export data from one TDengine cluster and import into another one. For the details of using `taosdump` please refer to [Tool for exporting and importing data: taosdump](/reference/taosdump).
+A convenient tool for importing and exporting data is provided by TDengine, `taosdump`, which can be used to export data from one TDengine cluster and import into another one. For the details of using `taosdump` please refer to [Tool for exporting and importing data: taosdump](/reference/taosdump).
diff --git a/docs-en/13-operation/08-export.md b/docs-en/13-operation/08-export.md
index fa9625a7c5f6b0e6706d726bff410cee647286bb..5780de42faeaedbc1c985ad2aa2f52fe56c76971 100644
--- a/docs-en/13-operation/08-export.md
+++ b/docs-en/13-operation/08-export.md
@@ -2,11 +2,13 @@
title: Data Export
---
-There are two ways of exporting data from a TDengine cluster, one is SQL statement in TDengine CLI, the other one is `taosdump`.
+There are two ways of exporting data from a TDengine cluster:
+- Using a SQL statement in TDengine CLI
+- Using the `taosdump` tool
## Export Using SQL
-If you want to export the data of a table or a STable, please execute below SQL statement in TDengine CLI.
+If you want to export the data of a table or a STable, please execute the SQL statement below, in the TDengine CLI.
```sql
select * from >> data.csv;
@@ -16,4 +18,4 @@ The data of table or STable specified by `tb_name` will be exported into a file
## Export Using taosdump
-With `taosdump`, you can choose to export the data of all databases, a database, a table or a STable, you can also choose export the data within a time range, or even only export the schema definition of a table. For the details of using `taosdump` please refer to [Tool for exporting and importing data: taosdump](/reference/taosdump).
+With `taosdump`, you can choose to export the data of all databases, a database, a table or a STable, you can also choose to export the data within a time range, or even only export the schema definition of a table. For the details of using `taosdump` please refer to [Tool for exporting and importing data: taosdump](/reference/taosdump).
diff --git a/docs-en/13-operation/09-status.md b/docs-en/13-operation/09-status.md
index 3f3c6c9f1e86f9f33bafc7edfd79bebb175871cc..51396524ea281ae665c9fdf61d2e6e6202995537 100644
--- a/docs-en/13-operation/09-status.md
+++ b/docs-en/13-operation/09-status.md
@@ -3,7 +3,7 @@ sidebar_label: Connections & Tasks
title: Manage Connections and Query Tasks
---
-System operator can use TDengine CLI to show the connections, ongoing queries, stream computing, and can close connection or stop ongoing query task or stream computing.
+A system operator can use the TDengine CLI to show connections, ongoing queries, stream computing, and can close connections or stop ongoing query tasks or stream computing.
## Show Connections
@@ -13,7 +13,7 @@ SHOW CONNECTIONS;
One column of the output of the above SQL command is "ip:port", which is the end point of the client.
-## Close Connections Forcedly
+## Force Close Connections
```sql
KILL CONNECTION ;
@@ -27,9 +27,9 @@ In the above SQL command, `connection-id` is from the first column of the output
SHOW QUERIES;
```
-The first column of the output is query ID, which is composed of the corresponding connection ID and the sequence number of the current query task started on this connection, in format of "connection-id:query-no".
+The first column of the output is query ID, which is composed of the corresponding connection ID and the sequence number of the current query task started on this connection. The format is "connection-id:query-no".
-## Close Queries Forcedly
+## Force Close Queries
```sql
KILL QUERY ;
@@ -43,12 +43,12 @@ In the above SQL command, `query-id` is from the first column of the output of `
SHOW STREAMS;
```
-The first column of the output is stream ID, which is composed of the connection ID and the sequence number of the current stream started on this connection, in the format of "connection-id:stream-no".
+The first column of the output is stream ID, which is composed of the connection ID and the sequence number of the current stream started on this connection. The format is "connection-id:stream-no".
-## Close Continuous Query Forcedly
+## Force Close Continuous Query
```sql
KILL STREAM ;
```
-The the above SQL command, `stream-id` is from the first column of the output of `SHOW STREAMS`.
+The above SQL command, `stream-id` is from the first column of the output of `SHOW STREAMS`.
diff --git a/docs-en/13-operation/10-monitor.md b/docs-en/13-operation/10-monitor.md
index 019cf4f2948141fac79587429f1fdc3b06623945..a4679983f2bc77bb4e438f5d43fa1b8beb39b120 100644
--- a/docs-en/13-operation/10-monitor.md
+++ b/docs-en/13-operation/10-monitor.md
@@ -2,19 +2,19 @@
title: TDengine Monitoring
---
-After TDengine is started, a database named `log` for monitoring is created automatically. The information about CPU, memory, disk, bandwidth, number of requests, disk I/O speed, slow query is written into `log` database on the basis of a predefined interval. Besides, some important system operations, like logon, create user, drop database, and alerts and warnings generated in TDengine are written into `log` database too. System operator can view the data in `log` database from TDengine CLI or from a web console.
+After TDengine is started, a database named `log` is created automatically to help with monitoring. Information that includes CPU, memory and disk usage, bandwidth, number of requests, disk I/O speed, slow queries, is written into the `log` database at a predefined interval. Additionally, some important system operations, like logon, create user, drop database, and alerts and warnings generated in TDengine are written into the `log` database too. A system operator can view the data in `log` database from TDengine CLI or from a web console.
-Collection of the monitoring information is enabled by default, but can be disabled by parameter `monitor` in configuration file.
+The collection of the monitoring information is enabled by default, but can be disabled by parameter `monitor` in the configuration file.
## TDinsight
-TDinsight is a total solution which uses the monitor database `log` mentioned previously and Grafana to monitor a TDengine cluster.
+TDinsight is a complete solution which uses the monitoring database `log` mentioned previously, and Grafana, to monitor a TDengine cluster.
From version 2.3.3.0, more monitoring data has been added in the `log` database. Please refer to [TDinsight Grafana Dashboard](https://grafana.com/grafana/dashboards/15167) to learn more details about using TDinsight to monitor TDengine.
-A script `TDinsight.sh` is provided to deploy TDinsight in automatic way.
+A script `TDinsight.sh` is provided to deploy TDinsight automatically.
-Download `TDinsight.sh` with below command:
+Download `TDinsight.sh` with the below command:
```bash
wget https://github.com/taosdata/grafanaplugin/raw/master/dashboards/TDinsight.sh
@@ -38,7 +38,7 @@ There are two ways to setup Grafana alert notification.
sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E
```
-- The AliCloud SMS alert built in TDengine data source plugin can be enabled with parameter `-s`, the parameters of this way are as follows:
+- The AliCloud SMS alert built in TDengine data source plugin can be enabled with parameter `-s`, the parameters of enabling this plugin are listed below:
- `-I`: AliCloud SMS Key ID
- `-K`: AliCloud SMS Key Secret
@@ -47,7 +47,7 @@ There are two ways to setup Grafana alert notification.
- `-T`: Input parameters in JSON format for the SMS notification template, for example`{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}`
- `-B`: List of mobile numbers to be notified
- Below is an example of the full command using this way.
+ Below is an example of the full command using the AliCloud SMS alert.
```bash
sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -s \
@@ -55,6 +55,6 @@ There are two ways to setup Grafana alert notification.
-T '{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}'
```
-Launch `TDinsight.sh` as above command and restart Grafana, then open Dashboard `http://localhost:3000/d/tdinsight`.
+Launch `TDinsight.sh` with the command above and restart Grafana, then open Dashboard `http://localhost:3000/d/tdinsight`.
For more use cases and restrictions please refer to [TDinsight](/reference/tdinsight/).
diff --git a/docs-en/13-operation/11-optimize.md b/docs-en/13-operation/11-optimize.md
deleted file mode 100644
index 7cccfc8b0d51a4bfda9ae4827130a3747f10e649..0000000000000000000000000000000000000000
--- a/docs-en/13-operation/11-optimize.md
+++ /dev/null
@@ -1,100 +0,0 @@
----
-title: Performance Optimization
----
-
-After a TDengine cluster has been running for long enough time, because of updating data, deleting tables and deleting expired data, there may be fragments in data files and query performance may be impacted. To resolve the problem of fragments, from version 2.1.3.0 a new SQL command `COMPACT` can be used to defragment the data files.
-
-```sql
-COMPACT VNODES IN (vg_id1, vg_id2, ...)
-```
-
-`COMPACT` can be used to defragment one or more vgroups. The defragmentation work will be put in task queue for scheduling execution by TDengine. `SHOW VGROUPS` command can be used to get the vgroup ids to be used in `COMPACT` command. There is a column `compacting` in the output of `SHOW GROUPS` to indicate the compacting status of the vgroup: 2 means the vgroup is waiting in task queue for compacting, 1 means compacting is in progress, and 0 means the vgroup has nothing to do with compacting.
-
-Please be noted that a lot of disk I/O is required for defragementation operation, during which the performance may be impacted significantly for data insertion and query, data insertion may be blocked shortly in extreme cases.
-
-## Optimize Storage Parameters
-
-The data in different use cases may have different characteristics, such as the days to keep, number of replicas, collection interval, record size, number of collection points, compression or not, etc. To achieve best efficiency in storage, the parameters in below table can be used, all of them can be either configured in `taos.cfg` as default configuration or in the command `create database`. For detailed definition of these parameters please refer to [Configuration Parameters](/reference/config/).
-
-| # | Parameter | Unit | Definition | **Value Range** | **Default Value** |
-| --- | --------- | ---- | ------------------------------------------------------------------------------ | ----------------------------------------------------------------------------------------------- | ----------------- |
-| 1 | days | Day | The time range of the data stored in a single data file | 1-3650 | 10 |
-| 2 | keep | Day | The number of days the data is kept in the database | 1-36500 | 3650 |
-| 3 | cache | MB | The size of each memory block | 1-128 | 16 |
-| 4 | blocks | None | The number of memory blocks used by each vnode | 3-10000 | 6 |
-| 5 | quorum | None | The number of required confirmation in case of multiple replicas | 1-2 | 1 |
-| 6 | minRows | None | The minimum number of rows in a data file | 10-1000 | 100 |
-| 7 | maxRows | None | The maximum number of rows in a daa file | 200-10000 | 4096 |
-| 8 | comp | None | Whether to compress the data | 0:uncompressed; 1: One Phase compression; 2: Two Phase compression | 2 |
-| 9 | walLevel | None | wal sync level (named as "wal" in create database ) | 1:wal enabled without fsync; 2:wal enabled with fsync | 1 |
-| 10 | fsync | ms | The time to wait for invoking fsync when walLevel is set to 2; 0 means no wait | 3000 |
-| 11 | replica | none | The number of replications | 1-3 | 1 |
-| 12 | precision | none | Time precision | ms: millisecond; us: microsecond;ns: nanosecond | ms |
-| 13 | update | none | Whether to allow updating data | 0: not allowed; 1: a row must be updated as whole; 2: a part of columns in a row can be updated | 0 |
-| 14 | cacheLast | none | Whether the latest data of a table is cached in memory | 0: not cached; 1: the last row is cached; 2: the latest non-NULL value of each column is cached | 0 |
-
-For a specific use case, there may be multiple kinds of data with different characteristics, it's best to put data with same characteristics in same database. So there may be multiple databases in a system while each database can be configured with different storage parameters to achieve best performance. The above parameters can be used when creating a database to override the default setting in configuration file.
-
-```sql
- CREATE DATABASE demo DAYS 10 CACHE 32 BLOCKS 8 REPLICA 3 UPDATE 1;
-```
-
-The above SQL statement creates a database named as `demo`, in which each data file stores data across 10 days, the size of each memory block is 32 MB and each vnode is allocated with 8 blocks, the replica is set to 3, update operation is allowed, and all other parameters not specified in the command follow the default configuration in `taos.cfg`.
-
-Once a database is created, only some parameters can be changed and be effective immediately while others are can't.
-
-| **Parameter** | **Alterable** | **Value Range** | **Syntax** |
-| ------------- | ------------- | ---------------- | -------------------------------------- |
-| name | | | |
-| create time | | | |
-| ntables | | | |
-| vgroups | | | |
-| replica | **YES** | 1-3 | ALTER DATABASE REPLICA _n_ |
-| quorum | **YES** | 1-2 | ALTER DATABASE QUORUM _n_ |
-| days | | | |
-| keep | **YES** | days-365000 | ALTER DATABASE KEEP _n_ |
-| cache | | | |
-| blocks | **YES** | 3-1000 | ALTER DATABASE BLOCKS _n_ |
-| minrows | | | |
-| maxrows | | | |
-| wal | | | |
-| fsync | | | |
-| comp | **YES** | 0-2 | ALTER DATABASE COMP _n_ |
-| precision | | | |
-| status | | | |
-| update | | | |
-| cachelast | **YES** | 0 \| 1 \| 2 \| 3 | ALTER DATABASE CACHELAST _n_ |
-
-**Explanation:** Prior to version 2.1.3.0, `taosd` server process needs to be restarted for these parameters to take in effect if they are changed using `ALTER DATABASE`.
-
-When trying to join a new dnode into a running TDengine cluster, all the parameters related to cluster in the new dnode configuration must be consistent with the cluster, otherwise it can't join the cluster. The parameters that are checked when joining a dnode are as below. For detailed definition of these parameters please refer to [Configuration Parameters](/reference/config/).
-
-- numOfMnodes
-- mnodeEqualVnodeNum
-- offlineThreshold
-- statusInterval
-- maxTablesPerVnode
-- maxVgroupsPerDb
-- arbitrator
-- timezone
-- balance
-- flowctrl
-- slaveQuery
-- adjustMaster
-
-For the convenience of debugging, the log setting of a dnode can be changed temporarily. The temporary change will be lost once the server is restarted.
-
-```sql
-ALTER DNODE
-```
-
-- dnode_id: from output of "SHOW DNODES"
-- config: the parameter to be changed, as below
- - resetlog: close the old log file and create the new on
- - debugFlag: 131 (INFO/ERROR/WARNING), 135 (DEBUG), 143 (TRACE)
-
-For example
-
-```
-alter dnode 1 debugFlag 135;
-```
diff --git a/docs-en/13-operation/17-diagnose.md b/docs-en/13-operation/17-diagnose.md
index b140d925c07386f93c82d492bb8bcf4d95349f12..2b474fddba4af5ba0c29103cd8ab1249d10d055b 100644
--- a/docs-en/13-operation/17-diagnose.md
+++ b/docs-en/13-operation/17-diagnose.md
@@ -4,19 +4,19 @@ title: Problem Diagnostics
## Network Connection Diagnostics
-When the client is unable to access the server, the network connection between the client side and the server side needs to be checked to find out the root cause and resolve problems.
+When a TDengine client is unable to access a TDengine server, the network connection between the client side and the server side must be checked to find the root cause and resolve problems.
-The diagnostic for network connection can be executed between Linux and Linux or between Linux and Windows.
+Diagnostics for network connections can be executed between Linux and Linux or between Linux and Windows.
Diagnostic steps:
-1. If the port range to be diagnosed are being occupied by a `taosd` server process, please firstly stop `taosd.
-2. On the server side, execute command `taos -n server -P -l ` to monitor the port range starting from the port specified by `-P` parameter with the role of "server.
-3. On the client side, execute command `taos -n client -h -P -l ` to send testing package to the specified server and port.
+1. If the port range to be diagnosed is being occupied by a `taosd` server process, please first stop `taosd.
+2. On the server side, execute command `taos -n server -P -l ` to monitor the port range starting from the port specified by `-P` parameter with the role of "server".
+3. On the client side, execute command `taos -n client -h -P -l ` to send a testing package to the specified server and port.
--l : The size of the testing package, in bytes. The value range is [11, 64,000] and default value is 1,000. Please be noted that the package length must be same in the above 2 commands executed on server side and client side respectively.
+-l : The size of the testing package, in bytes. The value range is [11, 64,000] and default value is 1,000. Please note that the package length must be same in the above 2 commands executed on server side and client side respectively.
-Output of the server side is as below for example:
+Output of the server side for the example is below:
```bash
# taos -n server -P 6000
@@ -47,7 +47,7 @@ Output of the server side is as below for example:
12/21 14:50:22.721261 0x7f53427ec700 UTL UDP: send:1000 bytes to 172.27.0.8 at 6011
```
-Output of the client side is as below for example:
+Output of the client side for the example is below:
```bash
# taos -n client -h 172.27.0.7 -P 6000
@@ -65,13 +65,13 @@ Output of the client side is as below for example:
12/21 14:50:22.721274 0x7fc95d859200 UTL successed to test UDP port:6011
```
-The output needs to be checked carefully for the system operator to find out root cause and solve the problem.
+The output needs to be checked carefully for the system operator to find the root cause and resolve the problem.
## Startup Status and RPC Diagnostic
-`taos -n startup -h ` can be used to check the startup status of a `taosd` process. This is a comman task for a system operator to do to determine whether `taosd` has been started successfully, especially in case of cluster.
+`taos -n startup -h ` can be used to check the startup status of a `taosd` process. This is a common task which should be performed by a system operator, especially in the case of a cluster, to determine whether `taosd` has been started successfully.
-`taos -n rpc -h ` can be used to check whether the port of a started `taosd` can be accessed or not. If `taosd` process doesn't respond or work abnormally, this command can be used to initiate a rpc communication with the specified fqdn to determine whether it's network problem or `taosd` is abnormal.
+`taos -n rpc -h ` can be used to check whether the port of a started `taosd` can be accessed or not. If `taosd` process doesn't respond or is working abnormally, this command can be used to initiate a rpc communication with the specified fqdn to determine whether it's a network problem or whether `taosd` is abnormal.
## Sync and Arbitrator Diagnostic
@@ -80,43 +80,43 @@ taos -n sync -P 6040 -h
taos -n sync -P 6042 -h
```
-The above commands can be executed on Linux Shell to check whether the port for sync works well and whether the sync module of the server side works well. Besides, `-P 6042` is used to check whether the arbitrator is configured properly and works well.
+The above commands can be executed in a Linux shell to check whether the port for sync is working well and whether the sync module on the server side is working well. Additionally, `-P 6042` is used to check whether the arbitrator is configured properly and is working well.
## Network Speed Diagnostic
`taos -n speed -h -P 6030 -N 10 -l 10000000 -S TCP`
-From version 2.2.0.0, the above command can be executed on Linux Shell to test the network speed, it sends uncompressed package to a running `taosd` server process or a simulated server process started by `taos -n server` to test the network speed. Parameters can be used when testing network speed are as below:
+From version 2.2.0.0 onwards, the above command can be executed in a Linux shell to test network speed. The command sends uncompressed packages to a running `taosd` server process or a simulated server process started by `taos -n server` to test the network speed. Parameters can be used when testing network speed are as below:
--n:When set to "speed", it means testing network speed
--h:The FQDN or IP of the server process to be connected to; if not set, the FQDN configured in `taos.cfg` is used
--P:The port of the server process to connect to, the default value is 6030
--N:The number of packages that will be sent in the test, range is [1,10000], default value is 100
--l:The size of each package in bytes, range is [1024, 1024 \* 1024 \* 1024], default value is 1024
--S:The type of network packages to send, can be either TCP or UDP, default value is
+-n:When set to "speed", it means testing network speed.
+-h:The FQDN or IP of the server process to be connected to; if not set, the FQDN configured in `taos.cfg` is used.
+-P:The port of the server process to connect to, the default value is 6030.
+-N:The number of packages that will be sent in the test, range is [1,10000], default value is 100.
+-l:The size of each package in bytes, range is [1024, 1024 \* 1024 \* 1024], default value is 1024.
+-S:The type of network packages to send, can be either TCP or UDP, default value is TCP.
## FQDN Resolution Diagnostic
`taos -n fqdn -h `
-From version 2.2.0.0, the above command can be executed on Linux Shell to test the resolution speed of FQDN. It can be used to try to resolve a FQDN to an IP address and record the time spent in this process. The parameters that can be used for this purpose are as below:
+From version 2.2.0.0 onward, the above command can be executed in a Linux shell to test the resolution speed of FQDN. It can be used to try to resolve a FQDN to an IP address and record the time spent in this process. The parameters that can be used for this purpose are as below:
--n:When set to "fqdn", it means testing the speed of resolving FQDN
--h:The FQDN to be resolved. If not set, the `FQDN` parameter in `taos.cfg` is used by default.
+-n:When set to "fqdn", it means testing the speed of resolving FQDN.
+-h:The FQDN to be resolved. If not set, the `FQDN` parameter in `taos.cfg` is used by default.
## Server Log
-The parameter `debugFlag` is used to control the log level of `taosd` server process. The default value is 131, for debug purpose it needs to be escalated to 135 or 143.
+The parameter `debugFlag` is used to control the log level of the `taosd` server process. The default value is 131. For debugging and tracing, it needs to be set to either 135 or 143 respectively.
-Once this parameter is set to 135 or 143, the log file grows very quickly especially when there is huge volume of data insertion and data query requests. If all the logs are stored together, some important information may be missed very easily, so on server side important information is stored at different place from other logs.
+Once this parameter is set to 135 or 143, the log file grows very quickly especially when there is a huge volume of data insertion and data query requests. If all the logs are stored together, some important information may be missed very easily and so on the server side, important information is stored in a different place from other logs.
- The log at level of INFO, WARNING and ERROR is stored in `taosinfo` so that it is easy to find important information
- The log at level of DEBUG (135) and TRACE (143) and other information not handled by `taosinfo` are stored in `taosdlog`
## Client Log
-An independent log file, named as "taoslog+" is generated for each client program, i.e. a client process. The default value of `debugFlag` is also 131 and only log at level of INFO/ERROR/WARNING is recorded, it and needs to be changed to 135 or 143 so that log at DEBUG or TRACE level can be recorded for debugging purpose.
+An independent log file, named as "taoslog+" is generated for each client program, i.e. a client process. The default value of `debugFlag` is also 131 and only logs at level of INFO/ERROR/WARNING are recorded. As stated above, for debugging and tracing, it needs to be changed to 135 or 143 respectively, so that logs at DEBUG or TRACE level can be recorded.
The maximum length of a single log file is controlled by parameter `numOfLogLines` and only 2 log files are kept for each `taosd` server process.
-log file is written in async way to minimize the workload on disk, bu the penalty is that a few log lines may be lost in some extreme conditions.
+Log files are written in an async way to minimize the workload on disk, but the trade off for performance is that a few log lines may be lost in some extreme conditions.
diff --git a/docs-en/13-operation/index.md b/docs-en/13-operation/index.md
index a9801c0390f294d6b39b1219cc4055149871ef9c..c64749c40e26f091e4a25e0238827ebceff4b069 100644
--- a/docs-en/13-operation/index.md
+++ b/docs-en/13-operation/index.md
@@ -2,7 +2,7 @@
title: Administration
---
-This chapter is mainly written for system administrators, covering download, install/uninstall, data import/export, system monitoring, user management, connection management, etc. Capacity planning and system optimization are also covered.
+This chapter is mainly written for system administrators. It covers download, install/uninstall, data import/export, system monitoring, user management, connection management, capacity planning and system optimization.
```mdx-code-block
import DocCardList from '@theme/DocCardList';
diff --git a/docs-en/14-reference/02-rest-api/02-rest-api.mdx b/docs-en/14-reference/02-rest-api/02-rest-api.mdx
index f405d551e530a37a5221e71a824f605fba0c0db9..990af861961e9daf4ac775462e21d6d9852d17c1 100644
--- a/docs-en/14-reference/02-rest-api/02-rest-api.mdx
+++ b/docs-en/14-reference/02-rest-api/02-rest-api.mdx
@@ -2,23 +2,23 @@
title: REST API
---
-To support the development of various types of platforms, TDengine provides an API that conforms to the REST principle, namely REST API. To minimize the learning cost, different from the other database REST APIs, TDengine directly requests the SQL command contained in the request BODY through HTTP POST to operate the database and only requires a URL.
+To support the development of various types of applications and platforms, TDengine provides an API that conforms to REST principles; namely REST API. To minimize the learning cost, unlike REST APIs for other database engines, TDengine allows insertion of SQL commands in the BODY of an HTTP POST request, to operate the database.
:::note
-One difference from the native connector is that the REST interface is stateless, so the `USE db_name` command has no effect. All references to table names and super table names need to specify the database name prefix. (Since version 2.2.0.0, it is supported to specify db_name in RESTful URL. If the database name prefix is not specified in the SQL command, the `db_name` specified in the URL will be used. Since version 2.4.0.0, REST service is provided by taosAdapter by default. And it requires that the `db_name` must be specified in the URL.)
+One difference from the native connector is that the REST interface is stateless and so the `USE db_name` command has no effect. All references to table names and super table names need to specify the database name in the prefix. (Since version 2.2.0.0, TDengine supports specification of the db_name in RESTful URL. If the database name prefix is not specified in the SQL command, the `db_name` specified in the URL will be used. Since version 2.4.0.0, REST service is provided by taosAdapter by default and it requires that the `db_name` must be specified in the URL.)
:::
## Installation
-The REST interface does not rely on any TDengine native library, so the client application does not need to install any TDengine libraries. The client application's development language supports the HTTP protocol is enough.
+The REST interface does not rely on any TDengine native library, so the client application does not need to install any TDengine libraries. The client application's development language only needs to support the HTTP protocol.
## Verification
If the TDengine server is already installed, it can be verified as follows:
-The following is an Ubuntu environment using the `curl` tool (to confirm that it is installed) to verify that the REST interface is working.
+The following example is in an Ubuntu environment and uses the `curl` tool to verify that the REST interface is working. Note that the `curl` tool may need to be installed in your environment.
-The following example lists all databases, replacing `h1.taosdata.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number.
+The following example lists all databases on the host h1.taosdata.com. To use it in your environment, replace `h1.taosdata.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number.
```html
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' h1.taosdata.com:6041/rest/sql
@@ -89,7 +89,7 @@ For example, `http://h1.taos.com:6041/rest/sql/test` is a URL to `h1.taos.com:60
TDengine supports both Basic authentication and custom authentication mechanisms, and subsequent versions will provide a standard secure digital signature mechanism for authentication.
-- The custom authentication information is as follows (Let's introduce token later)
+- The custom authentication information is as follows. More details about "token" later.
```
Authorization: Taosd
@@ -136,7 +136,7 @@ The return result is in JSON format, as follows:
Description:
-- status: tell if the operation result is success or failure.
+- status: tells you whethre the operation result is success or failure.
- head: the definition of the table, or just one column "affected_rows" if no result set is returned. (As of version 2.0.17.0, it is recommended not to rely on the head return value to determine the data column type but rather use column_meta. In later versions, the head item may be removed from the return value.)
- column_meta: this item is added to the return value to indicate the data type of each column in the data with version 2.0.17.0 and later versions. Each column is described by three values: column name, column type, and type length. For example, `["current",6,4]` means that the column name is "current", the column type is 6, which is the float type, and the type length is 4, which is the float type with 4 bytes. If the column type is binary or nchar, the type length indicates the maximum length of content stored in the column, not the length of the specific data in this return value. When the column type is nchar, the type length indicates the number of Unicode characters that can be saved, not bytes.
- data: The exact data returned, presented row by row, or just [[affected_rows]] if no result set is returned. The order of the data columns in each row of data is the same as that of the data columns described in column_meta.
diff --git a/docs-en/14-reference/03-connector/03-connector.mdx b/docs-en/14-reference/03-connector/03-connector.mdx
index 6be914bdb4b701f478b6b8b27366d6ebb5a39ec8..44685579005c2cebd5e0194a10d457cd1199051e 100644
--- a/docs-en/14-reference/03-connector/03-connector.mdx
+++ b/docs-en/14-reference/03-connector/03-connector.mdx
@@ -4,7 +4,7 @@ title: Connector
TDengine provides a rich set of APIs (application development interface). To facilitate users to develop their applications quickly, TDengine supports connectors for multiple programming languages, including official connectors for C/C++, Java, Python, Go, Node.js, C#, and Rust. These connectors support connecting to TDengine clusters using both native interfaces (taosc) and REST interfaces (not supported in a few languages yet). Community developers have also contributed several unofficial connectors, such as the ADO.NET connector, the Lua connector, and the PHP connector.
-
+
## Supported platforms
diff --git a/docs-en/14-reference/03-connector/connector.webp b/docs-en/14-reference/03-connector/connector.webp
new file mode 100644
index 0000000000000000000000000000000000000000..040cf5c26c726b345b2e0e5363dd3c677bec61be
Binary files /dev/null and b/docs-en/14-reference/03-connector/connector.webp differ
diff --git a/docs-en/14-reference/03-connector/cpp.mdx b/docs-en/14-reference/03-connector/cpp.mdx
index 4b388d32a9050645e268bb267d16e9a5b8aa4bda..d549413012d1f17edf4711ae51a56ba5696fcbe3 100644
--- a/docs-en/14-reference/03-connector/cpp.mdx
+++ b/docs-en/14-reference/03-connector/cpp.mdx
@@ -4,7 +4,7 @@ sidebar_label: C/C++
title: C/C++ Connector
---
-C/C++ developers can use TDengine's client driver and the C/C++ connector, to develop their applications to connect to TDengine clusters for data writing, querying, and other functions. To use it, you need to include the TDengine header file _taos.h_, which lists the function prototypes of the provided APIs; the application also needs to link to the corresponding dynamic libraries on the platform where it is located.
+C/C++ developers can use TDengine's client driver and the C/C++ connector, to develop their applications to connect to TDengine clusters for data writing, querying, and other functions. To use the C/C++ connector you must include the TDengine header file _taos.h_, which lists the function prototypes of the provided APIs. The application also needs to link to the corresponding dynamic libraries on the platform where it is located.
```c
#include
@@ -26,7 +26,7 @@ Please refer to [list of supported platforms](/reference/connector#supported-pla
## Supported versions
-The version number of the TDengine client driver and the version number of the TDengine server require one-to-one correspondence and recommend using the same version of client driver as what the TDengine server version is. Although a lower version of the client driver is compatible to work with a higher version of the server, if the first three version numbers are the same (i.e., only the fourth version number is different), but it is not recommended. It is strongly discouraged to use a higher version of the client driver to access a lower version of the TDengine server.
+The version number of the TDengine client driver and the version number of the TDengine server should be the same. A lower version of the client driver is compatible with a higher version of the server, if the first three version numbers are the same (i.e., only the fourth version number is different). For e.g. if the client version is x.y.z.1 and the server version is x.y.z.2 the client and server are compatible. But in general we do not recommend using a lower client version with a newer server version. It is also strongly discouraged to use a higher version of the client driver to access a lower version of the TDengine server.
## Installation steps
@@ -55,7 +55,7 @@ In the above example code, `taos_connect()` establishes a connection to port 603
:::note
-- If not specified, when the return value of the API is an integer, _0_ means success, the others are error codes representing the reason for failure, and when the return value is a pointer, _NULL_ means failure.
+- If not specified, when the return value of the API is an integer, _0_ means success. All others are error codes representing the reason for failure. When the return value is a pointer, _NULL_ means failure.
- All error codes and their corresponding causes are described in the `taoserror.h` file.
:::
@@ -114,7 +114,6 @@ This section shows sample code for standard access methods to TDengine clusters
Subscribe and consume
```c
-{{#include examples/c/subscribe.c}}
```
@@ -140,13 +139,12 @@ The base API is used to do things like create database connections and provide a
- `void taos_cleanup()`
- Clean up the runtime environment and should be called before the application exits.
+ Cleans up the runtime environment and should be called before the application exits.
- ` int taos_options(TSDB_OPTION option, const void * arg, ...) `
Set client options, currently supports region setting (`TSDB_OPTION_LOCALE`), character set
-(`TSDB_OPTION_CHARSET`), time zone
-(`TSDB_OPTION_TIMEZONE`), configuration file path (`TSDB_OPTION_CONFIGDIR`) . The region setting, character set, and time zone default to the current settings of the operating system.
+(`TSDB_OPTION_CHARSET`), time zone (`TSDB_OPTION_TIMEZONE`), configuration file path (`TSDB_OPTION_CONFIGDIR`). The region setting, character set, and time zone default to the current settings of the operating system.
- `char *taos_get_client_info()`
@@ -159,7 +157,7 @@ The base API is used to do things like create database connections and provide a
- host: FQDN of any node in the TDengine cluster
- user: user name
- pass: password
- - db: database name, if the user does not provide, it can also be connected correctly, the user can create a new database through this connection, if the user provides the database name, it means that the database user has already created, the default use of the database
+ - db: the database name. Even if the user does not provide this, the connection will still work correctly. The user can create a new database through this connection. If the user provides the database name, it means that the database has already been created and the connection can be used for regular operations on the database.
- port: the port the taosd program is listening on
NULL indicates a failure. The application needs to save the returned parameters for subsequent use.
@@ -187,7 +185,7 @@ The APIs described in this subsection are all synchronous interfaces. After bein
- `TAOS_RES* taos_query(TAOS *taos, const char *sql)`
- Executes an SQL command, either a DQL, DML, or DDL statement. The `taos` parameter is a handle obtained with `taos_connect()`. You can't tell if the result failed by whether the return value is `NULL`, but by parsing the error code in the result set with the `taos_errno()` function.
+ Executes an SQL command, either a DQL, DML, or DDL statement. The `taos` parameter is a handle obtained with `taos_connect()`. If the return value is `NULL` this does not necessarily indicate a failure. You can get the error code, if any, by parsing the error code in the result set with the `taos_errno()` function.
- `int taos_result_precision(TAOS_RES *res)`
@@ -231,7 +229,7 @@ typedef struct taosField {
- ` void taos_free_result(TAOS_RES *res)`
- Frees the query result set and the associated resources. Be sure to call this API to free the resources after the query is completed. Otherwise, it may lead to a memory leak in the application. However, note that the application will crash if you call a function like `taos_consume()` to get the query results after freeing the resources.
+ Frees the query result set and the associated resources. Be sure to call this API to free the resources after the query is completed. Failing to call this, may lead to a memory leak in the application. However, note that the application will crash if you call a function like `taos_consume()` to get the query results after freeing the resources.
- `char *taos_errstr(TAOS_RES *res)`
@@ -242,7 +240,7 @@ typedef struct taosField {
Get the reason for the last API call failure. The return value is the error code.
:::note
-TDengine version 2.0 and above recommends that each thread of a database application create a separate connection or a connection pool based on threads. It is not recommended to pass the connection (TAOS\*) structure to different threads for shared use in the application. Queries, writes, etc., issued based on TAOS structures are multi-thread safe, but state quantities such as "USE statement" may interfere between threads. In addition, the C connector can dynamically create new database-oriented connections on demand (this procedure is not visible to the user), and it is recommended that `taos_close()` be called only at the final exit of the program to close the connection.
+TDengine version 2.0 and above recommends that each thread of a database application create a separate connection or a connection pool based on threads. It is not recommended to pass the connection (TAOS\*) structure to different threads for shared use in the application. Queries, writes, and other operations issued that are based on TAOS structures are multi-thread safe, but state quantities such as the "USE statement" may interfere between threads. In addition, the C connector can dynamically create new database-oriented connections on demand (this procedure is not visible to the user), and it is recommended that `taos_close()` be called only at the final exit of the program to close the connection.
:::
@@ -274,12 +272,12 @@ All TDengine's asynchronous APIs use a non-blocking call pattern. Applications c
### Parameter Binding API
-In addition to direct calls to `taos_query()` to perform queries, TDengine also provides a set of `bind` APIs that supports parameter binding, similar in style to MySQL, and currently only supports using a question mark `? ` to represent the parameter to be bound.
+In addition to direct calls to `taos_query()` to perform queries, TDengine also provides a set of `bind` APIs that supports parameter binding, similar in style to MySQL. TDengine currently only supports using a question mark `? ` to represent the parameter to be bound.
-Starting with versions 2.1.1.0 and 2.1.2.0, TDengine has significantly improved the bind APIs to support for data writing (INSERT) scenarios. This avoids the resource consumption of SQL syntax parsing when writing data through the parameter binding interface, thus significantly improving write performance in most cases. A typical operation, in this case, is as follows.
+Starting with versions 2.1.1.0 and 2.1.2.0, TDengine has significantly improved the bind APIs to support data writing (INSERT) scenarios. This avoids the resource consumption of SQL syntax parsing when writing data through the parameter binding interface, thus significantly improving write performance in most cases. A typical operation, in this case, is as follows.
1. call `taos_stmt_init()` to create the parameter binding object.
-2. call `taos_stmt_prepare()` to parse the INSERT statement. 3.
+2. call `taos_stmt_prepare()` to parse the INSERT statement.
3. call `taos_stmt_set_tbname()` to set the table name if it is reserved in the INSERT statement but not the TAGS.
4. call `taos_stmt_set_tbname_tags()` to set the table name and TAGS values if the table name and TAGS are reserved in the INSERT statement (for example, if the INSERT statement takes an automatic table build).
5. call `taos_stmt_bind_param_batch()` to set the value of VALUES in multiple columns, or call `taos_stmt_bind_param()` to set the value of VALUES in a single row.
@@ -383,7 +381,7 @@ In addition to writing data using the SQL method or the parameter binding API, w
**return value**
TAOS_RES structure, application can get error message by using `taos_errstr()` and also error code by using `taos_errno()`.
In some cases, the returned TAOS_RES is `NULL`, and it is still possible to call `taos_errno()` to safely get the error code information.
- The returned TAOS_RES needs to be freed by the caller. Otherwise, a memory leak will occur.
+ The returned TAOS_RES needs to be freed by the caller in order to avoid memory leaks.
**Description**
The protocol type is enumerated and contains the following three formats.
@@ -416,13 +414,13 @@ The Subscription API currently supports subscribing to one or more tables and co
This function is responsible for starting the subscription service, returning the subscription object on success and `NULL` on failure, with the following parameters.
- - taos: the database connection that has been established
- - restart: if the subscription already exists, whether to restart or continue the previous subscription
- - topic: the topic of the subscription (i.e., the name). This parameter is the unique identifier of the subscription
- - sql: the query statement of the subscription, this statement can only be _select_ statement, only the original data should be queried, only the data can be queried in time order
- - fp: the callback function when the query result is received (the function prototype will be introduced later), only used when called asynchronously. This parameter should be passed `NULL` when called synchronously
- - param: additional parameter when calling the callback function, the system API will pass it to the callback function as it is, without any processing
- - interval: polling period in milliseconds. The callback function will be called periodically according to this parameter when called asynchronously. not recommended to set this parameter too small To avoid impact on system performance when called synchronously. If the interval between two calls to `taos_consume()` is less than this period, the API will block until the interval exceeds this period.
+ - taos: the database connection that has been established.
+ - restart: if the subscription already exists, whether to restart or continue the previous subscription.
+ - topic: the topic of the subscription (i.e., the name). This parameter is the unique identifier of the subscription.
+ - sql: the query statement of the subscription which can only be a _select_ statement. Only the original data should be queried, and data can only be queried in temporal order.
+ - fp: the callback function when the query result is received only used when called asynchronously. This parameter should be passed `NULL` when called synchronously. The function prototype is described below.
+ - param: additional parameter when calling the callback function. The system API will pass it to the callback function as is, without any processing.
+ - interval: polling period in milliseconds. The callback function will be called periodically according to this parameter when called asynchronously. The interval should not be too small to avoid impact on system performance when called synchronously. If the interval between two calls to `taos_consume()` is less than this period, the API will block until the interval exceeds this period.
- ` typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code)`
diff --git a/docs-en/14-reference/03-connector/csharp.mdx b/docs-en/14-reference/03-connector/csharp.mdx
index 2969392a0594ff0705e88bede5be90fb9dfd646d..5eb322cf9125fe036349de22ceea5988de46e404 100644
--- a/docs-en/14-reference/03-connector/csharp.mdx
+++ b/docs-en/14-reference/03-connector/csharp.mdx
@@ -179,9 +179,9 @@ namespace TDengineExample
1. "Unable to establish connection", "Unable to resolve FQDN"
- Usually, it cause by the FQDN configuration is incorrect, you can refer to [How to understand TDengine's FQDN (Chinese)](https://www.taosdata.com/blog/2021/07/29/2741.html) to solve it. 2.
+ Usually, it's caused by an incorrect FQDN configuration. Please refer to this section in the [FAQ](https://docs.tdengine.com/2.4/train-faq/faq/#2-how-to-handle-unable-to-establish-connection) to troubleshoot.
-Unhandled exception. System.DllNotFoundException: Unable to load DLL 'taos' or one of its dependencies: The specified module cannot be found.
+2. Unhandled exception. System.DllNotFoundException: Unable to load DLL 'taos' or one of its dependencies: The specified module cannot be found.
This is usually because the program did not find the dependent client driver. The solution is to copy `C:\TDengine\driver\taos.dll` to the `C:\Windows\System32\` directory on Windows, and create the following soft link on Linux `ln -s /usr/local/taos/driver/libtaos.so.x.x .x.x /usr/lib/libtaos.so` will work.
diff --git a/docs-en/14-reference/03-connector/go.mdx b/docs-en/14-reference/03-connector/go.mdx
index fd5930f07ff7184bd8dd5ff19cd3860f9718eaf9..c1e85ae4eb1d1d7ccfb70b2b4f38cebaf6cbf06c 100644
--- a/docs-en/14-reference/03-connector/go.mdx
+++ b/docs-en/14-reference/03-connector/go.mdx
@@ -15,9 +15,9 @@ import GoOpenTSDBTelnet from "../../07-develop/03-insert-data/_go_opts_telnet.md
import GoOpenTSDBJson from "../../07-develop/03-insert-data/_go_opts_json.mdx"
import GoQuery from "../../07-develop/04-query-data/_go.mdx"
-`driver-go` is the official Go language connector for TDengine, which implements the interface to the Go language [database/sql](https://golang.org/pkg/database/sql/) package. Go developers can use it to develop applications that access TDengine cluster data.
+`driver-go` is the official Go language connector for TDengine. It implements the [database/sql](https://golang.org/pkg/database/sql/) package, the generic Go language interface to SQL databases. Go developers can use it to develop applications that access TDengine cluster data.
-`driver-go` provides two ways to establish connections. One is **native connection**, which connects to TDengine instances natively through the TDengine client driver (taosc), supporting data writing, querying, subscriptions, schemaless writing, and bind interface. The other is the **REST connection**, which connects to TDengine instances via the REST interface provided by taosAdapter. The set of features implemented by the REST connection differs slightly from the native connection.
+`driver-go` provides two ways to establish connections. One is **native connection**, which connects to TDengine instances natively through the TDengine client driver (taosc), supporting data writing, querying, subscriptions, schemaless writing, and bind interface. The other is the **REST connection**, which connects to TDengine instances via the REST interface provided by taosAdapter. The set of features implemented by the REST connection differs slightly from those implemented by the native connection.
This article describes how to install `driver-go` and connect to TDengine clusters and perform basic operations such as data query and data writing through `driver-go`.
@@ -213,7 +213,7 @@ func main() {
Since the REST interface is stateless, the `use db` syntax will not work. You need to put the db name into the SQL command, e.g. `create table if not exists tb1 (ts timestamp, a int)` to `create table if not exists test.tb1 (ts timestamp, a int)` otherwise it will report the error `[0x217] Database not specified or available`.
-You can also put the db name in the DSN by changing `root:taosdata@http(localhost:6041)/` to `root:taosdata@http(localhost:6041)/test`. This method is supported by taosAdapter in TDengine 2.4.0.5. is supported since TDengine 2.4.0.5. Executing the `create database` statement when the specified db does not exist will not report an error while executing other queries or writing against that db will report an error.
+You can also put the db name in the DSN by changing `root:taosdata@http(localhost:6041)/` to `root:taosdata@http(localhost:6041)/test`. This method is supported by taosAdapter since TDengine 2.4.0.5. Executing the `create database` statement when the specified db does not exist will not report an error while executing other queries or writing against that db will report an error.
The complete example is as follows.
@@ -289,7 +289,7 @@ func main() {
6. `readBufferSize` parameter has no significant effect after being increased
- If you increase `readBufferSize` will reduce the number of `syscall` calls when fetching results. If the query result is smaller, modifying this parameter will not improve significantly. If you increase the parameter value too much, the bottleneck will be parsing JSON data. If you need to optimize the query speed, you must adjust the value according to the actual situation to achieve the best query result.
+ Increasing `readBufferSize` will reduce the number of `syscall` calls when fetching results. If the query result is smaller, modifying this parameter will not improve performance significantly. If you increase the parameter value too much, the bottleneck will be parsing JSON data. If you need to optimize the query speed, you must adjust the value based on the actual situation to achieve the best query performance.
7. `disableCompression` parameter is set to `false` when the query efficiency is reduced
diff --git a/docs-en/14-reference/03-connector/java.mdx b/docs-en/14-reference/03-connector/java.mdx
index 328907c4d781bdea8d30623e01d431cedbf8d0fa..33d715c2e218fd6db4f61882f2a7a92baa80f5a2 100644
--- a/docs-en/14-reference/03-connector/java.mdx
+++ b/docs-en/14-reference/03-connector/java.mdx
@@ -9,19 +9,19 @@ description: TDengine Java based on JDBC API and provide both native and REST co
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
-'taos-jdbcdriver' is TDengine's official Java language connector, which allows Java developers to develop applications that access the TDengine database. 'taos-jdbcdriver' implements the interface of the JDBC driver standard and provides two forms of connectors. One is to connect to a TDengine instance natively through the TDengine client driver (taosc), which supports functions including data writing, querying, subscription, schemaless writing, and bind interface. And the other is to connect to a TDengine instance through the REST interface provided by taosAdapter (2.4.0.0 and later). REST connections implement has a slight differences to compare the set of features implemented and native connections.
+'taos-jdbcdriver' is TDengine's official Java language connector, which allows Java developers to develop applications that access the TDengine database. 'taos-jdbcdriver' implements the interface of the JDBC driver standard and provides two forms of connectors. One is to connect to a TDengine instance natively through the TDengine client driver (taosc), which supports functions including data writing, querying, subscription, schemaless writing, and bind interface. And the other is to connect to a TDengine instance through the REST interface provided by taosAdapter (2.4.0.0 and later). The implementation of the REST connection and those of the native connections have slight differences in features.
-
+
The preceding diagram shows two ways for a Java app to access TDengine via connector:
- JDBC native connection: Java applications use TSDBDriver on physical node 1 (pnode1) to call client-driven directly (`libtaos.so` or `taos.dll`) APIs to send writing and query requests to taosd instances located on physical node 2 (pnode2).
-- JDBC REST connection: The Java application encapsulates the SQL as a REST request via RestfulDriver, sends it to the REST server of physical node 2 (taosAdapter), requests TDengine server through the REST server, and returns the result.
+- JDBC REST connection: The Java application encapsulates the SQL as a REST request via RestfulDriver, sends it to the REST server (taosAdapter) on physical node 2. taosAdapter forwards the request to TDengine server and returns the result.
-Using REST connection, which does not rely on TDengine client drivers.It can be cross-platform more convenient and flexible but introduce about 30% lower performance than native connection.
+The REST connection, which does not rely on TDengine client drivers, is more convenient and flexible, in addition to being cross-platform. However the performance is about 30% lower than that of the native connection.
:::info
-TDengine's JDBC driver implementation is as consistent as possible with the relational database driver. Still, there are differences in the use scenarios and technical characteristics of TDengine and relational object databases, so 'taos-jdbcdriver' also has some differences from traditional JDBC drivers. You need to pay attention to the following points when using:
+TDengine's JDBC driver implementation is as consistent as possible with the relational database driver. Still, there are differences in the use scenarios and technical characteristics of TDengine and relational object databases. So 'taos-jdbcdriver' also has some differences from traditional JDBC drivers. It is important to keep the following points in mind:
- TDengine does not currently support delete operations for individual data records.
- Transactional operations are not currently supported.
@@ -88,7 +88,7 @@ Add following dependency in the `pom.xml` file of your Maven project:
-You can build Java connector from source code after clone TDengine project:
+You can build Java connector from source code after cloning the TDengine project:
```shell
git clone https://github.com/taosdata/TDengine.git
@@ -96,7 +96,7 @@ cd TDengine/src/connector/jdbc
mvn clean install -Dmaven.test.skip=true
```
-After compilation, a jar package of taos-jdbcdriver-2.0.XX-dist .jar is generated in the target directory, and the compiled jar file is automatically placed in the local Maven repository.
+After compilation, a jar package named taos-jdbcdriver-2.0.XX-dist.jar is generated in the target directory, and the compiled jar file is automatically placed in the local Maven repository.
@@ -186,7 +186,7 @@ Connection conn = DriverManager.getConnection(jdbcUrl);
In the above example, a RestfulDriver with a JDBC REST connection is used to establish a connection to a database named `test` with hostname `taosdemo.com` on port `6041`. The URL specifies the user name as `root` and the password as `taosdata`.
-There is no dependency on the client driver when Using a JDBC REST connection. Compared to a JDBC native connection, only the following are required: 1.
+There is no dependency on the client driver when Using a JDBC REST connection. Compared to a JDBC native connection, only the following are required:
1. driverClass specified as "com.taosdata.jdbc.rs.RestfulDriver".
2. jdbcUrl starting with "jdbc:TAOS-RS://".
@@ -206,10 +206,10 @@ The configuration parameters in the URL are as follows.
- Unlike the native connection method, the REST interface is stateless. When using the JDBC REST connection, you need to specify the database name of the table and super table in SQL. For example.
```sql
-INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('beijing') VALUES(now, 24.6);
+INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFrancisco') VALUES(now, 24.6);
```
-- Starting from taos-jdbcdriver-2.0.36 and TDengine 2.2.0.0, if dbname is specified in the URL, JDBC REST connections will use `/rest/sql/dbname` as the URL for REST requests by default, and there is no need to specify dbname in SQL. For example, if the URL is `jdbc:TAOS-RS://127.0.0.1:6041/test`, then the SQL can be executed: insert into t1 using weather(ts, temperature) tags('beijing') values(now, 24.6);
+- Starting from taos-jdbcdriver-2.0.36 and TDengine 2.2.0.0, if dbname is specified in the URL, JDBC REST connections will use `/rest/sql/dbname` as the URL for REST requests by default, and there is no need to specify dbname in SQL. For example, if the URL is `jdbc:TAOS-RS://127.0.0.1:6041/test`, then the SQL can be executed: insert into test using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6);
:::
@@ -271,7 +271,7 @@ If the configuration parameters are duplicated in the URL, Properties, or client
2. Properties connProps
3. the configuration file taos.cfg of the TDengine client driver when using a native connection
-For example, if you specify the password as `taosdata` in the URL and specify the password as `taosdemo` in the Properties simultaneously. In this case, JDBC will use the password in the URL to establish the connection.
+For example, if you specify the password as `taosdata` in the URL and specify the password as `taosdemo` in the Properties simultaneously, JDBC will use the password in the URL to establish the connection.
## Usage examples
@@ -323,7 +323,7 @@ while(resultSet.next()){
}
```
-> The query is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, starting from 1, it is recommended to use the field names to get them.
+> The query is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, you have to start from 1. However, we recommend using the field names to get the values of the fields in the result set.
### Handling exceptions
@@ -565,7 +565,7 @@ public class ParameterBindingDemo {
// set table name
pstmt.setTableName("t5_" + i);
// set tags
- pstmt.setTagNString(0, "Beijing-abc");
+ pstmt.setTagNString(0, "California-abc");
// set columns
ArrayList tsList = new ArrayList<>();
@@ -576,7 +576,7 @@ public class ParameterBindingDemo {
ArrayList f1List = new ArrayList<>();
for (int j = 0; j < numOfRow; j++) {
- f1List.add("Beijing-abc");
+ f1List.add("California-abc");
}
pstmt.setNString(1, f1List, BINARY_COLUMN_SIZE);
@@ -623,7 +623,7 @@ public void setNString(int columnIndex, ArrayList list, int size) throws
### Schemaless Writing
-Starting with version 2.2.0.0, TDengine has added the ability to schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. See [schemaless writing](/reference/schemaless/) for details.
+Starting with version 2.2.0.0, TDengine has added the ability to perform schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. See [schemaless writing](/reference/schemaless/) for details.
**Note**.
@@ -635,7 +635,7 @@ public class SchemalessInsertTest {
private static final String host = "127.0.0.1";
private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000";
private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0";
- private static final String jsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1346846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"Beijing\", \"id\": \"d1001\"}}";
+ private static final String jsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1346846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}";
public static void main(String[] args) throws SQLException {
final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
@@ -666,16 +666,16 @@ The TDengine Java Connector supports subscription functionality with the followi
#### Create subscriptions
```java
-TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false);
+TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topicname", "select * from meters", false);
```
The three parameters of the `subscribe()` method have the following meanings.
-- topic: the subscribed topic (i.e., name). This parameter is the unique identifier of the subscription
-- sql: the query statement of the subscription, this statement can only be `select` statement, only the original data should be queried, and you can query only the data in the positive time order
+- topicname: the name of the subscribed topic. This parameter is the unique identifier of the subscription.
+- sql: the query statement of the subscription. This statement can only be a `select` statement. Only original data can be queried, and you can query the data only temporal order.
- restart: if the subscription already exists, whether to restart or continue the previous subscription
-The above example will use the SQL command `select * from meters` to create a subscription named `topic`. If the subscription exists, it will continue the progress of the previous query instead of consuming all the data from the beginning.
+The above example will use the SQL command `select * from meters` to create a subscription named `topicname`. If the subscription exists, it will continue the progress of the previous query instead of consuming all the data from the beginning.
#### Subscribe to consume data
diff --git a/docs-en/14-reference/03-connector/node.mdx b/docs-en/14-reference/03-connector/node.mdx
index 3d30148e8ed9d8f98d135fa0fa72809f1115231a..8f586acde4848af71efcb23358be1f8486cedb8e 100644
--- a/docs-en/14-reference/03-connector/node.mdx
+++ b/docs-en/14-reference/03-connector/node.mdx
@@ -14,7 +14,6 @@ import NodeInfluxLine from "../../07-develop/03-insert-data/_js_line.mdx";
import NodeOpenTSDBTelnet from "../../07-develop/03-insert-data/_js_opts_telnet.mdx";
import NodeOpenTSDBJson from "../../07-develop/03-insert-data/_js_opts_json.mdx";
import NodeQuery from "../../07-develop/04-query-data/_js.mdx";
-import NodeAsyncQuery from "../../07-develop/04-query-data/_js_async.mdx";
`td2.0-connector` and `td2.0-rest-connector` are the official Node.js language connectors for TDengine. Node.js developers can develop applications to access TDengine instance data.
@@ -189,14 +188,8 @@ let cursor = conn.cursor();
### Query data
-#### Synchronous queries
-
-#### asynchronous query
-
-
-
## More Sample Programs
| Sample Programs | Sample Program Description |
@@ -232,7 +225,7 @@ See [video tutorial](https://www.taosdata.com/blog/2020/11/11/1957.html) for the
2. "Unable to establish connection", "Unable to resolve FQDN"
- Usually, root cause is the FQDN is not configured correctly. You can refer to [How to understand TDengine's FQDN (In Chinese)](https://www.taosdata.com/blog/2021/07/29/2741.html).
+ Usually, the root cause is an incorrect FQDN configuration. You can refer to this section in the [FAQ](https://docs.tdengine.com/2.4/train-faq/faq/#2-how-to-handle-unable-to-establish-connection) to troubleshoot.
## Important Updates
diff --git a/docs-en/14-reference/03-connector/python.mdx b/docs-en/14-reference/03-connector/python.mdx
index 2b238173e04e3e13de36b5ac4d91d0cda290ca72..69eec2388d460754493d2b775f14ab4bbf129799 100644
--- a/docs-en/14-reference/03-connector/python.mdx
+++ b/docs-en/14-reference/03-connector/python.mdx
@@ -11,18 +11,18 @@ import TabItem from "@theme/TabItem";
`taospy` is the official Python connector for TDengine. `taospy` provides a rich set of APIs that makes it easy for Python applications to access TDengine. `taospy` wraps both the [native interface](/reference/connector/cpp) and [REST interface](/reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively.
In addition to wrapping the native and REST interfaces, `taospy` also provides a set of programming interfaces that conforms to the [Python Data Access Specification (PEP 249)](https://peps.python.org/pep-0249/). It is easy to integrate `taospy` with many third-party tools, such as [SQLAlchemy](https://www.sqlalchemy.org/) and [pandas](https://pandas.pydata.org/).
-The connection to the server directly using the native interface provided by the client driver is referred to hereinafter as a "native connection"; the connection to the server using the REST interface provided by taosAdapter is referred to hereinafter as a "REST connection".
+The direct connection to the server using the native interface provided by the client driver is referred to hereinafter as a "native connection"; the connection to the server using the REST interface provided by taosAdapter is referred to hereinafter as a "REST connection".
The source code for the Python connector is hosted on [GitHub](https://github.com/taosdata/taos-connector-python).
## Supported Platforms
-- The native connection [supported platforms](/reference/connector/#supported-platforms) is the same as the one supported by the TDengine client.
+- The [supported platforms](/reference/connector/#supported-platforms) for the native connection are the same as the ones supported by the TDengine client.
- REST connections are supported on all platforms that can run Python.
## Version selection
-We recommend using the latest version of `taospy`, regardless what the version of TDengine is.
+We recommend using the latest version of `taospy`, regardless of the version of TDengine.
## Supported features
@@ -53,7 +53,7 @@ Earlier TDengine client software includes the Python connector. If the Python co
:::
-#### to install `taospy`
+#### To install `taospy`
@@ -139,7 +139,7 @@ The FQDN above can be the FQDN of any dnode in the cluster, and the PORT is the
-For REST connections and making sure the cluster is up, make sure the taosAdapter component is up. This can be tested using the following `curl ` command.
+For REST connections, make sure the cluster and taosAdapter component, are running. This can be tested using the following `curl ` command.
```
curl -u root:taosdata http://:/rest/sql -d "select server_version()"
@@ -312,7 +312,7 @@ For a more detailed description of the `sql()` method, please refer to [RestClie
### Exception handling
-All database operations will be thrown directly if an exception occurs. The application is responsible for exception handling. For example:
+All errors from database operations are thrown directly as exceptions and the error message from the database is passed up the exception stack. The application is responsible for exception handling. For example:
```python
{{#include docs-examples/python/handle_exception.py}}
@@ -320,7 +320,7 @@ All database operations will be thrown directly if an exception occurs. The appl
### About nanoseconds
-Due to the current imperfection of Python's nanosecond support (see link below), the current implementation returns integers at nanosecond precision instead of the `datetime` type produced by `ms and `us`, which application developers will need to handle on their own. And it is recommended to use pandas' to_datetime(). The Python Connector may modify the interface in the future if Python officially supports nanoseconds in full.
+Due to the current imperfection of Python's nanosecond support (see link below), the current implementation returns integers at nanosecond precision instead of the `datetime` type produced by `ms` and `us`, which application developers will need to handle on their own. And it is recommended to use pandas' to_datetime(). The Python Connector may modify the interface in the future if Python officially supports nanoseconds in full.
1. https://stackoverflow.com/questions/10611328/parsing-datetime-strings-containing-nanoseconds
2. https://www.python.org/dev/peps/pep-0564/
@@ -328,7 +328,7 @@ Due to the current imperfection of Python's nanosecond support (see link below),
## Frequently Asked Questions
-Welcome to [ask questions or report questions] (https://github.com/taosdata/taos-connector-python/issues).
+Welcome to [ask questions or report questions](https://github.com/taosdata/taos-connector-python/issues).
## Important Update
diff --git a/docs-en/14-reference/03-connector/rust.mdx b/docs-en/14-reference/03-connector/rust.mdx
index 2c8fe68c1ca8b091b8d685d8e20942a02ab2c5e8..cd54f35982ec13fc3c9160145fa002fb6f1d094b 100644
--- a/docs-en/14-reference/03-connector/rust.mdx
+++ b/docs-en/14-reference/03-connector/rust.mdx
@@ -30,7 +30,7 @@ REST connections are supported on all platforms that can run Rust.
Please refer to [version support list](/reference/connector#version-support).
-The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. Recommend to use TDengine version 2.4 or higher to avoid known issues.
+The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. We recommend using TDengine version 2.4 or higher to avoid known issues.
## Installation
@@ -206,7 +206,7 @@ let conn: Taos = cfg.connect();
### Connection pooling
-In complex applications, recommand to enable connection pool. Connection pool for [libtaos] is implemented using [r2d2].
+In complex applications, we recommend enabling connection pools. Connection pool for [libtaos] is implemented using [r2d2].
As follows, a connection pool with default parameters can be generated.
@@ -269,7 +269,7 @@ The [Taos] structure is the connection manager in [libtaos] and provides two mai
Note that Rust asynchronous functions and an asynchronous runtime are required.
-[Taos] provides partial Rust methodization of SQL to reduce the frequency of `format!` code blocks.
+[Taos] provides a few Rust methods that encapsulate SQL to reduce the frequency of `format!` code blocks.
- `.describe(table: &str)`: Executes `DESCRIBE` and returns a Rust data structure.
- `.create_database(database: &str)`: Executes the `CREATE DATABASE` statement.
@@ -279,7 +279,7 @@ In addition, this structure is also the entry point for [Parameter Binding](#Par
### Bind Interface
-Similar to the C interface, Rust provides the bind interface's wraping. First, create a bind object [Stmt] for a SQL command from the [Taos] object.
+Similar to the C interface, Rust provides the bind interface's wrapping. First, create a bind object [Stmt] for a SQL command from the [Taos] object.
```rust
let mut stmt: Stmt = taos.stmt("insert into ? values(? ,?)") ? ;
diff --git a/docs-en/14-reference/03-connector/tdengine-jdbc-connector.png b/docs-en/14-reference/03-connector/tdengine-jdbc-connector.png
deleted file mode 100644
index 7541aaf98ad73cbddac44c34bd775b32ab3a735e..0000000000000000000000000000000000000000
Binary files a/docs-en/14-reference/03-connector/tdengine-jdbc-connector.png and /dev/null differ
diff --git a/docs-en/14-reference/03-connector/tdengine-jdbc-connector.webp b/docs-en/14-reference/03-connector/tdengine-jdbc-connector.webp
new file mode 100644
index 0000000000000000000000000000000000000000..37cf6d90a528e320d5cb7d6da502d3a5b10aa4ee
Binary files /dev/null and b/docs-en/14-reference/03-connector/tdengine-jdbc-connector.webp differ
diff --git a/docs-en/14-reference/04-taosadapter.md b/docs-en/14-reference/04-taosadapter.md
index 85fd2923b02189d6f3cfd73efff784d12c3bb69a..3264124655e7040e1d94b43500a0b582d95cb5a1 100644
--- a/docs-en/14-reference/04-taosadapter.md
+++ b/docs-en/14-reference/04-taosadapter.md
@@ -24,21 +24,21 @@ taosAdapter provides the following features.
## taosAdapter architecture diagram
-
+
## taosAdapter Deployment Method
### Install taosAdapter
-taosAdapter has been part of TDengine server software since TDengine v2.4.0.0. If you use the TDengine server, you don't need additional steps to install taosAdapter. You can download taosAdapter from [TAOSData official website](https://taosdata.com/en/all-downloads/) to download the TDengine server installation package (taosAdapter is included in v2.4.0.0 and later version). If you need to deploy taosAdapter separately on another server other than the TDengine server, you should install the full TDengine on that server to install taosAdapter. If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter]( https://github.com/taosdata/taosadapter/blob/develop/BUILD.md) documentation.
+taosAdapter has been part of TDengine server software since TDengine v2.4.0.0. If you use the TDengine server, you don't need additional steps to install taosAdapter. You can download taosAdapter from [TDengine official website](https://tdengine.com/all-downloads/) to download the TDengine server installation package (taosAdapter is included in v2.4.0.0 and later version). If you need to deploy taosAdapter separately on another server other than the TDengine server, you should install the full TDengine server package on that server to install taosAdapter. If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter]( https://github.com/taosdata/taosadapter/blob/develop/BUILD.md) documentation.
-### start/stop taosAdapter
+### Start/Stop taosAdapter
On Linux systems, the taosAdapter service is managed by `systemd` by default. You can use the command `systemctl start taosadapter` to start the taosAdapter service and use the command `systemctl stop taosadapter` to stop the taosAdapter service.
### Remove taosAdapter
-Use the command `rmtaos` to remove the TDengine server software if you use tar.gz package or use package management command like rpm or apt to remove the TDengine server, including taosAdapter.
+Use the command `rmtaos` to remove the TDengine server software if you use tar.gz package. If you installed using a .deb or .rpm package, use the corresponding command, for your package manager, like apt or rpm to remove the TDengine server, including taosAdapter.
### Upgrade taosAdapter
@@ -153,8 +153,7 @@ See [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/bl
## Feature List
-- Compatible with RESTful interfaces
- [https://www.taosdata.com/cn/documentation/connector#restful](https://www.taosdata.com/cn/documentation/connector#restful)
+- Compatible with RESTful interfaces [REST API](/reference/rest-api/)
- Compatible with InfluxDB v1 write interface
[https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/](https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/)
- Compatible with OpenTSDB JSON and telnet format writes
@@ -187,7 +186,7 @@ You can use any client that supports the http protocol to write data to or query
### InfluxDB
-You can use any client that supports the http protocol to access the Restful interface address `http://:6041/` to write data in InfluxDB compatible format to TDengine. The EndPoint is as follows:
+You can use any client that supports the http protocol to access the RESTful interface address `http://:6041/` to write data in InfluxDB compatible format to TDengine. The EndPoint is as follows:
```text
/influxdb/v1/write
@@ -204,7 +203,7 @@ Note: InfluxDB token authorization is not supported at present. Only Basic autho
### OpenTSDB
-You can use any client that supports the http protocol to access the Restful interface address `http://:6041/` to write data in OpenTSDB compatible format to TDengine.
+You can use any client that supports the http protocol to access the RESTful interface address `http://:6041/` to write data in OpenTSDB compatible format to TDengine.
```text
/opentsdb/v1/put/json/:db
@@ -241,7 +240,7 @@ node_export is an exporter of hardware and OS metrics exposed by the \*NIX kerne
## Memory usage optimization methods
-taosAdapter will monitor its memory usage during operation and adjust it with two thresholds. Valid values range from -1 to 100 integers in percent of the system's physical memory.
+taosAdapter will monitor its memory usage during operation and adjust it with two thresholds. Valid values are integers between 1 to 100, and represent a percentage of the system's physical memory.
- pauseQueryMemoryThreshold
- pauseAllMemoryThreshold
@@ -277,7 +276,7 @@ Corresponding configuration parameter
monitor.pauseQueryMemoryThreshold memory threshold for no more queries Environment variable `TAOS_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD` (default 70)
```
-You can adjust it according to the specific application scenario and operation strategy, and it is recommended to use operation monitoring software to monitor system memory status timely. The load balancer can also check the taosAdapter running status through this interface.
+You should adjust this parameter based on your specific application scenario and operation strategy. We recommend using monitoring software to monitor system memory status. The load balancer can also check the taosAdapter running status through this interface.
## taosAdapter Monitoring Metrics
@@ -326,7 +325,7 @@ You can also adjust the level of the taosAdapter log output by setting the `--lo
## How to migrate from older TDengine versions to taosAdapter
-In TDengine server 2.2.x.x or earlier, the TDengine server process (taosd) contains an embedded HTTP service. As mentioned earlier, taosAdapter is a standalone software managed using `systemd` and has its process ID. And there are some configuration parameters and behaviors that are different between the two. See the following table for details.
+In TDengine server 2.2.x.x or earlier, the TDengine server process (taosd) contains an embedded HTTP service. As mentioned earlier, taosAdapter is a standalone software managed using `systemd` and has its own process ID. There are some configuration parameters and behaviors that are different between the two. See the following table for details.
| **#** | **embedded httpd** | **taosAdapter** | **comment** |
| ----- | ------------------- | ------------------------------------ | ------------------------------------------------------------------ ------------------------------------------------------------------------ |
diff --git a/docs-en/14-reference/05-taosbenchmark.md b/docs-en/14-reference/05-taosbenchmark.md
index 1e2b0b99f652bca0d775bebe28378600470f8661..b029f3d3eea0b010354dac1eb3ffecbc872e597f 100644
--- a/docs-en/14-reference/05-taosbenchmark.md
+++ b/docs-en/14-reference/05-taosbenchmark.md
@@ -7,7 +7,7 @@ description: "taosBenchmark (once called taosdemo ) is a tool for testing the pe
## Introduction
-taosBenchmark (formerly taosdemo ) is a tool for testing the performance of TDengine products. taosBenchmark can test the performance of TDengine's insert, query, and subscription functions and simulate large amounts of data generated by many devices. taosBenchmark can flexibly control the number and type of databases, supertables, tag columns, number and type of data columns, and sub-tables, and types of databases, super tables, the number and types of data columns, the number of sub-tables, the amount of data per sub-table, the time interval for inserting data, the number of working threads, whether and how to insert disordered data, and so on. The installer provides taosdemo as a soft link to taosBenchmark for compatibility with past users.
+taosBenchmark (formerly taosdemo ) is a tool for testing the performance of TDengine products. taosBenchmark can test the performance of TDengine's insert, query, and subscription functions and simulate large amounts of data generated by many devices. taosBenchmark can flexibly control the number and type of databases, supertables, tag columns, number and type of data columns, and sub-tables, and types of databases, super tables, the number and types of data columns, the number of sub-tables, the amount of data per sub-table, the time interval for inserting data, the number of working threads, whether and how to insert disordered data, and so on. The installer provides taosdemo as a soft link to taosBenchmark for compatibility and for the convenience of past users.
## Installation
@@ -21,7 +21,7 @@ There are two ways to install taosBenchmark:
### Configuration and running methods
-taosBenchmark supports two configuration methods: [Command-line arguments](#Command-line arguments in detailed) and [JSON configuration file](#Configuration file arguments in detailed). These two methods are mutually exclusive, and with only one command-line parameter, users can use `-f ` to specify a configuration file when using a configuration file. When running taosBenchmark with command-line arguments and controlling its behavior, users should use other parameters for configuration rather than `-f` parameter. In addition, taosBenchmark offers a special way of running without parameters.
+taosBenchmark supports two configuration methods: [Command-line arguments](#Command-line arguments in detailed) and [JSON configuration file](#Configuration file arguments in detailed). These two methods are mutually exclusive. Users can use `-f ` to specify a configuration file. When running taosBenchmark with command-line arguments to control its behavior, users should use other parameters for configuration, but not the `-f` parameter. In addition, taosBenchmark offers a special way of running without parameters.
taosBenchmark supports complete performance testing of TDengine. taosBenchmark supports the TDengine functions in three categories: write, query, and subscribe. These three functions are mutually exclusive, and users can select only one of them each time taosBenchmark runs. It is important to note that the type of functionality to be tested is not configurable when using the command-line configuration method, which can only test writing performance. To test the query and subscription performance of the TDengine, you must use the configuration file method and specify the function type to test via the parameter `filetype` in the configuration file.
@@ -35,7 +35,7 @@ Execute the following commands to quickly experience taosBenchmark's default con
taosBenchmark
```
-When run without parameters, taosBenchmark connects to the TDengine cluster specified in `/etc/taos` by default and creates a database named test in TDengine, a super table named `meters` under the test database, and 10,000 tables under the super table with 10,000 records written to each table. Note that if there is already a test database, this table is not used. Note that if there is already a test database, this command will delete it first and create a new test database.
+When run without parameters, taosBenchmark connects to the TDengine cluster specified in `/etc/taos` by default and creates a database named `test`, a super table named `meters` under the test database, and 10,000 tables under the super table with 10,000 records written to each table. Note that if there is already a database named "test" this command will delete it first and create a new database.
### Run with command-line configuration parameters
@@ -45,7 +45,7 @@ The `-f ` argument cannot be used when running taosBenchmark with com
taosBenchmark -I stmt -n 200 -t 100
```
-The above command, `taosBenchmark` will create a database named `test`, create a super table `meters` in it, create 100 sub-tables in the super table and insert 200 records for each sub-table using parameter binding.
+Using the above command, `taosBenchmark` will create a database named `test`, create a super table `meters` in it, create 100 sub-tables in the super table and insert 200 records for each sub-table using parameter binding.
### Run with the configuration file
@@ -95,10 +95,10 @@ taosBenchmark -f
## Command-line argument in detailed
- **-f/--file ** :
- specify the configuration file to use. This file includes All parameters. And users should not use this parameter with other parameters on the command-line. There is no default value.
+ specify the configuration file to use. This file includes All parameters. Users should not use this parameter with other parameters on the command-line. There is no default value.
- **-c/--config-dir ** :
- specify the directory where the TDengine cluster configuration file. the default path is `/etc/taos`.
+ specify the directory where the TDengine cluster configuration file. The default path is `/etc/taos`.
- **-h/--host ** :
Specify the FQDN of the TDengine server to connect to. The default value is localhost.
@@ -272,13 +272,13 @@ The parameters for creating super tables are configured in `super_tables` in the
- **child_table_prefix** : The prefix of the child table name, mandatory configuration item, no default value.
-- **escape_character**: specify the super table and child table names containing escape characters. By default is "no". The value can be "yes" or "no".
+- **escape_character**: specify the super table and child table names containing escape characters. The value can be "yes" or "no". The default is "no".
- **auto_create_table**: only when insert_mode is taosc, rest, stmt, and childtable_exists is "no". "yes" means taosBenchmark will automatically create non-existent tables when inserting data; "no" means that taosBenchmark will create all tables before inserting.
-- **batch_create_tbl_num** : the number of tables per batch when creating sub-tables, default is 10. Note: the actual number of batches may not be the same as this value when the executed SQL statement is larger than the maximum length supported, it will be automatically truncated and re-executed to continue creating.
+- **batch_create_tbl_num** : the number of tables per batch when creating sub-tables, default is 10. Note: the actual number of batches may not be the same as this value. If the executed SQL statement is larger than the maximum length supported, it will be automatically truncated and re-executed to continue creating.
-- **data_source**: specify the source of data-generating. Default is taosBenchmark randomly generated. Users can configure it as "rand" and "sample". When "sample" is used, taosBenchmark will use the data in the file specified by the `sample_file` parameter.
+- **data_source**: specify the source of data-generation. Default is taosBenchmark randomly generated. Users can configure it as "rand" and "sample". When "sample" is used, taosBenchmark will use the data in the file specified by the `sample_file` parameter.
- **insert_mode**: insertion mode with options taosc, rest, stmt, sml, sml-rest, corresponding to normal write, restful interface write, parameter binding interface write, schemaless interface write, restful schemaless interface write (provided by taosAdapter). The default value is taosc.
@@ -300,15 +300,15 @@ The parameters for creating super tables are configured in `super_tables` in the
- **partial_col_num**: If this value is a positive number n, only the first n columns are written to, only if insert_mode is taosc and rest, or all columns if n is 0.
-- **disorder_ratio** : Specifies the percentage probability of disordered data in the value range [0,50]. The default is 0, which means there is no disorder data.
+- **disorder_ratio** : Specifies the percentage probability of disordered (i.e. out-of-order) data in the value range [0,50]. The default is 0, which means there is no disorder data.
-- **disorder_range** : Specifies the timestamp fallback range for the disordered data. The generated disorder timestamp is the timestamp that should be used in the non-disorder case minus a random value in this range. Valid only if the percentage of disordered data specified by `-O/--disorder` is greater than 0.
+- **disorder_range** : Specifies the timestamp fallback range for the disordered data. The disordered timestamp is generated by subtracting a random value in this range, from the timestamp that would be used in the non-disorder case. Valid only if the percentage of disordered data specified by `-O/--disorder` is greater than 0.
-- **timestamp_step**: The timestamp step for inserting data in each child table, in units consistent with the `precision` of the database, the default value is 1.
+- **timestamp_step**: The timestamp step for inserting data in each child table, in units consistent with the `precision` of the database. For e.g. if the `precision` is milliseconds, the timestamp step will be in milliseconds. The default value is 1.
- **start_timestamp** : The timestamp start value of each sub-table, the default value is now.
-- **sample_format**: The type of the sample data file, now only "csv" is supported.
+- **sample_format**: The type of the sample data file; for now only "csv" is supported.
- **sample_file**: Specify a CSV format file as the data source. It only works when data_source is a sample. If the number of rows in the CSV file is less than or equal to prepared_rand, then taosBenchmark will read the CSV file data cyclically until it is the same as prepared_rand; otherwise, taosBenchmark will read only the rows with the number of prepared_rand. The final number of rows of data generated is the smaller of the two.
@@ -341,7 +341,7 @@ The configuration parameters for specifying super table tag columns and data col
- **create_table_thread_count** : The number of threads to build the table, default is 8.
-- **connection_pool_size** : The number of pre-established connections to the TDengine server. If not configured, it is the same number of threads specified.
+- **connection_pool_size** : The number of pre-established connections to the TDengine server. If not configured, it is the same as number of threads specified.
- **result_file** : The path to the result output file, the default value is . /output.txt.
diff --git a/docs-en/14-reference/06-taosdump.md b/docs-en/14-reference/06-taosdump.md
index 973999704b595ea9b742f1ef759f973aa1f05649..5403e40925f633ce62795cc6037fc8c8f7aad07a 100644
--- a/docs-en/14-reference/06-taosdump.md
+++ b/docs-en/14-reference/06-taosdump.md
@@ -1,25 +1,25 @@
---
title: taosdump
-description: "taosdump is a tool application that supports backing up data from a running TDengine cluster and restoring the backed up data to the same or another running TDengine cluster."
+description: "taosdump is a tool that supports backing up data from a running TDengine cluster and restoring the backed up data to the same, or another running TDengine cluster."
---
## Introduction
-taosdump is a tool application that supports backing up data from a running TDengine cluster and restoring the backed up data to the same or another running TDengine cluster.
+taosdump is a tool that supports backing up data from a running TDengine cluster and restoring the backed up data to the same, or another running TDengine cluster.
taosdump can back up a database, a super table, or a normal table as a logical data unit or backup data records in the database, super tables, and normal tables. When using taosdump, you can specify the directory path for data backup. If you do not specify a directory, taosdump will back up the data to the current directory by default.
-Suppose the specified location already has data files. In that case, taosdump will prompt the user and exit immediately to avoid data overwriting which means that the same path can only be used for one backup.
-Please be careful if you see a prompt for this.
+If the specified location already has data files, taosdump will prompt the user and exit immediately to avoid data overwriting. This means that the same path can only be used for one backup.
+
+Please be careful if you see a prompt for this and please ensure that you follow best practices and relevant SOPs for data integrity, backup and data security.
-taosdump is a logical backup tool and should not be used to back up any raw data, environment settings,
Users should not use taosdump to back up raw data, environment settings, hardware information, server configuration, or cluster topology. taosdump uses [Apache AVRO](https://avro.apache.org/) as the data file format to store backup data.
## Installation
There are two ways to install taosdump:
-- Install the taosTools official installer. Please find taosTools from [All download links](https://www.taosdata.com/all-downloads) page and download and install it.
+- Install the taosTools official installer. Please find taosTools from [All download links](https://www.tdengine.com/all-downloads) page and download and install it.
- Compile taos-tools separately and install it. Please refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository for details.
@@ -28,14 +28,14 @@ There are two ways to install taosdump:
### taosdump backup data
1. backing up all databases: specify `-A` or `-all-databases` parameter.
-2. backup multiple specified databases: use `-D db1,db2,... ` parameters; 3.
+2. backup multiple specified databases: use `-D db1,db2,... ` parameters;
3. back up some super or normal tables in the specified database: use `-dbname stbname1 stbname2 tbname1 tbname2 ... ` parameters. Note that the first parameter of this input sequence is the database name, and only one database is supported. The second and subsequent parameters are the names of super or normal tables in that database, separated by spaces.
4. back up the system log database: TDengine clusters usually contain a system database named `log`. The data in this database is the data that TDengine runs itself, and the taosdump will not back up the log database by default. If users need to back up the log database, users can use the `-a` or `-allow-sys` command-line parameter.
-5. Loose mode backup: taosdump version 1.4.1 onwards provides `-n` and `-L` parameters for backing up data without using escape characters and "loose" mode, which can reduce the number of backups if table names, column names, tag names do not use This can reduce the backup data time and backup data footprint if table names, column names, and tag names do not use `escape character`. If you are unsure about using `-n` and `-L` conditions, please use the default parameters for "strict" mode backup. See the [official documentation](/taos-sql/escape) for a description of escaped characters.
+5. Loose mode backup: taosdump version 1.4.1 onwards provides `-n` and `-L` parameters for backing up data without using escape characters and "loose" mode, which can reduce the number of backups if table names, column names, tag names do not use escape characters. This can also reduce the backup data time and backup data footprint. If you are unsure about using `-n` and `-L` conditions, please use the default parameters for "strict" mode backup. See the [official documentation](/taos-sql/escape) for a description of escaped characters.
:::tip
- taosdump versions after 1.4.1 provide the `-I` argument for parsing Avro file schema and data. If users specify `-s` then only taosdump will parse schema.
-- Backups after taosdump 1.4.2 use the batch count specified by the `-B` parameter. The default value is 16384. If, in some environments, low network speed or disk performance causes "Error actual dump ... batch ..." can be tried by challenging the `-B` parameter to a smaller value.
+- Backups after taosdump 1.4.2 use the batch count specified by the `-B` parameter. The default value is 16384. If, in some environments, low network speed or disk performance causes "Error actual dump ... batch ...", then try changing the `-B` parameter to a smaller value.
:::
@@ -44,7 +44,7 @@ There are two ways to install taosdump:
Restore the data file in the specified path: use the `-i` parameter plus the path to the data file. You should not use the same directory to backup different data sets, and you should not backup the same data set multiple times in the same path. Otherwise, the backup data will cause overwriting or multiple backups.
:::tip
-taosdump internally uses TDengine stmt binding API for writing recovery data and currently uses 16384 as one write batch for better data recovery performance. If there are more columns in the backup data, it may cause a "WAL size exceeds limit" error. You can try to adjust to a smaller value by using the `-B` parameter.
+taosdump internally uses TDengine stmt binding API for writing recovery data with a default batch size of 16384 for better data recovery performance. If there are more columns in the backup data, it may cause a "WAL size exceeds limit" error. You can try to adjust the batch size to a smaller value by using the `-B` parameter.
:::
@@ -59,7 +59,7 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
or: taosdump [OPTION...] -i inpath
or: taosdump [OPTION...] -o outpath
- -h, --host=HOST Server host dumping data from. Default is
+ -h, --host=HOST Server host from which to dump data. Default is
localhost.
-p, --password User password to connect to server. Default is
taosdata.
@@ -72,10 +72,10 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
-r, --resultFile=RESULTFILE DumpOut/In Result file path and name.
-a, --allow-sys Allow to dump system database
-A, --all-databases Dump all databases.
- -D, --databases=DATABASES Dump inputted databases. Use comma to separate
- databases' name.
+ -D, --databases=DATABASES Dump listed databases. Use comma to separate
+ database names.
-N, --without-property Dump database without its properties.
- -s, --schemaonly Only dump tables' schema.
+ -s, --schemaonly Only dump table schemas.
-y, --answer-yes Input yes for prompt. It will skip data file
checking!
-d, --avro-codec=snappy Choose an avro codec among null, deflate, snappy,
@@ -98,7 +98,7 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
and try. The workable value is related to the
length of the row and type of table schema.
-I, --inspect inspect avro file content and print on screen
- -L, --loose-mode Using loose mode if the table name and column name
+ -L, --loose-mode Use loose mode if the table name and column name
use letter and number only. Default is NOT.
-n, --no-escape No escape char '`'. Default is using it.
-T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is
diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.png b/docs-en/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.png
deleted file mode 100644
index 4708f836feb21980f2db7fed4a55f799b23a6ec1..0000000000000000000000000000000000000000
Binary files a/docs-en/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.png and /dev/null differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp b/docs-en/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp
new file mode 100644
index 0000000000000000000000000000000000000000..a78e18028a94c2f6a783b08d992a25c791527407
Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.png b/docs-en/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.png
deleted file mode 100644
index f2684e6eed70e8f56697eae42b495d6bd62815e8..0000000000000000000000000000000000000000
Binary files a/docs-en/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.png and /dev/null differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp b/docs-en/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp
new file mode 100644
index 0000000000000000000000000000000000000000..b152418d0902b8ebdf62ebce6705c10dd5ab4fbf
Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.png b/docs-en/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.png
deleted file mode 100644
index 74686691e4106b8646c3deee1e0ce73b2f53f1ea..0000000000000000000000000000000000000000
Binary files a/docs-en/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.png and /dev/null differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp b/docs-en/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp
new file mode 100644
index 0000000000000000000000000000000000000000..f58f48b7f17375cb8e62e7c0126ca3aea56a13f6
Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-4-requests.png b/docs-en/14-reference/07-tdinsight/assets/TDinsight-4-requests.png
deleted file mode 100644
index 27964215567f9f961c0aeaf1b863188437008fb7..0000000000000000000000000000000000000000
Binary files a/docs-en/14-reference/07-tdinsight/assets/TDinsight-4-requests.png and /dev/null differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp b/docs-en/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp
new file mode 100644
index 0000000000000000000000000000000000000000..00afcce013602dce0da17bfd033f65aaa8e43bb7
Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-5-database.png b/docs-en/14-reference/07-tdinsight/assets/TDinsight-5-database.png
deleted file mode 100644
index b0d3abbf21ec4d4bd7bfb95fcc03a5f936b22665..0000000000000000000000000000000000000000
Binary files a/docs-en/14-reference/07-tdinsight/assets/TDinsight-5-database.png and /dev/null differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-5-database.webp b/docs-en/14-reference/07-tdinsight/assets/TDinsight-5-database.webp
new file mode 100644
index 0000000000000000000000000000000000000000..567e5694f9d7a035a3eb354493d3df8ed64db251
Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/TDinsight-5-database.webp differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.png b/docs-en/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.png
deleted file mode 100644
index 2b54cbeb83bcff12f20461a4f57f882e2073f231..0000000000000000000000000000000000000000
Binary files a/docs-en/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.png and /dev/null differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.webp b/docs-en/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.webp
new file mode 100644
index 0000000000000000000000000000000000000000..cc8a912810f35e53a6e5fa96ea0c81e334ffc0df
Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.webp differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-7-login-history.png b/docs-en/14-reference/07-tdinsight/assets/TDinsight-7-login-history.png
deleted file mode 100644
index eb3848657f13900c856ac595c20766465157e9c4..0000000000000000000000000000000000000000
Binary files a/docs-en/14-reference/07-tdinsight/assets/TDinsight-7-login-history.png and /dev/null differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-7-login-history.webp b/docs-en/14-reference/07-tdinsight/assets/TDinsight-7-login-history.webp
new file mode 100644
index 0000000000000000000000000000000000000000..651b716bc511ba2ed5db5e6fc6b0591ef150cbf6
Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/TDinsight-7-login-history.webp differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.png b/docs-en/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.png
deleted file mode 100644
index d94b2e02ac9855bb3d2f77d8902e068839db364f..0000000000000000000000000000000000000000
Binary files a/docs-en/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.png and /dev/null differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp b/docs-en/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp
new file mode 100644
index 0000000000000000000000000000000000000000..8666193f59497180574fd2786266e5baabbe9761
Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-full.png b/docs-en/14-reference/07-tdinsight/assets/TDinsight-full.png
deleted file mode 100644
index 654df2934597ce600a1dc2dcd0cab7e29de7076d..0000000000000000000000000000000000000000
Binary files a/docs-en/14-reference/07-tdinsight/assets/TDinsight-full.png and /dev/null differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-full.webp b/docs-en/14-reference/07-tdinsight/assets/TDinsight-full.webp
new file mode 100644
index 0000000000000000000000000000000000000000..7f38a76a2b899ffebc7aecd39c8ec4fd0b2da778
Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/TDinsight-full.webp differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-manager-status.png b/docs-en/14-reference/07-tdinsight/assets/alert-manager-status.png
deleted file mode 100644
index e3afa22c0326d70567ec4529c83101c746daac87..0000000000000000000000000000000000000000
Binary files a/docs-en/14-reference/07-tdinsight/assets/alert-manager-status.png and /dev/null differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-manager-status.webp b/docs-en/14-reference/07-tdinsight/assets/alert-manager-status.webp
new file mode 100644
index 0000000000000000000000000000000000000000..3d7fe932a23f3720e76e4217a7b5d1868d81fac8
Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/alert-manager-status.webp differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-notification-channel.png b/docs-en/14-reference/07-tdinsight/assets/alert-notification-channel.png
deleted file mode 100644
index 198bf37141c86a66cdd91b47a331bcdeb83daaf8..0000000000000000000000000000000000000000
Binary files a/docs-en/14-reference/07-tdinsight/assets/alert-notification-channel.png and /dev/null differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-notification-channel.webp b/docs-en/14-reference/07-tdinsight/assets/alert-notification-channel.webp
new file mode 100644
index 0000000000000000000000000000000000000000..517123954efe4b94485fdab2e07be0d765f5daa2
Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/alert-notification-channel.webp differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-query-demo.png b/docs-en/14-reference/07-tdinsight/assets/alert-query-demo.png
deleted file mode 100644
index ace3aa3c2f8f14fabdac54bc25ae2d9449445b69..0000000000000000000000000000000000000000
Binary files a/docs-en/14-reference/07-tdinsight/assets/alert-query-demo.png and /dev/null differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-query-demo.webp b/docs-en/14-reference/07-tdinsight/assets/alert-query-demo.webp
new file mode 100644
index 0000000000000000000000000000000000000000..6666296ac16e7a0c0ab3db23f0517f2089d09035
Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/alert-query-demo.webp differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.png b/docs-en/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.png
deleted file mode 100644
index 7082e49f6beb8690c36f98a3f4ff2befdb8fd014..0000000000000000000000000000000000000000
Binary files a/docs-en/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.png and /dev/null differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.webp b/docs-en/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.webp
new file mode 100644
index 0000000000000000000000000000000000000000..6f74bc3a47a32de661ef25f787a947d823715810
Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.webp differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-rule-test.png b/docs-en/14-reference/07-tdinsight/assets/alert-rule-test.png
deleted file mode 100644
index ffd4911b53854c42dbf0ff11838cb604fa694138..0000000000000000000000000000000000000000
Binary files a/docs-en/14-reference/07-tdinsight/assets/alert-rule-test.png and /dev/null differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-rule-test.webp b/docs-en/14-reference/07-tdinsight/assets/alert-rule-test.webp
new file mode 100644
index 0000000000000000000000000000000000000000..acda3b24a6263815ac8b658709d2172300ca3b00
Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/alert-rule-test.webp differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-button.png b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-button.png
deleted file mode 100644
index 802c7366f921301bd7fbc62458e56b2d1eaf195c..0000000000000000000000000000000000000000
Binary files a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-button.png and /dev/null differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-button.webp b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-button.webp
new file mode 100644
index 0000000000000000000000000000000000000000..903e236e2a776dfef7f85c014662e8913a9033a5
Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-button.webp differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.png b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.png
deleted file mode 100644
index 019ec921b6f808671f4f864ddf3380159d4a0dcc..0000000000000000000000000000000000000000
Binary files a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.png and /dev/null differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.webp b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.webp
new file mode 100644
index 0000000000000000000000000000000000000000..14fcfe9d183e8804199708ae4492d0904a7c9d62
Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.webp differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-test.png b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-test.png
deleted file mode 100644
index 3963abb4ea8ae0e6f5557466f7a5b746c2d2ea3c..0000000000000000000000000000000000000000
Binary files a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-test.png and /dev/null differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-test.webp b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-test.webp
new file mode 100644
index 0000000000000000000000000000000000000000..00b50cc619b030d1fb2be3a367183901d5c833e8
Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-test.webp differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource.png b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource.png
deleted file mode 100644
index 837100464b35a5cafac474723aef603f91945ebc..0000000000000000000000000000000000000000
Binary files a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource.png and /dev/null differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource.webp b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource.webp
new file mode 100644
index 0000000000000000000000000000000000000000..06d0ff6ed50091a6340508bc5b2b3f78b65dcb18
Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource.webp differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-display.png b/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-display.png
deleted file mode 100644
index 98223df25499effac343ff5723544a3c289f18fa..0000000000000000000000000000000000000000
Binary files a/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-display.png and /dev/null differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-display.webp b/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-display.webp
new file mode 100644
index 0000000000000000000000000000000000000000..e2ec052b91e439a817f6e88b8afd0fcb4dcb7ef8
Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-display.webp differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-import-options.png b/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-import-options.png
deleted file mode 100644
index 07aba348f02b4fb8ef68e79664920c119b842d4c..0000000000000000000000000000000000000000
Binary files a/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-import-options.png and /dev/null differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-import-options.webp b/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-import-options.webp
new file mode 100644
index 0000000000000000000000000000000000000000..665c035f9755b9472aee33cd61d3ab52831194b5
Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-import-options.webp differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-import-dashboard.png b/docs-en/14-reference/07-tdinsight/assets/howto-import-dashboard.png
deleted file mode 100644
index 7e28939ead8bf3b6e2b4330e4f9b59c2e39b5c1c..0000000000000000000000000000000000000000
Binary files a/docs-en/14-reference/07-tdinsight/assets/howto-import-dashboard.png and /dev/null differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-import-dashboard.webp b/docs-en/14-reference/07-tdinsight/assets/howto-import-dashboard.webp
new file mode 100644
index 0000000000000000000000000000000000000000..7dc42eeba919fee7b438a453c00bb9fd0ac2d274
Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/howto-import-dashboard.webp differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/import-dashboard-15167.png b/docs-en/14-reference/07-tdinsight/assets/import-dashboard-15167.png
deleted file mode 100644
index 981f640b14d18aa6f0682768d8405a232df500f6..0000000000000000000000000000000000000000
Binary files a/docs-en/14-reference/07-tdinsight/assets/import-dashboard-15167.png and /dev/null differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/import-dashboard-15167.webp b/docs-en/14-reference/07-tdinsight/assets/import-dashboard-15167.webp
new file mode 100644
index 0000000000000000000000000000000000000000..7ef081900f8de99c859193b69d49b3d6bc187909
Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/import-dashboard-15167.webp differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.png b/docs-en/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.png
deleted file mode 100644
index 94ef4fa5fe63e535118a81707b413c028ce01f70..0000000000000000000000000000000000000000
Binary files a/docs-en/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.png and /dev/null differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.webp b/docs-en/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.webp
new file mode 100644
index 0000000000000000000000000000000000000000..602452fc4c89424d8e17d46d74949b69be84dbe8
Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.webp differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.png b/docs-en/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.png
deleted file mode 100644
index 670cacc377c2801fa9437c3c132c5c7fbc361b0f..0000000000000000000000000000000000000000
Binary files a/docs-en/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.png and /dev/null differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.webp b/docs-en/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.webp
new file mode 100644
index 0000000000000000000000000000000000000000..35a3ebba781f24dbb0066993d1ca2f02659997d2
Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.webp differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/import_dashboard.png b/docs-en/14-reference/07-tdinsight/assets/import_dashboard.png
deleted file mode 100644
index d74cd36c96ee0fd24ddc6feae2da07824816f745..0000000000000000000000000000000000000000
Binary files a/docs-en/14-reference/07-tdinsight/assets/import_dashboard.png and /dev/null differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/import_dashboard.webp b/docs-en/14-reference/07-tdinsight/assets/import_dashboard.webp
new file mode 100644
index 0000000000000000000000000000000000000000..fb7958f1b9fbd43c8f63136024842790e711c490
Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/import_dashboard.webp differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/tdengine_dashboard.png b/docs-en/14-reference/07-tdinsight/assets/tdengine_dashboard.png
deleted file mode 100644
index 0101e7430cb2ef673818de8bd3af53d0d082ad3f..0000000000000000000000000000000000000000
Binary files a/docs-en/14-reference/07-tdinsight/assets/tdengine_dashboard.png and /dev/null differ
diff --git a/docs-en/14-reference/07-tdinsight/assets/tdengine_dashboard.webp b/docs-en/14-reference/07-tdinsight/assets/tdengine_dashboard.webp
new file mode 100644
index 0000000000000000000000000000000000000000..49f1d88f4ad93286cd8582536e82b4dcc4ff271b
Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/tdengine_dashboard.webp differ
diff --git a/docs-en/14-reference/07-tdinsight/index.md b/docs-en/14-reference/07-tdinsight/index.md
index 4850cecb334ff24cc9fcf3b9a6e394827730111c..cebfafa225e6e8de75ff84bb51fa664784177910 100644
--- a/docs-en/14-reference/07-tdinsight/index.md
+++ b/docs-en/14-reference/07-tdinsight/index.md
@@ -5,11 +5,11 @@ sidebar_label: TDinsight
TDinsight is a solution for monitoring TDengine using the builtin native monitoring database and [Grafana].
-After TDengine starts, it will automatically create a monitoring database `log`. TDengine will automatically write many metrics in specific intervals into the `log` database. The metrics may include the server's CPU, memory, hard disk space, network bandwidth, number of requests, disk read/write speed, slow queries, other information like important system operations (user login, database creation, database deletion, etc.), and error alarms. With [Grafana] and [TDengine Data Source Plugin](https://github.com/taosdata/grafanaplugin/releases), TDinsight can visualize cluster status, node information, insertion and query requests, resource usage, etc., and also vnode, dnode, and mnode status, and exception alerts. Developers monitoring TDengine cluster operation status in real-time can be very convinient. This article will guide users to install the Grafana server, automatically install the TDengine data source plug-in, and deploy the TDinsight visualization panel through `TDinsight.sh` installation script.
+After TDengine starts, it will automatically create a monitoring database `log`. TDengine will automatically write many metrics in specific intervals into the `log` database. The metrics may include the server's CPU, memory, hard disk space, network bandwidth, number of requests, disk read/write speed, slow queries, other information like important system operations (user login, database creation, database deletion, etc.), and error alarms. With [Grafana] and [TDengine Data Source Plugin](https://github.com/taosdata/grafanaplugin/releases), TDinsight can visualize cluster status, node information, insertion and query requests, resource usage, vnode, dnode, and mnode status, exception alerts and many other metrics. This is very convenient for developers who want to monitor TDengine cluster status in real-time. This article will guide users to install the Grafana server, automatically install the TDengine data source plug-in, and deploy the TDinsight visualization panel using the `TDinsight.sh` installation script.
## System Requirements
-To deploy TDinsight, a single-node TDengine server or a multi-nodes TDengine cluster and a [Grafana] server are required. This dashboard requires TDengine 2.3.3.0 and above, with the `log` database enabled (`monitor = 1`).
+To deploy TDinsight, a single-node TDengine server or a multi-node TDengine cluster and a [Grafana] server are required. This dashboard requires TDengine 2.3.3.0 and above, with the `log` database enabled (`monitor = 1`).
## Installing Grafana
@@ -17,7 +17,7 @@ We recommend using the latest [Grafana] version 7 or 8 here. You can install Gra
### Installing Grafana on Debian or Ubuntu
-For Debian or Ubuntu operating systems, we recommend the Grafana image repository and Use the following command to install from scratch.
+For Debian or Ubuntu operating systems, we recommend the Grafana image repository and using the following command to install from scratch.
```bash
sudo apt-get install -y apt-transport-https
@@ -61,7 +61,7 @@ sudo yum install \
## Automated deployment of TDinsight
-We provide an installation script [`TDinsight.sh`](https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh) script to allow users to configure the installation automatically and quickly.
+We provide an installation script [`TDinsight.sh`](https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh) to allow users to configure the installation automatically and quickly.
You can download the script via `wget` or other tools:
@@ -71,7 +71,7 @@ chmod +x TDinsight.sh
./TDinsight.sh
```
-This script will automatically download the latest [Grafana TDengine data source plugin](https://github.com/taosdata/grafanaplugin/releases/latest) and [TDinsight dashboard](https://grafana.com/grafana/dashboards/15167) with configurable parameters from the command-line options to the [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) configuration file to automate deployment and updates, etc. With the alert setting options provided by this script, you can also get built-in support for AliCloud SMS alert notifications.
+This script will automatically download the latest [Grafana TDengine data source plugin](https://github.com/taosdata/grafanaplugin/releases/latest) and [TDinsight dashboard](https://grafana.com/grafana/dashboards/15167) with configurable parameters for command-line options to the [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) configuration file to automate deployment and updates, etc. With the alert setting options provided by this script, you can also get built-in support for AliCloud SMS alert notifications.
Assume you use TDengine and Grafana's default services on the same host. Run `. /TDinsight.sh` and open the Grafana browser window to see the TDinsight dashboard.
@@ -233,33 +233,33 @@ The default username/password is `admin`. Grafana will require a password change
Point to the **Configurations** -> **Data Sources** menu, and click the **Add data source** button.
-
+
Search for and select **TDengine**.
-
+
Configure the TDengine datasource.
-
+
Save and test. It will report 'TDengine Data source is working' under normal circumstances.
-
+
### Importing dashboards
Point to **+** / **Create** - **import** (or `/dashboard/import` url).
-
+
Type the dashboard ID `15167` in the **Import via grafana.com** location and **Load**.
-
+
Once the import is complete, the full page view of TDinsight is shown below.
-
+
## TDinsight dashboard details
@@ -269,7 +269,7 @@ Details of the metrics are as follows.
### Cluster Status
-
+
This section contains the current information and status of the cluster, the alert information is also here (from left to right, top to bottom).
@@ -289,7 +289,7 @@ This section contains the current information and status of the cluster, the ale
### DNodes Status
-
+
- **DNodes Status**: simple table view of `show dnodes`.
- **DNodes Lifetime**: the time elapsed since the dnode was created.
@@ -298,14 +298,14 @@ This section contains the current information and status of the cluster, the ale
### MNode Overview
-
+
-1. **MNodes Status**: a simple table view of `show mnodes`. 2.
+1. **MNodes Status**: a simple table view of `show mnodes`.
2. **MNodes Number**: similar to `DNodes Number`, the number of MNodes changes.
### Request
-
+
1. **Requests Rate(Inserts per Second)**: average number of inserts per second.
2. **Requests (Selects)**: number of query requests and change rate (count of second).
@@ -313,46 +313,46 @@ This section contains the current information and status of the cluster, the ale
### Database
-
+
Database usage, repeated for each value of the variable `$database` i.e. multiple rows per database.
-1. **STables**: number of super tables. 2.
-2. **Total Tables**: number of all tables. 3.
-3. **Sub Tables**: the number of all super table sub-tables. 4.
+1. **STables**: number of super tables.
+2. **Total Tables**: number of all tables.
+3. **Sub Tables**: the number of all super table subtables.
4. **Tables**: graph of all normal table numbers over time.
5. **Tables Number Foreach VGroups**: The number of tables contained in each VGroups.
### DNode Resource Usage
-
+
Data node resource usage display with repeated multiple rows for the variable `$fqdn` i.e., each data node. Includes.
1. **Uptime**: the time elapsed since the dnode was created.
-2. **Has MNodes?**: whether the current dnode is a mnode. 3.
-3. **CPU Cores**: the number of CPU cores. 4.
-4. **VNodes Number**: the number of VNodes in the current dnode. 5.
-5. **VNodes Masters**: the number of vnodes in the master role. 6.
+2. **Has MNodes?**: whether the current dnode is a mnode.
+3. **CPU Cores**: the number of CPU cores.
+4. **VNodes Number**: the number of VNodes in the current dnode.
+5. **VNodes Masters**: the number of vnodes in the master role.
6. **Current CPU Usage of taosd**: CPU usage rate of taosd processes.
7. **Current Memory Usage of taosd**: memory usage of taosd processes.
8. **Disk Used**: The total disk usage percentage of the taosd data directory.
-9. **CPU Usage**: Process and system CPU usage. 10.
+9. **CPU Usage**: Process and system CPU usage.
10. **RAM Usage**: Time series view of RAM usage metrics.
11. **Disk Used**: Disks used at each level of multi-level storage (default is level0).
12. **Disk Increasing Rate per Minute**: Percentage increase or decrease in disk usage per minute.
-13. **Disk IO**: Disk IO rate. 14.
+13. **Disk IO**: Disk IO rate.
14. **Net IO**: Network IO, the aggregate network IO rate in addition to the local network.
### Login History
-
+
Currently, only the number of logins per minute is reported.
### Monitoring taosAdapter
-
+
Support monitoring taosAdapter request statistics and status details. Includes.
@@ -376,7 +376,7 @@ TDinsight installed via the `TDinsight.sh` script can be cleaned up using the co
To completely uninstall TDinsight during a manual installation, you need to clean up the following.
1. the TDinsight Dashboard in Grafana.
-2. the Data Source in Grafana. 3.
+2. the Data Source in Grafana.
3. remove the `tdengine-datasource` plugin from the plugin installation directory.
## Integrated Docker Example
diff --git a/docs-en/14-reference/08-taos-shell.md b/docs-en/14-reference/08-taos-shell.md
index fe5e5f2bc29509a4b96646253732076c7a6ee7ea..002b515093258152e85dd9d7437e424dfa98c874 100644
--- a/docs-en/14-reference/08-taos-shell.md
+++ b/docs-en/14-reference/08-taos-shell.md
@@ -1,14 +1,14 @@
---
-title: TDengine Command Line (CLI)
-sidebar_label: TDengine CLI
+title: TDengine Command Line Interface (CLI)
+sidebar_label: Command Line Interface
description: Instructions and tips for using the TDengine CLI
---
-The TDengine command-line application (hereafter referred to as `TDengine CLI`) is the most simplest way for users to manipulate and interact with TDengine instances.
+The TDengine command-line interface (hereafter referred to as `TDengine CLI`) is the simplest way for users to manipulate and interact with TDengine instances.
## Installation
-If executed on the TDengine server-side, there is no need for additional installation steps to install TDengine CLI as it is already included and installed automatically. To run TDengine CLI on the environment which no TDengine server running, the TDengine client installation package needs to be installed first. For details, please refer to [connector](/reference/connector/).
+If executed on the TDengine server-side, there is no need for additional installation steps to install TDengine CLI as it is already included and installed automatically. To run TDengine CLI in an environment where no TDengine server is running, the TDengine client installation package needs to be installed first. For details, please refer to [connector](/reference/connector/).
## Execution
diff --git a/docs-en/14-reference/11-docker/index.md b/docs-en/14-reference/11-docker/index.md
index 4ca84be369e14b3223e8609e06c9ebc4e35eaa2d..b7e60ab3e7f04a6078950977a563382a3524ebaa 100644
--- a/docs-en/14-reference/11-docker/index.md
+++ b/docs-en/14-reference/11-docker/index.md
@@ -13,7 +13,7 @@ The TDengine image starts with the HTTP service activated by default, using the
docker run -d --name tdengine -p 6041:6041 tdengine/tdengine
```
-The above command starts a container named "tdengine" and maps the HTTP service end 6041 to the host port 6041. You can verify that the HTTP service provided in this container is available using the following command.
+The above command starts a container named "tdengine" and maps the HTTP service port 6041 to the host port 6041. You can verify that the HTTP service provided in this container is available using the following command.
```shell
curl -u root:taosdata -d "show databases" localhost:6041/rest/sql
@@ -34,7 +34,7 @@ taos> show databases;
Query OK, 1 row(s) in set (0.002843s)
```
-The TDengine server running in the container uses the container's hostname to establish a connection. Using TDengine CLI or various connectors (such as JDBC-JNI) to access the TDengine inside the container from outside the container is more complicated. So the above is the simplest way to access the TDengine service in the container and is suitable for some simple scenarios. Please refer to the next section if you want to access the TDengine service in the container from containerized using TDengine CLI or various connectors in some complex scenarios.
+The TDengine server running in the container uses the container's hostname to establish a connection. Using TDengine CLI or various connectors (such as JDBC-JNI) to access the TDengine inside the container from outside the container is more complicated. So the above is the simplest way to access the TDengine service in the container and is suitable for some simple scenarios. Please refer to the next section if you want to access the TDengine service in the container from outside the container using TDengine CLI or various connectors for complex scenarios.
## Start TDengine on the host network
@@ -42,7 +42,7 @@ The TDengine server running in the container uses the container's hostname to es
docker run -d --name tdengine --network host tdengine/tdengine
```
-The above command starts TDengine on the host network and uses the host's FQDN to establish a connection instead of the container's hostname. It works too, like using `systemctl` to start TDengine on the host. If the TDengine client is already installed on the host, you can access it directly with the following command.
+The above command starts TDengine on the host network and uses the host's FQDN to establish a connection instead of the container's hostname. It is the equivalent of using `systemctl` to start TDengine on the host. If the TDengine client is already installed on the host, you can access it directly with the following command.
```shell
$ taos
@@ -315,13 +315,13 @@ password: taosdata
taoslog-td2:
```
- :::note
+:::note
- The `VERSION` environment variable is used to set the tdengine image tag
- `TAOS_FIRST_EP` must be set on the newly created instance so that it can join the TDengine cluster; if there is a high availability requirement, `TAOS_SECOND_EP` needs to be used at the same time
- `TAOS_REPLICA` is used to set the default number of database replicas. Its value range is [1,3]
- We recommend setting with `TAOS_ARBITRATOR` to use arbitrator in a two-nodes environment.
- :::
-
+ We recommend setting it with `TAOS_ARBITRATOR` to use arbitrator in a two-nodes environment.
+
+ :::
2. Start the cluster
@@ -382,7 +382,7 @@ password: taosdata
Suppose you want to deploy multiple taosAdapters to improve throughput and provide high availability. In that case, the recommended configuration method uses a reverse proxy such as Nginx to offer a unified access entry. For specific configuration methods, please refer to the official documentation of Nginx. Here is an example:
```docker
- ersion: "3"
+ version: "3"
networks:
inter:
diff --git a/docs-en/14-reference/12-config/index.md b/docs-en/14-reference/12-config/index.md
index c4e7cc523c400ea5be6610b64f1561246b1bfa24..8ad9a474a02c5cc52559ccdc5910ad9d7b6264ae 100644
--- a/docs-en/14-reference/12-config/index.md
+++ b/docs-en/14-reference/12-config/index.md
@@ -65,7 +65,7 @@ taos --dump-config
| ------------- | ------------------------------------------------------------------------ |
| Applicable | Server Only |
| Meaning | The FQDN of the host where `taosd` will be started. It can be IP address |
-| Default Value | The first hostname configured for the hos |
+| Default Value | The first hostname configured for the host |
| Note | It should be within 96 bytes |
### serverPort
@@ -78,7 +78,7 @@ taos --dump-config
| Note | REST service is provided by `taosd` before 2.4.0.0 but by `taosAdapter` after 2.4.0.0, the default port of REST service is 6041 |
:::note
-TDengine uses continuous 13 ports, both TCP and TCP, from the port specified by `serverPort`. These ports need to be kept as open if firewall is enabled. Below table describes the ports used by TDengine in details.
+TDengine uses 13 continuous ports, both TCP and UDP, starting with the port specified by `serverPort`. You should ensure, in your firewall rules, that these ports are kept open. Below table describes the ports used by TDengine in details.
:::
@@ -182,8 +182,8 @@ TDengine uses continuous 13 ports, both TCP and TCP, from the port specified by
| ------------- | -------------------------------------------- |
| Applicable | Server Only |
| Meaning | The maximum number of distinct rows returned |
-| Value Range | [100,000 - 100, 000, 000] |
-| Default Value | 100, 000 |
+| Value Range | [100,000 - 100,000,000] |
+| Default Value | 100,000 |
| Note | After version 2.3.0.0 |
## Locale Parameters
@@ -197,19 +197,19 @@ TDengine uses continuous 13 ports, both TCP and TCP, from the port specified by
| Default Value | TimeZone configured in the host |
:::info
-To handle the data insertion and data query from multiple timezones, Unix Timestamp is used and stored TDengine. The timestamp generated from any timezones at same time is same in Unix timestamp. To make sure the time on client side can be converted to Unix timestamp correctly, the timezone must be set properly.
+To handle the data insertion and data query from multiple timezones, Unix Timestamp is used and stored in TDengine. The timestamp generated from any timezones at same time is same in Unix timestamp. To make sure the time on client side can be converted to Unix timestamp correctly, the timezone must be set properly.
On Linux system, TDengine clients automatically obtain timezone from the host. Alternatively, the timezone can be configured explicitly in configuration file `taos.cfg` like below.
```
-timezone UTC-8
+timezone UTC-7
timezone GMT-8
timezone Asia/Shanghai
```
The above examples are all proper configuration for the timezone of UTC+8. On Windows system, however, `timezone Asia/Shanghai` is not supported, it must be set as `timezone UTC-8`.
-The setting for timezone impacts the strings not in Unix timestamp, keywords or functions related to date/time, for example
+The setting for timezone impacts strings that are not in Unix timestamp format and keywords or functions related to date/time. For example:
```sql
SELECT count(*) FROM table_name WHERE TS<'2019-04-11 12:01:08';
@@ -227,7 +227,7 @@ If the timezone is UTC, it's equal to
SELECT count(*) FROM table_name WHERE TS<1554984068000;
```
-To avoid the problems of using time strings, Unix timestamp can be used directly. Furthermore, time strings with timezone can be used in SQL statement, for example "2013-04-12T15:52:01.123+08:00" in RFC3339 format or "2013-04-12T15:52:01.123+0800" in ISO-8601 format, they are not influenced by timezone setting when converted to Unix timestamp.
+To avoid the problems of using time strings, Unix timestamp can be used directly. Furthermore, time strings with timezone can be used in SQL statements. For example "2013-04-12T15:52:01.123+08:00" in RFC3339 format or "2013-04-12T15:52:01.123+0800" in ISO-8601 format are not influenced by timezone setting when converted to Unix timestamp.
:::
@@ -240,11 +240,11 @@ To avoid the problems of using time strings, Unix timestamp can be used directly
| Default Value | Locale configured in host |
:::info
-A specific type "nchar" is provided in TDengine to store non-ASCII characters such as Chinese, Japanese, Korean. The characters to be stored in nchar type are firstly encoded in UCS4-LE before sending to server side. To store non-ASCII characters correctly, the encoding format of the client side needs to be set properly.
+A specific type "nchar" is provided in TDengine to store non-ASCII characters such as Chinese, Japanese, and Korean. The characters to be stored in nchar type are firstly encoded in UCS4-LE before sending to server side. To store non-ASCII characters correctly, the encoding format of the client side needs to be set properly.
The characters input on the client side are encoded using the default system encoding, which is UTF-8 on Linux, or GB18030 or GBK on some systems in Chinese, POSIX in docker, CP936 on Windows in Chinese. The encoding of the operating system in use must be set correctly so that the characters in nchar type can be converted to UCS4-LE.
-The locale definition standard on Linux is: \_., for example, in "zh_CN.UTF-8", "zh" means Chinese, "CN" means China mainland, "UTF-8" means charset. On Linux andMac OSX, the charset can be set by locale in the system. On Windows system another configuration parameter `charset` must be used to configure charset because the locale used on Windows is not POSIX standard. Of course, `charset` can also be used on Linux to specify the charset.
+The locale definition standard on Linux is: \_., for example, in "zh_CN.UTF-8", "zh" means Chinese, "CN" means China mainland, "UTF-8" means charset. On Linux and Mac OSX, the charset can be set by locale in the system. On Windows system another configuration parameter `charset` must be used to configure charset because the locale used on Windows is not POSIX standard. Of course, `charset` can also be used on Linux to specify the charset.
:::
@@ -263,7 +263,7 @@ On Linux, if `charset` is not set in `taos.cfg`, when `taos` is started, the cha
locale zh_CN.UTF-8
```
-Besides, on Linux system, if the charset contained in `locale` is not consistent with that set by `charset`, the one who comes later in the configuration file is used.
+On a Linux system, if the charset contained in `locale` is not consistent with that set by `charset`, the later setting in the configuration file takes precedence.
```title="Effective charset is GBK"
locale zh_CN.UTF-8
@@ -778,8 +778,8 @@ To prevent system resource from being exhausted by multiple concurrent streams,
## HTTP Parameters
:::note
-HTTP server had been provided by `taosd` prior to version 2.4.0.0, now is provided by `taosAdapter` after version 2.4.0.0.
-The parameters described in this section are only application in versions prior to 2.4.0.0. If you are using any version from 2.4.0.0, please refer to [taosAdapter]](/reference/taosadapter/).
+HTTP service was provided by `taosd` prior to version 2.4.0.0 and is provided by `taosAdapter` after version 2.4.0.0.
+The parameters described in this section are only application in versions prior to 2.4.0.0. If you are using any version from 2.4.0.0, please refer to [taosAdapter](/reference/taosadapter/).
:::
diff --git a/docs-en/14-reference/12-directory.md b/docs-en/14-reference/12-directory.md
index dbdba2b715bb41baf9b70dce91a3065e585d0434..304e3bcb434ee9a6ba338577a4d1ba546b548e3f 100644
--- a/docs-en/14-reference/12-directory.md
+++ b/docs-en/14-reference/12-directory.md
@@ -32,7 +32,7 @@ All executable files of TDengine are in the _/usr/local/taos/bin_ directory by d
- _taosd-dump-cfg.gdb_: script to facilitate debugging of taosd's gdb execution.
:::note
-taosdump after version 2.4.0.0 require taosTools as a standalone installation. A few version taosBenchmark is include in taosTools too.
+taosdump after version 2.4.0.0 require taosTools as a standalone installation. A new version of taosBenchmark is include in taosTools too.
:::
:::tip
diff --git a/docs-en/14-reference/13-schemaless/13-schemaless.md b/docs-en/14-reference/13-schemaless/13-schemaless.md
index d9ce9b434dd14a89d243b2ed629f3fde64e6aba0..acbbb1cd3c5a7c50e226644f2de9e0e77274c6dd 100644
--- a/docs-en/14-reference/13-schemaless/13-schemaless.md
+++ b/docs-en/14-reference/13-schemaless/13-schemaless.md
@@ -1,19 +1,19 @@
---
title: Schemaless Writing
-description: "The Schemaless write method eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data as it is written to the interface."
+description: "The Schemaless write method eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data, as it is written to the interface."
---
-In IoT applications, many data items are often collected for intelligent control, business analysis, device monitoring, etc. Due to the version upgrade of the application logic, or the hardware adjustment of the device itself, the data collection items may change more frequently. To facilitate the data logging work in such cases, TDengine starting from version 2.2.0.0, it provides a series of interfaces to the schemaless writing method, which eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data as the data is written to the interface. And when necessary, Schemaless writing will automatically add the required columns to ensure that the data written by the user is stored correctly.
+In IoT applications, data is collected for many purposes such as intelligent control, business analysis, device monitoring and so on. Due to changes in business or functional requirements or changes in device hardware, the application logic and even the data collected may change. To provide the flexibility needed in such cases and in a rapidly changing IoT landscape, TDengine starting from version 2.2.0.0, provides a series of interfaces for the schemaless writing method. These interfaces eliminate the need to create super tables and subtables in advance by automatically creating the storage structure corresponding to the data as the data is written to the interface. When necessary, schemaless writing will automatically add the required columns to ensure that the data written by the user is stored correctly.
-The schemaless writing method creates super tables and their corresponding sub-tables completely indistinguishable from the super tables and sub-tables created directly via SQL. You can write data directly to them via SQL statements. Note that the names of tables created by schemaless writing are based on fixed mapping rules for tag values, so they are not explicitly ideographic and lack readability.
+The schemaless writing method creates super tables and their corresponding subtables. These are completely indistinguishable from the super tables and subtables created directly via SQL. You can write data directly to them via SQL statements. Note that the names of tables created by schemaless writing are based on fixed mapping rules for tag values, so they are not explicitly ideographic and they lack readability.
## Schemaless Writing Line Protocol
-TDengine's schemaless writing line protocol supports to be compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. However, when using these three protocols, you need to specify in the API the standard of the parsing protocol to be used for the input content.
+TDengine's schemaless writing line protocol supports InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. However, when using these three protocols, you need to specify in the API the standard of the parsing protocol to be used for the input content.
For the standard writing protocols of InfluxDB and OpenTSDB, please refer to the documentation of each protocol. The following is a description of TDengine's extended protocol, based on InfluxDB's line protocol first. They allow users to control the (super table) schema more granularly.
-With the following formatting conventions, Schemaless writing uses a single string to express a data row (multiple rows can be passed into the writing API at once to enable bulk writing).
+With the following formatting conventions, schemaless writing uses a single string to express a data row (multiple rows can be passed into the writing API at once to enable bulk writing).
```json
measurement,tag_set field_set timestamp
@@ -23,7 +23,7 @@ where :
- measurement will be used as the data table name. It will be separated from tag_set by a comma.
- tag_set will be used as tag data in the format `=,=`, i.e. multiple tags' data can be separated by a comma. It is separated from field_set by space.
-- field_set will be used as normal column data in the format of `=,=`, again using a comma to separate multiple normal columns of data. It is separated from the timestamp by space.
+- field_set will be used as normal column data in the format of `=,=`, again using a comma to separate multiple normal columns of data. It is separated from the timestamp by a space.
- The timestamp is the primary key corresponding to the data in this row.
All data in tag_set is automatically converted to the NCHAR data type and does not require double quotes (").
@@ -32,7 +32,7 @@ In the schemaless writing data line protocol, each data item in the field_set ne
- If there are English double quotes on both sides, it indicates the BINARY(32) type. For example, `"abc"`.
- If there are double quotes on both sides and an L prefix, it means NCHAR(32) type. For example, `L"error message"`.
-- Spaces, equal signs (=), commas (,), and double quotes (") need to be escaped with a backslash (\) in front. (All refer to the ASCII character)
+- Spaces, equal signs (=), commas (,), and double quotes (") need to be escaped with a backslash (\\) in front. (All refer to the ASCII character)
- Numeric types will be distinguished from data types by the suffix.
| **Serial number** | **Postfix** | **Mapping type** | **Size (bytes)** |
@@ -58,26 +58,25 @@ Note that if the wrong case is used when describing the data type suffix, or if
Schemaless writes process row data according to the following principles.
-1. You can use the following rules to generate the sub-table names: first, combine the measurement name and the key and value of the label into the next string:
+1. You can use the following rules to generate the subtable names: first, combine the measurement name and the key and value of the label into the next string:
```json
"measurement,tag_key1=tag_value1,tag_key2=tag_value2"
```
Note that tag_key1, tag_key2 are not the original order of the tags entered by the user but the result of using the tag names in ascending order of the strings. Therefore, tag_key1 is not the first tag entered in the line protocol.
-The string's MD5 hash value "md5_val" is calculated after the ranking is completed. The calculation result is then combined with the string to generate the table name: "t_md5_val". "t*" is a fixed prefix that every table generated by this mapping relationship has. 2.
+The string's MD5 hash value "md5_val" is calculated after the ranking is completed. The calculation result is then combined with the string to generate the table name: "t_md5_val". "t*" is a fixed prefix that every table generated by this mapping relationship has.
2. If the super table obtained by parsing the line protocol does not exist, this super table is created.
-If the sub-table obtained by the parse line protocol does not exist, Schemaless creates the sub-table according to the sub-table name determined in steps 1 or 2. 4.
+If the subtable obtained by the parse line protocol does not exist, Schemaless creates the sub-table according to the subtable name determined in steps 1 or 2.
4. If the specified tag or regular column in the data row does not exist, the corresponding tag or regular column is added to the super table (only incremental).
5. If there are some tag columns or regular columns in the super table that are not specified to take values in a data row, then the values of these columns are set to NULL.
6. For BINARY or NCHAR columns, if the length of the value provided in a data row exceeds the column type limit, the maximum length of characters allowed to be stored in the column is automatically increased (only incremented and not decremented) to ensure complete preservation of the data.
-7. If the specified data sub-table already exists, and the specified tag column takes a value different from the saved value this time, the value in the latest data row overwrites the old tag column take value.
+7. If the specified data subtable already exists, and the specified tag column takes a value different from the saved value this time, the value in the latest data row overwrites the old tag column take value.
8. Errors encountered throughout the processing will interrupt the writing process and return an error code.
:::tip
-All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed
-16k bytes. See [TAOS SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
+All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed 48k bytes. See [TAOS SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
:::
## Time resolution recognition
@@ -87,7 +86,7 @@ Three specified modes are supported in the schemaless writing process, as follow
| **Serial** | **Value** | **Description** |
| -------- | ------------------- | ------------------------------- |
| 1 | SML_LINE_PROTOCOL | InfluxDB Line Protocol |
-| 2 | SML_TELNET_PROTOCOL | OpenTSDB Text Line Protocol | | 2 | SML_TELNET_PROTOCOL | OpenTSDB Text Line Protocol
+| 2 | SML_TELNET_PROTOCOL | OpenTSDB Text Line Protocol |
| 3 | SML_JSON_PROTOCOL | JSON protocol format |
In the SML_LINE_PROTOCOL parsing mode, the user is required to specify the time resolution of the input timestamp. The available time resolutions are shown in the following table.
@@ -106,8 +105,11 @@ In SML_TELNET_PROTOCOL and SML_JSON_PROTOCOL modes, the time precision is determ
## Data schema mapping rules
-This section describes how data for line protocols are mapped to data with a schema. The data measurement in each line protocol is mapped to
-The tag name in tag_set is the name of the tag in the data schema, and the name in field_set is the column's name. The following data is used as an example to illustrate the mapping rules.
+This section describes how data for line protocols are mapped to data with a schema. The data measurement in each line protocol is mapped as follows:
+- The tag name in tag_set is the name of the tag in the data schema
+- The name in field_set is the column's name.
+
+The following data is used as an example to illustrate the mapping rules.
```json
st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
@@ -139,7 +141,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c5="pass" 1626006833639000000
st,t1=3,t2=4,t3=t3 c1=3i64,c5="passit" 1626006833640000000
```
-The first line of the line protocol parsing will declare column c5 is a BINARY(4) field, the second line data write will extract column c5 is still a BINARY column. Still, its width is 6, then you need to increase the width of the BINARY field to be able to accommodate the new string.
+The first line of the line protocol parsing will declare column c5 is a BINARY(4) field. The second line data write will parse column c5 as a BINARY column. But in the second line, c5's width is 6 so you need to increase the width of the BINARY field to be able to accommodate the new string.
```json
st,t1=3,t2=4,t3=t3 c1=3i64 1626006833639000000
diff --git a/docs-en/14-reference/_collectd.mdx b/docs-en/14-reference/_collectd.mdx
index 1f57d883eec9feadc3cc460bf968b0dd43fedfe8..ce88328098a181de48dcaa080ef45f228b20bf1c 100644
--- a/docs-en/14-reference/_collectd.mdx
+++ b/docs-en/14-reference/_collectd.mdx
@@ -25,7 +25,7 @@ The default database name written by taosAdapter is `collectd`. You can also mod
#collectd
collectd uses a plugin mechanism to write the collected monitoring data to different data storage software in various forms. tdengine supports both direct collection plugins and write_tsdb plugins.
-#### is configured to receive data from the direct collection plugin
+#### Configure the direct collection plugin
Modify the relevant configuration items in the collectd configuration file (default location /etc/collectd/collectd.conf).
@@ -62,7 +62,7 @@ LoadPlugin write_tsdb
```
-Where fills in the server's domain name or IP address running taosAdapter. Fill in the data that taosAdapter uses to receive the collectd write_tsdb plugin (default is 6047).
+Where is the domain name or IP address of the server running taosAdapter. Fill in the data that taosAdapter uses to receive the collectd write_tsdb plugin (default is 6047).
```text
LoadPlugin write_tsdb
diff --git a/docs-en/14-reference/_tcollector.mdx b/docs-en/14-reference/_tcollector.mdx
index 85794d54007b70acf205b1bbc897cec1d0c4f824..42b021410e3862c4fa328d8dae40dcac1456e929 100644
--- a/docs-en/14-reference/_tcollector.mdx
+++ b/docs-en/14-reference/_tcollector.mdx
@@ -17,7 +17,7 @@ password = "taosdata"
...
```
-The taosAdapter writes to the database with the default name `tcollector`. You can also modify the taosAdapter configuration file dbs entry to specify a different name. user and password fill in the actual TDengine configuration values. After changing the configuration file, you need to restart the taosAdapter.
+The taosAdapter writes to the database with the default name `tcollector`. You can also modify the taosAdapter configuration file dbs entry to specify a different name. Fill in the actual user and password for TDengine. After changing the configuration file, you need to restart the taosAdapter.
- You can also enable taosAdapter to receive tcollector data by using the taosAdapter command-line parameters or setting environment variables.
@@ -25,7 +25,7 @@ The taosAdapter writes to the database with the default name `tcollector`. You c
To use TCollector, you need to download its [source code](https://github.com/OpenTSDB/tcollector). Its configuration items are in its source code. Note: TCollector differs significantly from version to version, so here is an example of the latest code for the current master branch (git commit: 37ae920).
-Modify the contents of the `collectors/etc/config.py` and `tcollector.py` files. Change the address of the OpenTSDB host to the domain name or IP address of the server where taosAdapter is deployed, and change the port to the port that taosAdapter supports TCollector on (default is 6049).
+Modify the contents of the `collectors/etc/config.py` and `tcollector.py` files. Change the address of the OpenTSDB host to the domain name or IP address of the server where taosAdapter is deployed, and change the port to the port on which taosAdapter supports TCollector (default is 6049).
Example of git diff output of source code changes.
diff --git a/docs-en/14-reference/index.md b/docs-en/14-reference/index.md
index 89f675902d01ba2d2c1b322408c372429d6bda1c..f350eebfc1a1ca2feaedc18c4b4fa798742e31b4 100644
--- a/docs-en/14-reference/index.md
+++ b/docs-en/14-reference/index.md
@@ -2,11 +2,11 @@
title: Reference
---
-The reference guide is the detailed introduction to TDengine, various TDengine's connectors in different languages, and the tools that come with it.
+The reference guide is a detailed introduction to TDengine including various TDengine connectors in different languages, and the tools that come with TDengine.
```mdx-code-block
import DocCardList from '@theme/DocCardList';
import {useCurrentSidebarCategory} from '@docusaurus/theme-common';
-```
\ No newline at end of file
+```
diff --git a/docs-en/14-reference/taosAdapter-architecture.png b/docs-en/14-reference/taosAdapter-architecture.png
deleted file mode 100644
index 08a9018553aae6f86b42d127b372d0cecfa9bdf8..0000000000000000000000000000000000000000
Binary files a/docs-en/14-reference/taosAdapter-architecture.png and /dev/null differ
diff --git a/docs-en/14-reference/taosAdapter-architecture.webp b/docs-en/14-reference/taosAdapter-architecture.webp
new file mode 100644
index 0000000000000000000000000000000000000000..a4162b0a037c06d34191784716c51080b9f8a570
Binary files /dev/null and b/docs-en/14-reference/taosAdapter-architecture.webp differ
diff --git a/docs-en/20-third-party/01-grafana.mdx b/docs-en/20-third-party/01-grafana.mdx
index c1bfd4a96a4576df8570d8b480d5c2afe47e20b8..1a84e02c665d2e49deca35a20b137b205736def5 100644
--- a/docs-en/20-third-party/01-grafana.mdx
+++ b/docs-en/20-third-party/01-grafana.mdx
@@ -3,13 +3,14 @@ sidebar_label: Grafana
title: Grafana
---
-TDengine can be quickly integrated with the open-source data visualization system [Grafana](https://www.grafana.com/) to build a data monitoring and alerting system. The whole process does not require any code development. And you can visualize the contents of the data tables in TDengine on a DashBoard.
+TDengine can be quickly integrated with the open-source data visualization system [Grafana](https://www.grafana.com/) to build a data monitoring and alerting system. The whole process does not require any code development. And you can visualize the contents of the data tables in TDengine on a dashboard.
You can learn more about using the TDengine plugin on [GitHub](https://github.com/taosdata/grafanaplugin/blob/master/README.md).
## Prerequisites
In order for Grafana to add the TDengine data source successfully, the following preparations are required:
+
1. The TDengine cluster is deployed and functioning properly
2. taosAdapter is installed and running properly. Please refer to the taosAdapter manual for details.
@@ -19,21 +20,22 @@ TDengine currently supports Grafana versions 7.0 and above. Users can go to the
## Configuring Grafana
-You can download The Grafana plugin for TDengine from . The current latest version is 3.1.4.
-
-Recommend using the [``grafana-cli`` command-line tool](https://grafana.com/docs/grafana/latest/administration/cli/) for plugin installation.
+Follow the installation steps in [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) with the [``grafana-cli`` command-line tool](https://grafana.com/docs/grafana/latest/administration/cli/) for plugin installation.
-``bash
-sudo -u grafana grafana-cli \
- --pluginUrl https://github.com/taosdata/grafanaplugin/releases/download/v3.1.4/tdengine-datasource-3.1.4.zip \
- plugins install tdengine-datasource
+```bash
+grafana-cli plugins install tdengine-datasource
+# with sudo
+sudo -u grafana grafana-cli plugins install tdengine-datasource
```
-Or download it locally and extract it to the Grafana plugin directory.
+Alternatively, you can manually download the .zip file from [GitHub](https://github.com/taosdata/grafanaplugin/releases/tag/latest) or [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) and unpack it into your grafana plugins directory.
```bash
-GF_VERSION=3.1.4
+GF_VERSION=3.2.2
+# from GitHub
wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
+# from Grafana
+wget -O tdengine-datasource-$GF_VERSION.zip https://grafana.com/api/plugins/tdengine-datasource/versions/$GF_VERSION/download
```
Take CentOS 7.2 for example, extract the plugin package to /var/lib/grafana/plugins directory, and restart grafana.
@@ -42,18 +44,10 @@ Take CentOS 7.2 for example, extract the plugin package to /var/lib/grafana/plug
sudo unzip tdengine-datasource-$GF_VERSION.zip -d /var/lib/grafana/plugins/
```
-Grafana versions 7.3+ / 8.x do signature checks on plugins, so you also need to add the following line to the grafana.ini file to use the plugin correctly.
-
-```ini
-[plugins]
-allow_loading_unsigned_plugins = tdengine-datasource
-```
-
-The TDengine plugin can be automatically installed and set up using the following environment variable settings in a Docker environment.
+If Grafana is running in a Docker environment, the TDengine plugin can be automatically installed and set up using the following environment variable settings:
```bash
-GF_INSTALL_PLUGINS=https://github.com/taosdata/grafanaplugin/releases/download/v3.1.4/tdengine-datasource-3.1.4.zip;tdengine- datasource
-GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=tdengine-datasource
+GF_INSTALL_PLUGINS=tdengine-datasource
```
## Using Grafana
@@ -62,39 +56,39 @@ GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=tdengine-datasource
Users can log in to the Grafana server (username/password: admin/admin) directly through the URL `http://localhost:3000` and add a datasource through `Configuration -> Data Sources` on the left side, as shown in the following figure.
-
+
Click `Add data source` to enter the Add data source page, and enter TDengine in the query box to add it, as shown in the following figure.
-
+
Enter the datasource configuration page, and follow the default prompts to modify the corresponding configuration.
-
+
- Host: IP address of the server where the components of the TDengine cluster provide REST service (offered by taosd before 2.4 and by taosAdapter since 2.4) and the port number of the TDengine REST service (6041), by default use `http://localhost:6041`.
- User: TDengine user name.
- Password: TDengine user password.
-Click `Save & Test` to test. Follows are a success.
+Click `Save & Test` to test. You should see a success message if the test worked.
-
+
### Create Dashboard
-Go back to the main interface to create the Dashboard, click Add Query to enter the panel query page:
+Go back to the main interface to create a dashboard and click Add Query to enter the panel query page:
-
+
As shown above, select the `TDengine` data source in the `Query` and enter the corresponding SQL in the query box below for query.
-- INPUT SQL: enter the statement to be queried (the result set of the SQL statement should be two columns and multiple rows), for example: `select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)`, where, from, to and interval are built-in variables of the TDengine plugin, indicating the range and time interval of queries fetched from the Grafana plugin panel. In addition to the built-in variables, ` custom template variables are also supported.
+- INPUT SQL: enter the statement to be queried (the result set of the SQL statement should be two columns and multiple rows), for example: `select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)`, where, from, to and interval are built-in variables of the TDengine plugin, indicating the range and time interval of queries fetched from the Grafana plugin panel. In addition to the built-in variables, custom template variables are also supported.
- ALIAS BY: This allows you to set the current query alias.
- GENERATE SQL: Clicking this button will automatically replace the corresponding variables and generate the final executed statement.
Follow the default prompt to query the average system memory usage for the specified interval on the server where the current TDengine deployment is located as follows.
-
+
> For more information on how to use Grafana to create the appropriate monitoring interface and for more details on using Grafana, refer to the official Grafana [documentation](https://grafana.com/docs/).
diff --git a/docs-en/20-third-party/03-telegraf.md b/docs-en/20-third-party/03-telegraf.md
index 0d563c9ff36268ac27e18e21fefed789789dc1a7..6a7aac322f9def880f58d7ed0adcc4a8f3687ed1 100644
--- a/docs-en/20-third-party/03-telegraf.md
+++ b/docs-en/20-third-party/03-telegraf.md
@@ -5,7 +5,7 @@ title: Telegraf writing
import Telegraf from "../14-reference/_telegraf.mdx"
-Telegraf is a viral metrics collection open-source software. Telegraf can collect the operation information of various components without writing any scripts to collect regularly, reducing the difficulty of data acquisition.
+Telegraf is a viral, open-source, metrics collection software. Telegraf can collect the operation information of various components without having to write any scripts to collect regularly, reducing the difficulty of data acquisition.
Telegraf's data can be written to TDengine by simply adding the output configuration of Telegraf to the URL corresponding to taosAdapter and modifying several configuration items. The presence of Telegraf data in TDengine can take advantage of TDengine's efficient storage query performance and clustering capabilities for time-series data.
diff --git a/docs-en/20-third-party/05-collectd.md b/docs-en/20-third-party/05-collectd.md
index 609e55842ab35cdc2d394663f5450f908e49f7f7..db62f2ecd1afb4936466ca0243a7e14ff294f8b6 100644
--- a/docs-en/20-third-party/05-collectd.md
+++ b/docs-en/20-third-party/05-collectd.md
@@ -6,7 +6,7 @@ title: collectd writing
import CollectD from "../14-reference/_collectd.mdx"
-collectd is a daemon used to collect system performance metric data. collectd provides various storage mechanisms to store different values. It periodically counts system performance statistics number while the system is running and storing information. You can use this information to help identify current system performance bottlenecks and predict future system load.
+collectd is a daemon used to collect system performance metric data. collectd provides various storage mechanisms to store different values. It periodically counts system performance statistics while the system is running and storing information. You can use this information to help identify current system performance bottlenecks and predict future system load.
You can write the data collected by collectd to TDengine by simply modifying the configuration of collectd to the domain name (or IP address) and corresponding port of the server running taosAdapter. It can take full advantage of TDengine's efficient storage query performance and clustering capability for time-series data.
diff --git a/docs-en/20-third-party/06-statsd.md b/docs-en/20-third-party/06-statsd.md
index bf4b6c7ab5dac4114cad0d650b2aeb026a67581c..40e927b9fd1d2eca9d454a987ac51d533eb75005 100644
--- a/docs-en/20-third-party/06-statsd.md
+++ b/docs-en/20-third-party/06-statsd.md
@@ -7,7 +7,7 @@ import StatsD from "../14-reference/_statsd.mdx"
StatsD is a simple daemon for aggregating application metrics, which has evolved rapidly in recent years into a unified protocol for collecting application performance metrics.
-You can write StatsD data to TDengine by simply modifying in the configuration file of StatsD with the domain name (or IP address) of the server running taosAdapter and the corresponding port. It can take full advantage of TDengine's efficient storage query performance and clustering capabilities for time-series data.
+You can write StatsD data to TDengine by simply modifying the configuration file of StatsD with the domain name (or IP address) of the server running taosAdapter and the corresponding port. It can take full advantage of TDengine's efficient storage query performance and clustering capabilities for time-series data.
## Prerequisites
diff --git a/docs-en/20-third-party/07-icinga2.md b/docs-en/20-third-party/07-icinga2.md
index ba9cde8cea7504ac9df871d5f6aa42cc5c94d895..b27196dfe313b468eeb73ff4b114d9d955618c3e 100644
--- a/docs-en/20-third-party/07-icinga2.md
+++ b/docs-en/20-third-party/07-icinga2.md
@@ -5,7 +5,7 @@ title: icinga2 writing
import Icinga2 from "../14-reference/_icinga2.mdx"
-icinga2 is an open-source software monitoring host and network initially developed from the Nagios network monitoring application. Currently, icinga2 is distributed under the GNU GPL v2 license.
+icinga2 is an open-source, host and network monitoring software initially developed from the Nagios network monitoring application. Currently, icinga2 is distributed under the GNU GPL v2 license.
You can write the data collected by icinga2 to TDengine by simply modifying the icinga2 configuration to point to the taosAdapter server and the corresponding port, taking advantage of TDengine's efficient storage and query performance and clustering capabilities for time-series data.
diff --git a/docs-en/20-third-party/09-emq-broker.md b/docs-en/20-third-party/09-emq-broker.md
index 13562ba7f720499c23771437c5c6ba0f61819456..d3eafebc14e8ddc29b03abf8785a6c0a013ef014 100644
--- a/docs-en/20-third-party/09-emq-broker.md
+++ b/docs-en/20-third-party/09-emq-broker.md
@@ -3,7 +3,7 @@ sidebar_label: EMQX Broker
title: EMQX Broker writing
---
-MQTT is a popular IoT data transfer protocol, [EMQX](https://github.com/emqx/emqx) is an open-source MQTT Broker software, without any code, only need to use "rules" in EMQX Dashboard to do simple configuration. You can write MQTT data directly to TDengine. EMQX supports saving data to TDengine by sending it to web services and provides a native TDengine driver for direct saving in the Enterprise Edition. Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use it. tdengine).
+MQTT is a popular IoT data transfer protocol. [EMQX](https://github.com/emqx/emqx) is an open-source MQTT Broker software. You can write MQTT data directly to TDengine without any code. You only need to setup "rules" in EMQX Dashboard to create a simple configuration. EMQX supports saving data to TDengine by sending data to a web service and provides a native TDengine driver for direct saving in the Enterprise Edition. Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use it.).
## Prerequisites
@@ -44,25 +44,25 @@ Since the configuration interface of EMQX differs from version to version, here
Use your browser to open the URL `http://IP:18083` and log in to EMQX Dashboard. The initial installation username is `admin` and the password is: `public`.
-
+
### Creating Rule
Select "Rule" in the "Rule Engine" on the left and click the "Create" button: !
-
+
### Edit SQL fields
-
+
### Add "action handler"
-
+
### Add "Resource"
-
+
Select "Data to Web Service" and click the "New Resource" button.
@@ -70,13 +70,13 @@ Select "Data to Web Service" and click the "New Resource" button.
Select "Data to Web Service" and fill in the request URL as the address and port of the server running taosAdapter (default is 6041). Leave the other properties at their default values.
-
+
### Edit "action"
Edit the resource configuration to add the key/value pairing for Authorization. Please refer to the [ TDengine REST API documentation ](https://docs.taosdata.com/reference/rest-api/) for the authorization in details. Enter the rule engine replacement template in the message body.
-
+
## Compose program to mock data
@@ -163,7 +163,7 @@ Edit the resource configuration to add the key/value pairing for Authorization.
Note: `CLIENT_NUM` in the code can be set to a smaller value at the beginning of the test to avoid hardware performance be not capable to handle a more significant number of concurrent clients.
-
+
## Execute tests to simulate sending MQTT data
@@ -172,19 +172,19 @@ npm install mqtt mockjs --save ---registry=https://registry.npm.taobao.org
node mock.js
```
-
+
## Verify that EMQX is receiving data
Refresh the EMQX Dashboard rules engine interface to see how many records were received correctly:
-
+
## Verify that data writing to TDengine
Use the TDengine CLI program to log in and query the appropriate databases and tables to verify that the data is being written to TDengine correctly:
-
+
Please refer to the [TDengine official documentation](https://docs.taosdata.com/) for more details on how to use TDengine.
EMQX Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use EMQX.
diff --git a/docs-en/20-third-party/11-kafka.md b/docs-en/20-third-party/11-kafka.md
index b9c7a3814a75a066b498438b6e632690697ae7ca..6720af8bf81ea2f4fce415a54847453f578ababf 100644
--- a/docs-en/20-third-party/11-kafka.md
+++ b/docs-en/20-third-party/11-kafka.md
@@ -7,17 +7,17 @@ TDengine Kafka Connector contains two plugins: TDengine Source Connector and TDe
## What is Kafka Connect?
-Kafka Connect is a component of Apache Kafka that enables other systems, such as databases, cloud services, file systems, etc., to connect to Kafka easily. Data can flow from other software to Kafka via Kafka Connect and Kafka to other systems via Kafka Connect. Plugins that read data from other software are called Source Connectors, and plugins that write data to other software are called Sink Connectors. Neither Source Connector nor Sink Connector will directly connect to Kafka Broker, and Source Connector transfers data to Kafka Connect. Sink Connector receives data from Kafka Connect.
+Kafka Connect is a component of [Apache Kafka](https://kafka.apache.org/) that enables other systems, such as databases, cloud services, file systems, etc., to connect to Kafka easily. Data can flow from other software to Kafka via Kafka Connect and Kafka to other systems via Kafka Connect. Plugins that read data from other software are called Source Connectors, and plugins that write data to other software are called Sink Connectors. Neither Source Connector nor Sink Connector will directly connect to Kafka Broker, and Source Connector transfers data to Kafka Connect. Sink Connector receives data from Kafka Connect.
-
+
TDengine Source Connector is used to read data from TDengine in real-time and send it to Kafka Connect. Users can use The TDengine Sink Connector to receive data from Kafka Connect and write it to TDengine.
-
+
## What is Confluent?
-Confluent adds many extensions to Kafka. include:
+[Confluent](https://www.confluent.io/) adds many extensions to Kafka. include:
1. Schema Registry
2. REST Proxy
@@ -26,7 +26,7 @@ Confluent adds many extensions to Kafka. include:
5. GUI for managing and monitoring Kafka - Confluent Control Center
Some of these extensions are available in the community version of Confluent. Some are only available in the enterprise version.
-
+
Confluent Enterprise Edition provides the `confluent` command-line tool to manage various components.
@@ -79,10 +79,10 @@ Development: false
git clone https://github.com:taosdata/kafka-connect-tdengine.git
cd kafka-connect-tdengine
mvn clean package
-unzip -d $CONFLUENT_HOME/share/confluent-hub-components/ target/components/packages/taosdata-kafka-connect-tdengine-0.1.0.zip
+unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
```
-The above script first clones the project source code and then compiles and packages it with Maven. After the package is complete, the zip package of the plugin is generated in the `target/components/packages/` directory. Unzip this zip package to the path where the plugin is installed. The path to install the plugin is in the configuration file `$CONFLUENT_HOME/etc/kafka/connect-standalone.properties`. The default path is `$CONFLUENT_HOME/share/confluent-hub-components/`.
+The above script first clones the project source code and then compiles and packages it with Maven. After the package is complete, the zip package of the plugin is generated in the `target/components/packages/` directory. Unzip this zip package to plugin path. We used `$CONFLUENT_HOME/share/java/` above because it's a build in plugin path.
### Install with confluent-hub
@@ -96,7 +96,7 @@ confluent local services start
```
:::note
-Be sure to install the plugin before starting Confluent. Otherwise, there will be a class not found error. The log of Kafka Connect (default path: /tmp/confluent.xxxx/connect/logs/connect.log) will output the successfully installed plugin, which users can use to determine whether the plugin is installed successfully.
+Be sure to install the plugin before starting Confluent. Otherwise, Kafka Connect will fail to discover the plugins.
:::
:::tip
@@ -123,6 +123,59 @@ Control Center is [UP]
To clear data, execute `rm -rf /tmp/confluent.106668`.
:::
+### Check Confluent Services Status
+
+Use command bellow to check the status of all service:
+
+```
+confluent local services status
+```
+
+The expected output is:
+```
+Connect is [UP]
+Control Center is [UP]
+Kafka is [UP]
+Kafka REST is [UP]
+ksqlDB Server is [UP]
+Schema Registry is [UP]
+ZooKeeper is [UP]
+```
+
+### Check Successfully Loaded Plugin
+
+After Kafka Connect was completely started, you can use bellow command to check if our plugins are installed successfully:
+```
+confluent local services connect plugin list
+```
+
+The output should contains `TDengineSinkConnector` and `TDengineSourceConnector` as bellow:
+
+```
+Available Connect Plugins:
+[
+ {
+ "class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector",
+ "type": "sink",
+ "version": "1.0.0"
+ },
+ {
+ "class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
+ "type": "source",
+ "version": "1.0.0"
+ },
+......
+```
+
+If not, please check the log file of Kafka Connect. To view the log file path, please execute:
+
+```
+echo `cat /tmp/confluent.current`/connect/connect.stdout
+```
+It should produce a path like:`/tmp/confluent.104086/connect/connect.stdout`
+
+Besides log file `connect.stdout` there is a file named `connect.properties`. At the end of this file you can see the effective `plugin.path` which is a series of paths joined by comma. If Kafka Connect not found our plugins, it's probably because the installed path is not included in `plugin.path`.
+
## The use of TDengine Sink Connector
The role of the TDengine Sink Connector is to synchronize the data of the specified topic to TDengine. Users do not need to create databases and super tables in advance. The name of the target database can be specified manually (see the configuration parameter connection.database), or it can be generated according to specific rules (see the configuration parameter connection.database.prefix).
@@ -142,7 +195,7 @@ vi sink-demo.properties
sink-demo.properties' content is following:
```ini title="sink-demo.properties"
-name=tdengine-sink-demo
+name=TDengineSinkConnector
connector.class=com.taosdata.kafka.connect.sink.TDengineSinkConnector
tasks.max=1
topics=meters
@@ -151,6 +204,7 @@ connection.user=root
connection.password=taosdata
connection.database=power
db.schemaless=line
+data.precision=ns
key.converter=org.apache.kafka.connect.storage.StringConverter
value.converter=org.apache.kafka.connect.storage.StringConverter
```
@@ -177,6 +231,7 @@ If the above command is executed successfully, the output is as follows:
"connection.url": "jdbc:TAOS://127.0.0.1:6030",
"connection.user": "root",
"connector.class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector",
+ "data.precision": "ns",
"db.schemaless": "line",
"key.converter": "org.apache.kafka.connect.storage.StringConverter",
"tasks.max": "1",
@@ -194,10 +249,10 @@ If the above command is executed successfully, the output is as follows:
Prepare text file as test data, its content is following:
```txt title="test-data.txt"
-meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000000
-meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250000000
-meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249000000
-meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250000000
+meters,location=California.LoSangeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000000
+meters,location=California.LoSangeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250000000
+meters,location=California.LoSangeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249000000
+meters,location=California.LoSangeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250000000
```
Use kafka-console-producer to write test data to the topic `meters`.
@@ -221,14 +276,14 @@ Database changed.
taos> select * from meters;
ts | current | voltage | phase | groupid | location |
===============================================================================================================================================================
- 2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | Beijing.Haidian |
- 2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | Beijing.Haidian |
- 2022-03-28 09:56:51.249000000 | 10.800000000 | 223.000000000 | 0.290000000 | 3 | Beijing.Haidian |
- 2022-03-28 09:56:51.250000000 | 11.300000000 | 221.000000000 | 0.350000000 | 3 | Beijing.Haidian |
+ 2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles |
+ 2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles |
+ 2022-03-28 09:56:51.249000000 | 10.800000000 | 223.000000000 | 0.290000000 | 3 | California.LosAngeles |
+ 2022-03-28 09:56:51.250000000 | 11.300000000 | 221.000000000 | 0.350000000 | 3 | California.LosAngeles |
Query OK, 4 row(s) in set (0.004208s)
```
-If you see the above data, the synchronization is successful. If not, check the logs of Kafka Connect. For detailed description of configuration parameters, see [Configuration Reference](#Configuration Reference).
+If you see the above data, the synchronization is successful. If not, check the logs of Kafka Connect. For detailed description of configuration parameters, see [Configuration Reference](#configuration-reference).
## The use of TDengine Source Connector
@@ -273,7 +328,7 @@ DROP DATABASE IF EXISTS test;
CREATE DATABASE test;
USE test;
CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
-INSERT INTO d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) d1002 USING meters TAGS(Beijing.Chaoyang, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) d1003 USING meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) d1003 USING meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) d1004 USING meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) d1004 USING meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000);
+INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) d1002 USING meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) d1003 USING meters TAGS(California.LoSangeles, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) d1003 USING meters TAGS(California.LoSangeles, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) d1004 USING meters TAGS(California.LoSangeles, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) d1004 USING meters TAGS(California.LoSangeles, 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000);
```
Use TDengine CLI to execute SQL script
@@ -300,8 +355,8 @@ output:
````
......
-meters,location="beijing.chaoyang",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000
-meters,location="beijing.chaoyang",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000
+meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000
+meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000
......
````
@@ -356,6 +411,7 @@ The following configuration items apply to TDengine Sink Connector and TDengine
4. `max.retries`: The maximum number of retries when an error occurs. Defaults to 1.
5. `retry.backoff.ms`: The time interval for retry when sending an error. The unit is milliseconds. The default is 3000.
6. `db.schemaless`: Data format, could be one of `line`, `json`, and `telnet`. Represent InfluxDB line protocol format, OpenTSDB JSON format, and OpenTSDB Telnet line protocol format.
+7. `data.precision`: The time precision when use InfluxDB line protocol format data, could be one of `ms`, `us` and `ns`. The default is `ns`.
### TDengine Source Connector specific configuration
@@ -366,7 +422,13 @@ The following configuration items apply to TDengine Sink Connector and TDengine
5. `fetch.max.rows`: The maximum number of rows retrieved when retrieving the database. Default is 100.
6. `out.format`: The data format. The value could be line or json. The line represents the InfluxDB Line protocol format, and json represents the OpenTSDB JSON format. Default is `line`.
-## feedback
+
+## Other notes
+
+1. To install plugin to a customized location, refer to https://docs.confluent.io/home/connect/self-managed/install.html#install-connector-manually.
+2. To use Kafka Connect without confluent, refer to https://kafka.apache.org/documentation/#connect.
+
+## Feedback
https://github.com/taosdata/kafka-connect-tdengine/issues
diff --git a/docs-en/20-third-party/emqx/add-action-handler.png b/docs-en/20-third-party/emqx/add-action-handler.png
deleted file mode 100644
index 97a1f933ecfadfcab399938806d73c5a5ecc6427..0000000000000000000000000000000000000000
Binary files a/docs-en/20-third-party/emqx/add-action-handler.png and /dev/null differ
diff --git a/docs-en/20-third-party/emqx/add-action-handler.webp b/docs-en/20-third-party/emqx/add-action-handler.webp
new file mode 100644
index 0000000000000000000000000000000000000000..4a8d105f711991226cfbd43b6e9ab07d7ccc686a
Binary files /dev/null and b/docs-en/20-third-party/emqx/add-action-handler.webp differ
diff --git a/docs-en/20-third-party/emqx/check-result-in-taos.png b/docs-en/20-third-party/emqx/check-result-in-taos.png
deleted file mode 100644
index c17a5c1ea2b9bbd49263056c8bf09c9aabab07d5..0000000000000000000000000000000000000000
Binary files a/docs-en/20-third-party/emqx/check-result-in-taos.png and /dev/null differ
diff --git a/docs-en/20-third-party/emqx/check-result-in-taos.webp b/docs-en/20-third-party/emqx/check-result-in-taos.webp
new file mode 100644
index 0000000000000000000000000000000000000000..8fa040a86104fece02ddaf8986f0a67de316143d
Binary files /dev/null and b/docs-en/20-third-party/emqx/check-result-in-taos.webp differ
diff --git a/docs-en/20-third-party/emqx/check-rule-matched.png b/docs-en/20-third-party/emqx/check-rule-matched.png
deleted file mode 100644
index 9e9a466946a1afa857e2bbc07b14956dd0f984b6..0000000000000000000000000000000000000000
Binary files a/docs-en/20-third-party/emqx/check-rule-matched.png and /dev/null differ
diff --git a/docs-en/20-third-party/emqx/check-rule-matched.webp b/docs-en/20-third-party/emqx/check-rule-matched.webp
new file mode 100644
index 0000000000000000000000000000000000000000..e5a614035739df859b27c817f3b9f41be444b513
Binary files /dev/null and b/docs-en/20-third-party/emqx/check-rule-matched.webp differ
diff --git a/docs-en/20-third-party/emqx/client-num.png b/docs-en/20-third-party/emqx/client-num.png
deleted file mode 100644
index fff48cbf3b271c367079ddde425b3f9b014062f7..0000000000000000000000000000000000000000
Binary files a/docs-en/20-third-party/emqx/client-num.png and /dev/null differ
diff --git a/docs-en/20-third-party/emqx/client-num.webp b/docs-en/20-third-party/emqx/client-num.webp
new file mode 100644
index 0000000000000000000000000000000000000000..a151b184843607d67b649babb3145bfb3e329cda
Binary files /dev/null and b/docs-en/20-third-party/emqx/client-num.webp differ
diff --git a/docs-en/20-third-party/emqx/create-resource.png b/docs-en/20-third-party/emqx/create-resource.png
deleted file mode 100644
index 58da4c391a3692b9f5fa348d952701eab8bcb746..0000000000000000000000000000000000000000
Binary files a/docs-en/20-third-party/emqx/create-resource.png and /dev/null differ
diff --git a/docs-en/20-third-party/emqx/create-resource.webp b/docs-en/20-third-party/emqx/create-resource.webp
new file mode 100644
index 0000000000000000000000000000000000000000..bf9cccbe49c57f925c5e6b094a4c0d88a64242cb
Binary files /dev/null and b/docs-en/20-third-party/emqx/create-resource.webp differ
diff --git a/docs-en/20-third-party/emqx/create-rule.png b/docs-en/20-third-party/emqx/create-rule.png
deleted file mode 100644
index 73b0b6ee3e6065a142df98abe8c0dbb32b34f89d..0000000000000000000000000000000000000000
Binary files a/docs-en/20-third-party/emqx/create-rule.png and /dev/null differ
diff --git a/docs-en/20-third-party/emqx/create-rule.webp b/docs-en/20-third-party/emqx/create-rule.webp
new file mode 100644
index 0000000000000000000000000000000000000000..13e8fc83d48d2fd9d0a303c707ef3024d3ee5203
Binary files /dev/null and b/docs-en/20-third-party/emqx/create-rule.webp differ
diff --git a/docs-en/20-third-party/emqx/edit-action.png b/docs-en/20-third-party/emqx/edit-action.png
deleted file mode 100644
index 2a43ee369a439cf11cee23c11f25d6a84b26d7dc..0000000000000000000000000000000000000000
Binary files a/docs-en/20-third-party/emqx/edit-action.png and /dev/null differ
diff --git a/docs-en/20-third-party/emqx/edit-action.webp b/docs-en/20-third-party/emqx/edit-action.webp
new file mode 100644
index 0000000000000000000000000000000000000000..7f6d2e36a82b1917930e5d3969115db9359674a0
Binary files /dev/null and b/docs-en/20-third-party/emqx/edit-action.webp differ
diff --git a/docs-en/20-third-party/emqx/edit-resource.png b/docs-en/20-third-party/emqx/edit-resource.png
deleted file mode 100644
index 0a0b3560044f4ed6e0a8f040b74085a7e8948b1f..0000000000000000000000000000000000000000
Binary files a/docs-en/20-third-party/emqx/edit-resource.png and /dev/null differ
diff --git a/docs-en/20-third-party/emqx/edit-resource.webp b/docs-en/20-third-party/emqx/edit-resource.webp
new file mode 100644
index 0000000000000000000000000000000000000000..fd5d278fab16bba4e04e1c348d4086dce77abb98
Binary files /dev/null and b/docs-en/20-third-party/emqx/edit-resource.webp differ
diff --git a/docs-en/20-third-party/emqx/login-dashboard.png b/docs-en/20-third-party/emqx/login-dashboard.png
deleted file mode 100644
index d6c5035c98d860faf639ef6611c6719adf80c47b..0000000000000000000000000000000000000000
Binary files a/docs-en/20-third-party/emqx/login-dashboard.png and /dev/null differ
diff --git a/docs-en/20-third-party/emqx/login-dashboard.webp b/docs-en/20-third-party/emqx/login-dashboard.webp
new file mode 100644
index 0000000000000000000000000000000000000000..f84cee668fb6efe1586515ba0dee3ae2f10a5b30
Binary files /dev/null and b/docs-en/20-third-party/emqx/login-dashboard.webp differ
diff --git a/docs-en/20-third-party/emqx/rule-engine.png b/docs-en/20-third-party/emqx/rule-engine.png
deleted file mode 100644
index db110a837b024c82ee9d22f02dcd3a9d06abdd55..0000000000000000000000000000000000000000
Binary files a/docs-en/20-third-party/emqx/rule-engine.png and /dev/null differ
diff --git a/docs-en/20-third-party/emqx/rule-engine.webp b/docs-en/20-third-party/emqx/rule-engine.webp
new file mode 100644
index 0000000000000000000000000000000000000000..c1711c8cc757cd73fef5cb941a1818756241f7f0
Binary files /dev/null and b/docs-en/20-third-party/emqx/rule-engine.webp differ
diff --git a/docs-en/20-third-party/emqx/rule-header-key-value.png b/docs-en/20-third-party/emqx/rule-header-key-value.png
deleted file mode 100644
index b81b9a9684aa2f98d00b7ec21e5de411fb450312..0000000000000000000000000000000000000000
Binary files a/docs-en/20-third-party/emqx/rule-header-key-value.png and /dev/null differ
diff --git a/docs-en/20-third-party/emqx/rule-header-key-value.webp b/docs-en/20-third-party/emqx/rule-header-key-value.webp
new file mode 100644
index 0000000000000000000000000000000000000000..e645b3822dffec86f4926e78a57eaffa1e7f4d8d
Binary files /dev/null and b/docs-en/20-third-party/emqx/rule-header-key-value.webp differ
diff --git a/docs-en/20-third-party/emqx/run-mock.png b/docs-en/20-third-party/emqx/run-mock.png
deleted file mode 100644
index 0da25818575247732d5d3a783aa52cf7ce24662c..0000000000000000000000000000000000000000
Binary files a/docs-en/20-third-party/emqx/run-mock.png and /dev/null differ
diff --git a/docs-en/20-third-party/emqx/run-mock.webp b/docs-en/20-third-party/emqx/run-mock.webp
new file mode 100644
index 0000000000000000000000000000000000000000..ed33f1666d456f1ab40ed6830af4550d4c7ca037
Binary files /dev/null and b/docs-en/20-third-party/emqx/run-mock.webp differ
diff --git a/docs-en/20-third-party/grafana/add_datasource1.jpg b/docs-en/20-third-party/grafana/add_datasource1.jpg
deleted file mode 100644
index 1f0f5110f312c57f3ec1788bbc02f04fac6ac142..0000000000000000000000000000000000000000
Binary files a/docs-en/20-third-party/grafana/add_datasource1.jpg and /dev/null differ
diff --git a/docs-en/20-third-party/grafana/add_datasource1.webp b/docs-en/20-third-party/grafana/add_datasource1.webp
new file mode 100644
index 0000000000000000000000000000000000000000..211edc4457abd0db6b0ef64636d61d65b5f43db6
Binary files /dev/null and b/docs-en/20-third-party/grafana/add_datasource1.webp differ
diff --git a/docs-en/20-third-party/grafana/add_datasource2.jpg b/docs-en/20-third-party/grafana/add_datasource2.jpg
deleted file mode 100644
index fa7a83e00e96fae649910dff4edf5f5bdadd7850..0000000000000000000000000000000000000000
Binary files a/docs-en/20-third-party/grafana/add_datasource2.jpg and /dev/null differ
diff --git a/docs-en/20-third-party/grafana/add_datasource2.webp b/docs-en/20-third-party/grafana/add_datasource2.webp
new file mode 100644
index 0000000000000000000000000000000000000000..8ab547231fee4d3b0874fcfe08c0ce152b0c53a1
Binary files /dev/null and b/docs-en/20-third-party/grafana/add_datasource2.webp differ
diff --git a/docs-en/20-third-party/grafana/add_datasource3.jpg b/docs-en/20-third-party/grafana/add_datasource3.jpg
deleted file mode 100644
index fc850ad08ff1174de972906842e0d5ee64e6e5cb..0000000000000000000000000000000000000000
Binary files a/docs-en/20-third-party/grafana/add_datasource3.jpg and /dev/null differ
diff --git a/docs-en/20-third-party/grafana/add_datasource3.webp b/docs-en/20-third-party/grafana/add_datasource3.webp
new file mode 100644
index 0000000000000000000000000000000000000000..d8a733360a09b4425c571f254a9ecb298c04b72f
Binary files /dev/null and b/docs-en/20-third-party/grafana/add_datasource3.webp differ
diff --git a/docs-en/20-third-party/grafana/add_datasource4.jpg b/docs-en/20-third-party/grafana/add_datasource4.jpg
deleted file mode 100644
index 3ba73e50d455111f8621f4165746078554c2d790..0000000000000000000000000000000000000000
Binary files a/docs-en/20-third-party/grafana/add_datasource4.jpg and /dev/null differ
diff --git a/docs-en/20-third-party/grafana/add_datasource4.webp b/docs-en/20-third-party/grafana/add_datasource4.webp
new file mode 100644
index 0000000000000000000000000000000000000000..b1e0fc6e2b27df4af1bb5ad92756bcb5d4fda63e
Binary files /dev/null and b/docs-en/20-third-party/grafana/add_datasource4.webp differ
diff --git a/docs-en/20-third-party/grafana/create_dashboard1.jpg b/docs-en/20-third-party/grafana/create_dashboard1.jpg
deleted file mode 100644
index 3b83c3a1714e9e7540e0b06239ef7c1c4f63fe2c..0000000000000000000000000000000000000000
Binary files a/docs-en/20-third-party/grafana/create_dashboard1.jpg and /dev/null differ
diff --git a/docs-en/20-third-party/grafana/create_dashboard1.webp b/docs-en/20-third-party/grafana/create_dashboard1.webp
new file mode 100644
index 0000000000000000000000000000000000000000..55eb388833e4df2a46f4d1cf6d346aa11429385d
Binary files /dev/null and b/docs-en/20-third-party/grafana/create_dashboard1.webp differ
diff --git a/docs-en/20-third-party/grafana/create_dashboard2.jpg b/docs-en/20-third-party/grafana/create_dashboard2.jpg
deleted file mode 100644
index fe5d768ac55254251e0290bf257178f5ff28f5a5..0000000000000000000000000000000000000000
Binary files a/docs-en/20-third-party/grafana/create_dashboard2.jpg and /dev/null differ
diff --git a/docs-en/20-third-party/grafana/create_dashboard2.webp b/docs-en/20-third-party/grafana/create_dashboard2.webp
new file mode 100644
index 0000000000000000000000000000000000000000..bb40e407187718c52e9f617d8ebd3d25fd14b56b
Binary files /dev/null and b/docs-en/20-third-party/grafana/create_dashboard2.webp differ
diff --git a/docs-en/20-third-party/kafka/Kafka_Connect.png b/docs-en/20-third-party/kafka/Kafka_Connect.png
deleted file mode 100644
index f3dc02ea2a743c6e1ae5531e14f820e3adeca29a..0000000000000000000000000000000000000000
Binary files a/docs-en/20-third-party/kafka/Kafka_Connect.png and /dev/null differ
diff --git a/docs-en/20-third-party/kafka/Kafka_Connect.webp b/docs-en/20-third-party/kafka/Kafka_Connect.webp
new file mode 100644
index 0000000000000000000000000000000000000000..8f2000a749b0a2ccec9939abd144c53c44fbe171
Binary files /dev/null and b/docs-en/20-third-party/kafka/Kafka_Connect.webp differ
diff --git a/docs-en/20-third-party/kafka/confluentPlatform.png b/docs-en/20-third-party/kafka/confluentPlatform.png
deleted file mode 100644
index f8e69f2c7f64d809996b2d1bf1370b67b8030850..0000000000000000000000000000000000000000
Binary files a/docs-en/20-third-party/kafka/confluentPlatform.png and /dev/null differ
diff --git a/docs-en/20-third-party/kafka/confluentPlatform.webp b/docs-en/20-third-party/kafka/confluentPlatform.webp
new file mode 100644
index 0000000000000000000000000000000000000000..ff03d4e51aaaec85f07ff41ecda0fb9bd6cb2847
Binary files /dev/null and b/docs-en/20-third-party/kafka/confluentPlatform.webp differ
diff --git a/docs-en/20-third-party/kafka/streaming-integration-with-kafka-connect.png b/docs-en/20-third-party/kafka/streaming-integration-with-kafka-connect.png
deleted file mode 100644
index 26d8a866d706180c900d69bb6f57ca2dff0047dd..0000000000000000000000000000000000000000
Binary files a/docs-en/20-third-party/kafka/streaming-integration-with-kafka-connect.png and /dev/null differ
diff --git a/docs-en/20-third-party/kafka/streaming-integration-with-kafka-connect.webp b/docs-en/20-third-party/kafka/streaming-integration-with-kafka-connect.webp
new file mode 100644
index 0000000000000000000000000000000000000000..120d534ec132cea2ccef6cf87a3ce680a5ac6e9c
Binary files /dev/null and b/docs-en/20-third-party/kafka/streaming-integration-with-kafka-connect.webp differ
diff --git a/docs-en/21-tdinternal/01-arch.md b/docs-en/21-tdinternal/01-arch.md
index 9607c9b38709f6a320f82a8ee250afb407492627..4d8bed4d2d6b3a0404e10213aeab599767325cc2 100644
--- a/docs-en/21-tdinternal/01-arch.md
+++ b/docs-en/21-tdinternal/01-arch.md
@@ -5,38 +5,38 @@ title: Architecture
## Cluster and Primary Logic Unit
-The design of TDengine is based on the assumption that any hardware or software system is not 100% reliable and that no single node can provide sufficient computing and storage resources to process massive data. Therefore, TDengine has been designed in a distributed and high-reliability architecture since day one of the development, so that hardware failure or software failure of any single even multiple servers will not affect the availability and reliability of the system. At the same time, through node virtualization and automatic load-balancing technology, TDengine can make the most efficient use of computing and storage resources in heterogeneous clusters to reduce hardware resources significantly.
+The design of TDengine is based on the assumption that any hardware or software system is not 100% reliable and that no single node can provide sufficient computing and storage resources to process massive data. Therefore, since day one, TDengine has been designed as a natively distributed system, with high-reliability architecture. Hardware failure or software failure of a single, or even multiple servers will not affect the availability and reliability of the system. At the same time, through node virtualization and automatic load-balancing technology, TDengine can make the most efficient use of computing and storage resources in heterogeneous clusters to reduce hardware resource needs, significantly.
### Primary Logic Unit
-Logical structure diagram of TDengine distributed architecture as following:
+Logical structure diagram of TDengine's distributed architecture is as follows:
-
+
Figure 1: TDengine architecture diagram
A complete TDengine system runs on one or more physical nodes. Logically, it includes data node (dnode), TDengine client driver (TAOSC) and application (app). There are one or more data nodes in the system, which form a cluster. The application interacts with the TDengine cluster through TAOSC's API. The following is a brief introduction to each logical unit.
**Physical node (pnode)**: A pnode is a computer that runs independently and has its own computing, storage and network capabilities. It can be a physical machine, virtual machine, or Docker container installed with OS. The physical node is identified by its configured FQDN (Fully Qualified Domain Name). TDengine relies entirely on FQDN for network communication. If you don't know about FQDN, please check [wikipedia](https://en.wikipedia.org/wiki/Fully_qualified_domain_name).
-**Data node (dnode):** A dnode is a running instance of the TDengine server-side execution code taosd on a physical node. A working system must have at least one data node. A dnode contains zero to multiple logical virtual nodes (VNODE), zero or at most one logical management node (mnode). The unique identification of a dnode in the system is determined by the instance's End Point (EP). EP is a combination of FQDN (Fully Qualified Domain Name) of the physical node where the dnode is located and the network port number (Port) configured by the system. By configuring different ports, a physical node (a physical machine, virtual machine or container) can run multiple instances or have multiple data nodes.
+**Data node (dnode):** A dnode is a running instance of the TDengine server-side execution code taosd on a physical node (pnode). A working system must have at least one data node. A dnode contains zero to multiple logical virtual nodes (VNODE) and zero or at most one logical management node (mnode). The unique identification of a dnode in the system is determined by the instance's End Point (EP). EP is a combination of FQDN (Fully Qualified Domain Name) of the physical node where the dnode is located and the network port number (Port) configured by the system. By configuring different ports, a physical node (a physical machine, virtual machine or container) can run multiple instances or have multiple data nodes.
-**Virtual node (vnode)**: To better support data sharding, load balancing and prevent data from overheating or skewing, data nodes are virtualized into multiple virtual nodes (vnode, V2, V3, V4, etc. in the figure). Each vnode is a relatively independent work unit, which is the basic unit of time-series data storage and has independent running threads, memory space and persistent storage path. A vnode contains a certain number of tables (data collection points). When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a data node depends on the hardware capacities of the physical node where the data node is located. A vnode belongs to only one DB, but a DB can have multiple vnodes. In addition to the stored time-series data, a vnode also stores the schema and tag values of the included tables. A virtual node is uniquely identified in the system by the EP of the data node and the VGroup ID to which it belongs and is created and managed by the management node.
+**Virtual node (vnode)**: To better support data sharding, load balancing and prevent data from overheating or skewing, data nodes are virtualized into multiple virtual nodes (vnode, V2, V3, V4, etc. in the figure). Each vnode is a relatively independent work unit, which is the basic unit of time-series data storage and has independent running threads, memory space and persistent storage path. A vnode contains a certain number of tables (data collection points). When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a data node depends on the capacity of the hardware of the physical node where the data node is located. A vnode belongs to only one DB, but a DB can have multiple vnodes. In addition to the stored time-series data, a vnode also stores the schema and tag values of the included tables. A virtual node is uniquely identified in the system by the EP of the data node and the VGroup ID to which it belongs and is created and managed by the management node.
-**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in the figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 5) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The master/slave mechanism is adopted for the mnode group and the data synchronization is carried out in a strongly consistent way. Any data update operation can only be executed on the master. The creation of mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located through internal messaging interaction.
+**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in the figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 5) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The master/slave mechanism is adopted for the mnode group and the data synchronization is carried out in a strongly consistent way. Any data update operation can only be executed on the master. The creation of mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located, through internal messaging interaction.
-**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed in a master/slave mechanism. Write operations can only be performed on the master vnode, and then replicated to slave vnodes, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `“replica”` when creating DB, and the default is 1. Using the multi-replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. Virtual node group is created and managed by the management node, and the management node assigns a system unique ID, aka VGroup ID. If two virtual nodes have the same vnode group ID, means that they belong to the same group and the data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed, allowing only one, that is, no data replication. VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused.
+**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed in a master/slave mechanism. Write operations can only be performed on the master vnode, and then replicated to slave vnodes, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `“replica”` when creating a DB, and the default is 1. Using the multi-replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. Virtual node groups are created and managed by the management node, and the management node assigns a system unique ID, aka VGroup ID. If two virtual nodes have the same vnode group ID, it means that they belong to the same group and the data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed, allowing only one, that is, no data replication. VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused.
-**TAOSC**: TAOSC is the driver provided by TDengine to applications, which is responsible for dealing with the interaction between application and cluster, and provides the native interface of C/C++ language, which is embedded in JDBC, C #, Python, Go, Node.js language connection libraries. Applications interact with the whole cluster through TAOSC instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; when returning the results to the application, TAOSC also needs to be responsible for the final level of aggregation, sorting, filtering and other operations. For JDBC, C/C++/C #/Python/Go/Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, TAOSC has a running instance on each dnode of TDengine cluster.
+**TAOSC**: TAOSC is the driver provided by TDengine to applications. It is responsible for dealing with the interaction between application and cluster, and provides the native interface for the C/C++ language. It is also embedded in the JDBC, C #, Python, Go, Node.js language connection libraries. Applications interact with the whole cluster through TAOSC instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; when returning the results to the application, TAOSC also needs to be responsible for the final level of aggregation, sorting, filtering and other operations. For JDBC, C/C++/C#/Python/Go/Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, TAOSC has a running instance on each dnode of TDengine cluster.
### Node Communication
-**Communication mode**: The communication among each data node of TDengine system, and among the client driver and each data node is carried out through TCP/UDP. Considering an IoT scenario, the data writing packets are generally not large, so TDengine uses UDP in addition to TCP for transmission, because UDP is more efficient and is not limited by the number of connections. TDengine implements its own timeout, retransmission, confirmation and other mechanisms to ensure reliable transmission of UDP. For packets with a data volume of less than 15K, UDP is adopted for transmission, and TCP is automatically adopted for transmission of packets with a data volume of more than 15K or query operations. At the same time, TDengine will automatically compress/decompress the data, digital sign/authenticate the data according to the configuration and data packet. For data replication among data nodes, only TCP is used for data transportation.
+**Communication mode**: The communication among each data node of TDengine system, and among the client driver and each data node is carried out through TCP/UDP. Considering an IoT scenario, the data writing packets are generally not large, so TDengine uses UDP in addition to TCP for transmission, because UDP is more efficient and is not limited by the number of connections. TDengine implements its own timeout, retransmission, confirmation and other mechanisms to ensure reliable transmission of UDP. For packets with a data volume of less than 15K, UDP is adopted for transmission, and TCP is automatically adopted for transmission of packets with a data volume of more than 15K or query operations. At the same time, TDengine will automatically compress/decompress the data, digitally sign/authenticate the data according to the configuration and data packet. For data replication among data nodes, only TCP is used for data transportation.
**FQDN configuration:** A data node has one or more FQDNs, which can be specified in the system configuration file taos.cfg with the parameter “fqdn”. If it is not specified, the system will automatically use the hostname of the computer as its FQDN. If the node is not configured with FQDN, you can directly set the configuration parameter “fqdn” of the node to its IP address. However, IP is not recommended because IP address may be changed, and once it changes, the cluster will not work properly. The EP (End Point) of a data node consists of FQDN + Port. With FQDN, it is necessary to ensure the DNS service is running, or hosts files on nodes are configured properly.
**Port configuration**: The external port of a data node is determined by the system configuration parameter “serverPort” in TDengine, and the port for internal communication of cluster is serverPort+5. The data replication operation among data nodes in the cluster also occupies a TCP port, which is serverPort+10. In order to support multithreading and efficient processing of UDP data, each internal and external UDP connection needs to occupy 5 consecutive ports. Therefore, the total port range of a data node will be serverPort to serverPort + 10, for a total of 11 TCP/UDP ports. To run the system, make sure that the firewall keeps these ports open. Each data node can be configured with a different serverPort.
-**Cluster external connection**: TDengine cluster can accommodate one single, multiple or even thousands of data nodes. The application only needs to initiate a connection to any data node in the cluster. The network parameter required for connection is the End Point (FQDN plus configured port number) of a data node. When starting the application taos through CLI, the FQDN of the data node can be specified through the option `-h`, and the configured port number can be specified through `-p`. If the port is not configured, the system configuration parameter “serverPort” of TDengine will be adopted.
+**Cluster external connection**: TDengine cluster can accommodate a single, multiple or even thousands of data nodes. The application only needs to initiate a connection to any data node in the cluster. The network parameter required for connection is the End Point (FQDN plus configured port number) of a data node. When starting the application taos through CLI, the FQDN of the data node can be specified through the option `-h`, and the configured port number can be specified through `-p`. If the port is not configured, the system configuration parameter “serverPort” of TDengine will be adopted.
**Inter-cluster communication**: Data nodes connect with each other through TCP/UDP. When a data node starts, it will obtain the EP information of the dnode where the mnode is located, and then establish a connection with the mnode in the system to exchange information. There are three steps to obtain EP information of the mnode:
@@ -44,31 +44,33 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc
2. Check the system configuration file taos.cfg to obtain node configuration parameters “firstEp” and “secondEp” (the node specified by these two parameters can be a normal node without mnode, in this case, the node will try to redirect to the mnode node when connected). If these two configuration parameters do not exist or do not exist in taos.cfg, or are invalid, skip to the third step;
3. Set your own EP as a mnode EP and run it independently. After obtaining the mnode EP list, the data node initiates the connection. It will successfully join the working cluster after connection. If not successful, it will try the next item in the mnode EP list. If all attempts are made, but the connection still fails, sleep for a few seconds before trying again.
-**The choice of MNODE**: TDengine logically has a management node, but there is no separated execution code. The server-side only has a set of execution code taosd. So which data node will be the management node? This is determined automatically by the system without any manual intervention. The principle is as follows: when a data node starts, it will check its End Point and compare it with the obtained mnode EP List. If its EP exists in it, the data node shall start the mnode module and become a mnode. If your own EP is not in the mnode EP List, the mnode module will not start. During the system operation, due to load balancing, downtime and other reasons, mnode may migrate to the new dnode, while totally transparent without manual intervention. The modification of configuration parameters is the decision made by mnode itself according to resources usage.
+**The choice of MNODE**: TDengine logically has a management node, but there is no separate execution code. The server-side only has one set of execution code, taosd. So which data node will be the management node? This is determined automatically by the system without any manual intervention. The principle is as follows: when a data node starts, it will check its End Point and compare it with the obtained mnode EP List. If its EP exists in it, the data node shall start the mnode module and become a mnode. If your own EP is not in the mnode EP List, the mnode module will not start. During the system operation, due to load balancing, downtime and other reasons, mnode may migrate to the new dnode, totally transparently and without manual intervention. The modification of configuration parameters is the decision made by mnode itself according to resources usage.
-**Add new data nodes:** After the system has a data node, it has become a working system. There are two steps to add a new node into the cluster. Step1: Connect to the existing working data node using TDengine CLI, and then add the End Point of the new data node with the command "create dnode"; Step 2: In the system configuration parameter file taos.cfg of the new data node, set the “firstEp” and “secondEp” parameters to the EP of any two data nodes in the existing cluster. Please refer to the detailed user tutorial for detailed steps. In this way, the cluster will be established step by step.
+**Add new data nodes:** After the system has a data node, it has become a working system. There are two steps to add a new node into the cluster.
+- Step1: Connect to the existing working data node using TDengine CLI, and then add the End Point of the new data node with the command "create dnode"
+- Step 2: In the system configuration parameter file taos.cfg of the new data node, set the “firstEp” and “secondEp” parameters to the EP of any two data nodes in the existing cluster. Please refer to the user tutorial for detailed steps. In this way, the cluster will be established step by step.
-**Redirection**: No matter about dnode or TAOSC, the connection to the mnode shall be initiated first, but the mnode is automatically created and maintained by the system, so the user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Because any running dnode maintains the currently running mnode EP List, when receiving a connecting request from the newly started dnode or TAOSC, if it’s not a mnode by self, it will reply to the mnode EP List back. After receiving this list, TAOSC or the newly started dnode will try to establish the connection again. When the mnode EP List changes, each data node quickly obtains the latest list and notifies TAOSC through messaging interaction among nodes.
+**Redirection**: Regardless of dnode or TAOSC, the connection to the mnode is initiated first. The mnode is automatically created and maintained by the system, so the user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Because any running dnode maintains the currently running mnode EP List, when receiving a connecting request from the newly started dnode or TAOSC, if it’s not an mnode itself, it will reply to the mnode with the EP List. After receiving this list, TAOSC or the newly started dnode will try to establish the connection again. When the mnode EP List changes, each data node quickly obtains the latest list and notifies TAOSC through messaging interaction among nodes.
### A Typical Data Writing Process
To explain the relationship between vnode, mnode, TAOSC and application and their respective roles, the following is an analysis of a typical data writing process.
-
+
Figure 2: Typical process of TDengine
1. Application initiates a request to insert data through JDBC, ODBC, or other APIs.
-2. TAOSC checks if meta data existing for the table in the cache. If so, go straight to Step 4. If not, TAOSC sends a get meta-data request to mnode.
+2. TAOSC checks the cache to see if meta data exists for the table. If it does, it goes straight to Step 4. If not, TAOSC sends a get meta-data request to mnode.
3. Mnode returns the meta-data of the table to TAOSC. Meta-data contains the schema of the table, and also the vgroup information to which the table belongs (the vnode ID and the End Point of the dnode where the table belongs. If the number of replicas is N, there will be N groups of End Points). If TAOSC does not receive a response from the mnode for a long time, and there are multiple mnodes, TAOSC will send a request to the next mnode.
4. TAOSC initiates an insert request to master vnode.
5. After vnode inserts the data, it gives a reply to TAOSC, indicating that the insertion is successful. If TAOSC doesn't get a response from vnode for a long time, TAOSC will treat this node as offline. In this case, if there are multiple replicas of the inserted database, TAOSC will issue an insert request to the next vnode in vgroup.
6. TAOSC notifies APP that writing is successful.
-For Step 2 and 3, when TAOSC starts, it does not know the End Point of mnode, so it will directly initiate a request to the configured serving End Point of the cluster. If the dnode that receives the request does not have a mnode configured, it will inform the mnode EP list in a reply message, so that TAOSC will re-issue a request to obtain meta-data to the EP of another new mnode.
+For Step 2 and 3, when TAOSC starts, it does not know the End Point of mnode, so it will directly initiate a request to the configured serving End Point of the cluster. If the dnode that receives the request does not have a mnode configured, it will reply with the mnode EP list, so that TAOSC will re-issue a request to obtain meta-data to the EP of another mnode.
-For Step 4 and 5, without caching, TAOSC can't recognize the master in the virtual node group, so assumes that the first vnode is the master and sends a request to it. If this vnode is not the master, it will reply to the actual master as a new target where TAOSC shall send a request to. Once the reply of successful insertion is obtained, TAOSC will cache the information of master node.
+For Step 4 and 5, without caching, TAOSC can't recognize the master in the virtual node group, so assumes that the first vnode is the master and sends a request to it. If this vnode is not the master, it will reply to the actual master as a new target to which TAOSC shall send a request. Once a response of successful insertion is obtained, TAOSC will cache the information of master node.
-The above is the process of inserting data, and the processes of querying and computing are the same. TAOSC encapsulates and hides all these complicated processes, and it is transparent to applications.
+The above describes the process of inserting data. The processes of querying and computing are the same. TAOSC encapsulates and hides all these complicated processes, and it is transparent to applications.
Through TAOSC caching mechanism, mnode needs to be accessed only when a table is accessed for the first time, so mnode will not become a system bottleneck. However, because schema and vgroup may change (such as load balancing), TAOSC will interact with mnode regularly to automatically update the cache.
@@ -76,24 +78,24 @@ Through TAOSC caching mechanism, mnode needs to be accessed only when a table is
### Storage Model
-The data stored by TDengine include collected time-series data, metadata related to database and tables, tag data, etc. These data are specifically divided into three parts:
+The data stored by TDengine includes collected time-series data, metadata related to database and tables, tag data, etc. All of the data is specifically divided into three parts:
-- Time-series data: stored in vnode and composed of data, head and last files. The amount of data is large and query amount depends on the application scenario. Out-of-order writing is allowed, but delete operation is not supported for the time being, and update operation is only allowed when database “update” parameter is set to 1. By adopting the model with **one table for each data collection point**, the data of a given time period is continuously stored, and the writing against one single table is a simple appending operation. Multiple records can be read at one time, thus ensuring the insert and query operation of a single data collection point with the best performance.
-- Tag data: meta files stored in vnode. Four standard operations of create, read, update and delete are supported. The amount of data is not large. If there are N tables, there are N records, so all can be stored in memory. To make tag filtering efficient, TDengine supports multi-core and multi-threaded concurrent queries. As long as the computing resources are sufficient, even in face of millions of tables, the tag filtering results will return in milliseconds.
-- Metadata: stored in mnode, including system node, user, DB, Table Schema and other information. Four standard operations of create, delete, update and read are supported. The amount of these data are not large and can be stored in memory, moreover, the query amount is not large because of the client cache. Therefore, TDengine uses centralized storage management, however, there will be no performance bottleneck.
+- Time-series data: stored in vnode and composed of data, head and last files. The amount of data is large and query amount depends on the application scenario. Out-of-order writing is allowed, but delete operation is not supported for the time being, and update operation is only allowed when database “update” parameter is set to 1. By adopting the model with **one table for each data collection point**, the data of a given time period is continuously stored, and the writing against one single table is a simple appending operation. Multiple records can be read at one time, thus ensuring the best performance for both insert and query operations of a single data collection point.
+- Tag data: meta files stored in vnode. Four standard operations of create, read, update and delete are supported. The amount of data is not large. If there are N tables, there are N records, so all can be stored in memory. To make tag filtering efficient, TDengine supports multi-core and multi-threaded concurrent queries. As long as the computing resources are sufficient, even with millions of tables, the tag filtering results will return in milliseconds.
+- Metadata: stored in mnode and includes system node, user, DB, table schema and other information. Four standard operations of create, delete, update and read are supported. The amount of this data is not large and can be stored in memory. Moreover, the number of queries is not large because of client cache. Even though TDengine uses centralized storage management, because of the architecture, there is no performance bottleneck.
-Compared with the typical NoSQL storage model, TDengine stores tag data and time-series data completely separately, which has two major advantages:
+Compared with the typical NoSQL storage model, TDengine stores tag data and time-series data completely separately. This has two major advantages:
-- Reduce the redundancy of tag data storage significantly: general NoSQL database or time-series database adopts K-V storage, in which Key includes a timestamp, a device ID and various tags. Each record carries these duplicated tags, so storage space is wasted. Moreover, if the application needs to add, modify or delete tags on historical data, it has to traverse the data and rewrite them again, which is extremely expensive to operate.
-- Aggregate data efficiently between multiple tables: when aggregating data between multiple tables, it first finds out the tables which satisfy the filtering conditions, and then find out the corresponding data blocks of these tables to greatly reduce the data sets to be scanned, thus greatly improving the aggregation efficiency. Moreover, tag data is managed and maintained in a full-memory structure, and tag data queries in tens of millions can return in milliseconds.
+- Reduces the redundancy of tag data storage significantly. General NoSQL database or time-series database adopts K-V (key-value) storage, in which the key includes a timestamp, a device ID and various tags. Each record carries these duplicated tags, so storage space is wasted. Moreover, if the application needs to add, modify or delete tags on historical data, it has to traverse the data and rewrite them again, which is an extremely expensive operation.
+- Aggregate data efficiently between multiple tables: when aggregating data between multiple tables, it first finds the tables which satisfy the filtering conditions, and then finds the corresponding data blocks of these tables. This greatly reduces the data sets to be scanned which in turn improves the aggregation efficiency. Moreover, tag data is managed and maintained in a full-memory structure, and tag data queries in tens of millions can return in milliseconds.
### Data Sharding
-For large-scale data management, to achieve scale-out, it is generally necessary to adopt the Partitioning or Sharding strategy. TDengine implements data sharding via vnode, and time-series data partitioning via one data file for a time range.
+For large-scale data management, to achieve scale-out, it is generally necessary to adopt a Partitioning or Sharding strategy. TDengine implements data sharding via vnode, and time-series data partitioning via one data file for a time range.
VNode (Virtual Data Node) is responsible for providing writing, query and computing functions for collected time-series data. To facilitate load balancing, data recovery and support heterogeneous environments, TDengine splits a data node into multiple vnodes according to its computing and storage resources. The management of these vnodes is done automatically by TDengine and is completely transparent to the application.
-For a single data collection point, regardless of the amount of data, a vnode (or vnode group, if the number of replicas is greater than 1) has enough computing resource and storage resource to process (if a 16-byte record is generated per second, the original data generated in one year will be less than 0.5 G), so TDengine stores all the data of a table (a data collection point) in one vnode instead of distributing the data to two or more dnodes. Moreover, a vnode can store data from multiple data collection points (tables), and the upper limit of the tables’ quantity for a vnode is one million. By design, all tables in a vnode belong to the same DB. On a data node, unless specially configured, the number of vnodes owned by a DB will not exceed the number of system cores.
+For a single data collection point, regardless of the amount of data, a vnode (or vnode group, if the number of replicas is greater than 1) has enough computing resource and storage resource to process (if a 16-byte record is generated per second, the original data generated in one year will be less than 0.5 G). So TDengine stores all the data of a table (a data collection point) in one vnode instead of distributing the data to two or more dnodes. Moreover, a vnode can store data from multiple data collection points (tables), and the upper limit of the tables’ quantity for a vnode is one million. By design, all tables in a vnode belong to the same DB. On a data node, unless specially configured, the number of vnodes owned by a DB will not exceed the number of system cores.
When creating a DB, the system does not allocate resources immediately. However, when creating a table, the system will check if there is an allocated vnode with free tablespace. If so, the table will be created in the vacant vnode immediately. If not, the system will create a new vnode on a dnode from the cluster according to the current workload, and then a table. If there are multiple replicas of a DB, the system does not create only one vnode, but a vgroup (virtual data node group). The system has no limit on the number of vnodes, which is just limited by the computing and storage resources of physical nodes.
@@ -101,43 +103,43 @@ The meta data of each table (including schema, tags, etc.) is also stored in vno
### Data Partitioning
-In addition to vnode sharding, TDengine partitions the time-series data by time range. Each data file contains only one time range of time-series data, and the length of the time range is determined by DB's configuration parameter `“days”`. This method of partitioning by time rang is also convenient to efficiently implement the data retention policy. As long as the data file exceeds the specified number of days (system configuration parameter `“keep”`), it will be automatically deleted. Moreover, different time ranges can be stored in different paths and storage media, so as to facilitate the tiered-storage. Cold/hot data can be stored in different storage media to reduce the storage cost.
+In addition to vnode sharding, TDengine partitions the time-series data by time range. Each data file contains only one time range of time-series data, and the length of the time range is determined by the database configuration parameter `“days”`. This method of partitioning by time range is also convenient to efficiently implement data retention policies. As long as the data file exceeds the specified number of days (system configuration parameter `“keep”`), it will be automatically deleted. Moreover, different time ranges can be stored in different paths and storage media, so as to facilitate tiered-storage. Cold/hot data can be stored in different storage media to significantly reduce storage costs.
In general, **TDengine splits big data by vnode and time range in two dimensions** to manage the data efficiently with horizontal scalability.
### Load Balancing
-Each dnode regularly reports its status (including hard disk space, memory size, CPU, network, number of virtual nodes, etc.) to the mnode (virtual management node), so mnode knows the status of the entire cluster. Based on the overall status, when the mnode finds a dnode is overloaded, it will migrate one or more vnodes to other dnodes. During the process, TDengine services keep running and the data insertion, query and computing operations are not affected.
+Each dnode regularly reports its status (including hard disk space, memory size, CPU, network, number of virtual nodes, etc.) to the mnode (virtual management node) so that the mnode knows the status of the entire cluster. Based on the overall status, when the mnode finds a dnode is overloaded, it will migrate one or more vnodes to other dnodes. During the process, TDengine services keep running and the data insertion, query and computing operations are not affected.
-If the mnode has not received the dnode status for a period of time, the dnode will be treated as offline. When offline lasts a certain period of time (configured by parameter `“offlineThreshold”`), the dnode will be forcibly removed from the cluster by mnode. If the number of replicas of vnodes on this dnode is greater than one, the system will automatically create new replicas on other dnodes to ensure the replica number. If there are other mnodes on this dnode and the number of mnodes replicas is greater than one, the system will automatically create new mnodes on other dnodes to ensure the replica number.
+If the mnode has not received the dnode status for a period of time, the dnode will be treated as offline. If the dnode stays offline beyond the time configured by parameter `“offlineThreshold”`, the dnode will be forcibly removed from the cluster by mnode. If the number of replicas of vnodes on this dnode is greater than one, the system will automatically create new replicas on other dnodes to ensure the replica number. If there are other mnodes on this dnode and the number of mnodes replicas is greater than one, the system will automatically create new mnodes on other dnodes to ensure the replica number.
-When new data nodes are added to the cluster, with new computing and storage resources are added, the system will automatically start the load balancing process.
+When new data nodes are added to the cluster, with new computing and storage resources, the system will automatically start the load balancing process.
The load balancing process does not require any manual intervention, and it is transparent to the application. **Note: load balancing is controlled by parameter “balance”, which determines to turn on/off automatic load balancing.**
## Data Writing and Replication Process
-If a database has N replicas, thus a virtual node group has N virtual nodes, but only one as Master and all others are slaves. When the application writes a new record to system, only the Master vnode can accept the writing request. If a slave vnode receives a writing request, the system will notifies TAOSC to redirect.
+If a database has N replicas, a virtual node group has N virtual nodes. But only one is the Master and all others are slaves. When the application writes a new record to system, only the Master vnode can accept the writing request. If a slave vnode receives a writing request, the system will notifies TAOSC to redirect.
### Master vnode Writing Process
Master Vnode uses a writing process as follows:
-
+
Figure 3: TDengine Master writing process
1. Master vnode receives the application data insertion request, verifies, and moves to next step;
2. If the system configuration parameter `“walLevel”` is greater than 0, vnode will write the original request packet into database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will make WAL data written immediately to ensure that even system goes down, all data can be recovered from database log file;
3. If there are multiple replicas, vnode will forward data packet to slave vnodes in the same virtual node group, and the forwarded packet has a version number with data;
4. Write into memory and add the record to “skip list”;
-5. Master vnode returns a confirmation message to the application, indicating a successful writing.
+5. Master vnode returns a confirmation message to the application, indicating a successful write.
6. If any of Step 2, 3 or 4 fails, the error will directly return to the application.
### Slave vnode Writing Process
For a slave vnode, the write process as follows:
-
+
Figure 4: TDengine Slave Writing Process
1. Slave vnode receives a data insertion request forwarded by Master vnode;
@@ -146,19 +148,19 @@ For a slave vnode, the write process as follows:
Compared with Master vnode, slave vnode has no forwarding or reply confirmation step, means two steps less. But writing into memory and WAL is exactly the same.
-### Remote Disaster Recovery and IDC Migration
+### Remote Disaster Recovery and IDC (Internet Data Center) Migration
-As above Master and Slave processes discussed, TDengine adopts asynchronous replication for data synchronization. This method can greatly improve the writing performance, with no obvious impact from network delay. By configuring IDC and rack number for each physical node, it can be ensured that for a virtual node group, virtual nodes are composed of physical nodes from different IDC and different racks, thus implementing remote disaster recovery without other tools.
+As discussed above, TDengine writes using Master and Slave processes. TDengine adopts asynchronous replication for data synchronization. This method can greatly improve write performance, with no obvious impact from network delay. By configuring IDC and rack number for each physical node, it can be ensured that for a virtual node group, virtual nodes are composed of physical nodes from different IDC and different racks, thus implementing remote disaster recovery without other tools.
-On the other hand, TDengine supports dynamic modification of the replicas number. Once the number of replicas increases, the newly added virtual nodes will immediately enter the data synchronization process. After synchronization completed, added virtual nodes can provide services. In the synchronization process, master and other synchronized virtual nodes keep serving. With this feature, TDengine can provide IDC migration without service interruption. It is only necessary to add new physical nodes to the existing IDC cluster, and then remove old physical nodes after the data synchronization is completed.
+On the other hand, TDengine supports dynamic modification of the replica number. Once the number of replicas increases, the newly added virtual nodes will immediately enter the data synchronization process. After synchronization is complete, added virtual nodes can provide services. In the synchronization process, master and other synchronized virtual nodes keep serving. With this feature, TDengine can provide IDC migration without service interruption. It is only necessary to add new physical nodes to the existing IDC cluster, and then remove old physical nodes after the data synchronization is completed.
-However, the asynchronous replication has a tiny time window where data can be lost. The specific scenario is as follows:
+However, the asynchronous replication has a very low probability scenario where data may be lost. The specific scenario is as follows:
-1. Master vnode has finished its 5-step operations, confirmed the success of writing to APP, and then went down;
+1. Master vnode has finished its 5-step operations, confirmed the success of writing to APP, and then goes down;
2. Slave vnode receives the write request, then processing fails before writing to the log in Step 2;
3. Slave vnode will become the new master, thus losing one record.
-In theory, for asynchronous replication, there is no guarantee to prevent data loss. However, this window is extremely small, only if mater and slave fail at the same time, and just confirm the successful write to the application before.
+In theory, for asynchronous replication, there is no guarantee to prevent data loss. However, this is an extremely low probability scenario as described above.
Note: Remote disaster recovery and no-downtime IDC migration are only supported by Enterprise Edition. **Hint: This function is not available yet**
@@ -171,43 +173,43 @@ When a vnode starts, the roles (master, slave) are uncertain, and the data is in
1. If there’s only one replica, it’s always master
2. When all replicas are online, the one with latest version is master
3. Over half of online nodes are virtual nodes, and some virtual node is slave, it will automatically become master
-4. For 2 and 3, if multiple virtual nodes meet the requirement, the first vnode in virtual node group list will be selected as master
+4. For 2 and 3, if multiple virtual nodes meet the requirement, the first vnode in virtual node group list will be selected as master.
### Synchronous Replication
For scenarios with strong data consistency requirements, asynchronous data replication is not applicable, because there is a small probability of data loss. So, TDengine provides a synchronous replication mechanism for users. When creating a database, in addition to specifying the number of replicas, user also needs to specify a new parameter “quorum”. If quorum is greater than one, it means that every time the Master forwards a message to the replica, it needs to wait for “quorum-1” reply confirms before informing the application that data has been successfully written in slave. If “quorum-1” reply confirms are not received within a certain period of time, the master vnode will return an error to the application.
-With synchronous replication, performance of system will decrease and latency will increase. Because metadata needs strong consistent, the default for data synchronization between mnodes is synchronous replication.
+With synchronous replication, performance of system will decrease and latency will increase. Because metadata needs strong consistency, the default for data synchronization between mnodes is synchronous replication.
## Caching and Persistence
### Caching
-TDengine adopts a time-driven cache management strategy (First-In-First-Out, FIFO), also known as a Write-driven Cache Management Mechanism. This strategy is different from the read-driven data caching mode (Least-Recent-Used, LRU), which directly put the most recently written data in the system buffer. When the buffer reaches a threshold, the earliest data are written to disk in batches. Generally speaking, for the use of IoT data, users are most concerned about the newly generated data, that is, the current status. TDengine takes full advantage of this feature to put the most recently arrived (current state) data in the buffer.
+TDengine adopts a time-driven cache management strategy (First-In-First-Out, FIFO), also known as a Write-driven Cache Management Mechanism. This strategy is different from the read-driven data caching mode (Least-Recent-Used, LRU), which directly puts the most recently written data in the system buffer. When the buffer reaches a threshold, the earliest data are written to disk in batches. Generally speaking, for the use of IoT data, users are most concerned about the most recently generated data, that is, the current status. TDengine takes full advantage of this feature to put the most recently arrived (current state) data in the buffer.
-TDengine provides millisecond-level data collecting capability to users through query functions. Putting the recently arrived data directly in the buffer can respond to users' analysis query for the latest piece or batch of data more quickly, and provide faster database query response capability as a whole. In this sense, **TDengine can be used as a data cache by setting appropriate configuration parameters without deploying Redis or other additional cache systems**, which can effectively simplify the system architecture and reduce the operation costs. It should be noted that after the TDengine is restarted, the buffer of the system will be emptied, the previously cached data will be written to disk in batches, and the previously cached data will not be reloaded into the buffer as so in a proprietary key-value cache system.
+TDengine provides millisecond-level data collecting capability to users through query functions. Putting the recently arrived data directly in the buffer can respond to users' analysis query for the latest piece or batch of data more quickly, and provide faster database query response capability as a whole. In this sense, **TDengine can be used as a data cache by setting appropriate configuration parameters without deploying Redis or other additional cache systems**. This can effectively simplify the system architecture and reduce operational costs. It should be noted that after TDengine is restarted, the buffer of the system will be emptied, the previously cached data will be written to disk in batches, and the previously cached data will not be reloaded into the buffer. In this sense, TDengine's cache differs from proprietary key-value cache systems.
Each vnode has its own independent memory, and it is composed of multiple memory blocks of fixed size, and different vnodes are completely isolated. When writing data, similar to the writing of logs, data is sequentially added to memory, but each vnode maintains its own skip list for quick search. When more than one third of the memory block are used, the disk writing operation will start, and the subsequent writing operation is carried out in a new memory block. By this design, one third of the memory blocks in a vnode keep the latest data, so as to achieve the purpose of caching and quick search. The number of memory blocks of a vnode is determined by the configuration parameter “blocks”, and the size of memory blocks is determined by the configuration parameter “cache”.
### Persistent Storage
-TDengine uses a data-driven method to write the data from buffer into hard disk for persistent storage. When the cached data in vnode reaches a certain volume, TDengine will also pull up the disk-writing thread to write the cached data into persistent storage in order not to block subsequent data writing. TDengine will open a new database log file when the data is written, and delete the old database log file after written successfully to avoid unlimited log growth.
+TDengine uses a data-driven method to write the data from buffer into hard disk for persistent storage. When the cached data in vnode reaches a certain volume, TDengine will pull up the disk-writing thread to write the cached data into persistent storage so that subsequent data writing is not blocked. TDengine will open a new database log file when the data is written, and delete the old database log file after successfull persistence, to avoid unlimited log growth.
-To make full use of the characteristics of time-series data, TDengine splits the data stored in persistent storage by a vnode into multiple files, each file only saves data for a fixed number of days, which is determined by the system configuration parameter `“days”`. By so, for the given start and end date of a query, you can locate the data files to open immediately without any index, thus greatly speeding up reading operations.
+To make full use of the characteristics of time-series data, TDengine splits the data stored in persistent storage by a vnode into multiple files, each file only saves data for a fixed number of days, which is determined by the system configuration parameter `“days”`. Thus for given start and end dates of a query, you can locate the data files to open immediately without any index. This greatly speeds up read operations.
For time-series data, there is generally a retention policy, which is determined by the system configuration parameter `“keep”`. Data files exceeding this set number of days will be automatically deleted by the system to free up storage space.
Given “days” and “keep” parameters, the total number of data files in a vnode is: keep/days. The total number of data files should not be too large or too small. 10 to 100 is appropriate. Based on this principle, reasonable days can be set. In the current version, parameter “keep” can be modified, but parameter “days” cannot be modified once it is set.
-In each data file, the data of a table is stored by blocks. A table can have one or more data file blocks. In a file block, data is stored in columns, occupying a continuous storage space, thus greatly improving the reading speed. The size of file block is determined by the system parameter `“maxRows”` (the maximum number of records per block), and the default value is 4096. This value should not be too large or too small. If it is too large, the data locating in search will cost longer; if too small, the index of data block is too large, and the compression efficiency will be low with slower reading speed.
+In each data file, the data of a table is stored in blocks. A table can have one or more data file blocks. In a file block, data is stored in columns, occupying a continuous storage space, thus greatly improving the reading speed. The size of file block is determined by the system parameter `“maxRows”` (the maximum number of records per block), and the default value is 4096. This value should not be too large or too small. If it is too large, data location for queries will take a longer tim. If it is too small, the index of data block is too large, and the compression efficiency will be low with slower reading speed.
-Each data file (with a .data postfix) has a corresponding index file (with a .head postfix). The index file has summary information of a data block for each table, recording the offset of each data block in the data file, start and end time of data and other information, so as to lead system quickly locate the data to be found. Each data file also has a corresponding last file (with a .last postfix), which is designed to prevent data block fragmentation when written in disk. If the number of written records from a table does not reach the system configuration parameter `“minRows”` (minimum number of records per block), it will be stored in the last file first. When write to disk next time, the newly written records will be merged with the records in last file and then written into data file.
+Each data file (with a .data postfix) has a corresponding index file (with a .head postfix). The index file has summary information of a data block for each table, recording the offset of each data block in the data file, start and end time of data and other information which allows the system to locate the data to be found very quickly. Each data file also has a corresponding last file (with a .last postfix), which is designed to prevent data block fragmentation when written in disk. If the number of written records from a table does not reach the system configuration parameter `“minRows”` (minimum number of records per block), it will be stored in the last file first. At the next write operation to the disk, the newly written records will be merged with the records in last file and then written into data file.
-When data is written to disk, it is decided whether to compress the data according to system configuration parameter `“comp”`. TDengine provides three compression options: no compression, one-stage compression and two-stage compression, corresponding to comp values of 0, 1 and 2 respectively. One-stage compression is carried out according to the type of data. Compression algorithms include delta-delta coding, simple 8B method, zig-zag coding, LZ4 and other algorithms. Two-stage compression is based on one-stage compression and compressed by general compression algorithm, which has higher compression ratio.
+When data is written to disk, the system decideswhether to compress the data based on the system configuration parameter `“comp”`. TDengine provides three compression options: no compression, one-stage compression and two-stage compression, corresponding to comp values of 0, 1 and 2 respectively. One-stage compression is carried out according to the type of data. Compression algorithms include delta-delta coding, simple 8B method, zig-zag coding, LZ4 and other algorithms. Two-stage compression is based on one-stage compression and compressed by general compression algorithm, which has higher compression ratio.
### Tiered Storage
-By default, TDengine saves all data in /var/lib/taos directory, and the data files of each vnode are saved in a different directory under this directory. In order to expand the storage space, minimize the bottleneck of file reading and improve the data throughput rate, TDengine can configure the system parameter “dataDir” to allow multiple mounted hard disks to be used by system at the same time. In addition, TDengine also provides the function of tiered data storage, i.e. storage on different storage media according to the time stamps of data files. For example, the latest data is stored on SSD, the data for more than one week is stored on local hard disk, and the data for more than four weeks is stored on network storage device, thus reducing the storage cost and ensuring efficient data access. The movement of data on different storage media is automatically done by the system and completely transparent to applications. Tiered storage of data is also configured through the system parameter “dataDir”.
+By default, TDengine saves all data in /var/lib/taos directory, and the data files of each vnode are saved in a different directory under this directory. In order to expand the storage space, minimize the bottleneck of file reading and improve the data throughput rate, TDengine can configure the system parameter “dataDir” to allow multiple mounted hard disks to be used by system at the same time. In addition, TDengine also provides the function of tiered data storage, i.e. storage on different storage media according to the time stamps of data files. For example, the latest data is stored on SSD, the data older than a week is stored on local hard disk, and data older than four weeks is stored on network storage device. This reduces storage costs and ensures efficient data access. The movement of data on different storage media is automatically done by the system and is completely transparent to applications. Tiered storage of data is also configured through the system parameter “dataDir”.
dataDir format is as follows:
```
@@ -216,7 +218,7 @@ dataDir data_path [tier_level]
Where data_path is the folder path of mount point and tier_level is the media storage-tier. The higher the media storage-tier, means the older the data file. Multiple hard disks can be mounted at the same storage-tier, and data files on the same storage-tier are distributed on all hard disks within the tier. TDengine supports up to 3 tiers of storage, so tier_level values are 0, 1, and 2. When configuring dataDir, there must be only one mount path without specifying tier_level, which is called special mount disk (path). The mount path defaults to level 0 storage media and contains special file links, which cannot be removed, otherwise it will have a devastating impact on the written data.
-Suppose a physical node with six mountable hard disks/mnt/disk1,/mnt/disk2, …,/mnt/disk6, where disk1 and disk2 need to be designated as level 0 storage media, disk3 and disk4 are level 1 storage media, and disk5 and disk6 are level 2 storage media. Disk1 is a special mount disk, you can configure it in/etc/taos/taos.cfg as follows:
+Suppose there is a physical node with six mountable hard disks/mnt/disk1,/mnt/disk2, …,/mnt/disk6, where disk1 and disk2 need to be designated as level 0 storage media, disk3 and disk4 are level 1 storage media, and disk5 and disk6 are level 2 storage media. Disk1 is a special mount disk, you can configure it in/etc/taos/taos.cfg as follows:
```
dataDir /mnt/disk1/taos
@@ -233,11 +235,11 @@ Note: Tiered Storage is only supported in Enterprise Edition
## Data Query
-TDengine provides a variety of query processing functions for tables and STables. In addition to common aggregation queries, TDengine also provides window queries and statistical aggregation functions for time-series data. The query processing of TDengine needs the collaboration of client, vnode and mnode.
+TDengine provides a variety of query processing functions for tables and STables. In addition to common aggregation queries, TDengine also provides window queries and statistical aggregation functions for time-series data. Query processing in TDengine needs the collaboration of client, vnode and mnode.
### Single Table Query
-The parsing and verification of SQL statements are completed on the client side. SQL statements are parsed and generate an Abstract Syntax Tree (AST), which is then checksummed. Then request metadata information (table metadata) for the table specified in the query from management node (mnode).
+The parsing and verification of SQL statements are completed on the client side. SQL statements are parsed and generate an Abstract Syntax Tree (AST), which is then checksummed. Then metadata information (table metadata) for the table specified is requested in the query from management node (mnode).
According to the End Point information in metadata information, the query request is serialized and sent to the data node (dnode) where the table is located. After receiving the query, the dnode identifies the virtual node (vnode) pointed to and forwards the message to the query execution queue of the vnode. The query execution thread of vnode establishes the basic query execution environment, immediately returns the query request and starts executing the query at the same time.
@@ -245,9 +247,9 @@ When client obtains query result, the worker thread in query execution queue of
### Aggregation by Time Axis, Downsampling, Interpolation
-The remarkable feature that time-series data is different from ordinary data is that each record has a timestamp, so aggregating data with timestamps on the time axis is an important and distinct feature from common databases. From this point of view, it is similar to the window query of stream computing engine.
+Time-series data is different from ordinary data in that each record has a timestamp. So aggregating data by timestamps on the time axis is an important and distinct feature of time-series databases which is different from that of common databases. It is similar to the window query of stream computing engines.
-The keyword `interval` is introduced into TDengine to split fixed length time windows on time axis, and the data are aggregated based on time windows, and the data within window range are aggregated as needed. For example:
+The keyword `interval` is introduced into TDengine to split fixed length time windows on the time axis. The data is aggregated based on time windows, and the data within time window ranges is aggregated as needed. For example:
```mysql
select count(*) from d1001 interval(1h);
@@ -265,21 +267,21 @@ For the data collected by device D1001, the number of records per hour is counte
### Multi-table Aggregation Query
-TDengine creates a separate table for each data collection point, but in practical applications, it is often necessary to aggregate data from different data collection points. In order to perform aggregation operations efficiently, TDengine introduces the concept of STable. STable is used to represent a specific type of data collection point. It is a table set containing multiple tables. The schema of each table in the set is the same, but each table has its own static tag. The tags can be multiple and be added, deleted and modified at any time. Applications can aggregate or statistically operate all or a subset of tables under a STABLE by specifying tag filters, thus greatly simplifying the development of applications. The process is shown in the following figure:
+TDengine creates a separate table for each data collection point, but in practical applications, it is often necessary to aggregate data from different data collection points. In order to perform aggregation operations efficiently, TDengine introduces the concept of STable (super table). STable is used to represent a specific type of data collection point. It is a table set containing multiple tables. The schema of each table in the set is the same, but each table has its own static tag. There can be multiple tags which can be added, deleted and modified at any time. Applications can aggregate or statistically operate on all or a subset of tables under a STABLE by specifying tag filters. This greatly simplifies the development of applications. The process is shown in the following figure:
-
+
Figure 5: Diagram of multi-table aggregation query
1. Application sends a query condition to system;
2. TAOSC sends the STable name to Meta Node(management node);
3. Management node sends the vnode list owned by the STable back to TAOSC;
4. TAOSC sends the computing request together with tag filters to multiple data nodes corresponding to these vnodes;
-5. Each vnode first finds out the set of tables within its own node that meet the tag filters from memory, then scans the stored time-series data, completes corresponding aggregation calculations, and returns result to TAOSC;
+5. Each vnode first finds the set of tables within its own node that meet the tag filters from memory, then scans the stored time-series data, completes corresponding aggregation calculations, and returns result to TAOSC;
6. TAOSC finally aggregates the results returned by multiple data nodes and send them back to application.
-Since TDengine stores tag data and time-series data separately in vnode, by filtering tag data in memory, the set of tables that need to participate in aggregation operation is first found, which greatly reduces the volume of data scanned and improves aggregation speed. At the same time, because the data is distributed in multiple vnodes/dnodes, the aggregation operation is carried out concurrently in multiple vnodes, which further improves the aggregation speed. Aggregation functions for ordinary tables and most operations are applicable to STables. The syntax is exactly the same. Please see TAOS SQL for details.
+Since TDengine stores tag data and time-series data separately in vnode, by filtering tag data in memory, the set of tables that need to participate in aggregation operation is first found, which reduces the volume of data to be scanned and improves aggregation speed. At the same time, because the data is distributed in multiple vnodes/dnodes, the aggregation operation is carried out concurrently in multiple vnodes, which further improves the aggregation speed. Aggregation functions for ordinary tables and most operations are applicable to STables. The syntax is exactly the same. Please see TAOS SQL for details.
### Precomputation
-In order to effectively improve the performance of query processing, based-on the unchangeable feature of IoT data, statistical information of data stored in data block is recorded in the head of data block, including max value, min value, and sum. We call it a precomputing unit. If the query processing involves all the data of a whole data block, the pre-calculated results are directly used, and no need to read the data block contents at all. Since the amount of pre-calculated data is much smaller than the actual size of data block stored on disk, for query processing with disk IO as bottleneck, the use of pre-calculated results can greatly reduce the pressure of reading IO and accelerate the query process. The precomputation mechanism is similar to the index BRIN (Block Range Index) of PostgreSQL.
+In order to effectively improve the performance of query processing, based-on the unchangeable feature of IoT data, statistical information of data stored in data block is recorded in the head of data block, including max value, min value, and sum. We call it a precomputing unit. If the query processing involves all the data of a whole data block, the pre-calculated results are directly used, and no need to read the data block contents at all. Since the amount of pre-calculated data is much smaller than the actual size of data block stored on disk, for query processing with disk IO as bottleneck, the use of pre-calculated results can greatly reduce the pressure of reading IO and accelerate the query process. The precomputation mechanism is similar to the BRIN (Block Range Index) of PostgreSQL.
diff --git a/docs-en/21-tdinternal/30-iot-big-data.md b/docs-en/21-tdinternal/30-iot-big-data.md
deleted file mode 100644
index 4bdf5cfba98234c9d843634b5210ca3dae94d870..0000000000000000000000000000000000000000
--- a/docs-en/21-tdinternal/30-iot-big-data.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: IoT Big Data
-description: "Characteristics of IoT Big Data, why general big data platform does not work well for IoT? The required features for an IoT Big Data Platform"
----
-
-- [Characteristics of IoT Big Data](https://tdengine.com/2019/07/09/86.html)
-- [Why don’t General Big Data Platforms Fit IoT Scenarios?](https://tdengine.com/2019/07/09/92.html)
-- [Why TDengine is the Best Choice for IoT Big Data Processing?](https://tdengine.com/2019/07/09/94.html)
-- [Why Redis, Kafka, Spark aren’t Needed if TDengine is Used in the IoT Platform?](https://tdengine.com/2019/07/09/96.html)
-
diff --git a/docs-en/21-tdinternal/dnode.png b/docs-en/21-tdinternal/dnode.png
deleted file mode 100644
index cea87dcccba5d2761996e5dde998022d86487eb9..0000000000000000000000000000000000000000
Binary files a/docs-en/21-tdinternal/dnode.png and /dev/null differ
diff --git a/docs-en/21-tdinternal/dnode.webp b/docs-en/21-tdinternal/dnode.webp
new file mode 100644
index 0000000000000000000000000000000000000000..a56c7e4594df00a721cb48381d68ca3bc813cdc8
Binary files /dev/null and b/docs-en/21-tdinternal/dnode.webp differ
diff --git a/docs-en/21-tdinternal/message.png b/docs-en/21-tdinternal/message.png
deleted file mode 100644
index 715a8bd37ee9fe7e96eacce4e7ff563fedeefbee..0000000000000000000000000000000000000000
Binary files a/docs-en/21-tdinternal/message.png and /dev/null differ
diff --git a/docs-en/21-tdinternal/message.webp b/docs-en/21-tdinternal/message.webp
new file mode 100644
index 0000000000000000000000000000000000000000..a2a42abff3d6e932b41a3abe9feae4a5cc13c9e5
Binary files /dev/null and b/docs-en/21-tdinternal/message.webp differ
diff --git a/docs-en/21-tdinternal/modules.png b/docs-en/21-tdinternal/modules.png
deleted file mode 100644
index 10ae4703a6cbbf66afea325ce4c0f919f7769a07..0000000000000000000000000000000000000000
Binary files a/docs-en/21-tdinternal/modules.png and /dev/null differ
diff --git a/docs-en/21-tdinternal/modules.webp b/docs-en/21-tdinternal/modules.webp
new file mode 100644
index 0000000000000000000000000000000000000000..718a6abccdbe40d4a0df5e3812fe0ab943a7c523
Binary files /dev/null and b/docs-en/21-tdinternal/modules.webp differ
diff --git a/docs-en/21-tdinternal/multi_tables.png b/docs-en/21-tdinternal/multi_tables.png
deleted file mode 100644
index 0cefaab6a9a4cdd671c671f7c6186dea41415ff0..0000000000000000000000000000000000000000
Binary files a/docs-en/21-tdinternal/multi_tables.png and /dev/null differ
diff --git a/docs-en/21-tdinternal/multi_tables.webp b/docs-en/21-tdinternal/multi_tables.webp
new file mode 100644
index 0000000000000000000000000000000000000000..8f649e34a3a62d1b11b4403b2e743ff6b5e47be2
Binary files /dev/null and b/docs-en/21-tdinternal/multi_tables.webp differ
diff --git a/docs-en/21-tdinternal/replica-forward.png b/docs-en/21-tdinternal/replica-forward.png
deleted file mode 100644
index bf616e030b130603eceb5dccfd30b4a1dfa68ea5..0000000000000000000000000000000000000000
Binary files a/docs-en/21-tdinternal/replica-forward.png and /dev/null differ
diff --git a/docs-en/21-tdinternal/replica-forward.webp b/docs-en/21-tdinternal/replica-forward.webp
new file mode 100644
index 0000000000000000000000000000000000000000..512efd4eba8f23ad0f8607eaaf5525f51ecdcf0e
Binary files /dev/null and b/docs-en/21-tdinternal/replica-forward.webp differ
diff --git a/docs-en/21-tdinternal/replica-master.png b/docs-en/21-tdinternal/replica-master.png
deleted file mode 100644
index cb33f1ce98661563693215d8fc73b003235c7668..0000000000000000000000000000000000000000
Binary files a/docs-en/21-tdinternal/replica-master.png and /dev/null differ
diff --git a/docs-en/21-tdinternal/replica-master.webp b/docs-en/21-tdinternal/replica-master.webp
new file mode 100644
index 0000000000000000000000000000000000000000..57030a11f563af2689dbcfd206183f410b121aee
Binary files /dev/null and b/docs-en/21-tdinternal/replica-master.webp differ
diff --git a/docs-en/21-tdinternal/replica-restore.png b/docs-en/21-tdinternal/replica-restore.png
deleted file mode 100644
index 1558e5ed0108d23efdc6b5d9ea0e44a1dff45d28..0000000000000000000000000000000000000000
Binary files a/docs-en/21-tdinternal/replica-restore.png and /dev/null differ
diff --git a/docs-en/21-tdinternal/replica-restore.webp b/docs-en/21-tdinternal/replica-restore.webp
new file mode 100644
index 0000000000000000000000000000000000000000..f282c2d4d23f517e3ef08e906cea7e9c5edc0b2a
Binary files /dev/null and b/docs-en/21-tdinternal/replica-restore.webp differ
diff --git a/docs-en/21-tdinternal/structure.png b/docs-en/21-tdinternal/structure.png
deleted file mode 100644
index 4fc8f47ab0a30d95b85ba1d85105726ed981e56e..0000000000000000000000000000000000000000
Binary files a/docs-en/21-tdinternal/structure.png and /dev/null differ
diff --git a/docs-en/21-tdinternal/structure.webp b/docs-en/21-tdinternal/structure.webp
new file mode 100644
index 0000000000000000000000000000000000000000..b77a42c074b15302b5c3ab889fb550a46dd549b3
Binary files /dev/null and b/docs-en/21-tdinternal/structure.webp differ
diff --git a/docs-en/21-tdinternal/vnode.png b/docs-en/21-tdinternal/vnode.png
deleted file mode 100644
index e6148d4907cf9a18bc52251f712d5c685651b7f5..0000000000000000000000000000000000000000
Binary files a/docs-en/21-tdinternal/vnode.png and /dev/null differ
diff --git a/docs-en/21-tdinternal/vnode.webp b/docs-en/21-tdinternal/vnode.webp
new file mode 100644
index 0000000000000000000000000000000000000000..fae3104c89c542c26790b509d12ad56661082c32
Binary files /dev/null and b/docs-en/21-tdinternal/vnode.webp differ
diff --git a/docs-en/21-tdinternal/write_master.png b/docs-en/21-tdinternal/write_master.png
deleted file mode 100644
index ff2dfc20bfc2ecf956a2aab1a8965a7bbcae4387..0000000000000000000000000000000000000000
Binary files a/docs-en/21-tdinternal/write_master.png and /dev/null differ
diff --git a/docs-en/21-tdinternal/write_master.webp b/docs-en/21-tdinternal/write_master.webp
new file mode 100644
index 0000000000000000000000000000000000000000..9624036ed3d46ed60924ead9ce5c61acee0f4652
Binary files /dev/null and b/docs-en/21-tdinternal/write_master.webp differ
diff --git a/docs-en/21-tdinternal/write_slave.png b/docs-en/21-tdinternal/write_slave.png
deleted file mode 100644
index cacb2cb6bcc4f4d934e979862387e1345bbac078..0000000000000000000000000000000000000000
Binary files a/docs-en/21-tdinternal/write_slave.png and /dev/null differ
diff --git a/docs-en/21-tdinternal/write_slave.webp b/docs-en/21-tdinternal/write_slave.webp
new file mode 100644
index 0000000000000000000000000000000000000000..7c45dec11b00e6a738de458f9e1bedacfad75a96
Binary files /dev/null and b/docs-en/21-tdinternal/write_slave.webp differ
diff --git a/docs-en/25-application/01-telegraf.md b/docs-en/25-application/01-telegraf.md
index 718e04ecd3dbd2a72feba3f5297d9da33a94ba83..d30a23fe1b942e1411e8b5f1320e1c54ae2b407f 100644
--- a/docs-en/25-application/01-telegraf.md
+++ b/docs-en/25-application/01-telegraf.md
@@ -5,18 +5,18 @@ title: Quickly Build IT DevOps Visualization System with TDengine + Telegraf + G
## Background
-TDengine is a big data platform designed and optimized for IoT (Internet of Things), Vehicle Telematics, Industrial Internet, IT DevOps, etc. by TAOSData. Since it opened its source code in July 2019, it has won the favor of a large number of time-series data developers with its innovative data modeling design, convenient installation, easy-to-use programming interface, and powerful data writing and query performance.
+TDengine is a big data platform designed and optimized for IoT (Internet of Things), Vehicle Telemetry, Industrial Internet, IT DevOps and other applications. Since it was open-sourced in July 2019, it has won the favor of a large number of time-series data developers with its innovative data modeling design, convenient installation, easy-to-use programming interface, and powerful data writing and query performance.
IT DevOps metric data usually are time sensitive, for example:
- System resource metrics: CPU, memory, IO, bandwidth, etc.
- Software system metrics: health status, number of connections, number of requests, number of timeouts, number of errors, response time, service type, and other business-related metrics.
-Current mainstream IT DevOps system usually include a data collection module, a data persistent module, and a visualization module; Telegraf and Grafana are one of the most popular data collection modules and visualization modules, respectively. The data persistent module is available in a wide range of options, with OpenTSDB or InfluxDB being the most popular. TDengine, as an emerging time-series big data platform, has the advantages of high performance, high reliability, easy management and easy maintenance.
+Current mainstream IT DevOps system usually include a data collection module, a data persistent module, and a visualization module; Telegraf and Grafana are one of the most popular data collection modules and visualization modules, respectively. The data persistence module is available in a wide range of options, with OpenTSDB or InfluxDB being the most popular. TDengine, as an emerging time-series big data platform, has the advantages of high performance, high reliability, easy management and easy maintenance.
-This article introduces how to quickly build a TDengine + Telegraf + Grafana based IT DevOps visualization system without writing even a single line of code and by simply modifying a few lines of configuration files. The architecture is as follows.
+This article introduces how to quickly build a TDengine + Telegraf + Grafana based IT DevOps visualization system without writing even a single line of code and by simply modifying a few lines in configuration files. The architecture is as follows.
-
+
## Installation steps
@@ -73,11 +73,11 @@ sudo systemctl start telegraf
Log in to the Grafana interface using a web browser at `IP:3000`, with the system's initial username and password being `admin/admin`.
Click on the gear icon on the left and select `Plugins`, you should find the TDengine data source plugin icon.
-Click on the plus icon on the left and select `Import` to get the data from `https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard- v0.1.0.json`, download the dashboard JSON file and import it. You will then see the dashboard in the following screen.
+Click on the plus icon on the left and select `Import` to get the data from `https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json`, download the dashboard JSON file and import it. You will then see the dashboard in the following screen.
-
+
## Wrap-up
-The above demonstrates how to quickly build a IT DevOps visualization system. Thanks to the new schemaless protocol parsing feature in TDengine version 2.4.0.0 and the powerful ecological software adaptation capability, users can build an efficient and easy-to-use IT DevOps visualization system in just a few minutes.
+The above demonstrates how to quickly build a IT DevOps visualization system. Thanks to the new schemaless protocol parsing feature in TDengine version 2.4.0.0 and ability to integrate easily with a large software ecosystem, users can build an efficient and easy-to-use IT DevOps visualization system in just a few minutes.
Please refer to the official documentation and product implementation cases for other features.
diff --git a/docs-en/25-application/02-collectd.md b/docs-en/25-application/02-collectd.md
index 2ac37618fafe11e71b215313e53f89b6c302f7cb..1733ed1b1af8c9375c3773d1ca86831396499a78 100644
--- a/docs-en/25-application/02-collectd.md
+++ b/docs-en/25-application/02-collectd.md
@@ -5,19 +5,19 @@ title: Quickly build an IT DevOps visualization system using TDengine + collectd
## Background
-TDengine is a big data platform designed and optimized for IoT (Internet of Things), Vehicle Telematics, Industrial Internet, IT DevOps, etc. by TAOSData. Since it opened its source code in July 2019, it has won the favor of a large number of time-series data developers with its innovative data modeling design, convenient installation, easy-to-use programming interface, and powerful data writing and query performance.
+TDengine is a big data platform designed and optimized for IoT (Internet of Things), Vehicle Telemetry, Industrial Internet, IT DevOps and other applications. Since it was open-sourced in July 2019, it has won the favor of a large number of time-series data developers with its innovative data modeling design, convenient installation, easy-to-use programming interface, and powerful data writing and query performance.
IT DevOps metric data usually are time sensitive, for example:
- System resource metrics: CPU, memory, IO, bandwidth, etc.
- Software system metrics: health status, number of connections, number of requests, number of timeouts, number of errors, response time, service type, and other business-related metrics.
-The current mainstream IT DevOps visualization system usually contains a data collection module, a data persistent module, and a visual display module. collectd/StatsD, as an old-fashion open source data collection tool, has a wide user base. However, collectd/StatsD has limited functionality, and often needs to be combined with Telegraf, Grafana, and a time-series database to build a complete monitoring system.
+The current mainstream IT DevOps visualization system usually contains a data collection module, a data persistence module, and a visual display module. collectd/StatsD, as an old-fashion open source data collection tool, has a wide user base. However, collectd/StatsD has limited functionality, and often needs to be combined with Telegraf, Grafana, and a time-series database to build a complete monitoring system.
The new version of TDengine supports multiple data protocols and can accept data from collectd and StatsD directly, and provides Grafana dashboard for graphical display.
-This article introduces how to quickly build an IT DevOps visualization system based on TDengine + collectd / StatsD + Grafana without writing even a single line of code but by simply modifying a few lines of configuration files. The architecture is shown in the following figure.
+This article introduces how to quickly build an IT DevOps visualization system based on TDengine + collectd / StatsD + Grafana without writing even a single line of code but by simply modifying a few lines in configuration files. The architecture is shown in the following figure.
-
+
## Installation Steps
@@ -83,22 +83,22 @@ Click on the gear icon on the left and select `Plugins`, you should find the TDe
Download the dashboard json from `https://github.com/taosdata/grafanaplugin/blob/master/examples/collectd/grafana/dashboards/collect-metrics-with-tdengine-v0.1.0.json`, click the plus icon on the left and select Import, follow the instructions to import the JSON file. After that, you can see
The dashboard can be seen in the following screen.
-
+
#### import collectd dashboard
Download the dashboard json file from `https://github.com/taosdata/grafanaplugin/blob/master/examples/collectd/grafana/dashboards/collect-metrics-with-tdengine-v0.1.0.json`. Download the dashboard json file, click the plus icon on the left side and select `Import`, and follow the interface prompts to select the JSON file to import. After that, you can see
dashboard with the following interface.
-
+
#### Importing the StatsD dashboard
Download the dashboard json from `https://github.com/taosdata/grafanaplugin/blob/master/examples/statsd/dashboards/statsd-with-tdengine-v0.1.0.json`. Click on the plus icon on the left and select `Import`, and follow the interface prompts to import the JSON file. You will then see the dashboard in the following screen.
-
+
## Wrap-up
-TDengine, as an emerging time-series big data platform, has the advantages of high performance, high reliability, easy management and easy maintenance. Thanks to the new schemaless protocol parsing function in TDengine version 2.4.0.0 and the powerful ecological software adaptation capability, users can build an efficient and easy-to-use IT DevOps visualization system or adapt to an existing system in just a few minutes.
+TDengine, as an emerging time-series big data platform, has the advantages of high performance, high reliability, easy management and easy maintenance. Thanks to the new schemaless protocol parsing feature in TDengine version 2.4.0.0 and ability to integrate easily with a large software ecosystem, users can build an efficient and easy-to-use IT DevOps visualization system, or adapt an existing system, in just a few minutes.
For TDengine's powerful data writing and querying performance and other features, please refer to the official documentation and successful product implementation cases.
diff --git a/docs-en/25-application/03-immigrate.md b/docs-en/25-application/03-immigrate.md
index 4cfeb892d821a1e5b7d5250615e7122e64b9882d..4d47aec1d76014ba63f6be91004abcc3934769f7 100644
--- a/docs-en/25-application/03-immigrate.md
+++ b/docs-en/25-application/03-immigrate.md
@@ -3,10 +3,9 @@ sidebar_label: OpenTSDB Migration to TDengine
title: Best Practices for Migrating OpenTSDB Applications to TDengine
---
-As a distributed, scalable, HBase-based distributed time-series database software, thanks to its first-mover advantage, OpenTSDB has been introduced and widely used in DevOps by people. However, using new technologies like cloud computing, microservices, and containerization technology with rapid development. Enterprise-level services are becoming more and more diverse. The architecture is becoming more complex.
+As a distributed, scalable, distributed time-series database platform based on HBase, and thanks to its first-mover advantage, OpenTSDB is widely used for monitoring in DevOps. However, as new technologies like cloud computing, microservices, and containerization technology has developed rapidly, Enterprise-level services are becoming more and more diverse and the architecture is becoming more complex.
-From this situation, it increasingly plagues to use of OpenTSDB as a DevOps backend storage for monitoring by performance issues and delayed feature upgrades. The resulting increase in application deployment costs and reduced operational efficiency.
-These problems are becoming increasingly severe as the system scales up.
+As a result, as a DevOps backend for monitoring, OpenTSDB is plagued by performance issues and delayed feature upgrades. This has resulted in increased application deployment costs and reduced operational efficiency. These problems become increasingly severe as the system tries to scale up.
To meet the fast-growing IoT big data market and technical needs, TAOSData developed an innovative big-data processing product, **TDengine**.
@@ -14,14 +13,14 @@ After learning the advantages of many traditional relational databases and NoSQL
Compared with OpenTSDB, TDengine has the following distinctive features.
-- Performance of data writing and querying far exceeds that of OpenTSDB.
-- Efficient compression mechanism for time-series data, which compresses less than 1/5 of the storage space on disk.
-- The installation and deployment are straightforward. A single installation package can complete the installation and deployment and does not rely on other third-party software. The entire installation and deployment process in a few seconds;
-- The built-in functions cover all of OpenTSDB's query functions. And support more time-series data query functions, scalar functions, and aggregation functions. And support advanced query functions such as multiple time-window aggregations, join query, expression operation, multiple group aggregation, user-defined sorting, and user-defined functions. Adopting SQL-like syntax rules is more straightforward and has no learning cost.
+- Data writing and querying performance far exceeds that of OpenTSDB.
+- Efficient compression mechanism for time-series data, which compresses to less than 1/5 of the storage space, on disk.
+- The installation and deployment are straightforward. A single installation package can complete the installation and deployment and does not rely on other third-party software. The entire installation and deployment process takes a few seconds.
+- The built-in functions cover all of OpenTSDB's query functions and TDengine supports more time-series data query functions, scalar functions, and aggregation functions. TDengine also supports advanced query functions such as multiple time-window aggregations, join query, expression operation, multiple group aggregation, user-defined sorting, and user-defined functions. With a SQL-like query language, querying is more straightforward and has no learning cost.
- Supports up to 128 tags, with a total tag length of 16 KB.
- In addition to the REST interface, it also provides interfaces to Java, Python, C, Rust, Go, C# and other languages. Its supports a variety of enterprise-class standard connector protocols such as JDBC.
-If we migrate the applications originally running on OpenTSDB to TDengine, we will effectively reduce the compute and storage resource consumption and the number of deployed servers. And will also significantly reduce the operation and maintenance costs, making operation and maintenance management more straightforward and more accessible, and considerably reducing the total cost of ownership. Like OpenTSDB, TDengine has also been open-sourced, including the stand-alone version and the cluster version source code. So there is no need to be concerned about the vendor-lock problem.
+Migrating applications originally running on OpenTSDB to TDengine, effectively reduces compute and storage resource consumption and the number of deployed servers. It also significantly reduces operation and maintenance costs, makes operation and maintenance management more straightforward and more accessible, and considerably reduces the total cost of ownership. Like OpenTSDB, TDengine has also been open-sourced. Both the stand-alone version and the cluster version are open-sourced and there is no need to be concerned about the vendor-lock problem.
We will explain how to migrate OpenTSDB applications to TDengine quickly, securely, and reliably without coding, using the most typical DevOps scenarios. Subsequent chapters will go into more depth to facilitate migration for non-DevOps systems.
@@ -32,9 +31,9 @@ We will explain how to migrate OpenTSDB applications to TDengine quickly, secure
The following figure (Figure 1) shows the system's overall architecture for a typical DevOps application scenario.
**Figure 1. Typical architecture in a DevOps scenario**
-
+
-In this application scenario, there are Agent tools deployed in the application environment to collect machine metrics, network metrics, and application metrics. Data collectors to aggregate information collected by agents, systems for persistent data storage and management, and tools for monitoring data visualization (e.g., Grafana, etc.).
+In this application scenario, there are Agent tools deployed in the application environment to collect machine metrics, network metrics, and application metrics. There are also data collectors to aggregate information collected by agents, systems for persistent data storage and management, and tools for data visualization (e.g., Grafana, etc.).
The agents deployed in the application nodes are responsible for providing operational metrics from different sources to collectd/Statsd. And collectd/StatsD is accountable for pushing the aggregated data to the OpenTSDB cluster system and then visualizing the data using the visualization kanban board software, Grafana.
@@ -44,15 +43,15 @@ The agents deployed in the application nodes are responsible for providing opera
First of all, please install TDengine. Download the latest stable version of TDengine from the official website and install it. For help with using various installation packages, please refer to the blog ["Installation and Uninstallation of TDengine Multiple Installation Packages"](https://www.taosdata.com/blog/2019/08/09/566.html).
-Note that once the installation is complete, do not start the `taosd` service immediately, but after properly configuring the parameters.
+Note that once the installation is complete, do not start the `taosd` service before properly configuring the parameters.
- **Adjusting the data collector configuration**
TDengine version 2.4 and later version includes `taosAdapter`. taosAdapter is a stateless, rapidly elastic, and scalable component. taosAdapter supports Influxdb's Line Protocol and OpenTSDB's telnet/JSON writing protocol specification, providing rich data access capabilities, effectively saving user migration costs and reducing the difficulty of user migration.
-Users can flexibly deploy taosAdapter instances according to their requirements to rapidly improve the throughput of data writes in conjunction with the needs of scenarios and provide guarantees for data writes in different application scenarios.
+Users can flexibly deploy taosAdapter instances, based on their requirements, to improve data writing throughput and provide guarantees for data writes in different application scenarios.
-Through taosAdapter, users can directly push the data collected by `collectd` or `StatsD` to TDengine to achieve seamless migration of application scenarios, which is very easy and convenient. taosAdapter also supports Telegraf, Icinga, TCollector, and node_exporter data. For more details, please refer to [taosAdapter](/reference/taosadapter/).
+Through taosAdapter, users can directly write the data collected by `collectd` or `StatsD` to TDengine to achieve easy, convenient and seamless migration in application scenarios. taosAdapter also supports Telegraf, Icinga, TCollector, and node_exporter data. For more details, please refer to [taosAdapter](/reference/taosadapter/).
If using collectd, modify the configuration file in its default location `/etc/collectd/collectd.conf` to point to the IP address and port of the node where to deploy taosAdapter. For example, assuming the taosAdapter IP address is 192.168.1.130 and port 6046, configure it as follows.
@@ -66,29 +65,29 @@ LoadPlugin write_tsdb
```
-You can use collectd and push the data to taosAdapter utilizing the push to OpenTSDB plugin. taosAdapter will call the API to write the data to TDengine, thus completing the writing of the data. If you are using StatsD, adjust the profile information accordingly.
+You can use collectd and push the data to taosAdapter utilizing the write_tsdb plugin. taosAdapter will call the API to write the data to TDengine. If you are using StatsD, adjust the profile information accordingly.
- **Tuning the Dashboard system**
-After writing the data to TDengine properly, you can adapt Grafana to visualize the data written to TDengine. To obtain and use the Grafana plugin provided by TDengine, please refer to [Links to other tools](/third-party/grafana).
+After writing the data to TDengine, you can configure Grafana to visualize the data written to TDengine. To obtain and use the Grafana plugin provided by TDengine, please refer to [Links to other tools](/third-party/grafana).
TDengine provides two sets of Dashboard templates by default, and users only need to import the templates from the Grafana directory into Grafana to activate their use.
**Importing Grafana Templates** Figure 2.
-
+
-After the above steps, you completed the migration to replace OpenTSDB with TDengine. You can see that the whole process is straightforward, there is no need to write any code, and only some configuration files need to be adjusted to meet the migration work.
+With the above steps completed, you have finished replacing OpenTSDB with TDengine. You can see that the whole process is straightforward, there is no need to write any code, and only some configuration files need to be changed.
### 3. Post-migration architecture
-After completing the migration, the figure below (Figure 3) shows the system's overall architecture. The whole process of the acquisition side, the data writing, and the monitoring and presentation side are all kept stable, except for a few configuration adjustments, which do not involve any critical changes or alterations. OpenTSDB to TDengine migration action, using TDengine more powerful processing power and query performance.
+After completing the migration, the figure below (Figure 3) shows the system's overall architecture. The whole process of the acquisition side, the data writing, and the monitoring and presentation side are all kept stable. There are a few configuration adjustments, which do not involve any critical changes or alterations. Migrating to TDengine from OpenTSDB leads to powerful processing power and query performance.
-In most DevOps scenarios, if you have a small OpenTSDB cluster (3 or fewer nodes) for providing the storage layer of DevOps and rely on OpenTSDB to give a data persistence layer and query capabilities, you can safely replace OpenTSDB with TDengine. TDengine will save more compute and storage resources. With the same compute resource allocation, a single TDengine can meet the service capacity provided by 3 to 5 OpenTSDB nodes. If the scale is more prominent, then TDengine clustering is required.
-
-Suppose your application is particularly complex, or the application domain is not a DevOps scenario. You can continue reading subsequent chapters for a more comprehensive and in-depth look at the advanced topics of migrating an OpenTSDB application to TDengine.
+In most DevOps scenarios, if you have a small OpenTSDB cluster (3 or fewer nodes) which provides storage and data persistence layer in addition to query capability, you can safely replace OpenTSDB with TDengine. TDengine will save compute and storage resources. With the same compute resource allocation, a single TDengine can meet the service capacity provided by 3 to 5 OpenTSDB nodes. TDengine clustering may be required depending on the scale of the application.
**Figure 3. System architecture after migration**
-
+
+
+The following chapters provide a more comprehensive and in-depth look at the advanced topics of migrating an OpenTSDB application to TDengine. This will be useful if your application is particularly complex and is not a DevOps application.
## Migration evaluation and strategy for other scenarios
@@ -96,26 +95,25 @@ Suppose your application is particularly complex, or the application domain is n
This chapter describes the differences between OpenTSDB and TDengine at the system functionality level. After reading this chapter, you can fully evaluate whether you can migrate some complex OpenTSDB-based applications to TDengine, and what you should pay attention to after migration.
-TDengine currently only supports Grafana for visual kanban rendering, so if your application uses front-end kanban boards other than Grafana (e.g., [TSDash](https://github.com/facebook/tsdash), [Status Wolf](https://github.com/box/StatusWolf), etc.). You cannot directly migrate those front-end kanbans to TDengine, and the front-end kanban will need to be ported to Grafana to work correctly.
+TDengine currently only supports Grafana for visual kanban rendering, so if your application uses front-end kanban boards other than Grafana (e.g., [TSDash](https://github.com/facebook/tsdash), [Status Wolf](https://github.com/box/StatusWolf), etc.) you cannot directly migrate those front-end kanbans to TDengine. The front-end kanban will need to be ported to Grafana to work correctly.
-TDengine version 2.3.0.x only supports collectd and StatsD as data collection aggregation software but will provide more data collection aggregation software in the future. If you use other data aggregators on the collection side, your application needs to be ported to these two data aggregation systems to write data correctly.
+TDengine version 2.3.0.x only supports collectd and StatsD as data collection and aggregation software but future versions will provide support for more data collection and aggregation software in the future. If you use other data aggregators on the collection side, your application needs to be ported to these two data aggregation systems to write data correctly.
In addition to the two data aggregator software protocols mentioned above, TDengine also supports writing data directly via InfluxDB's line protocol and OpenTSDB's data writing protocol, JSON format. You can rewrite the logic on the data push side to write data using the line protocols supported by TDengine.
-In addition, if your application uses the following features of OpenTSDB, you need to understand the following considerations before migrating your application to TDengine.
+In addition, if your application uses the following features of OpenTSDB, you need to take into account the following considerations before migrating your application to TDengine.
1. `/api/stats`: If your application uses this feature to monitor the service status of OpenTSDB, and you have built the relevant logic to link the processing in your application, then this part of the status reading and fetching logic needs to be re-adapted to TDengine. TDengine provides a new mechanism for handling cluster state monitoring to meet the monitoring and maintenance needs of your application.
-2. `/api/tree`: If you rely on this feature of OpenTSDB for the hierarchical organization and maintenance of timelines, you cannot migrate it directly to TDengine, which uses a database -> super table -> sub-table hierarchy to organize and maintain timelines, with all timelines belonging to the same super table in the same system hierarchy, but it is possible to simulate a logical multi-level structure of the application through the unique construction of different tag values.
-3. `Rollup And PreAggregates`: The use of Rollup and PreAggregates requires the application to decide where to access the Rollup results and, in some scenarios, to access the actual results. The opacity of this structure makes the application processing logic extraordinarily complex and not portable at all. We think this strategy is a compromise when the time-series database does not.
-TDengine does not support automatic downsampling of multiple timelines and preaggregation (for a range of periods) for the time being. Still, thanks to its high-performance query processing logic can provide very high-performance query responses without relying on Rollup and preaggregation (for a range of periods), making your application query processing logic much more straightforward.
-The logic is much simpler.
-4. `Rate`: TDengine provides two functions to calculate the rate of change of values, namely `Derivative` (the result is consistent with the Derivative behavior of InfluxDB) and `IRate` (the result is compatible with the IRate function in Prometheus). However, the results of these two functions are slightly different from Rate, but the functions are more powerful overall. In addition, TDengine supports all the calculation functions provided by OpenTSDB, and TDengine's query functions are much more potent than those supported by OpenTSDB, which can significantly simplify the processing logic of your application.
+2. `/api/tree`: If you rely on this feature of OpenTSDB for the hierarchical organization and maintenance of timelines, you cannot migrate it directly to TDengine, which uses a database -> super table -> sub-table hierarchy to organize and maintain timelines, with all timelines belonging to the same super table in the same system hierarchy. But it is possible to simulate a logical multi-level structure of the application through the unique construction of different tag values.
+3. `Rollup And PreAggregates`: The use of Rollup and PreAggregates requires the application to decide where to access the Rollup results and, in some scenarios, to access the actual results. The opacity of this structure makes the application processing logic extraordinarily complex and not portable at all.
+While TDengine does not currently support automatic downsampling of multiple timelines and preaggregation (for a range of periods), thanks to its high-performance query processing logic, it can provide very high-performance query responses without relying on Rollup and preaggregation (for a range of periods). This makes your application query processing logic straightforward and simple.
+4. `Rate`: TDengine provides two functions to calculate the rate of change of values, namely `Derivative` (the result is consistent with the Derivative behavior of InfluxDB) and `IRate` (the result is compatible with the IRate function in Prometheus). However, the results of these two functions are slightly different from that of Rate. But the TDengine functions are more powerful. In addition, TDengine supports all the calculation functions provided by OpenTSDB. TDengine's query functions are much more powerful than those supported by OpenTSDB, which can significantly simplify the processing logic of your application.
-Through the above introduction, I believe you should be able to understand the changes brought about by the migration of OpenTSDB to TDengine. And this information will also help you correctly determine whether you would migrate your application to TDengine to experience the powerful and convenient time-series data processing capability provided by TDengine.
+With the above introduction, we believe you should be able to understand the changes brought about by the migration of OpenTSDB to TDengine. And this information will also help you correctly determine whether you should migrate your application to TDengine to experience the powerful and convenient time-series data processing capability provided by TDengine.
### 2. Migration strategy suggestion
-First, the OpenTSDB-based system migration involves data schema design, system scale estimation, and data write end transformation, data streaming, and application adaptation; after that, the two systems will run in parallel for a while and then migrate the historical data to TDengine. Of course, if your application has some functions that strongly depend on the above OpenTSDB features and you do not want to stop using them, you can migrate the historical data to TDengine.
-You can consider keeping the original OpenTSDB system running while starting TDengine to provide the primary services.
+OpenTSDB-based system migration involves data schema design, system scale estimation, data write transformation, data streaming, and application changes. The two systems should run in parallel for a while and then the historical data should be migrated to TDengine if your application has some functions that strongly depend on the above OpenTSDB features and you do not want to stop using them.
+You can also consider keeping the original OpenTSDB system running while using TDengine to provide the primary services.
## Data model design
@@ -129,16 +127,19 @@ Let us now assume a DevOps scenario where we use collectd to collect the underly
| 2 | swap | value | double | host | swap_type | swap_type_instance | source | n/a |
| 3 | disk | value | double | host | disk_point | disk_instance | disk_type | source |
-TDengine requires the data stored to have a data schema, i.e., you need to create a super table and specify the schema of the super table before writing the data. For data schema creation, you have two ways to do this: 1) Take advantage of TDengine's native data writing support for OpenTSDB by calling the TDengine API to write (text line or JSON format)
-and automate the creation of single-value models. This approach does not require significant adjustments to the data writing application, nor does it require converting the written data format.
+TDengine requires the data stored to have a data schema, i.e., you need to create a super table and specify the schema of the super table before writing the data. For data schema creation, you have two ways to do this:
+1) Take advantage of TDengine's native data writing support for OpenTSDB by calling the TDengine API to write (text line or JSON format) and automate the creation of single-value models. This approach does not require significant adjustments to the data writing application, nor does it require converting the written data format.
At the C level, TDengine provides the `taos_schemaless_insert()` function to write data in OpenTSDB format directly (in early version this function was named `taos_insert_lines()`). Please refer to the sample code `schemaless.c` in the installation package directory as reference.
-(2) based on a complete understanding of TDengine's data model, to establish the mapping relationship between OpenTSDB and TDengine's data model adjustment manually. Considering that OpenTSDB is a single-value mapping model, recommended using the single-value model in TDengine. TDengine can support both multi-value and single-value models.
+(2) Based on a thorough understanding of TDengine's data model, establish a mapping between OpenTSDB and TDengine's data model. Considering that OpenTSDB is a single-value mapping model, we recommended using the single-value model in TDengine for simplicity. But keep in mind that TDengine supports both multi-value and single-value models.
- **Single-valued model**.
-The steps are as follows: use the name of the metrics as the name of the TDengine super table, which build with two basic data columns - timestamp and value, and the label of the super table is equivalent to the label information of the metrics, and the number of labels is equal to the number of labels of the metrics. The names of sub-tables are named with fixed rules: `metric + '_' + tags1_value + '_' + tag2_value + '_' + tag3_value ...` as the sub-table name.
+The steps are as follows:
+- Use the name of the metrics as the name of the TDengine super table
+- Build with two basic data columns - timestamp and value. The label of the super table is equivalent to the label information of the metrics, and the number of labels is equal to the number of labels of the metrics.
+- The names of sub-tables are named with fixed rules: `metric + '_' + tags1_value + '_' + tag2_value + '_' + tag3_value ...` as the sub-table name.
Create 3 super tables in TDengine.
@@ -158,13 +159,13 @@ The final system will have about 340 sub-tables and three super-tables. Note tha
- **Multi-value model**
-Suppose you want to take advantage of TDengine's multi-value modeling capabilities. In that case, you need first to meet the requirements that different collection quantities have the same collection frequency and can reach the **data write side simultaneously via a message queue**, thus ensuring writing multiple metrics at once using SQL statements. The metric's name is used as the name of the super table to create a multi-column model of data that has the same collection frequency and can arrive simultaneously. The names of the sub-tables are named using a fixed rule. Each of the above metrics contains only one measurement value, so converting it into a multi-value model is impossible.
+Ideally you should take advantage of TDengine's multi-value modeling capabilities. In that case, you first need to meet the requirement that different collection quantities have the same collection frequency and can reach the **data write side simultaneously via a message queue**, thus ensuring writing multiple metrics at once, using SQL statements. The metric's name is used as the name of the super table to create a multi-column model of data that has the same collection frequency and can arrive simultaneously. The sub-tables are named using a fixed rule. Each of the above metrics contains only one measurement value, so converting it into a multi-value model is impossible.
## Data triage and application adaptation
-Subscribe data from the message queue and start the adapted writer to write the data.
+Subscribe to the message queue and start writing data to TDengine.
-After writing the data starts for a while, you can use SQL statements to check whether the amount of data written meets the expected writing requirements. Use the following SQL statement to count the amount of data.
+After data has been written for a while, you can use SQL statements to check whether the amount of data written meets the expected writing requirements. Use the following SQL statement to count the amount of data.
```sql
select count(*) from memory
@@ -184,7 +185,7 @@ To facilitate historical data migration, we provide a plug-in for the data synch
For the specific usage of DataX and how to use DataX to write data to TDengine, please refer to [DataX-based TDengine Data Migration Tool](https://www.taosdata.com/blog/2021/10/26/3156.html).
-After migrating via DataX, we found that we can significantly improve the efficiency of migrating historical data by starting multiple processes and migrating numerous metrics simultaneously. The following are some records of the migration process. I wish to use these for application migration as a reference.
+After migrating via DataX, we found that we can significantly improve the efficiency of migrating historical data by starting multiple processes and migrating numerous metrics simultaneously. The following are some records of the migration process. We provide these as a reference for application migration.
| Number of datax instances (number of concurrent processes) | Migration record speed (pieces/second) |
| ----------------------------- | ------------------- -- |
@@ -202,13 +203,13 @@ Suppose you need to use the multi-value model for data writing. In that case, yo
Manual migration of data requires attention to the following two issues:
-1) When storing the exported data on the disk, the disk needs to have enough storage space to accommodate the exported data files fully. Adopting the partial import mode to avoid the shortage of disk file storage after the total amount of data is exported. Preferentially export the timelines belonging to the same super table. Then the exported data files are imported into the TDengine system.
+1) When storing the exported data on the disk, the disk needs to have enough storage space to accommodate the exported data files fully. To avoid running out of disk space, you can adopt a partial import mode in which you preferentially export the timelines belonging to the same super table and then only those files are imported into TDengine.
-2) Under the full load of the system, if there are enough remaining computing and IO resources, establish a multi-threaded importing to maximize the efficiency of data migration. Considering the vast load that data parsing brings to the CPU, it is necessary to control the maximum number of parallel tasks to avoid the overall overload of the system triggered by importing historical data.
+2) Under the full load of the system, if there are enough remaining computing and IO resources, establish a multi-threaded import to maximize the efficiency of data migration. Considering the vast load that data parsing brings to the CPU, it is necessary to control the maximum number of parallel tasks to avoid overloading the system when importing historical data.
Due to the ease of operation of TDengine itself, there is no need to perform index maintenance and data format change processing in the entire process. The whole process only needs to be executed sequentially.
-When wholly importing the historical data into TDengine, the two systems run simultaneously and then switch the query request to TDengine to achieve seamless application switching.
+While importing historical data into TDengine, the two systems should run simultaneously. Once all the data is migrated, switch the query request to TDengine to achieve seamless application switching.
## Appendix 1: OpenTSDB query function correspondence table
@@ -222,12 +223,12 @@ Example:
SELECT avg(val) FROM (SELECT first(val) FROM super_table WHERE ts >= startTime and ts <= endTime INTERVAL(20s) Fill(linear)) INTERVAL(20s)
```
-Remark:
+Remarks:
1. The value in Interval needs to be the same as the interval value in the outer query.
-2. The interpolation processing in TDengine needs to use subqueries to assist in the completion. As shown above, it is enough to specify the interpolation type in the inner query. Since the interpolation of the values in OpenTSDB uses linear interpolation, use fill( in the interpolation clause. linear) to declare the interpolation type. The following functions with the exact interpolation calculation requirements are processed by this method.
-3. The parameter 20s in Interval indicates that the inner query will generate results according to a time window of 20 seconds. In an actual query, it needs to adjust to the time interval between different records. It ensures that producing interpolation results equivalent to the original data.
-4. Due to the particular interpolation strategy and mechanism of OpenTSDB, the method of the first interpolation and then calculation in the aggregate query (Aggregate) makes the calculation results impossible to be utterly consistent with TDengine. But in the case of downsampling (Downsample), TDengine and OpenTSDB can obtain consistent results (since OpenTSDB performs aggregation and downsampling queries).
+2. Interpolation processing in TDengine uses subqueries to assist in completion. As shown above, it is enough to specify the interpolation type in the inner query. Since OpenTSDB uses linear interpolation, use `fill(linear)` to declare the interpolation type in TDengine. Some of the functions mentioned below have exactly the same interpolation calculation requirements.
+3. The parameter 20s in Interval indicates that the inner query will generate results according to a time window of 20 seconds. In an actual query, it needs to adjust to the time interval between different records. It ensures that interpolation results are equivalent to the original data.
+4. Due to the particular interpolation strategy and mechanism of OpenTSDB i.e. interpolation followed by aggregate calculation, it is impossible for the results to be completely consistent with those of TDengine. But in the case of downsampling (Downsample), TDengine and OpenTSDB can obtain consistent results (since OpenTSDB performs aggregation and downsampling queries).
### Count
@@ -261,7 +262,7 @@ Select apercentile(col1, 50, “t-digest”) from table_name
Remark:
-1. During the approximate query processing, OpenTSDB uses the t-digest algorithm by default, so in order to obtain the same calculation result, the algorithm used needs to be specified in the `apercentile()` function. TDengine can support two different approximation processing algorithms, declared by "default" and "t-digest" respectively.
+1. When calculating estimate percentiles, OpenTSDB uses the t-digest algorithm by default. In order to obtain the same calculation results in TDengine, the algorithm used needs to be specified in the `apercentile()` function. TDengine can support two different percentile calculation algorithms named "default" and "t-digest" respectively.
### First
@@ -379,35 +380,34 @@ We still use the hypothetical environment from Chapter 4. There are three measur
### Storage resource estimation
Assuming that the number of sensor devices that generate data and need to be stored is `n`, the frequency of data generation is `t` per second, and the length of each record is `L` bytes, the scale of data generated per day is `n * t * L` bytes. Assuming the compression ratio is `C`, the daily data size is `(n * t * L)/C` bytes. The storage resources are estimated to accommodate the data scale for 1.5 years. In the production environment, the compression ratio C of TDengine is generally between 5 and 7.
-With additional 20% redundancy, you can calculate the required storage resources:
+With additional 20% redundancy, you can calculate the required storage resources:
```matlab
(n * t * L) * (365 * 1.5) * (1+20%)/C
````
-
-Combined with the above calculation formula, bring the parameters into the formula, and the raw data scale generated every year is 11.8TB without considering the label information. Note that since tag information is associated with each timeline in TDengine, not every record. The scale of the amount of data to be recorded is somewhat reduced relative to the generated data, and this part of label data can be ignored as a whole. Assuming a compression ratio of 5, the size of the retained data ends up being 2.56 TB.
+Substituting in the above formula, the raw data generated every year is 11.8TB without considering the label information. Note that tag information is associated with each timeline in TDengine, not every record. The amount of data to be recorded is somewhat reduced relative to the generated data, and label data can be ignored as a whole. Assuming a compression ratio of 5, the size of the retained data ends up being 2.56 TB.
### Storage Device Selection Considerations
-The hard disk should be capable of better random read performance. Considering using an SSD as much as possible is a better choice. A disk with better random read performance is a great help to improve the system's query performance and improve the query response performance as a whole system. To obtain better query performance, the performance index of the single-threaded random read IOPS of the hard disk device should not be lower than 1000, and it is better to reach 5000 IOPS or more. Recommend to use `fio` utility software to evaluate the running performance (please refer to Appendix 1 for specific usage) for the random IO read of the current device to confirm whether it can meet the requirements of random read of large files.
+A disk with better random read performance, such as an SSD, improves the system's query performance and improves the query response performance of the whole system. To obtain better query performance, the performance index of the single-threaded random read IOPS of the hard disk device should not be lower than 1000, and it is better to reach 5000 IOPS or more. We recommend using `fio` utility software to evaluate the running performance (please refer to Appendix 1 for specific usage) for the random IO read of the current device to confirm whether it can meet the requirements of random read of large files.
Hard disk writing performance has little effect on TDengine. The TDengine writing process adopts the append write mode, so as long as it has good sequential write performance, both SAS hard disks and SSDs in the general sense can well meet TDengine's requirements for disk write performance.
### Computational resource estimates
-Due to the particularity of IoT data, after the frequency of data generation is consistent, the writing process of TDengine maintains a relatively fixed amount of resource consumption (computing and storage). According to the [TDengine Operation and Maintenance Guide](/operation/) description, the system consumes less than 1 CPU core at 22,000 writes per second.
+Due to the characteristics of IoT data, when the frequency of data generation is consistent, the writing process of TDengine maintains a relatively fixed amount of resource consumption (computing and storage). According to the [TDengine Operation and Maintenance Guide](/operation/) description, the system consumes less than 1 CPU core at 22,000 writes per second.
-In estimating the CPU resources consumed by the query, assuming that the application requires the database to provide 10,000 QPS, the CPU time consumed by each query is about 1 ms. The query provided by each core per second is 1,000 QPS, which satisfies 10,000 QPS. The query request requires at least 10 cores. For the system as a whole system to have less than 50% CPU load, the entire cluster needs twice as many as 10 cores or 20 cores.
+In estimating the CPU resources consumed by the query, assuming that the application requires the database to provide 10,000 QPS, the CPU time consumed by each query is about 1 ms. The query provided by each core per second is 1,000 QPS, which satisfies 10,000 QPS. The query request requires at least 10 cores. For the system as a whole system to have less than 50% CPU load, the entire cluster needs twice as many cores i.e. 20 cores.
### Memory resource estimation
-The database allocates 16MB\*3 buffer memory for each Vnode by default. If the cluster system includes 22 CPU cores, TDengine will create 22 Vnodes (virtual nodes) by default. Each Vnode contains 1000 tables, which can accommodate all the tables. Then it takes about 1.5 hours to write a block, which triggers the drop, and no adjustment is required. A total of 22 Vnodes require about 1GB of memory cache. Considering the memory needed for the query, assuming that the memory overhead of each query is about 50MB, the memory required for 500 queries concurrently is about 25GB.
+The database allocates 16MB\*3 buffer memory for each Vnode by default. If the cluster system includes 22 CPU cores, TDengine will create 22 Vnodes (virtual nodes) by default. Each Vnode contains 1000 tables, which is more than enough to accommodate all the tables in our hypothetical scenario. Then it takes about 1.5 hours to write a block, which triggers persistence to disk without requiring any adjustment. A total of 22 Vnodes require about 1GB of memory cache. Considering the memory needed for the query, assuming that the memory overhead of each query is about 50MB, the memory required for 500 queries concurrently is about 25GB.
In summary, using a single 16-core 32GB machine or a cluster of 2 8-core 16GB machines is enough.
## Appendix 3: Cluster Deployment and Startup
-TDengine provides a wealth of help documents to explain many aspects of cluster installation and deployment. Here is the list of corresponding document for your reference.
+TDengine provides a wealth of help documents to explain many aspects of cluster installation and deployment. Here is the list of documents for your reference.
### Cluster Deployment
@@ -421,7 +421,7 @@ To ensure that the system can obtain the necessary information for regular opera
FQDN, firstEp, secondEP, dataDir, logDir, tmpDir, serverPort. For the specific meaning and setting requirements of each parameter, please refer to the document "[TDengine Cluster Installation and Management](/cluster/)"
-Follow the same steps to set parameters on the nodes that need running, start the taosd service, and then add Dnodes to the cluster.
+Follow the same steps to set parameters on the other nodes, start the taosd service, and then add Dnodes to the cluster.
Finally, start `taos` and execute the `show dnodes` command. If you can see all the nodes that have joined the cluster, the cluster building process was successfully completed. For specific operation procedures and precautions, please refer to the document "[TDengine Cluster Installation and Management](/cluster/)".
diff --git a/docs-en/25-application/IT-DevOps-Solutions-Collectd-StatsD.webp b/docs-en/25-application/IT-DevOps-Solutions-Collectd-StatsD.webp
new file mode 100644
index 0000000000000000000000000000000000000000..147a65b17bff2aa0e44faa206618bdce5664e1ca
Binary files /dev/null and b/docs-en/25-application/IT-DevOps-Solutions-Collectd-StatsD.webp differ
diff --git a/docs-en/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.webp b/docs-en/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.webp
new file mode 100644
index 0000000000000000000000000000000000000000..3ca99c835b33df8845adf1b52d8fb8eb63076e82
Binary files /dev/null and b/docs-en/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.webp differ
diff --git a/docs-en/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.webp b/docs-en/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.webp
new file mode 100644
index 0000000000000000000000000000000000000000..04811f61b9b318e129552d87cd48eabf6e99feab
Binary files /dev/null and b/docs-en/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.webp differ
diff --git a/docs-en/25-application/IT-DevOps-Solutions-Immigrate-TDengine-Arch.webp b/docs-en/25-application/IT-DevOps-Solutions-Immigrate-TDengine-Arch.webp
new file mode 100644
index 0000000000000000000000000000000000000000..36930068758556f4de5b58321804a96401c64b22
Binary files /dev/null and b/docs-en/25-application/IT-DevOps-Solutions-Immigrate-TDengine-Arch.webp differ
diff --git a/docs-en/25-application/IT-DevOps-Solutions-Telegraf.webp b/docs-en/25-application/IT-DevOps-Solutions-Telegraf.webp
new file mode 100644
index 0000000000000000000000000000000000000000..fd5461ec9b37be66cac4c17fb1f81fec76158330
Binary files /dev/null and b/docs-en/25-application/IT-DevOps-Solutions-Telegraf.webp differ
diff --git a/docs-en/25-application/IT-DevOps-Solutions-collectd-dashboard.webp b/docs-en/25-application/IT-DevOps-Solutions-collectd-dashboard.webp
new file mode 100644
index 0000000000000000000000000000000000000000..879c27a1a5843c714ff3c33c1dccfa32a2154b82
Binary files /dev/null and b/docs-en/25-application/IT-DevOps-Solutions-collectd-dashboard.webp differ
diff --git a/docs-en/25-application/IT-DevOps-Solutions-statsd-dashboard.webp b/docs-en/25-application/IT-DevOps-Solutions-statsd-dashboard.webp
new file mode 100644
index 0000000000000000000000000000000000000000..1d4c655970b5f3fcb3be2d65d67eb42f08f35862
Binary files /dev/null and b/docs-en/25-application/IT-DevOps-Solutions-statsd-dashboard.webp differ
diff --git a/docs-en/25-application/IT-DevOps-Solutions-telegraf-dashboard.webp b/docs-en/25-application/IT-DevOps-Solutions-telegraf-dashboard.webp
new file mode 100644
index 0000000000000000000000000000000000000000..105afcdb8312b23675f62ff6339d5e737b5cd958
Binary files /dev/null and b/docs-en/25-application/IT-DevOps-Solutions-telegraf-dashboard.webp differ
diff --git a/docs-en/27-train-faq/01-faq.md b/docs-en/27-train-faq/01-faq.md
index 439775170937ef11fc964914232b2739d688b26f..e182e25b9e98bad11b9c90146400e3720605489e 100644
--- a/docs-en/27-train-faq/01-faq.md
+++ b/docs-en/27-train-faq/01-faq.md
@@ -5,38 +5,38 @@ title: Frequently Asked Questions
## Submit an Issue
-If the tips in FAQ don't help much, please submit an issue on [GitHub](https://github.com/taosdata/TDengine) to describe your problem description, including TDengine version, hardware and OS information, the steps to reproduce the problem, etc. It would be very helpful if you package the contents in `/var/log/taos` and `/etc/taos` and upload. These two are the default directories used by TDengine, if they have been changed in your configuration, please use according to the actual configuration. It's recommended to firstly set `debugFlag` to 135 in `taos.cfg`, restart `taosd`, then reproduce the problem and collect logs. If you don't want to restart, an alternative way of setting `debugFlag` is executing `alter dnode debugFlag 135` command in TDengine CLI `taos`. During normal running, however, please make sure `debugFlag` is set to 131.
+If the tips in FAQ don't help much, please submit an issue on [GitHub](https://github.com/taosdata/TDengine) to describe your problem. In your description please include the TDengine version, hardware and OS information, the steps to reproduce the problem and any other relevant information. It would be very helpful if you can package the contents in `/var/log/taos` and `/etc/taos` and upload. These two are the default directories used by TDengine. If you have changed the default directories in your configuration, please package the files in your configured directories. We recommended setting `debugFlag` to 135 in `taos.cfg`, restarting `taosd`, then reproducing the problem and collecting the logs. If you don't want to restart, an alternative way of setting `debugFlag` is executing `alter dnode debugFlag 135` command in TDengine CLI `taos`. During normal running, however, please make sure `debugFlag` is set to 131.
## Frequently Asked Questions
### 1. How to upgrade to TDengine 2.0 from older version?
-version 2.x is not compatible with version 1.x regarding configuration file and data file, please do following before upgrading:
+version 2.x is not compatible with version 1.x. With regard to the configuration and data files, please perform the following steps before upgrading. Please follow data integrity, security, backup and other relevant SOPs, best practices before removing/deleting any data.
-1. Delete configuration files: `sudo rm -rf /etc/taos/taos.cfg`
+1. Delete configuration files: `sudo rm -rf /etc/taos/taos.cfg`
2. Delete log files: `sudo rm -rf /var/log/taos/`
3. Delete data files if the data doesn't need to be kept: `sudo rm -rf /var/lib/taos/`
-4. Install latests 2.x version
-5. If the data needs to be kept and migrated to newer version, please contact professional service of TDengine for assistance
+4. Install latest 2.x version
+5. If the data needs to be kept and migrated to newer version, please contact professional service at TDengine for assistance.
### 2. How to handle "Unable to establish connection"?
-When the client is unable to connect to the server, you can try following ways to find out why.
+When the client is unable to connect to the server, you can try the following ways to troubleshoot and resolve the problem.
1. Check the network
- - Check if the hosts where the client and server are running can be accessible to each other, for example by `ping` command.
- - Check if the TCP/UDP on port 6030-6042 are open for access if firewall is enabled. It's better to firstly disable firewall for diagnostics.
- - Check if the FQDN and serverPort are configured correctly in `taos.cfg` used by the server side
- - Check if the `firstEp` is set properly in the `taos.cfg` used by the client side
+ - Check if the hosts where the client and server are running are accessible to each other, for example by `ping` command.
+ - Check if the TCP/UDP on port 6030-6042 are open for access if firewall is enabled. If possible, disable the firewall for diagnostics, but please ensure that you are following security and other relevant protocols.
+ - Check if the FQDN and serverPort are configured correctly in `taos.cfg` used by the server side.
+ - Check if the `firstEp` is set properly in the `taos.cfg` used by the client side.
2. Make sure the client version and server version are same.
3. On server side, check the running status of `taosd` by executing `systemctl status taosd` . If your server is started using another way instead of `systemctl`, use the proper method to check whether the server process is running normally.
-4. If using connector of Python, Java, Go, Rust, C#, node.JS on Linux to connect toe the server, please make sure `libtaos.so` is in directory `/usr/local/taos/driver` and `/usr/local/taos/driver` is in system lib search environment variable `LD_LIBRARY_PATH`.
+4. If using connector of Python, Java, Go, Rust, C#, node.JS on Linux to connect to the server, please make sure `libtaos.so` is in directory `/usr/local/taos/driver` and `/usr/local/taos/driver` is in system lib search environment variable `LD_LIBRARY_PATH`.
-5. If using connector on Windows, please make sure `C:\TDengine\driver\taos.dll` is in your system lib search path, it's suggested to put `taos.dll` under `C:\Windows\System32`.
+5. If using connector on Windows, please make sure `C:\TDengine\driver\taos.dll` is in your system lib search path. We recommend putting `taos.dll` under `C:\Windows\System32`.
6. Some advanced network diagnostics tools
@@ -45,7 +45,7 @@ When the client is unable to connect to the server, you can try following ways t
Check whether a TCP port on server side is open: `nc -l {port}`
Check whether a TCP port on client side is open: `nc {hostIP} {port}`
- - On Windows system `Net-TestConnection -ComputerName {fqdn} -Port {port}` on PowerShell can be used to check whether the port on serer side is open for access.
+ - On Windows system `Net-TestConnection -ComputerName {fqdn} -Port {port}` on PowerShell can be used to check whether the port on server side is open for access.
7. TDengine CLI `taos` can also be used to check network, please refer to [TDengine CLI](/reference/taos-shell).
diff --git a/docs-en/27-train-faq/03-docker.md b/docs-en/27-train-faq/03-docker.md
index ba435a9307c1d6595579a295df83030c58ba0f22..afee13c1377b0b4331d6f7ec20251d1aa2db81a1 100644
--- a/docs-en/27-train-faq/03-docker.md
+++ b/docs-en/27-train-faq/03-docker.md
@@ -3,15 +3,15 @@ sidebar_label: TDengine in Docker
title: Deploy TDengine in Docker
---
-Even though it's not recommended to deploy TDengine using docker in production system, docker is still very useful in development environment, especially when your host is not Linux. From version 2.0.14.0, the official image of TDengine can support X86-64, X86, arm64, and rm32 .
+We do not recommend deploying TDengine using Docker in a production system. However, Docker is still very useful in a development environment, especially when your host is not Linux. From version 2.0.14.0, the official image of TDengine can support X86-64, X86, arm64, and rm32 .
-In this chapter a simple step by step guide of using TDengine in docker is introduced.
+In this chapter we introduce a simple step by step guide to use TDengine in Docker.
## Install Docker
-The installation of docker please refer to [Get Docker](https://docs.docker.com/get-docker/).
+To install Docker please refer to [Get Docker](https://docs.docker.com/get-docker/).
-After docker is installed, you can check whether Docker is installed properly by displaying Docker version.
+After Docker is installed, you can check whether Docker is installed properly by displaying Docker version.
```bash
$ docker -v
@@ -27,7 +27,7 @@ $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdeng
526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd
```
-In the above command, a docker container is started to run TDengine server, the port range 6030-6049 of the container is mapped to host port range 6030-6049. If port range 6030-6049 has been occupied on the host, please change to an available host port range. Regarding the requirements about ports on the host, please refer to [Port Configuration](/reference/config/#serverport).
+In the above command, a docker container is started to run TDengine server, the port range 6030-6049 of the container is mapped to host port range 6030-6049. If port range 6030-6049 has been occupied on the host, please change to an available host port range. For port requirements on the host, please refer to [Port Configuration](/reference/config/#serverport).
- **docker run**: Launch a docker container
- **-d**: the container will run in background mode
@@ -95,7 +95,7 @@ In TDengine CLI, SQL commands can be executed to create/drop databases, tables,
### Access TDengine from host
-If `-p` used to map ports properly between host and container, it's also able to access TDengine in container from the host as long as `firstEp` is configured correctly for the client on host.
+If option `-p` used to map ports properly between host and container, it's also able to access TDengine in container from the host as long as `firstEp` is configured correctly for the client on host.
```
$ taos
@@ -118,7 +118,7 @@ Output is like below:
{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep0,keep1,keep(D)",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["test","2021-08-18 06:01:11.021",10000,4,1,1,10,"3650,3650,3650",16,6,100,4096,1,3000,2,0,"ms",0,"ready"],["log","2021-08-18 05:51:51.065",4,1,1,1,10,"30,30,30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":2}
```
-For details of REST API please refer to [REST API]](/reference/rest-api/).
+For details of REST API please refer to [REST API](/reference/rest-api/).
### Run TDengine server and taosAdapter inside container
@@ -265,13 +265,13 @@ Below is an example output:
$ taos> select groupid, location from test.d0;
groupid | location |
=================================
- 0 | shanghai |
+ 0 | California.SanDiego |
Query OK, 1 row(s) in set (0.003490s)
```
### Access TDengine from 3rd party tools
-A lot of 3rd party tools can be used to write data into TDengine through `taosAdapter` , for details please refer to [3rd party tools](/third-party/).
+A lot of 3rd party tools can be used to write data into TDengine through `taosAdapter`, for details please refer to [3rd party tools](/third-party/).
There is nothing different from the 3rd party side to access TDengine server inside a container, as long as the end point is specified correctly, the end point should be the FQDN and the mapped port of the host.
diff --git a/docs-examples/c/async_query_example.c b/docs-examples/c/async_query_example.c
index 262757f02b5c52f2d4402d363663db80bb38a54d..b370420b124a21b05f8e0b4041fb1461b1e2478a 100644
--- a/docs-examples/c/async_query_example.c
+++ b/docs-examples/c/async_query_example.c
@@ -182,14 +182,14 @@ int main() {
// query callback ...
// ts current voltage phase location groupid
// numOfRow = 8
-// 1538548685000 10.300000 219 0.310000 beijing.chaoyang 2
-// 1538548695000 12.600000 218 0.330000 beijing.chaoyang 2
-// 1538548696800 12.300000 221 0.310000 beijing.chaoyang 2
-// 1538548696650 10.300000 218 0.250000 beijing.chaoyang 3
-// 1538548685500 11.800000 221 0.280000 beijing.haidian 2
-// 1538548696600 13.400000 223 0.290000 beijing.haidian 2
-// 1538548685000 10.800000 223 0.290000 beijing.haidian 3
-// 1538548686500 11.500000 221 0.350000 beijing.haidian 3
+// 1538548685500 11.800000 221 0.280000 california.losangeles 2
+// 1538548696600 13.400000 223 0.290000 california.losangeles 2
+// 1538548685000 10.800000 223 0.290000 california.losangeles 3
+// 1538548686500 11.500000 221 0.350000 california.losangeles 3
+// 1538548685000 10.300000 219 0.310000 california.sanfrancisco 2
+// 1538548695000 12.600000 218 0.330000 california.sanfrancisco 2
+// 1538548696800 12.300000 221 0.310000 california.sanfrancisco 2
+// 1538548696650 10.300000 218 0.250000 california.sanfrancisco 3
// numOfRow = 0
// no more data, close the connection.
// ANCHOR_END: demo
\ No newline at end of file
diff --git a/docs-examples/c/insert_example.c b/docs-examples/c/insert_example.c
index ca12be9314efbda707dbd05449c746794c209743..ce8fdc5b9372aec7b02d3c9254ec25c4c4f62adc 100644
--- a/docs-examples/c/insert_example.c
+++ b/docs-examples/c/insert_example.c
@@ -36,10 +36,10 @@ int main() {
executeSQL(taos, "CREATE DATABASE power");
executeSQL(taos, "USE power");
executeSQL(taos, "CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)");
- executeSQL(taos, "INSERT INTO d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)"
- "d1002 USING meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)"
- "d1003 USING meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)"
- "d1004 USING meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)");
+ executeSQL(taos, "INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)"
+ "d1002 USING meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)"
+ "d1003 USING meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)"
+ "d1004 USING meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)");
taos_close(taos);
taos_cleanup();
}
diff --git a/docs-examples/c/json_protocol_example.c b/docs-examples/c/json_protocol_example.c
index 182fd201308facc80c76f36cfa57580784d70413..9d276127a64c3d74322e30587ab2e319c29cbf65 100644
--- a/docs-examples/c/json_protocol_example.c
+++ b/docs-examples/c/json_protocol_example.c
@@ -29,11 +29,11 @@ int main() {
executeSQL(taos, "USE test");
char *line =
"[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": "
- "\"Beijing.Chaoyang\", \"groupid\": 2}},{\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, "
- "\"value\": 219, \"tags\": {\"location\": \"Beijing.Haidian\", \"groupid\": 1}},{\"metric\": \"meters.current\", "
- "\"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"Beijing.Chaoyang\", \"groupid\": "
+ "\"California.SanFrancisco\", \"groupid\": 2}},{\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, "
+ "\"value\": 219, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}},{\"metric\": \"meters.current\", "
+ "\"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": "
"2}},{\"metric\": \"meters.voltage\", \"timestamp\": 1648432611250, \"value\": 221, \"tags\": {\"location\": "
- "\"Beijing.Haidian\", \"groupid\": 1}}]";
+ "\"California.LosAngeles\", \"groupid\": 1}}]";
char *lines[] = {line};
TAOS_RES *res = taos_schemaless_insert(taos, lines, 1, TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
diff --git a/docs-examples/c/line_example.c b/docs-examples/c/line_example.c
index 8dd4b1a5075369625645959da0476b76b9fbf290..ce39f8d9df744082a450ce246529bf56adebd1e0 100644
--- a/docs-examples/c/line_example.c
+++ b/docs-examples/c/line_example.c
@@ -27,10 +27,10 @@ int main() {
executeSQL(taos, "DROP DATABASE IF EXISTS test");
executeSQL(taos, "CREATE DATABASE test");
executeSQL(taos, "USE test");
- char *lines[] = {"meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249",
- "meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250",
- "meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249",
- "meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250"};
+ char *lines[] = {"meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249",
+ "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250",
+ "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249",
+ "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250"};
TAOS_RES *res = taos_schemaless_insert(taos, lines, 4, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS);
if (taos_errno(res) != 0) {
printf("failed to insert schema-less data, reason: %s\n", taos_errstr(res));
diff --git a/docs-examples/c/multi_bind_example.c b/docs-examples/c/multi_bind_example.c
index fe11df9caad3e216fbd0b1ff2f40a54fe3ba86e5..02e6568e9e88ac8703a4993ed406e770d23c2438 100644
--- a/docs-examples/c/multi_bind_example.c
+++ b/docs-examples/c/multi_bind_example.c
@@ -52,7 +52,7 @@ void insertData(TAOS *taos) {
checkErrorCode(stmt, code, "failed to execute taos_stmt_prepare");
// bind table name and tags
TAOS_BIND tags[2];
- char *location = "Beijing.Chaoyang";
+ char *location = "California.SanFrancisco";
int groupId = 2;
tags[0].buffer_type = TSDB_DATA_TYPE_BINARY;
tags[0].buffer_length = strlen(location);
diff --git a/docs-examples/c/query_example.c b/docs-examples/c/query_example.c
index f88b2467ceb3d9bbeaf6b3beb6a24befd3e398c6..fcae95bcd45a282eaa3ae911b4115e6300c6af8e 100644
--- a/docs-examples/c/query_example.c
+++ b/docs-examples/c/query_example.c
@@ -139,5 +139,5 @@ int main() {
// output:
// ts current voltage phase location groupid
-// 1648432611249 10.300000 219 0.310000 Beijing.Chaoyang 2
-// 1648432611749 12.600000 218 0.330000 Beijing.Chaoyang 2
\ No newline at end of file
+// 1648432611249 10.300000 219 0.310000 California.SanFrancisco 2
+// 1648432611749 12.600000 218 0.330000 California.SanFrancisco 2
\ No newline at end of file
diff --git a/docs-examples/c/stmt_example.c b/docs-examples/c/stmt_example.c
index fab1506f953ef68050e4318406fa2ba1a0202929..28dae5f9d5ea2faec0aa3c0a784d39e252651c65 100644
--- a/docs-examples/c/stmt_example.c
+++ b/docs-examples/c/stmt_example.c
@@ -59,7 +59,7 @@ void insertData(TAOS *taos) {
checkErrorCode(stmt, code, "failed to execute taos_stmt_prepare");
// bind table name and tags
TAOS_BIND tags[2];
- char* location = "Beijing.Chaoyang";
+ char* location = "California.SanFrancisco";
int groupId = 2;
tags[0].buffer_type = TSDB_DATA_TYPE_BINARY;
tags[0].buffer_length = strlen(location);
diff --git a/docs-examples/c/telnet_line_example.c b/docs-examples/c/telnet_line_example.c
index 913d433f6aec07b3bce115d45536ffa4b45a0481..da62da4ba492856b0d73a564c1bf9cdd60b5b742 100644
--- a/docs-examples/c/telnet_line_example.c
+++ b/docs-examples/c/telnet_line_example.c
@@ -28,14 +28,14 @@ int main() {
executeSQL(taos, "CREATE DATABASE test");
executeSQL(taos, "USE test");
char *lines[] = {
- "meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2",
- "meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2",
- "meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3",
- "meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3",
- "meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2",
- "meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2",
- "meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3",
- "meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3",
+ "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2",
+ "meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2",
+ "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3",
+ "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3",
+ "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2",
+ "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2",
+ "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3",
+ "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3",
};
TAOS_RES *res = taos_schemaless_insert(taos, lines, 8, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
if (taos_errno(res) != 0) {
diff --git a/docs-examples/csharp/AsyncQueryExample.cs b/docs-examples/csharp/AsyncQueryExample.cs
index fe30d21efe82e8d1dc414bd4723227ca93bc944f..3dabbebd1630a207af2e1b1b11cc4ba15bdd94a9 100644
--- a/docs-examples/csharp/AsyncQueryExample.cs
+++ b/docs-examples/csharp/AsyncQueryExample.cs
@@ -224,15 +224,15 @@ namespace TDengineExample
}
//output:
-//Connect to TDengine success
-//8 rows async retrieved
-
-//1538548685000 | 10.3 | 219 | 0.31 | beijing.chaoyang | 2 |
-//1538548695000 | 12.6 | 218 | 0.33 | beijing.chaoyang | 2 |
-//1538548696800 | 12.3 | 221 | 0.31 | beijing.chaoyang | 2 |
-//1538548696650 | 10.3 | 218 | 0.25 | beijing.chaoyang | 3 |
-//1538548685500 | 11.8 | 221 | 0.28 | beijing.haidian | 2 |
-//1538548696600 | 13.4 | 223 | 0.29 | beijing.haidian | 2 |
-//1538548685000 | 10.8 | 223 | 0.29 | beijing.haidian | 3 |
-//1538548686500 | 11.5 | 221 | 0.35 | beijing.haidian | 3 |
-//async retrieve complete.
\ No newline at end of file
+// Connect to TDengine success
+// 8 rows async retrieved
+
+// 1538548685500 | 11.8 | 221 | 0.28 | california.losangeles | 2 |
+// 1538548696600 | 13.4 | 223 | 0.29 | california.losangeles | 2 |
+// 1538548685000 | 10.8 | 223 | 0.29 | california.losangeles | 3 |
+// 1538548686500 | 11.5 | 221 | 0.35 | california.losangeles | 3 |
+// 1538548685000 | 10.3 | 219 | 0.31 | california.sanfrancisco | 2 |
+// 1538548695000 | 12.6 | 218 | 0.33 | california.sanfrancisco | 2 |
+// 1538548696800 | 12.3 | 221 | 0.31 | california.sanfrancisco | 2 |
+// 1538548696650 | 10.3 | 218 | 0.25 | california.sanfrancisco | 3 |
+// async retrieve complete.
\ No newline at end of file
diff --git a/docs-examples/csharp/InfluxDBLineExample.cs b/docs-examples/csharp/InfluxDBLineExample.cs
index 7aad08825209db568d61e5963ec7a00034ab7ca7..7b4453f4ac0b14dd76d166e395bdacb46a5d3fbc 100644
--- a/docs-examples/csharp/InfluxDBLineExample.cs
+++ b/docs-examples/csharp/InfluxDBLineExample.cs
@@ -9,10 +9,10 @@ namespace TDengineExample
IntPtr conn = GetConnection();
PrepareDatabase(conn);
string[] lines = {
- "meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249",
- "meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250",
- "meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249",
- "meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250"
+ "meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249",
+ "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250",
+ "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249",
+ "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250"
};
IntPtr res = TDengine.SchemalessInsert(conn, lines, lines.Length, (int)TDengineSchemalessProtocol.TSDB_SML_LINE_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_MILLI_SECONDS);
if (TDengine.ErrorNo(res) != 0)
diff --git a/docs-examples/csharp/OptsJsonExample.cs b/docs-examples/csharp/OptsJsonExample.cs
index d774a325afa1a8d93eb858f23dcd97dd29f8653d..2c41acc5c9628befda7eb4ad5c30af5b921de948 100644
--- a/docs-examples/csharp/OptsJsonExample.cs
+++ b/docs-examples/csharp/OptsJsonExample.cs
@@ -8,10 +8,10 @@ namespace TDengineExample
{
IntPtr conn = GetConnection();
PrepareDatabase(conn);
- string[] lines = { "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": \"Beijing.Chaoyang\", \"groupid\": 2}}," +
- " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, \"value\": 219, \"tags\": {\"location\": \"Beijing.Haidian\", \"groupid\": 1}}, " +
- "{\"metric\": \"meters.current\", \"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"Beijing.Chaoyang\", \"groupid\": 2}}," +
- " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611250, \"value\": 221, \"tags\": {\"location\": \"Beijing.Haidian\", \"groupid\": 1}}]"
+ string[] lines = { "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," +
+ " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, \"value\": 219, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}, " +
+ "{\"metric\": \"meters.current\", \"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," +
+ " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611250, \"value\": 221, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}]"
};
IntPtr res = TDengine.SchemalessInsert(conn, lines, 1, (int)TDengineSchemalessProtocol.TSDB_SML_JSON_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
diff --git a/docs-examples/csharp/OptsTelnetExample.cs b/docs-examples/csharp/OptsTelnetExample.cs
index 81608c32213fa0618a2ca6e0769aacf8e9c8e64d..bb752db1afbbb2ef68df9ca25314c8b91cd9a266 100644
--- a/docs-examples/csharp/OptsTelnetExample.cs
+++ b/docs-examples/csharp/OptsTelnetExample.cs
@@ -9,14 +9,14 @@ namespace TDengineExample
IntPtr conn = GetConnection();
PrepareDatabase(conn);
string[] lines = {
- "meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2",
- "meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2",
- "meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3",
- "meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3",
- "meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2",
- "meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2",
- "meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3",
- "meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3",
+ "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2",
+ "meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2",
+ "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3",
+ "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3",
+ "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2",
+ "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2",
+ "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3",
+ "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3",
};
IntPtr res = TDengine.SchemalessInsert(conn, lines, lines.Length, (int)TDengineSchemalessProtocol.TSDB_SML_TELNET_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
if (TDengine.ErrorNo(res) != 0)
diff --git a/docs-examples/csharp/QueryExample.cs b/docs-examples/csharp/QueryExample.cs
index f00e391100c7ce42177e2987f5b0b32dc02262c4..97f0c456d412e2ed608c345ba87469d3f5ccfc15 100644
--- a/docs-examples/csharp/QueryExample.cs
+++ b/docs-examples/csharp/QueryExample.cs
@@ -158,5 +158,5 @@ namespace TDengineExample
// Connect to TDengine success
// fieldCount=6
// ts current voltage phase location groupid
-// 1648432611249 10.3 219 0.31 Beijing.Chaoyang 2
-// 1648432611749 12.6 218 0.33 Beijing.Chaoyang 2
\ No newline at end of file
+// 1648432611249 10.3 219 0.31 California.SanFrancisco 2
+// 1648432611749 12.6 218 0.33 California.SanFrancisco 2
\ No newline at end of file
diff --git a/docs-examples/csharp/SQLInsertExample.cs b/docs-examples/csharp/SQLInsertExample.cs
index fa2e2a50daf06f4d948479e7f5b0df82c517f809..d5462c1062e01fd5c93bac983696d0350117ad92 100644
--- a/docs-examples/csharp/SQLInsertExample.cs
+++ b/docs-examples/csharp/SQLInsertExample.cs
@@ -15,10 +15,10 @@ namespace TDengineExample
CheckRes(conn, res, "failed to change database");
res = TDengine.Query(conn, "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)");
CheckRes(conn, res, "failed to create stable");
- var sql = "INSERT INTO d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) " +
- "d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) " +
- "d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000)('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) " +
- "d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000)('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)";
+ var sql = "INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) " +
+ "d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) " +
+ "d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000)('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) " +
+ "d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000)('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)";
res = TDengine.Query(conn, sql);
CheckRes(conn, res, "failed to insert data");
int affectedRows = TDengine.AffectRows(res);
diff --git a/docs-examples/csharp/StmtInsertExample.cs b/docs-examples/csharp/StmtInsertExample.cs
index d6e00dd4ac54ab8dbfc33b93896d19fc585e7642..6ade424b95d64529b7a40a782de13e3106d0c78a 100644
--- a/docs-examples/csharp/StmtInsertExample.cs
+++ b/docs-examples/csharp/StmtInsertExample.cs
@@ -21,7 +21,7 @@ namespace TDengineExample
CheckStmtRes(res, "failed to prepare stmt");
// 2. bind table name and tags
- TAOS_BIND[] tags = new TAOS_BIND[2] { TaosBind.BindBinary("Beijing.Chaoyang"), TaosBind.BindInt(2) };
+ TAOS_BIND[] tags = new TAOS_BIND[2] { TaosBind.BindBinary("California.SanFrancisco"), TaosBind.BindInt(2) };
res = TDengine.StmtSetTbnameTags(stmt, "d1001", tags);
CheckStmtRes(res, "failed to bind table name and tags");
diff --git a/docs-examples/go/connect/cgoexample/main.go b/docs-examples/go/connect/cgoexample/main.go
index 8b9aba4ce4217c00605bc8796c788f3dd52805e6..ba7ed0f728a1cd546dbc3199ce4c0dc854ebee91 100644
--- a/docs-examples/go/connect/cgoexample/main.go
+++ b/docs-examples/go/connect/cgoexample/main.go
@@ -20,4 +20,4 @@ func main() {
// use
// var taosDSN = "root:taosdata@tcp(localhost:6030)/dbName"
-// if you want to connect to a default database.
+// if you want to connect a specified database named "dbName".
diff --git a/docs-examples/go/connect/restexample/main.go b/docs-examples/go/connect/restexample/main.go
index 9c05e7eed80dee4ae7e6b20637d265f388d7438d..1efc98b988c183c4c680884057bf2a72a9dd19e9 100644
--- a/docs-examples/go/connect/restexample/main.go
+++ b/docs-examples/go/connect/restexample/main.go
@@ -18,6 +18,6 @@ func main() {
defer taos.Close()
}
-// use
+// use
// var taosDSN = "root:taosdata@http(localhost:6041)/dbName"
-// if you want to connect to a default database.
+// if you want to connect a specified database named "dbName".
diff --git a/docs-examples/go/insert/json/main.go b/docs-examples/go/insert/json/main.go
index 47d9e9984adc05896fb9954ad3deffde3764b836..6be375270e32a5091c015f88de52c9dda2246b59 100644
--- a/docs-examples/go/insert/json/main.go
+++ b/docs-examples/go/insert/json/main.go
@@ -25,10 +25,10 @@ func main() {
defer conn.Close()
prepareDatabase(conn)
- payload := `[{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "Beijing.Chaoyang", "groupid": 2}},
- {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, "tags": {"location": "Beijing.Haidian", "groupid": 1}},
- {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, "tags": {"location": "Beijing.Chaoyang", "groupid": 2}},
- {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "Beijing.Haidian", "groupid": 1}}]`
+ payload := `[{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "California.SanFrancisco", "groupid": 2}},
+ {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, "tags": {"location": "California.LosAngeles", "groupid": 1}},
+ {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, "tags": {"location": "California.SanFrancisco", "groupid": 2}},
+ {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "California.LosAngeles", "groupid": 1}}]`
err = conn.OpenTSDBInsertJsonPayload(payload)
if err != nil {
diff --git a/docs-examples/go/insert/line/main.go b/docs-examples/go/insert/line/main.go
index bbc41468fe5f13d3e6f896445bb88f3eba584d0f..c17e1a5270850e6a8b497e0dbec4ae714ee1e2d6 100644
--- a/docs-examples/go/insert/line/main.go
+++ b/docs-examples/go/insert/line/main.go
@@ -25,10 +25,10 @@ func main() {
defer conn.Close()
prepareDatabase(conn)
var lines = []string{
- "meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249",
- "meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250",
- "meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249",
- "meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250",
+ "meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249",
+ "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250",
+ "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249",
+ "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250",
}
err = conn.InfluxDBInsertLines(lines, "ms")
diff --git a/docs-examples/go/insert/sql/main.go b/docs-examples/go/insert/sql/main.go
index 91386855334c1930af721e0b4f43395c6a6d8e82..6cd5f860e65f4fffd139668f69cc1772f5310eae 100644
--- a/docs-examples/go/insert/sql/main.go
+++ b/docs-examples/go/insert/sql/main.go
@@ -19,10 +19,10 @@ func createStable(taos *sql.DB) {
}
func insertData(taos *sql.DB) {
- sql := `INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
- power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
- power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
- power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)`
+ sql := `INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
+ power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
+ power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
+ power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)`
result, err := taos.Exec(sql)
if err != nil {
fmt.Println("failed to insert, err:", err)
diff --git a/docs-examples/go/insert/stmt/main.go b/docs-examples/go/insert/stmt/main.go
index c50200ebb427c4c64c2737cb8fe4c3d287551a34..7093fdf1e52bc5a14fc92cec995fd81e70717d9f 100644
--- a/docs-examples/go/insert/stmt/main.go
+++ b/docs-examples/go/insert/stmt/main.go
@@ -37,7 +37,7 @@ func main() {
checkErr(err, "failed to create prepare statement")
// bind table name and tags
- tagParams := param.NewParam(2).AddBinary([]byte("Beijing.Chaoyang")).AddInt(2)
+ tagParams := param.NewParam(2).AddBinary([]byte("California.SanFrancisco")).AddInt(2)
err = stmt.SetTableNameWithTags("d1001", tagParams)
checkErr(err, "failed to execute SetTableNameWithTags")
diff --git a/docs-examples/go/insert/telnet/main.go b/docs-examples/go/insert/telnet/main.go
index 879e6d5cece74fd0b7c815dd34614dca3c9d4544..91fafbe71adbf60d9341b903f5a25708b7011852 100644
--- a/docs-examples/go/insert/telnet/main.go
+++ b/docs-examples/go/insert/telnet/main.go
@@ -25,14 +25,14 @@ func main() {
defer conn.Close()
prepareDatabase(conn)
var lines = []string{
- "meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2",
- "meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2",
- "meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3",
- "meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3",
- "meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2",
- "meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2",
- "meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3",
- "meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3",
+ "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2",
+ "meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2",
+ "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3",
+ "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3",
+ "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2",
+ "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2",
+ "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3",
+ "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3",
}
err = conn.OpenTSDBInsertTelnetLines(lines)
diff --git a/docs-examples/java/src/main/java/com/taos/example/JNIConnectExample.java b/docs-examples/java/src/main/java/com/taos/example/JNIConnectExample.java
index c6ce2ef9785a010daa55ad29415f81711760cd57..84292f7e8682dbb8171c807da74a603f4ae8256e 100644
--- a/docs-examples/java/src/main/java/com/taos/example/JNIConnectExample.java
+++ b/docs-examples/java/src/main/java/com/taos/example/JNIConnectExample.java
@@ -22,4 +22,4 @@ public class JNIConnectExample {
// use
// String jdbcUrl = "jdbc:TAOS://localhost:6030/dbName?user=root&password=taosdata";
-// if you want to connect to a default database.
\ No newline at end of file
+// if you want to connect a specified database named "dbName".
\ No newline at end of file
diff --git a/docs-examples/java/src/main/java/com/taos/example/JSONProtocolExample.java b/docs-examples/java/src/main/java/com/taos/example/JSONProtocolExample.java
index cb83424576a4fd7dfa09ea297294ed77b66bd12d..c8e649482fbd747cdc238daa9e7a237cf63295b6 100644
--- a/docs-examples/java/src/main/java/com/taos/example/JSONProtocolExample.java
+++ b/docs-examples/java/src/main/java/com/taos/example/JSONProtocolExample.java
@@ -23,10 +23,10 @@ public class JSONProtocolExample {
}
private static String getJSONData() {
- return "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": \"Beijing.Chaoyang\", \"groupid\": 2}}," +
- " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, \"value\": 219, \"tags\": {\"location\": \"Beijing.Haidian\", \"groupid\": 1}}, " +
- "{\"metric\": \"meters.current\", \"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"Beijing.Chaoyang\", \"groupid\": 2}}," +
- " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611250, \"value\": 221, \"tags\": {\"location\": \"Beijing.Haidian\", \"groupid\": 1}}]";
+ return "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," +
+ " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, \"value\": 219, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}, " +
+ "{\"metric\": \"meters.current\", \"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," +
+ " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611250, \"value\": 221, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}]";
}
public static void main(String[] args) throws SQLException {
diff --git a/docs-examples/java/src/main/java/com/taos/example/LineProtocolExample.java b/docs-examples/java/src/main/java/com/taos/example/LineProtocolExample.java
index 8a2eabe0a91f7966cc3cc6b7dfeeb71b71b88d92..990922b7a516bd32a7e299f5743bd1b5e321868a 100644
--- a/docs-examples/java/src/main/java/com/taos/example/LineProtocolExample.java
+++ b/docs-examples/java/src/main/java/com/taos/example/LineProtocolExample.java
@@ -12,11 +12,11 @@ import java.sql.Statement;
public class LineProtocolExample {
// format: measurement,tag_set field_set timestamp
private static String[] lines = {
- "meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000", // micro
+ "meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000", // micro
// seconds
- "meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500",
- "meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249300",
- "meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611249800",
+ "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500",
+ "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249300",
+ "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611249800",
};
private static Connection getConnection() throws SQLException {
diff --git a/docs-examples/java/src/main/java/com/taos/example/RestInsertExample.java b/docs-examples/java/src/main/java/com/taos/example/RestInsertExample.java
index de89f26cbe38f9343d60aeb8d3e9ce7f67c2e764..af97fe4373ca964260e5614f133f359e229b0e15 100644
--- a/docs-examples/java/src/main/java/com/taos/example/RestInsertExample.java
+++ b/docs-examples/java/src/main/java/com/taos/example/RestInsertExample.java
@@ -16,28 +16,28 @@ public class RestInsertExample {
private static List getRawData() {
return Arrays.asList(
- "d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,Beijing.Chaoyang,2",
- "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,Beijing.Chaoyang,2",
- "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,Beijing.Chaoyang,2",
- "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,Beijing.Chaoyang,3",
- "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,Beijing.Haidian,2",
- "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,Beijing.Haidian,2",
- "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,Beijing.Haidian,3",
- "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,Beijing.Haidian,3"
+ "d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,California.SanFrancisco,2",
+ "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,California.SanFrancisco,2",
+ "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,California.SanFrancisco,2",
+ "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,California.SanFrancisco,3",
+ "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,California.LosAngeles,2",
+ "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,California.LosAngeles,2",
+ "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,California.LosAngeles,3",
+ "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,California.LosAngeles,3"
);
}
/**
* The generated SQL is:
- * INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000)
- * power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000)
- * power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000)
- * power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000)
- * power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000)
- * power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000)
- * power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000)
- * power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000)
+ * INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000)
+ * power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000)
+ * power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000)
+ * power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000)
+ * power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000)
+ * power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000)
+ * power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000)
+ * power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000)
*/
private static String getSQL() {
StringBuilder sb = new StringBuilder("INSERT INTO ");
diff --git a/docs-examples/java/src/main/java/com/taos/example/RestQueryExample.java b/docs-examples/java/src/main/java/com/taos/example/RestQueryExample.java
index b1a1d224c6d9af2b83ac039726dcdb49a33ec2b0..a3581a1f4733e8bf3e3f561bb6cab5a725d8a1c0 100644
--- a/docs-examples/java/src/main/java/com/taos/example/RestQueryExample.java
+++ b/docs-examples/java/src/main/java/com/taos/example/RestQueryExample.java
@@ -51,5 +51,5 @@ public class RestQueryExample {
// possible output:
// avg(voltage) location
-// 222.0 Beijing.Haidian
-// 219.0 Beijing.Chaoyang
+// 222.0 California.LosAngeles
+// 219.0 California.SanFrancisco
diff --git a/docs-examples/java/src/main/java/com/taos/example/StmtInsertExample.java b/docs-examples/java/src/main/java/com/taos/example/StmtInsertExample.java
index 2a7ccebf41cae1a22d7516966e2c6ffb10011b64..bbcc92b22f67c31384b0fb7a082975eaac2ff2bc 100644
--- a/docs-examples/java/src/main/java/com/taos/example/StmtInsertExample.java
+++ b/docs-examples/java/src/main/java/com/taos/example/StmtInsertExample.java
@@ -30,14 +30,14 @@ public class StmtInsertExample {
private static List getRawData() {
return Arrays.asList(
- "d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,Beijing.Chaoyang,2",
- "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,Beijing.Chaoyang,2",
- "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,Beijing.Chaoyang,2",
- "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,Beijing.Chaoyang,3",
- "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,Beijing.Haidian,2",
- "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,Beijing.Haidian,2",
- "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,Beijing.Haidian,3",
- "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,Beijing.Haidian,3"
+ "d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,California.SanFrancisco,2",
+ "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,California.SanFrancisco,2",
+ "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,California.SanFrancisco,2",
+ "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,California.SanFrancisco,3",
+ "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,California.LosAngeles,2",
+ "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,California.LosAngeles,2",
+ "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,California.LosAngeles,3",
+ "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,California.LosAngeles,3"
);
}
diff --git a/docs-examples/java/src/main/java/com/taos/example/TelnetLineProtocolExample.java b/docs-examples/java/src/main/java/com/taos/example/TelnetLineProtocolExample.java
index 1431eccf16dabaac20f60ae7e971ef49707ba509..4c9368288df74f829121aeab5b925d1d083d29f0 100644
--- a/docs-examples/java/src/main/java/com/taos/example/TelnetLineProtocolExample.java
+++ b/docs-examples/java/src/main/java/com/taos/example/TelnetLineProtocolExample.java
@@ -11,14 +11,14 @@ import java.sql.Statement;
public class TelnetLineProtocolExample {
// format: =[ =]
- private static String[] lines = { "meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2",
- "meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2",
- "meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3",
- "meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3",
- "meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2",
- "meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2",
- "meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3",
- "meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3",
+ private static String[] lines = { "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2",
+ "meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2",
+ "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3",
+ "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3",
+ "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2",
+ "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2",
+ "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3",
+ "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3",
};
private static Connection getConnection() throws SQLException {
diff --git a/docs-examples/java/src/test/java/com/taos/test/TestAll.java b/docs-examples/java/src/test/java/com/taos/test/TestAll.java
index 92fe14a49d5f5ea5d7ea5f1d809867b3de0cc9d2..42db24485afec05298159f7b0c3a4e15835d98ed 100644
--- a/docs-examples/java/src/test/java/com/taos/test/TestAll.java
+++ b/docs-examples/java/src/test/java/com/taos/test/TestAll.java
@@ -23,16 +23,16 @@ public class TestAll {
String jdbcUrl = "jdbc:TAOS://localhost:6030?user=root&password=taosdata";
try (Connection conn = DriverManager.getConnection(jdbcUrl)) {
try (Statement stmt = conn.createStatement()) {
- String sql = "INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000)\n" +
- " power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 15:38:15.000',12.60000,218,0.33000)\n" +
- " power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 15:38:16.800',12.30000,221,0.31000)\n" +
- " power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES('2018-10-03 15:38:16.650',10.30000,218,0.25000)\n" +
- " power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 15:38:05.500',11.80000,221,0.28000)\n" +
- " power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 15:38:16.600',13.40000,223,0.29000)\n" +
- " power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 15:38:05.000',10.80000,223,0.29000)\n" +
- " power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 15:38:06.000',10.80000,223,0.29000)\n" +
- " power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 15:38:07.000',10.80000,223,0.29000)\n" +
- " power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 15:38:08.500',11.50000,221,0.35000)";
+ String sql = "INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000)\n" +
+ " power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 15:38:15.000',12.60000,218,0.33000)\n" +
+ " power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 15:38:16.800',12.30000,221,0.31000)\n" +
+ " power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 15:38:16.650',10.30000,218,0.25000)\n" +
+ " power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 15:38:05.500',11.80000,221,0.28000)\n" +
+ " power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 15:38:16.600',13.40000,223,0.29000)\n" +
+ " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:05.000',10.80000,223,0.29000)\n" +
+ " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:06.000',10.80000,223,0.29000)\n" +
+ " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:07.000',10.80000,223,0.29000)\n" +
+ " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:08.500',11.50000,221,0.35000)";
stmt.execute(sql);
}
diff --git a/docs-examples/node/nativeexample/influxdb_line_example.js b/docs-examples/node/nativeexample/influxdb_line_example.js
index a9fc6d11df0b335b92bb3292baaa017cb4bc42ea..2050bee54506a3ee6fe7d89de97b3b41334dd4a6 100644
--- a/docs-examples/node/nativeexample/influxdb_line_example.js
+++ b/docs-examples/node/nativeexample/influxdb_line_example.js
@@ -13,10 +13,10 @@ function createDatabase() {
function insertData() {
const lines = [
- "meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249",
- "meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250",
- "meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249",
- "meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250",
+ "meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249",
+ "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250",
+ "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249",
+ "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250",
];
cursor.schemalessInsert(
lines,
diff --git a/docs-examples/node/nativeexample/insert_example.js b/docs-examples/node/nativeexample/insert_example.js
index 85a353f889176655654d8c39c9a905054d3b6622..ade9d83158362cbf00a856b43a973de31def7601 100644
--- a/docs-examples/node/nativeexample/insert_example.js
+++ b/docs-examples/node/nativeexample/insert_example.js
@@ -11,10 +11,10 @@ try {
cursor.execute(
"CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)"
);
- var sql = `INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
-power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
-power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
-power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)`;
+ var sql = `INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
+power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
+power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
+power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)`;
cursor.execute(sql);
} finally {
cursor.close();
diff --git a/docs-examples/node/nativeexample/multi_bind_example.js b/docs-examples/node/nativeexample/multi_bind_example.js
index d52581ec8e10c6edfbc8fc8f7ca78512b5c93d74..6ef8b30c097393fef8c6a2837f8683c736b363f1 100644
--- a/docs-examples/node/nativeexample/multi_bind_example.js
+++ b/docs-examples/node/nativeexample/multi_bind_example.js
@@ -25,7 +25,7 @@ function insertData() {
// bind table name and tags
let tagBind = new taos.TaosBind(2);
- tagBind.bindBinary("Beijing.Chaoyang");
+ tagBind.bindBinary("California.SanFrancisco");
tagBind.bindInt(2);
cursor.stmtSetTbnameTags("d1001", tagBind.getBind());
diff --git a/docs-examples/node/nativeexample/opentsdb_json_example.js b/docs-examples/node/nativeexample/opentsdb_json_example.js
index 6d436a8e9ebe0230bba22064e8fb6c180c14b5d1..2d78444a3f805bc77ab5e11925a28dd18fe221fe 100644
--- a/docs-examples/node/nativeexample/opentsdb_json_example.js
+++ b/docs-examples/node/nativeexample/opentsdb_json_example.js
@@ -17,25 +17,25 @@ function insertData() {
metric: "meters.current",
timestamp: 1648432611249,
value: 10.3,
- tags: { location: "Beijing.Chaoyang", groupid: 2 },
+ tags: { location: "California.SanFrancisco", groupid: 2 },
},
{
metric: "meters.voltage",
timestamp: 1648432611249,
value: 219,
- tags: { location: "Beijing.Haidian", groupid: 1 },
+ tags: { location: "California.LosAngeles", groupid: 1 },
},
{
metric: "meters.current",
timestamp: 1648432611250,
value: 12.6,
- tags: { location: "Beijing.Chaoyang", groupid: 2 },
+ tags: { location: "California.SanFrancisco", groupid: 2 },
},
{
metric: "meters.voltage",
timestamp: 1648432611250,
value: 221,
- tags: { location: "Beijing.Haidian", groupid: 1 },
+ tags: { location: "California.LosAngeles", groupid: 1 },
},
];
diff --git a/docs-examples/node/nativeexample/opentsdb_telnet_example.js b/docs-examples/node/nativeexample/opentsdb_telnet_example.js
index 01e79c2dcacd923cd708d1d228959a628d0ff26a..7f80f558838e18f07ad79e580e7d08638b74e940 100644
--- a/docs-examples/node/nativeexample/opentsdb_telnet_example.js
+++ b/docs-examples/node/nativeexample/opentsdb_telnet_example.js
@@ -13,14 +13,14 @@ function createDatabase() {
function insertData() {
const lines = [
- "meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2",
- "meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2",
- "meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3",
- "meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3",
- "meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2",
- "meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2",
- "meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3",
- "meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3",
+ "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2",
+ "meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2",
+ "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3",
+ "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3",
+ "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2",
+ "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2",
+ "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3",
+ "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3",
];
cursor.schemalessInsert(
lines,
diff --git a/docs-examples/node/nativeexample/param_bind_example.js b/docs-examples/node/nativeexample/param_bind_example.js
index 9117f46c3eeabd9009b72fa9d4a8503e65884242..c7e04c71a0d19ff8666f3d43fe09109009741266 100644
--- a/docs-examples/node/nativeexample/param_bind_example.js
+++ b/docs-examples/node/nativeexample/param_bind_example.js
@@ -24,7 +24,7 @@ function insertData() {
// bind table name and tags
let tagBind = new taos.TaosBind(2);
- tagBind.bindBinary("Beijing.Chaoyang");
+ tagBind.bindBinary("California.SanFrancisco");
tagBind.bindInt(2);
cursor.stmtSetTbnameTags("d1001", tagBind.getBind());
diff --git a/docs-examples/php/connect.php b/docs-examples/php/connect.php
index 5af77b9768e5c5ac4b774b433479a4ac8902beda..b825b447805a3923248042d2cdff79c51bdcdbe3 100644
--- a/docs-examples/php/connect.php
+++ b/docs-examples/php/connect.php
@@ -4,7 +4,7 @@ use TDengine\Connection;
use TDengine\Exception\TDengineException;
try {
- // 实例化
+ // instantiate
$host = 'localhost';
$port = 6030;
$username = 'root';
@@ -12,9 +12,9 @@ try {
$dbname = null;
$connection = new Connection($host, $port, $username, $password, $dbname);
- // 连接
+ // connect
$connection->connect();
} catch (TDengineException $e) {
- // 连接失败捕获异常
+ // throw exception
throw $e;
}
diff --git a/docs-examples/php/insert.php b/docs-examples/php/insert.php
index 0d9cfc4843a2ec3e72d0ad128fa4c2650d6b9cf6..6e38fa0c46d31aa0a939d471ccbd255cfa453a16 100644
--- a/docs-examples/php/insert.php
+++ b/docs-examples/php/insert.php
@@ -4,7 +4,7 @@ use TDengine\Connection;
use TDengine\Exception\TDengineException;
try {
- // 实例化
+ // instantiate
$host = 'localhost';
$port = 6030;
$username = 'root';
@@ -12,22 +12,22 @@ try {
$dbname = 'power';
$connection = new Connection($host, $port, $username, $password, $dbname);
- // 连接
+ // connect
$connection->connect();
- // 插入
+ // insert
$connection->query('CREATE DATABASE if not exists power');
$connection->query('CREATE STABLE if not exists meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)');
$resource = $connection->query(<<<'SQL'
- INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
- power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
- power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
- power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)
+ INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
+ power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
+ power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
+ power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)
SQL);
- // 影响行数
+ // get affected rows
var_dump($resource->affectedRows());
} catch (TDengineException $e) {
- // 捕获异常
+ // throw exception
throw $e;
}
diff --git a/docs-examples/php/insert_stmt.php b/docs-examples/php/insert_stmt.php
index 5d4b4809d215d781807c21172982feff2171fe07..99a9a6aef3f69a8880316355e17396e06ca985c9 100644
--- a/docs-examples/php/insert_stmt.php
+++ b/docs-examples/php/insert_stmt.php
@@ -4,7 +4,7 @@ use TDengine\Connection;
use TDengine\Exception\TDengineException;
try {
- // 实例化
+ // instantiate
$host = 'localhost';
$port = 6030;
$username = 'root';
@@ -12,18 +12,18 @@ try {
$dbname = 'power';
$connection = new Connection($host, $port, $username, $password, $dbname);
- // 连接
+ // connect
$connection->connect();
- // 插入
+ // insert
$connection->query('CREATE DATABASE if not exists power');
$connection->query('CREATE STABLE if not exists meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)');
$stmt = $connection->prepare('INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)');
- // 设置表名和标签
+ // set table name and tags
$stmt->setTableNameTags('d1001', [
// 支持格式同参数绑定
- [TDengine\TSDB_DATA_TYPE_BINARY, 'Beijing.Chaoyang'],
+ [TDengine\TSDB_DATA_TYPE_BINARY, 'California.SanFrancisco'],
[TDengine\TSDB_DATA_TYPE_INT, 2],
]);
@@ -41,9 +41,9 @@ try {
]);
$resource = $stmt->execute();
- // 影响行数
+ // get affected rows
var_dump($resource->affectedRows());
} catch (TDengineException $e) {
- // 捕获异常
+ // throw exception
throw $e;
}
diff --git a/docs-examples/php/query.php b/docs-examples/php/query.php
index 4e86a2cec7426887686049977a8647e786ac2744..2607940ea06a70eaa30e4c165c05bd72aa89857c 100644
--- a/docs-examples/php/query.php
+++ b/docs-examples/php/query.php
@@ -4,7 +4,7 @@ use TDengine\Connection;
use TDengine\Exception\TDengineException;
try {
- // 实例化
+ // instantiate
$host = 'localhost';
$port = 6030;
$username = 'root';
@@ -12,12 +12,12 @@ try {
$dbname = 'power';
$connection = new Connection($host, $port, $username, $password, $dbname);
- // 连接
+ // connect
$connection->connect();
$resource = $connection->query('SELECT ts, current FROM meters LIMIT 2');
var_dump($resource->fetch());
} catch (TDengineException $e) {
- // 捕获异常
+ // throw exception
throw $e;
}
diff --git a/docs-examples/python/bind_param_example.py b/docs-examples/python/bind_param_example.py
index 503a2eb5dd91a3516f87a4d3c1c3218cb6505236..6a67434f876f159cf32069a55e9527ca19034640 100644
--- a/docs-examples/python/bind_param_example.py
+++ b/docs-examples/python/bind_param_example.py
@@ -2,14 +2,14 @@ import taos
from datetime import datetime
# note: lines have already been sorted by table name
-lines = [('d1001', '2018-10-03 14:38:05.000', 10.30000, 219, 0.31000, 'Beijing.Chaoyang', 2),
- ('d1001', '2018-10-03 14:38:15.000', 12.60000, 218, 0.33000, 'Beijing.Chaoyang', 2),
- ('d1001', '2018-10-03 14:38:16.800', 12.30000, 221, 0.31000, 'Beijing.Chaoyang', 2),
- ('d1002', '2018-10-03 14:38:16.650', 10.30000, 218, 0.25000, 'Beijing.Chaoyang', 3),
- ('d1003', '2018-10-03 14:38:05.500', 11.80000, 221, 0.28000, 'Beijing.Haidian', 2),
- ('d1003', '2018-10-03 14:38:16.600', 13.40000, 223, 0.29000, 'Beijing.Haidian', 2),
- ('d1004', '2018-10-03 14:38:05.000', 10.80000, 223, 0.29000, 'Beijing.Haidian', 3),
- ('d1004', '2018-10-03 14:38:06.500', 11.50000, 221, 0.35000, 'Beijing.Haidian', 3)]
+lines = [('d1001', '2018-10-03 14:38:05.000', 10.30000, 219, 0.31000, 'California.SanFrancisco', 2),
+ ('d1001', '2018-10-03 14:38:15.000', 12.60000, 218, 0.33000, 'California.SanFrancisco', 2),
+ ('d1001', '2018-10-03 14:38:16.800', 12.30000, 221, 0.31000, 'California.SanFrancisco', 2),
+ ('d1002', '2018-10-03 14:38:16.650', 10.30000, 218, 0.25000, 'California.SanFrancisco', 3),
+ ('d1003', '2018-10-03 14:38:05.500', 11.80000, 221, 0.28000, 'California.LosAngeles', 2),
+ ('d1003', '2018-10-03 14:38:16.600', 13.40000, 223, 0.29000, 'California.LosAngeles', 2),
+ ('d1004', '2018-10-03 14:38:05.000', 10.80000, 223, 0.29000, 'California.LosAngeles', 3),
+ ('d1004', '2018-10-03 14:38:06.500', 11.50000, 221, 0.35000, 'California.LosAngeles', 3)]
def get_ts(ts: str):
diff --git a/docs-examples/python/conn_native_pandas.py b/docs-examples/python/conn_native_pandas.py
index 314759f7662c7bf4c9df2c8b3396ad3101c91cd4..56942ef57085766cd128b03cabb7a357587eab16 100644
--- a/docs-examples/python/conn_native_pandas.py
+++ b/docs-examples/python/conn_native_pandas.py
@@ -13,7 +13,7 @@ print(df.head(3))
# output:
# RangeIndex(start=0, stop=8, step=1)
#
-# ts current voltage phase location groupid
-# 0 2018-10-03 14:38:05.000 10.3 219 0.31 beijing.chaoyang 2
-# 1 2018-10-03 14:38:15.000 12.6 218 0.33 beijing.chaoyang 2
-# 2 2018-10-03 14:38:16.800 12.3 221 0.31 beijing.chaoyang 2
+# ts current ... location groupid
+# 0 2018-10-03 14:38:05.500 11.8 ... california.losangeles 2
+# 1 2018-10-03 14:38:16.600 13.4 ... california.losangeles 2
+# 2 2018-10-03 14:38:05.000 10.8 ... california.losangeles 3
diff --git a/docs-examples/python/conn_rest_pandas.py b/docs-examples/python/conn_rest_pandas.py
index 143e4275fa4eda685766297e4b90cba3935a574d..0164080cd5a05e72dce40b1d111ea423623ff9b2 100644
--- a/docs-examples/python/conn_rest_pandas.py
+++ b/docs-examples/python/conn_rest_pandas.py
@@ -11,9 +11,9 @@ print(type(df.ts[0]))
print(df.head(3))
# output:
-#
# RangeIndex(start=0, stop=8, step=1)
-# ts current ... location groupid
-# 0 2018-10-03 14:38:05+08:00 10.3 ... beijing.chaoyang 2
-# 1 2018-10-03 14:38:15+08:00 12.6 ... beijing.chaoyang 2
-# 2 2018-10-03 14:38:16.800000+08:00 12.3 ... beijing.chaoyang 2
+#
+# ts current ... location groupid
+# 0 2018-10-03 06:38:05.500000+00:00 11.8 ... california.losangeles 2
+# 1 2018-10-03 06:38:16.600000+00:00 13.4 ... california.losangeles 2
+# 2 2018-10-03 06:38:05+00:00 10.8 ... california.losangeles 3
diff --git a/docs-examples/python/connect_rest_examples.py b/docs-examples/python/connect_rest_examples.py
index a043d506b965bc31179dbb6f38749d196ab338ff..3303eb0e194ac28e9486ab153183c3b1f0b639f2 100644
--- a/docs-examples/python/connect_rest_examples.py
+++ b/docs-examples/python/connect_rest_examples.py
@@ -16,10 +16,10 @@ cursor.execute("CREATE DATABASE power")
cursor.execute("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)")
# insert data
-cursor.execute("""INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
- power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
- power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
- power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)""")
+cursor.execute("""INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
+ power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
+ power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
+ power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)""")
print("inserted row count:", cursor.rowcount)
# query data
@@ -38,8 +38,7 @@ for row in data:
# inserted row count: 8
# queried row count: 3
# ['ts', 'current', 'voltage', 'phase', 'location', 'groupid']
-# [datetime.datetime(2018, 10, 3, 14, 38, 5, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 10.3, 219, 0.31, 'beijing.chaoyang', 2]
-# [datetime.datetime(2018, 10, 3, 14, 38, 15, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 12.6, 218, 0.33, 'beijing.chaoyang', 2]
-# [datetime.datetime(2018, 10, 3, 14, 38, 16, 800000, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 12.3, 221, 0.31, 'beijing.chaoyang', 2]
-
+# [datetime.datetime(2018, 10, 3, 14, 38, 5, 500000, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 11.8, 221, 0.28, 'california.losangeles', 2]
+# [datetime.datetime(2018, 10, 3, 14, 38, 16, 600000, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 13.4, 223, 0.29, 'california.losangeles', 2]
+# [datetime.datetime(2018, 10, 3, 14, 38, 5, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 10.8, 223, 0.29, 'california.losangeles', 3]
# ANCHOR_END: basic
diff --git a/docs-examples/python/json_protocol_example.py b/docs-examples/python/json_protocol_example.py
index 5bb4d629bccf3d79e74b381d6259de86d6522315..58b38f3ff667bcbbd902434d3409441a4d2c5b45 100644
--- a/docs-examples/python/json_protocol_example.py
+++ b/docs-examples/python/json_protocol_example.py
@@ -3,12 +3,12 @@ import json
import taos
from taos import SmlProtocol, SmlPrecision
-lines = [{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "Beijing.Chaoyang", "groupid": 2}},
+lines = [{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "California.SanFrancisco", "groupid": 2}},
{"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219,
- "tags": {"location": "Beijing.Haidian", "groupid": 1}},
+ "tags": {"location": "California.LosAngeles", "groupid": 1}},
{"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6,
- "tags": {"location": "Beijing.Chaoyang", "groupid": 2}},
- {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "Beijing.Haidian", "groupid": 1}}]
+ "tags": {"location": "California.SanFrancisco", "groupid": 2}},
+ {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "California.LosAngeles", "groupid": 1}}]
def get_connection():
diff --git a/docs-examples/python/line_protocol_example.py b/docs-examples/python/line_protocol_example.py
index 02baeb2104f9f48984b4d34afb5e67af641d4e32..735e8e7eb8aed1a8133de7a6de50bd50d076c472 100644
--- a/docs-examples/python/line_protocol_example.py
+++ b/docs-examples/python/line_protocol_example.py
@@ -1,10 +1,10 @@
import taos
from taos import SmlProtocol, SmlPrecision
-lines = ["meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000",
- "meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500",
- "meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249300",
- "meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611249800",
+lines = ["meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000",
+ "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500",
+ "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249300",
+ "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611249800",
]
diff --git a/docs-examples/python/multi_bind_example.py b/docs-examples/python/multi_bind_example.py
index 1714121d72705ab8d619a41f3463af4aa3193871..205ba69fb267ae1781415e4f0995b41f908ceb17 100644
--- a/docs-examples/python/multi_bind_example.py
+++ b/docs-examples/python/multi_bind_example.py
@@ -3,10 +3,10 @@ from datetime import datetime
# ANCHOR: bind_batch
table_tags = {
- "d1001": ('Beijing.Chaoyang', 2),
- "d1002": ('Beijing.Chaoyang', 3),
- "d1003": ('Beijing.Haidian', 2),
- "d1004": ('Beijing.Haidian', 3)
+ "d1001": ('California.SanFrancisco', 2),
+ "d1002": ('California.SanFrancisco', 3),
+ "d1003": ('California.LosAngeles', 2),
+ "d1004": ('California.LosAngeles', 3)
}
table_values = {
diff --git a/docs-examples/python/native_insert_example.py b/docs-examples/python/native_insert_example.py
index 94d4888a8f5330b9e39d5ae051fcb68f9825505f..3b6b73cb2236c8d9d11019349f99f79135a5c1d6 100644
--- a/docs-examples/python/native_insert_example.py
+++ b/docs-examples/python/native_insert_example.py
@@ -1,13 +1,13 @@
import taos
-lines = ["d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,Beijing.Chaoyang,2",
- "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,Beijing.Haidian,3",
- "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,Beijing.Haidian,2",
- "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,Beijing.Haidian,3",
- "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,Beijing.Chaoyang,3",
- "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,Beijing.Chaoyang,2",
- "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,Beijing.Chaoyang,2",
- "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,Beijing.Haidian,2"]
+lines = ["d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,California.SanFrancisco,2",
+ "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,California.LosAngeles,3",
+ "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,California.LosAngeles,2",
+ "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,California.LosAngeles,3",
+ "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,California.SanFrancisco,3",
+ "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,California.SanFrancisco,2",
+ "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,California.SanFrancisco,2",
+ "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,California.LosAngeles,2"]
def get_connection() -> taos.TaosConnection:
@@ -25,10 +25,10 @@ def create_stable(conn: taos.TaosConnection):
# The generated SQL is:
-# INSERT INTO d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
-# d1002 USING meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
-# d1003 USING meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
-# d1004 USING meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)
+# INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
+# d1002 USING meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
+# d1003 USING meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
+# d1004 USING meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)
def get_sql():
global lines
diff --git a/docs-examples/python/query_example.py b/docs-examples/python/query_example.py
index 6d33c49c968d9210b475931b5d8cecca0ceff3e3..8afd7f07358d7e9c9a3677ee04f8eb92aae6856b 100644
--- a/docs-examples/python/query_example.py
+++ b/docs-examples/python/query_example.py
@@ -12,10 +12,10 @@ def query_api_demo(conn: taos.TaosConnection):
# field count: 7
-# meta of files[1]: {name: ts, type: 9, bytes: 8}
+# meta of fields[1]: {name: ts, type: 9, bytes: 8}
# ======================Iterate on result=========================
-# ('d1001', datetime.datetime(2018, 10, 3, 14, 38, 5), 10.300000190734863, 219, 0.3100000023841858, 'Beijing.Chaoyang', 2)
-# ('d1001', datetime.datetime(2018, 10, 3, 14, 38, 15), 12.600000381469727, 218, 0.33000001311302185, 'Beijing.Chaoyang', 2)
+# ('d1003', datetime.datetime(2018, 10, 3, 14, 38, 5, 500000), 11.800000190734863, 221, 0.2800000011920929, 'california.losangeles', 2)
+# ('d1003', datetime.datetime(2018, 10, 3, 14, 38, 16, 600000), 13.399999618530273, 223, 0.28999999165534973, 'california.losangeles', 2)
# ANCHOR_END: iter
# ANCHOR: fetch_all
@@ -29,8 +29,8 @@ def fetch_all_demo(conn: taos.TaosConnection):
# row count: 2
# ===============all data===================
-# [{'ts': datetime.datetime(2018, 10, 3, 14, 38, 5), 'current': 10.300000190734863},
-# {'ts': datetime.datetime(2018, 10, 3, 14, 38, 15), 'current': 12.600000381469727}]
+# [{'ts': datetime.datetime(2018, 10, 3, 14, 38, 5, 500000), 'current': 11.800000190734863},
+# {'ts': datetime.datetime(2018, 10, 3, 14, 38, 16, 600000), 'current': 13.399999618530273}]
# ANCHOR_END: fetch_all
if __name__ == '__main__':
diff --git a/docs-examples/python/telnet_line_protocol_example.py b/docs-examples/python/telnet_line_protocol_example.py
index 072835109ee238940e6fe5880b72b2b04e0157fa..d812e186af86be6811ee7774f10458e46df1f39f 100644
--- a/docs-examples/python/telnet_line_protocol_example.py
+++ b/docs-examples/python/telnet_line_protocol_example.py
@@ -2,14 +2,14 @@ import taos
from taos import SmlProtocol, SmlPrecision
# format: =[ =]
-lines = ["meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2",
- "meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2",
- "meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3",
- "meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3",
- "meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2",
- "meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2",
- "meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3",
- "meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3",
+lines = ["meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2",
+ "meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2",
+ "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3",
+ "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3",
+ "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2",
+ "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2",
+ "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3",
+ "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3",
]
diff --git a/docs-examples/rust/nativeexample/examples/stmt_example.rs b/docs-examples/rust/nativeexample/examples/stmt_example.rs
index a791a4135984a33dded145e8175d7ade57de8d77..190f8c1ef6d50a8e9c925178c1a9d31c22e3d4df 100644
--- a/docs-examples/rust/nativeexample/examples/stmt_example.rs
+++ b/docs-examples/rust/nativeexample/examples/stmt_example.rs
@@ -12,7 +12,7 @@ async fn main() -> Result<(), Error> {
stmt.set_tbname_tags(
"d1001",
[
- Field::Binary(BString::from("Beijing.Chaoyang")),
+ Field::Binary(BString::from("California.SanFrancisco")),
Field::Int(2),
],
)?;
diff --git a/docs-examples/rust/restexample/examples/insert_example.rs b/docs-examples/rust/restexample/examples/insert_example.rs
index d7acc98d096fb3cd6bea22d6c5f6f0f5caea50af..9261536f627c297fc707708f88f57eed647dbf3e 100644
--- a/docs-examples/rust/restexample/examples/insert_example.rs
+++ b/docs-examples/rust/restexample/examples/insert_example.rs
@@ -5,10 +5,10 @@ async fn main() -> Result<(), Error> {
let taos = TaosCfg::default().connect().expect("fail to connect");
taos.create_database("power").await?;
taos.exec("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)").await?;
- let sql = "INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
- power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
- power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
- power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)";
+ let sql = "INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
+ power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
+ power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
+ power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)";
let result = taos.query(sql).await?;
println!("{:?}", result);
Ok(())
diff --git a/docs-examples/rust/schemalessexample/examples/influxdb_line_example.rs b/docs-examples/rust/schemalessexample/examples/influxdb_line_example.rs
index e93888cc83d12f3bec7370a66e8a85d38cec42ad..64d1a3c9ac6037c16e3e1c3be0258e19cce632a0 100644
--- a/docs-examples/rust/schemalessexample/examples/influxdb_line_example.rs
+++ b/docs-examples/rust/schemalessexample/examples/influxdb_line_example.rs
@@ -5,10 +5,10 @@ fn main() {
let taos = TaosCfg::default().connect().expect("fail to connect");
taos.raw_query("CREATE DATABASE test").unwrap();
taos.raw_query("USE test").unwrap();
- let lines = ["meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249",
- "meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250",
- "meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249",
- "meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250"];
+ let lines = ["meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249",
+ "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250",
+ "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249",
+ "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250"];
let affected_rows = taos
.schemaless_insert(
&lines,
diff --git a/docs-examples/rust/schemalessexample/examples/opentsdb_json_example.rs b/docs-examples/rust/schemalessexample/examples/opentsdb_json_example.rs
index 1d66bd1f2b1bcbe82dc3ee3e8e25ea4c521c81f0..e61691596704c8aaf979081429802df6e5aa86f9 100644
--- a/docs-examples/rust/schemalessexample/examples/opentsdb_json_example.rs
+++ b/docs-examples/rust/schemalessexample/examples/opentsdb_json_example.rs
@@ -6,10 +6,10 @@ fn main() {
taos.raw_query("CREATE DATABASE test").unwrap();
taos.raw_query("USE test").unwrap();
let lines = [
- r#"[{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "Beijing.Chaoyang", "groupid": 2}},
- {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, "tags": {"location": "Beijing.Haidian", "groupid": 1}},
- {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, "tags": {"location": "Beijing.Chaoyang", "groupid": 2}},
- {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "Beijing.Haidian", "groupid": 1}}]"#,
+ r#"[{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "California.SanFrancisco", "groupid": 2}},
+ {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, "tags": {"location": "California.LosAngeles", "groupid": 1}},
+ {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, "tags": {"location": "California.SanFrancisco", "groupid": 2}},
+ {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "California.LosAngeles", "groupid": 1}}]"#,
];
let affected_rows = taos
diff --git a/docs-examples/rust/schemalessexample/examples/opentsdb_telnet_example.rs b/docs-examples/rust/schemalessexample/examples/opentsdb_telnet_example.rs
index 18d7500714d9e41b1bebd490199d296ead3dc7c4..c8cab7655a24806e5c7659af80e83da383539c55 100644
--- a/docs-examples/rust/schemalessexample/examples/opentsdb_telnet_example.rs
+++ b/docs-examples/rust/schemalessexample/examples/opentsdb_telnet_example.rs
@@ -6,14 +6,14 @@ fn main() {
taos.raw_query("CREATE DATABASE test").unwrap();
taos.raw_query("USE test").unwrap();
let lines = [
- "meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2",
- "meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2",
- "meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3",
- "meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3",
- "meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2",
- "meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2",
- "meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3",
- "meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3",
+ "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2",
+ "meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2",
+ "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3",
+ "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3",
+ "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2",
+ "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2",
+ "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3",
+ "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3",
];
let affected_rows = taos
.schemaless_insert(
diff --git a/example/CMakeLists.txt b/example/CMakeLists.txt
deleted file mode 100644
index 365b1b7172f394111c5e75b113a9ce1e1ce8822b..0000000000000000000000000000000000000000
--- a/example/CMakeLists.txt
+++ /dev/null
@@ -1,49 +0,0 @@
-add_executable(tmq "")
-add_executable(tstream "")
-add_executable(demoapi "")
-
-target_sources(tmq
- PRIVATE
- "src/tmq.c"
-)
-
-target_sources(tstream
- PRIVATE
- "src/tstream.c"
-)
-
-target_sources(demoapi
- PRIVATE
- "src/demoapi.c"
-)
-
-target_link_libraries(tmq
- taos_static
-)
-
-target_link_libraries(tstream
- taos_static
-)
-
-target_link_libraries(demoapi
- taos_static
-)
-
-target_include_directories(tmq
- PUBLIC "${TD_SOURCE_DIR}/include/os"
- PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
-)
-
-target_include_directories(tstream
- PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
-)
-
-target_include_directories(demoapi
- PUBLIC "${TD_SOURCE_DIR}/include/client"
- PUBLIC "${TD_SOURCE_DIR}/include/os"
- PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
-)
-
-SET_TARGET_PROPERTIES(tmq PROPERTIES OUTPUT_NAME tmq)
-SET_TARGET_PROPERTIES(tstream PROPERTIES OUTPUT_NAME tstream)
-SET_TARGET_PROPERTIES(demoapi PROPERTIES OUTPUT_NAME demoapi)
diff --git a/examples/c/CMakeLists.txt b/examples/c/CMakeLists.txt
index 17a9257c499c6a1efd24fb23b47a9e9835ad7ade..4a9007acecaa679dc716c5665eea7f0cd1e34dbb 100644
--- a/examples/c/CMakeLists.txt
+++ b/examples/c/CMakeLists.txt
@@ -3,20 +3,70 @@ PROJECT(TDengine)
IF (TD_LINUX)
INCLUDE_DIRECTORIES(. ${TD_SOURCE_DIR}/src/inc ${TD_SOURCE_DIR}/src/client/inc ${TD_SOURCE_DIR}/inc)
AUX_SOURCE_DIRECTORY(. SRC)
- ADD_EXECUTABLE(demo apitest.c)
- TARGET_LINK_LIBRARIES(demo taos_static trpc tutil pthread )
- ADD_EXECUTABLE(sml schemaless.c)
- TARGET_LINK_LIBRARIES(sml taos_static trpc tutil pthread )
- ADD_EXECUTABLE(subscribe subscribe.c)
- TARGET_LINK_LIBRARIES(subscribe taos_static trpc tutil pthread )
- ADD_EXECUTABLE(epoll epoll.c)
- TARGET_LINK_LIBRARIES(epoll taos_static trpc tutil pthread lua)
+ # ADD_EXECUTABLE(demo apitest.c)
+ #TARGET_LINK_LIBRARIES(demo taos_static trpc tutil pthread )
+ #ADD_EXECUTABLE(sml schemaless.c)
+ #TARGET_LINK_LIBRARIES(sml taos_static trpc tutil pthread )
+ #ADD_EXECUTABLE(subscribe subscribe.c)
+ #TARGET_LINK_LIBRARIES(subscribe taos_static trpc tutil pthread )
+ #ADD_EXECUTABLE(epoll epoll.c)
+ #TARGET_LINK_LIBRARIES(epoll taos_static trpc tutil pthread lua)
+
+ add_executable(tmq "")
+ add_executable(stream_demo "")
+ add_executable(demoapi "")
+
+ target_sources(tmq
+ PRIVATE
+ "tmq.c"
+ )
+
+ target_sources(stream_demo
+ PRIVATE
+ "stream_demo.c"
+ )
+
+ target_sources(demoapi
+ PRIVATE
+ "demoapi.c"
+ )
+
+ target_link_libraries(tmq
+ taos_static
+ )
+
+ target_link_libraries(stream_demo
+ taos_static
+ )
+
+ target_link_libraries(demoapi
+ taos_static
+ )
+
+ target_include_directories(tmq
+ PUBLIC "${TD_SOURCE_DIR}/include/os"
+ PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
+ )
+
+ target_include_directories(stream_demo
+ PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
+ )
+
+ target_include_directories(demoapi
+ PUBLIC "${TD_SOURCE_DIR}/include/client"
+ PUBLIC "${TD_SOURCE_DIR}/include/os"
+ PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
+ )
+
+ SET_TARGET_PROPERTIES(tmq PROPERTIES OUTPUT_NAME tmq)
+ SET_TARGET_PROPERTIES(stream_demo PROPERTIES OUTPUT_NAME stream_demo)
+ SET_TARGET_PROPERTIES(demoapi PROPERTIES OUTPUT_NAME demoapi)
ENDIF ()
IF (TD_DARWIN)
INCLUDE_DIRECTORIES(. ${TD_SOURCE_DIR}/src/inc ${TD_SOURCE_DIR}/src/client/inc ${TD_SOURCE_DIR}/inc)
AUX_SOURCE_DIRECTORY(. SRC)
- ADD_EXECUTABLE(demo demo.c)
- TARGET_LINK_LIBRARIES(demo taos_static trpc tutil pthread lua)
- ADD_EXECUTABLE(epoll epoll.c)
- TARGET_LINK_LIBRARIES(epoll taos_static trpc tutil pthread lua)
+ #ADD_EXECUTABLE(demo demo.c)
+ #TARGET_LINK_LIBRARIES(demo taos_static trpc tutil pthread lua)
+ #ADD_EXECUTABLE(epoll epoll.c)
+ #TARGET_LINK_LIBRARIES(epoll taos_static trpc tutil pthread lua)
ENDIF ()
diff --git a/example/src/demoapi.c b/examples/c/demoapi.c
similarity index 100%
rename from example/src/demoapi.c
rename to examples/c/demoapi.c
diff --git a/examples/c/stream.c b/examples/c/stream.c
deleted file mode 100644
index 41365813aeecc042d736fab8694642937abd27e4..0000000000000000000000000000000000000000
--- a/examples/c/stream.c
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-#include
-#include
-#include
-#include
-#include
-#include "../../../include/client/taos.h" // include TDengine header file
-
-typedef struct {
- char server_ip[64];
- char db_name[64];
- char tbl_name[64];
-} param;
-
-int g_thread_exit_flag = 0;
-void* insert_rows(void *sarg);
-
-void streamCallBack(void *param, TAOS_RES *res, TAOS_ROW row)
-{
- // in this simple demo, it just print out the result
- char temp[128];
-
- TAOS_FIELD *fields = taos_fetch_fields(res);
- int numFields = taos_num_fields(res);
-
- taos_print_row(temp, row, fields, numFields);
-
- printf("\n%s\n", temp);
-}
-
-int main(int argc, char *argv[])
-{
- TAOS *taos;
- char db_name[64];
- char tbl_name[64];
- char sql[1024] = { 0 };
-
- if (argc != 4) {
- printf("usage: %s server-ip dbname tblname\n", argv[0]);
- exit(0);
- }
-
- strcpy(db_name, argv[2]);
- strcpy(tbl_name, argv[3]);
-
- // create pthread to insert into row per second for stream calc
- param *t_param = (param *)malloc(sizeof(param));
- if (NULL == t_param)
- {
- printf("failed to malloc\n");
- exit(1);
- }
- memset(t_param, 0, sizeof(param));
- strcpy(t_param->server_ip, argv[1]);
- strcpy(t_param->db_name, db_name);
- strcpy(t_param->tbl_name, tbl_name);
-
- pthread_t pid;
- pthread_create(&pid, NULL, (void * (*)(void *))insert_rows, t_param);
-
- sleep(3); // waiting for database is created.
- // open connection to database
- taos = taos_connect(argv[1], "root", "taosdata", db_name, 0);
- if (taos == NULL) {
- printf("failed to connet to server:%s\n", argv[1]);
- free(t_param);
- exit(1);
- }
-
- // starting stream calc,
- printf("please input stream SQL:[e.g., select count(*) from tblname interval(5s) sliding(2s);]\n");
- fgets(sql, sizeof(sql), stdin);
- if (sql[0] == 0) {
- printf("input NULL stream SQL, so exit!\n");
- free(t_param);
- exit(1);
- }
-
- // param is set to NULL in this demo, it shall be set to the pointer to app context
- TAOS_STREAM *pStream = taos_open_stream(taos, sql, streamCallBack, 0, NULL, NULL);
- if (NULL == pStream) {
- printf("failed to create stream\n");
- free(t_param);
- exit(1);
- }
-
- printf("presss any key to exit\n");
- getchar();
-
- taos_close_stream(pStream);
-
- g_thread_exit_flag = 1;
- pthread_join(pid, NULL);
-
- taos_close(taos);
- free(t_param);
-
- return 0;
-}
-
-
-void* insert_rows(void *sarg)
-{
- TAOS *taos;
- char command[1024] = { 0 };
- param *winfo = (param * )sarg;
-
- if (NULL == winfo){
- printf("para is null!\n");
- exit(1);
- }
-
- taos = taos_connect(winfo->server_ip, "root", "taosdata", NULL, 0);
- if (taos == NULL) {
- printf("failed to connet to server:%s\n", winfo->server_ip);
- exit(1);
- }
-
- // drop database
- sprintf(command, "drop database %s;", winfo->db_name);
- if (taos_query(taos, command) != 0) {
- printf("failed to drop database, reason:%s\n", taos_errstr(taos));
- exit(1);
- }
-
- // create database
- sprintf(command, "create database %s;", winfo->db_name);
- if (taos_query(taos, command) != 0) {
- printf("failed to create database, reason:%s\n", taos_errstr(taos));
- exit(1);
- }
-
- // use database
- sprintf(command, "use %s;", winfo->db_name);
- if (taos_query(taos, command) != 0) {
- printf("failed to use database, reason:%s\n", taos_errstr(taos));
- exit(1);
- }
-
- // create table
- sprintf(command, "create table %s (ts timestamp, speed int);", winfo->tbl_name);
- if (taos_query(taos, command) != 0) {
- printf("failed to create table, reason:%s\n", taos_errstr(taos));
- exit(1);
- }
-
- // insert data
- int64_t begin = (int64_t)time(NULL);
- int index = 0;
- while (1) {
- if (g_thread_exit_flag) break;
-
- index++;
- sprintf(command, "insert into %s values (%ld, %d)", winfo->tbl_name, (begin + index) * 1000, index);
- if (taos_query(taos, command)) {
- printf("failed to insert row [%s], reason:%s\n", command, taos_errstr(taos));
- }
- sleep(1);
- }
-
- taos_close(taos);
- return 0;
-}
-
diff --git a/example/src/tstream.c b/examples/c/stream_demo.c
similarity index 100%
rename from example/src/tstream.c
rename to examples/c/stream_demo.c
diff --git a/examples/c/subscribe.c b/examples/c/subscribe.c
deleted file mode 100644
index 66d64d295ce5c2700088842dd2c3ce013225f3bd..0000000000000000000000000000000000000000
--- a/examples/c/subscribe.c
+++ /dev/null
@@ -1,263 +0,0 @@
-// sample code for TDengine subscribe/consume API
-// to compile: gcc -o subscribe subscribe.c -ltaos
-
-#include
-#include
-#include
-#include
-#include "../../../include/client/taos.h" // include TDengine header file
-
-int nTotalRows;
-
-void print_result(TAOS_RES* res, int blockFetch) {
- TAOS_ROW row = NULL;
- int num_fields = taos_num_fields(res);
- TAOS_FIELD* fields = taos_fetch_fields(res);
- int nRows = 0;
-
- if (blockFetch) {
- nRows = taos_fetch_block(res, &row);
- //for (int i = 0; i < nRows; i++) {
- // taos_print_row(buf, row + i, fields, num_fields);
- // puts(buf);
- //}
- } else {
- while ((row = taos_fetch_row(res))) {
- char buf[4096] = {0};
- taos_print_row(buf, row, fields, num_fields);
- puts(buf);
- nRows++;
- }
- }
-
- nTotalRows += nRows;
- printf("%d rows consumed.\n", nRows);
-}
-
-
-void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
- print_result(res, *(int*)param);
-}
-
-
-void check_row_count(int line, TAOS_RES* res, int expected) {
- int actual = 0;
- TAOS_ROW row;
- while ((row = taos_fetch_row(res))) {
- actual++;
- }
- if (actual != expected) {
- printf("line %d: row count mismatch, expected: %d, actual: %d\n", line, expected, actual);
- } else {
- printf("line %d: %d rows consumed as expected\n", line, actual);
- }
-}
-
-
-void do_query(TAOS* taos, const char* sql) {
- TAOS_RES* res = taos_query(taos, sql);
- taos_free_result(res);
-}
-
-
-void run_test(TAOS* taos) {
- do_query(taos, "drop database if exists test;");
-
- usleep(100000);
- do_query(taos, "create database test;");
- usleep(100000);
- do_query(taos, "use test;");
-
- usleep(100000);
- do_query(taos, "create table meters(ts timestamp, a int) tags(area int);");
-
- do_query(taos, "create table t0 using meters tags(0);");
- do_query(taos, "create table t1 using meters tags(1);");
- do_query(taos, "create table t2 using meters tags(2);");
- do_query(taos, "create table t3 using meters tags(3);");
- do_query(taos, "create table t4 using meters tags(4);");
- do_query(taos, "create table t5 using meters tags(5);");
- do_query(taos, "create table t6 using meters tags(6);");
- do_query(taos, "create table t7 using meters tags(7);");
- do_query(taos, "create table t8 using meters tags(8);");
- do_query(taos, "create table t9 using meters tags(9);");
-
- do_query(taos, "insert into t0 values('2020-01-01 00:00:00.000', 0);");
- do_query(taos, "insert into t0 values('2020-01-01 00:01:00.000', 0);");
- do_query(taos, "insert into t0 values('2020-01-01 00:02:00.000', 0);");
- do_query(taos, "insert into t1 values('2020-01-01 00:00:00.000', 0);");
- do_query(taos, "insert into t1 values('2020-01-01 00:01:00.000', 0);");
- do_query(taos, "insert into t1 values('2020-01-01 00:02:00.000', 0);");
- do_query(taos, "insert into t1 values('2020-01-01 00:03:00.000', 0);");
- do_query(taos, "insert into t2 values('2020-01-01 00:00:00.000', 0);");
- do_query(taos, "insert into t2 values('2020-01-01 00:01:00.000', 0);");
- do_query(taos, "insert into t2 values('2020-01-01 00:01:01.000', 0);");
- do_query(taos, "insert into t2 values('2020-01-01 00:01:02.000', 0);");
- do_query(taos, "insert into t3 values('2020-01-01 00:01:02.000', 0);");
- do_query(taos, "insert into t4 values('2020-01-01 00:01:02.000', 0);");
- do_query(taos, "insert into t5 values('2020-01-01 00:01:02.000', 0);");
- do_query(taos, "insert into t6 values('2020-01-01 00:01:02.000', 0);");
- do_query(taos, "insert into t7 values('2020-01-01 00:01:02.000', 0);");
- do_query(taos, "insert into t8 values('2020-01-01 00:01:02.000', 0);");
- do_query(taos, "insert into t9 values('2020-01-01 00:01:02.000', 0);");
-
- // super tables subscription
- usleep(1000000);
-
- TAOS_SUB* tsub = taos_subscribe(taos, 0, "test", "select * from meters;", NULL, NULL, 0);
- TAOS_RES* res = taos_consume(tsub);
- check_row_count(__LINE__, res, 18);
-
- res = taos_consume(tsub);
- check_row_count(__LINE__, res, 0);
-
- do_query(taos, "insert into t0 values('2020-01-01 00:02:00.001', 0);");
- do_query(taos, "insert into t8 values('2020-01-01 00:01:03.000', 0);");
- res = taos_consume(tsub);
- check_row_count(__LINE__, res, 2);
-
- do_query(taos, "insert into t2 values('2020-01-01 00:01:02.001', 0);");
- do_query(taos, "insert into t1 values('2020-01-01 00:03:00.001', 0);");
- res = taos_consume(tsub);
- check_row_count(__LINE__, res, 2);
-
- do_query(taos, "insert into t1 values('2020-01-01 00:03:00.002', 0);");
- res = taos_consume(tsub);
- check_row_count(__LINE__, res, 1);
-
- // keep progress information and restart subscription
- taos_unsubscribe(tsub, 1);
- do_query(taos, "insert into t0 values('2020-01-01 00:04:00.000', 0);");
- tsub = taos_subscribe(taos, 1, "test", "select * from meters;", NULL, NULL, 0);
- res = taos_consume(tsub);
- check_row_count(__LINE__, res, 24);
-
- // keep progress information and continue previous subscription
- taos_unsubscribe(tsub, 1);
- tsub = taos_subscribe(taos, 0, "test", "select * from meters;", NULL, NULL, 0);
- res = taos_consume(tsub);
- check_row_count(__LINE__, res, 0);
-
- // don't keep progress information and continue previous subscription
- taos_unsubscribe(tsub, 0);
- tsub = taos_subscribe(taos, 0, "test", "select * from meters;", NULL, NULL, 0);
- res = taos_consume(tsub);
- check_row_count(__LINE__, res, 24);
-
- // single meter subscription
-
- taos_unsubscribe(tsub, 0);
- tsub = taos_subscribe(taos, 0, "test", "select * from t0;", NULL, NULL, 0);
- res = taos_consume(tsub);
- check_row_count(__LINE__, res, 5);
-
- res = taos_consume(tsub);
- check_row_count(__LINE__, res, 0);
-
- do_query(taos, "insert into t0 values('2020-01-01 00:04:00.001', 0);");
- res = taos_consume(tsub);
- check_row_count(__LINE__, res, 1);
-
- taos_unsubscribe(tsub, 0);
-}
-
-
-int main(int argc, char *argv[]) {
- const char* host = "127.0.0.1";
- const char* user = "root";
- const char* passwd = "taosdata";
- const char* sql = "select * from meters;";
- const char* topic = "test-multiple";
- int async = 1, restart = 0, keep = 1, test = 0, blockFetch = 0;
-
- for (int i = 1; i < argc; i++) {
- if (strncmp(argv[i], "-h=", 3) == 0) {
- host = argv[i] + 3;
- continue;
- }
- if (strncmp(argv[i], "-u=", 3) == 0) {
- user = argv[i] + 3;
- continue;
- }
- if (strncmp(argv[i], "-p=", 3) == 0) {
- passwd = argv[i] + 3;
- continue;
- }
- if (strcmp(argv[i], "-sync") == 0) {
- async = 0;
- continue;
- }
- if (strcmp(argv[i], "-restart") == 0) {
- restart = 1;
- continue;
- }
- if (strcmp(argv[i], "-single") == 0) {
- sql = "select * from t0;";
- topic = "test-single";
- continue;
- }
- if (strcmp(argv[i], "-nokeep") == 0) {
- keep = 0;
- continue;
- }
- if (strncmp(argv[i], "-sql=", 5) == 0) {
- sql = argv[i] + 5;
- topic = "test-custom";
- continue;
- }
- if (strcmp(argv[i], "-test") == 0) {
- test = 1;
- continue;
- }
- if (strcmp(argv[i], "-block-fetch") == 0) {
- blockFetch = 1;
- continue;
- }
- }
-
- TAOS* taos = taos_connect(host, user, passwd, "", 0);
- if (taos == NULL) {
- printf("failed to connect to db, reason:%s\n", taos_errstr(taos));
- exit(1);
- }
-
- if (test) {
- run_test(taos);
- taos_close(taos);
- exit(0);
- }
-
- taos_select_db(taos, "test");
- TAOS_SUB* tsub = NULL;
- if (async) {
- // create an asynchronized subscription, the callback function will be called every 1s
- tsub = taos_subscribe(taos, restart, topic, sql, subscribe_callback, &blockFetch, 1000);
- } else {
- // create an synchronized subscription, need to call 'taos_consume' manually
- tsub = taos_subscribe(taos, restart, topic, sql, NULL, NULL, 0);
- }
-
- if (tsub == NULL) {
- printf("failed to create subscription.\n");
- exit(0);
- }
-
- if (async) {
- getchar();
- } else while(1) {
- TAOS_RES* res = taos_consume(tsub);
- if (res == NULL) {
- printf("failed to consume data.");
- break;
- } else {
- print_result(res, blockFetch);
- getchar();
- }
- }
-
- printf("total rows consumed: %d\n", nTotalRows);
- taos_unsubscribe(tsub, keep);
- taos_close(taos);
-
- return 0;
-}
diff --git a/example/src/tmq.c b/examples/c/tmq.c
similarity index 84%
rename from example/src/tmq.c
rename to examples/c/tmq.c
index 913096ee90294cf65ba81d605ed3e7d4f2fa803c..2e8aa21da7a2bdd83e4a995beccb99ac40228a48 100644
--- a/example/src/tmq.c
+++ b/examples/c/tmq.c
@@ -24,6 +24,7 @@ static void msg_process(TAOS_RES* msg) {
char buf[1024];
/*memset(buf, 0, 1024);*/
printf("topic: %s\n", tmq_get_topic_name(msg));
+ printf("db: %s\n", tmq_get_db_name(msg));
printf("vg: %d\n", tmq_get_vgroup_id(msg));
while (1) {
TAOS_ROW row = taos_fetch_row(msg);
@@ -106,7 +107,7 @@ int32_t create_topic() {
}
taos_free_result(pRes);
- /*pRes = taos_query(pConn, "create topic topic_ctb_column as abc1");*/
+ /*pRes = taos_query(pConn, "create topic topic_ctb_column as database abc1");*/
pRes = taos_query(pConn, "create topic topic_ctb_column as select ts, c1, c2, c3 from st1");
if (taos_errno(pRes) != 0) {
printf("failed to create topic topic_ctb_column, reason:%s\n", taos_errstr(pRes));
@@ -165,7 +166,6 @@ tmq_t* build_consumer() {
tmq_conf_set(conf, "group.id", "tg2");
tmq_conf_set(conf, "td.connect.user", "root");
tmq_conf_set(conf, "td.connect.pass", "taosdata");
- /*tmq_conf_set(conf, "td.connect.db", "abc1");*/
tmq_conf_set(conf, "msg.with.table.name", "true");
tmq_conf_set(conf, "enable.auto.commit", "false");
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
@@ -191,20 +191,18 @@ void basic_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
return;
}
int32_t cnt = 0;
- /*clock_t startTime = clock();*/
while (running) {
TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, 0);
if (tmqmessage) {
cnt++;
+ msg_process(tmqmessage);
+ /*if (cnt >= 2) break;*/
/*printf("get data\n");*/
- /*msg_process(tmqmessage);*/
taos_free_result(tmqmessage);
/*} else {*/
/*break;*/
}
}
- /*clock_t endTime = clock();*/
- /*printf("log cnt: %d %f s\n", cnt, (double)(endTime - startTime) / CLOCKS_PER_SEC);*/
err = tmq_consumer_close(tmq);
if (err)
@@ -253,39 +251,6 @@ void sync_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
fprintf(stderr, "%% Consumer closed\n");
}
-void perf_loop(tmq_t* tmq, tmq_list_t* topics) {
- tmq_resp_err_t err;
-
- if ((err = tmq_subscribe(tmq, topics))) {
- fprintf(stderr, "%% Failed to start consuming topics: %s\n", tmq_err2str(err));
- printf("subscribe err\n");
- return;
- }
- int32_t batchCnt = 0;
- int32_t skipLogNum = 0;
- clock_t startTime = clock();
- while (running) {
- TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, 500);
- if (tmqmessage) {
- batchCnt++;
- /*skipLogNum += tmqGetSkipLogNum(tmqmessage);*/
- /*msg_process(tmqmessage);*/
- taos_free_result(tmqmessage);
- } else {
- break;
- }
- }
- clock_t endTime = clock();
- printf("log batch cnt: %d, skip log cnt: %d, time used:%f s\n", batchCnt, skipLogNum,
- (double)(endTime - startTime) / CLOCKS_PER_SEC);
-
- err = tmq_consumer_close(tmq);
- if (err)
- fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(err));
- else
- fprintf(stderr, "%% Consumer closed\n");
-}
-
int main(int argc, char* argv[]) {
if (argc > 1) {
printf("env init\n");
@@ -296,7 +261,6 @@ int main(int argc, char* argv[]) {
}
tmq_t* tmq = build_consumer();
tmq_list_t* topic_list = build_topic_list();
- /*perf_loop(tmq, topic_list);*/
- /*basic_consume_loop(tmq, topic_list);*/
- sync_consume_loop(tmq, topic_list);
+ basic_consume_loop(tmq, topic_list);
+ /*sync_consume_loop(tmq, topic_list);*/
}
diff --git a/include/client/taos.h b/include/client/taos.h
index 0b8c67aa794363ff851c69e5848978c78c6a4abc..b65091f52bdd218138891970f079158033cb2d69 100644
--- a/include/client/taos.h
+++ b/include/client/taos.h
@@ -85,6 +85,14 @@ typedef struct taosField {
int32_t bytes;
} TAOS_FIELD;
+typedef struct TAOS_FIELD_E {
+ char name[65];
+ int8_t type;
+ uint8_t precision;
+ uint8_t scale;
+ int32_t bytes;
+} TAOS_FIELD_E;
+
#ifdef WINDOWS
#define DLL_EXPORT __declspec(dllexport)
#else
@@ -134,7 +142,10 @@ DLL_EXPORT TAOS_STMT *taos_stmt_init(TAOS *taos);
DLL_EXPORT int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length);
DLL_EXPORT int taos_stmt_set_tbname_tags(TAOS_STMT *stmt, const char *name, TAOS_MULTI_BIND *tags);
DLL_EXPORT int taos_stmt_set_tbname(TAOS_STMT *stmt, const char *name);
+DLL_EXPORT int taos_stmt_set_tags(TAOS_STMT *stmt, TAOS_MULTI_BIND *tags);
DLL_EXPORT int taos_stmt_set_sub_tbname(TAOS_STMT *stmt, const char *name);
+DLL_EXPORT int taos_stmt_get_tag_fields(TAOS_STMT *stmt, int *fieldNum, TAOS_FIELD_E **fields);
+DLL_EXPORT int taos_stmt_get_col_fields(TAOS_STMT *stmt, int *fieldNum, TAOS_FIELD_E **fields);
DLL_EXPORT int taos_stmt_is_insert(TAOS_STMT *stmt, int *insert);
DLL_EXPORT int taos_stmt_num_params(TAOS_STMT *stmt, int *nums);
@@ -230,7 +241,7 @@ DLL_EXPORT const char *tmq_err2str(tmq_resp_err_t);
DLL_EXPORT tmq_resp_err_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list);
DLL_EXPORT tmq_resp_err_t tmq_unsubscribe(tmq_t *tmq);
DLL_EXPORT tmq_resp_err_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics);
-DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t wait_time);
+DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout);
DLL_EXPORT tmq_resp_err_t tmq_consumer_close(tmq_t *tmq);
DLL_EXPORT tmq_resp_err_t tmq_commit_sync(tmq_t *tmq, const tmq_topic_vgroup_list_t *offsets);
DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const tmq_topic_vgroup_list_t *offsets, tmq_commit_cb *cb, void *param);
@@ -258,6 +269,7 @@ DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_comm
/* -------------------------TMQ MSG HANDLE INTERFACE---------------------- */
DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
+DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res);
DLL_EXPORT const char *tmq_get_table_name(TAOS_RES *res);
diff --git a/include/common/systable.h b/include/common/systable.h
index e36beb13f2eb2cae1ec93495c3b84550fce617ce..8b0bb4a3fba107e1d74bee6885c39ae06d425a19 100644
--- a/include/common/systable.h
+++ b/include/common/systable.h
@@ -34,7 +34,6 @@ extern "C" {
#define TSDB_INS_TABLE_USER_FUNCTIONS "user_functions"
#define TSDB_INS_TABLE_USER_INDEXES "user_indexes"
#define TSDB_INS_TABLE_USER_STABLES "user_stables"
-#define TSDB_INS_TABLE_USER_STREAMS "user_streams"
#define TSDB_INS_TABLE_USER_TABLES "user_tables"
#define TSDB_INS_TABLE_USER_TABLE_DISTRIBUTED "user_table_distributed"
#define TSDB_INS_TABLE_USER_USERS "user_users"
diff --git a/include/common/taosdef.h b/include/common/taosdef.h
index d39c7a121593e6feeb5cfbf104d07642bdbfaff7..516df71b0b886872fc1676bb058c9dc91ea9c3cb 100644
--- a/include/common/taosdef.h
+++ b/include/common/taosdef.h
@@ -97,6 +97,7 @@ extern char *qtypeStr[];
#undef TD_DEBUG_PRINT_ROW
#undef TD_DEBUG_PRINT_TSDB_LOAD_DCOLS
+#undef TD_DEBUG_PRINT_TAG
#ifdef __cplusplus
}
diff --git a/include/common/tcommon.h b/include/common/tcommon.h
index 9e3ad42a82fe779bc507417d84718b342a98a34e..88fa0e728f397006759e296cf1e3533816ee540f 100644
--- a/include/common/tcommon.h
+++ b/include/common/tcommon.h
@@ -53,10 +53,9 @@ typedef enum EStreamType {
} EStreamType;
typedef struct {
- uint32_t numOfTables;
- SArray* pGroupList;
+ SArray* pTableList;
SHashObj* map; // speedup acquire the tableQueryInfo by table uid
-} STableGroupInfo;
+} STableListInfo;
typedef struct SColumnDataAgg {
int16_t colId;
@@ -106,12 +105,14 @@ typedef struct SColumnInfoData {
} SColumnInfoData;
typedef struct SQueryTableDataCond {
- STimeWindow twindow;
+ //STimeWindow twindow;
int32_t order; // desc|asc order to iterate the data block
int32_t numOfCols;
SColumnInfo *colList;
bool loadExternalRows; // load external rows or not
int32_t type; // data block load type:
+ int32_t numOfTWindows;
+ STimeWindow *twindows;
} SQueryTableDataCond;
void* blockDataDestroy(SSDataBlock* pBlock);
@@ -219,6 +220,16 @@ typedef struct {
#define GET_FORWARD_DIRECTION_FACTOR(ord) (((ord) == TSDB_ORDER_ASC) ? QUERY_ASC_FORWARD_STEP : QUERY_DESC_FORWARD_STEP)
+#define SORT_QSORT_T 0x1
+#define SORT_SPILLED_MERGE_SORT_T 0x2
+typedef struct SSortExecInfo {
+ int32_t sortMethod;
+ int32_t sortBuffer;
+ int32_t loops; // loop count
+ int32_t writeBytes; // write io bytes
+ int32_t readBytes; // read io bytes
+} SSortExecInfo;
+
#ifdef __cplusplus
}
#endif
diff --git a/include/common/tdatablock.h b/include/common/tdatablock.h
index db8644ecfed50f354e61ff20b424f93dc559f8d7..66b81efc5b32b961de01fce1dbe5a5a6cee808ef 100644
--- a/include/common/tdatablock.h
+++ b/include/common/tdatablock.h
@@ -198,7 +198,7 @@ void colDataTrim(SColumnInfoData* pColumnInfoData);
size_t blockDataGetNumOfCols(const SSDataBlock* pBlock);
size_t blockDataGetNumOfRows(const SSDataBlock* pBlock);
-int32_t blockDataMerge(SSDataBlock* pDest, const SSDataBlock* pSrc, SArray* pIndexMap);
+int32_t blockDataMerge(SSDataBlock* pDest, const SSDataBlock* pSrc);
int32_t blockDataSplitRows(SSDataBlock* pBlock, bool hasVarCol, int32_t startIndex, int32_t* stopIndex,
int32_t pageSize);
int32_t blockDataToBuf(char* buf, const SSDataBlock* pBlock);
@@ -227,12 +227,16 @@ int32_t blockDataTrimFirstNRows(SSDataBlock* pBlock, size_t n);
SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData);
+void blockCompressEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_t numOfCols, int8_t needCompress);
+const char* blockCompressDecode(SSDataBlock* pBlock, int32_t numOfCols, int32_t numOfRows, const char* pData);
+
void blockDebugShowData(const SArray* dataBlocks);
int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks, STSchema* pTSchema, int32_t vgId,
- tb_uid_t uid, tb_uid_t suid);
+ tb_uid_t suid);
-SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pSchema, bool createTb, int64_t suid, int32_t vgId);
+SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pSchema, bool createTb, int64_t suid,
+ const char* stbFullName, int32_t vgId);
static FORCE_INLINE int32_t blockGetEncodeSize(const SSDataBlock* pBlock) {
return blockDataGetSerialMetaSize(pBlock) + blockDataGetSize(pBlock);
@@ -245,57 +249,8 @@ static FORCE_INLINE int32_t blockCompressColData(SColumnInfoData* pColRes, int32
colSize + COMP_OVERFLOW_BYTES, compressed, NULL, 0);
}
-static FORCE_INLINE void blockCompressEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_t numOfCols,
- int8_t needCompress) {
- int32_t* actualLen = (int32_t*)data;
- data += sizeof(int32_t);
-
- uint64_t* groupId = (uint64_t*)data;
- data += sizeof(uint64_t);
-
- int32_t* colSizes = (int32_t*)data;
- data += numOfCols * sizeof(int32_t);
-
- *dataLen = (numOfCols * sizeof(int32_t) + sizeof(uint64_t) + sizeof(int32_t));
-
- int32_t numOfRows = pBlock->info.rows;
- for (int32_t col = 0; col < numOfCols; ++col) {
- SColumnInfoData* pColRes = (SColumnInfoData*)taosArrayGet(pBlock->pDataBlock, col);
-
- // copy the null bitmap
- if (IS_VAR_DATA_TYPE(pColRes->info.type)) {
- size_t metaSize = numOfRows * sizeof(int32_t);
- memcpy(data, pColRes->varmeta.offset, metaSize);
- data += metaSize;
- (*dataLen) += metaSize;
- } else {
- int32_t len = BitmapLen(numOfRows);
- memcpy(data, pColRes->nullbitmap, len);
- data += len;
- (*dataLen) += len;
- }
-
- if (needCompress) {
- colSizes[col] = blockCompressColData(pColRes, numOfRows, data, needCompress);
- data += colSizes[col];
- (*dataLen) += colSizes[col];
- } else {
- colSizes[col] = colDataGetLength(pColRes, numOfRows);
- (*dataLen) += colSizes[col];
- memmove(data, pColRes->pData, colSizes[col]);
- data += colSizes[col];
- }
-
- colSizes[col] = htonl(colSizes[col]);
- }
-
- *actualLen = *dataLen;
- *groupId = pBlock->info.groupId;
-}
-
#ifdef __cplusplus
}
#endif
#endif /*_TD_COMMON_EP_H_*/
-
diff --git a/include/common/tdataformat.h b/include/common/tdataformat.h
index f1f96bfedd880466bea08d2e87ad8f22341f70bb..10bc6a61764f6500305e0712650e93d55255c58a 100644
--- a/include/common/tdataformat.h
+++ b/include/common/tdataformat.h
@@ -18,6 +18,7 @@
#include "os.h"
#include "talgo.h"
+#include "tarray.h"
#include "tencode.h"
#include "ttypes.h"
#include "tutil.h"
@@ -29,6 +30,7 @@ extern "C" {
typedef struct SSchema SSchema;
typedef struct STColumn STColumn;
typedef struct STSchema STSchema;
+typedef struct SValue SValue;
typedef struct SColVal SColVal;
typedef struct STSRow2 STSRow2;
typedef struct STSRowBuilder STSRowBuilder;
@@ -39,31 +41,37 @@ typedef struct STag STag;
int32_t tTSchemaCreate(int32_t sver, SSchema *pSchema, int32_t nCols, STSchema **ppTSchema);
void tTSchemaDestroy(STSchema *pTSchema);
-// SColVal
-#define ColValNONE ((SColVal){.type = COL_VAL_NONE, .nData = 0, .pData = NULL})
-#define ColValNULL ((SColVal){.type = COL_VAL_NULL, .nData = 0, .pData = NULL})
-#define ColValDATA(nData, pData) ((SColVal){.type = COL_VAL_DATA, .nData = (nData), .pData = (pData)})
-
// STSRow2
+#define COL_VAL_NONE(CID) ((SColVal){.cid = (CID), .isNone = 1})
+#define COL_VAL_NULL(CID) ((SColVal){.cid = (CID), .isNull = 1})
+#define COL_VAL_VALUE(CID, V) ((SColVal){.cid = (CID), .value = (V)})
+
+int32_t tTSRowClone(const STSRow2 *pRow, STSRow2 **ppRow);
+void tTSRowFree(STSRow2 *pRow);
+void tTSRowGet(STSRow2 *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal);
+int32_t tTSRowToArray(STSRow2 *pRow, STSchema *pTSchema, SArray **ppArray);
int32_t tPutTSRow(uint8_t *p, STSRow2 *pRow);
int32_t tGetTSRow(uint8_t *p, STSRow2 *pRow);
-int32_t tTSRowDup(const STSRow2 *pRow, STSRow2 **ppRow);
-void tTSRowFree(STSRow2 *pRow);
-int32_t tTSRowGet(const STSRow2 *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal);
// STSRowBuilder
+#if 0
int32_t tTSRowBuilderInit(STSRowBuilder *pBuilder, int32_t sver, int32_t nCols, SSchema *pSchema);
void tTSRowBuilderClear(STSRowBuilder *pBuilder);
void tTSRowBuilderReset(STSRowBuilder *pBuilder);
int32_t tTSRowBuilderPut(STSRowBuilder *pBuilder, int32_t cid, uint8_t *pData, uint32_t nData);
int32_t tTSRowBuilderGetRow(STSRowBuilder *pBuilder, const STSRow2 **ppRow);
+#endif
// STag
-int32_t tTagNew(STagVal *pTagVals, int16_t nTag, STag **ppTag);
+int32_t tTagNew(SArray *pArray, int32_t version, int8_t isJson, STag **ppTag);
void tTagFree(STag *pTag);
-void tTagGet(STag *pTag, int16_t cid, int8_t type, uint8_t **ppData, int32_t *nData);
-int32_t tEncodeTag(SEncoder *pEncoder, STag *pTag);
-int32_t tDecodeTag(SDecoder *pDecoder, const STag **ppTag);
+bool tTagGet(const STag *pTag, STagVal *pTagVal);
+char* tTagValToData(const STagVal *pTagVal, bool isJson);
+int32_t tEncodeTag(SEncoder *pEncoder, const STag *pTag);
+int32_t tDecodeTag(SDecoder *pDecoder, STag **ppTag);
+int32_t tTagToValArray(const STag *pTag, SArray **ppArray);
+void debugPrintSTag(STag *pTag, const char *tag, int32_t ln); // TODO: remove
+void debugCheckTags(STag *pTag); // TODO: remove
// STRUCT =================
struct STColumn {
@@ -86,7 +94,9 @@ struct STSchema {
#define TSROW_HAS_NONE ((uint8_t)0x1)
#define TSROW_HAS_NULL ((uint8_t)0x2U)
#define TSROW_HAS_VAL ((uint8_t)0x4U)
-#define TSROW_KV_ROW ((uint8_t)0x10U)
+#define TSROW_KV_SMALL ((uint8_t)0x10U)
+#define TSROW_KV_MID ((uint8_t)0x20U)
+#define TSROW_KV_BIG ((uint8_t)0x40U)
struct STSRow2 {
TSKEY ts;
uint8_t flags;
@@ -109,20 +119,60 @@ struct STSRowBuilder {
STSRow2 row;
};
-typedef enum { COL_VAL_NONE = 0, COL_VAL_NULL = 1, COL_VAL_DATA = 2 } EColValT;
+struct SValue {
+ union {
+ int8_t i8; // TSDB_DATA_TYPE_BOOL||TSDB_DATA_TYPE_TINYINT
+ uint8_t u8; // TSDB_DATA_TYPE_UTINYINT
+ int16_t i16; // TSDB_DATA_TYPE_SMALLINT
+ uint16_t u16; // TSDB_DATA_TYPE_USMALLINT
+ int32_t i32; // TSDB_DATA_TYPE_INT
+ uint32_t u32; // TSDB_DATA_TYPE_UINT
+ int64_t i64; // TSDB_DATA_TYPE_BIGINT
+ uint64_t u64; // TSDB_DATA_TYPE_UBIGINT
+ TSKEY ts; // TSDB_DATA_TYPE_TIMESTAMP
+ float f; // TSDB_DATA_TYPE_FLOAT
+ double d; // TSDB_DATA_TYPE_DOUBLE
+ struct {
+ uint32_t nData;
+ uint8_t *pData;
+ };
+ };
+};
+
struct SColVal {
- EColValT type;
- uint32_t nData;
- uint8_t *pData;
+ int16_t cid;
+ int8_t isNone;
+ int8_t isNull;
+ SValue value;
};
+#pragma pack(push, 1)
struct STagVal {
- int16_t cid;
- int8_t type;
- uint32_t nData;
- uint8_t *pData;
+ union {
+ int16_t cid;
+ char *pKey;
+ };
+ int8_t type;
+ union {
+ int64_t i64;
+ struct {
+ uint32_t nData;
+ uint8_t *pData;
+ };
+ };
};
+#define TD_TAG_JSON ((int8_t)0x40) // distinguish JSON string and JSON value with the highest bit
+#define TD_TAG_LARGE ((int8_t)0x20)
+struct STag {
+ int8_t flags;
+ int16_t len;
+ int16_t nTag;
+ int32_t ver;
+ int8_t idx[];
+};
+#pragma pack(pop)
+
#if 1 //================================================================================================================================================
// Imported since 3.0 and use bitmap to demonstrate None/Null/Norm, while use Null/Norm below 3.0 without of bitmap.
#define TD_SUPPORT_BITMAP
@@ -365,109 +415,6 @@ SDataCols *tdFreeDataCols(SDataCols *pCols);
int32_t tdMergeDataCols(SDataCols *target, SDataCols *source, int32_t rowsToMerge, int32_t *pOffset, bool update,
TDRowVerT maxVer);
-// ----------------- K-V data row structure
-/* |<-------------------------------------- len -------------------------------------------->|
- * |<----- header ----->|<--------------------------- body -------------------------------->|
- * +----------+----------+---------------------------------+---------------------------------+
- * | uint16_t | int16_t | | |
- * +----------+----------+---------------------------------+---------------------------------+
- * | len | ncols | cols index | data part |
- * +----------+----------+---------------------------------+---------------------------------+
- */
-typedef void *SKVRow;
-
-typedef struct {
- int16_t colId;
- uint16_t offset;
-} SColIdx;
-
-#define TD_KV_ROW_HEAD_SIZE (sizeof(uint16_t) + sizeof(int16_t))
-
-#define kvRowLen(r) (*(uint16_t *)(r))
-#define kvRowNCols(r) (*(int16_t *)POINTER_SHIFT(r, sizeof(uint16_t)))
-#define kvRowSetLen(r, len) kvRowLen(r) = (len)
-#define kvRowSetNCols(r, n) kvRowNCols(r) = (n)
-#define kvRowColIdx(r) (SColIdx *)POINTER_SHIFT(r, TD_KV_ROW_HEAD_SIZE)
-#define kvRowValues(r) POINTER_SHIFT(r, TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * kvRowNCols(r))
-#define kvRowCpy(dst, r) memcpy((dst), (r), kvRowLen(r))
-#define kvRowColVal(r, colIdx) POINTER_SHIFT(kvRowValues(r), (colIdx)->offset)
-#define kvRowColIdxAt(r, i) (kvRowColIdx(r) + (i))
-#define kvRowFree(r) taosMemoryFreeClear(r)
-#define kvRowEnd(r) POINTER_SHIFT(r, kvRowLen(r))
-#define kvRowValLen(r) (kvRowLen(r) - TD_KV_ROW_HEAD_SIZE - sizeof(SColIdx) * kvRowNCols(r))
-#define kvRowTKey(r) (*(TKEY *)(kvRowValues(r)))
-#define kvRowKey(r) tdGetKey(kvRowTKey(r))
-#define kvRowKeys(r) POINTER_SHIFT(r, *(uint16_t *)POINTER_SHIFT(r, TD_KV_ROW_HEAD_SIZE + sizeof(int16_t)))
-#define kvRowDeleted(r) TKEY_IS_DELETED(kvRowTKey(r))
-
-SKVRow tdKVRowDup(SKVRow row);
-int32_t tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value);
-int32_t tdEncodeKVRow(void **buf, SKVRow row);
-void *tdDecodeKVRow(void *buf, SKVRow *row);
-void tdSortKVRowByColIdx(SKVRow row);
-
-static FORCE_INLINE int32_t comparTagId(const void *key1, const void *key2) {
- if (*(int16_t *)key1 > ((SColIdx *)key2)->colId) {
- return 1;
- } else if (*(int16_t *)key1 < ((SColIdx *)key2)->colId) {
- return -1;
- } else {
- return 0;
- }
-}
-
-static FORCE_INLINE void *tdGetKVRowValOfCol(const SKVRow row, int16_t colId) {
- void *ret = taosbsearch(&colId, kvRowColIdx(row), kvRowNCols(row), sizeof(SColIdx), comparTagId, TD_EQ);
- if (ret == NULL) return NULL;
- return kvRowColVal(row, (SColIdx *)ret);
-}
-
-static FORCE_INLINE void *tdGetKVRowIdxOfCol(SKVRow row, int16_t colId) {
- return taosbsearch(&colId, kvRowColIdx(row), kvRowNCols(row), sizeof(SColIdx), comparTagId, TD_EQ);
-}
-
-// ----------------- K-V data row builder
-typedef struct {
- int16_t tCols;
- int16_t nCols;
- SColIdx *pColIdx;
- uint16_t alloc;
- uint16_t size;
- void *buf;
-} SKVRowBuilder;
-
-int32_t tdInitKVRowBuilder(SKVRowBuilder *pBuilder);
-void tdDestroyKVRowBuilder(SKVRowBuilder *pBuilder);
-void tdResetKVRowBuilder(SKVRowBuilder *pBuilder);
-SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder);
-
-static FORCE_INLINE int32_t tdAddColToKVRow(SKVRowBuilder *pBuilder, col_id_t colId, const void *value, int32_t tlen) {
- if (pBuilder->nCols >= pBuilder->tCols) {
- pBuilder->tCols *= 2;
- SColIdx *pColIdx = (SColIdx *)taosMemoryRealloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols);
- if (pColIdx == NULL) return -1;
- pBuilder->pColIdx = pColIdx;
- }
-
- pBuilder->pColIdx[pBuilder->nCols].colId = colId;
- pBuilder->pColIdx[pBuilder->nCols].offset = pBuilder->size;
-
- pBuilder->nCols++;
-
- if (tlen > pBuilder->alloc - pBuilder->size) {
- while (tlen > pBuilder->alloc - pBuilder->size) {
- pBuilder->alloc *= 2;
- }
- void *buf = taosMemoryRealloc(pBuilder->buf, pBuilder->alloc);
- if (buf == NULL) return -1;
- pBuilder->buf = buf;
- }
-
- memcpy(POINTER_SHIFT(pBuilder->buf, pBuilder->size), value, tlen);
- pBuilder->size += tlen;
-
- return 0;
-}
#endif
#ifdef __cplusplus
@@ -475,3 +422,4 @@ static FORCE_INLINE int32_t tdAddColToKVRow(SKVRowBuilder *pBuilder, col_id_t co
#endif
#endif /*_TD_COMMON_DATA_FORMAT_H_*/
+
diff --git a/include/common/tglobal.h b/include/common/tglobal.h
index 2a4ef565dd1c6b6742446adee2daf953665b99e5..30ae6c2adb49a811803d04309f43f3068065269c 100644
--- a/include/common/tglobal.h
+++ b/include/common/tglobal.h
@@ -45,6 +45,8 @@ extern bool tsEnableSlaveQuery;
extern bool tsPrintAuth;
extern int64_t tsTickPerMin[3];
+extern int32_t tsCountAlwaysReturnValue;
+
// multi-process
extern int32_t tsMultiProcess;
extern int32_t tsMnodeShmSize;
@@ -102,7 +104,6 @@ extern int32_t tsMaxStreamComputDelay;
extern int32_t tsStreamCompStartDelay;
extern int32_t tsRetryStreamCompDelay;
extern float tsStreamComputDelayRatio; // the delayed computing ration of the whole time window
-extern int32_t tsProjectExecInterval;
extern int64_t tsMaxRetentWindow;
// build info
diff --git a/include/common/tmsg.h b/include/common/tmsg.h
index acf2587d9d005a1f9ff87252b0385f09dd9d3c90..e0dcfbd54442854c991f03510b1e7ba78fc9032b 100644
--- a/include/common/tmsg.h
+++ b/include/common/tmsg.h
@@ -244,7 +244,7 @@ typedef struct {
const void* pMsg;
} SSubmitMsgIter;
-int32_t tInitSubmitMsgIter(SSubmitReq* pMsg, SSubmitMsgIter* pIter);
+int32_t tInitSubmitMsgIter(const SSubmitReq* pMsg, SSubmitMsgIter* pIter);
int32_t tGetSubmitMsgNext(SSubmitMsgIter* pIter, SSubmitBlk** pPBlock);
int32_t tInitSubmitBlkIter(SSubmitMsgIter* pMsgIter, SSubmitBlk* pBlock, SSubmitBlkIter* pIter);
STSRow* tGetSubmitBlkNext(SSubmitBlkIter* pIter);
@@ -287,7 +287,7 @@ typedef struct SSchema {
char name[TSDB_COL_NAME_LEN];
} SSchema;
-#define COL_IS_SET(FLG) ((FLG) & (COL_SET_VAL | COL_SET_NULL) != 0)
+#define COL_IS_SET(FLG) (((FLG) & (COL_SET_VAL | COL_SET_NULL)) != 0)
#define COL_CLR_SET(FLG) ((FLG) &= (~(COL_SET_VAL | COL_SET_NULL)))
#define IS_BSMA_ON(s) (((s)->flags & 0x01) == COL_SMA_ON)
@@ -300,9 +300,7 @@ typedef struct SSchema {
typedef struct {
int32_t nCols;
- int32_t sver;
- int32_t tagVer;
- int32_t colVer;
+ int32_t version;
SSchema* pSchema;
} SSchemaWrapper;
@@ -310,9 +308,7 @@ static FORCE_INLINE SSchemaWrapper* tCloneSSchemaWrapper(const SSchemaWrapper* p
SSchemaWrapper* pSW = (SSchemaWrapper*)taosMemoryMalloc(sizeof(SSchemaWrapper));
if (pSW == NULL) return pSW;
pSW->nCols = pSchemaWrapper->nCols;
- pSW->sver = pSchemaWrapper->sver;
- pSW->tagVer = pSchemaWrapper->tagVer;
- pSW->colVer = pSchemaWrapper->colVer;
+ pSW->version = pSchemaWrapper->version;
pSW->pSchema = (SSchema*)taosMemoryCalloc(pSW->nCols, sizeof(SSchema));
if (pSW->pSchema == NULL) {
taosMemoryFree(pSW);
@@ -367,9 +363,7 @@ static FORCE_INLINE int32_t tDecodeSSchema(SDecoder* pDecoder, SSchema* pSchema)
static FORCE_INLINE int32_t taosEncodeSSchemaWrapper(void** buf, const SSchemaWrapper* pSW) {
int32_t tlen = 0;
tlen += taosEncodeVariantI32(buf, pSW->nCols);
- tlen += taosEncodeVariantI32(buf, pSW->sver);
- tlen += taosEncodeVariantI32(buf, pSW->tagVer);
- tlen += taosEncodeVariantI32(buf, pSW->colVer);
+ tlen += taosEncodeVariantI32(buf, pSW->version);
for (int32_t i = 0; i < pSW->nCols; i++) {
tlen += taosEncodeSSchema(buf, &pSW->pSchema[i]);
}
@@ -378,9 +372,7 @@ static FORCE_INLINE int32_t taosEncodeSSchemaWrapper(void** buf, const SSchemaWr
static FORCE_INLINE void* taosDecodeSSchemaWrapper(const void* buf, SSchemaWrapper* pSW) {
buf = taosDecodeVariantI32(buf, &pSW->nCols);
- buf = taosDecodeVariantI32(buf, &pSW->sver);
- buf = taosDecodeVariantI32(buf, &pSW->tagVer);
- buf = taosDecodeVariantI32(buf, &pSW->colVer);
+ buf = taosDecodeVariantI32(buf, &pSW->version);
pSW->pSchema = (SSchema*)taosMemoryCalloc(pSW->nCols, sizeof(SSchema));
if (pSW->pSchema == NULL) {
return NULL;
@@ -394,9 +386,7 @@ static FORCE_INLINE void* taosDecodeSSchemaWrapper(const void* buf, SSchemaWrapp
static FORCE_INLINE int32_t tEncodeSSchemaWrapper(SEncoder* pEncoder, const SSchemaWrapper* pSW) {
if (tEncodeI32v(pEncoder, pSW->nCols) < 0) return -1;
- if (tEncodeI32v(pEncoder, pSW->sver) < 0) return -1;
- if (tEncodeI32v(pEncoder, pSW->tagVer) < 0) return -1;
- if (tEncodeI32v(pEncoder, pSW->colVer) < 0) return -1;
+ if (tEncodeI32v(pEncoder, pSW->version) < 0) return -1;
for (int32_t i = 0; i < pSW->nCols; i++) {
if (tEncodeSSchema(pEncoder, &pSW->pSchema[i]) < 0) return -1;
}
@@ -406,9 +396,7 @@ static FORCE_INLINE int32_t tEncodeSSchemaWrapper(SEncoder* pEncoder, const SSch
static FORCE_INLINE int32_t tDecodeSSchemaWrapper(SDecoder* pDecoder, SSchemaWrapper* pSW) {
if (tDecodeI32v(pDecoder, &pSW->nCols) < 0) return -1;
- if (tDecodeI32v(pDecoder, &pSW->sver) < 0) return -1;
- if (tDecodeI32v(pDecoder, &pSW->tagVer) < 0) return -1;
- if (tDecodeI32v(pDecoder, &pSW->colVer) < 0) return -1;
+ if (tDecodeI32v(pDecoder, &pSW->version) < 0) return -1;
pSW->pSchema = (SSchema*)taosMemoryCalloc(pSW->nCols, sizeof(SSchema));
if (pSW->pSchema == NULL) return -1;
@@ -421,9 +409,7 @@ static FORCE_INLINE int32_t tDecodeSSchemaWrapper(SDecoder* pDecoder, SSchemaWra
static FORCE_INLINE int32_t tDecodeSSchemaWrapperEx(SDecoder* pDecoder, SSchemaWrapper* pSW) {
if (tDecodeI32v(pDecoder, &pSW->nCols) < 0) return -1;
- if (tDecodeI32v(pDecoder, &pSW->sver) < 0) return -1;
- if (tDecodeI32v(pDecoder, &pSW->tagVer) < 0) return -1;
- if (tDecodeI32v(pDecoder, &pSW->colVer) < 0) return -1;
+ if (tDecodeI32v(pDecoder, &pSW->version) < 0) return -1;
pSW->pSchema = (SSchema*)tDecoderMalloc(pDecoder, pSW->nCols * sizeof(SSchema));
if (pSW->pSchema == NULL) return -1;
@@ -469,7 +455,8 @@ int32_t tDeserializeSMDropStbReq(void* buf, int32_t bufLen, SMDropStbReq* pReq);
typedef struct {
char name[TSDB_TABLE_FNAME_LEN];
int8_t alterType;
- int32_t verInBlock;
+ int32_t tagVer;
+ int32_t colVer;
int32_t numOfFields;
SArray* pFields;
int32_t ttl;
@@ -492,12 +479,8 @@ int32_t tDecodeSEpSet(SDecoder* pDecoder, SEpSet* pEp);
int32_t taosEncodeSEpSet(void** buf, const SEpSet* pEp);
void* taosDecodeSEpSet(const void* buf, SEpSet* pEp);
-typedef struct {
- SEpSet epSet;
-} SMEpSet;
-
-int32_t tSerializeSMEpSet(void* buf, int32_t bufLen, SMEpSet* pReq);
-int32_t tDeserializeSMEpSet(void* buf, int32_t buflen, SMEpSet* pReq);
+int32_t tSerializeSEpSet(void* buf, int32_t bufLen, const SEpSet* pEpset);
+int32_t tDeserializeSEpSet(void* buf, int32_t buflen, SEpSet* pEpset);
typedef struct {
int8_t connType;
@@ -588,13 +571,6 @@ int32_t tSerializeSGetUserAuthRsp(void* buf, int32_t bufLen, SGetUserAuthRsp* pR
int32_t tDeserializeSGetUserAuthRsp(void* buf, int32_t bufLen, SGetUserAuthRsp* pRsp);
void tFreeSGetUserAuthRsp(SGetUserAuthRsp* pRsp);
-typedef struct {
- int16_t colId; // column id
- int16_t colIndex; // column index in colList if it is a normal column or index in tagColList if a tag
- int16_t flag; // denote if it is a tag or a normal column
- char name[TSDB_DB_FNAME_LEN];
-} SColIndex;
-
typedef struct {
int16_t lowerRelOptr;
int16_t upperRelOptr;
@@ -660,8 +636,7 @@ typedef struct {
int32_t tz; // query client timezone
char intervalUnit;
char slidingUnit;
- char
- offsetUnit; // TODO Remove it, the offset is the number of precision tickle, and it must be a immutable duration.
+ char offsetUnit;
int8_t precision;
int64_t interval;
int64_t sliding;
@@ -670,6 +645,9 @@ typedef struct {
typedef struct {
int32_t code;
+ char tbFName[TSDB_TABLE_FNAME_LEN];
+ int32_t sversion;
+ int32_t tversion;
} SQueryTableRsp;
int32_t tSerializeSQueryTableRsp(void* buf, int32_t bufLen, SQueryTableRsp* pRsp);
@@ -696,6 +674,7 @@ typedef struct {
int8_t replications;
int8_t strict;
int8_t cacheLastRow;
+ int8_t schemaless;
int8_t ignoreExist;
int32_t numOfRetensions;
SArray* pRetensions; // SRetention
@@ -793,6 +772,7 @@ typedef struct {
int8_t cacheLastRow;
int32_t numOfRetensions;
SArray* pRetensions;
+ int8_t schemaless;
} SDbCfgRsp;
int32_t tSerializeSDbCfgRsp(void* buf, int32_t bufLen, const SDbCfgRsp* pRsp);
@@ -805,19 +785,24 @@ typedef struct {
int32_t tSerializeSQnodeListReq(void* buf, int32_t bufLen, SQnodeListReq* pReq);
int32_t tDeserializeSQnodeListReq(void* buf, int32_t bufLen, SQnodeListReq* pReq);
+typedef struct SQueryNodeAddr {
+ int32_t nodeId; // vgId or qnodeId
+ SEpSet epSet;
+} SQueryNodeAddr;
+
typedef struct {
- SArray* addrsList; // SArray
+ SQueryNodeAddr addr;
+ uint64_t load;
+} SQueryNodeLoad;
+
+typedef struct {
+ SArray* qnodeList; // SArray
} SQnodeListRsp;
int32_t tSerializeSQnodeListRsp(void* buf, int32_t bufLen, SQnodeListRsp* pRsp);
int32_t tDeserializeSQnodeListRsp(void* buf, int32_t bufLen, SQnodeListRsp* pRsp);
void tFreeSQnodeListRsp(SQnodeListRsp* pRsp);
-typedef struct SQueryNodeAddr {
- int32_t nodeId; // vgId or qnodeId
- SEpSet epSet;
-} SQueryNodeAddr;
-
typedef struct {
SArray* pArray; // Array of SUseDbRsp
} SUseDbBatchRsp;
@@ -940,6 +925,20 @@ typedef struct {
int32_t syncState;
} SMnodeLoad;
+typedef struct {
+ int32_t dnodeId;
+ int64_t numOfProcessedQuery;
+ int64_t numOfProcessedCQuery;
+ int64_t numOfProcessedFetch;
+ int64_t numOfProcessedDrop;
+ int64_t numOfProcessedHb;
+ int64_t cacheDataSize;
+ int64_t numOfQueryInQueue;
+ int64_t numOfFetchInQueue;
+ int64_t timeInQueryQueue;
+ int64_t timeInFetchQueue;
+} SQnodeLoad;
+
typedef struct {
int32_t sver; // software version
int64_t dnodeVer; // dnode table version in sdb
@@ -950,6 +949,8 @@ typedef struct {
int32_t numOfCores;
int32_t numOfSupportVnodes;
char dnodeEp[TSDB_EP_LEN];
+ SMnodeLoad mload;
+ SQnodeLoad qload;
SClusterCfg clusterCfg;
SArray* pVloads; // array of SVnodeLoad
} SStatusReq;
@@ -994,7 +995,6 @@ typedef struct {
typedef struct {
int32_t vgId;
- int32_t dnodeId;
char db[TSDB_DB_FNAME_LEN];
int64_t dbUid;
int32_t vgVersion;
@@ -1017,11 +1017,14 @@ typedef struct {
int8_t compression;
int8_t strict;
int8_t cacheLastRow;
+ int8_t isTsma;
+ int8_t standby;
int8_t replica;
int8_t selfIndex;
SReplica replicas[TSDB_MAX_REPLICA];
int32_t numOfRetensions;
SArray* pRetensions; // SRetention
+ void* pTsma;
} SCreateVnodeReq;
int32_t tSerializeSCreateVnodeReq(void* buf, int32_t bufLen, SCreateVnodeReq* pReq);
@@ -1059,8 +1062,8 @@ typedef struct {
int8_t walLevel;
int8_t strict;
int8_t cacheLastRow;
- int8_t replica;
int8_t selfIndex;
+ int8_t replica;
SReplica replicas[TSDB_MAX_REPLICA];
} SAlterVnodeReq;
@@ -1118,6 +1121,14 @@ typedef struct {
SSchema* pSchemas;
} STableMetaRsp;
+typedef struct {
+ STableMetaRsp* pMeta;
+} SMAlterStbRsp;
+
+int32_t tEncodeSMAlterStbRsp(SEncoder *pEncoder, const SMAlterStbRsp *pRsp);
+int32_t tDecodeSMAlterStbRsp(SDecoder *pDecoder, SMAlterStbRsp *pRsp);
+void tFreeSMAlterStbRsp(SMAlterStbRsp* pRsp);
+
int32_t tSerializeSTableMetaRsp(void* buf, int32_t bufLen, STableMetaRsp* pRsp);
int32_t tDeserializeSTableMetaRsp(void* buf, int32_t bufLen, STableMetaRsp* pRsp);
void tFreeSTableMetaRsp(STableMetaRsp* pRsp);
@@ -1210,9 +1221,10 @@ typedef struct {
} SRetrieveMetaTableRsp;
typedef struct SExplainExecInfo {
- uint64_t startupCost;
- uint64_t totalCost;
+ double startupCost;
+ double totalCost;
uint64_t numOfRows;
+ uint32_t verboseLen;
void* verboseInfo;
} SExplainExecInfo;
@@ -1221,6 +1233,18 @@ typedef struct {
SExplainExecInfo* subplanInfo;
} SExplainRsp;
+typedef struct STableScanAnalyzeInfo {
+ uint64_t totalRows;
+ uint64_t totalCheckedRows;
+ uint32_t totalBlocks;
+ uint32_t loadBlocks;
+ uint32_t loadBlockStatis;
+ uint32_t skipBlocks;
+ uint32_t filterOutBlocks;
+ double elapsedTime;
+ uint64_t filterTime;
+} STableScanAnalyzeInfo;
+
int32_t tSerializeSExplainRsp(void* buf, int32_t bufLen, SExplainRsp* pRsp);
int32_t tDeserializeSExplainRsp(void* buf, int32_t bufLen, SExplainRsp* pRsp);
@@ -1260,7 +1284,6 @@ int32_t tSerializeSCreateDropMQSBNodeReq(void* buf, int32_t bufLen, SMCreateQnod
int32_t tDeserializeSCreateDropMQSBNodeReq(void* buf, int32_t bufLen, SMCreateQnodeReq* pReq);
typedef struct {
- int32_t dnodeId;
int8_t replica;
SReplica replicas[TSDB_MAX_REPLICA];
} SDCreateMnodeReq, SDAlterMnodeReq;
@@ -1436,8 +1459,10 @@ typedef struct {
int32_t code;
} STaskDropRsp;
-#define STREAM_TRIGGER_AT_ONCE 1
-#define STREAM_TRIGGER_WINDOW_CLOSE 2
+#define STREAM_TRIGGER_AT_ONCE_SMA 0
+#define STREAM_TRIGGER_AT_ONCE 1
+#define STREAM_TRIGGER_WINDOW_CLOSE 2
+#define STREAM_TRIGGER_WINDOW_CLOSE_SMA 3
typedef struct {
char name[TSDB_TABLE_FNAME_LEN];
@@ -1469,15 +1494,22 @@ typedef struct {
int64_t streamId;
} SMVCreateStreamRsp, SMSCreateStreamRsp;
+enum {
+ TOPIC_SUB_TYPE__DB = 1,
+ TOPIC_SUB_TYPE__TABLE,
+ TOPIC_SUB_TYPE__COLUMN,
+};
+
typedef struct {
char name[TSDB_TOPIC_FNAME_LEN]; // accout.topic
int8_t igExists;
- int8_t withTbName;
- int8_t withSchema;
- int8_t withTag;
+ int8_t subType;
char* sql;
- char* ast;
- char subscribeDbName[TSDB_DB_NAME_LEN];
+ char subDbName[TSDB_DB_FNAME_LEN];
+ union {
+ char* ast;
+ char subStbName[TSDB_TABLE_FNAME_LEN];
+ };
} SCMCreateTopicReq;
int32_t tSerializeSCMCreateTopicReq(void* buf, int32_t bufLen, const SCMCreateTopicReq* pReq);
@@ -1633,8 +1665,8 @@ _err:
return NULL;
}
-// this message is sent from mnode to mnode(read thread to write thread), so there is no need for serialization or
-// deserialization
+// this message is sent from mnode to mnode(read thread to write thread),
+// so there is no need for serialization or deserialization
typedef struct {
SHashObj* rebSubHash; // SHashObj
} SMqDoRebalanceMsg;
@@ -1660,6 +1692,10 @@ typedef struct {
int32_t tSerializeSMDropCgroupReq(void* buf, int32_t bufLen, SMDropCgroupReq* pReq);
int32_t tDeserializeSMDropCgroupReq(void* buf, int32_t bufLen, SMDropCgroupReq* pReq);
+typedef struct {
+ int8_t reserved;
+} SMDropCgroupRsp;
+
typedef struct {
char name[TSDB_TABLE_FNAME_LEN];
int8_t alterType;
@@ -1700,7 +1736,7 @@ typedef struct SVCreateStbReq {
char* name;
tb_uid_t suid;
int8_t rollup;
- SSchemaWrapper schema;
+ SSchemaWrapper schemaRow;
SSchemaWrapper schemaTag;
SRSmaParam pRSmaParam;
} SVCreateStbReq;
@@ -1721,9 +1757,9 @@ int32_t tDecodeSVDropStbReq(SDecoder* pCoder, SVDropStbReq* pReq);
#define TD_CREATE_IF_NOT_EXISTS 0x1
typedef struct SVCreateTbReq {
int32_t flags;
+ char* name;
tb_uid_t uid;
int64_t ctime;
- char* name;
int32_t ttl;
int8_t type;
union {
@@ -1732,7 +1768,7 @@ typedef struct SVCreateTbReq {
uint8_t* pTag;
} ctb;
struct {
- SSchemaWrapper schema;
+ SSchemaWrapper schemaRow;
} ntb;
};
} SVCreateTbReq;
@@ -1740,6 +1776,15 @@ typedef struct SVCreateTbReq {
int tEncodeSVCreateTbReq(SEncoder* pCoder, const SVCreateTbReq* pReq);
int tDecodeSVCreateTbReq(SDecoder* pCoder, SVCreateTbReq* pReq);
+static FORCE_INLINE void tdDestroySVCreateTbReq(SVCreateTbReq* req) {
+ taosMemoryFreeClear(req->name);
+ if (req->type == TSDB_CHILD_TABLE) {
+ taosMemoryFreeClear(req->ctb.pTag);
+ } else if (req->type == TSDB_NORMAL_TABLE) {
+ taosMemoryFreeClear(req->ntb.schemaRow.pSchema);
+ }
+}
+
typedef struct {
int32_t nReqs;
union {
@@ -1837,7 +1882,8 @@ int32_t tEncodeSVAlterTbReq(SEncoder* pEncoder, const SVAlterTbReq* pReq);
int32_t tDecodeSVAlterTbReq(SDecoder* pDecoder, SVAlterTbReq* pReq);
typedef struct {
- int32_t code;
+ int32_t code;
+ STableMetaRsp* pMeta;
} SVAlterTbRsp;
int32_t tEncodeSVAlterTbRsp(SEncoder* pEncoder, const SVAlterTbRsp* pRsp);
@@ -1930,6 +1976,7 @@ typedef struct {
int8_t killConnection;
int8_t align[3];
SEpSet epSet;
+ SArray* pQnodeList;
} SQueryHbRspBasic;
typedef struct {
@@ -2009,7 +2056,10 @@ static FORCE_INLINE void tFreeClientKv(void* pKv) {
static FORCE_INLINE void tFreeClientHbRsp(void* pRsp) {
SClientHbRsp* rsp = (SClientHbRsp*)pRsp;
- taosMemoryFreeClear(rsp->query);
+ if (rsp->query) {
+ taosArrayDestroy(rsp->query->pQnodeList);
+ taosMemoryFreeClear(rsp->query);
+ }
if (rsp->info) taosArrayDestroyEx(rsp->info, tFreeClientKv);
}
@@ -2137,11 +2187,6 @@ static FORCE_INLINE void* taosDecodeSMqMsg(void* buf, SMqHbMsg* pMsg) {
return buf;
}
-enum {
- TOPIC_SUB_TYPE__DB = 1,
- TOPIC_SUB_TYPE__TABLE,
-};
-
typedef struct {
SMsgHead head;
int64_t leftForVer;
@@ -2161,10 +2206,8 @@ typedef struct {
int64_t newConsumerId;
char subKey[TSDB_SUBSCRIBE_KEY_LEN];
int8_t subType;
- int8_t withTbName;
- int8_t withSchema;
- int8_t withTag;
char* qmsg;
+ int64_t suid;
} SMqRebVgReq;
static FORCE_INLINE int32_t tEncodeSMqRebVgReq(void** buf, const SMqRebVgReq* pReq) {
@@ -2175,11 +2218,10 @@ static FORCE_INLINE int32_t tEncodeSMqRebVgReq(void** buf, const SMqRebVgReq* pR
tlen += taosEncodeFixedI64(buf, pReq->newConsumerId);
tlen += taosEncodeString(buf, pReq->subKey);
tlen += taosEncodeFixedI8(buf, pReq->subType);
- tlen += taosEncodeFixedI8(buf, pReq->withTbName);
- tlen += taosEncodeFixedI8(buf, pReq->withSchema);
- tlen += taosEncodeFixedI8(buf, pReq->withTag);
- if (pReq->subType == TOPIC_SUB_TYPE__TABLE) {
+ if (pReq->subType == TOPIC_SUB_TYPE__COLUMN) {
tlen += taosEncodeString(buf, pReq->qmsg);
+ } else if (pReq->subType == TOPIC_SUB_TYPE__TABLE) {
+ tlen += taosEncodeFixedI64(buf, pReq->suid);
}
return tlen;
}
@@ -2191,11 +2233,10 @@ static FORCE_INLINE void* tDecodeSMqRebVgReq(const void* buf, SMqRebVgReq* pReq)
buf = taosDecodeFixedI64(buf, &pReq->newConsumerId);
buf = taosDecodeStringTo(buf, pReq->subKey);
buf = taosDecodeFixedI8(buf, &pReq->subType);
- buf = taosDecodeFixedI8(buf, &pReq->withTbName);
- buf = taosDecodeFixedI8(buf, &pReq->withSchema);
- buf = taosDecodeFixedI8(buf, &pReq->withTag);
- if (pReq->subType == TOPIC_SUB_TYPE__TABLE) {
+ if (pReq->subType == TOPIC_SUB_TYPE__COLUMN) {
buf = taosDecodeString(buf, &pReq->qmsg);
+ } else if (pReq->subType == TOPIC_SUB_TYPE__TABLE) {
+ buf = taosDecodeFixedI64(buf, &pReq->suid);
}
return (void*)buf;
}
@@ -2259,6 +2300,7 @@ typedef struct {
int8_t intervalUnit; // MACRO: TIME_UNIT_XXX
int8_t slidingUnit; // MACRO: TIME_UNIT_XXX
int8_t timezoneInt; // sma data expired if timezone changes.
+ int32_t dstVgId;
char indexName[TSDB_INDEX_NAME_LEN];
int32_t exprLen;
int32_t tagsFilterLen;
@@ -2305,19 +2347,19 @@ typedef struct {
STSma* tSma;
} STSmaWrapper;
-static FORCE_INLINE void tdDestroyTSma(STSma* pSma) {
+static FORCE_INLINE void tDestroyTSma(STSma* pSma) {
if (pSma) {
taosMemoryFreeClear(pSma->expr);
taosMemoryFreeClear(pSma->tagsFilter);
}
}
-static FORCE_INLINE void tdDestroyTSmaWrapper(STSmaWrapper* pSW, bool deepCopy) {
+static FORCE_INLINE void tDestroyTSmaWrapper(STSmaWrapper* pSW, bool deepCopy) {
if (pSW) {
if (pSW->tSma) {
if (deepCopy) {
for (uint32_t i = 0; i < pSW->number; ++i) {
- tdDestroyTSma(pSW->tSma + i);
+ tDestroyTSma(pSW->tSma + i);
}
}
taosMemoryFreeClear(pSW->tSma);
@@ -2325,8 +2367,8 @@ static FORCE_INLINE void tdDestroyTSmaWrapper(STSmaWrapper* pSW, bool deepCopy)
}
}
-static FORCE_INLINE void* tdFreeTSmaWrapper(STSmaWrapper* pSW, bool deepCopy) {
- tdDestroyTSmaWrapper(pSW, deepCopy);
+static FORCE_INLINE void* tFreeTSmaWrapper(STSmaWrapper* pSW, bool deepCopy) {
+ tDestroyTSmaWrapper(pSW, deepCopy);
taosMemoryFreeClear(pSW);
return NULL;
}
@@ -2353,6 +2395,17 @@ static int32_t tDecodeTSmaWrapper(SDecoder* pDecoder, STSmaWrapper* pReq) {
return 0;
}
+typedef struct {
+ int64_t tsmaIndexUid;
+ STimeWindow queryWindow;
+} SVGetTsmaExpWndsReq;
+
+typedef struct {
+ int64_t tsmaIndexUid;
+ int32_t numExpWnds;
+ TSKEY* expWndsStartTs;
+} SVGetTsmaExpWndsRsp;
+
typedef struct {
int idx;
} SMCreateFullTextReq;
@@ -2401,7 +2454,7 @@ typedef struct {
int32_t epoch;
uint64_t reqId;
int64_t consumerId;
- int64_t waitTime;
+ int64_t timeout;
int64_t currentOffset;
} SMqPollReq;
@@ -2428,7 +2481,7 @@ static FORCE_INLINE void* tDecodeSMqSubVgEp(void* buf, SMqSubVgEp* pVgEp) {
typedef struct {
char topic[TSDB_TOPIC_FNAME_LEN];
- int8_t isSchemaAdaptive;
+ char db[TSDB_DB_FNAME_LEN];
SArray* vgs; // SArray
SSchemaWrapper schema;
} SMqSubTopicEp;
@@ -2436,7 +2489,7 @@ typedef struct {
static FORCE_INLINE int32_t tEncodeSMqSubTopicEp(void** buf, const SMqSubTopicEp* pTopicEp) {
int32_t tlen = 0;
tlen += taosEncodeString(buf, pTopicEp->topic);
- tlen += taosEncodeFixedI8(buf, pTopicEp->isSchemaAdaptive);
+ tlen += taosEncodeString(buf, pTopicEp->db);
int32_t sz = taosArrayGetSize(pTopicEp->vgs);
tlen += taosEncodeFixedI32(buf, sz);
for (int32_t i = 0; i < sz; i++) {
@@ -2449,7 +2502,7 @@ static FORCE_INLINE int32_t tEncodeSMqSubTopicEp(void** buf, const SMqSubTopicEp
static FORCE_INLINE void* tDecodeSMqSubTopicEp(void* buf, SMqSubTopicEp* pTopicEp) {
buf = taosDecodeStringTo(buf, pTopicEp->topic);
- buf = taosDecodeFixedI8(buf, &pTopicEp->isSchemaAdaptive);
+ buf = taosDecodeStringTo(buf, pTopicEp->db);
int32_t sz;
buf = taosDecodeFixedI32(buf, &sz);
pTopicEp->vgs = taosArrayInit(sz, sizeof(SMqSubVgEp));
@@ -2527,6 +2580,12 @@ static FORCE_INLINE void* tDecodeSMqDataBlkRsp(const void* buf, SMqDataBlkRsp* p
buf = taosDecodeFixedI8(buf, &pRsp->withTbName);
buf = taosDecodeFixedI8(buf, &pRsp->withSchema);
buf = taosDecodeFixedI8(buf, &pRsp->withTag);
+ if (pRsp->withTbName) {
+ pRsp->blockTbName = taosArrayInit(pRsp->blockNum, sizeof(void*));
+ }
+ if (pRsp->withSchema) {
+ pRsp->blockSchema = taosArrayInit(pRsp->blockNum, sizeof(void*));
+ }
for (int32_t i = 0; i < pRsp->blockNum; i++) {
int32_t bLen = 0;
@@ -2536,20 +2595,14 @@ static FORCE_INLINE void* tDecodeSMqDataBlkRsp(const void* buf, SMqDataBlkRsp* p
taosArrayPush(pRsp->blockDataLen, &bLen);
taosArrayPush(pRsp->blockData, &data);
if (pRsp->withSchema) {
- pRsp->blockSchema = taosArrayInit(pRsp->blockNum, sizeof(void*));
SSchemaWrapper* pSW = (SSchemaWrapper*)taosMemoryMalloc(sizeof(SSchemaWrapper));
buf = taosDecodeSSchemaWrapper(buf, pSW);
taosArrayPush(pRsp->blockSchema, &pSW);
- } else {
- pRsp->blockSchema = NULL;
}
if (pRsp->withTbName) {
- pRsp->blockTbName = taosArrayInit(pRsp->blockNum, sizeof(void*));
char* name = NULL;
buf = taosDecodeString(buf, &name);
taosArrayPush(pRsp->blockTbName, &name);
- } else {
- pRsp->blockTbName = NULL;
}
}
}
@@ -2616,6 +2669,23 @@ typedef struct {
int32_t tEncodeSVSubmitReq(SEncoder* pCoder, const SVSubmitReq* pReq);
int32_t tDecodeSVSubmitReq(SDecoder* pCoder, SVSubmitReq* pReq);
+// TDMT_VND_DELETE
+typedef struct {
+ TSKEY sKey;
+ TSKEY eKey;
+
+ // super table
+ char* stbName;
+
+ // child/normal
+ char* tbName;
+} SVDeleteReq;
+
+typedef struct {
+ int32_t code;
+ // TODO
+} SVDeleteRsp;
+
#pragma pack(pop)
#ifdef __cplusplus
diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h
index e8e931daa50292f333b3c56cff0983ed09bb3638..860674b34361a42bb8b6ef2267d32b191b1c78d0 100644
--- a/include/common/tmsgdef.h
+++ b/include/common/tmsgdef.h
@@ -150,6 +150,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_MND_MQ_CONSUMER_LOST, "mnode-mq-consumer-lost", SMqConsumerLostMsg, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_MQ_CONSUMER_RECOVER, "mnode-mq-consumer-recover", SMqConsumerRecoverMsg, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_MQ_DO_REBALANCE, "mnode-mq-do-rebalance", SMqDoRebalanceMsg, NULL)
+ TD_DEF_MSG_TYPE(TDMT_MND_MQ_DROP_CGROUP, "mnode-mq-drop-cgroup", SMqDropCGroupReq, SMqDropCGroupRsp)
TD_DEF_MSG_TYPE(TDMT_MND_MQ_COMMIT_OFFSET, "mnode-mq-commit-offset", SMqCMCommitOffsetReq, SMqCMCommitOffsetRsp)
TD_DEF_MSG_TYPE(TDMT_MND_CREATE_STREAM, "mnode-create-stream", SCMCreateStreamReq, SCMCreateStreamRsp)
TD_DEF_MSG_TYPE(TDMT_MND_ALTER_STREAM, "mnode-alter-stream", NULL, NULL)
@@ -180,8 +181,6 @@ enum {
TD_DEF_MSG_TYPE(TDMT_VND_MQ_DISCONNECT, "vnode-mq-disconnect", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_MQ_VG_CHANGE, "vnode-mq-vg-change", SMqRebVgReq, SMqRebVgRsp)
TD_DEF_MSG_TYPE(TDMT_VND_MQ_VG_DELETE, "vnode-mq-vg-delete", SMqVDeleteReq, SMqVDeleteRsp)
- TD_DEF_MSG_TYPE(TDMT_VND_RES_READY, "vnode-res-ready", NULL, NULL)
- TD_DEF_MSG_TYPE(TDMT_VND_TASKS_STATUS, "vnode-tasks-status", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_CANCEL_TASK, "vnode-cancel-task", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_DROP_TASK, "vnode-drop-task", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_CREATE_TOPIC, "vnode-create-topic", NULL, NULL)
@@ -194,11 +193,8 @@ enum {
TD_DEF_MSG_TYPE(TDMT_VND_EXPLAIN, "vnode-explain", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_SUBSCRIBE, "vnode-subscribe", SMVSubscribeReq, SMVSubscribeRsp)
- TD_DEF_MSG_TYPE(TDMT_VND_CONSUME, "vnode-consume", SMqCVConsumeReq, SMqCVConsumeRsp)
+ TD_DEF_MSG_TYPE(TDMT_VND_CONSUME, "vnode-consume", SMqPollReq, SMqDataBlkRsp)
TD_DEF_MSG_TYPE(TDMT_VND_TASK_DEPLOY, "vnode-task-deploy", SStreamTaskDeployReq, SStreamTaskDeployRsp)
- TD_DEF_MSG_TYPE(TDMT_VND_TASK_PIPE_EXEC, "vnode-task-pipe-exec", SStreamTaskExecReq, SStreamTaskExecRsp)
- TD_DEF_MSG_TYPE(TDMT_VND_TASK_MERGE_EXEC, "vnode-task-merge-exec", SStreamTaskExecReq, SStreamTaskExecRsp)
- TD_DEF_MSG_TYPE(TDMT_VND_TASK_WRITE_EXEC, "vnode-task-write-exec", SStreamTaskExecReq, SStreamTaskExecRsp)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TRIGGER, "vnode-stream-trigger", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_TASK_RUN, "vnode-stream-task-run", NULL, NULL)
@@ -209,6 +205,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_VND_CANCEL_SMA, "vnode-cancel-sma", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_DROP_SMA, "vnode-drop-sma", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT_RSMA, "vnode-submit-rsma", SSubmitReq, SSubmitRsp)
+ TD_DEF_MSG_TYPE(TDMT_VND_GET_TSMA_EXP_WNDS, "vnode-get-tsma-expired-windows", SVGetTsmaExpWndsReq, SVGetTsmaExpWndsRsp)
TD_DEF_MSG_TYPE(TDMT_VND_SYNC_TIMEOUT, "vnode-sync-timeout", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_SYNC_PING, "vnode-sync-ping", NULL, NULL)
@@ -225,9 +222,11 @@ enum {
TD_DEF_MSG_TYPE(TDMT_VND_SYNC_APPLY_MSG, "vnode-sync-apply-msg", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_SYNC_CONFIG_CHANGE, "vnode-sync-config-change", NULL, NULL)
- TD_DEF_MSG_TYPE(TDMT_VND_SYNC_VNODE, "vnode-sync-vnode", NULL, NULL)
- TD_DEF_MSG_TYPE(TDMT_VND_ALTER_VNODE, "vnode-alter-vnode", NULL, NULL)
- TD_DEF_MSG_TYPE(TDMT_VND_COMPACT_VNODE, "vnode-compact-vnode", NULL, NULL)
+ TD_DEF_MSG_TYPE(TDMT_VND_ALTER_CONFIG, "vnode-alter-config", NULL, NULL)
+ TD_DEF_MSG_TYPE(TDMT_VND_ALTER_REPLICA, "vnode-alter-replica", NULL, NULL)
+ TD_DEF_MSG_TYPE(TDMT_VND_COMPACT, "vnode-compact", NULL, NULL)
+
+ TD_DEF_MSG_TYPE(TDMT_VND_DELETE, "vnode-delete-data", SVDeleteReq, SVDeleteRsp)
// Requests handled by QNODE
TD_NEW_MSG_SEG(TDMT_QND_MSG)
@@ -235,9 +234,13 @@ enum {
// Requests handled by SNODE
TD_NEW_MSG_SEG(TDMT_SND_MSG)
TD_DEF_MSG_TYPE(TDMT_SND_TASK_DEPLOY, "snode-task-deploy", SStreamTaskDeployReq, SStreamTaskDeployRsp)
- TD_DEF_MSG_TYPE(TDMT_SND_TASK_EXEC, "snode-task-exec", SStreamTaskExecReq, SStreamTaskExecRsp)
- TD_DEF_MSG_TYPE(TDMT_SND_TASK_PIPE_EXEC, "snode-task-pipe-exec", SStreamTaskExecReq, SStreamTaskExecRsp)
- TD_DEF_MSG_TYPE(TDMT_SND_TASK_MERGE_EXEC, "snode-task-merge-exec", SStreamTaskExecReq, SStreamTaskExecRsp)
+ //TD_DEF_MSG_TYPE(TDMT_SND_TASK_EXEC, "snode-task-exec", SStreamTaskExecReq, SStreamTaskExecRsp)
+ //TD_DEF_MSG_TYPE(TDMT_SND_TASK_PIPE_EXEC, "snode-task-pipe-exec", SStreamTaskExecReq, SStreamTaskExecRsp)
+ //TD_DEF_MSG_TYPE(TDMT_SND_TASK_MERGE_EXEC, "snode-task-merge-exec", SStreamTaskExecReq, SStreamTaskExecRsp)
+
+ TD_DEF_MSG_TYPE(TDMT_SND_TASK_RUN, "snode-stream-task-run", NULL, NULL)
+ TD_DEF_MSG_TYPE(TDMT_SND_TASK_DISPATCH, "snode-stream-task-dispatch", NULL, NULL)
+ TD_DEF_MSG_TYPE(TDMT_SND_TASK_RECOVER, "snode-stream-task-recover", NULL, NULL)
// Requests handled by SCHEDULER
TD_NEW_MSG_SEG(TDMT_SCH_MSG)
@@ -252,6 +255,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_MON_BM_INFO, "monitor-binfo", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MON_VM_LOAD, "monitor-vload", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MON_MM_LOAD, "monitor-mload", NULL, NULL)
+ TD_DEF_MSG_TYPE(TDMT_MON_QM_LOAD, "monitor-qload", NULL, NULL)
#if defined(TD_MSG_NUMBER_)
TDMT_MAX
diff --git a/include/common/ttokendef.h b/include/common/ttokendef.h
index 68199fa51997d657b9de180ce6773c759d51c5a9..c3b0e54f3da416ccc2e2b2dbd6c05ec356a50a30 100644
--- a/include/common/ttokendef.h
+++ b/include/common/ttokendef.h
@@ -93,40 +93,40 @@
#define TK_VGROUPS 75
#define TK_SINGLE_STABLE 76
#define TK_RETENTIONS 77
-#define TK_NK_COLON 78
-#define TK_TABLE 79
-#define TK_NK_LP 80
-#define TK_NK_RP 81
-#define TK_STABLE 82
-#define TK_ADD 83
-#define TK_COLUMN 84
-#define TK_MODIFY 85
-#define TK_RENAME 86
-#define TK_TAG 87
-#define TK_SET 88
-#define TK_NK_EQ 89
-#define TK_USING 90
-#define TK_TAGS 91
-#define TK_COMMENT 92
-#define TK_BOOL 93
-#define TK_TINYINT 94
-#define TK_SMALLINT 95
-#define TK_INT 96
-#define TK_INTEGER 97
-#define TK_BIGINT 98
-#define TK_FLOAT 99
-#define TK_DOUBLE 100
-#define TK_BINARY 101
-#define TK_TIMESTAMP 102
-#define TK_NCHAR 103
-#define TK_UNSIGNED 104
-#define TK_JSON 105
-#define TK_VARCHAR 106
-#define TK_MEDIUMBLOB 107
-#define TK_BLOB 108
-#define TK_VARBINARY 109
-#define TK_DECIMAL 110
-#define TK_DELAY 111
+#define TK_SCHEMALESS 78
+#define TK_NK_COLON 79
+#define TK_TABLE 80
+#define TK_NK_LP 81
+#define TK_NK_RP 82
+#define TK_STABLE 83
+#define TK_ADD 84
+#define TK_COLUMN 85
+#define TK_MODIFY 86
+#define TK_RENAME 87
+#define TK_TAG 88
+#define TK_SET 89
+#define TK_NK_EQ 90
+#define TK_USING 91
+#define TK_TAGS 92
+#define TK_COMMENT 93
+#define TK_BOOL 94
+#define TK_TINYINT 95
+#define TK_SMALLINT 96
+#define TK_INT 97
+#define TK_INTEGER 98
+#define TK_BIGINT 99
+#define TK_FLOAT 100
+#define TK_DOUBLE 101
+#define TK_BINARY 102
+#define TK_TIMESTAMP 103
+#define TK_NCHAR 104
+#define TK_UNSIGNED 105
+#define TK_JSON 106
+#define TK_VARCHAR 107
+#define TK_MEDIUMBLOB 108
+#define TK_BLOB 109
+#define TK_VARBINARY 110
+#define TK_DECIMAL 111
#define TK_FILE_FACTOR 112
#define TK_NK_FLOAT 113
#define TK_ROLLUP 114
@@ -161,8 +161,8 @@
#define TK_INTERVAL 143
#define TK_TOPIC 144
#define TK_AS 145
-#define TK_WITH 146
-#define TK_SCHEMA 147
+#define TK_CONSUMER 146
+#define TK_GROUP 147
#define TK_DESC 148
#define TK_DESCRIBE 149
#define TK_RESET 150
@@ -237,22 +237,21 @@
#define TK_PREV 219
#define TK_LINEAR 220
#define TK_NEXT 221
-#define TK_GROUP 222
-#define TK_HAVING 223
-#define TK_ORDER 224
-#define TK_SLIMIT 225
-#define TK_SOFFSET 226
-#define TK_LIMIT 227
-#define TK_OFFSET 228
-#define TK_ASC 229
-#define TK_NULLS 230
-#define TK_ID 231
-#define TK_NK_BITNOT 232
-#define TK_INSERT 233
-#define TK_VALUES 234
-#define TK_IMPORT 235
-#define TK_NK_SEMI 236
-#define TK_FILE 237
+#define TK_HAVING 222
+#define TK_ORDER 223
+#define TK_SLIMIT 224
+#define TK_SOFFSET 225
+#define TK_LIMIT 226
+#define TK_OFFSET 227
+#define TK_ASC 228
+#define TK_NULLS 229
+#define TK_ID 230
+#define TK_NK_BITNOT 231
+#define TK_INSERT 232
+#define TK_VALUES 233
+#define TK_IMPORT 234
+#define TK_NK_SEMI 235
+#define TK_FILE 236
#define TK_NK_SPACE 300
#define TK_NK_COMMENT 301
diff --git a/include/dnode/mnode/mnode.h b/include/dnode/mnode/mnode.h
index f2c8c916c8b9704f69a8a0d6caaf214c2b34e7fd..ab090940f218abe745fff2bfea170c9b6abf9248 100644
--- a/include/dnode/mnode/mnode.h
+++ b/include/dnode/mnode/mnode.h
@@ -29,6 +29,8 @@ extern "C" {
typedef struct SMnode SMnode;
typedef struct {
+ int32_t dnodeId;
+ bool standby;
bool deploy;
int8_t replica;
int8_t selfIndex;
@@ -53,15 +55,6 @@ SMnode *mndOpen(const char *path, const SMnodeOpt *pOption);
*/
void mndClose(SMnode *pMnode);
-/**
- * @brief Close a mnode.
- *
- * @param pMnode The mnode object to close.
- * @param pOption Options of the mnode.
- * @return int32_t 0 for success, -1 for failure.
- */
-int32_t mndAlter(SMnode *pMnode, const SMnodeOpt *pOption);
-
/**
* @brief Start mnode
*
@@ -88,7 +81,7 @@ int32_t mndGetLoad(SMnode *pMnode, SMnodeLoad *pLoad);
* @param pMsg The request msg.
* @return int32_t 0 for success, -1 for failure.
*/
-int32_t mndProcessMsg(SRpcMsg *pMsg);
+int32_t mndProcessRpcMsg(SRpcMsg *pMsg);
int32_t mndProcessSyncMsg(SRpcMsg *pMsg);
/**
diff --git a/include/dnode/qnode/qnode.h b/include/dnode/qnode/qnode.h
index 1ab101f705ac3f71fad134c200a22f903e4a8e86..7d342c4ba12fba1edb74cd7ce3d093e1dea037b3 100644
--- a/include/dnode/qnode/qnode.h
+++ b/include/dnode/qnode/qnode.h
@@ -25,17 +25,6 @@ extern "C" {
/* ------------------------ TYPES EXPOSED ------------------------ */
typedef struct SQnode SQnode;
-typedef struct {
- int64_t numOfStartTask;
- int64_t numOfStopTask;
- int64_t numOfRecvedFetch;
- int64_t numOfSentHb;
- int64_t numOfSentFetch;
- int64_t numOfTaskInQueue;
- int64_t numOfFetchInQueue;
- int64_t numOfErrors;
-} SQnodeLoad;
-
typedef struct {
SMsgCb msgCb;
} SQnodeOpt;
@@ -71,10 +60,10 @@ int32_t qndGetLoad(SQnode *pQnode, SQnodeLoad *pLoad);
* @param pQnode The qnode object.
* @param pMsg The request message
*/
-int32_t qndProcessQueryMsg(SQnode *pQnode, SRpcMsg *pMsg);
+int32_t qndProcessQueryMsg(SQnode *pQnode, int64_t ts, SRpcMsg *pMsg);
#ifdef __cplusplus
}
#endif
-#endif /*_TD_QNODE_H_*/
\ No newline at end of file
+#endif /*_TD_QNODE_H_*/
diff --git a/include/libs/catalog/catalog.h b/include/libs/catalog/catalog.h
index e64fb4235cc6c9c765f7aff4285683b7d2d2cbbd..f0e642bc9af8060d0b6bc0380f2c85284f307642 100644
--- a/include/libs/catalog/catalog.h
+++ b/include/libs/catalog/catalog.h
@@ -52,23 +52,31 @@ typedef struct SUserAuthInfo {
AUTH_TYPE type;
} SUserAuthInfo;
+typedef struct SDbInfo {
+ int32_t vgVer;
+ int32_t tbNum;
+ int64_t dbId;
+} SDbInfo;
+
typedef struct SCatalogReq {
- SArray *pTableMeta; // element is SNAME
SArray *pDbVgroup; // element is db full name
+ SArray *pDbCfg; // element is db full name
+ SArray *pDbInfo; // element is db full name
+ SArray *pTableMeta; // element is SNAME
SArray *pTableHash; // element is SNAME
SArray *pUdf; // element is udf name
- SArray *pDbCfg; // element is db full name
SArray *pIndex; // element is index name
SArray *pUser; // element is SUserAuthInfo
bool qNodeRequired; // valid qnode
} SCatalogReq;
typedef struct SMetaData {
- SArray *pTableMeta; // SArray
SArray *pDbVgroup; // SArray*>
+ SArray *pDbCfg; // SArray
+ SArray *pDbInfo; // SArray
+ SArray *pTableMeta; // SArray
SArray *pTableHash; // SArray
SArray *pUdfList; // SArray
- SArray *pDbCfg; // SArray
SArray *pIndex; // SArray
SArray *pUser; // SArray
SArray *pQnodeList; // SArray
@@ -101,6 +109,7 @@ typedef struct SDbVgVersion {
typedef struct STbSVersion {
char* tbFName;
int32_t sver;
+ int32_t tver;
} STbSVersion;
typedef struct SUserAuthVersion {
@@ -174,7 +183,7 @@ int32_t catalogGetTableMeta(SCatalog* pCatalog, void * pTransporter, const SEpSe
*/
int32_t catalogGetSTableMeta(SCatalog* pCatalog, void * pTransporter, const SEpSet* pMgmtEps, const SName* pTableName, STableMeta** pTableMeta);
-int32_t catalogUpdateSTableMeta(SCatalog* pCatalog, STableMetaRsp *rspMsg);
+int32_t catalogUpdateTableMeta(SCatalog* pCatalog, STableMetaRsp *rspMsg);
/**
@@ -248,6 +257,8 @@ int32_t catalogGetTableHashVgroup(SCatalog* pCatalog, void * pTransporter, const
*/
int32_t catalogGetAllMeta(SCatalog* pCatalog, void *pTransporter, const SEpSet* pMgmtEps, const SCatalogReq* pReq, SMetaData* pRsp);
+int32_t catalogAsyncGetAllMeta(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, uint64_t reqId, const SCatalogReq* pReq, catalogCallback fp, void* param, int64_t* jobId);
+
int32_t catalogGetQnodeList(SCatalog* pCatalog, void *pTransporter, const SEpSet* pMgmtEps, SArray* pQnodeList);
int32_t catalogGetExpiredSTables(SCatalog* pCatalog, SSTableMetaVersion **stables, uint32_t *num);
@@ -266,6 +277,11 @@ int32_t catalogChkAuth(SCatalog* pCtg, void *pRpc, const SEpSet* pMgmtEps, const
int32_t catalogUpdateUserAuthInfo(SCatalog* pCtg, SGetUserAuthRsp* pAuth);
+int32_t catalogUpdateVgEpSet(SCatalog* pCtg, const char* dbFName, int32_t vgId, SEpSet *epSet);
+
+
+int32_t ctgdLaunchAsyncCall(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, uint64_t reqId);
+
/**
* Destroy catalog and relase all resources
diff --git a/include/libs/command/command.h b/include/libs/command/command.h
index 0cd566ee464dc23d0af5288281448c98204e6a2e..aee6b837837d7b3d9e3cbf37cde21c7a626c1a4f 100644
--- a/include/libs/command/command.h
+++ b/include/libs/command/command.h
@@ -24,7 +24,7 @@ int32_t qExecCommand(SNode* pStmt, SRetrieveTableRsp** pRsp);
int32_t qExecStaticExplain(SQueryPlan *pDag, SRetrieveTableRsp **pRsp);
int32_t qExecExplainBegin(SQueryPlan *pDag, SExplainCtx **pCtx, int64_t startTs);
int32_t qExecExplainEnd(SExplainCtx *pCtx, SRetrieveTableRsp **pRsp);
-int32_t qExplainUpdateExecInfo(SExplainCtx *pCtx, SExplainRsp *pRspMsg, int32_t groupId, SRetrieveTableRsp **pRsp);
+int32_t qExplainUpdateExecInfo(SExplainCtx *pCtx, SExplainRsp *pRspMsg, int32_t groupId, SRetrieveTableRsp **pRsp);
void qExplainFreeCtx(SExplainCtx *pCtx);
diff --git a/include/libs/executor/dataSinkMgt.h b/include/libs/executor/dataSinkMgt.h
index 339743f153968a2ae6910ac68735bbf295925041..2cc9caca6fa4d8e4dd4bd6a8d7b490e7baaf2c34 100644
--- a/include/libs/executor/dataSinkMgt.h
+++ b/include/libs/executor/dataSinkMgt.h
@@ -32,6 +32,10 @@ extern "C" {
struct SDataSink;
struct SSDataBlock;
+typedef struct SDataSinkStat {
+ uint64_t cachedSize;
+} SDataSinkStat;
+
typedef struct SDataSinkMgtCfg {
uint32_t maxDataBlockNum; // todo: this should be numOfRows?
uint32_t maxDataBlockNumPerQuery;
@@ -62,6 +66,8 @@ typedef struct SOutputData {
*/
int32_t dsCreateDataSinker(const SDataSinkNode* pDataSink, DataSinkHandle* pHandle);
+int32_t dsDataSinkGetCacheSize(SDataSinkStat *pStat);
+
/**
* Put the result set returned by the executor into datasinker.
* @param handle
@@ -88,6 +94,8 @@ void dsGetDataLength(DataSinkHandle handle, int32_t* pLen, bool* pQueryEnd);
*/
int32_t dsGetDataBlock(DataSinkHandle handle, SOutputData* pOutput);
+int32_t dsGetCacheSize(DataSinkHandle handle, uint64_t *pSize);
+
/**
* After dsGetStatus returns DS_NEED_SCHEDULE, the caller need to put this into the work queue.
* @param ahandle
diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h
index 9cafb4ee04543f1978f68c982a5208fcde2c25a4..288248422b8288b98d8f0fccaef040186294cb76 100644
--- a/include/libs/executor/executor.h
+++ b/include/libs/executor/executor.h
@@ -61,7 +61,7 @@ qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, void* streamReadHandle);
* @param type
* @return
*/
-int32_t qSetStreamInput(qTaskInfo_t tinfo, const void* input, int32_t type);
+int32_t qSetStreamInput(qTaskInfo_t tinfo, const void* input, int32_t type, bool assignUid);
/**
* Set multiple input data blocks for the stream scan.
@@ -71,7 +71,7 @@ int32_t qSetStreamInput(qTaskInfo_t tinfo, const void* input, int32_t type);
* @param type
* @return
*/
-int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type);
+int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type, bool assignUid);
/**
* Update the table id list, add or remove.
@@ -156,18 +156,6 @@ int64_t qGetQueriedTableUid(qTaskInfo_t tinfo);
*/
int32_t qGetQualifiedTableIdList(void* pTableList, const char* tagCond, int32_t tagCondLen, SArray* pTableIdList);
-/**
- * Create the table group according to the group by tags info
- * @param pTableIdList
- * @param skey
- * @param groupInfo
- * @param groupByIndex
- * @param numOfIndex
- * @return
- */
-// int32_t qCreateTableGroupByGroupExpr(SArray* pTableIdList, TSKEY skey, STableGroupInfo groupInfo, SColIndex*
-// groupByIndex, int32_t numOfIndex);
-
/**
* Update the table id list of a given query.
* @param uid child table uid
diff --git a/include/libs/function/function.h b/include/libs/function/function.h
index 7d3e969c4119cc2e4eaf140188e0f85ee62bcc6e..e8cb363e08fa65385d36762face331f5de5cf1eb 100644
--- a/include/libs/function/function.h
+++ b/include/libs/function/function.h
@@ -39,6 +39,7 @@ typedef bool (*FExecInit)(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInf
typedef int32_t (*FExecProcess)(struct SqlFunctionCtx *pCtx);
typedef int32_t (*FExecFinalize)(struct SqlFunctionCtx *pCtx, SSDataBlock* pBlock);
typedef int32_t (*FScalarExecProcess)(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
+typedef int32_t (*FExecCombine)(struct SqlFunctionCtx *pDestCtx, struct SqlFunctionCtx *pSourceCtx);
typedef struct SScalarFuncExecFuncs {
FExecGetEnv getEnv;
@@ -50,6 +51,7 @@ typedef struct SFuncExecFuncs {
FExecInit init;
FExecProcess process;
FExecFinalize finalize;
+ FExecCombine combine;
} SFuncExecFuncs;
typedef struct SFileBlockInfo {
@@ -59,56 +61,9 @@ typedef struct SFileBlockInfo {
#define TSDB_BLOCK_DIST_STEP_ROWS 8
#define MAX_INTERVAL_TIME_WINDOW 1000000 // maximum allowed time windows in final results
-#define FUNCTION_TYPE_SCALAR 1
-#define FUNCTION_TYPE_AGG 2
-
#define TOP_BOTTOM_QUERY_LIMIT 100
#define FUNCTIONS_NAME_MAX_LENGTH 16
-#define FUNCTION_INVALID_ID -1
-#define FUNCTION_COUNT 0
-#define FUNCTION_SUM 1
-#define FUNCTION_AVG 2
-#define FUNCTION_MIN 3
-#define FUNCTION_MAX 4
-#define FUNCTION_STDDEV 5
-#define FUNCTION_PERCT 6
-#define FUNCTION_APERCT 7
-#define FUNCTION_FIRST 8
-#define FUNCTION_LAST 9
-#define FUNCTION_LAST_ROW 10
-#define FUNCTION_TOP 11
-#define FUNCTION_BOTTOM 12
-#define FUNCTION_SPREAD 13
-#define FUNCTION_TWA 14
-#define FUNCTION_LEASTSQR 15
-
-#define FUNCTION_TS 16
-#define FUNCTION_TS_DUMMY 17
-#define FUNCTION_TAG_DUMMY 18
-#define FUNCTION_TS_COMP 19
-
-#define FUNCTION_TAG 20
-#define FUNCTION_PRJ 21
-
-#define FUNCTION_TAGPRJ 22
-#define FUNCTION_ARITHM 23
-#define FUNCTION_DIFF 24
-
-#define FUNCTION_FIRST_DST 25
-#define FUNCTION_LAST_DST 26
-#define FUNCTION_STDDEV_DST 27
-#define FUNCTION_INTERP 28
-
-#define FUNCTION_RATE 29
-#define FUNCTION_IRATE 30
-#define FUNCTION_TID_TAG 31
-#define FUNCTION_DERIVATIVE 32
-#define FUNCTION_BLKINFO 33
-
-
-#define FUNCTION_COV 38
-
typedef struct SResultRowEntryInfo {
bool initialized:1; // output buffer has been initialized
bool complete:1; // query has completed
@@ -178,10 +133,9 @@ typedef struct SqlFunctionCtx {
char *pOutput; // final result output buffer, point to sdata->data
int32_t numOfParams;
SFunctParam *param; // input parameter, e.g., top(k, 20), the number of results for top query is kept in param
- int64_t *ptsList; // corresponding timestamp array list
+ int64_t *ptsList; // corresponding timestamp array list, todo remove it
SColumnInfoData *pTsOutput; // corresponding output buffer for timestamp of each result, e.g., top/bottom*/
int32_t offset;
- SVariant tag;
struct SResultRowEntryInfo *resultInfo;
SSubsidiaryResInfo subsidiaries;
SPoint1 start;
@@ -208,9 +162,6 @@ enum {
typedef struct tExprNode {
int32_t nodeType;
union {
- SSchema *pSchema;// column node
- struct SVariant *pVal; // value node
-
struct {// function node
char functionName[FUNCTIONS_NAME_MAX_LENGTH]; // todo refactor
int32_t functionId;
@@ -253,47 +204,23 @@ struct SScalarParam {
int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, SResultDataInfo* pInfo, int16_t extLength,
bool isSuperTable);
-bool qIsValidUdf(SArray* pUdfInfo, const char* name, int32_t len, int32_t* functionId);
-
void resetResultRowEntryResult(SqlFunctionCtx* pCtx, int32_t num);
void cleanupResultRowEntry(struct SResultRowEntryInfo* pCell);
int32_t getNumOfResult(SqlFunctionCtx* pCtx, int32_t num, SSDataBlock* pResBlock);
bool isRowEntryCompleted(struct SResultRowEntryInfo* pEntry);
bool isRowEntryInitialized(struct SResultRowEntryInfo* pEntry);
-///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-// fill api
-struct SFillInfo;
-struct SFillColInfo;
-
typedef struct SPoint {
int64_t key;
void * val;
} SPoint;
-//void taosFillSetStartInfo(struct SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey);
-//void taosResetFillInfo(struct SFillInfo* pFillInfo, TSKEY startTimestamp);
-//void taosFillSetInputDataBlock(struct SFillInfo* pFillInfo, const struct SSDataBlock* pInput);
-//struct SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfOutput, const SValueNode* val);
-//bool taosFillHasMoreResults(struct SFillInfo* pFillInfo);
-//
-//struct SFillInfo* taosCreateFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols,
-// SInterval* pInterval, int32_t fillType,
-// struct SFillColInfo* pCol, const char* id);
-//
-//void* taosDestroyFillInfo(struct SFillInfo *pFillInfo);
-//int64_t taosFillResultDataBlock(struct SFillInfo* pFillInfo, void** output, int32_t capacity);
-//int64_t getFillInfoStart(struct SFillInfo *pFillInfo);
-
int32_t taosGetLinearInterpolationVal(SPoint* point, int32_t outputType, SPoint* point1, SPoint* point2, int32_t inputType);
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// udf api
struct SUdfInfo;
-void qAddUdfInfo(uint64_t id, struct SUdfInfo* pUdfInfo);
-void qRemoveUdfInfo(uint64_t id, struct SUdfInfo* pUdfInfo);
-
/**
* create udfd proxy, called once in process that call doSetupUdf/callUdfxxx/doTeardownUdf
* @return error code
diff --git a/include/libs/function/functionMgt.h b/include/libs/function/functionMgt.h
index 3d86adb573cd27dfce3b93409b96a11b47b7aaf5..f3e28936afc1b1556502eacd08f6b1e699abc198 100644
--- a/include/libs/function/functionMgt.h
+++ b/include/libs/function/functionMgt.h
@@ -23,6 +23,9 @@ extern "C" {
#include "function.h"
#include "querynodes.h"
+#define FUNC_AGGREGATE_UDF_ID 5001
+#define FUNC_SCALAR_UDF_ID 5002
+
typedef enum EFunctionType {
// aggregate function
FUNCTION_TYPE_APERCENTILE = 1,
@@ -126,21 +129,12 @@ typedef enum EFunctionType {
struct SqlFunctionCtx;
struct SResultRowEntryInfo;
struct STimeWindow;
-struct SCatalog;
-
-typedef struct SFmGetFuncInfoParam {
- struct SCatalog* pCtg;
- void* pRpc;
- const SEpSet* pMgmtEps;
- char* pErrBuf;
- int32_t errBufLen;
-} SFmGetFuncInfoParam;
int32_t fmFuncMgtInit();
void fmFuncMgtDestroy();
-int32_t fmGetFuncInfo(SFmGetFuncInfoParam* pParam, SFunctionNode* pFunc);
+int32_t fmGetFuncInfo(SFunctionNode* pFunc, char* pMsg, int32_t msgLen);
bool fmIsBuiltinFunc(const char* pFunc);
@@ -162,6 +156,9 @@ bool fmIsDynamicScanOptimizedFunc(int32_t funcId);
bool fmIsMultiResFunc(int32_t funcId);
bool fmIsRepeatScanFunc(int32_t funcId);
bool fmIsUserDefinedFunc(int32_t funcId);
+bool fmIsDistExecFunc(int32_t funcId);
+
+int32_t fmGetDistMethod(const SFunctionNode* pFunc, SFunctionNode** pPartialFunc, SFunctionNode** pMergeFunc);
typedef enum EFuncDataRequired {
FUNC_DATA_REQUIRED_DATA_LOAD = 1,
diff --git a/include/libs/index/index.h b/include/libs/index/index.h
index 05db99db0f199169ce71e4a76d56899361aa403b..180c7e7216153f0cdfd5b4240de89bc586fd9b88 100644
--- a/include/libs/index/index.h
+++ b/include/libs/index/index.h
@@ -192,11 +192,17 @@ void indexTermDestroy(SIndexTerm* p);
void indexInit();
/* index filter */
+typedef struct SIndexMetaArg {
+ void* metaHandle;
+ void* metaEx;
+ uint64_t suid;
+} SIndexMetaArg;
+
typedef enum { SFLT_NOT_INDEX, SFLT_COARSE_INDEX, SFLT_ACCURATE_INDEX } SIdxFltStatus;
SIdxFltStatus idxGetFltStatus(SNode* pFilterNode);
-int32_t doFilterTag(const SNode* pFilterNode, SArray* result);
+int32_t doFilterTag(const SNode* pFilterNode, SIndexMetaArg* metaArg, SArray* result);
/*
* destory index env
*
diff --git a/include/libs/monitor/monitor.h b/include/libs/monitor/monitor.h
index 9d8cf61b0646c764cee7056152f7873caa61b14f..39e8042b931ecbee48fbe389ab1160c613636f28 100644
--- a/include/libs/monitor/monitor.h
+++ b/include/libs/monitor/monitor.h
@@ -171,6 +171,7 @@ void tFreeSMonVmInfo(SMonVmInfo *pInfo);
typedef struct {
SMonSysInfo sys;
SMonLogs log;
+ SQnodeLoad load;
} SMonQmInfo;
int32_t tSerializeSMonQmInfo(void *buf, int32_t bufLen, SMonQmInfo *pInfo);
@@ -210,6 +211,10 @@ typedef struct {
int32_t tSerializeSMonMloadInfo(void *buf, int32_t bufLen, SMonMloadInfo *pInfo);
int32_t tDeserializeSMonMloadInfo(void *buf, int32_t bufLen, SMonMloadInfo *pInfo);
+int32_t tSerializeSQnodeLoad(void *buf, int32_t bufLen, SQnodeLoad *pInfo);
+int32_t tDeserializeSQnodeLoad(void *buf, int32_t bufLen, SQnodeLoad *pInfo);
+
+
typedef struct {
const char *server;
uint16_t port;
diff --git a/include/libs/nodes/cmdnodes.h b/include/libs/nodes/cmdnodes.h
index 82bf4e1f45a0cab5c7f1b61d04e08d137148e44d..82924bef3f206911b803ace70ea15435dc29e882 100644
--- a/include/libs/nodes/cmdnodes.h
+++ b/include/libs/nodes/cmdnodes.h
@@ -50,6 +50,7 @@ typedef struct SDatabaseOptions {
int32_t numOfVgroups;
int8_t singleStable;
SNodeList* pRetentions;
+ int8_t schemaless;
} SDatabaseOptions;
typedef struct SCreateDatabaseStmt {
@@ -79,8 +80,7 @@ typedef struct SAlterDatabaseStmt {
typedef struct STableOptions {
ENodeType type;
char comment[TSDB_TB_COMMENT_LEN];
- int32_t delay;
- float filesFactor;
+ double filesFactor;
SNodeList* pRollupFuncs;
int32_t ttl;
SNodeList* pSma;
@@ -238,20 +238,13 @@ typedef struct SDropComponentNodeStmt {
int32_t dnodeId;
} SDropComponentNodeStmt;
-typedef struct STopicOptions {
- ENodeType type;
- bool withTable;
- bool withSchema;
- bool withTag;
-} STopicOptions;
-
typedef struct SCreateTopicStmt {
- ENodeType type;
- char topicName[TSDB_TABLE_NAME_LEN];
- char subscribeDbName[TSDB_DB_NAME_LEN];
- bool ignoreExists;
- SNode* pQuery;
- STopicOptions* pOptions;
+ ENodeType type;
+ char topicName[TSDB_TABLE_NAME_LEN];
+ char subDbName[TSDB_DB_NAME_LEN];
+ char subSTbName[TSDB_TABLE_NAME_LEN];
+ bool ignoreExists;
+ SNode* pQuery;
} SCreateTopicStmt;
typedef struct SDropTopicStmt {
@@ -260,6 +253,13 @@ typedef struct SDropTopicStmt {
bool ignoreNotExists;
} SDropTopicStmt;
+typedef struct SDropCGroupStmt {
+ ENodeType type;
+ char topicName[TSDB_TABLE_NAME_LEN];
+ char cgroup[TSDB_CGROUP_LEN];
+ bool ignoreNotExists;
+} SDropCGroupStmt;
+
typedef struct SAlterLocalStmt {
ENodeType type;
char config[TSDB_DNODE_CONFIG_LEN];
diff --git a/include/libs/nodes/nodes.h b/include/libs/nodes/nodes.h
index b9cb708c9c172fc522cfef3f7c41bdbd46149cae..15e5e14e41e2eb3b7bc0036932102d42cefecf79 100644
--- a/include/libs/nodes/nodes.h
+++ b/include/libs/nodes/nodes.h
@@ -95,7 +95,6 @@ typedef enum ENodeType {
QUERY_NODE_INDEX_OPTIONS,
QUERY_NODE_EXPLAIN_OPTIONS,
QUERY_NODE_STREAM_OPTIONS,
- QUERY_NODE_TOPIC_OPTIONS,
QUERY_NODE_LEFT_VALUE,
// Statement nodes are used in parser and planner module.
@@ -131,6 +130,7 @@ typedef enum ENodeType {
QUERY_NODE_DROP_MNODE_STMT,
QUERY_NODE_CREATE_TOPIC_STMT,
QUERY_NODE_DROP_TOPIC_STMT,
+ QUERY_NODE_DROP_CGROUP_STMT,
QUERY_NODE_ALTER_LOCAL_STMT,
QUERY_NODE_EXPLAIN_STMT,
QUERY_NODE_DESCRIBE_STMT,
@@ -189,6 +189,7 @@ typedef enum ENodeType {
QUERY_NODE_LOGIC_PLAN_PROJECT,
QUERY_NODE_LOGIC_PLAN_VNODE_MODIF,
QUERY_NODE_LOGIC_PLAN_EXCHANGE,
+ QUERY_NODE_LOGIC_PLAN_MERGE,
QUERY_NODE_LOGIC_PLAN_WINDOW,
QUERY_NODE_LOGIC_PLAN_FILL,
QUERY_NODE_LOGIC_PLAN_SORT,
@@ -206,12 +207,16 @@ typedef enum ENodeType {
QUERY_NODE_PHYSICAL_PLAN_JOIN,
QUERY_NODE_PHYSICAL_PLAN_AGG,
QUERY_NODE_PHYSICAL_PLAN_EXCHANGE,
+ QUERY_NODE_PHYSICAL_PLAN_MERGE,
QUERY_NODE_PHYSICAL_PLAN_SORT,
QUERY_NODE_PHYSICAL_PLAN_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL,
+ QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_FILL,
QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW,
+ QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW,
+ QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION_WINDOW,
QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW,
QUERY_NODE_PHYSICAL_PLAN_PARTITION,
QUERY_NODE_PHYSICAL_PLAN_DISPATCH,
@@ -242,7 +247,6 @@ typedef struct SNodeList {
#define SNodeptr void*
-int32_t nodesNodeSize(ENodeType type);
SNodeptr nodesMakeNode(ENodeType type);
void nodesDestroyNode(SNodeptr pNode);
diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h
index 6c4d14ffa10be13974e4651868fda955e41cebb7..eb37316402538a1c89c7f3c7dbf0fa21b57843c0 100644
--- a/include/libs/nodes/plannodes.h
+++ b/include/libs/nodes/plannodes.h
@@ -31,6 +31,7 @@ typedef struct SLogicNode {
SNodeList* pChildren;
struct SLogicNode* pParent;
int32_t optimizedFlag;
+ uint8_t precision;
} SLogicNode;
typedef enum EScanType { SCAN_TYPE_TAG = 1, SCAN_TYPE_TABLE, SCAN_TYPE_SYSTEM_TABLE, SCAN_TYPE_STREAM } EScanType;
@@ -55,12 +56,17 @@ typedef struct SScanLogicNode {
int8_t intervalUnit;
int8_t slidingUnit;
SNode* pTagCond;
+ int8_t triggerType;
+ int64_t watermark;
+ int16_t tsColId;
+ double filesFactor;
} SScanLogicNode;
typedef struct SJoinLogicNode {
SLogicNode node;
EJoinType joinType;
SNode* pOnConditions;
+ bool isSingleTableJoin;
} SJoinLogicNode;
typedef struct SAggLogicNode {
@@ -89,25 +95,39 @@ typedef struct SVnodeModifLogicNode {
typedef struct SExchangeLogicNode {
SLogicNode node;
int32_t srcGroupId;
- uint8_t precision;
} SExchangeLogicNode;
+typedef struct SMergeLogicNode {
+ SLogicNode node;
+ SNodeList* pMergeKeys;
+ int32_t numOfChannels;
+ int32_t srcGroupId;
+} SMergeLogicNode;
+
typedef enum EWindowType { WINDOW_TYPE_INTERVAL = 1, WINDOW_TYPE_SESSION, WINDOW_TYPE_STATE } EWindowType;
+typedef enum EStreamIntervalAlgorithm {
+ STREAM_INTERVAL_ALGO_FINAL = 1,
+ STREAM_INTERVAL_ALGO_SEMI,
+ STREAM_INTERVAL_ALGO_SINGLE
+} EStreamIntervalAlgorithm;
+
typedef struct SWindowLogicNode {
- SLogicNode node;
- EWindowType winType;
- SNodeList* pFuncs;
- int64_t interval;
- int64_t offset;
- int64_t sliding;
- int8_t intervalUnit;
- int8_t slidingUnit;
- int64_t sessionGap;
- SNode* pTspk;
- SNode* pStateExpr;
- int8_t triggerType;
- int64_t watermark;
+ SLogicNode node;
+ EWindowType winType;
+ SNodeList* pFuncs;
+ int64_t interval;
+ int64_t offset;
+ int64_t sliding;
+ int8_t intervalUnit;
+ int8_t slidingUnit;
+ int64_t sessionGap;
+ SNode* pTspk;
+ SNode* pStateExpr;
+ int8_t triggerType;
+ int64_t watermark;
+ double filesFactor;
+ EStreamIntervalAlgorithm stmInterAlgo;
} SWindowLogicNode;
typedef struct SFillLogicNode {
@@ -214,6 +234,10 @@ typedef struct STableScanPhysiNode {
int64_t sliding;
int8_t intervalUnit;
int8_t slidingUnit;
+ int8_t triggerType;
+ int64_t watermark;
+ int16_t tsColId;
+ double filesFactor;
} STableScanPhysiNode;
typedef STableScanPhysiNode STableSeqScanPhysiNode;
@@ -257,6 +281,13 @@ typedef struct SExchangePhysiNode {
SNodeList* pSrcEndPoints; // element is SDownstreamSource, scheduler fill by calling qSetSuplanExecutionNode
} SExchangePhysiNode;
+typedef struct SMergePhysiNode {
+ SPhysiNode node;
+ SNodeList* pMergeKeys;
+ int32_t numOfChannels;
+ int32_t srcGroupId;
+} SMergePhysiNode;
+
typedef struct SWinodwPhysiNode {
SPhysiNode node;
SNodeList* pExprs; // these are expression list of parameter expression of function
@@ -264,6 +295,7 @@ typedef struct SWinodwPhysiNode {
SNode* pTspk; // timestamp primary key
int8_t triggerType;
int64_t watermark;
+ double filesFactor;
} SWinodwPhysiNode;
typedef struct SIntervalPhysiNode {
@@ -276,6 +308,8 @@ typedef struct SIntervalPhysiNode {
} SIntervalPhysiNode;
typedef SIntervalPhysiNode SStreamIntervalPhysiNode;
+typedef SIntervalPhysiNode SStreamFinalIntervalPhysiNode;
+typedef SIntervalPhysiNode SStreamSemiIntervalPhysiNode;
typedef struct SFillPhysiNode {
SPhysiNode node;
@@ -296,6 +330,8 @@ typedef struct SSessionWinodwPhysiNode {
int64_t gap;
} SSessionWinodwPhysiNode;
+typedef SSessionWinodwPhysiNode SStreamSessionWinodwPhysiNode;
+
typedef struct SStateWinodwPhysiNode {
SWinodwPhysiNode window;
SNode* pStateKey;
diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h
index 606c0acd5b1261aae72dc52b041d35d9dbdb3933..e4af78892baaf3757ab58be41fec776e2cb7186f 100644
--- a/include/libs/nodes/querynodes.h
+++ b/include/libs/nodes/querynodes.h
@@ -132,6 +132,7 @@ typedef struct STableNode {
char tableName[TSDB_TABLE_NAME_LEN];
char tableAlias[TSDB_TABLE_NAME_LEN];
uint8_t precision;
+ bool singleTable;
} STableNode;
struct STableMeta;
@@ -242,6 +243,8 @@ typedef struct SSelectStmt {
bool hasAggFuncs;
bool hasRepeatScanFuncs;
bool hasIndefiniteRowsFunc;
+ bool hasSelectFunc;
+ bool hasSelectValFunc;
} SSelectStmt;
typedef enum ESetOperatorType { SET_OP_TYPE_UNION_ALL = 1, SET_OP_TYPE_UNION } ESetOperatorType;
@@ -319,21 +322,22 @@ typedef enum EQueryExecMode {
} EQueryExecMode;
typedef struct SQuery {
- ENodeType type;
- EQueryExecMode execMode;
- bool haveResultSet;
- SNode* pRoot;
- int32_t numOfResCols;
- SSchema* pResSchema;
- int8_t precision;
- SCmdMsgInfo* pCmdMsg;
- int32_t msgType;
- SArray* pDbList;
- SArray* pTableList;
- bool showRewrite;
- int32_t placeholderNum;
- SArray* pPlaceholderValues;
- SNode* pPrepareRoot;
+ ENodeType type;
+ EQueryExecMode execMode;
+ bool haveResultSet;
+ SNode* pRoot;
+ int32_t numOfResCols;
+ SSchema* pResSchema;
+ int8_t precision;
+ SCmdMsgInfo* pCmdMsg;
+ int32_t msgType;
+ SArray* pTableList;
+ SArray* pDbList;
+ bool showRewrite;
+ int32_t placeholderNum;
+ SArray* pPlaceholderValues;
+ SNode* pPrepareRoot;
+ struct SParseMetaCache* pMetaCache;
} SQuery;
void nodesWalkSelectStmt(SSelectStmt* pSelect, ESqlClause clause, FNodeWalker walker, void* pContext);
diff --git a/include/libs/parser/parser.h b/include/libs/parser/parser.h
index 2d8fd9a93cadc1275e937ade9a8b859dcebe7dc9..6abd1ffa6d57834b2d36b72071001019276f5e99 100644
--- a/include/libs/parser/parser.h
+++ b/include/libs/parser/parser.h
@@ -23,6 +23,9 @@ extern "C" {
#include "query.h"
#include "querynodes.h"
+struct SCatalogReq;
+struct SMetaData;
+
typedef struct SStmtCallback {
TAOS_STMT* pStmt;
int32_t (*getTbNameFn)(TAOS_STMT*, char**);
@@ -45,14 +48,22 @@ typedef struct SParseContext {
SStmtCallback* pStmtCb;
const char* pUser;
bool isSuperUser;
+ bool async;
+ int8_t schemalessType;
} SParseContext;
int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery);
-bool isInsertSql(const char* pStr, size_t length);
+bool qIsInsertSql(const char* pStr, size_t length);
+
+// for async mode
+int32_t qParseSqlSyntax(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq* pCatalogReq);
+int32_t qAnalyseSqlSemantic(SParseContext* pCxt, const struct SCatalogReq* pCatalogReq,
+ const struct SMetaData* pMetaData, SQuery* pQuery);
void qDestroyQuery(SQuery* pQueryNode);
int32_t qExtractResultSchema(const SNode* pRoot, int32_t* numOfCols, SSchema** pSchema);
+int32_t qSetSTableIdForRSma(SNode* pStmt, int64_t uid);
int32_t qBuildStmtOutput(SQuery* pQuery, SHashObj* pVgHash, SHashObj* pBlockHash);
int32_t qResetStmtDataBlock(void* block, bool keepBuf);
@@ -67,8 +78,8 @@ int32_t qStmtParseQuerySql(SParseContext* pCxt, SQuery* pQuery);
int32_t qBindStmtColsValue(void* pBlock, TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen);
int32_t qBindStmtSingleColValue(void* pBlock, TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen, int32_t colIdx,
int32_t rowNum);
-int32_t qBuildStmtColFields(void* pDataBlock, int32_t* fieldNum, TAOS_FIELD** fields);
-int32_t qBuildStmtTagFields(void* pBlock, void* boundTags, int32_t* fieldNum, TAOS_FIELD** fields);
+int32_t qBuildStmtColFields(void* pDataBlock, int32_t* fieldNum, TAOS_FIELD_E** fields);
+int32_t qBuildStmtTagFields(void* pBlock, void* boundTags, int32_t* fieldNum, TAOS_FIELD_E** fields);
int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, char* tName, TAOS_MULTI_BIND* bind,
char* msgBuf, int32_t msgBufLen);
void destroyBoundColumnInfo(void* pBoundInfo);
diff --git a/include/libs/planner/planner.h b/include/libs/planner/planner.h
index c4f71e57a8174c62cf331e4afec35604786282a0..af30ec4c6bf7d657dfdec1af49f871eed38b53d7 100644
--- a/include/libs/planner/planner.h
+++ b/include/libs/planner/planner.h
@@ -36,6 +36,7 @@ typedef struct SPlanContext {
int64_t watermark;
char* pMsg;
int32_t msgLen;
+ double filesFactor;
} SPlanContext;
// Create the physical plan for the query, according to the AST.
diff --git a/include/libs/qcom/query.h b/include/libs/qcom/query.h
index 68a1e08f518f5c5e230076cd56344ea1161804cb..45a7e9a29f3457a68e9998659237a9e0d70d39ab 100644
--- a/include/libs/qcom/query.h
+++ b/include/libs/qcom/query.h
@@ -43,6 +43,12 @@ typedef enum {
TASK_TYPE_TEMP,
} ETaskType;
+typedef enum {
+ TARGET_TYPE_MNODE = 1,
+ TARGET_TYPE_VNODE,
+ TARGET_TYPE_OTHER,
+} ETargetType;
+
typedef struct STableComInfo {
uint8_t numOfTags; // the number of tags in schema
uint8_t precision; // the number of precision
@@ -50,6 +56,11 @@ typedef struct STableComInfo {
int32_t rowSize; // row size of the schema
} STableComInfo;
+typedef struct SQueryExecRes {
+ int32_t msgType;
+ void* res;
+} SQueryExecRes;
+
typedef struct SIndexMeta {
#ifdef WINDOWS
size_t avoidCompilationErrors;
@@ -126,11 +137,18 @@ typedef struct SDataBuf {
void* handle;
} SDataBuf;
+typedef struct STargetInfo {
+ ETargetType type;
+ char dbFName[TSDB_DB_FNAME_LEN]; // used to update db's vgroup epset
+ int32_t vgId;
+} STargetInfo;
+
typedef int32_t (*__async_send_cb_fn_t)(void* param, const SDataBuf* pMsg, int32_t code);
typedef int32_t (*__async_exec_fn_t)(void* param);
typedef struct SMsgSendInfo {
__async_send_cb_fn_t fp; // async callback function
+ STargetInfo target; // for update epset
void* param;
uint64_t requestId;
uint64_t requestObjRefId;
@@ -179,8 +197,9 @@ int32_t queryCreateTableMetaFromMsg(STableMetaRsp* msg, bool isSuperTable, STabl
char* jobTaskStatusStr(int32_t status);
SSchema createSchema(int8_t type, int32_t bytes, col_id_t colId, const char* name);
+void destroyQueryExecRes(SQueryExecRes* pRes);
-extern int32_t (*queryBuildMsg[TDMT_MAX])(void* input, char** msg, int32_t msgSize, int32_t* msgLen);
+extern int32_t (*queryBuildMsg[TDMT_MAX])(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void*(*mallocFp)(int32_t));
extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t msgSize);
#define SET_META_TYPE_NULL(t) (t) = META_TYPE_NULL_TABLE
@@ -191,7 +210,8 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t
#define NEED_CLIENT_RM_TBLMETA_ERROR(_code) \
((_code) == TSDB_CODE_PAR_TABLE_NOT_EXIST || (_code) == TSDB_CODE_VND_TB_NOT_EXIST || \
(_code) == TSDB_CODE_PAR_INVALID_COLUMNS_NUM || (_code) == TSDB_CODE_PAR_INVALID_COLUMN || \
- (_code) == TSDB_CODE_PAR_TAGS_NOT_MATCHED)
+ (_code) == TSDB_CODE_PAR_TAGS_NOT_MATCHED || (_code == TSDB_CODE_PAR_VALUE_TOO_LONG) || \
+ (_code == TSDB_CODE_PAR_INVALID_DROP_COL))
#define NEED_CLIENT_REFRESH_VG_ERROR(_code) \
((_code) == TSDB_CODE_VND_HASH_MISMATCH || (_code) == TSDB_CODE_VND_INVALID_VGROUP_ID)
#define NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code) ((_code) == TSDB_CODE_TDB_TABLE_RECREATED)
diff --git a/include/libs/qworker/qworker.h b/include/libs/qworker/qworker.h
index 0846841cef1b509edf2ccc189bf9e81453169aa1..91cf975a56660cd13a9fac992cb59c79bd2362b4 100644
--- a/include/libs/qworker/qworker.h
+++ b/include/libs/qworker/qworker.h
@@ -22,7 +22,7 @@ extern "C" {
#include "tmsgcb.h"
#include "trpc.h"
-
+#include "executor.h"
enum {
NODE_TYPE_VNODE = 1,
@@ -40,44 +40,42 @@ typedef struct SQWorkerCfg {
} SQWorkerCfg;
typedef struct {
- uint64_t numOfStartTask;
- uint64_t numOfStopTask;
- uint64_t numOfRecvedFetch;
- uint64_t numOfSentHb;
- uint64_t numOfSentFetch;
- uint64_t numOfTaskInQueue;
+ uint64_t cacheDataSize;
+
+ uint64_t queryProcessed;
+ uint64_t cqueryProcessed;
+ uint64_t fetchProcessed;
+ uint64_t dropProcessed;
+ uint64_t hbProcessed;
+
+ uint64_t numOfQueryInQueue;
uint64_t numOfFetchInQueue;
+ uint64_t timeInQueryQueue;
+ uint64_t timeInFetchQueue;
+
uint64_t numOfErrors;
} SQWorkerStat;
int32_t qWorkerInit(int8_t nodeType, int32_t nodeId, SQWorkerCfg *cfg, void **qWorkerMgmt, const SMsgCb *pMsgCb);
-int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
-
-int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
-
-int32_t qWorkerProcessDataSinkMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
-
-int32_t qWorkerProcessReadyMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
+int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
-int32_t qWorkerProcessStatusMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
+int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
-int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
+int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
-int32_t qWorkerProcessFetchRsp(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
+int32_t qWorkerProcessFetchRsp(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
-int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
+int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
-int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
+int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
-int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
-
-int32_t qWorkerProcessShowMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
-
-int32_t qWorkerProcessShowFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
+int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
void qWorkerDestroy(void **qWorkerMgmt);
+int32_t qWorkerGetStat(SReadHandle *handle, void *qWorkerMgmt, SQWorkerStat *pStat);
+
#ifdef __cplusplus
}
#endif
diff --git a/include/libs/scheduler/scheduler.h b/include/libs/scheduler/scheduler.h
index dcd058a293f0a35080335b30b38e32a792c43a74..331b78769029fb97764f81ce6bb646f28854918a 100644
--- a/include/libs/scheduler/scheduler.h
+++ b/include/libs/scheduler/scheduler.h
@@ -23,6 +23,8 @@ extern "C" {
#include "catalog.h"
#include "planner.h"
+extern tsem_t schdRspSem;
+
typedef struct SSchedulerCfg {
uint32_t maxJobNum;
int32_t maxNodeTableNum;
@@ -54,9 +56,7 @@ typedef struct SQueryProfileSummary {
typedef struct SQueryResult {
int32_t code;
uint64_t numOfRows;
- int32_t msgSize;
- char *msg;
- void *res;
+ SQueryExecRes res;
} SQueryResult;
typedef struct STaskInfo {
@@ -64,6 +64,15 @@ typedef struct STaskInfo {
SSubQueryMsg *msg;
} STaskInfo;
+typedef struct SSchdFetchParam {
+ void **pData;
+ int32_t* code;
+} SSchdFetchParam;
+
+typedef void (*schedulerExecCallback)(SQueryResult* pResult, void* param, int32_t code);
+typedef void (*schedulerFetchCallback)(void* pResult, void* param, int32_t code);
+
+
int32_t schedulerInit(SSchedulerCfg *cfg);
/**
@@ -80,7 +89,8 @@ int32_t schedulerExecJob(void *transport, SArray *nodeList, SQueryPlan *pDag, in
* @param pNodeList Qnode/Vnode address list, element is SQueryNodeAddr
* @return
*/
-int32_t schedulerAsyncExecJob(void *transport, SArray *pNodeList, SQueryPlan* pDag, const char* sql, int64_t *pJob);
+ int32_t schedulerAsyncExecJob(void *pTrans, SArray *pNodeList, SQueryPlan *pDag, int64_t *pJob, const char *sql,
+ int64_t startTs, schedulerExecCallback fp, void* param);
/**
* Fetch query result from the remote query executor
@@ -90,6 +100,8 @@ int32_t schedulerAsyncExecJob(void *transport, SArray *pNodeList, SQueryPlan* pD
*/
int32_t schedulerFetchRows(int64_t job, void **data);
+int32_t schedulerAsyncFetchRows(int64_t job, schedulerFetchCallback fp, void* param);
+
int32_t schedulerGetTasksStatus(int64_t job, SArray *pSub);
@@ -108,23 +120,8 @@ void schedulerFreeJob(int64_t job);
void schedulerDestroy(void);
-/**
- * convert dag to task list
- * @param pDag
- * @param pTasks SArray**
- * @return
- */
-int32_t schedulerConvertDagToTaskList(SQueryPlan* pDag, SArray **pTasks);
-
-/**
- * make one task info's multiple copies
- * @param src
- * @param dst SArray**
- * @return
- */
-int32_t schedulerCopyTask(STaskInfo *src, SArray **dst, int32_t copyNum);
-
-void schedulerFreeTaskList(SArray *taskList);
+void schdExecCallback(SQueryResult* pResult, void* param, int32_t code);
+void schdFetchCallback(void* pResult, void* param, int32_t code);
#ifdef __cplusplus
diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h
index d18f609d543e375eee495f0516aa93a25c649653..f7ad7b4ed8dcecb65bec074480e36226f583727b 100644
--- a/include/libs/stream/tstream.h
+++ b/include/libs/stream/tstream.h
@@ -61,11 +61,8 @@ enum {
};
typedef struct {
- int8_t type;
-
- int32_t sourceVg;
- int64_t sourceVer;
-
+ int8_t type;
+ int64_t ver;
int32_t* dataRef;
SSubmitReq* data;
} SStreamDataSubmit;
@@ -83,16 +80,48 @@ typedef struct {
int8_t type;
} SStreamCheckpoint;
+typedef struct {
+ STaosQueue* queue;
+ STaosQall* qall;
+ void* qItem;
+ int8_t failed;
+} SStreamQ;
+
+static FORCE_INLINE void* streamQCurItem(SStreamQ* queue) {
+ //
+ return queue->qItem;
+}
+
+static FORCE_INLINE void* streamQNextItem(SStreamQ* queue) {
+ int8_t failed = atomic_load_8(&queue->failed);
+ if (failed) {
+ ASSERT(queue->qItem != NULL);
+ return streamQCurItem(queue);
+ } else {
+ taosGetQitem(queue->qall, &queue->qItem);
+ if (queue->qItem == NULL) {
+ taosReadAllQitems(queue->queue, queue->qall);
+ taosGetQitem(queue->qall, &queue->qItem);
+ }
+ return streamQCurItem(queue);
+ }
+}
+
+static FORCE_INLINE void streamQSetFail(SStreamQ* queue) { atomic_store_8(&queue->failed, 1); }
+
+static FORCE_INLINE void streamQSetSuccess(SStreamQ* queue) { atomic_store_8(&queue->failed, 0); }
+
static FORCE_INLINE SStreamDataSubmit* streamDataSubmitNew(SSubmitReq* pReq) {
- SStreamDataSubmit* pDataSubmit = (SStreamDataSubmit*)taosMemoryCalloc(1, sizeof(SStreamDataSubmit));
+ SStreamDataSubmit* pDataSubmit = (SStreamDataSubmit*)taosAllocateQitem(sizeof(SStreamDataSubmit), DEF_QITEM);
if (pDataSubmit == NULL) return NULL;
- pDataSubmit->data = pReq;
pDataSubmit->dataRef = (int32_t*)taosMemoryMalloc(sizeof(int32_t));
- if (pDataSubmit->data == NULL) goto FAIL;
+ if (pDataSubmit->dataRef == NULL) goto FAIL;
+ pDataSubmit->data = pReq;
*pDataSubmit->dataRef = 1;
+ pDataSubmit->type = STREAM_INPUT__DATA_SUBMIT;
return pDataSubmit;
FAIL:
- taosMemoryFree(pDataSubmit);
+ taosFreeQitem(pDataSubmit);
return NULL;
}
@@ -107,10 +136,11 @@ static FORCE_INLINE void streamDataSubmitRefDec(SStreamDataSubmit* pDataSubmit)
if (ref == 0) {
taosMemoryFree(pDataSubmit->data);
taosMemoryFree(pDataSubmit->dataRef);
- // taosFreeQitem(pDataSubmit);
}
}
+SStreamDataSubmit* streamSubmitRefClone(SStreamDataSubmit* pSubmit);
+
int32_t streamDataBlockEncode(void** buf, const SStreamDataBlock* pOutput);
void* streamDataBlockDecode(const void* buf, SStreamDataBlock* pInput);
@@ -142,6 +172,7 @@ typedef void FTbSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data);
typedef struct {
int64_t stbUid;
+ char stbFullName[TSDB_TABLE_FNAME_LEN];
SSchemaWrapper* pSchemaWrapper;
// not applicable to encoder and decoder
void* vnode;
@@ -208,8 +239,6 @@ struct SStreamTask {
int32_t nodeId;
SEpSet epSet;
- // source preprocess
-
// exec
STaskExec exec;
@@ -317,8 +346,6 @@ int32_t streamDequeueOutput(SStreamTask* pTask, void** output);
int32_t streamTaskRun(SStreamTask* pTask);
-int32_t streamTaskHandleInput(SStreamTask* pTask, void* data);
-
int32_t streamTaskProcessRunReq(SStreamTask* pTask, SMsgCb* pMsgCb);
int32_t streamProcessDispatchReq(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamDispatchReq* pReq, SRpcMsg* pMsg);
int32_t streamProcessDispatchRsp(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamDispatchRsp* pRsp);
diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h
index 1eed353f33150c1ca7790d2ec8d082d8d0ac8eea..a587ad6ef22fb80538147a61980ae4cdadd8ec03 100644
--- a/include/libs/sync/sync.h
+++ b/include/libs/sync/sync.h
@@ -66,12 +66,6 @@ typedef struct SSyncCfg {
SNodeInfo nodeInfo[TSDB_MAX_REPLICA];
} SSyncCfg;
-typedef struct SSnapshot {
- void* data;
- SyncIndex lastApplyIndex;
- SyncTerm lastApplyTerm;
-} SSnapshot;
-
typedef struct SFsmCbMeta {
SyncIndex index;
bool isWeak;
@@ -80,16 +74,45 @@ typedef struct SFsmCbMeta {
uint64_t seqNum;
SyncTerm term;
SyncTerm currentTerm;
+ uint64_t flag;
} SFsmCbMeta;
+typedef struct SReConfigCbMeta {
+ int32_t code;
+ SyncIndex index;
+ SyncTerm term;
+ SyncTerm currentTerm;
+ SSyncCfg oldCfg;
+ bool isDrop;
+ uint64_t flag;
+} SReConfigCbMeta;
+
+typedef struct SSnapshot {
+ void *data;
+ SyncIndex lastApplyIndex;
+ SyncTerm lastApplyTerm;
+} SSnapshot;
+
typedef struct SSyncFSM {
void* data;
+
void (*FpCommitCb)(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta);
void (*FpPreCommitCb)(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta);
void (*FpRollBackCb)(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta);
- void (*FpRestoreFinish)(struct SSyncFSM* pFsm);
+
+ void (*FpRestoreFinishCb)(struct SSyncFSM* pFsm);
+ void (*FpReConfigCb)(struct SSyncFSM* pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta);
+
int32_t (*FpGetSnapshot)(struct SSyncFSM* pFsm, SSnapshot* pSnapshot);
- int32_t (*FpRestoreSnapshot)(struct SSyncFSM* pFsm, const SSnapshot* snapshot);
+
+ int32_t (*FpSnapshotStartRead)(struct SSyncFSM* pFsm, void** ppReader);
+ int32_t (*FpSnapshotStopRead)(struct SSyncFSM* pFsm, void* pReader);
+ int32_t (*FpSnapshotDoRead)(struct SSyncFSM* pFsm, void* pReader, void** ppBuf, int32_t* len);
+
+ int32_t (*FpSnapshotStartWrite)(struct SSyncFSM* pFsm, void** ppWriter);
+ int32_t (*FpSnapshotStopWrite)(struct SSyncFSM* pFsm, void* pWriter, bool isApply);
+ int32_t (*FpSnapshotDoWrite)(struct SSyncFSM* pFsm, void* pWriter, void* pBuf, int32_t len);
+
} SSyncFSM;
// abstract definition of log store in raft
@@ -121,6 +144,7 @@ typedef struct SSyncLogStore {
} SSyncLogStore;
typedef struct SSyncInfo {
+ bool isStandBy;
SyncGroupId vgId;
SSyncCfg syncCfg;
char path[TSDB_FILENAME_LEN];
@@ -135,8 +159,8 @@ int32_t syncInit();
void syncCleanUp();
int64_t syncOpen(const SSyncInfo* pSyncInfo);
void syncStart(int64_t rid);
-void syncStartStandBy(int64_t rid);
void syncStop(int64_t rid);
+int32_t syncSetStandby(int64_t rid);
int32_t syncReconfig(int64_t rid, const SSyncCfg* pSyncCfg);
ESyncState syncGetMyRole(int64_t rid);
const char* syncGetMyRoleStr(int64_t rid);
@@ -146,6 +170,11 @@ int32_t syncGetVgId(int64_t rid);
int32_t syncPropose(int64_t rid, const SRpcMsg* pMsg, bool isWeak);
bool syncEnvIsStart();
const char* syncStr(ESyncState state);
+bool syncIsRestoreFinish(int64_t rid);
+
+// to be moved to static
+void syncStartNormal(int64_t rid);
+void syncStartStandBy(int64_t rid);
#ifdef __cplusplus
}
diff --git a/include/libs/transport/trpc.h b/include/libs/transport/trpc.h
index 754a203471fb3810adbd5e17d66b8e0a7d6d8902..839194da94e5a184ab11b446077e334f085d68b5 100644
--- a/include/libs/transport/trpc.h
+++ b/include/libs/transport/trpc.h
@@ -89,19 +89,18 @@ typedef struct SRpcInit {
typedef struct {
void *val;
int32_t (*clone)(void *src, void **dst);
- void (*freeFunc)(const void *arg);
} SRpcCtxVal;
typedef struct {
int32_t msgType;
void * val;
int32_t (*clone)(void *src, void **dst);
- void (*freeFunc)(const void *arg);
} SRpcBrokenlinkVal;
typedef struct {
SHashObj * args;
SRpcBrokenlinkVal brokenVal;
+ void (*freeFunc)(const void *arg);
} SRpcCtx;
int32_t rpcInit();
@@ -125,6 +124,7 @@ void rpcSendRedirectRsp(void *pConn, const SEpSet *pEpSet);
void rpcSendRequestWithCtx(void *thandle, const SEpSet *pEpSet, SRpcMsg *pMsg, int64_t *rid, SRpcCtx *ctx);
int32_t rpcGetConnInfo(void *thandle, SRpcConnInfo *pInfo);
void rpcSendRecv(void *shandle, SEpSet *pEpSet, SRpcMsg *pReq, SRpcMsg *pRsp);
+void rpcSetDefaultAddr(void *thandle, const char *ip, const char *fqdn);
#ifdef __cplusplus
}
diff --git a/include/libs/wal/wal.h b/include/libs/wal/wal.h
index e541c214deba8e9b9ad3cc4e95cc2d2224f3c5a3..95af8ac30666b67b0a933477ff8ca0764d2d0a43 100644
--- a/include/libs/wal/wal.h
+++ b/include/libs/wal/wal.h
@@ -184,6 +184,7 @@ int32_t walRollback(SWal *, int64_t ver);
// notify that previous logs can be pruned safely
int32_t walBeginSnapshot(SWal *, int64_t ver);
int32_t walEndSnapshot(SWal *);
+void walRestoreFromSnapshot(SWal *, int64_t ver);
// int32_t walDataCorrupted(SWal*);
// read
diff --git a/include/os/osDir.h b/include/os/osDir.h
index a4c686e2807ee3d1fb9a8a0e1e05066d1b616c0b..9019d4f80240b2335824cb5626488bf4d0957f06 100644
--- a/include/os/osDir.h
+++ b/include/os/osDir.h
@@ -33,8 +33,19 @@ extern "C" {
#ifdef WINDOWS
#define TD_TMP_DIR_PATH "C:\\Windows\\Temp\\"
+#define TD_CFG_DIR_PATH "C:\\TDengine\\cfg\\"
+#define TD_DATA_DIR_PATH "C:\\TDengine\\data\\"
+#define TD_LOG_DIR_PATH "C:\\TDengine\\log\\"
+#elif defined(_TD_DARWIN_64)
+#define TD_TMP_DIR_PATH "/tmp/taosd/"
+#define TD_CFG_DIR_PATH "/usr/local/etc/taos/"
+#define TD_DATA_DIR_PATH "/usr/local/var/lib/taos/"
+#define TD_LOG_DIR_PATH "/usr/local/var/log/taos/"
#else
#define TD_TMP_DIR_PATH "/tmp/"
+#define TD_CFG_DIR_PATH "/etc/taos/"
+#define TD_DATA_DIR_PATH "/var/lib/taos/"
+#define TD_LOG_DIR_PATH "/var/log/taos/"
#endif
typedef struct TdDir *TdDirPtr;
diff --git a/include/util/taoserror.h b/include/util/taoserror.h
index e318978339316de794cb8d455f7e1f68a82800a1..7335d174a4cf67917f58d4219a5f576a4c3cff86 100644
--- a/include/util/taoserror.h
+++ b/include/util/taoserror.h
@@ -69,6 +69,8 @@ int32_t* taosGetErrno();
#define TSDB_CODE_DUP_KEY TAOS_DEF_ERROR_CODE(0, 0x0027)
#define TSDB_CODE_NEED_RETRY TAOS_DEF_ERROR_CODE(0, 0x0028)
#define TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE TAOS_DEF_ERROR_CODE(0, 0x0029)
+#define TSDB_CODE_INVALID_TIMESTAMP TAOS_DEF_ERROR_CODE(0, 0x0030)
+#define TSDB_CODE_MSG_DECODE_ERROR TAOS_DEF_ERROR_CODE(0, 0x0031)
#define TSDB_CODE_REF_NO_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0040)
#define TSDB_CODE_REF_FULL TAOS_DEF_ERROR_CODE(0, 0x0041)
@@ -83,6 +85,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_RPC_NETWORK_UNAVAIL TAOS_DEF_ERROR_CODE(0, 0x0102)
#define TSDB_CODE_RPC_FQDN_ERROR TAOS_DEF_ERROR_CODE(0, 0x0103)
#define TSDB_CODE_RPC_PORT_EADDRINUSE TAOS_DEF_ERROR_CODE(0, 0x0104)
+#define TSDB_CODE_RPC_INDIRECT_NETWORK_UNAVAIL TAOS_DEF_ERROR_CODE(0, 0x0105)
//client
#define TSDB_CODE_TSC_INVALID_OPERATION TAOS_DEF_ERROR_CODE(0, 0x0200)
@@ -181,7 +184,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_MND_BNODE_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0356)
#define TSDB_CODE_MND_BNODE_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0357)
#define TSDB_CODE_MND_TOO_FEW_MNODES TAOS_DEF_ERROR_CODE(0, 0x0358)
-#define TSDB_CODE_MND_MNODE_DEPLOYED TAOS_DEF_ERROR_CODE(0, 0x0359)
+#define TSDB_CODE_MND_TOO_MANY_MNODES TAOS_DEF_ERROR_CODE(0, 0x0359)
#define TSDB_CODE_MND_CANT_DROP_MASTER TAOS_DEF_ERROR_CODE(0, 0x035A)
// mnode-acct
@@ -268,6 +271,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_MND_OFFSET_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x03E9)
#define TSDB_CODE_MND_CONSUMER_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x03EA)
#define TSDB_CODE_MND_TOPIC_SUBSCRIBED TAOS_DEF_ERROR_CODE(0, 0x03EB)
+#define TSDB_CODE_MND_CGROUP_USED TAOS_DEF_ERROR_CODE(0, 0x03EC)
// mnode-stream
#define TSDB_CODE_MND_STREAM_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x03F0)
@@ -313,6 +317,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_VND_INVALID_TABLE_ACTION TAOS_DEF_ERROR_CODE(0, 0x0519)
#define TSDB_CODE_VND_COL_ALREADY_EXISTS TAOS_DEF_ERROR_CODE(0, 0x051a)
#define TSDB_CODE_VND_TABLE_COL_NOT_EXISTS TAOS_DEF_ERROR_CODE(0, 0x051b)
+#define TSDB_CODE_VND_READ_END TAOS_DEF_ERROR_CODE(0, 0x051c)
// tsdb
#define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600)
@@ -640,6 +645,8 @@ int32_t* taosGetErrno();
#define TSDB_CODE_PAR_NOT_ALLOWED_WIN_QUERY TAOS_DEF_ERROR_CODE(0, 0x2650)
#define TSDB_CODE_PAR_INVALID_DROP_COL TAOS_DEF_ERROR_CODE(0, 0x2651)
#define TSDB_CODE_PAR_INVALID_COL_JSON TAOS_DEF_ERROR_CODE(0, 0x2652)
+#define TSDB_CODE_PAR_VALUE_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x2653)
+#define TSDB_CODE_PAR_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x2654)
//planner
#define TSDB_CODE_PLAN_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x2700)
@@ -651,7 +658,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_FUNC_FUNTION_PARA_NUM TAOS_DEF_ERROR_CODE(0, 0x2801)
#define TSDB_CODE_FUNC_FUNTION_PARA_TYPE TAOS_DEF_ERROR_CODE(0, 0x2802)
#define TSDB_CODE_FUNC_FUNTION_PARA_VALUE TAOS_DEF_ERROR_CODE(0, 0x2803)
-#define TSDB_CODE_FUNC_INVALID_FUNTION TAOS_DEF_ERROR_CODE(0, 0x2804)
+#define TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION TAOS_DEF_ERROR_CODE(0, 0x2804)
//udf
#define TSDB_CODE_UDF_STOPPING TAOS_DEF_ERROR_CODE(0, 0x2901)
@@ -668,6 +675,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_SML_INVALID_PROTOCOL_TYPE TAOS_DEF_ERROR_CODE(0, 0x3000)
#define TSDB_CODE_SML_INVALID_PRECISION_TYPE TAOS_DEF_ERROR_CODE(0, 0x3001)
#define TSDB_CODE_SML_INVALID_DATA TAOS_DEF_ERROR_CODE(0, 0x3002)
+#define TSDB_CODE_SML_INVALID_DB_CONF TAOS_DEF_ERROR_CODE(0, 0x3003)
#ifdef __cplusplus
}
diff --git a/include/util/tdef.h b/include/util/tdef.h
index 808fcf01526f003bae9878a997cb338438528121..0ae22d195395f6225dddf33c52a99046ad41354d 100644
--- a/include/util/tdef.h
+++ b/include/util/tdef.h
@@ -209,7 +209,7 @@ typedef enum ELogicConditionType {
#define TSDB_INDEX_FNAME_LEN (TSDB_DB_FNAME_LEN + TSDB_INDEX_NAME_LEN + TSDB_NAME_DELIMITER_LEN)
#define TSDB_TYPE_STR_MAX_LEN 32
#define TSDB_TABLE_FNAME_LEN (TSDB_DB_FNAME_LEN + TSDB_TABLE_NAME_LEN + TSDB_NAME_DELIMITER_LEN)
-#define TSDB_TOPIC_FNAME_LEN TSDB_TABLE_FNAME_LEN
+#define TSDB_TOPIC_FNAME_LEN (TSDB_ACCT_ID_LEN + TSDB_TABLE_NAME_LEN + TSDB_NAME_DELIMITER_LEN)
#define TSDB_STREAM_FNAME_LEN TSDB_TABLE_FNAME_LEN
#define TSDB_SUBSCRIBE_KEY_LEN (TSDB_CGROUP_LEN + TSDB_TOPIC_FNAME_LEN + 2)
#define TSDB_PARTITION_KEY_LEN (TSDB_SUBSCRIBE_KEY_LEN + 20)
@@ -247,13 +247,13 @@ typedef enum ELogicConditionType {
#define TSDB_EP_LEN (TSDB_FQDN_LEN + 6)
#define TSDB_IPv4ADDR_LEN 16
#define TSDB_FILENAME_LEN 128
-#define TSDB_SHOW_SQL_LEN 512
+#define TSDB_SHOW_SQL_LEN 1024
#define TSDB_SLOW_QUERY_SQL_LEN 512
#define TSDB_SHOW_SUBQUERY_LEN 1000
#define TSDB_TRANS_STAGE_LEN 12
#define TSDB_TRANS_TYPE_LEN 16
-#define TSDB_TRANS_ERROR_LEN 64
+#define TSDB_TRANS_ERROR_LEN 512
#define TSDB_STEP_NAME_LEN 32
#define TSDB_STEP_DESC_LEN 128
@@ -334,16 +334,16 @@ typedef enum ELogicConditionType {
#define TSDB_DB_STREAM_MODE_OFF 0
#define TSDB_DB_STREAM_MODE_ON 1
#define TSDB_DEFAULT_DB_STREAM_MODE 0
-#define TSDB_DB_SINGLE_STABLE_ON 0
-#define TSDB_DB_SINGLE_STABLE_OFF 1
-#define TSDB_DEFAULT_DB_SINGLE_STABLE 0
+#define TSDB_DB_SINGLE_STABLE_ON 1
+#define TSDB_DB_SINGLE_STABLE_OFF 0
+#define TSDB_DEFAULT_DB_SINGLE_STABLE TSDB_DB_SINGLE_STABLE_OFF
+#define TSDB_DB_SCHEMALESS_ON 1
+#define TSDB_DB_SCHEMALESS_OFF 0
+#define TSDB_DEFAULT_DB_SCHEMALESS TSDB_DB_SCHEMALESS_OFF
#define TSDB_MIN_ROLLUP_FILE_FACTOR 0
-#define TSDB_MAX_ROLLUP_FILE_FACTOR 1
+#define TSDB_MAX_ROLLUP_FILE_FACTOR 10
#define TSDB_DEFAULT_ROLLUP_FILE_FACTOR 0.1
-#define TSDB_MIN_ROLLUP_DELAY 1
-#define TSDB_MAX_ROLLUP_DELAY 10
-#define TSDB_DEFAULT_ROLLUP_DELAY 2
#define TSDB_MIN_TABLE_TTL 0
#define TSDB_DEFAULT_TABLE_TTL 0
@@ -365,7 +365,11 @@ typedef enum ELogicConditionType {
#define PRIMARYKEY_TIMESTAMP_COL_ID 1
#define COL_REACH_END(colId, maxColId) ((colId) > (maxColId))
+#ifdef WINDOWS
+#define TSDB_MAX_RPC_THREADS 4 // windows pipe only support 4 connections.
+#else
#define TSDB_MAX_RPC_THREADS 5
+#endif
#define TSDB_QUERY_TYPE_NON_TYPE 0x00u // none type
#define TSDB_QUERY_TYPE_FREE_RESOURCE 0x01u // free qhandle at vnode
@@ -428,11 +432,11 @@ enum {
};
#define DEFAULT_HANDLE 0
-#define MNODE_HANDLE -1
-#define QNODE_HANDLE -2
-#define SNODE_HANDLE -3
-#define VNODE_HANDLE -4
-#define BNODE_HANDLE -5
+#define MNODE_HANDLE 1
+#define QNODE_HANDLE -1
+#define SNODE_HANDLE -2
+#define VNODE_HANDLE -3
+#define BNODE_HANDLE -4
#define TSDB_CONFIG_OPTION_LEN 16
#define TSDB_CONIIG_VALUE_LEN 48
diff --git a/include/util/tdigest.h b/include/util/tdigest.h
new file mode 100644
index 0000000000000000000000000000000000000000..f9b615318f5c33f0cf386653367ddfe36ae759f8
--- /dev/null
+++ b/include/util/tdigest.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+/*
+ * include/tdigest.c
+ *
+ * Copyright (c) 2016, Usman Masood
+ */
+
+#ifndef TDIGEST_H
+#define TDIGEST_H
+
+#ifndef M_PI
+#define M_PI 3.14159265358979323846264338327950288 /* pi */
+#endif
+
+#define DOUBLE_MAX 1.79e+308
+
+#define ADDITION_CENTROID_NUM 2
+#define COMPRESSION 300
+#define GET_CENTROID(compression) (ceil(compression * M_PI / 2) + 1 + ADDITION_CENTROID_NUM)
+#define GET_THRESHOLD(compression) (7.5 + 0.37 * compression - 2e-4 * pow(compression, 2))
+#define TDIGEST_SIZE(compression) (sizeof(TDigest) + sizeof(SCentroid)*GET_CENTROID(compression) + sizeof(SPt)*GET_THRESHOLD(compression))
+
+typedef struct SCentroid {
+ double mean;
+ int64_t weight;
+}SCentroid;
+
+typedef struct SPt {
+ double value;
+ int64_t weight;
+}SPt;
+
+typedef struct TDigest {
+ double compression;
+ int32_t threshold;
+ int64_t size;
+
+ int64_t total_weight;
+ double min;
+ double max;
+
+ int32_t num_buffered_pts;
+ SPt *buffered_pts;
+
+ int32_t num_centroids;
+ SCentroid *centroids;
+}TDigest;
+
+TDigest *tdigestNewFrom(void* pBuf, int32_t compression);
+void tdigestAdd(TDigest *t, double x, int64_t w);
+void tdigestMerge(TDigest *t1, TDigest *t2);
+double tdigestQuantile(TDigest *t, double q);
+void tdigestCompress(TDigest *t);
+void tdigestFreeFrom(TDigest *t);
+void tdigestAutoFill(TDigest* t, int32_t compression);
+
+#endif /* TDIGEST_H */
diff --git a/include/util/tencode.h b/include/util/tencode.h
index cbacd59fa7873c4cb05b8fdaefb321ae3f854e5b..a13afd44480eef8397befb42c2fe2a12c322b01e 100644
--- a/include/util/tencode.h
+++ b/include/util/tencode.h
@@ -378,14 +378,16 @@ static FORCE_INLINE int32_t tDecodeDouble(SDecoder* pCoder, double* val) {
}
static FORCE_INLINE int32_t tDecodeBinary(SDecoder* pCoder, uint8_t** val, uint32_t* len) {
- if (tDecodeU32v(pCoder, len) < 0) return -1;
+ uint32_t length = 0;
+ if (tDecodeU32v(pCoder, &length) < 0) return -1;
+ if (len) *len = length;
- if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, *len)) return -1;
+ if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, length)) return -1;
if (val) {
*val = (uint8_t*)TD_CODER_CURRENT(pCoder);
}
- TD_CODER_MOVE_POS(pCoder, *len);
+ TD_CODER_MOVE_POS(pCoder, length);
return 0;
}
@@ -410,14 +412,16 @@ static int32_t tDecodeCStrTo(SDecoder* pCoder, char* val) {
}
static FORCE_INLINE int32_t tDecodeBinaryAlloc(SDecoder* pCoder, void** val, uint64_t* len) {
- if (tDecodeU64v(pCoder, len) < 0) return -1;
+ uint64_t length = 0;
+ if (tDecodeU64v(pCoder, &length) < 0) return -1;
+ if (len) *len = length;
- if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, *len)) return -1;
- *val = taosMemoryMalloc(*len);
+ if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, length)) return -1;
+ *val = taosMemoryMalloc(length);
if (*val == NULL) return -1;
- memcpy(*val, TD_CODER_CURRENT(pCoder), *len);
+ memcpy(*val, TD_CODER_CURRENT(pCoder), length);
- TD_CODER_MOVE_POS(pCoder, *len);
+ TD_CODER_MOVE_POS(pCoder, length);
return 0;
}
@@ -530,6 +534,26 @@ static FORCE_INLINE int32_t tPutI64(uint8_t* p, int64_t v) {
return sizeof(int64_t);
}
+static FORCE_INLINE int32_t tPutFloat(uint8_t* p, float f) {
+ union {
+ uint32_t ui;
+ float f;
+ } v;
+ v.f = f;
+
+ return tPutU32(p, v.ui);
+}
+
+static FORCE_INLINE int32_t tPutDouble(uint8_t* p, double d) {
+ union {
+ uint64_t ui;
+ double d;
+ } v;
+ v.d = d;
+
+ return tPutU64(p, v.ui);
+}
+
static FORCE_INLINE int32_t tPutU16v(uint8_t* p, uint16_t v) { tPutV(p, v); }
static FORCE_INLINE int32_t tPutI16v(uint8_t* p, int16_t v) { return tPutU16v(p, ZIGZAGE(int16_t, v)); }
@@ -619,6 +643,34 @@ static FORCE_INLINE int32_t tGetI64v(uint8_t* p, int64_t* v) {
return n;
}
+static FORCE_INLINE int32_t tGetFloat(uint8_t* p, float* f) {
+ int32_t n = 0;
+
+ union {
+ uint32_t ui;
+ float f;
+ } v;
+
+ n = tGetU32(p, &v.ui);
+
+ *f = v.f;
+ return n;
+}
+
+static FORCE_INLINE int32_t tGetDouble(uint8_t* p, double* d) {
+ int32_t n = 0;
+
+ union {
+ uint64_t ui;
+ double d;
+ } v;
+
+ n = tGetU64(p, &v.ui);
+
+ *d = v.d;
+ return n;
+}
+
// =====================
static FORCE_INLINE int32_t tPutBinary(uint8_t* p, uint8_t* pData, uint32_t nData) {
int n = 0;
@@ -642,6 +694,11 @@ static FORCE_INLINE int32_t tGetBinary(uint8_t* p, uint8_t** ppData, uint32_t* n
return n;
}
+static FORCE_INLINE int32_t tPutCStr(uint8_t* p, char* pData) {
+ return tPutBinary(p, (uint8_t*)pData, strlen(pData) + 1);
+}
+static FORCE_INLINE int32_t tGetCStr(uint8_t* p, char** ppData) { return tGetBinary(p, (uint8_t**)ppData, NULL); }
+
#ifdef __cplusplus
}
#endif
diff --git a/include/util/tlist.h b/include/util/tlist.h
index 43833d7ecd84f09643546f3f3fa838edbd1dabf1..1954bda145a48f249875bda8ea3389b4fbed22be 100644
--- a/include/util/tlist.h
+++ b/include/util/tlist.h
@@ -229,7 +229,7 @@ int32_t tdListAppend(SList *list, void *data);
SListNode *tdListPopHead(SList *list);
SListNode *tdListPopTail(SList *list);
SListNode *tdListGetHead(SList *list);
-SListNode *tsListGetTail(SList *list);
+SListNode *tdListGetTail(SList *list);
SListNode *tdListPopNode(SList *list, SListNode *node);
void tdListMove(SList *src, SList *dst);
void tdListDiscard(SList *list);
diff --git a/include/util/tlog.h b/include/util/tlog.h
index be31aa8115ab91dabe898df45abdcba45b50d72d..988d9c6890832d17a7e9acd2b496e3ef6ba63d90 100644
--- a/include/util/tlog.h
+++ b/include/util/tlog.h
@@ -62,6 +62,7 @@ extern int32_t fsDebugFlag;
extern int32_t metaDebugFlag;
extern int32_t fnDebugFlag;
extern int32_t smaDebugFlag;
+extern int32_t idxDebugFlag;
int32_t taosInitLog(const char *logName, int32_t maxFiles);
void taosCloseLog();
@@ -88,6 +89,7 @@ void taosPrintLongString(const char *flags, ELogLevel level, int32_t dflag, cons
#define uInfo(...) { if (uDebugFlag & DEBUG_INFO) { taosPrintLog("UTL ", DEBUG_INFO, tsLogEmbedded ? 255 : uDebugFlag, __VA_ARGS__); }}
#define uDebug(...) { if (uDebugFlag & DEBUG_DEBUG) { taosPrintLog("UTL ", DEBUG_DEBUG, uDebugFlag, __VA_ARGS__); }}
#define uTrace(...) { if (uDebugFlag & DEBUG_TRACE) { taosPrintLog("UTL ", DEBUG_TRACE, uDebugFlag, __VA_ARGS__); }}
+#define uDebugL(...) { if (uDebugFlag & DEBUG_DEBUG) { taosPrintLongString("UTL ", DEBUG_DEBUG, uDebugFlag, __VA_ARGS__); }}
#define pError(...) { taosPrintLog("APP ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }
#define pPrint(...) { taosPrintLog("APP ", DEBUG_INFO, 255, __VA_ARGS__); }
diff --git a/include/util/tqueue.h b/include/util/tqueue.h
index dbc4d03177e4c489240c04aac37710ce995102d4..466c577c0079d07774722ff2efdd30bf207e0fc3 100644
--- a/include/util/tqueue.h
+++ b/include/util/tqueue.h
@@ -46,6 +46,7 @@ typedef struct {
void *ahandle;
int32_t workerId;
int32_t threadNum;
+ int64_t timestamp;
} SQueueInfo;
typedef enum {
@@ -80,7 +81,7 @@ int32_t taosAddIntoQset(STaosQset *qset, STaosQueue *queue, void *ahandle);
void taosRemoveFromQset(STaosQset *qset, STaosQueue *queue);
int32_t taosGetQueueNumber(STaosQset *qset);
-int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, void **ahandle, FItem *itemFp);
+int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, int64_t *ts, void **ahandle, FItem *itemFp);
int32_t taosReadAllQitemsFromQset(STaosQset *qset, STaosQall *qall, void **ahandle, FItems *itemsFp);
void taosResetQsetThread(STaosQset *qset, void *pItem);
diff --git a/include/util/ttimer.h b/include/util/ttimer.h
index 10222596319f445c980e5a03b9ded91a3ca9ce4e..4111a8ca28375cbcf45f60512da06802eeb22669 100644
--- a/include/util/ttimer.h
+++ b/include/util/ttimer.h
@@ -31,16 +31,16 @@ extern int32_t taosTmrThreads;
void *taosTmrInit(int32_t maxTmr, int32_t resoultion, int32_t longest, const char *label);
+void taosTmrCleanUp(void *handle);
+
tmr_h taosTmrStart(TAOS_TMR_CALLBACK fp, int32_t mseconds, void *param, void *handle);
bool taosTmrStop(tmr_h tmrId);
-bool taosTmrStopA(tmr_h *timerId);
+bool taosTmrStopA(tmr_h *tmrId);
bool taosTmrReset(TAOS_TMR_CALLBACK fp, int32_t mseconds, void *param, void *handle, tmr_h *pTmrId);
-void taosTmrCleanUp(void *handle);
-
#ifdef __cplusplus
}
#endif
diff --git a/packaging/docker/Dockerfile b/packaging/docker/Dockerfile
index 26349e257676d99d0ea81e03509c8b09c20a2248..35bea0e65ccc5070fe9d4e82adadc7132ae7cc81 100644
--- a/packaging/docker/Dockerfile
+++ b/packaging/docker/Dockerfile
@@ -1,32 +1,25 @@
-FROM ubuntu:18.04
-
-WORKDIR /root
-
-ARG pkgFile
-ARG dirName
-ARG cpuType
-RUN echo ${pkgFile} && echo ${dirName}
-
-COPY ${pkgFile} /root/
-RUN tar -zxf ${pkgFile}
-WORKDIR /root/
-RUN cd /root/${dirName}/ && /bin/bash install.sh -e no && cd /root
-RUN rm /root/${pkgFile}
-RUN rm -rf /root/${dirName}
-
-ENV DEBIAN_FRONTEND=noninteractive
-RUN apt-get clean && apt-get update && apt-get install -y locales tzdata netcat && locale-gen en_US.UTF-8
-ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib" \
- LC_CTYPE=en_US.UTF-8 \
- LANG=en_US.UTF-8 \
- LC_ALL=en_US.UTF-8
-
-COPY ./bin/* /usr/bin/
-
-ENV TINI_VERSION v0.19.0
-RUN bash -c 'echo -e "Downloading tini-${cpuType} ..."'
-ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-${cpuType} /tini
-RUN chmod +x /tini
-ENTRYPOINT ["/tini", "--", "/usr/bin/entrypoint.sh"]
-CMD ["taosd"]
-VOLUME [ "/var/lib/taos", "/var/log/taos", "/corefile" ]
+FROM ubuntu:18.04
+
+WORKDIR /root
+
+ARG pkgFile
+ARG dirName
+ARG cpuType
+RUN echo ${pkgFile} && echo ${dirName}
+
+COPY ${pkgFile} /root/
+ENV TINI_VERSION v0.19.0
+ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-${cpuType} /tini
+ENV DEBIAN_FRONTEND=noninteractive
+WORKDIR /root/
+RUN tar -zxf ${pkgFile} && cd /root/${dirName}/ && /bin/bash install.sh -e no && cd /root && rm /root/${pkgFile} && rm -rf /root/${dirName} && apt-get update && apt-get install -y locales tzdata netcat && locale-gen en_US.UTF-8 && apt-get clean && rm -rf /var/lib/apt/lists/ && chmod +x /tini
+
+ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib" \
+ LC_CTYPE=en_US.UTF-8 \
+ LANG=en_US.UTF-8 \
+ LC_ALL=en_US.UTF-8
+COPY ./bin/* /usr/bin/
+
+ENTRYPOINT ["/tini", "--", "/usr/bin/entrypoint.sh"]
+CMD ["taosd"]
+VOLUME [ "/var/lib/taos", "/var/log/taos", "/corefile" ]
diff --git a/packaging/docker/bin/entrypoint.sh b/packaging/docker/bin/entrypoint.sh
index 5fb441004d8b454de1039eb3f4b23eb51f32be64..f4be349c0de0ea0df382fc6fee033120c5c48007 100644
--- a/packaging/docker/bin/entrypoint.sh
+++ b/packaging/docker/bin/entrypoint.sh
@@ -11,39 +11,22 @@ DISABLE_ADAPTER=${TAOS_DISABLE_ADAPTER:-0}
unset TAOS_DISABLE_ADAPTER
# to get mnodeEpSet from data dir
-DATA_DIR=${TAOS_DATA_DIR:-/var/lib/taos}
+DATA_DIR=$(taosd -C|grep -E 'dataDir.*(\S+)' -o |head -n1|sed 's/dataDir *//')
+DATA_DIR=${DATA_DIR:-/var/lib/taos}
-# append env to custom taos.cfg
-CFG_DIR=/tmp/taos
-CFG_FILE=$CFG_DIR/taos.cfg
-
-mkdir -p $CFG_DIR >/dev/null 2>&1
-
-[ -f /etc/taos/taos.cfg ] && cat /etc/taos/taos.cfg | grep -E -v "^#|^\s*$" >$CFG_FILE
-env-to-cfg >>$CFG_FILE
-
-FQDN=$(cat $CFG_FILE | grep -E -v "^#|^$" | grep fqdn | tail -n1 | sed -E 's/.*fqdn\s+//')
+FQDN=$(taosd -C|grep -E 'fqdn.*(\S+)' -o |head -n1|sed 's/fqdn *//')
# ensure the fqdn is resolved as localhost
grep "$FQDN" /etc/hosts >/dev/null || echo "127.0.0.1 $FQDN" >>/etc/hosts
-
+FIRSET_EP=$(taosd -C|grep -E 'firstEp.*(\S+)' -o |head -n1|sed 's/firstEp *//')
# parse first ep host and port
-FIRST_EP_HOST=${TAOS_FIRST_EP%:*}
-FIRST_EP_PORT=${TAOS_FIRST_EP#*:}
+FIRST_EP_HOST=${FIRSET_EP%:*}
+FIRST_EP_PORT=${FIRSET_EP#*:}
# in case of custom server port
-SERVER_PORT=$(cat $CFG_FILE | grep -E -v "^#|^$" | grep serverPort | tail -n1 | sed -E 's/.*serverPort\s+//')
+SERVER_PORT=$(taosd -C|grep -E 'serverPort.*(\S+)' -o |head -n1|sed 's/serverPort *//')
SERVER_PORT=${SERVER_PORT:-6030}
-# for other binaries like interpreters
-if echo $1 | grep -E "taosd$" - >/dev/null; then
- true # will run taosd
-else
- cp -f $CFG_FILE /etc/taos/taos.cfg || true
- $@
- exit $?
-fi
-
set +e
ulimit -c unlimited
# set core files pattern, maybe failed
@@ -62,22 +45,23 @@ fi
# if has mnode ep set or the host is first ep or not for cluster, just start.
if [ -f "$DATA_DIR/dnode/mnodeEpSet.json" ] ||
[ "$TAOS_FQDN" = "$FIRST_EP_HOST" ]; then
- $@ -c $CFG_DIR
+ $@
# others will first wait the first ep ready.
else
if [ "$TAOS_FIRST_EP" = "" ]; then
echo "run TDengine with single node."
- $@ -c $CFG_DIR
+ $@
exit $?
fi
while true; do
- es=0
- taos -h $FIRST_EP_HOST -P $FIRST_EP_PORT -n startup >/dev/null || es=$?
- if [ "$es" -eq 0 ]; then
+ es=$(taos -h $FIRST_EP_HOST -P $FIRST_EP_PORT --check)
+ echo ${es}
+ if [ "${es%%:*}" -eq 2 ]; then
+ echo "execute create dnode"
taos -h $FIRST_EP_HOST -P $FIRST_EP_PORT -s "create dnode \"$FQDN:$SERVER_PORT\";"
break
fi
sleep 1s
done
- $@ -c $CFG_DIR
+ $@
fi
diff --git a/packaging/docker/bin/taos-check b/packaging/docker/bin/taos-check
new file mode 100644
index 0000000000000000000000000000000000000000..5dc06b6018b93b627610b446ca6363773fd0fd72
--- /dev/null
+++ b/packaging/docker/bin/taos-check
@@ -0,0 +1,8 @@
+#!/bin/sh
+es=$(taos --check)
+code=${es%%:*}
+if [ "$code" -ne "0" ] && [ "$code" -ne "4" ]; then
+ exit 0
+fi
+echo $es
+exit 1
diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh
index 4cf95454e022da6f8d3e497d335175d86da486c5..5f449e5d91122522d595eb2ccfb948aa4f8a66fe 100755
--- a/packaging/tools/install_client.sh
+++ b/packaging/tools/install_client.sh
@@ -17,6 +17,7 @@ serverName="taosd"
clientName="taos"
uninstallScript="rmtaos"
configFile="taos.cfg"
+tarName="taos.tar.gz"
osType=Linux
pagMode=full
@@ -242,6 +243,11 @@ function install_examples() {
function update_TDengine() {
# Start to update
+ if [ ! -e ${tarName} ]; then
+ echo "File ${tarName} does not exist"
+ exit 1
+ fi
+ tar -zxf ${tarName}
echo -e "${GREEN}Start to update ${productName} client...${NC}"
# Stop the client shell if running
if pidof ${clientName} &> /dev/null; then
@@ -264,42 +270,49 @@ function update_TDengine() {
echo
echo -e "\033[44;32;1m${productName} client is updated successfully!${NC}"
+
+ rm -rf $(tar -tf ${tarName})
}
function install_TDengine() {
- # Start to install
- echo -e "${GREEN}Start to install ${productName} client...${NC}"
-
- install_main_path
- install_log
- install_header
- install_lib
- install_jemalloc
- if [ "$verMode" == "cluster" ]; then
- install_connector
- fi
- install_examples
- install_bin
- install_config
+ # Start to install
+ if [ ! -e ${tarName} ]; then
+ echo "File ${tarName} does not exist"
+ exit 1
+ fi
+ tar -zxf ${tarName}
+ echo -e "${GREEN}Start to install ${productName} client...${NC}"
- echo
- echo -e "\033[44;32;1m${productName} client is installed successfully!${NC}"
+ install_main_path
+ install_log
+ install_header
+ install_lib
+ install_jemalloc
+ if [ "$verMode" == "cluster" ]; then
+ install_connector
+ fi
+ install_examples
+ install_bin
+ install_config
+
+ echo
+ echo -e "\033[44;32;1m${productName} client is installed successfully!${NC}"
- rm -rf $(tar -tf ${tarName})
+ rm -rf $(tar -tf ${tarName})
}
## ==============================Main program starts from here============================
# Install or updata client and client
# if server is already install, don't install client
- if [ -e ${bin_dir}/${serverName} ]; then
- echo -e "\033[44;32;1mThere are already installed ${productName} server, so don't need install client!${NC}"
- exit 0
- fi
+if [ -e ${bin_dir}/${serverName} ]; then
+ echo -e "\033[44;32;1mThere are already installed ${productName} server, so don't need install client!${NC}"
+ exit 0
+fi
- if [ -x ${bin_dir}/${clientName} ]; then
- update_flag=1
- update_TDengine
- else
- install_TDengine
- fi
+if [ -x ${bin_dir}/${clientName} ]; then
+ update_flag=1
+ update_TDengine
+else
+ install_TDengine
+fi
diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h
index d9f33510088cf228215edf0f77368334edd4b956..3ff61eca25fa54692569ff2db9a53813ce6b7a36 100644
--- a/source/client/inc/clientInt.h
+++ b/source/client/inc/clientInt.h
@@ -119,6 +119,8 @@ typedef struct SHeartBeatInfo {
struct SAppInstInfo {
int64_t numOfConns;
SCorEpSet mgmtEp;
+ TdThreadMutex qnodeMutex;
+ SArray* pQnodeList;
SInstanceSummary summary;
SList* pConnList; // STscObj linked list
uint64_t clusterId;
@@ -149,6 +151,7 @@ typedef struct STscObj {
int32_t numOfReqs; // number of sqlObj bound to this connection
SAppInstInfo* pAppInfo;
SHashObj* pRequests;
+ int8_t schemalessType;
} STscObj;
typedef struct SResultColumn {
@@ -160,6 +163,7 @@ typedef struct SResultColumn {
} SResultColumn;
typedef struct SReqResultInfo {
+ SQueryExecRes execRes;
const char* pRspMsg;
const char* pData;
TAOS_FIELD* fields; // todo, column names are not needed.
@@ -189,6 +193,7 @@ typedef struct SRequestSendRecvBody {
typedef struct {
int8_t resType;
char topic[TSDB_TOPIC_FNAME_LEN];
+ char db[TSDB_DB_FNAME_LEN];
int32_t vgId;
SSchemaWrapper schema;
int32_t resIter;
@@ -217,7 +222,8 @@ typedef struct SRequestObj {
void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4);
void doSetOneRowPtr(SReqResultInfo* pResultInfo);
void setResPrecision(SReqResultInfo* pResInfo, int32_t precision);
-int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4);
+int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4,
+ bool freeAfterUse);
void setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t numOfCols);
void doFreeReqResultInfo(SReqResultInfo* pResInfo);
@@ -239,7 +245,7 @@ static FORCE_INLINE SReqResultInfo* tmqGetNextResInfo(TAOS_RES* res, bool conver
taosMemoryFreeClear(msg->resInfo.length);
taosMemoryFreeClear(msg->resInfo.convertBuf);
}
- setQueryResultFromRsp(&msg->resInfo, pRetrieve, convertUcs4);
+ setQueryResultFromRsp(&msg->resInfo, pRetrieve, convertUcs4, false);
return &msg->resInfo;
}
return NULL;
@@ -290,7 +296,7 @@ SRequestObj* launchQuery(STscObj* pTscObj, const char* sql, int sqlLen);
int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtCallback* pStmtCb);
-int32_t getPlan(SRequestObj* pRequest, SQuery* pQuery, SQueryPlan** pPlan, SArray* pNodeList);
+int32_t getPlan(SRequestObj* pRequest, SQuery* pQuery, SQueryPlan** pPlan, SArray** pNodeList);
int32_t buildRequest(STscObj* pTscObj, const char* sql, int sqlLen, SRequestObj** pRequest);
@@ -315,8 +321,9 @@ void hbMgrInitMqHbRspHandle();
SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, int32_t code, bool keepQuery, void** res);
int32_t getQueryPlan(SRequestObj* pRequest, SQuery* pQuery, SArray** pNodeList);
-int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList, void** res);
+int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList);
int32_t refreshMeta(STscObj* pTscObj, SRequestObj* pRequest);
+int32_t updateQnodeList(SAppInstInfo* pInfo, SArray* pNodeList);
#ifdef __cplusplus
}
diff --git a/source/client/inc/clientStmt.h b/source/client/inc/clientStmt.h
index f0c9dcd67dd8e3b05775003221ddf86681da37ab..936fb92fc4019842485e7051abf161aee8a7d858 100644
--- a/source/client/inc/clientStmt.h
+++ b/source/client/inc/clientStmt.h
@@ -116,8 +116,11 @@ int stmtAffectedRowsOnce(TAOS_STMT *stmt);
int stmtPrepare(TAOS_STMT *stmt, const char *sql, unsigned long length);
int stmtSetTbName(TAOS_STMT *stmt, const char *tbName);
int stmtSetTbTags(TAOS_STMT *stmt, TAOS_MULTI_BIND *tags);
+int stmtGetTagFields(TAOS_STMT* stmt, int* nums, TAOS_FIELD_E** fields);
+int stmtGetColFields(TAOS_STMT* stmt, int* nums, TAOS_FIELD_E** fields);
int stmtIsInsert(TAOS_STMT *stmt, int *insert);
int stmtGetParamNum(TAOS_STMT *stmt, int *nums);
+int stmtGetParam(TAOS_STMT *stmt, int idx, int *type, int *bytes);
int stmtAddBatch(TAOS_STMT *stmt);
TAOS_RES *stmtUseResult(TAOS_STMT *stmt);
int stmtBindBatch(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind, int32_t colIdx);
diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c
index 669b2bc97eb3e6fab04701aebbf80402432b44c1..19847d9aa2fd04bd8b3f7ff13c0d8462d3c2b9cf 100644
--- a/source/client/src/clientEnv.c
+++ b/source/client/src/clientEnv.c
@@ -161,6 +161,7 @@ void *createTscObj(const char *user, const char *auth, const char *db, int32_t c
taosThreadMutexInit(&pObj->mutex, NULL);
pObj->id = taosAddRef(clientConnRefPool, pObj);
+ pObj->schemalessType = 0;
tscDebug("connObj created, 0x%" PRIx64, pObj->id);
return pObj;
@@ -234,6 +235,8 @@ static void doDestroyRequest(void *p) {
taosArrayDestroy(pRequest->tableList);
taosArrayDestroy(pRequest->dbList);
+ destroyQueryExecRes(&pRequest->body.resInfo.execRes);
+
deregisterRequest(pRequest);
taosMemoryFreeClear(pRequest);
}
diff --git a/source/client/src/clientHb.c b/source/client/src/clientHb.c
index d01ec501ba215dae820a72a8dfa8ab473d5b8950..09c3d269c703d6e2dc78cbef49a7790c98f34245 100644
--- a/source/client/src/clientHb.c
+++ b/source/client/src/clientHb.c
@@ -120,7 +120,7 @@ static int32_t hbProcessStbInfoRsp(void *value, int32_t valueLen, struct SCatalo
return TSDB_CODE_TSC_INVALID_VALUE;
}
- catalogUpdateSTableMeta(pCatalog, rsp);
+ catalogUpdateTableMeta(pCatalog, rsp);
}
}
@@ -140,8 +140,10 @@ static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) {
STscObj *pTscObj = (STscObj *)acquireTscObj(pRsp->connKey.tscRid);
if (NULL == pTscObj) {
tscDebug("tscObj rid %" PRIx64 " not exist", pRsp->connKey.tscRid);
- } else {
- updateEpSet_s(&pTscObj->pAppInfo->mgmtEp, &pRsp->query->epSet);
+ } else {
+ if (pRsp->query->totalDnodes > 1 && !isEpsetEqual(&pTscObj->pAppInfo->mgmtEp.epSet, &pRsp->query->epSet)) {
+ updateEpSet_s(&pTscObj->pAppInfo->mgmtEp, &pRsp->query->epSet);
+ }
pTscObj->connId = pRsp->query->connId;
if (pRsp->query->killRid) {
@@ -158,6 +160,10 @@ static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) {
taos_close(pTscObj);
}
+ if (pRsp->query->pQnodeList) {
+ updateQnodeList(pTscObj->pAppInfo, pRsp->query->pQnodeList);
+ }
+
releaseTscObj(pRsp->connKey.tscRid);
}
}
@@ -580,8 +586,15 @@ void hbClearReqInfo(SAppHbMgr *pAppHbMgr) {
}
}
+void hbThreadFuncUnexpectedStopped(void) {
+ atomic_store_8(&clientHbMgr.threadStop, 2);
+}
+
static void *hbThreadFunc(void *param) {
setThreadName("hb");
+#ifdef WINDOWS
+ atexit(hbThreadFuncUnexpectedStopped);
+#endif
while (1) {
int8_t threadStop = atomic_val_compare_exchange_8(&clientHbMgr.threadStop, 1, 2);
if (1 == threadStop) {
diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c
index daa5887127ae5df63fe600b59b9ef5c8da7a592a..e7b4fb06577d2f34a5ca9815754c00e904d873c8 100644
--- a/source/client/src/clientImpl.c
+++ b/source/client/src/clientImpl.c
@@ -118,6 +118,7 @@ TAOS* taos_connect_internal(const char* ip, const char* user, const char* pass,
if (pInst == NULL) {
p = taosMemoryCalloc(1, sizeof(struct SAppInstInfo));
p->mgmtEp = epSet;
+ taosThreadMutexInit(&p->qnodeMutex, NULL);
p->pTransporter = openTransporter(user, secretEncrypt, tsNumOfCores);
p->pAppHbMgr = appHbMgrInit(p, key);
taosHashPut(appInfo.pInstMap, key, strlen(key), &p, POINTER_BYTES);
@@ -175,6 +176,7 @@ int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtC
.pTransporter = pTscObj->pAppInfo->pTransporter,
.pStmtCb = pStmtCb,
.pUser = pTscObj->user,
+ .schemalessType = pTscObj->schemalessType,
.isSuperUser = (0 == strcmp(pTscObj->user, TSDB_DEFAULT_USER))};
cxt.mgmtEpSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);
@@ -202,7 +204,7 @@ int32_t execLocalCmd(SRequestObj* pRequest, SQuery* pQuery) {
SRetrieveTableRsp* pRsp = NULL;
int32_t code = qExecCommand(pQuery->pRoot, &pRsp);
if (TSDB_CODE_SUCCESS == code && NULL != pRsp) {
- code = setQueryResultFromRsp(&pRequest->body.resInfo, pRsp, false);
+ code = setQueryResultFromRsp(&pRequest->body.resInfo, pRsp, false, false);
}
return code;
}
@@ -228,7 +230,61 @@ int32_t execDdlQuery(SRequestObj* pRequest, SQuery* pQuery) {
return TSDB_CODE_SUCCESS;
}
-int32_t getPlan(SRequestObj* pRequest, SQuery* pQuery, SQueryPlan** pPlan, SArray* pNodeList) {
+int compareQueryNodeLoad(const void* elem1, const void* elem2) {
+ SQueryNodeLoad* node1 = (SQueryNodeLoad*)elem1;
+ SQueryNodeLoad* node2 = (SQueryNodeLoad*)elem2;
+
+ if (node1->load < node2->load) {
+ return -1;
+ }
+
+ return node1->load > node2->load;
+}
+
+int32_t updateQnodeList(SAppInstInfo* pInfo, SArray* pNodeList) {
+ taosThreadMutexLock(&pInfo->qnodeMutex);
+ if (pInfo->pQnodeList) {
+ taosArrayDestroy(pInfo->pQnodeList);
+ pInfo->pQnodeList = NULL;
+ }
+
+ if (pNodeList) {
+ pInfo->pQnodeList = taosArrayDup(pNodeList);
+ taosArraySort(pInfo->pQnodeList, compareQueryNodeLoad);
+ }
+ taosThreadMutexUnlock(&pInfo->qnodeMutex);
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t getQnodeList(SRequestObj* pRequest, SArray** pNodeList) {
+ SAppInstInfo* pInfo = pRequest->pTscObj->pAppInfo;
+ int32_t code = 0;
+
+ taosThreadMutexLock(&pInfo->qnodeMutex);
+ if (pInfo->pQnodeList) {
+ *pNodeList = taosArrayDup(pInfo->pQnodeList);
+ }
+ taosThreadMutexUnlock(&pInfo->qnodeMutex);
+
+ if (NULL == *pNodeList) {
+ SEpSet mgmtEpSet = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp);
+ SCatalog* pCatalog = NULL;
+ code = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog);
+ if (TSDB_CODE_SUCCESS == code) {
+ *pNodeList = taosArrayInit(5, sizeof(SQueryNodeLoad));
+ code = catalogGetQnodeList(pCatalog, pRequest->pTscObj->pAppInfo->pTransporter, &mgmtEpSet, *pNodeList);
+ }
+
+ if (TSDB_CODE_SUCCESS == code && *pNodeList) {
+ code = updateQnodeList(pInfo, *pNodeList);
+ }
+ }
+
+ return code;
+}
+
+int32_t getPlan(SRequestObj* pRequest, SQuery* pQuery, SQueryPlan** pPlan, SArray** pNodeList) {
pRequest->type = pQuery->msgType;
SPlanContext cxt = {.queryId = pRequest->requestId,
.acctId = pRequest->pTscObj->acctId,
@@ -237,14 +293,10 @@ int32_t getPlan(SRequestObj* pRequest, SQuery* pQuery, SQueryPlan** pPlan, SArra
.showRewrite = pQuery->showRewrite,
.pMsg = pRequest->msgBuf,
.msgLen = ERROR_MSG_BUF_DEFAULT_SIZE};
- SEpSet mgmtEpSet = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp);
- SCatalog* pCatalog = NULL;
- int32_t code = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog);
- if (TSDB_CODE_SUCCESS == code) {
- code = catalogGetQnodeList(pCatalog, pRequest->pTscObj->pAppInfo->pTransporter, &mgmtEpSet, pNodeList);
- }
+
+ int32_t code = getQnodeList(pRequest, pNodeList);
if (TSDB_CODE_SUCCESS == code) {
- code = qCreateQueryPlan(&cxt, pPlan, pNodeList);
+ code = qCreateQueryPlan(&cxt, pPlan, *pNodeList);
}
return code;
}
@@ -289,19 +341,64 @@ void setResPrecision(SReqResultInfo* pResInfo, int32_t precision) {
pResInfo->precision = precision;
}
-int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList, void** pRes) {
+int32_t scheduleAsyncQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList) {
+ void* pTransporter = pRequest->pTscObj->pAppInfo->pTransporter;
+
+ tsem_init(&schdRspSem, 0, 0);
+
+ SQueryResult res = {.code = 0, .numOfRows = 0};
+ int32_t code = schedulerAsyncExecJob(pTransporter, pNodeList, pDag, &pRequest->body.queryJob, pRequest->sqlstr,
+ pRequest->metric.start, schdExecCallback, &res);
+
+ pRequest->body.resInfo.execRes = res.res;
+
+ while (true) {
+ if (code != TSDB_CODE_SUCCESS) {
+ if (pRequest->body.queryJob != 0) {
+ schedulerFreeJob(pRequest->body.queryJob);
+ }
+
+ pRequest->code = code;
+ terrno = code;
+ return pRequest->code;
+ } else {
+ tsem_wait(&schdRspSem);
+
+ if (res.code) {
+ code = res.code;
+ } else {
+ break;
+ }
+ }
+ }
+
+ if (TDMT_VND_SUBMIT == pRequest->type || TDMT_VND_CREATE_TABLE == pRequest->type) {
+ pRequest->body.resInfo.numOfRows = res.numOfRows;
+
+ if (pRequest->body.queryJob != 0) {
+ schedulerFreeJob(pRequest->body.queryJob);
+ }
+ }
+
+ pRequest->code = res.code;
+ terrno = res.code;
+ return pRequest->code;
+}
+
+int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList) {
void* pTransporter = pRequest->pTscObj->pAppInfo->pTransporter;
- SQueryResult res = {.code = 0, .numOfRows = 0, .msgSize = ERROR_MSG_BUF_DEFAULT_SIZE, .msg = pRequest->msgBuf};
+ SQueryResult res = {.code = 0, .numOfRows = 0};
int32_t code = schedulerExecJob(pTransporter, pNodeList, pDag, &pRequest->body.queryJob, pRequest->sqlstr,
pRequest->metric.start, &res);
+
+ pRequest->body.resInfo.execRes = res.res;
+
if (code != TSDB_CODE_SUCCESS) {
if (pRequest->body.queryJob != 0) {
schedulerFreeJob(pRequest->body.queryJob);
}
- *pRes = res.res;
-
pRequest->code = code;
terrno = code;
return pRequest->code;
@@ -315,92 +412,118 @@ int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList
}
}
- *pRes = res.res;
-
pRequest->code = res.code;
terrno = res.code;
return pRequest->code;
}
int32_t getQueryPlan(SRequestObj* pRequest, SQuery* pQuery, SArray** pNodeList) {
- *pNodeList = taosArrayInit(4, sizeof(struct SQueryNodeAddr));
- return getPlan(pRequest, pQuery, &pRequest->body.pDag, *pNodeList);
+ return getPlan(pRequest, pQuery, &pRequest->body.pDag, pNodeList);
}
-int32_t validateSversion(SRequestObj* pRequest, void* res) {
- SArray* pArray = NULL;
+int32_t handleSubmitExecRes(SRequestObj* pRequest, void* res, SCatalog* pCatalog, SEpSet *epset) {
int32_t code = 0;
-
- if (TDMT_VND_SUBMIT == pRequest->type) {
- SSubmitRsp* pRsp = (SSubmitRsp*)res;
- if (pRsp->nBlocks <= 0) {
- return TSDB_CODE_SUCCESS;
+ SArray* pArray = NULL;
+ SSubmitRsp* pRsp = (SSubmitRsp*)res;
+ if (pRsp->nBlocks <= 0) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ pArray = taosArrayInit(pRsp->nBlocks, sizeof(STbSVersion));
+ if (NULL == pArray) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
+ for (int32_t i = 0; i < pRsp->nBlocks; ++i) {
+ SSubmitBlkRsp* blk = pRsp->pBlocks + i;
+ if (NULL == blk->tblFName || 0 == blk->tblFName[0]) {
+ continue;
}
+
+ STbSVersion tbSver = {.tbFName = blk->tblFName, .sver = blk->sver};
+ taosArrayPush(pArray, &tbSver);
+ }
- pArray = taosArrayInit(pRsp->nBlocks, sizeof(STbSVersion));
- if (NULL == pArray) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- return TSDB_CODE_OUT_OF_MEMORY;
- }
+ code = catalogChkTbMetaVersion(pCatalog, pRequest->pTscObj->pAppInfo->pTransporter, epset, pArray);
- for (int32_t i = 0; i < pRsp->nBlocks; ++i) {
- SSubmitBlkRsp* blk = pRsp->pBlocks + i;
- if (NULL == blk->tblFName || 0 == blk->tblFName[0]) {
- continue;
- }
-
- STbSVersion tbSver = {.tbFName = blk->tblFName, .sver = blk->sver};
- taosArrayPush(pArray, &tbSver);
- }
- } else if (TDMT_VND_QUERY == pRequest->type) {
- SArray* pTbArray = (SArray*)res;
- int32_t tbNum = taosArrayGetSize(pTbArray);
- if (tbNum <= 0) {
- return TSDB_CODE_SUCCESS;
- }
+_return:
- pArray = taosArrayInit(tbNum, sizeof(STbSVersion));
- if (NULL == pArray) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- return TSDB_CODE_OUT_OF_MEMORY;
- }
+ taosArrayDestroy(pArray);
+ return code;
+}
- for (int32_t i = 0; i < tbNum; ++i) {
- STbVerInfo* tbInfo = taosArrayGet(pTbArray, i);
- STbSVersion tbSver = {.tbFName = tbInfo->tbFName, .sver = tbInfo->sversion};
- taosArrayPush(pArray, &tbSver);
- }
+int32_t handleQueryExecRes(SRequestObj* pRequest, void* res, SCatalog* pCatalog, SEpSet *epset) {
+ int32_t code = 0;
+ SArray* pArray = NULL;
+ SArray* pTbArray = (SArray*)res;
+ int32_t tbNum = taosArrayGetSize(pTbArray);
+ if (tbNum <= 0) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ pArray = taosArrayInit(tbNum, sizeof(STbSVersion));
+ if (NULL == pArray) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
+ for (int32_t i = 0; i < tbNum; ++i) {
+ STbVerInfo* tbInfo = taosArrayGet(pTbArray, i);
+ STbSVersion tbSver = {.tbFName = tbInfo->tbFName, .sver = tbInfo->sversion, .tver = tbInfo->tversion};
+ taosArrayPush(pArray, &tbSver);
}
- SCatalog* pCatalog = NULL;
- CHECK_CODE_GOTO(catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog), _return);
-
- SEpSet epset = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp);
-
- code = catalogChkTbMetaVersion(pCatalog, pRequest->pTscObj->pAppInfo->pTransporter, &epset, pArray);
+ code = catalogChkTbMetaVersion(pCatalog, pRequest->pTscObj->pAppInfo->pTransporter, epset, pArray);
_return:
taosArrayDestroy(pArray);
+ return code;
+}
- return code;
+int32_t handleAlterTbExecRes(void* res, SCatalog* pCatalog) {
+ return catalogUpdateTableMeta(pCatalog, (STableMetaRsp*)res);
}
-void freeRequestRes(SRequestObj* pRequest, void* res) {
- if (NULL == pRequest || NULL == res) {
- return;
+int32_t handleExecRes(SRequestObj* pRequest) {
+ if (NULL == pRequest->body.resInfo.execRes.res) {
+ return TSDB_CODE_SUCCESS;
}
-
- if (TDMT_VND_SUBMIT == pRequest->type) {
- tFreeSSubmitRsp((SSubmitRsp*)res);
- } else if (TDMT_VND_QUERY == pRequest->type) {
- taosArrayDestroy((SArray*)res);
+
+ int32_t code = 0;
+ SCatalog* pCatalog = NULL;
+ code = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog);
+ if (code) {
+ return code;
}
+
+ SEpSet epset = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp);
+ SQueryExecRes* pRes = &pRequest->body.resInfo.execRes;
+
+ switch (pRes->msgType) {
+ case TDMT_VND_ALTER_TABLE:
+ case TDMT_MND_ALTER_STB: {
+ code = handleAlterTbExecRes(pRes->res, pCatalog);
+ break;
+ }
+ case TDMT_VND_SUBMIT: {
+ code = handleSubmitExecRes(pRequest, pRes->res, pCatalog, &epset);
+ break;
+ }
+ case TDMT_VND_QUERY: {
+ code = handleQueryExecRes(pRequest, pRes->res, pCatalog, &epset);
+ break;
+ }
+ default:
+ tscError("invalid exec result for request type %d", pRequest->type);
+ return TSDB_CODE_APP_ERROR;
+ }
+
+ return code;
}
SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, int32_t code, bool keepQuery, void** res) {
- void* pRes = NULL;
-
if (TSDB_CODE_SUCCESS == code) {
switch (pQuery->execMode) {
case QUERY_EXEC_MODE_LOCAL:
@@ -410,13 +533,10 @@ SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, int32_t code
code = execDdlQuery(pRequest, pQuery);
break;
case QUERY_EXEC_MODE_SCHEDULE: {
- SArray* pNodeList = taosArrayInit(4, sizeof(struct SQueryNodeAddr));
- code = getPlan(pRequest, pQuery, &pRequest->body.pDag, pNodeList);
+ SArray* pNodeList = NULL;
+ code = getPlan(pRequest, pQuery, &pRequest->body.pDag, &pNodeList);
if (TSDB_CODE_SUCCESS == code) {
- code = scheduleQuery(pRequest, pRequest->body.pDag, pNodeList, &pRes);
- if (NULL != pRes) {
- code = validateSversion(pRequest, pRes);
- }
+ code = scheduleQuery(pRequest, pRequest->body.pDag, pNodeList);
}
taosArrayDestroy(pNodeList);
break;
@@ -433,15 +553,15 @@ SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, int32_t code
qDestroyQuery(pQuery);
}
+ handleExecRes(pRequest);
+
if (NULL != pRequest && TSDB_CODE_SUCCESS != code) {
pRequest->code = terrno;
}
if (res) {
- *res = pRes;
- } else {
- freeRequestRes(pRequest, pRes);
- pRes = NULL;
+ *res = pRequest->body.resInfo.execRes.res;
+ pRequest->body.resInfo.execRes.res = NULL;
}
return pRequest;
@@ -506,12 +626,12 @@ int32_t refreshMeta(STscObj* pTscObj, SRequestObj* pRequest) {
int32_t removeMeta(STscObj* pTscObj, SArray* tbList) {
SCatalog* pCatalog = NULL;
- int32_t tbNum = taosArrayGetSize(tbList);
- int32_t code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
+ int32_t tbNum = taosArrayGetSize(tbList);
+ int32_t code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
-
+
for (int32_t i = 0; i < tbNum; ++i) {
SName* pTbName = taosArrayGet(tbList, i);
catalogRemoveTableMeta(pCatalog, pTbName);
@@ -520,7 +640,6 @@ int32_t removeMeta(STscObj* pTscObj, SArray* tbList) {
return TSDB_CODE_SUCCESS;
}
-
SRequestObj* execQuery(STscObj* pTscObj, const char* sql, int sqlLen) {
SRequestObj* pRequest = NULL;
int32_t retryNum = 0;
@@ -543,7 +662,7 @@ SRequestObj* execQuery(STscObj* pTscObj, const char* sql, int sqlLen) {
if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type)) {
removeMeta(pTscObj, pRequest->tableList);
}
-
+
return pRequest;
}
@@ -684,28 +803,55 @@ static void destroySendMsgInfo(SMsgSendInfo* pMsgBody) {
taosMemoryFreeClear(pMsgBody);
}
-bool persistConnForSpecificMsg(void* parenct, tmsg_t msgType) {
- return msgType == TDMT_VND_QUERY_RSP || msgType == TDMT_VND_FETCH_RSP || msgType == TDMT_VND_RES_READY_RSP ||
- msgType == TDMT_VND_QUERY_HEARTBEAT_RSP;
+void updateTargetEpSet(SMsgSendInfo* pSendInfo, STscObj* pTscObj, SRpcMsg* pMsg, SEpSet* pEpSet) {
+ if (NULL == pEpSet) {
+ return;
+ }
+
+ switch (pSendInfo->target.type) {
+ case TARGET_TYPE_MNODE:
+ if (NULL == pTscObj) {
+ tscError("mnode epset changed but not able to update it, reqObjRefId:%" PRIx64, pSendInfo->requestObjRefId);
+ return;
+ }
+
+ updateEpSet_s(&pTscObj->pAppInfo->mgmtEp, pEpSet);
+ break;
+ case TARGET_TYPE_VNODE: {
+ if (NULL == pTscObj) {
+ tscError("vnode epset changed but not able to update it, reqObjRefId:%" PRIx64, pSendInfo->requestObjRefId);
+ return;
+ }
+
+ SCatalog* pCatalog = NULL;
+ int32_t code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscError("fail to get catalog handle, clusterId:%" PRIx64 ", error %s", pTscObj->pAppInfo->clusterId,
+ tstrerror(code));
+ return;
+ }
+
+ catalogUpdateVgEpSet(pCatalog, pSendInfo->target.dbFName, pSendInfo->target.vgId, pEpSet);
+ break;
+ }
+ default:
+ tscDebug("epset changed, not updated, msgType %s", TMSG_INFO(pMsg->msgType));
+ break;
+ }
}
void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
SMsgSendInfo* pSendInfo = (SMsgSendInfo*)pMsg->info.ahandle;
assert(pMsg->info.ahandle != NULL);
+ SRequestObj* pRequest = NULL;
+ STscObj* pTscObj = NULL;
if (pSendInfo->requestObjRefId != 0) {
SRequestObj* pRequest = (SRequestObj*)taosAcquireRef(clientReqRefPool, pSendInfo->requestObjRefId);
assert(pRequest->self == pSendInfo->requestObjRefId);
pRequest->metric.rsp = taosGetTimestampUs();
-
- STscObj* pTscObj = pRequest->pTscObj;
- if (pEpSet) {
- if (!isEpsetEqual(&pTscObj->pAppInfo->mgmtEp.epSet, pEpSet)) {
- updateEpSet_s(&pTscObj->pAppInfo->mgmtEp, pEpSet);
- }
- }
-
+ pTscObj = pRequest->pTscObj;
/*
* There is not response callback function for submit response.
* The actual inserted number of points is the first number.
@@ -722,6 +868,8 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
taosReleaseRef(clientReqRefPool, pSendInfo->requestObjRefId);
}
+ updateTargetEpSet(pSendInfo, pTscObj, pMsg, pEpSet);
+
SDataBuf buf = {.len = pMsg->contLen, .pData = NULL, .handle = pMsg->info.handle};
if (pMsg->contLen > 0) {
@@ -796,7 +944,58 @@ void doSetOneRowPtr(SReqResultInfo* pResultInfo) {
}
}
+void* doAsyncFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4) {
+ assert(pRequest != NULL);
+
+ SReqResultInfo* pResultInfo = &pRequest->body.resInfo;
+ if (pResultInfo->pData == NULL || pResultInfo->current >= pResultInfo->numOfRows) {
+ // All data has returned to App already, no need to try again
+ if (pResultInfo->completed) {
+ pResultInfo->numOfRows = 0;
+ return NULL;
+ }
+
+ tsem_init(&schdRspSem, 0, 0);
+
+ SReqResultInfo* pResInfo = &pRequest->body.resInfo;
+ SSchdFetchParam param = {.pData = (void**)&pResInfo->pData, .code = &pRequest->code};
+ pRequest->code = schedulerAsyncFetchRows(pRequest->body.queryJob, schdFetchCallback, ¶m);
+ if (pRequest->code != TSDB_CODE_SUCCESS) {
+ pResultInfo->numOfRows = 0;
+ return NULL;
+ }
+
+ tsem_wait(&schdRspSem);
+ if (pRequest->code != TSDB_CODE_SUCCESS) {
+ pResultInfo->numOfRows = 0;
+ return NULL;
+ }
+
+ pRequest->code =
+ setQueryResultFromRsp(&pRequest->body.resInfo, (SRetrieveTableRsp*)pResInfo->pData, convertUcs4, true);
+ if (pRequest->code != TSDB_CODE_SUCCESS) {
+ pResultInfo->numOfRows = 0;
+ return NULL;
+ }
+
+ tscDebug("0x%" PRIx64 " fetch results, numOfRows:%d total Rows:%" PRId64 ", complete:%d, reqId:0x%" PRIx64,
+ pRequest->self, pResInfo->numOfRows, pResInfo->totalRows, pResInfo->completed, pRequest->requestId);
+
+ if (pResultInfo->numOfRows == 0) {
+ return NULL;
+ }
+ }
+
+ if (setupOneRowPtr) {
+ doSetOneRowPtr(pResultInfo);
+ pResultInfo->current += 1;
+ }
+
+ return pResultInfo->row;
+}
+
void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4) {
+ // return doAsyncFetchRows(pRequest, setupOneRowPtr, convertUcs4);
assert(pRequest != NULL);
SReqResultInfo* pResultInfo = &pRequest->body.resInfo;
@@ -814,7 +1013,8 @@ void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4)
return NULL;
}
- pRequest->code = setQueryResultFromRsp(&pRequest->body.resInfo, (SRetrieveTableRsp*)pResInfo->pData, convertUcs4);
+ pRequest->code =
+ setQueryResultFromRsp(&pRequest->body.resInfo, (SRetrieveTableRsp*)pResInfo->pData, convertUcs4, true);
if (pRequest->code != TSDB_CODE_SUCCESS) {
pResultInfo->numOfRows = 0;
return NULL;
@@ -858,27 +1058,20 @@ static char* parseTagDatatoJson(void* p) {
goto end;
}
- int16_t nCols = kvRowNCols(p);
+ SArray* pTagVals = NULL;
+ if (tTagToValArray((const STag*)p, &pTagVals) != 0) {
+ goto end;
+ }
+
+ int16_t nCols = taosArrayGetSize(pTagVals);
char tagJsonKey[256] = {0};
for (int j = 0; j < nCols; ++j) {
- SColIdx* pColIdx = kvRowColIdxAt(p, j);
- char* val = (char*)(kvRowColVal(p, pColIdx));
- if (j == 0) {
- if (*val == TSDB_DATA_TYPE_NULL) {
- string = taosMemoryCalloc(1, 8);
- sprintf(string, "%s", TSDB_DATA_NULL_STR_L);
- goto end;
- }
- continue;
- }
-
+ STagVal* pTagVal = (STagVal*)taosArrayGet(pTagVals, j);
// json key encode by binary
memset(tagJsonKey, 0, sizeof(tagJsonKey));
- memcpy(tagJsonKey, varDataVal(val), varDataLen(val));
+ memcpy(tagJsonKey, pTagVal->pKey, strlen(pTagVal->pKey));
// json value
- val += varDataTLen(val);
- char* realData = POINTER_SHIFT(val, CHAR_BYTES);
- char type = *val;
+ char type = pTagVal->type;
if (type == TSDB_DATA_TYPE_NULL) {
cJSON* value = cJSON_CreateNull();
if (value == NULL) {
@@ -887,11 +1080,12 @@ static char* parseTagDatatoJson(void* p) {
cJSON_AddItemToObject(json, tagJsonKey, value);
} else if (type == TSDB_DATA_TYPE_NCHAR) {
cJSON* value = NULL;
- if (varDataLen(realData) > 0) {
- char* tagJsonValue = taosMemoryCalloc(varDataLen(realData), 1);
- int32_t length = taosUcs4ToMbs((TdUcs4*)varDataVal(realData), varDataLen(realData), tagJsonValue);
+ if (pTagVal->nData > 0) {
+ char* tagJsonValue = taosMemoryCalloc(pTagVal->nData, 1);
+ int32_t length = taosUcs4ToMbs((TdUcs4*)pTagVal->pData, pTagVal->nData, tagJsonValue);
if (length < 0) {
- tscError("charset:%s to %s. val:%s convert json value failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, val);
+ tscError("charset:%s to %s. val:%s convert json value failed.", DEFAULT_UNICODE_ENCODEC, tsCharset,
+ pTagVal->pData);
taosMemoryFree(tagJsonValue);
goto end;
}
@@ -900,7 +1094,7 @@ static char* parseTagDatatoJson(void* p) {
if (value == NULL) {
goto end;
}
- } else if (varDataLen(realData) == 0) {
+ } else if (pTagVal->nData == 0) {
value = cJSON_CreateString("");
} else {
ASSERT(0);
@@ -908,22 +1102,14 @@ static char* parseTagDatatoJson(void* p) {
cJSON_AddItemToObject(json, tagJsonKey, value);
} else if (type == TSDB_DATA_TYPE_DOUBLE) {
- double jsonVd = *(double*)(realData);
+ double jsonVd = *(double*)(&pTagVal->i64);
cJSON* value = cJSON_CreateNumber(jsonVd);
if (value == NULL) {
goto end;
}
cJSON_AddItemToObject(json, tagJsonKey, value);
- // }else if(type == TSDB_DATA_TYPE_BIGINT){
- // int64_t jsonVd = *(int64_t*)(realData);
- // cJSON* value = cJSON_CreateNumber((double)jsonVd);
- // if (value == NULL)
- // {
- // goto end;
- // }
- // cJSON_AddItemToObject(json, tagJsonKey, value);
} else if (type == TSDB_DATA_TYPE_BOOL) {
- char jsonVd = *(char*)(realData);
+ char jsonVd = *(char*)(&pTagVal->i64);
cJSON* value = cJSON_CreateBool(jsonVd);
if (value == NULL) {
goto end;
@@ -988,7 +1174,7 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int
if (jsonInnerType == TSDB_DATA_TYPE_NULL) {
sprintf(varDataVal(dst), "%s", TSDB_DATA_NULL_STR_L);
varDataSetLen(dst, strlen(varDataVal(dst)));
- } else if (jsonInnerType == TSDB_DATA_TYPE_JSON) {
+ } else if (jsonInnerType == TD_TAG_JSON) {
char* jsonString = parseTagDatatoJson(jsonInnerData);
STR_TO_VARSTR(dst, jsonString);
taosMemoryFree(jsonString);
@@ -1007,10 +1193,6 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int
double jsonVd = *(double*)(jsonInnerData);
sprintf(varDataVal(dst), "%.9lf", jsonVd);
varDataSetLen(dst, strlen(varDataVal(dst)));
- } else if (jsonInnerType == TSDB_DATA_TYPE_BIGINT) {
- int64_t jsonVd = *(int64_t*)(jsonInnerData);
- sprintf(varDataVal(dst), "%" PRId64, jsonVd);
- varDataSetLen(dst, strlen(varDataVal(dst)));
} else if (jsonInnerType == TSDB_DATA_TYPE_BOOL) {
sprintf(varDataVal(dst), "%s", (*((char*)jsonInnerData) == 1) ? "true" : "false");
varDataSetLen(dst, strlen(varDataVal(dst)));
@@ -1121,9 +1303,12 @@ void resetConnectDB(STscObj* pTscObj) {
taosThreadMutexUnlock(&pTscObj->mutex);
}
-int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4) {
+int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4,
+ bool freeAfterUse) {
assert(pResultInfo != NULL && pRsp != NULL);
+ if (freeAfterUse) taosMemoryFreeClear(pResultInfo->pRspMsg);
+
pResultInfo->pRspMsg = (const char*)pRsp;
pResultInfo->pData = (void*)pRsp->data;
pResultInfo->numOfRows = htonl(pRsp->numOfRows);
diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c
index 53eb443b36b05393b22667a6f623892008f14ebb..e144885e9efc4b3eca7c806996b77ad416d70161 100644
--- a/source/client/src/clientMain.c
+++ b/source/client/src/clientMain.c
@@ -666,8 +666,39 @@ int taos_stmt_set_tbname(TAOS_STMT *stmt, const char *name) {
return stmtSetTbName(stmt, name);
}
+int taos_stmt_set_tags(TAOS_STMT *stmt, TAOS_MULTI_BIND *tags) {
+ if (stmt == NULL || tags == NULL) {
+ tscError("NULL parameter for %s", __FUNCTION__);
+ terrno = TSDB_CODE_INVALID_PARA;
+ return terrno;
+ }
+
+ return stmtSetTbTags(stmt, tags);
+}
+
+
int taos_stmt_set_sub_tbname(TAOS_STMT *stmt, const char *name) { return taos_stmt_set_tbname(stmt, name); }
+int taos_stmt_get_tag_fields(TAOS_STMT *stmt, int* fieldNum, TAOS_FIELD_E** fields) {
+ if (stmt == NULL || NULL == fieldNum) {
+ tscError("NULL parameter for %s", __FUNCTION__);
+ terrno = TSDB_CODE_INVALID_PARA;
+ return terrno;
+ }
+
+ return stmtGetTagFields(stmt, fieldNum, fields);
+}
+
+int taos_stmt_get_col_fields(TAOS_STMT *stmt, int* fieldNum, TAOS_FIELD_E** fields) {
+ if (stmt == NULL || NULL == fieldNum) {
+ tscError("NULL parameter for %s", __FUNCTION__);
+ terrno = TSDB_CODE_INVALID_PARA;
+ return terrno;
+ }
+
+ return stmtGetColFields(stmt, fieldNum, fields);
+}
+
int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind) {
if (stmt == NULL || bind == NULL) {
tscError("NULL parameter for %s", __FUNCTION__);
@@ -772,6 +803,16 @@ int taos_stmt_num_params(TAOS_STMT *stmt, int *nums) {
return stmtGetParamNum(stmt, nums);
}
+int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes) {
+ if (stmt == NULL || type == NULL || NULL == bytes || idx < 0) {
+ tscError("invalid parameter for %s", __FUNCTION__);
+ terrno = TSDB_CODE_INVALID_PARA;
+ return terrno;
+ }
+
+ return stmtGetParam(stmt, idx, type, bytes);
+}
+
TAOS_RES *taos_stmt_use_result(TAOS_STMT *stmt) {
if (stmt == NULL) {
tscError("NULL parameter for %s", __FUNCTION__);
diff --git a/source/client/src/clientMsgHandler.c b/source/client/src/clientMsgHandler.c
index dfce01dd6356f19da8dce1b8de9c2eb9e9ca42e4..9de3ee1d0f1a8a529c7177329543e3379cdc6cbb 100644
--- a/source/client/src/clientMsgHandler.c
+++ b/source/client/src/clientMsgHandler.c
@@ -58,7 +58,12 @@ int32_t processConnectRsp(void* param, const SDataBuf* pMsg, int32_t code) {
return code;
}
- if (connectRsp.dnodeNum > 1 && !isEpsetEqual(&pTscObj->pAppInfo->mgmtEp.epSet, &connectRsp.epSet)) {
+ if (connectRsp.dnodeNum == 1) {
+ SEpSet srcEpSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);
+ SEpSet dstEpSet = connectRsp.epSet;
+ rpcSetDefaultAddr(pTscObj->pAppInfo->pTransporter, srcEpSet.eps[srcEpSet.inUse].fqdn,
+ dstEpSet.eps[dstEpSet.inUse].fqdn);
+ } else if (connectRsp.dnodeNum > 1 && !isEpsetEqual(&pTscObj->pAppInfo->mgmtEp.epSet, &connectRsp.epSet)) {
updateEpSet_s(&pTscObj->pAppInfo->mgmtEp, &connectRsp.epSet);
}
@@ -126,9 +131,10 @@ int32_t processUseDbRsp(void* param, const SDataBuf* pMsg, int32_t code) {
if (usedbRsp.vgVersion >= 0) {
uint64_t clusterId = pRequest->pTscObj->pAppInfo->clusterId;
- int32_t code1 = catalogGetHandle(clusterId, &pCatalog);
+ int32_t code1 = catalogGetHandle(clusterId, &pCatalog);
if (code1 != TSDB_CODE_SUCCESS) {
- tscWarn("0x%" PRIx64 "catalogGetHandle failed, clusterId:%" PRIx64 ", error:%s", pRequest->requestId, clusterId, tstrerror(code1));
+ tscWarn("0x%" PRIx64 "catalogGetHandle failed, clusterId:%" PRIx64 ", error:%s", pRequest->requestId, clusterId,
+ tstrerror(code1));
} else {
catalogRemoveDB(pCatalog, usedbRsp.db, usedbRsp.uid);
}
@@ -158,7 +164,7 @@ int32_t processUseDbRsp(void* param, const SDataBuf* pMsg, int32_t code) {
if (output.dbVgroup) taosHashCleanup(output.dbVgroup->vgHash);
taosMemoryFreeClear(output.dbVgroup);
- tscError("0x%" PRIx64" failed to build use db output since %s", pRequest->requestId, terrstr());
+ tscError("0x%" PRIx64 " failed to build use db output since %s", pRequest->requestId, terrstr());
} else if (output.dbVgroup) {
struct SCatalog* pCatalog = NULL;
@@ -217,10 +223,33 @@ int32_t processDropDbRsp(void* param, const SDataBuf* pMsg, int32_t code) {
return code;
}
+int32_t processAlterStbRsp(void* param, const SDataBuf* pMsg, int32_t code) {
+ SRequestObj* pRequest = param;
+ if (code != TSDB_CODE_SUCCESS) {
+ setErrno(pRequest, code);
+ tsem_post(&pRequest->body.rspSem);
+ return code;
+ }
+
+ SMAlterStbRsp alterRsp = {0};
+ SDecoder coder = {0};
+ tDecoderInit(&coder, pMsg->pData, pMsg->len);
+ tDecodeSMAlterStbRsp(&coder, &alterRsp);
+ tDecoderClear(&coder);
+
+ pRequest->body.resInfo.execRes.msgType = TDMT_MND_ALTER_STB;
+ pRequest->body.resInfo.execRes.res = alterRsp.pMeta;
+
+ tsem_post(&pRequest->body.rspSem);
+ return code;
+}
+
+
void initMsgHandleFp() {
handleRequestRspFp[TMSG_INDEX(TDMT_MND_CONNECT)] = processConnectRsp;
handleRequestRspFp[TMSG_INDEX(TDMT_MND_CREATE_DB)] = processCreateDbRsp;
handleRequestRspFp[TMSG_INDEX(TDMT_MND_USE_DB)] = processUseDbRsp;
handleRequestRspFp[TMSG_INDEX(TDMT_MND_CREATE_STB)] = processCreateTableRsp;
handleRequestRspFp[TMSG_INDEX(TDMT_MND_DROP_DB)] = processDropDbRsp;
+ handleRequestRspFp[TMSG_INDEX(TDMT_MND_ALTER_STB)] = processAlterStbRsp;
}
diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c
index 68c47c2d13421cd34e327db37e31ae76774985ac..bcdba696925a0cef4838fa24a563852385044c22 100644
--- a/source/client/src/clientSml.c
+++ b/source/client/src/clientSml.c
@@ -24,7 +24,6 @@
#define EQUAL '='
#define QUOTE '"'
#define SLASH '\\'
-#define tsMaxSQLStringLen (1024*1024)
#define JUMP_SPACE(sql) while (*sql != '\0'){if(*sql == SPACE) sql++;else break;}
// comma ,
@@ -63,12 +62,11 @@ for (int i = 1; i < keyLen; ++i) { \
#define TS "_ts"
#define TS_LEN 3
-#define VALUE "value"
-#define VALUE_LEN 5
+#define VALUE "_value"
+#define VALUE_LEN 6
#define BINARY_ADD_LEN 2 // "binary" 2 means " "
#define NCHAR_ADD_LEN 3 // L"nchar" 3 means L" "
-#define CHAR_SAVE_LENGTH 8
//=================================================================================================
typedef TSDB_SML_PROTOCOL_TYPE SMLProtocolType;
@@ -253,12 +251,20 @@ static int32_t smlGenerateSchemaAction(SSchema* colField, SHashObj* colHash, SSm
return 0;
}
+static int32_t smlFindNearestPowerOf2(int32_t length){
+ int32_t result = 1;
+ while(result <= length){
+ result *= 2;
+ }
+ return result;
+}
+
static int32_t smlBuildColumnDescription(SSmlKv* field, char* buf, int32_t bufSize, int32_t* outBytes) {
uint8_t type = field->type;
char tname[TSDB_TABLE_NAME_LEN] = {0};
memcpy(tname, field->key, field->keyLen);
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
- int32_t bytes = field->length > CHAR_SAVE_LENGTH ? (2*field->length) : CHAR_SAVE_LENGTH;
+ int32_t bytes = smlFindNearestPowerOf2(field->length);
int out = snprintf(buf, bufSize, "`%s` %s(%d)",
tname, tDataTypes[field->type].name, bytes);
*outBytes = out;
@@ -273,8 +279,8 @@ static int32_t smlBuildColumnDescription(SSmlKv* field, char* buf, int32_t bufSi
static int32_t smlApplySchemaAction(SSmlHandle* info, SSchemaAction* action) {
int32_t code = 0;
int32_t outBytes = 0;
- char *result = (char *)taosMemoryCalloc(1, tsMaxSQLStringLen+1);
- int32_t capacity = tsMaxSQLStringLen + 1;
+ char *result = (char *)taosMemoryCalloc(1, TSDB_MAX_ALLOWED_SQL_LEN);
+ int32_t capacity = TSDB_MAX_ALLOWED_SQL_LEN;
uDebug("SML:0x%"PRIx64" apply schema action. action: %d", info->id, action->action);
switch (action->action) {
@@ -398,7 +404,7 @@ static int32_t smlApplySchemaAction(SSmlHandle* info, SSchemaAction* action) {
}
if(taosArrayGetSize(cols) == 0){
outBytes = snprintf(pos, freeBytes,"`%s` %s(%d)",
- tsSmlTagName, tDataTypes[TSDB_DATA_TYPE_NCHAR].name, CHAR_SAVE_LENGTH);
+ tsSmlTagName, tDataTypes[TSDB_DATA_TYPE_NCHAR].name, 1);
pos += outBytes; freeBytes -= outBytes;
*pos = ','; ++pos; --freeBytes;
}
@@ -508,6 +514,11 @@ static int32_t smlModifyDBSchemas(SSmlHandle* info) {
if (code != TSDB_CODE_SUCCESS) {
return code;
}
+
+ code = catalogRefreshTableMeta(info->pCatalog, info->taos->pAppInfo->pTransporter, &ep, &pName, -1);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
} else {
uError("SML:0x%"PRIx64" load table meta error: %s", info->id, tstrerror(code));
return code;
@@ -1113,7 +1124,7 @@ static int32_t smlParseTelnetString(SSmlHandle *info, const char* sql, SSmlTable
}
static int32_t smlParseCols(const char* data, int32_t len, SArray *cols, char *childTableName, bool isTag, SHashObj *dumplicateKey, SSmlMsgBuf *msg){
- if(isTag && len == 0){
+ if(len == 0){
return TSDB_CODE_SUCCESS;
}
@@ -2307,6 +2318,28 @@ cleanup:
return code;
}
+static int32_t isSchemalessDb(SSmlHandle* info){
+ SName name;
+ tNameSetDbName(&name, info->taos->acctId, info->taos->db, strlen(info->taos->db));
+ char dbFname[TSDB_DB_FNAME_LEN] = {0};
+ tNameGetFullDbName(&name, dbFname);
+ SDbCfgInfo pInfo = {0};
+ SEpSet ep = getEpSet_s(&info->taos->pAppInfo->mgmtEp);
+
+ int32_t code = catalogGetDBCfg(info->pCatalog, info->taos->pAppInfo->pTransporter, &ep, dbFname, &pInfo);
+ if (code != TSDB_CODE_SUCCESS) {
+ info->pRequest->code = code;
+ smlBuildInvalidDataMsg(&info->msgBuf, "catalogGetDBCfg error, code:", tstrerror(code));
+ return code;
+ }
+ if (!pInfo.schemaless){
+ info->pRequest->code = TSDB_CODE_SML_INVALID_DB_CONF;
+ smlBuildInvalidDataMsg(&info->msgBuf, "can not insert into schemaless db:", dbFname);
+ return TSDB_CODE_SML_INVALID_DB_CONF;
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
/**
* taos_schemaless_insert() parse and insert data points into database according to
* different protocol.
@@ -2340,6 +2373,19 @@ TAOS_RES* taos_schemaless_insert(TAOS* taos, char* lines[], int numLines, int pr
return (TAOS_RES*)request;
}
+ info->taos->schemalessType = 1;
+ if(request->pDb == NULL){
+ request->code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
+ smlBuildInvalidDataMsg(&info->msgBuf, "Database not specified", NULL);
+ goto end;
+ }
+
+ if(isSchemalessDb(info) != TSDB_CODE_SUCCESS){
+ request->code = TSDB_CODE_SML_INVALID_DB_CONF;
+ smlBuildInvalidDataMsg(&info->msgBuf, "Cannot write data to a non schemaless database", NULL);
+ goto end;
+ }
+
if (!lines) {
request->code = TSDB_CODE_SML_INVALID_DATA;
smlBuildInvalidDataMsg(&info->msgBuf, "lines is null", NULL);
@@ -2361,6 +2407,7 @@ TAOS_RES* taos_schemaless_insert(TAOS* taos, char* lines[], int numLines, int pr
info->pRequest->code = smlProcess(info, lines, numLines);
end:
+ uDebug("result:%s", info->msgBuf.buf);
smlDestroyInfo(info);
return (TAOS_RES*)request;
}
diff --git a/source/client/src/clientStmt.c b/source/client/src/clientStmt.c
index 17468584822361d8a3f7ff048cb797a8174b1836..3adb3684da1164363a1ffda4c26130643efc5f78 100644
--- a/source/client/src/clientStmt.c
+++ b/source/client/src/clientStmt.c
@@ -17,7 +17,7 @@ int32_t stmtSwitchStatus(STscStmt* pStmt, STMT_STATUS newStatus) {
}
break;
case STMT_SETTAGS:
- if (STMT_STATUS_NE(SETTBNAME)) {
+ if (STMT_STATUS_NE(SETTBNAME) && STMT_STATUS_NE(FETCH_FIELDS)) {
code = TSDB_CODE_TSC_STMT_API_ERROR;
}
break;
@@ -48,7 +48,8 @@ int32_t stmtSwitchStatus(STscStmt* pStmt, STMT_STATUS newStatus) {
break;
case STMT_EXECUTE:
if (STMT_TYPE_QUERY == pStmt->sql.type) {
- if (STMT_STATUS_NE(ADD_BATCH) && STMT_STATUS_NE(FETCH_FIELDS) && STMT_STATUS_NE(BIND) && STMT_STATUS_NE(BIND_COL)) {
+ if (STMT_STATUS_NE(ADD_BATCH) && STMT_STATUS_NE(FETCH_FIELDS) && STMT_STATUS_NE(BIND) &&
+ STMT_STATUS_NE(BIND_COL)) {
code = TSDB_CODE_TSC_STMT_API_ERROR;
}
} else {
@@ -230,22 +231,6 @@ int32_t stmtParseSql(STscStmt* pStmt) {
pStmt->sql.type = STMT_TYPE_QUERY;
}
-/*
- switch (nodeType(pStmt->sql.pQuery->pRoot)) {
- case QUERY_NODE_VNODE_MODIF_STMT:
- if (0 == pStmt->sql.type) {
- pStmt->sql.type = STMT_TYPE_INSERT;
- }
- break;
- case QUERY_NODE_SELECT_STMT:
- pStmt->sql.type = STMT_TYPE_QUERY;
- break;
- default:
- tscError("not supported stmt type %d", nodeType(pStmt->sql.pQuery->pRoot));
- STMT_ERR_RET(TSDB_CODE_TSC_STMT_CLAUSE_ERROR);
- }
-*/
-
return TSDB_CODE_SUCCESS;
}
@@ -555,6 +540,8 @@ int stmtSetTbName(TAOS_STMT* stmt, const char* tbName) {
if (pStmt->bInfo.needParse) {
strncpy(pStmt->bInfo.tbName, tbName, sizeof(pStmt->bInfo.tbName) - 1);
pStmt->bInfo.tbName[sizeof(pStmt->bInfo.tbName) - 1] = 0;
+
+ STMT_ERR_RET(stmtParseSql(pStmt));
}
return TSDB_CODE_SUCCESS;
@@ -565,10 +552,6 @@ int stmtSetTbTags(TAOS_STMT* stmt, TAOS_MULTI_BIND* tags) {
STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_SETTAGS));
- if (pStmt->bInfo.needParse) {
- STMT_ERR_RET(stmtParseSql(pStmt));
- }
-
if (pStmt->bInfo.inExecCache) {
return TSDB_CODE_SUCCESS;
}
@@ -586,7 +569,7 @@ int stmtSetTbTags(TAOS_STMT* stmt, TAOS_MULTI_BIND* tags) {
return TSDB_CODE_SUCCESS;
}
-int32_t stmtFetchTagFields(STscStmt* pStmt, int32_t* fieldNum, TAOS_FIELD** fields) {
+int stmtFetchTagFields(STscStmt* pStmt, int32_t* fieldNum, TAOS_FIELD_E** fields) {
if (STMT_TYPE_QUERY == pStmt->sql.type) {
tscError("invalid operation to get query tag fileds");
STMT_ERR_RET(TSDB_CODE_TSC_STMT_API_ERROR);
@@ -604,7 +587,7 @@ int32_t stmtFetchTagFields(STscStmt* pStmt, int32_t* fieldNum, TAOS_FIELD** fiel
return TSDB_CODE_SUCCESS;
}
-int32_t stmtFetchColFields(STscStmt* pStmt, int32_t* fieldNum, TAOS_FIELD** fields) {
+int stmtFetchColFields(STscStmt* pStmt, int32_t* fieldNum, TAOS_FIELD_E** fields) {
if (STMT_TYPE_QUERY == pStmt->sql.type) {
tscError("invalid operation to get query column fileds");
STMT_ERR_RET(TSDB_CODE_TSC_STMT_API_ERROR);
@@ -823,7 +806,7 @@ _return:
code = stmtUpdateTableUid(pStmt, pRsp);
}
}
-
+
tFreeSSubmitRsp(pRsp);
++pStmt->sql.runTimes;
@@ -861,12 +844,77 @@ int stmtIsInsert(TAOS_STMT* stmt, int* insert) {
if (pStmt->sql.type) {
*insert = (STMT_TYPE_INSERT == pStmt->sql.type || STMT_TYPE_MULTI_INSERT == pStmt->sql.type);
} else {
- *insert = isInsertSql(pStmt->sql.sqlStr, 0);
+ *insert = qIsInsertSql(pStmt->sql.sqlStr, 0);
}
return TSDB_CODE_SUCCESS;
}
+int stmtGetTagFields(TAOS_STMT* stmt, int* nums, TAOS_FIELD_E** fields) {
+ STscStmt* pStmt = (STscStmt*)stmt;
+
+ if (STMT_TYPE_QUERY == pStmt->sql.type) {
+ STMT_RET(TSDB_CODE_TSC_STMT_API_ERROR);
+ }
+
+ STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS));
+
+ if (pStmt->bInfo.needParse && pStmt->sql.runTimes && pStmt->sql.type > 0 &&
+ STMT_TYPE_MULTI_INSERT != pStmt->sql.type) {
+ pStmt->bInfo.needParse = false;
+ }
+
+ if (pStmt->exec.pRequest && STMT_TYPE_QUERY == pStmt->sql.type && pStmt->sql.runTimes) {
+ taos_free_result(pStmt->exec.pRequest);
+ pStmt->exec.pRequest = NULL;
+ }
+
+ if (NULL == pStmt->exec.pRequest) {
+ STMT_ERR_RET(buildRequest(pStmt->taos, pStmt->sql.sqlStr, pStmt->sql.sqlLen, &pStmt->exec.pRequest));
+ }
+
+ if (pStmt->bInfo.needParse) {
+ STMT_ERR_RET(stmtParseSql(pStmt));
+ }
+
+ STMT_ERR_RET(stmtFetchTagFields(stmt, nums, fields));
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int stmtGetColFields(TAOS_STMT* stmt, int* nums, TAOS_FIELD_E** fields) {
+ STscStmt* pStmt = (STscStmt*)stmt;
+
+ if (STMT_TYPE_QUERY == pStmt->sql.type) {
+ STMT_RET(TSDB_CODE_TSC_STMT_API_ERROR);
+ }
+
+ STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS));
+
+ if (pStmt->bInfo.needParse && pStmt->sql.runTimes && pStmt->sql.type > 0 &&
+ STMT_TYPE_MULTI_INSERT != pStmt->sql.type) {
+ pStmt->bInfo.needParse = false;
+ }
+
+ if (pStmt->exec.pRequest && STMT_TYPE_QUERY == pStmt->sql.type && pStmt->sql.runTimes) {
+ taos_free_result(pStmt->exec.pRequest);
+ pStmt->exec.pRequest = NULL;
+ }
+
+ if (NULL == pStmt->exec.pRequest) {
+ STMT_ERR_RET(buildRequest(pStmt->taos, pStmt->sql.sqlStr, pStmt->sql.sqlLen, &pStmt->exec.pRequest));
+ }
+
+ if (pStmt->bInfo.needParse) {
+ STMT_ERR_RET(stmtParseSql(pStmt));
+ }
+
+ STMT_ERR_RET(stmtFetchColFields(stmt, nums, fields));
+
+ return TSDB_CODE_SUCCESS;
+}
+
+
int stmtGetParamNum(TAOS_STMT* stmt, int* nums) {
STscStmt* pStmt = (STscStmt*)stmt;
@@ -899,6 +947,50 @@ int stmtGetParamNum(TAOS_STMT* stmt, int* nums) {
return TSDB_CODE_SUCCESS;
}
+int stmtGetParam(TAOS_STMT *stmt, int idx, int *type, int *bytes) {
+ STscStmt* pStmt = (STscStmt*)stmt;
+
+ if (STMT_TYPE_QUERY == pStmt->sql.type) {
+ STMT_RET(TSDB_CODE_TSC_STMT_API_ERROR);
+ }
+
+ STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS));
+
+ if (pStmt->bInfo.needParse && pStmt->sql.runTimes && pStmt->sql.type > 0 &&
+ STMT_TYPE_MULTI_INSERT != pStmt->sql.type) {
+ pStmt->bInfo.needParse = false;
+ }
+
+ if (pStmt->exec.pRequest && STMT_TYPE_QUERY == pStmt->sql.type && pStmt->sql.runTimes) {
+ taos_free_result(pStmt->exec.pRequest);
+ pStmt->exec.pRequest = NULL;
+ }
+
+ if (NULL == pStmt->exec.pRequest) {
+ STMT_ERR_RET(buildRequest(pStmt->taos, pStmt->sql.sqlStr, pStmt->sql.sqlLen, &pStmt->exec.pRequest));
+ }
+
+ if (pStmt->bInfo.needParse) {
+ STMT_ERR_RET(stmtParseSql(pStmt));
+ }
+
+ int32_t nums = 0;
+ TAOS_FIELD_E *pField = NULL;
+ STMT_ERR_RET(stmtFetchColFields(stmt, &nums, &pField));
+ if (idx >= nums) {
+ tscError("idx %d is too big", idx);
+ taosMemoryFree(pField);
+ STMT_ERR_RET(TSDB_CODE_INVALID_PARA);
+ }
+
+ *type = pField[idx].type;
+ *bytes = pField[idx].bytes;
+
+ taosMemoryFree(pField);
+
+ return TSDB_CODE_SUCCESS;
+}
+
TAOS_RES* stmtUseResult(TAOS_STMT* stmt) {
STscStmt* pStmt = (STscStmt*)stmt;
diff --git a/source/client/src/tmq.c b/source/client/src/tmq.c
index dfa56f80c457783eb58255f3a0d494936b475bad..c2170631c2c90ca1d7322a4210f7763c6a703c57 100644
--- a/source/client/src/tmq.c
+++ b/source/client/src/tmq.c
@@ -143,6 +143,7 @@ typedef struct {
typedef struct {
// subscribe info
char* topicName;
+ char db[TSDB_DB_FNAME_LEN];
SArray* vgs; // SArray
@@ -1039,6 +1040,7 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) {
topic.schema = pTopicEp->schema;
taosHashClear(pHash);
topic.topicName = strdup(pTopicEp->topic);
+ tstrncpy(topic.db, pTopicEp->db, TSDB_DB_FNAME_LEN);
tscDebug("consumer %ld update topic: %s", tmq->consumerId, topic.topicName);
int32_t topicNumCur = taosArrayGetSize(tmq->clientTopics);
@@ -1243,7 +1245,7 @@ tmq_resp_err_t tmq_seek(tmq_t* tmq, const tmq_topic_vgroup_t* offset) {
return TMQ_RESP_ERR__FAIL;
}
-SMqPollReq* tmqBuildConsumeReqImpl(tmq_t* tmq, int64_t waitTime, SMqClientTopic* pTopic, SMqClientVg* pVg) {
+SMqPollReq* tmqBuildConsumeReqImpl(tmq_t* tmq, int64_t timeout, SMqClientTopic* pTopic, SMqClientVg* pVg) {
int64_t reqOffset;
if (pVg->currentOffset >= 0) {
reqOffset = pVg->currentOffset;
@@ -1269,7 +1271,7 @@ SMqPollReq* tmqBuildConsumeReqImpl(tmq_t* tmq, int64_t waitTime, SMqClientTopic*
strcpy(pReq->subKey + tlen + 1, pTopic->topicName);
pReq->withTbName = tmq->withTbName;
- pReq->waitTime = waitTime;
+ pReq->timeout = timeout;
pReq->consumerId = tmq->consumerId;
pReq->epoch = tmq->epoch;
pReq->currentOffset = reqOffset;
@@ -1283,7 +1285,8 @@ SMqPollReq* tmqBuildConsumeReqImpl(tmq_t* tmq, int64_t waitTime, SMqClientTopic*
SMqRspObj* tmqBuildRspFromWrapper(SMqPollRspWrapper* pWrapper) {
SMqRspObj* pRspObj = taosMemoryCalloc(1, sizeof(SMqRspObj));
pRspObj->resType = RES_TYPE__TMQ;
- strncpy(pRspObj->topic, pWrapper->topicHandle->topicName, TSDB_TOPIC_FNAME_LEN);
+ tstrncpy(pRspObj->topic, pWrapper->topicHandle->topicName, TSDB_TOPIC_FNAME_LEN);
+ tstrncpy(pRspObj->db, pWrapper->topicHandle->db, TSDB_DB_FNAME_LEN);
pRspObj->vgId = pWrapper->vgHandle->vgId;
pRspObj->resIter = -1;
memcpy(&pRspObj->rsp, &pWrapper->msg, sizeof(SMqDataBlkRsp));
@@ -1297,7 +1300,7 @@ SMqRspObj* tmqBuildRspFromWrapper(SMqPollRspWrapper* pWrapper) {
return pRspObj;
}
-int32_t tmqPollImpl(tmq_t* tmq, int64_t waitTime) {
+int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) {
/*printf("call poll\n");*/
for (int i = 0; i < taosArrayGetSize(tmq->clientTopics); i++) {
SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i);
@@ -1318,7 +1321,7 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t waitTime) {
#endif
}
atomic_store_32(&pVg->vgSkipCnt, 0);
- SMqPollReq* pReq = tmqBuildConsumeReqImpl(tmq, waitTime, pTopic, pVg);
+ SMqPollReq* pReq = tmqBuildConsumeReqImpl(tmq, timeout, pTopic, pVg);
if (pReq == NULL) {
atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE);
tsem_post(&tmq->rspSem);
@@ -1388,7 +1391,7 @@ int32_t tmqHandleNoPollRsp(tmq_t* tmq, SMqRspWrapper* rspWrapper, bool* pReset)
return 0;
}
-SMqRspObj* tmqHandleAllRsp(tmq_t* tmq, int64_t waitTime, bool pollIfReset) {
+SMqRspObj* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
while (1) {
SMqRspWrapper* rspWrapper = NULL;
taosGetQitem(tmq->qall, (void**)&rspWrapper);
@@ -1428,17 +1431,17 @@ SMqRspObj* tmqHandleAllRsp(tmq_t* tmq, int64_t waitTime, bool pollIfReset) {
taosFreeQitem(rspWrapper);
if (pollIfReset && reset) {
tscDebug("consumer %ld reset and repoll", tmq->consumerId);
- tmqPollImpl(tmq, waitTime);
+ tmqPollImpl(tmq, timeout);
}
}
}
}
-TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t wait_time) {
+TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
SMqRspObj* rspObj;
int64_t startTime = taosGetTimestampMs();
- rspObj = tmqHandleAllRsp(tmq, wait_time, false);
+ rspObj = tmqHandleAllRsp(tmq, timeout, false);
if (rspObj) {
return (TAOS_RES*)rspObj;
}
@@ -1450,16 +1453,16 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t wait_time) {
while (1) {
tmqHandleAllDelayedTask(tmq);
- if (tmqPollImpl(tmq, wait_time) < 0) return NULL;
+ if (tmqPollImpl(tmq, timeout) < 0) return NULL;
- rspObj = tmqHandleAllRsp(tmq, wait_time, false);
+ rspObj = tmqHandleAllRsp(tmq, timeout, false);
if (rspObj) {
return (TAOS_RES*)rspObj;
}
- if (wait_time != 0) {
+ if (timeout != 0) {
int64_t endTime = taosGetTimestampMs();
int64_t leftTime = endTime - startTime;
- if (leftTime > wait_time) {
+ if (leftTime > timeout) {
tscDebug("consumer %ld (epoch %d) timeout, no rsp", tmq->consumerId, tmq->epoch);
return NULL;
}
@@ -1474,10 +1477,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t wait_time) {
tmq_resp_err_t tmq_consumer_close(tmq_t* tmq) {
if (tmq->status == TMQ_CONSUMER_STATUS__READY) {
tmq_resp_err_t rsp = tmq_commit_sync(tmq, NULL);
- if (rsp == TMQ_RESP_ERR__SUCCESS) {
- // TODO: free resources
- return TMQ_RESP_ERR__SUCCESS;
- } else {
+ if (rsp == TMQ_RESP_ERR__FAIL) {
return TMQ_RESP_ERR__FAIL;
}
@@ -1485,10 +1485,7 @@ tmq_resp_err_t tmq_consumer_close(tmq_t* tmq) {
rsp = tmq_subscribe(tmq, lst);
tmq_list_destroy(lst);
- if (rsp == TMQ_RESP_ERR__SUCCESS) {
- // TODO: free resources
- return TMQ_RESP_ERR__SUCCESS;
- } else {
+ if (rsp == TMQ_RESP_ERR__FAIL) {
return TMQ_RESP_ERR__FAIL;
}
}
@@ -1512,6 +1509,15 @@ const char* tmq_get_topic_name(TAOS_RES* res) {
}
}
+const char* tmq_get_db_name(TAOS_RES* res) {
+ if (TD_RES_TMQ(res)) {
+ SMqRspObj* pRspObj = (SMqRspObj*)res;
+ return strchr(pRspObj->db, '.') + 1;
+ } else {
+ return NULL;
+ }
+}
+
int32_t tmq_get_vgroup_id(TAOS_RES* res) {
if (TD_RES_TMQ(res)) {
SMqRspObj* pRspObj = (SMqRspObj*)res;
diff --git a/source/client/test/clientTests.cpp b/source/client/test/clientTests.cpp
index d67a361c21777e0dd164f4cdf89bd90145968bf8..914e5aefc2e16595e3c8831f4255bdb26c4738a9 100644
--- a/source/client/test/clientTests.cpp
+++ b/source/client/test/clientTests.cpp
@@ -606,7 +606,7 @@ TEST(testCase, projection_query_tables) {
}
taos_free_result(pRes);
- for(int32_t i = 0; i < 100000; i += 20) {
+ for(int32_t i = 0; i < 1000000; i += 20) {
char sql[1024] = {0};
sprintf(sql,
"insert into tu values(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
@@ -626,23 +626,23 @@ TEST(testCase, projection_query_tables) {
printf("start to insert next table\n");
- for(int32_t i = 0; i < 100000; i += 20) {
- char sql[1024] = {0};
- sprintf(sql,
- "insert into tu2 values(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
- "(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
- "(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
- "(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)",
- i, i, i + 1, i + 1, i + 2, i + 2, i + 3, i + 3, i + 4, i + 4, i + 5, i + 5, i + 6, i + 6, i + 7, i + 7,
- i + 8, i + 8, i + 9, i + 9, i + 10, i + 10, i + 11, i + 11, i + 12, i + 12, i + 13, i + 13, i + 14, i + 14,
- i + 15, i + 15, i + 16, i + 16, i + 17, i + 17, i + 18, i + 18, i + 19, i + 19);
- TAOS_RES* p = taos_query(pConn, sql);
- if (taos_errno(p) != 0) {
- printf("failed to insert data, reason:%s\n", taos_errstr(p));
- }
-
- taos_free_result(p);
- }
+// for(int32_t i = 0; i < 1000000; i += 20) {
+// char sql[1024] = {0};
+// sprintf(sql,
+// "insert into tu2 values(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
+// "(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
+// "(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
+// "(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)",
+// i, i, i + 1, i + 1, i + 2, i + 2, i + 3, i + 3, i + 4, i + 4, i + 5, i + 5, i + 6, i + 6, i + 7, i + 7,
+// i + 8, i + 8, i + 9, i + 9, i + 10, i + 10, i + 11, i + 11, i + 12, i + 12, i + 13, i + 13, i + 14, i + 14,
+// i + 15, i + 15, i + 16, i + 16, i + 17, i + 17, i + 18, i + 18, i + 19, i + 19);
+// TAOS_RES* p = taos_query(pConn, sql);
+// if (taos_errno(p) != 0) {
+// printf("failed to insert data, reason:%s\n", taos_errstr(p));
+// }
+//
+// taos_free_result(p);
+// }
// pRes = taos_query(pConn, "select * from tu");
// if (taos_errno(pRes) != 0) {
@@ -664,7 +664,7 @@ TEST(testCase, projection_query_tables) {
// taos_free_result(pRes);
taos_close(pConn);
}
-
+#if 0
TEST(testCase, projection_query_stables) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(pConn, nullptr);
@@ -705,7 +705,7 @@ TEST(testCase, agg_query_tables) {
}
taos_free_result(pRes);
- pRes = taos_query(pConn, "select tbname from st1");
+ pRes = taos_query(pConn, "explain analyze select count(*) from tu interval(1s)");
if (taos_errno(pRes) != 0) {
printf("failed to select from table, reason:%s\n", taos_errstr(pRes));
taos_free_result(pRes);
@@ -733,5 +733,6 @@ TEST(testCase, agg_query_tables) {
taos_free_result(pRes);
taos_close(pConn);
}
+#endif
#pragma GCC diagnostic pop
diff --git a/source/client/test/smlTest.cpp b/source/client/test/smlTest.cpp
index 217699e36071e1e4c5e93e391e77a95c4f857af8..6dc8088cd1ab9470fefe35666fa186b5acd5f3f6 100644
--- a/source/client/test/smlTest.cpp
+++ b/source/client/test/smlTest.cpp
@@ -1258,4 +1258,48 @@ TEST(testCase, sml_TD15742_Test) {
destroyRequest(request);
smlDestroyInfo(info);
-}
\ No newline at end of file
+}
+
+TEST(testCase, sml_params_Test) {
+ TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
+ ASSERT_NE(taos, nullptr);
+
+ TAOS_RES* pRes = taos_query(taos, "create database if not exists param");
+ taos_free_result(pRes);
+
+ const char *sql[] = {
+ "test_ms,t0=t c0=f 1626006833641",
+ };
+ TAOS_RES* res = taos_schemaless_insert(taos, (char**)sql, 1, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS);
+ ASSERT_EQ(taos_errno(res), TSDB_CODE_PAR_DB_NOT_SPECIFIED);
+ taos_free_result(pRes);
+
+ pRes = taos_query(taos, "use param");
+ taos_free_result(pRes);
+
+ res = taos_schemaless_insert(taos, (char**)sql, 1, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS);
+ ASSERT_EQ(taos_errno(res), TSDB_CODE_SML_INVALID_DB_CONF);
+ taos_free_result(pRes);
+}
+
+TEST(testCase, sml_oom_Test) {
+ TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
+ ASSERT_NE(taos, nullptr);
+
+ TAOS_RES* pRes = taos_query(taos, "create database if not exists oom schemaless 1");
+ taos_free_result(pRes);
+
+ const char *sql[] = {
+ //"test_ms,t0=t c0=f 1626006833641",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"pgxbrbga\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"gviggpmi\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"cexkarjn\",t8=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"rzwwuoxu\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"xphrlkey\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"llsawebj\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"jwpkipff\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"euzzhcvu\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"jumhnsvw\",t8=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"fnetgdhj\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"vrmmpgqe\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"lnpfjapr\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"gvbhmsfr\",t8=L\"ncharTagValue\" c0=t,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"kydxrxwc\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"pfyarryq\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"uxptotap\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"prolhudh\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"ttxaxnac\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"dfgvmjmz\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"bloextkn\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"dvjxwzsi\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"aigjomaf\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"refbidtf\",t8=L\"ncharTagValue\" c0=t,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"vuanlfpz\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"nbpajxkx\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"ktzzauxh\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"prcwdjct\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"vmbhvjtp\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"liuddtuz\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"pddsktow\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"algldlvl\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"mlmnjgdl\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"oiynpcog\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"wmynbagb\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"asvyulrm\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"ohaacrkp\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"ytyejhiq\",t8=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"bbznuerb\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"lpebcibw\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"xmqrbafv\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"lnmwpdne\",t8=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"jpcsjqun\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"mmxqmavz\",t8=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"hhsbgaow\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"uwogyuud\",t8=L\"ncharTagValue\" c0=t,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"ytxpaxnk\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"wouwdvtt\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"iitwikkh\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"lgyzuyaq\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"bdtiigxi\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"qpnsvdhw\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"pjxihgvu\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"ksxkfetn\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"ocukufqs\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"qzerxmpe\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"qwcfdyxs\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"jldrpmmd\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"lucxlfzc\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"rcewrvya\",t8=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"dknvaphs\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"nxtxgzdr\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"mbvuugwz\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"uikakffu\",t8=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"mwmtqsma\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"bfcxrrpa\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"ksajygdj\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"vmhhszyv\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"urwjgvut\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"jrvytcxy\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"evqkzygh\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"zitdznhg\",t8=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"tpqekrxa\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"yrrbgjtk\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"bnphiuyq\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"huknehjn\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"iudbxfke\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"fjmolwbn\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"gukzgcjs\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"bjvdtlgq\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"phxnesxh\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"qgpgckvc\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"yechqtfa\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"pbouxywy\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"kxtuojyo\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"txaniwlj\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"fixgufrj\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"okzvalwq\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"iitawgbn\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"gayvmird\",t8=L\"ncharTagValue\" c0=t,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"dprkfjph\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"kmuccshq\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"vkslsdsd\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"dukccdqk\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"leztxmqf\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"kltixbwz\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"xqhkweef\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"idxsimvz\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"vbruvcpk\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"uxandqkd\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"dsiosysh\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"kxuyanpp\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"wkrktags\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"yvizzpiv\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"ddnefben\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"novmfmbc\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"fnusxsfu\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"ouerfjap\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"sigognkf\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"slvzhede\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"bknerect\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"tmhcdfjb\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"hpnoanpp\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"okmhelnc\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"xcernjin\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"jdmiismg\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"tmnqozrf\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"zgwrftkx\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"zyamlwwh\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"nuedqcro\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"lpsvyqaa\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"mneitsul\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"vpleinwb\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"njxuaedy\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"sdgxpqmu\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"yjirrebp\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"ikqndzfj\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"ghnfdxhr\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"hrwczpvo\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"nattumpb\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"zoyfzazn\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"rdwemofy\",t8=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"phkgsjeg\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"pyhvvjrt\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"zfslyton\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"bxwjzeri\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"uovzzgjv\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"cfjmacvr\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"jefqgzqx\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"njrksxmr\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"mhvabvgn\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"kfekjltr\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"lexfaaby\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"zbblsmwq\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"oqcombkx\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"rcdmhzyw\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"otksuean\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"itbdvowq\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"tswtmhex\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"xoukkzid\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"guangmpq\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"rayxzuky\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"lspwucrv\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"pdprzzkf\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"sddqrtza\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"kabndgkx\",t8=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"aglnqqxs\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"fiwpzmdr\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"hxctooen\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"pckjpwyh\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"ivmvsbai\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"eljdclst\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"rwgdctie\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"zlnthxoz\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"ljtxelle\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"llfggdpy\",t8=L\"ncharTagValue\" c0=t,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"tvnridze\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"hxjpgube\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"zmldmquq\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"bggqwcoj\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"drksfofm\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"jcsixens\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"cdwnwhaf\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"nngpumuq\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"hylgooci\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"cozeyjys\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"lcgpfcsa\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"qdtzhtyd\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"txpubynb\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"gbslzbtu\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"buihcpcl\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"ayqezaiq\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"zgkgtilj\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"bcjopqif\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"mfzxiaqt\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"xmnlqxoj\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"reyiklyf\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"xssuomhk\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"liazkjll\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"nigjlblo\",t8=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"vmojyznk\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"dotkbvrz\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"kuwdyydw\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"slsfqydw\",t8=L\"ncharTagValue\" c0=t,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"zyironhd\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"pktwfhzi\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"xybavsvh\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"pyrxemvx\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"tlfihwjs\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"neumakmg\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"wxqingoa\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ };
+ pRes = taos_query(taos, "use oom");
+ taos_free_result(pRes);
+
+ TAOS_RES* res = taos_schemaless_insert(taos, (char**)sql, 100, TSDB_SML_LINE_PROTOCOL, 0);
+ ASSERT_EQ(taos_errno(res), 0);
+ taos_free_result(pRes);
+}
diff --git a/source/common/src/systable.c b/source/common/src/systable.c
index 9fe7645e2b2c5dab0f2f588013269be53a6756f1..948b50c01a6a0d583fdea5b0d412e9e8440092cb 100644
--- a/source/common/src/systable.c
+++ b/source/common/src/systable.c
@@ -36,7 +36,7 @@ static const SSysDbTableSchema mnodesSchema[] = {
{.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "role", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "role_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
+ {.name = "status", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
};
@@ -91,6 +91,8 @@ static const SSysDbTableSchema userDBSchema[] = {
{.name = "precision", .bytes = 2 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "single_stable", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
{.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
+ {.name = "schemaless", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
+
// {.name = "update", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT}, // disable update
};
@@ -124,13 +126,17 @@ static const SSysDbTableSchema userStbsSchema[] = {
{.name = "table_comment", .bytes = 1024 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
};
-static const SSysDbTableSchema userStreamsSchema[] = {
+static const SSysDbTableSchema streamSchema[] = {
{.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "user_name", .bytes = 23, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "dest_table", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "sql", .bytes = 1024, .type = TSDB_DATA_TYPE_VARCHAR},
-};
+ {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
+ {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
+ {.name = "source_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
+ {.name = "target_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
+ {.name = "target_table", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
+ {.name = "watermark", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
+ {.name = "trigger", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
+ };
static const SSysDbTableSchema userTblsSchema[] = {
{.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
@@ -197,12 +203,14 @@ static const SSysDbTableSchema vgroupsSchema[] = {
{.name = "status", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "nfiles", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "file_size", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
+ {.name = "tsma", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
};
static const SSysDbTableSchema smaSchema[] = {
{.name = "sma_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
+ {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
};
static const SSysDbTableSchema transSchema[] = {
@@ -210,7 +218,6 @@ static const SSysDbTableSchema transSchema[] = {
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "stage", .bytes = TSDB_TRANS_STAGE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "type", .bytes = TSDB_TRANS_TYPE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "failed_times", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "last_exec_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "last_error", .bytes = (TSDB_TRANS_ERROR_LEN - 1) + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
@@ -233,7 +240,7 @@ static const SSysTableMeta infosMeta[] = {
{TSDB_INS_TABLE_USER_FUNCTIONS, userFuncSchema, tListLen(userFuncSchema)},
{TSDB_INS_TABLE_USER_INDEXES, userIdxSchema, tListLen(userIdxSchema)},
{TSDB_INS_TABLE_USER_STABLES, userStbsSchema, tListLen(userStbsSchema)},
- {TSDB_INS_TABLE_USER_STREAMS, userStreamsSchema, tListLen(userStreamsSchema)},
+ {TSDB_PERFS_TABLE_STREAMS, streamSchema, tListLen(streamSchema)},
{TSDB_INS_TABLE_USER_TABLES, userTblsSchema, tListLen(userTblsSchema)},
{TSDB_INS_TABLE_USER_TABLE_DISTRIBUTED, userTblDistSchema, tListLen(userTblDistSchema)},
{TSDB_INS_TABLE_USER_USERS, userUsersSchema, tListLen(userUsersSchema)},
@@ -306,17 +313,7 @@ static const SSysDbTableSchema querySchema[] = {
{.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
};
-static const SSysDbTableSchema streamSchema[] = {
- {.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
- {.name = "source_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "target_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "target_table", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "watermark", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
- {.name = "trigger", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
-};
+
static const SSysTableMeta perfsMeta[] = {
{TSDB_PERFS_TABLE_CONNECTIONS, connectionsSchema, tListLen(connectionsSchema)},
diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c
index 51bcd05ea12a32eb9ac94d1aadea72c37ab531bb..f77b823f3c7d8a9e3f62e98e0f967f9d66ad83d3 100644
--- a/source/common/src/tdatablock.c
+++ b/source/common/src/tdatablock.c
@@ -116,22 +116,23 @@ int32_t colDataAppend(SColumnInfoData* pColumnInfoData, uint32_t currentRow, con
int32_t type = pColumnInfoData->info.type;
if (IS_VAR_DATA_TYPE(type)) {
- int32_t dataLen = varDataTLen(pData);
+ int32_t dataLen = 0;
if (type == TSDB_DATA_TYPE_JSON) {
if (*pData == TSDB_DATA_TYPE_NULL) {
- dataLen = 0;
+ dataLen = CHAR_BYTES;
} else if (*pData == TSDB_DATA_TYPE_NCHAR) {
- dataLen = varDataTLen(pData + CHAR_BYTES);
+ dataLen = varDataTLen(pData + CHAR_BYTES) + CHAR_BYTES;
} else if (*pData == TSDB_DATA_TYPE_DOUBLE) {
- dataLen = DOUBLE_BYTES;
+ dataLen = DOUBLE_BYTES + CHAR_BYTES;
} else if (*pData == TSDB_DATA_TYPE_BOOL) {
- dataLen = CHAR_BYTES;
- } else if (*pData == TSDB_DATA_TYPE_JSON) {
- dataLen = kvRowLen(pData + CHAR_BYTES);
+ dataLen = CHAR_BYTES + CHAR_BYTES;
+ } else if (*pData == TD_TAG_JSON) { // json string
+ dataLen = ((STag*)(pData))->len;
} else {
ASSERT(0);
}
- dataLen += CHAR_BYTES;
+ }else {
+ dataLen = varDataTLen(pData);
}
SVarColAttr* pAttr = &pColumnInfoData->varmeta;
@@ -275,8 +276,10 @@ int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, uint32_t numOfRow1, in
doBitmapMerge(pColumnInfoData, numOfRow1, pSource, numOfRow2);
- int32_t offset = pColumnInfoData->info.bytes * numOfRow1;
- memcpy(pColumnInfoData->pData + offset, pSource->pData, pSource->info.bytes * numOfRow2);
+ if (pSource->pData) {
+ int32_t offset = pColumnInfoData->info.bytes * numOfRow1;
+ memcpy(pColumnInfoData->pData + offset, pSource->pData, pSource->info.bytes * numOfRow2);
+ }
}
return numOfRow1 + numOfRow2;
@@ -319,14 +322,16 @@ int32_t colDataAssign(SColumnInfoData* pColumnInfoData, const SColumnInfoData* p
pColumnInfoData->nullbitmap = tmp;
memcpy(pColumnInfoData->nullbitmap, pSource->nullbitmap, BitmapLen(numOfRows));
- int32_t newSize = numOfRows * pColumnInfoData->info.bytes;
- tmp = taosMemoryRealloc(pColumnInfoData->pData, newSize);
- if (tmp == NULL) {
- return TSDB_CODE_OUT_OF_MEMORY;
- }
+ if (pSource->pData) {
+ int32_t newSize = numOfRows * pColumnInfoData->info.bytes;
+ tmp = taosMemoryRealloc(pColumnInfoData->pData, newSize);
+ if (tmp == NULL) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
- pColumnInfoData->pData = tmp;
- memcpy(pColumnInfoData->pData, pSource->pData, pSource->info.bytes * numOfRows);
+ pColumnInfoData->pData = tmp;
+ memcpy(pColumnInfoData->pData, pSource->pData, pSource->info.bytes * numOfRows);
+ }
}
pColumnInfoData->hasNull = pSource->hasNull;
@@ -350,30 +355,29 @@ int32_t blockDataUpdateTsWindow(SSDataBlock* pDataBlock, int32_t tsColumnIndex)
return -1;
}
- int32_t index = (tsColumnIndex == -1)? 0:tsColumnIndex;
+ int32_t index = (tsColumnIndex == -1) ? 0 : tsColumnIndex;
+
SColumnInfoData* pColInfoData = taosArrayGet(pDataBlock->pDataBlock, index);
if (pColInfoData->info.type != TSDB_DATA_TYPE_TIMESTAMP) {
return 0;
}
- pDataBlock->info.window.skey = *(TSKEY*)colDataGetData(pColInfoData, 0);
- pDataBlock->info.window.ekey = *(TSKEY*)colDataGetData(pColInfoData, (pDataBlock->info.rows - 1));
+ TSKEY skey = *(TSKEY*)colDataGetData(pColInfoData, 0);
+ TSKEY ekey = *(TSKEY*)colDataGetData(pColInfoData, (pDataBlock->info.rows - 1));
+
+ pDataBlock->info.window.skey = TMIN(skey, ekey);
+ pDataBlock->info.window.ekey = TMAX(skey, ekey);
+
return 0;
}
-// if pIndexMap = NULL, merger one column by on column
-int32_t blockDataMerge(SSDataBlock* pDest, const SSDataBlock* pSrc, SArray* pIndexMap) {
+int32_t blockDataMerge(SSDataBlock* pDest, const SSDataBlock* pSrc) {
assert(pSrc != NULL && pDest != NULL);
int32_t capacity = pDest->info.capacity;
for (int32_t i = 0; i < pDest->info.numOfCols; ++i) {
- int32_t mapIndex = i;
- // if (pIndexMap) {
- // mapIndex = *(int32_t*)taosArrayGet(pIndexMap, i);
- // }
-
SColumnInfoData* pCol2 = taosArrayGet(pDest->pDataBlock, i);
- SColumnInfoData* pCol1 = taosArrayGet(pSrc->pDataBlock, mapIndex);
+ SColumnInfoData* pCol1 = taosArrayGet(pSrc->pDataBlock, i);
capacity = pDest->info.capacity;
colDataMergeCol(pCol2, pDest->info.rows, &capacity, pCol1, pSrc->info.rows);
@@ -605,14 +609,15 @@ int32_t blockDataFromBuf(SSDataBlock* pBlock, const char* buf) {
}
int32_t blockDataFromBuf1(SSDataBlock* pBlock, const char* buf, size_t capacity) {
- pBlock->info.rows = *(int32_t*) buf;
- pBlock->info.groupId = *(uint64_t*) (buf + sizeof(int32_t));
+ pBlock->info.rows = *(int32_t*)buf;
+ pBlock->info.groupId = *(uint64_t*)(buf + sizeof(int32_t));
int32_t numOfCols = pBlock->info.numOfCols;
const char* pStart = buf + sizeof(uint32_t) + sizeof(uint64_t);
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, i);
+ pCol->hasNull = true;
if (IS_VAR_DATA_TYPE(pCol->info.type)) {
size_t metaSize = capacity * sizeof(int32_t);
@@ -675,7 +680,7 @@ size_t blockDataGetSerialMetaSize(const SSDataBlock* pBlock) {
return sizeof(int32_t) + sizeof(uint64_t) + pBlock->info.numOfCols * sizeof(int32_t);
}
-double blockDataGetSerialRowSize(const SSDataBlock* pBlock) {
+double blockDataGetSerialRowSize(const SSDataBlock* pBlock) {
ASSERT(pBlock != NULL);
double rowSize = 0;
@@ -1155,7 +1160,9 @@ void colInfoDataCleanup(SColumnInfoData* pColumn, uint32_t numOfRows) {
if (IS_VAR_DATA_TYPE(pColumn->info.type)) {
pColumn->varmeta.length = 0;
} else {
- memset(pColumn->nullbitmap, 0, BitmapLen(numOfRows));
+ if (pColumn->nullbitmap != NULL) {
+ memset(pColumn->nullbitmap, 0, BitmapLen(numOfRows));
+ }
}
}
@@ -1238,7 +1245,7 @@ size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize) {
// the true value must be less than the value of nRows
int32_t additional = 0;
- for(int32_t i = 0; i < pBlock->info.numOfCols; ++i) {
+ for (int32_t i = 0; i < pBlock->info.numOfCols; ++i) {
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, i);
if (IS_VAR_DATA_TYPE(pCol->info.type)) {
additional += nRows * sizeof(int32_t);
@@ -1248,7 +1255,7 @@ size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize) {
}
int32_t newRows = (payloadSize - additional) / rowSize;
- ASSERT(newRows <= nRows && newRows > 1);
+ ASSERT(newRows <= nRows && newRows >= 1);
return newRows;
}
@@ -1271,29 +1278,43 @@ static void doShiftBitmap(char* nullBitmap, size_t n, size_t total) {
memmove(nullBitmap, nullBitmap + n / 8, newLen);
} else {
int32_t tail = n % 8;
- int32_t i = 0;
-
+ int32_t i = 0;
uint8_t* p = (uint8_t*)nullBitmap;
- while (i < len) {
- uint8_t v = p[i];
- p[i] = 0;
- p[i] = (v << tail);
+ if (n < 8) {
+ while (i < len) {
+ uint8_t v = p[i]; // source bitmap value
+ p[i] = (v << tail);
- if (i < len - 1) {
- uint8_t next = p[i + 1];
- p[i] |= (next >> (8 - tail));
+ if (i < len - 1) {
+ uint8_t next = p[i + 1];
+ p[i] |= (next >> (8 - tail));
+ }
+
+ i += 1;
}
+ } else if (n > 8) {
+ int32_t gap = len - newLen;
+ while(i < newLen) {
+ uint8_t v = p[i + gap];
+ p[i] = (v << tail);
+
+ if (i < newLen - 1) {
+ uint8_t next = p[i + gap + 1];
+ p[i] |= (next >> (8 - tail));
+ }
- i += 1;
+ i += 1;
+ }
}
}
}
+
static void colDataTrimFirstNRows(SColumnInfoData* pColInfoData, size_t n, size_t total) {
if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) {
- memmove(pColInfoData->varmeta.offset, &pColInfoData->varmeta.offset[n], (total - n));
- memset(&pColInfoData->varmeta.offset[total - n - 1], 0, n);
+ memmove(pColInfoData->varmeta.offset, &pColInfoData->varmeta.offset[n], (total - n) * sizeof(int32_t));
+ memset(&pColInfoData->varmeta.offset[total - n], 0, n);
} else {
int32_t bytes = pColInfoData->info.bytes;
memmove(pColInfoData->pData, ((char*)pColInfoData->pData + n * bytes), (total - n) * bytes);
@@ -1462,7 +1483,7 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
}
void blockDebugShowData(const SArray* dataBlocks) {
- char pBuf[128];
+ char pBuf[128] = {0};
int32_t sz = taosArrayGetSize(dataBlocks);
for (int32_t i = 0; i < sz; i++) {
SSDataBlock* pDataBlock = taosArrayGet(dataBlocks, i);
@@ -1510,14 +1531,11 @@ void blockDebugShowData(const SArray* dataBlocks) {
* @param pReq
* @param pDataBlocks
* @param vgId
- * @param uid set as parameter temporarily // TODO: remove this parameter, and the executor should set uid in
- * SDataBlock->info.uid
* @param suid // TODO: check with Liao whether suid response is reasonable
*
* TODO: colId should be set
*/
-int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks, STSchema* pTSchema, int32_t vgId,
- tb_uid_t uid, tb_uid_t suid) {
+int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks, STSchema* pTSchema, int32_t vgId, tb_uid_t suid) {
int32_t sz = taosArrayGetSize(pDataBlocks);
int32_t bufSize = sizeof(SSubmitReq);
for (int32_t i = 0; i < sz; ++i) {
@@ -1553,7 +1571,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks
SSubmitBlk* pSubmitBlk = POINTER_SHIFT(pDataBuf, msgLen);
pSubmitBlk->suid = suid;
- pSubmitBlk->uid = uid;
+ pSubmitBlk->uid = pDataBlock->info.groupId;
pSubmitBlk->numOfRows = rows;
++numOfBlks;
@@ -1564,6 +1582,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks
tdSRowResetBuf(&rb, POINTER_SHIFT(pDataBuf, msgLen)); // set row buf
printf("|");
bool isStartKey = false;
+ int32_t offset = 0;
for (int32_t k = 0; k < colNum; ++k) { // iterate by column
SColumnInfoData* pColInfoData = taosArrayGet(pDataBlock->pDataBlock, k);
void* var = POINTER_SHIFT(pColInfoData->pData, j * pColInfoData->info.bytes);
@@ -1572,18 +1591,18 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks
if (!isStartKey) {
isStartKey = true;
tdAppendColValToRow(&rb, PRIMARYKEY_TIMESTAMP_COL_ID, TSDB_DATA_TYPE_TIMESTAMP, TD_VTYPE_NORM, var, true,
- 0, 0);
+ offset, k);
+
} else {
- tdAppendColValToRow(&rb, 2, TSDB_DATA_TYPE_TIMESTAMP, TD_VTYPE_NORM, var, true, 8, k);
- break;
+ tdAppendColValToRow(&rb, 2, TSDB_DATA_TYPE_TIMESTAMP, TD_VTYPE_NORM, var, true, offset, k);
}
break;
case TSDB_DATA_TYPE_NCHAR: {
- tdAppendColValToRow(&rb, 2, TSDB_DATA_TYPE_NCHAR, TD_VTYPE_NORM, var, true, 8, k);
+ tdAppendColValToRow(&rb, 2, TSDB_DATA_TYPE_NCHAR, TD_VTYPE_NORM, var, true, offset, k);
break;
}
case TSDB_DATA_TYPE_VARCHAR: { // TSDB_DATA_TYPE_BINARY
- tdAppendColValToRow(&rb, 2, TSDB_DATA_TYPE_VARCHAR, TD_VTYPE_NORM, var, true, 8, k);
+ tdAppendColValToRow(&rb, 2, TSDB_DATA_TYPE_VARCHAR, TD_VTYPE_NORM, var, true, offset, k);
break;
}
case TSDB_DATA_TYPE_VARBINARY:
@@ -1595,13 +1614,14 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks
break;
default:
if (pColInfoData->info.type < TSDB_DATA_TYPE_MAX && pColInfoData->info.type > TSDB_DATA_TYPE_NULL) {
- tdAppendColValToRow(&rb, 2, pColInfoData->info.type, TD_VTYPE_NORM, var, true, 8, k);
+ tdAppendColValToRow(&rb, 2, pColInfoData->info.type, TD_VTYPE_NORM, var, true, offset, k);
} else {
printf("the column type %" PRIi16 " is undefined\n", pColInfoData->info.type);
TASSERT(0);
}
break;
}
+ offset += TYPE_BYTES[pColInfoData->info.type];
}
dataLen += TD_ROW_LEN(rb.pBuf);
}
@@ -1632,8 +1652,13 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks
}
SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, bool createTb, int64_t suid,
- int32_t vgId) {
+ const char* stbFullName, int32_t vgId) {
SSubmitReq* ret = NULL;
+ SArray* tagArray = taosArrayInit(1, sizeof(STagVal));
+ if(!tagArray) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return NULL;
+ }
// cal size
int32_t cap = sizeof(SSubmitReq);
@@ -1648,22 +1673,40 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo
if (createTb) {
SVCreateTbReq createTbReq = {0};
- createTbReq.name = "a";
+ char* cname = taosMemoryCalloc(1, TSDB_TABLE_FNAME_LEN);
+ snprintf(cname, TSDB_TABLE_FNAME_LEN, "%s:%ld", stbFullName, pDataBlock->info.groupId);
+ createTbReq.name = cname;
createTbReq.flags = 0;
createTbReq.type = TSDB_CHILD_TABLE;
- createTbReq.ctb.suid = htobe64(suid);
+ createTbReq.ctb.suid = suid;
- SKVRowBuilder kvRowBuilder = {0};
- if (tdInitKVRowBuilder(&kvRowBuilder) < 0) {
- ASSERT(0);
+
+
+ STagVal tagVal = {.cid = 1,
+ .type = TSDB_DATA_TYPE_UBIGINT,
+ .pData = (uint8_t*)&pDataBlock->info.groupId,
+ .nData = sizeof(uint64_t)};
+ STag* pTag = NULL;
+ taosArrayClear(tagArray);
+ taosArrayPush(tagArray, &tagVal);
+ tTagNew(tagArray, 1, false, &pTag);
+ if (!pTag) {
+ tdDestroySVCreateTbReq(&createTbReq);
+ taosArrayDestroy(tagArray);
+ return NULL;
}
- tdAddColToKVRow(&kvRowBuilder, 1, &pDataBlock->info.groupId, sizeof(uint64_t));
- createTbReq.ctb.pTag = tdGetKVRowFromBuilder(&kvRowBuilder);
- tdDestroyKVRowBuilder(&kvRowBuilder);
+ createTbReq.ctb.pTag = (uint8_t*)pTag;
int32_t code;
tEncodeSize(tEncodeSVCreateTbReq, &createTbReq, schemaLen, code);
- if (code < 0) return NULL;
+
+ tdDestroySVCreateTbReq(&createTbReq);
+
+ if (code < 0) {
+ tdDestroySVCreateTbReq(&createTbReq);
+ taosArrayDestroy(tagArray);
+ return NULL;
+ }
}
cap += sizeof(SSubmitBlk) + schemaLen + rows * maxLen;
@@ -1699,27 +1742,49 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo
int32_t schemaLen = 0;
if (createTb) {
SVCreateTbReq createTbReq = {0};
- createTbReq.name = "a";
+ char* cname = taosMemoryCalloc(1, TSDB_TABLE_FNAME_LEN);
+ snprintf(cname, TSDB_TABLE_FNAME_LEN, "%s:%ld", stbFullName, pDataBlock->info.groupId);
+ createTbReq.name = cname;
createTbReq.flags = 0;
createTbReq.type = TSDB_CHILD_TABLE;
createTbReq.ctb.suid = suid;
- SKVRowBuilder kvRowBuilder = {0};
- if (tdInitKVRowBuilder(&kvRowBuilder) < 0) {
- ASSERT(0);
+ STagVal tagVal = {.cid = 1,
+ .type = TSDB_DATA_TYPE_UBIGINT,
+ .pData = (uint8_t*)&pDataBlock->info.groupId,
+ .nData = sizeof(uint64_t)};
+ taosArrayClear(tagArray);
+ taosArrayPush(tagArray, &tagVal);
+ STag* pTag = NULL;
+ tTagNew(tagArray, 1, false, &pTag);
+ if (!pTag) {
+ tdDestroySVCreateTbReq(&createTbReq);
+ taosArrayDestroy(tagArray);
+ taosMemoryFreeClear(ret);
+ return NULL;
}
- tdAddColToKVRow(&kvRowBuilder, 1, &pDataBlock->info.groupId, sizeof(uint64_t));
- createTbReq.ctb.pTag = tdGetKVRowFromBuilder(&kvRowBuilder);
- tdDestroyKVRowBuilder(&kvRowBuilder);
+ createTbReq.ctb.pTag = (uint8_t*)pTag;
int32_t code;
tEncodeSize(tEncodeSVCreateTbReq, &createTbReq, schemaLen, code);
- if (code < 0) return NULL;
+ if (code < 0) {
+ tdDestroySVCreateTbReq(&createTbReq);
+ taosArrayDestroy(tagArray);
+ taosMemoryFreeClear(ret);
+ return NULL;
+ }
SEncoder encoder = {0};
tEncoderInit(&encoder, blockData, schemaLen);
- if (tEncodeSVCreateTbReq(&encoder, &createTbReq) < 0) return NULL;
+ code = tEncodeSVCreateTbReq(&encoder, &createTbReq);
tEncoderClear(&encoder);
+ tdDestroySVCreateTbReq(&createTbReq);
+
+ if (code < 0) {
+ taosArrayDestroy(tagArray);
+ taosMemoryFreeClear(ret);
+ return NULL;
+ }
}
blkHead->schemaLen = htonl(schemaLen);
@@ -1734,8 +1799,12 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo
for (int32_t k = 0; k < pTSchema->numOfCols; k++) {
const STColumn* pColumn = &pTSchema->columns[k];
SColumnInfoData* pColData = taosArrayGet(pDataBlock->pDataBlock, k);
- void* data = colDataGetData(pColData, j);
- tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, pColumn->offset, k);
+ if (colDataIsNull_s(pColData, j)) {
+ tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NONE, NULL, false, pColumn->offset, k);
+ } else {
+ void* data = colDataGetData(pColData, j);
+ tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, pColumn->offset, k);
+ }
}
int32_t rowLen = TD_ROW_LEN(rowData);
rowData = POINTER_SHIFT(rowData, rowLen);
@@ -1750,5 +1819,102 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo
}
ret->length = htonl(ret->length);
+ taosArrayDestroy(tagArray);
return ret;
}
+
+void blockCompressEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_t numOfCols, int8_t needCompress) {
+ int32_t* actualLen = (int32_t*)data;
+ data += sizeof(int32_t);
+
+ uint64_t* groupId = (uint64_t*)data;
+ data += sizeof(uint64_t);
+
+ int32_t* colSizes = (int32_t*)data;
+ data += numOfCols * sizeof(int32_t);
+
+ *dataLen = (numOfCols * sizeof(int32_t) + sizeof(uint64_t) + sizeof(int32_t));
+
+ int32_t numOfRows = pBlock->info.rows;
+ for (int32_t col = 0; col < numOfCols; ++col) {
+ SColumnInfoData* pColRes = (SColumnInfoData*)taosArrayGet(pBlock->pDataBlock, col);
+
+ // copy the null bitmap
+ if (IS_VAR_DATA_TYPE(pColRes->info.type)) {
+ size_t metaSize = numOfRows * sizeof(int32_t);
+ memcpy(data, pColRes->varmeta.offset, metaSize);
+ data += metaSize;
+ (*dataLen) += metaSize;
+ } else {
+ int32_t len = BitmapLen(numOfRows);
+ memcpy(data, pColRes->nullbitmap, len);
+ data += len;
+ (*dataLen) += len;
+ }
+
+ if (needCompress) {
+ colSizes[col] = blockCompressColData(pColRes, numOfRows, data, needCompress);
+ data += colSizes[col];
+ (*dataLen) += colSizes[col];
+ } else {
+ colSizes[col] = colDataGetLength(pColRes, numOfRows);
+ (*dataLen) += colSizes[col];
+ memmove(data, pColRes->pData, colSizes[col]);
+ data += colSizes[col];
+ }
+
+ colSizes[col] = htonl(colSizes[col]);
+ }
+
+ *actualLen = *dataLen;
+ *groupId = pBlock->info.groupId;
+}
+
+const char* blockCompressDecode(SSDataBlock* pBlock, int32_t numOfCols, int32_t numOfRows, const char* pData) {
+ blockDataEnsureCapacity(pBlock, numOfRows);
+ const char* pStart = pData;
+
+ int32_t dataLen = *(int32_t*)pStart;
+ pStart += sizeof(int32_t);
+
+ pBlock->info.groupId = *(uint64_t*)pStart;
+ pStart += sizeof(uint64_t);
+
+ int32_t* colLen = (int32_t*)pStart;
+ pStart += sizeof(int32_t) * numOfCols;
+
+ for (int32_t i = 0; i < numOfCols; ++i) {
+ colLen[i] = htonl(colLen[i]);
+ ASSERT(colLen[i] >= 0);
+
+ SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
+ if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) {
+ pColInfoData->varmeta.length = colLen[i];
+ pColInfoData->varmeta.allocLen = colLen[i];
+
+ memcpy(pColInfoData->varmeta.offset, pStart, sizeof(int32_t) * numOfRows);
+ pStart += sizeof(int32_t) * numOfRows;
+
+ if (colLen[i] > 0) {
+ taosMemoryFreeClear(pColInfoData->pData);
+ pColInfoData->pData = taosMemoryMalloc(colLen[i]);
+ }
+ } else {
+ memcpy(pColInfoData->nullbitmap, pStart, BitmapLen(numOfRows));
+ pStart += BitmapLen(numOfRows);
+ }
+
+ if (colLen[i] > 0) {
+ memcpy(pColInfoData->pData, pStart, colLen[i]);
+ }
+
+ // TODO
+ // setting this flag to true temporarily so aggregate function on stable will
+ // examine NULL value for non-primary key column
+ pColInfoData->hasNull = true;
+ pStart += colLen[i];
+ }
+
+ ASSERT(pStart - pData == dataLen);
+ return pStart;
+}
\ No newline at end of file
diff --git a/source/common/src/tdataformat.c b/source/common/src/tdataformat.c
index f82df0d9bc5b670f12f896406a8b3df399a52a60..65daee650840a5bea69b2c0dcb687ea76b4dc705 100644
--- a/source/common/src/tdataformat.c
+++ b/source/common/src/tdataformat.c
@@ -19,31 +19,15 @@
#include "tdatablock.h"
#include "tlog.h"
-typedef struct SKVIdx {
- int32_t cid;
- int32_t offset;
-} SKVIdx;
+static int32_t tGetTagVal(uint8_t *p, STagVal *pTagVal, int8_t isJson);
#pragma pack(push, 1)
typedef struct {
int16_t nCols;
- SKVIdx idx[];
+ uint8_t idx[];
} STSKVRow;
#pragma pack(pop)
-typedef struct STagIdx {
- int16_t cid;
- uint16_t offset;
-} STagIdx;
-
-#pragma pack(push, 1)
-struct STag {
- uint16_t len;
- uint16_t nTag;
- STagIdx idx[];
-};
-#pragma pack(pop)
-
#define TSROW_IS_KV_ROW(r) ((r)->flags & TSROW_KV_ROW)
#define BIT1_SIZE(n) (((n)-1) / 8 + 1)
#define BIT2_SIZE(n) (((n)-1) / 4 + 1)
@@ -54,171 +38,551 @@ struct STag {
static FORCE_INLINE int tSKVIdxCmprFn(const void *p1, const void *p2);
-// STSRow2
-int32_t tPutTSRow(uint8_t *p, STSRow2 *pRow) {
+// SValue
+static FORCE_INLINE int32_t tPutValue(uint8_t *p, SValue *pValue, int8_t type) {
int32_t n = 0;
- n += tPutI64(p ? p + n : p, pRow->ts);
- n += tPutI8(p ? p + n : p, pRow->flags);
- n += tPutI32v(p ? p + n : p, pRow->sver);
+ if (IS_VAR_DATA_TYPE(type)) {
+ n += tPutBinary(p ? p + n : p, pValue->pData, pValue->nData);
+ } else {
+ switch (type) {
+ case TSDB_DATA_TYPE_BOOL:
+ n += tPutI8(p ? p + n : p, pValue->i8 ? 1 : 0);
+ break;
+ case TSDB_DATA_TYPE_TINYINT:
+ n += tPutI8(p ? p + n : p, pValue->i8);
+ break;
+ case TSDB_DATA_TYPE_SMALLINT:
+ n += tPutI16(p ? p + n : p, pValue->i16);
+ break;
+ case TSDB_DATA_TYPE_INT:
+ n += tPutI32(p ? p + n : p, pValue->i32);
+ break;
+ case TSDB_DATA_TYPE_BIGINT:
+ n += tPutI64(p ? p + n : p, pValue->i64);
+ break;
+ case TSDB_DATA_TYPE_FLOAT:
+ n += tPutFloat(p ? p + n : p, pValue->f);
+ break;
+ case TSDB_DATA_TYPE_DOUBLE:
+ n += tPutDouble(p ? p + n : p, pValue->d);
+ break;
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ n += tPutI64(p ? p + n : p, pValue->ts);
+ break;
+ case TSDB_DATA_TYPE_UTINYINT:
+ n += tPutU8(p ? p + n : p, pValue->u8);
+ break;
+ case TSDB_DATA_TYPE_USMALLINT:
+ n += tPutU16(p ? p + n : p, pValue->u16);
+ break;
+ case TSDB_DATA_TYPE_UINT:
+ n += tPutU32(p ? p + n : p, pValue->u32);
+ break;
+ case TSDB_DATA_TYPE_UBIGINT:
+ n += tPutU64(p ? p + n : p, pValue->u64);
+ break;
+ default:
+ ASSERT(0);
+ }
+ }
- ASSERT(pRow->flags & 0xf);
+ return n;
+}
- switch (pRow->flags & 0xf) {
- case TSROW_HAS_NONE:
- case TSROW_HAS_NULL:
- break;
- default:
- n += tPutBinary(p ? p + n : p, pRow->pData, pRow->nData);
- break;
+static FORCE_INLINE int32_t tGetValue(uint8_t *p, SValue *pValue, int8_t type) {
+ int32_t n = 0;
+
+ if (IS_VAR_DATA_TYPE(type)) {
+ n += tGetBinary(p, &pValue->pData, pValue ? &pValue->nData : NULL);
+ } else {
+ switch (type) {
+ case TSDB_DATA_TYPE_BOOL:
+ n += tGetI8(p, &pValue->i8);
+ break;
+ case TSDB_DATA_TYPE_TINYINT:
+ n += tGetI8(p, &pValue->i8);
+ break;
+ case TSDB_DATA_TYPE_SMALLINT:
+ n += tGetI16(p, &pValue->i16);
+ break;
+ case TSDB_DATA_TYPE_INT:
+ n += tGetI32(p, &pValue->i32);
+ break;
+ case TSDB_DATA_TYPE_BIGINT:
+ n += tGetI64(p, &pValue->i64);
+ break;
+ case TSDB_DATA_TYPE_FLOAT:
+ n += tGetFloat(p, &pValue->f);
+ break;
+ case TSDB_DATA_TYPE_DOUBLE:
+ n += tGetDouble(p, &pValue->d);
+ break;
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ n += tGetI64(p, &pValue->ts);
+ break;
+ case TSDB_DATA_TYPE_UTINYINT:
+ n += tGetU8(p, &pValue->u8);
+ break;
+ case TSDB_DATA_TYPE_USMALLINT:
+ n += tGetU16(p, &pValue->u16);
+ break;
+ case TSDB_DATA_TYPE_UINT:
+ n += tGetU32(p, &pValue->u32);
+ break;
+ case TSDB_DATA_TYPE_UBIGINT:
+ n += tGetU64(p, &pValue->u64);
+ break;
+ default:
+ ASSERT(0);
+ }
}
return n;
}
-int32_t tGetTSRow(uint8_t *p, STSRow2 *pRow) {
- int32_t n = 0;
- uint8_t flags;
+// STSRow2 ========================================================================
+static void tTupleTSRowNew(SArray *pArray, STSchema *pTSchema, STSRow2 *pRow) {
+ int32_t nColVal = taosArrayGetSize(pArray);
+ STColumn *pTColumn;
+ SColVal *pColVal;
+
+ ASSERT(nColVal > 0);
+
+ pRow->sver = pTSchema->version;
+
+ // ts
+ pTColumn = &pTSchema->columns[0];
+ pColVal = (SColVal *)taosArrayGet(pArray, 0);
- n += tGetI64(p + n, pRow ? &pRow->ts : NULL);
- n += tGetI8(p + n, pRow ? &pRow->flags : &flags);
- n += tGetI32v(p + n, pRow ? &pRow->sver : NULL);
+ ASSERT(pTColumn->colId == 0 && pColVal->cid == 0);
+ ASSERT(pTColumn->type == TSDB_DATA_TYPE_TIMESTAMP);
+
+ pRow->ts = pColVal->value.ts;
+
+ // other fields
+ int32_t iColVal = 1;
+ int32_t bidx;
+ uint32_t nv = 0;
+ uint8_t *pb = NULL;
+ uint8_t *pf = NULL;
+ uint8_t *pv = NULL;
+ uint8_t flags = 0;
+ for (int32_t iColumn = 1; iColumn < pTSchema->numOfCols; iColumn++) {
+ bidx = iColumn - 1;
+ pTColumn = &pTSchema->columns[iColumn];
+
+ if (iColVal < nColVal) {
+ pColVal = (SColVal *)taosArrayGet(pArray, iColVal);
+ } else {
+ pColVal = NULL;
+ }
+
+ if (pColVal) {
+ if (pColVal->cid == pTColumn->colId) {
+ iColVal++;
+ if (pColVal->isNone) {
+ goto _set_none;
+ } else if (pColVal->isNull) {
+ goto _set_null;
+ } else {
+ goto _set_value;
+ }
+ } else if (pColVal->cid > pTColumn->colId) {
+ goto _set_none;
+ } else {
+ ASSERT(0);
+ }
+ } else {
+ goto _set_none;
+ }
- if (pRow) flags = pRow->flags;
+ _set_none:
+ flags |= TSROW_HAS_NONE;
+ // SET_BIT2(pb, bidx, 0); (todo)
+ continue;
+
+ _set_null:
+ flags != TSROW_HAS_NULL;
+ // SET_BIT2(pb, bidx, 1); (todo)
+ continue;
+
+ _set_value:
+ flags != TSROW_HAS_VAL;
+ // SET_BIT2(pb, bidx, 2); (todo)
+ if (IS_VAR_DATA_TYPE(pTColumn->type)) {
+ // nv += tPutColVal(pv ? pv + nv : pv, pColVal, pTColumn->type, 1);
+ } else {
+ // tPutColVal(pf ? pf + pTColumn->offset : pf, pColVal, pTColumn->type, 1);
+ }
+ continue;
+ }
+
+ ASSERT(flags);
switch (flags & 0xf) {
case TSROW_HAS_NONE:
case TSROW_HAS_NULL:
+ pRow->nData = 0;
+ break;
+ case TSROW_HAS_VAL:
+ pRow->nData = pTSchema->flen + nv;
+ break;
+ case TSROW_HAS_NULL | TSROW_HAS_NONE:
+ pRow->nData = BIT1_SIZE(pTSchema->numOfCols - 1);
+ break;
+ case TSROW_HAS_VAL | TSROW_HAS_NONE:
+ case TSROW_HAS_VAL | TSROW_HAS_NULL:
+ pRow->nData = BIT1_SIZE(pTSchema->numOfCols - 1) + pTSchema->flen + nv;
+ break;
+ case TSROW_HAS_VAL | TSROW_HAS_NULL | TSROW_HAS_NONE:
+ pRow->nData = BIT2_SIZE(pTSchema->numOfCols - 1) + pTSchema->flen + nv;
break;
default:
- n += tGetBinary(p + n, pRow ? &pRow->pData : NULL, pRow ? &pRow->nData : NULL);
break;
}
+}
- return n;
+static void tMapTSRowNew(SArray *pArray, STSchema *pTSchema, STSRow2 *pRow) {
+ int32_t nColVal = taosArrayGetSize(pArray);
+ STColumn *pTColumn;
+ SColVal *pColVal;
+
+ ASSERT(nColVal > 0);
+
+ pRow->sver = pTSchema->version;
+
+ // ts
+ pTColumn = &pTSchema->columns[0];
+ pColVal = (SColVal *)taosArrayGet(pArray, 0);
+
+ ASSERT(pTColumn->colId == 0 && pColVal->cid == 0);
+ ASSERT(pTColumn->type == TSDB_DATA_TYPE_TIMESTAMP);
+
+ pRow->ts = pColVal->value.ts;
+
+ // other fields
+ int32_t iColVal = 1;
+ uint32_t nv = 0;
+ uint8_t *pv = NULL;
+ uint8_t *pidx = NULL;
+ uint8_t flags = 0;
+ int16_t nCol = 0;
+ for (int32_t iColumn = 1; iColumn < pTSchema->numOfCols; iColumn++) {
+ pTColumn = &pTSchema->columns[iColumn];
+
+ if (iColVal < nColVal) {
+ pColVal = (SColVal *)taosArrayGet(pArray, iColVal);
+ } else {
+ pColVal = NULL;
+ }
+
+ if (pColVal) {
+ if (pColVal->cid == pTColumn->colId) {
+ iColVal++;
+ if (pColVal->isNone) {
+ goto _set_none;
+ } else if (pColVal->isNull) {
+ goto _set_null;
+ } else {
+ goto _set_value;
+ }
+ } else if (pColVal->cid > pTColumn->colId) {
+ goto _set_none;
+ } else {
+ ASSERT(0);
+ }
+ } else {
+ goto _set_none;
+ }
+
+ _set_none:
+ flags |= TSROW_HAS_NONE;
+ continue;
+
+ _set_null:
+ flags != TSROW_HAS_NULL;
+ pidx[nCol++] = nv;
+ // nv += tPutColVal(pv ? pv + nv : pv, pColVal, pTColumn->type, 0);
+ continue;
+
+ _set_value:
+ flags != TSROW_HAS_VAL;
+ pidx[nCol++] = nv;
+ // nv += tPutColVal(pv ? pv + nv : pv, pColVal, pTColumn->type, 0);
+ continue;
+ }
+
+ if (nv <= UINT8_MAX) {
+ // small
+ } else if (nv <= UINT16_MAX) {
+ // mid
+ } else {
+ // large
+ }
+}
+
+// try-decide-build
+int32_t tTSRowNew(SArray *pArray, STSchema *pTSchema, STSRow2 **ppRow) {
+ int32_t code = 0;
+ STSRow2 rowT = {0};
+ STSRow2 rowM = {0};
+
+ // try
+ tTupleTSRowNew(pArray, pTSchema, &rowT);
+ tMapTSRowNew(pArray, pTSchema, &rowM);
+
+ // decide & build
+ if (rowT.nData <= rowM.nData) {
+ tTupleTSRowNew(pArray, pTSchema, &rowT);
+ } else {
+ tMapTSRowNew(pArray, pTSchema, &rowM);
+ }
+
+ return code;
}
-int32_t tTSRowDup(const STSRow2 *pRow, STSRow2 **ppRow) {
- (*ppRow) = taosMemoryMalloc(sizeof(*pRow) + pRow->nData);
+int32_t tTSRowClone(const STSRow2 *pRow, STSRow2 **ppRow) {
+ int32_t code = 0;
+
+ (*ppRow) = (STSRow2 *)taosMemoryMalloc(sizeof(**ppRow));
if (*ppRow == NULL) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- return -1;
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
}
+ **ppRow = *pRow;
+ (*ppRow)->pData = NULL;
- (*ppRow)->ts = pRow->ts;
- (*ppRow)->flags = pRow->flags;
- (*ppRow)->sver = pRow->sver;
- (*ppRow)->nData = pRow->nData;
if (pRow->nData) {
- (*ppRow)->pData = (uint8_t *)(&(*ppRow)[1]);
+ (*ppRow)->pData = taosMemoryMalloc(pRow->nData);
+ if ((*ppRow)->pData == NULL) {
+ taosMemoryFree(*ppRow);
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
+ }
memcpy((*ppRow)->pData, pRow->pData, pRow->nData);
- } else {
- (*ppRow)->pData = NULL;
}
- return 0;
+_exit:
+ return code;
}
void tTSRowFree(STSRow2 *pRow) {
- if (pRow) taosMemoryFree(pRow);
+ if (pRow) {
+ if (pRow->pData) taosMemoryFree(pRow->pData);
+ taosMemoryFree(pRow);
+ }
}
-int32_t tTSRowGet(const STSRow2 *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal) {
- uint32_t n;
- uint8_t *p;
- uint8_t v;
- int32_t bidx = iCol - 1;
+void tTSRowGet(STSRow2 *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal) {
+ uint8_t isTuple = (pRow->flags & 0xf0 == 0) ? 1 : 0;
STColumn *pTColumn = &pTSchema->columns[iCol];
- STSKVRow *pTSKVRow;
- SKVIdx *pKVIdx;
+ uint8_t flags = pRow->flags & (uint8_t)0xf;
+ SValue value;
- ASSERT(iCol != 0);
- ASSERT(pTColumn->colId != 0);
+ ASSERT(iCol < pTSchema->numOfCols);
+ ASSERT(flags);
+ ASSERT(pRow->sver == pTSchema->version);
- ASSERT(pRow->flags & 0xf != 0);
- switch (pRow->flags & 0xf) {
- case TSROW_HAS_NONE:
- *pColVal = ColValNONE;
- return 0;
- case TSROW_HAS_NULL:
- *pColVal = ColValNULL;
- return 0;
+ if (iCol == 0) {
+ value.ts = pRow->ts;
+ goto _return_value;
}
- if (TSROW_IS_KV_ROW(pRow)) {
- ASSERT((pRow->flags & 0xf) != TSROW_HAS_VAL);
+ if (flags == TSROW_HAS_NONE) {
+ goto _return_none;
+ } else if (flags == TSROW_HAS_NONE) {
+ goto _return_null;
+ }
- pTSKVRow = (STSKVRow *)pRow->pData;
- pKVIdx =
- bsearch(&((SKVIdx){.cid = pTColumn->colId}), pTSKVRow->idx, pTSKVRow->nCols, sizeof(SKVIdx), tSKVIdxCmprFn);
- if (pKVIdx == NULL) {
- *pColVal = ColValNONE;
- } else if (pKVIdx->offset < 0) {
- *pColVal = ColValNULL;
- } else {
- p = pRow->pData + sizeof(STSKVRow) + sizeof(SKVIdx) * pTSKVRow->nCols + pKVIdx->offset;
- pColVal->type = COL_VAL_DATA;
- tGetBinary(p, &pColVal->pData, &pColVal->nData);
- }
- } else {
- // get bitmap
- p = pRow->pData;
- switch (pRow->flags & 0xf) {
+ ASSERT(pRow->nData && pRow->pData);
+
+ if (isTuple) {
+ uint8_t *pb = pRow->pData;
+ uint8_t *pf = NULL;
+ uint8_t *pv = NULL;
+ uint8_t *p;
+ uint8_t b;
+
+ // bit
+ switch (flags) {
+ case TSROW_HAS_VAL:
+ pf = pb;
+ break;
case TSROW_HAS_NULL | TSROW_HAS_NONE:
- v = GET_BIT1(p, bidx);
- if (v == 0) {
- *pColVal = ColValNONE;
+ b = GET_BIT1(pb, iCol - 1);
+ if (b == 0) {
+ goto _return_none;
} else {
- *pColVal = ColValNULL;
+ goto _return_null;
}
- return 0;
case TSROW_HAS_VAL | TSROW_HAS_NONE:
- v = GET_BIT1(p, bidx);
- if (v == 1) {
- p = p + BIT1_SIZE(pTSchema->numOfCols - 1);
- break;
+ b = GET_BIT1(pb, iCol - 1);
+ if (b == 0) {
+ goto _return_none;
} else {
- *pColVal = ColValNONE;
- return 0;
+ pf = pb + BIT1_SIZE(pTSchema->numOfCols - 1);
+ break;
}
case TSROW_HAS_VAL | TSROW_HAS_NULL:
- v = GET_BIT1(p, bidx);
- if (v == 1) {
- p = p + BIT1_SIZE(pTSchema->numOfCols - 1);
- break;
+ b = GET_BIT1(pb, iCol - 1);
+ if (b == 0) {
+ goto _return_null;
} else {
- *pColVal = ColValNULL;
- return 0;
+ pf = pb + BIT1_SIZE(pTSchema->numOfCols - 1);
+ break;
}
case TSROW_HAS_VAL | TSROW_HAS_NULL | TSROW_HAS_NONE:
- v = GET_BIT2(p, bidx);
- if (v == 0) {
- *pColVal = ColValNONE;
- return 0;
- } else if (v == 1) {
- *pColVal = ColValNULL;
- return 0;
- } else if (v == 2) {
- p = p + BIT2_SIZE(pTSchema->numOfCols - 1);
- break;
+ b = GET_BIT2(pb, iCol - 1);
+ if (b == 0) {
+ goto _return_none;
+ } else if (b == 1) {
+ goto _return_null;
} else {
- ASSERT(0);
+ pf = pb + BIT2_SIZE(pTSchema->numOfCols - 1);
+ break;
}
default:
- break;
+ ASSERT(0);
}
- // get real value
- p = p + pTColumn->offset;
- pColVal->type = COL_VAL_DATA;
+ ASSERT(pf);
+
+ p = pf + pTColumn->offset;
if (IS_VAR_DATA_TYPE(pTColumn->type)) {
- tGetBinary(p + pTSchema->flen + *(int32_t *)p, &pColVal->pData, &pColVal->nData);
+ pv = pf + pTSchema->flen;
+ p = pv + *(VarDataOffsetT *)p;
+ }
+ tGetValue(p, &value, pTColumn->type);
+ goto _return_value;
+ } else {
+ STSKVRow *pRowK = (STSKVRow *)pRow->pData;
+ int16_t lidx = 0;
+ int16_t ridx = pRowK->nCols - 1;
+ uint8_t *p;
+ int16_t midx;
+ uint32_t n;
+ int16_t cid;
+
+ ASSERT(pRowK->nCols > 0);
+
+ if (pRow->flags & TSROW_KV_SMALL) {
+ p = pRow->pData + sizeof(STSKVRow) + sizeof(uint8_t) * pRowK->nCols;
+ } else if (pRow->flags & TSROW_KV_MID) {
+ p = pRow->pData + sizeof(STSKVRow) + sizeof(uint16_t) * pRowK->nCols;
+ } else if (pRow->flags & TSROW_KV_BIG) {
+ p = pRow->pData + sizeof(STSKVRow) + sizeof(uint32_t) * pRowK->nCols;
} else {
- pColVal->pData = p;
- pColVal->nData = pTColumn->bytes;
+ ASSERT(0);
+ }
+ while (lidx <= ridx) {
+ midx = (lidx + ridx) / 2;
+
+ if (pRow->flags & TSROW_KV_SMALL) {
+ n = ((uint8_t *)pRowK->idx)[midx];
+ } else if (pRow->flags & TSROW_KV_MID) {
+ n = ((uint16_t *)pRowK->idx)[midx];
+ } else {
+ n = ((uint32_t *)pRowK->idx)[midx];
+ }
+
+ n += tGetI16v(p + n, &cid);
+
+ if (TABS(cid) == pTColumn->colId) {
+ if (cid < 0) {
+ goto _return_null;
+ } else {
+ n += tGetValue(p + n, &value, pTColumn->type);
+ goto _return_value;
+ }
+
+ return;
+ } else if (TABS(cid) > pTColumn->colId) {
+ ridx = midx - 1;
+ } else {
+ lidx = midx + 1;
+ }
}
+
+ // not found, return NONE
+ goto _return_none;
}
- return 0;
+_return_none:
+ *pColVal = COL_VAL_NONE(pTColumn->colId);
+ return;
+
+_return_null:
+ *pColVal = COL_VAL_NULL(pTColumn->colId);
+ return;
+
+_return_value:
+ *pColVal = COL_VAL_VALUE(pTColumn->colId, value);
+ return;
+}
+
+int32_t tTSRowToArray(STSRow2 *pRow, STSchema *pTSchema, SArray **ppArray) {
+ int32_t code = 0;
+ SColVal cv;
+
+ (*ppArray) = taosArrayInit(pTSchema->numOfCols, sizeof(SColVal));
+ if (*ppArray == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
+ }
+
+ for (int32_t iColumn = 0; iColumn < pTSchema->numOfCols; iColumn++) {
+ tTSRowGet(pRow, pTSchema, iColumn, &cv);
+ taosArrayPush(*ppArray, &cv);
+ }
+
+_exit:
+ return code;
+}
+
+int32_t tPutTSRow(uint8_t *p, STSRow2 *pRow) {
+ int32_t n = 0;
+
+ n += tPutI64(p ? p + n : p, pRow->ts);
+ n += tPutI8(p ? p + n : p, pRow->flags);
+ n += tPutI32v(p ? p + n : p, pRow->sver);
+
+ ASSERT(pRow->flags & 0xf);
+
+ switch (pRow->flags & 0xf) {
+ case TSROW_HAS_NONE:
+ case TSROW_HAS_NULL:
+ ASSERT(pRow->nData == 0);
+ ASSERT(pRow->pData == NULL);
+ break;
+ default:
+ ASSERT(pRow->nData && pRow->pData);
+ n += tPutBinary(p ? p + n : p, pRow->pData, pRow->nData);
+ break;
+ }
+
+ return n;
+}
+
+int32_t tGetTSRow(uint8_t *p, STSRow2 *pRow) {
+ int32_t n = 0;
+
+ n += tGetI64(p + n, &pRow->ts);
+ n += tGetI8(p + n, &pRow->flags);
+ n += tGetI32v(p + n, &pRow->sver);
+
+ ASSERT(pRow->flags);
+ switch (pRow->flags & 0xf) {
+ case TSROW_HAS_NONE:
+ case TSROW_HAS_NULL:
+ pRow->nData = 0;
+ pRow->pData = NULL;
+ break;
+ default:
+ n += tGetBinary(p + n, &pRow->pData, &pRow->nData);
+ break;
+ }
+
+ return n;
}
// STSchema
@@ -262,6 +626,7 @@ void tTSchemaDestroy(STSchema *pTSchema) {
}
// STSRowBuilder
+#if 0
int32_t tTSRowBuilderInit(STSRowBuilder *pBuilder, int32_t sver, int32_t nCols, SSchema *pSchema) {
if (tTSchemaCreate(sver, pSchema, nCols, &pBuilder->pTSchema) < 0) return -1;
@@ -443,7 +808,6 @@ static void setBitMap(uint8_t *p, STSchema *pTSchema, uint8_t flags) {
}
int32_t tTSRowBuilderGetRow(STSRowBuilder *pBuilder, const STSRow2 **ppRow) {
int32_t nDataTP, nDataKV;
- uint32_t flags;
STSKVRow *pTSKVRow = (STSKVRow *)pBuilder->pKVBuf;
int32_t nCols = pBuilder->pTSchema->numOfCols;
@@ -457,7 +821,7 @@ int32_t tTSRowBuilderGetRow(STSRowBuilder *pBuilder, const STSRow2 **ppRow) {
pBuilder->row.flags |= TSROW_HAS_NONE;
}
- ASSERT(pBuilder->row.flags & 0xf != 0);
+ ASSERT((pBuilder->row.flags & 0xf) != 0);
*(ppRow) = &pBuilder->row;
switch (pBuilder->row.flags & 0xf) {
case TSROW_HAS_NONE:
@@ -487,7 +851,7 @@ int32_t tTSRowBuilderGetRow(STSRowBuilder *pBuilder, const STSRow2 **ppRow) {
if (nDataKV < nDataTP) {
// generate KV row
- ASSERT(pBuilder->row.flags & 0xf != TSROW_HAS_VAL);
+ ASSERT((pBuilder->row.flags & 0xf) != TSROW_HAS_VAL);
pBuilder->row.flags |= TSROW_KV_ROW;
pBuilder->row.nData = nDataKV;
@@ -503,12 +867,12 @@ int32_t tTSRowBuilderGetRow(STSRowBuilder *pBuilder, const STSRow2 **ppRow) {
pBuilder->row.nData = nDataTP;
uint8_t *p;
- uint8_t flags = pBuilder->row.flags & 0xf;
+ uint8_t flags = (pBuilder->row.flags & 0xf);
if (flags == TSROW_HAS_VAL) {
pBuilder->row.pData = pBuilder->pTPBuf + pBuilder->szBitMap2;
} else {
- if (flags == TSROW_HAS_VAL | TSROW_HAS_NULL | TSROW_HAS_NONE) {
+ if (flags == (TSROW_HAS_VAL | TSROW_HAS_NULL | TSROW_HAS_NONE)) {
pBuilder->row.pData = pBuilder->pTPBuf;
} else {
pBuilder->row.pData = pBuilder->pTPBuf + pBuilder->szBitMap2 - pBuilder->szBitMap1;
@@ -520,94 +884,374 @@ int32_t tTSRowBuilderGetRow(STSRowBuilder *pBuilder, const STSRow2 **ppRow) {
return 0;
}
+#endif
-static FORCE_INLINE int tTagIdxCmprFn(const void *p1, const void *p2) {
- STagIdx *pTagIdx1 = (STagIdx *)p1;
- STagIdx *pTagIdx2 = (STagIdx *)p2;
- if (pTagIdx1->cid < pTagIdx1->cid) {
+static int tTagValCmprFn(const void *p1, const void *p2) {
+ if (((STagVal *)p1)->cid < ((STagVal *)p2)->cid) {
return -1;
- } else if (pTagIdx1->cid > pTagIdx1->cid) {
+ } else if (((STagVal *)p1)->cid > ((STagVal *)p2)->cid) {
return 1;
}
+
return 0;
}
-int32_t tTagNew(STagVal *pTagVals, int16_t nTag, STag **ppTag) {
- STagVal *pTagVal;
- uint8_t *p;
- int32_t n;
- uint16_t tsize = sizeof(STag) + sizeof(STagIdx) * nTag;
+static int tTagValJsonCmprFn(const void *p1, const void *p2) {
+ return strcmp(((STagVal *)p1)[0].pKey, ((STagVal *)p2)[0].pKey);
+}
- for (int16_t iTag = 0; iTag < nTag; iTag++) {
- pTagVal = &pTagVals[iTag];
+static void debugPrintTagVal(int8_t type, const void *val, int32_t vlen, const char *tag, int32_t ln) {
+ switch (type) {
+ case TSDB_DATA_TYPE_JSON:
+ case TSDB_DATA_TYPE_VARCHAR:
+ case TSDB_DATA_TYPE_NCHAR: {
+ char tmpVal[32] = {0};
+ strncpy(tmpVal, val, vlen > 31 ? 31 : vlen);
+ printf("%s:%d type:%d vlen:%d, val:\"%s\"\n", tag, ln, (int32_t)type, vlen, tmpVal);
+ } break;
+ case TSDB_DATA_TYPE_FLOAT:
+ printf("%s:%d type:%d vlen:%d, val:%f\n", tag, ln, (int32_t)type, vlen, *(float *)val);
+ break;
+ case TSDB_DATA_TYPE_DOUBLE:
+ printf("%s:%d type:%d vlen:%d, val:%lf\n", tag, ln, (int32_t)type, vlen, *(double *)val);
+ break;
+ case TSDB_DATA_TYPE_BOOL:
+ printf("%s:%d type:%d vlen:%d, val:%" PRIu8 "\n", tag, ln, (int32_t)type, vlen, *(uint8_t *)val);
+ break;
+ case TSDB_DATA_TYPE_TINYINT:
+ printf("%s:%d type:%d vlen:%d, val:%" PRIi8 "\n", tag, ln, (int32_t)type, vlen, *(int8_t *)val);
+ break;
+ case TSDB_DATA_TYPE_SMALLINT:
+ printf("%s:%d type:%d vlen:%d, val:%" PRIi16 "\n", tag, ln, (int32_t)type, vlen, *(int16_t *)val);
+ break;
+ case TSDB_DATA_TYPE_INT:
+ printf("%s:%d type:%d vlen:%d, val:%" PRIi32 "\n", tag, ln, (int32_t)type, vlen, *(int32_t *)val);
+ break;
+ case TSDB_DATA_TYPE_BIGINT:
+ printf("%s:%d type:%d vlen:%d, val:%" PRIi64 "\n", tag, ln, (int32_t)type, vlen, *(int64_t *)val);
+ break;
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ printf("%s:%d type:%d vlen:%d, val:%" PRIi64 "\n", tag, ln, (int32_t)type, vlen, *(int64_t *)val);
+ break;
+ case TSDB_DATA_TYPE_UTINYINT:
+ printf("%s:%d type:%d vlen:%d, val:%" PRIu8 "\n", tag, ln, (int32_t)type, vlen, *(uint8_t *)val);
+ break;
+ case TSDB_DATA_TYPE_USMALLINT:
+ printf("%s:%d type:%d vlen:%d, val:%" PRIu16 "\n", tag, ln, (int32_t)type, vlen, *(uint16_t *)val);
+ break;
+ case TSDB_DATA_TYPE_UINT:
+ printf("%s:%d type:%d vlen:%d, val:%" PRIu32 "\n", tag, ln, (int32_t)type, vlen, *(uint32_t *)val);
+ break;
+ case TSDB_DATA_TYPE_UBIGINT:
+ printf("%s:%d type:%d vlen:%d, val:%" PRIu64 "\n", tag, ln, (int32_t)type, vlen, *(uint64_t *)val);
+ break;
+ case TSDB_DATA_TYPE_NULL:
+ printf("%s:%d type:%d vlen:%d, val:%" PRIi8 "\n", tag, ln, (int32_t)type, vlen, *(int8_t *)val);
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+}
- if (IS_VAR_DATA_TYPE(pTagVal->type)) {
- tsize += tPutBinary(NULL, pTagVal->pData, pTagVal->nData);
+// if (isLarge) {
+// p = (uint8_t *)&((int16_t *)pTag->idx)[pTag->nTag];
+// } else {
+// p = (uint8_t *)&pTag->idx[pTag->nTag];
+// }
+
+// (*ppArray) = taosArrayInit(pTag->nTag + 1, sizeof(STagVal));
+// if (*ppArray == NULL) {
+// code = TSDB_CODE_OUT_OF_MEMORY;
+// goto _err;
+// }
+
+// for (int16_t iTag = 0; iTag < pTag->nTag; iTag++) {
+// if (isLarge) {
+// offset = ((int16_t *)pTag->idx)[iTag];
+// } else {
+// offset = pTag->idx[iTag];
+// }
+
+void debugPrintSTag(STag *pTag, const char *tag, int32_t ln) {
+ int8_t isJson = pTag->flags & TD_TAG_JSON;
+ int8_t isLarge = pTag->flags & TD_TAG_LARGE;
+ uint8_t *p = NULL;
+ int16_t offset = 0;
+
+ if (isLarge) {
+ p = (uint8_t *)&((int16_t *)pTag->idx)[pTag->nTag];
+ } else {
+ p = (uint8_t *)&pTag->idx[pTag->nTag];
+ }
+ printf("%s:%d >>> STAG === %s:%s, len: %d, nTag: %d, sver:%d\n", tag, ln, isJson ? "json" : "normal",
+ isLarge ? "large" : "small", (int32_t)pTag->len, (int32_t)pTag->nTag, pTag->ver);
+ for (uint16_t n = 0; n < pTag->nTag; ++n) {
+ if (isLarge) {
+ offset = ((int16_t *)pTag->idx)[n];
} else {
- ASSERT(pTagVal->nData == TYPE_BYTES[pTagVal->type]);
- tsize += pTagVal->nData;
+ offset = pTag->idx[n];
+ }
+ STagVal tagVal = {0};
+ if (isJson) {
+ tagVal.pKey = (char *)POINTER_SHIFT(p, offset);
+ } else {
+ tagVal.cid = *(int16_t *)POINTER_SHIFT(p, offset);
+ }
+ printf("%s:%d loop[%d-%d] offset=%d\n", __func__, __LINE__, (int32_t)pTag->nTag, (int32_t)n, (int32_t)offset);
+ tGetTagVal(p + offset, &tagVal, isJson);
+ if (IS_VAR_DATA_TYPE(tagVal.type)) {
+ debugPrintTagVal(tagVal.type, tagVal.pData, tagVal.nData, __func__, __LINE__);
+ } else {
+ debugPrintTagVal(tagVal.type, &tagVal.i64, tDataTypes[tagVal.type].bytes, __func__, __LINE__);
}
}
+ printf("\n");
+}
- (*ppTag) = (STag *)taosMemoryMalloc(tsize);
- if (*ppTag == NULL) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- return -1;
+void debugCheckTags(STag *pTag) {
+ switch (pTag->flags) {
+ case 0x0:
+ case 0x20:
+ case 0x40:
+ case 0x60:
+ break;
+ default:
+ ASSERT(0);
}
- p = (uint8_t *)&((*ppTag)->idx[nTag]);
- n = 0;
+ ASSERT(pTag->nTag <= 128 && pTag->nTag >= 0);
+ ASSERT(pTag->ver <= 512 && pTag->ver >= 0); // temp condition for pTag->ver
+}
- (*ppTag)->len = tsize;
- (*ppTag)->nTag = nTag;
+static int32_t tPutTagVal(uint8_t *p, STagVal *pTagVal, int8_t isJson) {
+ int32_t n = 0;
+
+ // key
+ if (isJson) {
+ n += tPutCStr(p ? p + n : p, pTagVal->pKey);
+ } else {
+ n += tPutI16v(p ? p + n : p, pTagVal->cid);
+ }
+
+ // type
+ n += tPutI8(p ? p + n : p, pTagVal->type);
+
+ // value
+ if (IS_VAR_DATA_TYPE(pTagVal->type)) {
+ n += tPutBinary(p ? p + n : p, pTagVal->pData, pTagVal->nData);
+ } else {
+ p = p ? p + n : p;
+ n += tDataTypes[pTagVal->type].bytes;
+ if (p) memcpy(p, &(pTagVal->i64), tDataTypes[pTagVal->type].bytes);
+ }
+
+ return n;
+}
+static int32_t tGetTagVal(uint8_t *p, STagVal *pTagVal, int8_t isJson) {
+ int32_t n = 0;
+
+ // key
+ if (isJson) {
+ n += tGetCStr(p + n, &pTagVal->pKey);
+ } else {
+ n += tGetI16v(p + n, &pTagVal->cid);
+ }
+
+ // type
+ n += tGetI8(p + n, &pTagVal->type);
+
+ // value
+ if (IS_VAR_DATA_TYPE(pTagVal->type)) {
+ n += tGetBinary(p + n, &pTagVal->pData, &pTagVal->nData);
+ } else {
+ memcpy(&(pTagVal->i64), p + n, tDataTypes[pTagVal->type].bytes);
+ n += tDataTypes[pTagVal->type].bytes;
+ }
+
+ return n;
+}
+int32_t tTagNew(SArray *pArray, int32_t version, int8_t isJson, STag **ppTag) {
+ int32_t code = 0;
+ uint8_t *p = NULL;
+ int16_t n = 0;
+ int16_t nTag = taosArrayGetSize(pArray);
+ int32_t szTag = 0;
+ int8_t isLarge = 0;
+
+ // sort
+ if (isJson) {
+ qsort(pArray->pData, nTag, sizeof(STagVal), tTagValJsonCmprFn);
+ } else {
+ qsort(pArray->pData, nTag, sizeof(STagVal), tTagValCmprFn);
+ }
+
+ // get size
for (int16_t iTag = 0; iTag < nTag; iTag++) {
- pTagVal = &pTagVals[iTag];
+ szTag += tPutTagVal(NULL, (STagVal *)taosArrayGet(pArray, iTag), isJson);
+ }
+ if (szTag <= INT8_MAX) {
+ szTag = szTag + sizeof(STag) + sizeof(int8_t) * nTag;
+ } else {
+ szTag = szTag + sizeof(STag) + sizeof(int16_t) * nTag;
+ isLarge = 1;
+ }
- (*ppTag)->idx[iTag].cid = pTagVal->cid;
- (*ppTag)->idx[iTag].offset = n;
+ ASSERT(szTag <= INT16_MAX);
- if (IS_VAR_DATA_TYPE(pTagVal->type)) {
- n += tPutBinary(p + n, pTagVal->pData, pTagVal->nData);
+ // build tag
+ (*ppTag) = (STag *)taosMemoryCalloc(szTag, 1);
+ if ((*ppTag) == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ (*ppTag)->flags = 0;
+ if (isJson) {
+ (*ppTag)->flags |= TD_TAG_JSON;
+ }
+ if (isLarge) {
+ (*ppTag)->flags |= TD_TAG_LARGE;
+ }
+ (*ppTag)->len = szTag;
+ (*ppTag)->nTag = nTag;
+ (*ppTag)->ver = version;
+
+ if (isLarge) {
+ p = (uint8_t *)&((int16_t *)(*ppTag)->idx)[nTag];
+ } else {
+ p = (uint8_t *)&(*ppTag)->idx[nTag];
+ }
+ n = 0;
+ for (int16_t iTag = 0; iTag < nTag; iTag++) {
+ if (isLarge) {
+ ((int16_t *)(*ppTag)->idx)[iTag] = n;
} else {
- memcpy(p + n, pTagVal->pData, pTagVal->nData);
- n += pTagVal->nData;
+ (*ppTag)->idx[iTag] = n;
}
+ n += tPutTagVal(p + n, (STagVal *)taosArrayGet(pArray, iTag), isJson);
}
+#ifdef TD_DEBUG_PRINT_TAG
+ debugPrintSTag(*ppTag, __func__, __LINE__);
+#endif
- qsort((*ppTag)->idx, (*ppTag)->nTag, sizeof(STagIdx), tTagIdxCmprFn);
- return 0;
+ debugCheckTags(*ppTag); // TODO: remove this line after debug
+ return code;
+
+_err:
+ return code;
}
void tTagFree(STag *pTag) {
if (pTag) taosMemoryFree(pTag);
}
-void tTagGet(STag *pTag, int16_t cid, int8_t type, uint8_t **ppData, int32_t *nData) {
- STagIdx *pTagIdx = bsearch(&((STagIdx){.cid = cid}), pTag->idx, pTag->nTag, sizeof(STagIdx), tTagIdxCmprFn);
- if (pTagIdx == NULL) {
- *ppData = NULL;
- *nData = 0;
+char *tTagValToData(const STagVal *value, bool isJson) {
+ if (!value) return NULL;
+ char *data = NULL;
+ int8_t typeBytes = 0;
+ if (isJson) {
+ typeBytes = CHAR_BYTES;
+ }
+ if (IS_VAR_DATA_TYPE(value->type)) {
+ data = taosMemoryCalloc(1, typeBytes + VARSTR_HEADER_SIZE + value->nData);
+ if (data == NULL) return NULL;
+ if (isJson) *data = value->type;
+ varDataLen(data + typeBytes) = value->nData;
+ memcpy(varDataVal(data + typeBytes), value->pData, value->nData);
+ } else {
+ data = ((char *)&(value->i64)) - typeBytes; // json with type
+ }
+
+ return data;
+}
+
+bool tTagGet(const STag *pTag, STagVal *pTagVal) {
+ int16_t lidx = 0;
+ int16_t ridx = pTag->nTag - 1;
+ int16_t midx;
+ uint8_t *p;
+ int8_t isJson = pTag->flags & TD_TAG_JSON;
+ int8_t isLarge = pTag->flags & TD_TAG_LARGE;
+ int16_t offset;
+ STagVal tv;
+ int c;
+
+ if (isLarge) {
+ p = (uint8_t *)&((int16_t *)pTag->idx)[pTag->nTag];
} else {
- uint8_t *p = (uint8_t *)&pTag->idx[pTag->nTag] + pTagIdx->offset;
- if (IS_VAR_DATA_TYPE(type)) {
- tGetBinary(p, ppData, nData);
+ p = (uint8_t *)&pTag->idx[pTag->nTag];
+ }
+
+ pTagVal->type = TSDB_DATA_TYPE_NULL;
+ pTagVal->pData = NULL;
+ pTagVal->nData = 0;
+ while (lidx <= ridx) {
+ midx = (lidx + ridx) / 2;
+ if (isLarge) {
+ offset = ((int16_t *)pTag->idx)[midx];
+ } else {
+ offset = pTag->idx[midx];
+ }
+
+ tGetTagVal(p + offset, &tv, isJson);
+ if (isJson) {
+ c = tTagValJsonCmprFn(pTagVal, &tv);
} else {
- *ppData = p;
- *nData = TYPE_BYTES[type];
+ c = tTagValCmprFn(pTagVal, &tv);
+ }
+
+ if (c < 0) {
+ ridx = midx - 1;
+ } else if (c > 0) {
+ lidx = midx + 1;
+ } else {
+ memcpy(pTagVal, &tv, sizeof(tv));
+ return true;
}
}
+ return false;
}
-int32_t tEncodeTag(SEncoder *pEncoder, STag *pTag) {
- // return tEncodeBinary(pEncoder, (uint8_t *)pTag, pTag->len);
- ASSERT(0);
- return 0;
+int32_t tEncodeTag(SEncoder *pEncoder, const STag *pTag) {
+ return tEncodeBinary(pEncoder, (const uint8_t *)pTag, pTag->len);
}
-int32_t tDecodeTag(SDecoder *pDecoder, const STag **ppTag) {
- // uint32_t n;
- // return tDecodeBinary(pDecoder, (const uint8_t **)ppTag, &n);
- ASSERT(0);
- return 0;
+int32_t tDecodeTag(SDecoder *pDecoder, STag **ppTag) {
+ return tDecodeBinary(pDecoder, (uint8_t **)ppTag, NULL);
+}
+
+int32_t tTagToValArray(const STag *pTag, SArray **ppArray) {
+ int32_t code = 0;
+ uint8_t *p = NULL;
+ STagVal tv = {0};
+ int8_t isLarge = pTag->flags & TD_TAG_LARGE;
+ int16_t offset = 0;
+
+ if (isLarge) {
+ p = (uint8_t *)&((int16_t *)pTag->idx)[pTag->nTag];
+ } else {
+ p = (uint8_t *)&pTag->idx[pTag->nTag];
+ }
+
+ (*ppArray) = taosArrayInit(pTag->nTag + 1, sizeof(STagVal));
+ if (*ppArray == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+
+ for (int16_t iTag = 0; iTag < pTag->nTag; iTag++) {
+ if (isLarge) {
+ offset = ((int16_t *)pTag->idx)[iTag];
+ } else {
+ offset = pTag->idx[iTag];
+ }
+ tGetTagVal(p + offset, &tv, pTag->flags & TD_TAG_JSON);
+ taosArrayPush(*ppArray, &tv);
+ }
+
+ return code;
+
+_err:
+ return code;
}
#if 1 // ===================================================================================================================
@@ -936,162 +1580,4 @@ void tdResetDataCols(SDataCols *pCols) {
}
}
-SKVRow tdKVRowDup(SKVRow row) {
- SKVRow trow = taosMemoryMalloc(kvRowLen(row));
- if (trow == NULL) return NULL;
-
- kvRowCpy(trow, row);
- return trow;
-}
-
-static int compareColIdx(const void *a, const void *b) {
- const SColIdx *x = (const SColIdx *)a;
- const SColIdx *y = (const SColIdx *)b;
- if (x->colId > y->colId) {
- return 1;
- }
- if (x->colId < y->colId) {
- return -1;
- }
- return 0;
-}
-
-void tdSortKVRowByColIdx(SKVRow row) { qsort(kvRowColIdx(row), kvRowNCols(row), sizeof(SColIdx), compareColIdx); }
-
-int tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value) {
- SColIdx *pColIdx = NULL;
- SKVRow row = *orow;
- SKVRow nrow = NULL;
- void *ptr = taosbsearch(&colId, kvRowColIdx(row), kvRowNCols(row), sizeof(SColIdx), comparTagId, TD_GE);
-
- if (ptr == NULL || ((SColIdx *)ptr)->colId > colId) { // need to add a column value to the row
- int diff = IS_VAR_DATA_TYPE(type) ? varDataTLen(value) : TYPE_BYTES[type];
- int nRowLen = kvRowLen(row) + sizeof(SColIdx) + diff;
- int oRowCols = kvRowNCols(row);
-
- ASSERT(diff > 0);
- nrow = taosMemoryMalloc(nRowLen);
- if (nrow == NULL) return -1;
-
- kvRowSetLen(nrow, nRowLen);
- kvRowSetNCols(nrow, oRowCols + 1);
-
- memcpy(kvRowColIdx(nrow), kvRowColIdx(row), sizeof(SColIdx) * oRowCols);
- memcpy(kvRowValues(nrow), kvRowValues(row), kvRowValLen(row));
-
- pColIdx = kvRowColIdxAt(nrow, oRowCols);
- pColIdx->colId = colId;
- pColIdx->offset = kvRowValLen(row);
-
- memcpy(kvRowColVal(nrow, pColIdx), value, diff); // copy new value
-
- tdSortKVRowByColIdx(nrow);
-
- *orow = nrow;
- taosMemoryFree(row);
- } else {
- ASSERT(((SColIdx *)ptr)->colId == colId);
- if (IS_VAR_DATA_TYPE(type)) {
- void *pOldVal = kvRowColVal(row, (SColIdx *)ptr);
-
- if (varDataTLen(value) == varDataTLen(pOldVal)) { // just update the column value in place
- memcpy(pOldVal, value, varDataTLen(value));
- } else { // need to reallocate the memory
- int16_t nlen = kvRowLen(row) + (varDataTLen(value) - varDataTLen(pOldVal));
- ASSERT(nlen > 0);
- nrow = taosMemoryMalloc(nlen);
- if (nrow == NULL) return -1;
-
- kvRowSetLen(nrow, nlen);
- kvRowSetNCols(nrow, kvRowNCols(row));
-
- int zsize = sizeof(SColIdx) * kvRowNCols(row) + ((SColIdx *)ptr)->offset;
- memcpy(kvRowColIdx(nrow), kvRowColIdx(row), zsize);
- memcpy(kvRowColVal(nrow, ((SColIdx *)ptr)), value, varDataTLen(value));
- // Copy left value part
- int lsize = kvRowLen(row) - TD_KV_ROW_HEAD_SIZE - zsize - varDataTLen(pOldVal);
- if (lsize > 0) {
- memcpy(POINTER_SHIFT(nrow, TD_KV_ROW_HEAD_SIZE + zsize + varDataTLen(value)),
- POINTER_SHIFT(row, TD_KV_ROW_HEAD_SIZE + zsize + varDataTLen(pOldVal)), lsize);
- }
-
- for (int i = 0; i < kvRowNCols(nrow); i++) {
- pColIdx = kvRowColIdxAt(nrow, i);
-
- if (pColIdx->offset > ((SColIdx *)ptr)->offset) {
- pColIdx->offset = pColIdx->offset - varDataTLen(pOldVal) + varDataTLen(value);
- }
- }
-
- *orow = nrow;
- taosMemoryFree(row);
- }
- } else {
- memcpy(kvRowColVal(row, (SColIdx *)ptr), value, TYPE_BYTES[type]);
- }
- }
-
- return 0;
-}
-
-int tdEncodeKVRow(void **buf, SKVRow row) {
- // May change the encode purpose
- if (buf != NULL) {
- kvRowCpy(*buf, row);
- *buf = POINTER_SHIFT(*buf, kvRowLen(row));
- }
-
- return kvRowLen(row);
-}
-
-void *tdDecodeKVRow(void *buf, SKVRow *row) {
- *row = tdKVRowDup(buf);
- if (*row == NULL) return NULL;
- return POINTER_SHIFT(buf, kvRowLen(*row));
-}
-
-int tdInitKVRowBuilder(SKVRowBuilder *pBuilder) {
- pBuilder->tCols = 128;
- pBuilder->nCols = 0;
- pBuilder->pColIdx = (SColIdx *)taosMemoryMalloc(sizeof(SColIdx) * pBuilder->tCols);
- if (pBuilder->pColIdx == NULL) return -1;
- pBuilder->alloc = 1024;
- pBuilder->size = 0;
- pBuilder->buf = taosMemoryMalloc(pBuilder->alloc);
- if (pBuilder->buf == NULL) {
- taosMemoryFree(pBuilder->pColIdx);
- return -1;
- }
- return 0;
-}
-
-void tdDestroyKVRowBuilder(SKVRowBuilder *pBuilder) {
- taosMemoryFreeClear(pBuilder->pColIdx);
- taosMemoryFreeClear(pBuilder->buf);
-}
-
-void tdResetKVRowBuilder(SKVRowBuilder *pBuilder) {
- pBuilder->nCols = 0;
- pBuilder->size = 0;
-}
-
-SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder) {
- int tlen = sizeof(SColIdx) * pBuilder->nCols + pBuilder->size;
- // if (tlen == 0) return NULL; // nCols == 0 means no tags
-
- tlen += TD_KV_ROW_HEAD_SIZE;
-
- SKVRow row = taosMemoryMalloc(tlen);
- if (row == NULL) return NULL;
-
- kvRowSetNCols(row, pBuilder->nCols);
- kvRowSetLen(row, tlen);
-
- if(pBuilder->nCols > 0){
- memcpy(kvRowColIdx(row), pBuilder->pColIdx, sizeof(SColIdx) * pBuilder->nCols);
- memcpy(kvRowValues(row), pBuilder->buf, pBuilder->size);
- }
-
- return row;
-}
#endif
\ No newline at end of file
diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c
index 1b61a0bc606aa9fd479cf996668756d2b88f4702..e77c462e5ae0fe81521f34cbd1475669747e0ee6 100644
--- a/source/common/src/tglobal.c
+++ b/source/common/src/tglobal.c
@@ -79,9 +79,10 @@ uint16_t tsTelemPort = 80;
// schemaless
char tsSmlTagName[TSDB_COL_NAME_LEN] = "_tag_null";
-char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = ""; //user defined child table name can be specified in tag value.
- //If set to empty system will generate table name using MD5 hash.
-bool tsSmlDataFormat = true; // true means that the name and order of cols in each line are the same(only for influx protocol)
+char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = ""; // user defined child table name can be specified in tag value.
+ // If set to empty system will generate table name using MD5 hash.
+bool tsSmlDataFormat =
+ true; // true means that the name and order of cols in each line are the same(only for influx protocol)
// query
int32_t tsQueryPolicy = 1;
@@ -108,8 +109,11 @@ int32_t tsCompressColData = -1;
*/
int32_t tsCompatibleModel = 1;
+// count/hyperloglog function always return values in case of all NULL data or Empty data set.
+int32_t tsCountAlwaysReturnValue = 1;
+
// 10 ms for sliding time, the value will changed in case of time precision changed
-int32_t tsMinSlidingTime = 10;
+int32_t tsMinSlidingTime = 10;
// the maxinum number of distict query result
int32_t tsMaxNumOfDistinctResults = 1000 * 10000;
@@ -129,7 +133,6 @@ int32_t tsRetryStreamCompDelay = 10 * 1000;
// The delayed computing ration. 10% of the whole computing time window by default.
float tsStreamComputDelayRatio = 0.1f;
-int32_t tsProjectExecInterval = 10000; // every 10sec, the projection will be executed once
int64_t tsMaxRetentWindow = 24 * 3600L; // maximum time window tolerance
// the maximum allowed query buffer size during query processing for each data node.
@@ -292,6 +295,7 @@ int32_t taosAddClientLogCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "jniDebugFlag", jniDebugFlag, 0, 255, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "simDebugFlag", 143, 0, 255, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "debugFlag", 0, 0, 255, 1) != 0) return -1;
+ if (cfgAddInt32(pCfg, "idxDebugFlag", idxDebugFlag, 0, 255, 1) != 0) return -1;
return 0;
}
@@ -307,6 +311,7 @@ static int32_t taosAddServerLogCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "fsDebugFlag", fsDebugFlag, 0, 255, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "fnDebugFlag", fnDebugFlag, 0, 255, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "smaDebugFlag", smaDebugFlag, 0, 255, 0) != 0) return -1;
+ if (cfgAddInt32(pCfg, "idxDebugFlag", idxDebugFlag, 0, 255, 0) != 0) return -1;
return 0;
}
@@ -371,6 +376,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "minSlidingTime", tsMinSlidingTime, 10, 1000000, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "minIntervalTime", tsMinIntervalTime, 1, 1000000, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "maxNumOfDistinctRes", tsMaxNumOfDistinctResults, 10 * 10000, 10000 * 10000, 0) != 0) return -1;
+ if (cfgAddInt32(pCfg, "countAlwaysReturnValue", tsCountAlwaysReturnValue, 0, 1, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "maxStreamCompDelay", tsMaxStreamComputDelay, 10, 1000000000, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "maxFirstStreamCompDelay", tsStreamCompStartDelay, 1000, 1000000000, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "retryStreamCompDelay", tsRetryStreamCompDelay, 10, 1000000000, 0) != 0) return -1;
@@ -479,6 +485,7 @@ static void taosSetClientLogCfg(SConfig *pCfg) {
rpcDebugFlag = cfgGetItem(pCfg, "rpcDebugFlag")->i32;
tmrDebugFlag = cfgGetItem(pCfg, "tmrDebugFlag")->i32;
jniDebugFlag = cfgGetItem(pCfg, "jniDebugFlag")->i32;
+ idxDebugFlag = cfgGetItem(pCfg, "idxDebugFlag")->i32;
}
static void taosSetServerLogCfg(SConfig *pCfg) {
@@ -493,6 +500,7 @@ static void taosSetServerLogCfg(SConfig *pCfg) {
fsDebugFlag = cfgGetItem(pCfg, "fsDebugFlag")->i32;
fnDebugFlag = cfgGetItem(pCfg, "fnDebugFlag")->i32;
smaDebugFlag = cfgGetItem(pCfg, "smaDebugFlag")->i32;
+ idxDebugFlag = cfgGetItem(pCfg, "idxDebugFlag")->i32;
}
static int32_t taosSetClientCfg(SConfig *pCfg) {
@@ -562,6 +570,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsMinSlidingTime = cfgGetItem(pCfg, "minSlidingTime")->i32;
tsMinIntervalTime = cfgGetItem(pCfg, "minIntervalTime")->i32;
tsMaxNumOfDistinctResults = cfgGetItem(pCfg, "maxNumOfDistinctRes")->i32;
+ tsCountAlwaysReturnValue = cfgGetItem(pCfg, "countAlwaysReturnValue")->i32;
tsMaxStreamComputDelay = cfgGetItem(pCfg, "maxStreamCompDelay")->i32;
tsStreamCompStartDelay = cfgGetItem(pCfg, "maxFirstStreamCompDelay")->i32;
tsRetryStreamCompDelay = cfgGetItem(pCfg, "retryStreamCompDelay")->i32;
diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c
index 1108ea1e9fea8cba6ca593f1ca7695a28f6ba003..9c6c532bcd071bf8799e28076a9ea147d5b81443 100644
--- a/source/common/src/tmsg.c
+++ b/source/common/src/tmsg.c
@@ -28,7 +28,7 @@
#undef TD_MSG_SEG_CODE_
#include "tmsgdef.h"
-int32_t tInitSubmitMsgIter(SSubmitReq *pMsg, SSubmitMsgIter *pIter) {
+int32_t tInitSubmitMsgIter(const SSubmitReq *pMsg, SSubmitMsgIter *pIter) {
if (pMsg == NULL) {
terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP;
return -1;
@@ -147,12 +147,24 @@ int32_t tEncodeSQueryNodeAddr(SEncoder *pEncoder, SQueryNodeAddr *pAddr) {
return 0;
}
+int32_t tEncodeSQueryNodeLoad(SEncoder *pEncoder, SQueryNodeLoad *pLoad) {
+ if (tEncodeSQueryNodeAddr(pEncoder, &pLoad->addr) < 0) return -1;
+ if (tEncodeU64(pEncoder, pLoad->load) < 0) return -1;
+ return 0;
+}
+
int32_t tDecodeSQueryNodeAddr(SDecoder *pDecoder, SQueryNodeAddr *pAddr) {
if (tDecodeI32(pDecoder, &pAddr->nodeId) < 0) return -1;
if (tDecodeSEpSet(pDecoder, &pAddr->epSet) < 0) return -1;
return 0;
}
+int32_t tDecodeSQueryNodeLoad(SDecoder *pDecoder, SQueryNodeLoad *pLoad) {
+ if (tDecodeSQueryNodeAddr(pDecoder, &pLoad->addr) < 0) return -1;
+ if (tDecodeU64(pDecoder, &pLoad->load) < 0) return -1;
+ return 0;
+}
+
int32_t taosEncodeSEpSet(void **buf, const SEpSet *pEp) {
int32_t tlen = 0;
tlen += taosEncodeFixedI8(buf, pEp->inUse);
@@ -304,6 +316,12 @@ static int32_t tSerializeSClientHbRsp(SEncoder *pEncoder, const SClientHbRsp *pR
if (tEncodeI32(pEncoder, pRsp->query->onlineDnodes) < 0) return -1;
if (tEncodeI8(pEncoder, pRsp->query->killConnection) < 0) return -1;
if (tEncodeSEpSet(pEncoder, &pRsp->query->epSet) < 0) return -1;
+ int32_t num = taosArrayGetSize(pRsp->query->pQnodeList);
+ if (tEncodeI32(pEncoder, num) < 0) return -1;
+ for (int32_t i = 0; i < num; ++i) {
+ SQueryNodeLoad *pLoad = taosArrayGet(pRsp->query->pQnodeList, i);
+ if (tEncodeSQueryNodeLoad(pEncoder, pLoad) < 0) return -1;
+ }
} else {
if (tEncodeI32(pEncoder, queryNum) < 0) return -1;
}
@@ -333,6 +351,15 @@ static int32_t tDeserializeSClientHbRsp(SDecoder *pDecoder, SClientHbRsp *pRsp)
if (tDecodeI32(pDecoder, &pRsp->query->onlineDnodes) < 0) return -1;
if (tDecodeI8(pDecoder, &pRsp->query->killConnection) < 0) return -1;
if (tDecodeSEpSet(pDecoder, &pRsp->query->epSet) < 0) return -1;
+ int32_t pQnodeNum = 0;
+ if (tDecodeI32(pDecoder, &pQnodeNum) < 0) return -1;
+ if (pQnodeNum > 0) {
+ pRsp->query->pQnodeList = taosArrayInit(pQnodeNum, sizeof(SQueryNodeLoad));
+ if (NULL == pRsp->query->pQnodeList) return -1;
+ SQueryNodeLoad load = {0};
+ if (tDecodeSQueryNodeLoad(pDecoder, &load) < 0) return -1;
+ taosArrayPush(pRsp->query->pQnodeList, &load);
+ }
}
int32_t kvNum = 0;
@@ -600,7 +627,8 @@ int32_t tSerializeSMAlterStbReq(void *buf, int32_t bufLen, SMAlterStbReq *pReq)
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->name) < 0) return -1;
if (tEncodeI8(&encoder, pReq->alterType) < 0) return -1;
- if (tEncodeI32(&encoder, pReq->verInBlock) < 0) return -1;
+ if (tEncodeI32(&encoder, pReq->tagVer) < 0) return -1;
+ if (tEncodeI32(&encoder, pReq->colVer) < 0) return -1;
if (tEncodeI32(&encoder, pReq->numOfFields) < 0) return -1;
for (int32_t i = 0; i < pReq->numOfFields; ++i) {
SField *pField = taosArrayGet(pReq->pFields, i);
@@ -627,7 +655,8 @@ int32_t tDeserializeSMAlterStbReq(void *buf, int32_t bufLen, SMAlterStbReq *pReq
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->name) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->alterType) < 0) return -1;
- if (tDecodeI32(&decoder, &pReq->verInBlock) < 0) return -1;
+ if (tDecodeI32(&decoder, &pReq->tagVer) < 0) return -1;
+ if (tDecodeI32(&decoder, &pReq->colVer) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->numOfFields) < 0) return -1;
pReq->pFields = taosArrayInit(pReq->numOfFields, sizeof(SField));
if (pReq->pFields == NULL) {
@@ -663,22 +692,25 @@ void tFreeSMAltertbReq(SMAlterStbReq *pReq) {
taosArrayDestroy(pReq->pFields);
pReq->pFields = NULL;
}
-int32_t tSerializeSMEpSet(void *buf, int32_t bufLen, SMEpSet *pReq) {
+
+
+int32_t tSerializeSEpSet(void *buf, int32_t bufLen, const SEpSet *pEpset) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;
- if (tEncodeSEpSet(&encoder, &pReq->epSet) < 0) return -1;
+ if (tEncodeSEpSet(&encoder, pEpset) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
tEncoderClear(&encoder);
return tlen;
}
-int32_t tDeserializeSMEpSet(void *buf, int32_t bufLen, SMEpSet *pReq) {
+
+int32_t tDeserializeSEpSet(void *buf, int32_t bufLen, SEpSet *pEpset) {
SDecoder decoder = {0};
tDecoderInit(&decoder, buf, bufLen);
if (tStartDecode(&decoder) < 0) return -1;
- if (tDecodeSEpSet(&decoder, &pReq->epSet) < 0) return -1;
+ if (tDecodeSEpSet(&decoder, pEpset) < 0) return -1;
tEndDecode(&decoder);
tDecoderClear(&decoder);
@@ -891,6 +923,21 @@ int32_t tSerializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) {
if (tEncodeI64(&encoder, pload->pointsWritten) < 0) return -1;
}
+ // mnode loads
+ if (tEncodeI32(&encoder, pReq->mload.syncState) < 0) return -1;
+
+ if (tEncodeI32(&encoder, pReq->qload.dnodeId) < 0) return -1;
+ if (tEncodeI64(&encoder, pReq->qload.numOfProcessedQuery) < 0) return -1;
+ if (tEncodeI64(&encoder, pReq->qload.numOfProcessedCQuery) < 0) return -1;
+ if (tEncodeI64(&encoder, pReq->qload.numOfProcessedFetch) < 0) return -1;
+ if (tEncodeI64(&encoder, pReq->qload.numOfProcessedDrop) < 0) return -1;
+ if (tEncodeI64(&encoder, pReq->qload.numOfProcessedHb) < 0) return -1;
+ if (tEncodeI64(&encoder, pReq->qload.cacheDataSize) < 0) return -1;
+ if (tEncodeI64(&encoder, pReq->qload.numOfQueryInQueue) < 0) return -1;
+ if (tEncodeI64(&encoder, pReq->qload.numOfFetchInQueue) < 0) return -1;
+ if (tEncodeI64(&encoder, pReq->qload.timeInQueryQueue) < 0) return -1;
+ if (tEncodeI64(&encoder, pReq->qload.timeInFetchQueue) < 0) return -1;
+
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@@ -946,6 +993,20 @@ int32_t tDeserializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) {
}
}
+ if (tDecodeI32(&decoder, &pReq->mload.syncState) < 0) return -1;
+
+ if (tDecodeI32(&decoder, &pReq->qload.dnodeId) < 0) return -1;
+ if (tDecodeI64(&decoder, &pReq->qload.numOfProcessedQuery) < 0) return -1;
+ if (tDecodeI64(&decoder, &pReq->qload.numOfProcessedCQuery) < 0) return -1;
+ if (tDecodeI64(&decoder, &pReq->qload.numOfProcessedFetch) < 0) return -1;
+ if (tDecodeI64(&decoder, &pReq->qload.numOfProcessedDrop) < 0) return -1;
+ if (tDecodeI64(&decoder, &pReq->qload.numOfProcessedHb) < 0) return -1;
+ if (tDecodeI64(&decoder, &pReq->qload.cacheDataSize) < 0) return -1;
+ if (tDecodeI64(&decoder, &pReq->qload.numOfQueryInQueue) < 0) return -1;
+ if (tDecodeI64(&decoder, &pReq->qload.numOfFetchInQueue) < 0) return -1;
+ if (tDecodeI64(&decoder, &pReq->qload.timeInQueryQueue) < 0) return -1;
+ if (tDecodeI64(&decoder, &pReq->qload.timeInFetchQueue) < 0) return -1;
+
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
@@ -1675,6 +1736,7 @@ int32_t tSerializeSCreateDbReq(void *buf, int32_t bufLen, SCreateDbReq *pReq) {
if (tEncodeI8(&encoder, pReq->replications) < 0) return -1;
if (tEncodeI8(&encoder, pReq->strict) < 0) return -1;
if (tEncodeI8(&encoder, pReq->cacheLastRow) < 0) return -1;
+ if (tEncodeI8(&encoder, pReq->schemaless) < 0) return -1;
if (tEncodeI8(&encoder, pReq->ignoreExist) < 0) return -1;
if (tEncodeI32(&encoder, pReq->numOfRetensions) < 0) return -1;
for (int32_t i = 0; i < pReq->numOfRetensions; ++i) {
@@ -1715,6 +1777,7 @@ int32_t tDeserializeSCreateDbReq(void *buf, int32_t bufLen, SCreateDbReq *pReq)
if (tDecodeI8(&decoder, &pReq->replications) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->strict) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->cacheLastRow) < 0) return -1;
+ if (tDecodeI8(&decoder, &pReq->schemaless) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->ignoreExist) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->numOfRetensions) < 0) return -1;
pReq->pRetensions = taosArrayInit(pReq->numOfRetensions, sizeof(SRetention));
@@ -1910,11 +1973,11 @@ int32_t tSerializeSQnodeListRsp(void *buf, int32_t bufLen, SQnodeListRsp *pRsp)
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;
- int32_t num = taosArrayGetSize(pRsp->addrsList);
+ int32_t num = taosArrayGetSize(pRsp->qnodeList);
if (tEncodeI32(&encoder, num) < 0) return -1;
for (int32_t i = 0; i < num; ++i) {
- SQueryNodeAddr *addr = taosArrayGet(pRsp->addrsList, i);
- if (tEncodeSQueryNodeAddr(&encoder, addr) < 0) return -1;
+ SQueryNodeLoad *pLoad = taosArrayGet(pRsp->qnodeList, i);
+ if (tEncodeSQueryNodeLoad(&encoder, pLoad) < 0) return -1;
}
tEndEncode(&encoder);
@@ -1930,15 +1993,15 @@ int32_t tDeserializeSQnodeListRsp(void *buf, int32_t bufLen, SQnodeListRsp *pRsp
if (tStartDecode(&decoder) < 0) return -1;
int32_t num = 0;
if (tDecodeI32(&decoder, &num) < 0) return -1;
- if (NULL == pRsp->addrsList) {
- pRsp->addrsList = taosArrayInit(num, sizeof(SQueryNodeAddr));
- if (NULL == pRsp->addrsList) return -1;
+ if (NULL == pRsp->qnodeList) {
+ pRsp->qnodeList = taosArrayInit(num, sizeof(SQueryNodeLoad));
+ if (NULL == pRsp->qnodeList) return -1;
}
for (int32_t i = 0; i < num; ++i) {
- SQueryNodeAddr addr = {0};
- if (tDecodeSQueryNodeAddr(&decoder, &addr) < 0) return -1;
- taosArrayPush(pRsp->addrsList, &addr);
+ SQueryNodeLoad load = {0};
+ if (tDecodeSQueryNodeLoad(&decoder, &load) < 0) return -1;
+ taosArrayPush(pRsp->qnodeList, &load);
}
tEndDecode(&decoder);
@@ -1946,7 +2009,7 @@ int32_t tDeserializeSQnodeListRsp(void *buf, int32_t bufLen, SQnodeListRsp *pRsp
return 0;
}
-void tFreeSQnodeListRsp(SQnodeListRsp *pRsp) { taosArrayDestroy(pRsp->addrsList); }
+void tFreeSQnodeListRsp(SQnodeListRsp *pRsp) { taosArrayDestroy(pRsp->qnodeList); }
int32_t tSerializeSCompactDbReq(void *buf, int32_t bufLen, SCompactDbReq *pReq) {
SEncoder encoder = {0};
@@ -2215,6 +2278,7 @@ int32_t tSerializeSDbCfgRsp(void *buf, int32_t bufLen, const SDbCfgRsp *pRsp) {
if (tEncodeI8(&encoder, pRetension->freqUnit) < 0) return -1;
if (tEncodeI8(&encoder, pRetension->keepUnit) < 0) return -1;
}
+ if (tEncodeI8(&encoder, pRsp->schemaless) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@@ -2263,6 +2327,7 @@ int32_t tDeserializeSDbCfgRsp(void *buf, int32_t bufLen, SDbCfgRsp *pRsp) {
return -1;
}
}
+ if (tDecodeI8(&decoder, &pRsp->schemaless) < 0) return -1;
tEndDecode(&decoder);
tDecoderClear(&decoder);
@@ -2657,25 +2722,23 @@ int32_t tDeserializeSMDropCgroupReq(void *buf, int32_t bufLen, SMDropCgroupReq *
}
int32_t tSerializeSCMCreateTopicReq(void *buf, int32_t bufLen, const SCMCreateTopicReq *pReq) {
- int32_t sqlLen = 0;
- int32_t astLen = 0;
- if (pReq->sql != NULL) sqlLen = (int32_t)strlen(pReq->sql);
- if (pReq->ast != NULL) astLen = (int32_t)strlen(pReq->ast);
-
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->name) < 0) return -1;
if (tEncodeI8(&encoder, pReq->igExists) < 0) return -1;
- if (tEncodeI8(&encoder, pReq->withTbName) < 0) return -1;
- if (tEncodeI8(&encoder, pReq->withSchema) < 0) return -1;
- if (tEncodeI8(&encoder, pReq->withTag) < 0) return -1;
- if (tEncodeCStr(&encoder, pReq->subscribeDbName) < 0) return -1;
- if (tEncodeI32(&encoder, sqlLen) < 0) return -1;
- if (tEncodeI32(&encoder, astLen) < 0) return -1;
- if (sqlLen > 0 && tEncodeCStr(&encoder, pReq->sql) < 0) return -1;
- if (astLen > 0 && tEncodeCStr(&encoder, pReq->ast) < 0) return -1;
+ if (tEncodeI8(&encoder, pReq->subType) < 0) return -1;
+ if (tEncodeCStr(&encoder, pReq->subDbName) < 0) return -1;
+ if (TOPIC_SUB_TYPE__DB == pReq->subType) {
+ } else if (TOPIC_SUB_TYPE__TABLE == pReq->subType) {
+ if (tEncodeCStr(&encoder, pReq->subStbName) < 0) return -1;
+ } else {
+ if (tEncodeI32(&encoder, strlen(pReq->ast)) < 0) return -1;
+ if (tEncodeCStr(&encoder, pReq->ast) < 0) return -1;
+ }
+ if (tEncodeI32(&encoder, strlen(pReq->sql)) < 0) return -1;
+ if (tEncodeCStr(&encoder, pReq->sql) < 0) return -1;
tEndEncode(&encoder);
@@ -2694,26 +2757,26 @@ int32_t tDeserializeSCMCreateTopicReq(void *buf, int32_t bufLen, SCMCreateTopicR
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->name) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->igExists) < 0) return -1;
- if (tDecodeI8(&decoder, &pReq->withTbName) < 0) return -1;
- if (tDecodeI8(&decoder, &pReq->withSchema) < 0) return -1;
- if (tDecodeI8(&decoder, &pReq->withTag) < 0) return -1;
- if (tDecodeCStrTo(&decoder, pReq->subscribeDbName) < 0) return -1;
+ if (tDecodeI8(&decoder, &pReq->subType) < 0) return -1;
+ if (tDecodeCStrTo(&decoder, pReq->subDbName) < 0) return -1;
+ if (TOPIC_SUB_TYPE__DB == pReq->subType) {
+ } else if (TOPIC_SUB_TYPE__TABLE == pReq->subType) {
+ if (tDecodeCStrTo(&decoder, pReq->subStbName) < 0) return -1;
+ } else {
+ if (tDecodeI32(&decoder, &astLen) < 0) return -1;
+ if (astLen > 0) {
+ pReq->ast = taosMemoryCalloc(1, astLen + 1);
+ if (pReq->ast == NULL) return -1;
+ if (tDecodeCStrTo(&decoder, pReq->ast) < 0) return -1;
+ }
+ }
if (tDecodeI32(&decoder, &sqlLen) < 0) return -1;
- if (tDecodeI32(&decoder, &astLen) < 0) return -1;
-
if (sqlLen > 0) {
pReq->sql = taosMemoryCalloc(1, sqlLen + 1);
if (pReq->sql == NULL) return -1;
if (tDecodeCStrTo(&decoder, pReq->sql) < 0) return -1;
}
- if (astLen > 0) {
- pReq->ast = taosMemoryCalloc(1, astLen + 1);
- if (pReq->ast == NULL) return -1;
- if (tDecodeCStrTo(&decoder, pReq->ast) < 0) return -1;
- } else {
- }
-
tEndDecode(&decoder);
tDecoderClear(&decoder);
@@ -2722,7 +2785,9 @@ int32_t tDeserializeSCMCreateTopicReq(void *buf, int32_t bufLen, SCMCreateTopicR
void tFreeSCMCreateTopicReq(SCMCreateTopicReq *pReq) {
taosMemoryFreeClear(pReq->sql);
- taosMemoryFreeClear(pReq->ast);
+ if (TOPIC_SUB_TYPE__COLUMN == pReq->subType) {
+ taosMemoryFreeClear(pReq->ast);
+ }
}
int32_t tSerializeSCMCreateTopicRsp(void *buf, int32_t bufLen, const SCMCreateTopicRsp *pRsp) {
@@ -2871,7 +2936,6 @@ int32_t tSerializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq *pR
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeI32(&encoder, pReq->vgId) < 0) return -1;
- if (tEncodeI32(&encoder, pReq->dnodeId) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->db) < 0) return -1;
if (tEncodeI64(&encoder, pReq->dbUid) < 0) return -1;
if (tEncodeI32(&encoder, pReq->vgVersion) < 0) return -1;
@@ -2894,6 +2958,7 @@ int32_t tSerializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq *pR
if (tEncodeI8(&encoder, pReq->compression) < 0) return -1;
if (tEncodeI8(&encoder, pReq->strict) < 0) return -1;
if (tEncodeI8(&encoder, pReq->cacheLastRow) < 0) return -1;
+ if (tEncodeI8(&encoder, pReq->standby) < 0) return -1;
if (tEncodeI8(&encoder, pReq->replica) < 0) return -1;
if (tEncodeI8(&encoder, pReq->selfIndex) < 0) return -1;
for (int32_t i = 0; i < TSDB_MAX_REPLICA; ++i) {
@@ -2908,6 +2973,13 @@ int32_t tSerializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq *pR
if (tEncodeI8(&encoder, pRetension->freqUnit) < 0) return -1;
if (tEncodeI8(&encoder, pRetension->keepUnit) < 0) return -1;
}
+
+ if (tEncodeI8(&encoder, pReq->isTsma) < 0) return -1;
+ if (pReq->isTsma) {
+ uint32_t tsmaLen = (uint32_t)(htonl(((SMsgHead *)pReq->pTsma)->contLen));
+ if (tEncodeBinary(&encoder, (const uint8_t *)pReq->pTsma, tsmaLen) < 0) return -1;
+ }
+
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@@ -2921,7 +2993,6 @@ int32_t tDeserializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq *
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->vgId) < 0) return -1;
- if (tDecodeI32(&decoder, &pReq->dnodeId) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->db) < 0) return -1;
if (tDecodeI64(&decoder, &pReq->dbUid) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->vgVersion) < 0) return -1;
@@ -2944,6 +3015,7 @@ int32_t tDeserializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq *
if (tDecodeI8(&decoder, &pReq->compression) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->strict) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->cacheLastRow) < 0) return -1;
+ if (tDecodeI8(&decoder, &pReq->standby) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->replica) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->selfIndex) < 0) return -1;
for (int32_t i = 0; i < TSDB_MAX_REPLICA; ++i) {
@@ -2970,6 +3042,11 @@ int32_t tDeserializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq *
}
}
+ if (tDecodeI8(&decoder, &pReq->isTsma) < 0) return -1;
+ if (pReq->isTsma) {
+ if (tDecodeBinaryAlloc(&decoder, &pReq->pTsma, NULL) < 0) return -1;
+ }
+
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
@@ -2978,6 +3055,9 @@ int32_t tDeserializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq *
int32_t tFreeSCreateVnodeReq(SCreateVnodeReq *pReq) {
taosArrayDestroy(pReq->pRetensions);
pReq->pRetensions = NULL;
+ if (pReq->isTsma) {
+ taosMemoryFreeClear(pReq->pTsma);
+ }
return 0;
}
@@ -3056,8 +3136,8 @@ int32_t tSerializeSAlterVnodeReq(void *buf, int32_t bufLen, SAlterVnodeReq *pReq
if (tEncodeI8(&encoder, pReq->walLevel) < 0) return -1;
if (tEncodeI8(&encoder, pReq->strict) < 0) return -1;
if (tEncodeI8(&encoder, pReq->cacheLastRow) < 0) return -1;
- if (tEncodeI8(&encoder, pReq->replica) < 0) return -1;
if (tEncodeI8(&encoder, pReq->selfIndex) < 0) return -1;
+ if (tEncodeI8(&encoder, pReq->replica) < 0) return -1;
for (int32_t i = 0; i < TSDB_MAX_REPLICA; ++i) {
SReplica *pReplica = &pReq->replicas[i];
if (tEncodeSReplica(&encoder, pReplica) < 0) return -1;
@@ -3087,8 +3167,8 @@ int32_t tDeserializeSAlterVnodeReq(void *buf, int32_t bufLen, SAlterVnodeReq *pR
if (tDecodeI8(&decoder, &pReq->walLevel) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->strict) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->cacheLastRow) < 0) return -1;
- if (tDecodeI8(&decoder, &pReq->replica) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->selfIndex) < 0) return -1;
+ if (tDecodeI8(&decoder, &pReq->replica) < 0) return -1;
for (int32_t i = 0; i < TSDB_MAX_REPLICA; ++i) {
SReplica *pReplica = &pReq->replicas[i];
if (tDecodeSReplica(&decoder, pReplica) < 0) return -1;
@@ -3181,7 +3261,6 @@ int32_t tSerializeSDCreateMnodeReq(void *buf, int32_t bufLen, SDCreateMnodeReq *
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;
- if (tEncodeI32(&encoder, pReq->dnodeId) < 0) return -1;
if (tEncodeI8(&encoder, pReq->replica) < 0) return -1;
for (int32_t i = 0; i < TSDB_MAX_REPLICA; ++i) {
SReplica *pReplica = &pReq->replicas[i];
@@ -3199,7 +3278,6 @@ int32_t tDeserializeSDCreateMnodeReq(void *buf, int32_t bufLen, SDCreateMnodeReq
tDecoderInit(&decoder, buf, bufLen);
if (tStartDecode(&decoder) < 0) return -1;
- if (tDecodeI32(&decoder, &pReq->dnodeId) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->replica) < 0) return -1;
for (int32_t i = 0; i < TSDB_MAX_REPLICA; ++i) {
SReplica *pReplica = &pReq->replicas[i];
@@ -3318,9 +3396,11 @@ int32_t tSerializeSExplainRsp(void *buf, int32_t bufLen, SExplainRsp *pRsp) {
if (tEncodeI32(&encoder, pRsp->numOfPlans) < 0) return -1;
for (int32_t i = 0; i < pRsp->numOfPlans; ++i) {
SExplainExecInfo *info = &pRsp->subplanInfo[i];
- if (tEncodeU64(&encoder, info->startupCost) < 0) return -1;
- if (tEncodeU64(&encoder, info->totalCost) < 0) return -1;
+ if (tEncodeDouble(&encoder, info->startupCost) < 0) return -1;
+ if (tEncodeDouble(&encoder, info->totalCost) < 0) return -1;
if (tEncodeU64(&encoder, info->numOfRows) < 0) return -1;
+ if (tEncodeU32(&encoder, info->verboseLen) < 0) return -1;
+ if (tEncodeBinary(&encoder, info->verboseInfo, info->verboseLen) < 0) return -1;
}
tEndEncode(&encoder);
@@ -3341,9 +3421,12 @@ int32_t tDeserializeSExplainRsp(void *buf, int32_t bufLen, SExplainRsp *pRsp) {
if (pRsp->subplanInfo == NULL) return -1;
}
for (int32_t i = 0; i < pRsp->numOfPlans; ++i) {
- if (tDecodeU64(&decoder, &pRsp->subplanInfo[i].startupCost) < 0) return -1;
- if (tDecodeU64(&decoder, &pRsp->subplanInfo[i].totalCost) < 0) return -1;
+ if (tDecodeDouble(&decoder, &pRsp->subplanInfo[i].startupCost) < 0) return -1;
+ if (tDecodeDouble(&decoder, &pRsp->subplanInfo[i].totalCost) < 0) return -1;
if (tDecodeU64(&decoder, &pRsp->subplanInfo[i].numOfRows) < 0) return -1;
+ if (tDecodeU32(&decoder, &pRsp->subplanInfo[i].verboseLen) < 0) return -1;
+ if (tDecodeBinary(&decoder, (uint8_t **)&pRsp->subplanInfo[i].verboseInfo, &pRsp->subplanInfo[i].verboseLen) < 0)
+ return -1;
}
tEndDecode(&decoder);
@@ -3491,31 +3574,6 @@ int32_t tDeserializeSSchedulerHbRsp(void *buf, int32_t bufLen, SSchedulerHbRsp *
void tFreeSSchedulerHbRsp(SSchedulerHbRsp *pRsp) { taosArrayDestroy(pRsp->taskStatus); }
-int32_t tSerializeSQueryTableRsp(void *buf, int32_t bufLen, SQueryTableRsp *pRsp) {
- SEncoder encoder = {0};
- tEncoderInit(&encoder, buf, bufLen);
-
- if (tStartEncode(&encoder) < 0) return -1;
- if (tEncodeI32(&encoder, pRsp->code) < 0) return -1;
- tEndEncode(&encoder);
-
- int32_t tlen = encoder.pos;
- tEncoderClear(&encoder);
- return tlen;
-}
-
-int32_t tDeserializeSQueryTableRsp(void *buf, int32_t bufLen, SQueryTableRsp *pRsp) {
- SDecoder decoder = {0};
- tDecoderInit(&decoder, buf, bufLen);
-
- if (tStartDecode(&decoder) < 0) return -1;
- if (tDecodeI32(&decoder, &pRsp->code) < 0) return -1;
- tEndDecode(&decoder);
-
- tDecoderClear(&decoder);
- return 0;
-}
-
int32_t tSerializeSVCreateTbBatchRsp(void *buf, int32_t bufLen, SVCreateTbBatchRsp *pRsp) {
// SEncoder encoder = {0};
// tEncoderInit(&encoder, buf, bufLen);
@@ -3598,6 +3656,7 @@ int32_t tEncodeTSma(SEncoder *pCoder, const STSma *pSma) {
if (tEncodeI8(pCoder, pSma->intervalUnit) < 0) return -1;
if (tEncodeI8(pCoder, pSma->slidingUnit) < 0) return -1;
if (tEncodeI8(pCoder, pSma->timezoneInt) < 0) return -1;
+ if (tEncodeI32(pCoder, pSma->dstVgId) < 0) return -1;
if (tEncodeCStr(pCoder, pSma->indexName) < 0) return -1;
if (tEncodeI32(pCoder, pSma->exprLen) < 0) return -1;
if (tEncodeI32(pCoder, pSma->tagsFilterLen) < 0) return -1;
@@ -3620,6 +3679,7 @@ int32_t tDecodeTSma(SDecoder *pCoder, STSma *pSma) {
if (tDecodeI8(pCoder, &pSma->version) < 0) return -1;
if (tDecodeI8(pCoder, &pSma->intervalUnit) < 0) return -1;
if (tDecodeI8(pCoder, &pSma->slidingUnit) < 0) return -1;
+ if (tDecodeI32(pCoder, &pSma->dstVgId) < 0) return -1;
if (tDecodeI8(pCoder, &pSma->timezoneInt) < 0) return -1;
if (tDecodeCStrTo(pCoder, pSma->indexName) < 0) return -1;
if (tDecodeI32(pCoder, &pSma->exprLen) < 0) return -1;
@@ -3692,6 +3752,7 @@ int32_t tSerializeSCMCreateStreamReq(void *buf, int32_t bufLen, const SCMCreateS
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->name) < 0) return -1;
+ if (tEncodeCStr(&encoder, pReq->sourceDB) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->targetStbFullName) < 0) return -1;
if (tEncodeI8(&encoder, pReq->igExists) < 0) return -1;
if (tEncodeI32(&encoder, sqlLen) < 0) return -1;
@@ -3717,6 +3778,7 @@ int32_t tDeserializeSCMCreateStreamReq(void *buf, int32_t bufLen, SCMCreateStrea
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->name) < 0) return -1;
+ if (tDecodeCStrTo(&decoder, pReq->sourceDB) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->targetStbFullName) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->igExists) < 0) return -1;
if (tDecodeI32(&decoder, &sqlLen) < 0) return -1;
@@ -3789,7 +3851,7 @@ int tEncodeSVCreateStbReq(SEncoder *pCoder, const SVCreateStbReq *pReq) {
if (tEncodeCStr(pCoder, pReq->name) < 0) return -1;
if (tEncodeI64(pCoder, pReq->suid) < 0) return -1;
if (tEncodeI8(pCoder, pReq->rollup) < 0) return -1;
- if (tEncodeSSchemaWrapper(pCoder, &pReq->schema) < 0) return -1;
+ if (tEncodeSSchemaWrapper(pCoder, &pReq->schemaRow) < 0) return -1;
if (tEncodeSSchemaWrapper(pCoder, &pReq->schemaTag) < 0) return -1;
if (pReq->rollup) {
if (tEncodeSRSmaParam(pCoder, &pReq->pRSmaParam) < 0) return -1;
@@ -3805,7 +3867,7 @@ int tDecodeSVCreateStbReq(SDecoder *pCoder, SVCreateStbReq *pReq) {
if (tDecodeCStr(pCoder, &pReq->name) < 0) return -1;
if (tDecodeI64(pCoder, &pReq->suid) < 0) return -1;
if (tDecodeI8(pCoder, &pReq->rollup) < 0) return -1;
- if (tDecodeSSchemaWrapper(pCoder, &pReq->schema) < 0) return -1;
+ if (tDecodeSSchemaWrapper(pCoder, &pReq->schemaRow) < 0) return -1;
if (tDecodeSSchemaWrapper(pCoder, &pReq->schemaTag) < 0) return -1;
if (pReq->rollup) {
if (tDecodeSRSmaParam(pCoder, &pReq->pRSmaParam) < 0) return -1;
@@ -3843,18 +3905,17 @@ int tEncodeSVCreateTbReq(SEncoder *pCoder, const SVCreateTbReq *pReq) {
if (tStartEncode(pCoder) < 0) return -1;
if (tEncodeI32v(pCoder, pReq->flags) < 0) return -1;
+ if (tEncodeCStr(pCoder, pReq->name) < 0) return -1;
if (tEncodeI64(pCoder, pReq->uid) < 0) return -1;
if (tEncodeI64(pCoder, pReq->ctime) < 0) return -1;
-
- if (tEncodeCStr(pCoder, pReq->name) < 0) return -1;
if (tEncodeI32(pCoder, pReq->ttl) < 0) return -1;
if (tEncodeI8(pCoder, pReq->type) < 0) return -1;
if (pReq->type == TSDB_CHILD_TABLE) {
if (tEncodeI64(pCoder, pReq->ctb.suid) < 0) return -1;
- if (tEncodeBinary(pCoder, pReq->ctb.pTag, kvRowLen(pReq->ctb.pTag)) < 0) return -1;
+ if (tEncodeTag(pCoder, (const STag *)pReq->ctb.pTag) < 0) return -1;
} else if (pReq->type == TSDB_NORMAL_TABLE) {
- if (tEncodeSSchemaWrapper(pCoder, &pReq->ntb.schema) < 0) return -1;
+ if (tEncodeSSchemaWrapper(pCoder, &pReq->ntb.schemaRow) < 0) return -1;
} else {
ASSERT(0);
}
@@ -3864,23 +3925,20 @@ int tEncodeSVCreateTbReq(SEncoder *pCoder, const SVCreateTbReq *pReq) {
}
int tDecodeSVCreateTbReq(SDecoder *pCoder, SVCreateTbReq *pReq) {
- uint32_t len;
-
if (tStartDecode(pCoder) < 0) return -1;
if (tDecodeI32v(pCoder, &pReq->flags) < 0) return -1;
+ if (tDecodeCStr(pCoder, &pReq->name) < 0) return -1;
if (tDecodeI64(pCoder, &pReq->uid) < 0) return -1;
if (tDecodeI64(pCoder, &pReq->ctime) < 0) return -1;
-
- if (tDecodeCStr(pCoder, &pReq->name) < 0) return -1;
if (tDecodeI32(pCoder, &pReq->ttl) < 0) return -1;
if (tDecodeI8(pCoder, &pReq->type) < 0) return -1;
if (pReq->type == TSDB_CHILD_TABLE) {
if (tDecodeI64(pCoder, &pReq->ctb.suid) < 0) return -1;
- if (tDecodeBinary(pCoder, &pReq->ctb.pTag, &len) < 0) return -1;
+ if (tDecodeTag(pCoder, (STag **)&pReq->ctb.pTag) < 0) return -1;
} else if (pReq->type == TSDB_NORMAL_TABLE) {
- if (tDecodeSSchemaWrapper(pCoder, &pReq->ntb.schema) < 0) return -1;
+ if (tDecodeSSchemaWrapper(pCoder, &pReq->ntb.schemaRow) < 0) return -1;
} else {
ASSERT(0);
}
@@ -4292,13 +4350,96 @@ int32_t tDecodeSVAlterTbReq(SDecoder *pDecoder, SVAlterTbReq *pReq) {
int32_t tEncodeSVAlterTbRsp(SEncoder *pEncoder, const SVAlterTbRsp *pRsp) {
if (tStartEncode(pEncoder) < 0) return -1;
if (tEncodeI32(pEncoder, pRsp->code) < 0) return -1;
+ if (tEncodeI32(pEncoder, pRsp->pMeta ? 1 : 0) < 0) return -1;
+ if (pRsp->pMeta) {
+ if (tEncodeSTableMetaRsp(pEncoder, pRsp->pMeta) < 0) return -1;
+ }
tEndEncode(pEncoder);
return 0;
}
int32_t tDecodeSVAlterTbRsp(SDecoder *pDecoder, SVAlterTbRsp *pRsp) {
+ int32_t meta = 0;
if (tStartDecode(pDecoder) < 0) return -1;
if (tDecodeI32(pDecoder, &pRsp->code) < 0) return -1;
+ if (tDecodeI32(pDecoder, &meta) < 0) return -1;
+ if (meta) {
+ pRsp->pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp));
+ if (NULL == pRsp->pMeta) return -1;
+ if (tDecodeSTableMetaRsp(pDecoder, pRsp->pMeta) < 0) return -1;
+ }
+ tEndDecode(pDecoder);
+ return 0;
+}
+
+int32_t tDeserializeSVAlterTbRsp(void *buf, int32_t bufLen, SVAlterTbRsp *pRsp) {
+ int32_t meta = 0;
+ SDecoder decoder = {0};
+ tDecoderInit(&decoder, buf, bufLen);
+
+ if (tStartDecode(&decoder) < 0) return -1;
+ if (tDecodeI32(&decoder, &pRsp->code) < 0) return -1;
+ if (tDecodeI32(&decoder, &meta) < 0) return -1;
+ if (meta) {
+ pRsp->pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp));
+ if (NULL == pRsp->pMeta) return -1;
+ if (tDecodeSTableMetaRsp(&decoder, pRsp->pMeta) < 0) return -1;
+ }
+ tEndDecode(&decoder);
+ tDecoderClear(&decoder);
+ return 0;
+}
+
+int32_t tEncodeSMAlterStbRsp(SEncoder *pEncoder, const SMAlterStbRsp *pRsp) {
+ if (tStartEncode(pEncoder) < 0) return -1;
+ if (tEncodeI32(pEncoder, pRsp->pMeta->pSchemas ? 1 : 0) < 0) return -1;
+ if (pRsp->pMeta->pSchemas) {
+ if (tEncodeSTableMetaRsp(pEncoder, pRsp->pMeta) < 0) return -1;
+ }
+ tEndEncode(pEncoder);
+ return 0;
+}
+
+int32_t tDecodeSMAlterStbRsp(SDecoder *pDecoder, SMAlterStbRsp *pRsp) {
+ int32_t meta = 0;
+ if (tStartDecode(pDecoder) < 0) return -1;
+ if (tDecodeI32(pDecoder, &meta) < 0) return -1;
+ if (meta) {
+ pRsp->pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp));
+ if (NULL == pRsp->pMeta) return -1;
+ if (tDecodeSTableMetaRsp(pDecoder, pRsp->pMeta) < 0) return -1;
+ }
tEndDecode(pDecoder);
return 0;
}
+
+int32_t tDeserializeSMAlterStbRsp(void *buf, int32_t bufLen, SMAlterStbRsp *pRsp) {
+ int32_t meta = 0;
+ SDecoder decoder = {0};
+ tDecoderInit(&decoder, buf, bufLen);
+
+ if (tStartDecode(&decoder) < 0) return -1;
+ if (tDecodeI32(&decoder, &meta) < 0) return -1;
+ if (meta) {
+ pRsp->pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp));
+ if (NULL == pRsp->pMeta) return -1;
+ if (tDecodeSTableMetaRsp(&decoder, pRsp->pMeta) < 0) return -1;
+ }
+ tEndDecode(&decoder);
+ tDecoderClear(&decoder);
+ return 0;
+}
+
+void tFreeSMAlterStbRsp(SMAlterStbRsp* pRsp) {
+ if (NULL == pRsp) {
+ return;
+ }
+
+ if (pRsp->pMeta) {
+ taosMemoryFree(pRsp->pMeta->pSchemas);
+ taosMemoryFree(pRsp->pMeta);
+ }
+}
+
+
+
diff --git a/source/common/src/tname.c b/source/common/src/tname.c
index 104dee261c9f64c7c8859228dcb0595f4b4df2c0..fd055135799a5e508ec535b43d46e9246c8d644e 100644
--- a/source/common/src/tname.c
+++ b/source/common/src/tname.c
@@ -127,7 +127,7 @@ int32_t tNameExtractFullName(const SName* name, char* dst) {
size_t tnameLen = strlen(name->tname);
if (tnameLen > 0) {
- assert(name->type == TSDB_TABLE_NAME_T);
+ /*assert(name->type == TSDB_TABLE_NAME_T);*/
dst[len] = TS_PATH_DELIMITER[0];
memcpy(dst + len + 1, name->tname, tnameLen);
@@ -314,9 +314,9 @@ void buildChildTableName(RandTableName* rName) {
for (int j = 0; j < taosArrayGetSize(rName->tags); ++j) {
SSmlKv* tagKv = taosArrayGetP(rName->tags, j);
taosStringBuilderAppendStringLen(&sb, tagKv->key, tagKv->keyLen);
- if(IS_VAR_DATA_TYPE(tagKv->type)){
+ if (IS_VAR_DATA_TYPE(tagKv->type)) {
taosStringBuilderAppendStringLen(&sb, tagKv->value, tagKv->length);
- }else{
+ } else {
taosStringBuilderAppendStringLen(&sb, (char*)(&(tagKv->value)), tagKv->length);
}
}
diff --git a/source/common/src/trow.c b/source/common/src/trow.c
index 4d0846f6c2957a6d2a1b74dabf60ee76af57287c..c8a28d7f28f747b65fae3802bc392ac6163e5e1e 100644
--- a/source/common/src/trow.c
+++ b/source/common/src/trow.c
@@ -605,6 +605,10 @@ static int32_t tdAppendKvRowToDataCol(STSRow *pRow, STSchema *pSchema, SDataCols
* @param pCols
*/
int32_t tdAppendSTSRowToDataCol(STSRow *pRow, STSchema *pSchema, SDataCols *pCols, bool isMerge) {
+#ifdef TD_DEBUG_PRINT_TSDB_LOAD_DCOLS
+ printf("%s:%d ts: %" PRIi64 " sver:%d maxCols:%" PRIi16 " nCols:%" PRIi16 ", nRows:%d\n", __func__, __LINE__,
+ TD_ROW_KEY(pRow), TD_ROW_SVER(pRow), pCols->maxCols, pCols->numOfCols, pCols->numOfRows);
+#endif
if (TD_IS_TP_ROW(pRow)) {
return tdAppendTpRowToDataCol(pRow, pSchema, pCols, isMerge);
} else if (TD_IS_KV_ROW(pRow)) {
@@ -1191,9 +1195,9 @@ bool tdGetTpRowDataOfCol(STSRowIter *pIter, col_type_t colType, int32_t offset,
}
static FORCE_INLINE int32_t compareKvRowColId(const void *key1, const void *key2) {
- if (*(int16_t *)key1 > ((SColIdx *)key2)->colId) {
+ if (*(col_id_t *)key1 > ((SKvRowIdx *)key2)->colId) {
return 1;
- } else if (*(int16_t *)key1 < ((SColIdx *)key2)->colId) {
+ } else if (*(col_id_t *)key1 < ((SKvRowIdx *)key2)->colId) {
return -1;
} else {
return 0;
diff --git a/source/common/src/ttime.c b/source/common/src/ttime.c
index 69ba964187fe44f33b4df1ab8b5c7706a8569eec..10ba58af298c59306badc2e299e588e3ec46874f 100644
--- a/source/common/src/ttime.c
+++ b/source/common/src/ttime.c
@@ -184,6 +184,16 @@ int32_t parseTimezone(char* str, int64_t* tzOffset) {
i++;
+ int32_t j = i;
+ while (str[j]) {
+ if ((str[j] >= '0' && str[j] <= '9') || str[j] == ':') {
+ ++j;
+ continue;
+ }
+
+ return -1;
+ }
+
char* sep = strchr(&str[i], ':');
if (sep != NULL) {
int32_t len = (int32_t)(sep - &str[i]);
@@ -511,21 +521,21 @@ int32_t convertStringToTimestamp(int16_t type, char *inputData, int64_t timePrec
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_VARBINARY) {
newColData = taosMemoryCalloc(1, charLen + 1);
memcpy(newColData, varDataVal(inputData), charLen);
- bool ret = taosParseTime(newColData, timeVal, charLen, (int32_t)timePrec, tsDaylight);
+ int32_t ret = taosParseTime(newColData, timeVal, charLen, (int32_t)timePrec, tsDaylight);
if (ret != TSDB_CODE_SUCCESS) {
taosMemoryFree(newColData);
- return ret;
+ return TSDB_CODE_INVALID_TIMESTAMP;
}
taosMemoryFree(newColData);
} else if (type == TSDB_DATA_TYPE_NCHAR) {
- newColData = taosMemoryCalloc(1, charLen / TSDB_NCHAR_SIZE + 1);
+ newColData = taosMemoryCalloc(1, charLen + TSDB_NCHAR_SIZE);
int len = taosUcs4ToMbs((TdUcs4 *)varDataVal(inputData), charLen, newColData);
if (len < 0){
taosMemoryFree(newColData);
return TSDB_CODE_FAILED;
}
newColData[len] = 0;
- bool ret = taosParseTime(newColData, timeVal, len + 1, (int32_t)timePrec, tsDaylight);
+ int32_t ret = taosParseTime(newColData, timeVal, len + 1, (int32_t)timePrec, tsDaylight);
if (ret != TSDB_CODE_SUCCESS) {
taosMemoryFree(newColData);
return ret;
@@ -773,7 +783,7 @@ int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precisio
// 2020-07-03 17:48:42
// and the parameter can also be a variable.
const char* fmtts(int64_t ts) {
- static char buf[96];
+ static char buf[96] = {0};
size_t pos = 0;
struct tm tm;
diff --git a/source/dnode/mgmt/exe/dmMain.c b/source/dnode/mgmt/exe/dmMain.c
index 1a04e83f8127c0c71570c2cef4f300f05da11e67..2b0f6a01a0b87cdee8d071d1e53bad398ea90f97 100644
--- a/source/dnode/mgmt/exe/dmMain.c
+++ b/source/dnode/mgmt/exe/dmMain.c
@@ -216,7 +216,7 @@ int main(int argc, char const *argv[]) {
return -1;
}
- dInfo("start to run dnode");
+ dInfo("start to open dnode");
dmSetSignalHandle();
int32_t code = dmRun();
dInfo("shutting down the service");
diff --git a/source/dnode/mgmt/mgmt_dnode/inc/dmInt.h b/source/dnode/mgmt/mgmt_dnode/inc/dmInt.h
index ae8879326d6da92b6bd5ab3ea89584b347817fd4..ee811c0071cbd07c03edb7aaf117c3c4461adebb 100644
--- a/source/dnode/mgmt/mgmt_dnode/inc/dmInt.h
+++ b/source/dnode/mgmt/mgmt_dnode/inc/dmInt.h
@@ -35,6 +35,7 @@ typedef struct SDnodeMgmt {
SendMonitorReportFp sendMonitorReportFp;
GetVnodeLoadsFp getVnodeLoadsFp;
GetMnodeLoadsFp getMnodeLoadsFp;
+ GetQnodeLoadsFp getQnodeLoadsFp;
} SDnodeMgmt;
// dmHandle.c
@@ -58,4 +59,4 @@ void dmStopWorker(SDnodeMgmt *pMgmt);
}
#endif
-#endif /*_TD_DND_QNODE_INT_H_*/
\ No newline at end of file
+#endif /*_TD_DND_QNODE_INT_H_*/
diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c
index f7337f482f23945b99893dee242d9af9a10631a6..fbd46db183d3024e40bb472decf80bf4c3936443 100644
--- a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c
+++ b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c
@@ -75,8 +75,11 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) {
(*pMgmt->getVnodeLoadsFp)(&vinfo);
req.pVloads = vinfo.pVloads;
- SMonMloadInfo minfo = {0};
+ SMonMloadInfo minfo = {0};
(*pMgmt->getMnodeLoadsFp)(&minfo);
+ req.mload = minfo.load;
+
+ (*pMgmt->getQnodeLoadsFp)(&req.qload);
int32_t contLen = tSerializeSStatusReq(NULL, 0, &req);
void *pHead = rpcMallocCont(contLen);
@@ -91,6 +94,13 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) {
SEpSet epSet = {0};
dmGetMnodeEpSet(pMgmt->pData, &epSet);
rpcSendRecv(pMgmt->msgCb.clientRpc, &epSet, &rpcMsg, &rpcRsp);
+ if (rpcRsp.code != 0) {
+ dError("failed to send status msg since %s, numOfEps:%d inUse:%d", tstrerror(rpcRsp.code), epSet.numOfEps,
+ epSet.inUse);
+ for (int32_t i = 0; i < epSet.numOfEps; ++i) {
+ dDebug("index:%d, mnode ep:%s:%u", i, epSet.eps[i].fqdn, epSet.eps[i].port);
+ }
+ }
dmProcessStatusRsp(pMgmt, &rpcRsp);
}
diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmInt.c b/source/dnode/mgmt/mgmt_dnode/src/dmInt.c
index 59c926545e6f565a124a4846532e4f74efeecd5e..d2db1a4a62fd157b2df235133c85bb6e38ac680d 100644
--- a/source/dnode/mgmt/mgmt_dnode/src/dmInt.c
+++ b/source/dnode/mgmt/mgmt_dnode/src/dmInt.c
@@ -48,6 +48,7 @@ static int32_t dmOpenMgmt(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) {
pMgmt->sendMonitorReportFp = pInput->sendMonitorReportFp;
pMgmt->getVnodeLoadsFp = pInput->getVnodeLoadsFp;
pMgmt->getMnodeLoadsFp = pInput->getMnodeLoadsFp;
+ pMgmt->getQnodeLoadsFp = pInput->getQnodeLoadsFp;
if (dmStartWorker(pMgmt) != 0) {
return -1;
diff --git a/source/dnode/mgmt/mgmt_mnode/inc/mmInt.h b/source/dnode/mgmt/mgmt_mnode/inc/mmInt.h
index 030d4b309e3e0a4a70e706cd5606d495323d819d..bd034fe7d6c21dcf31e0ca4e9e83d7a23fa28fb8 100644
--- a/source/dnode/mgmt/mgmt_mnode/inc/mmInt.h
+++ b/source/dnode/mgmt/mgmt_mnode/inc/mmInt.h
@@ -36,7 +36,6 @@ typedef struct SMnodeMgmt {
SSingleWorker monitorWorker;
SReplica replicas[TSDB_MAX_REPLICA];
int8_t replica;
- int8_t selfIndex;
bool stopped;
int32_t refCount;
TdThreadRwlock lock;
@@ -47,7 +46,6 @@ int32_t mmReadFile(SMnodeMgmt *pMgmt, bool *pDeployed);
int32_t mmWriteFile(SMnodeMgmt *pMgmt, SDCreateMnodeReq *pMsg, bool deployed);
// mmInt.c
-int32_t mmAlter(SMnodeMgmt *pMgmt, SDAlterMnodeReq *pMsg);
int32_t mmAcquire(SMnodeMgmt *pMgmt);
void mmRelease(SMnodeMgmt *pMgmt);
diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmFile.c b/source/dnode/mgmt/mgmt_mnode/src/mmFile.c
index 2aa108777078de3e9b2b8a2323c0d28572a15db2..478d6abd52cdba9c0a2f99acd3001e281ade6b8d 100644
--- a/source/dnode/mgmt/mgmt_mnode/src/mmFile.c
+++ b/source/dnode/mgmt/mgmt_mnode/src/mmFile.c
@@ -53,43 +53,45 @@ int32_t mmReadFile(SMnodeMgmt *pMgmt, bool *pDeployed) {
*pDeployed = deployed->valueint;
cJSON *mnodes = cJSON_GetObjectItem(root, "mnodes");
- if (!mnodes || mnodes->type != cJSON_Array) {
- dError("failed to read %s since nodes not found", file);
- goto _OVER;
- }
-
- pMgmt->replica = cJSON_GetArraySize(mnodes);
- if (pMgmt->replica <= 0 || pMgmt->replica > TSDB_MAX_REPLICA) {
- dError("failed to read %s since mnodes size %d invalid", file, pMgmt->replica);
- goto _OVER;
- }
-
- for (int32_t i = 0; i < pMgmt->replica; ++i) {
- cJSON *node = cJSON_GetArrayItem(mnodes, i);
- if (node == NULL) break;
-
- SReplica *pReplica = &pMgmt->replicas[i];
-
- cJSON *id = cJSON_GetObjectItem(node, "id");
- if (!id || id->type != cJSON_Number) {
- dError("failed to read %s since id not found", file);
+ if (mnodes != NULL) {
+ if (!mnodes || mnodes->type != cJSON_Array) {
+ dError("failed to read %s since nodes not found", file);
goto _OVER;
}
- pReplica->id = id->valueint;
- cJSON *fqdn = cJSON_GetObjectItem(node, "fqdn");
- if (!fqdn || fqdn->type != cJSON_String || fqdn->valuestring == NULL) {
- dError("failed to read %s since fqdn not found", file);
+ pMgmt->replica = cJSON_GetArraySize(mnodes);
+ if (pMgmt->replica <= 0 || pMgmt->replica > TSDB_MAX_REPLICA) {
+ dError("failed to read %s since mnodes size %d invalid", file, pMgmt->replica);
goto _OVER;
}
- tstrncpy(pReplica->fqdn, fqdn->valuestring, TSDB_FQDN_LEN);
- cJSON *port = cJSON_GetObjectItem(node, "port");
- if (!port || port->type != cJSON_Number) {
- dError("failed to read %s since port not found", file);
- goto _OVER;
+ for (int32_t i = 0; i < pMgmt->replica; ++i) {
+ cJSON *node = cJSON_GetArrayItem(mnodes, i);
+ if (node == NULL) break;
+
+ SReplica *pReplica = &pMgmt->replicas[i];
+
+ cJSON *id = cJSON_GetObjectItem(node, "id");
+ if (!id || id->type != cJSON_Number) {
+ dError("failed to read %s since id not found", file);
+ goto _OVER;
+ }
+ pReplica->id = id->valueint;
+
+ cJSON *fqdn = cJSON_GetObjectItem(node, "fqdn");
+ if (!fqdn || fqdn->type != cJSON_String || fqdn->valuestring == NULL) {
+ dError("failed to read %s since fqdn not found", file);
+ goto _OVER;
+ }
+ tstrncpy(pReplica->fqdn, fqdn->valuestring, TSDB_FQDN_LEN);
+
+ cJSON *port = cJSON_GetObjectItem(node, "port");
+ if (!port || port->type != cJSON_Number) {
+ dError("failed to read %s since port not found", file);
+ goto _OVER;
+ }
+ pReplica->port = port->valueint;
}
- pReplica->port = port->valueint;
}
code = 0;
@@ -122,21 +124,23 @@ int32_t mmWriteFile(SMnodeMgmt *pMgmt, SDCreateMnodeReq *pMsg, bool deployed) {
char *content = taosMemoryCalloc(1, maxLen + 1);
len += snprintf(content + len, maxLen - len, "{\n");
- len += snprintf(content + len, maxLen - len, " \"mnodes\": [{\n");
int8_t replica = (pMsg != NULL ? pMsg->replica : pMgmt->replica);
- for (int32_t i = 0; i < replica; ++i) {
- SReplica *pReplica = &pMgmt->replicas[i];
- if (pMsg != NULL) {
- pReplica = &pMsg->replicas[i];
- }
- len += snprintf(content + len, maxLen - len, " \"id\": %d,\n", pReplica->id);
- len += snprintf(content + len, maxLen - len, " \"fqdn\": \"%s\",\n", pReplica->fqdn);
- len += snprintf(content + len, maxLen - len, " \"port\": %u\n", pReplica->port);
- if (i < replica - 1) {
- len += snprintf(content + len, maxLen - len, " },{\n");
- } else {
- len += snprintf(content + len, maxLen - len, " }],\n");
+ if (replica > 0) {
+ len += snprintf(content + len, maxLen - len, " \"mnodes\": [{\n");
+ for (int32_t i = 0; i < replica; ++i) {
+ SReplica *pReplica = &pMgmt->replicas[i];
+ if (pMsg != NULL) {
+ pReplica = &pMsg->replicas[i];
+ }
+ len += snprintf(content + len, maxLen - len, " \"id\": %d,\n", pReplica->id);
+ len += snprintf(content + len, maxLen - len, " \"fqdn\": \"%s\",\n", pReplica->fqdn);
+ len += snprintf(content + len, maxLen - len, " \"port\": %u\n", pReplica->port);
+ if (i < replica - 1) {
+ len += snprintf(content + len, maxLen - len, " },{\n");
+ } else {
+ len += snprintf(content + len, maxLen - len, " }],\n");
+ }
}
}
diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c
index a894a4962dddc632d583d1e4d5bc5a82fbf07f52..5c5316e3a3ba5f51a59e51b10bdc7663970dd71d 100644
--- a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c
+++ b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c
@@ -79,7 +79,7 @@ int32_t mmProcessCreateReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg) {
return -1;
}
- if (createReq.replica <= 1 || (createReq.dnodeId != pInput->pData->dnodeId && pInput->pData->dnodeId != 0)) {
+ if (createReq.replica != 1) {
terrno = TSDB_CODE_INVALID_OPTION;
dError("failed to create mnode since %s", terrstr());
return -1;
@@ -124,22 +124,6 @@ int32_t mmProcessDropReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg) {
return 0;
}
-int32_t mmProcessAlterReq(SMnodeMgmt *pMgmt, SRpcMsg *pMsg) {
- SDAlterMnodeReq alterReq = {0};
- if (tDeserializeSDCreateMnodeReq(pMsg->pCont, pMsg->contLen, &alterReq) != 0) {
- terrno = TSDB_CODE_INVALID_MSG;
- return -1;
- }
-
- if (pMgmt->pData->dnodeId != 0 && alterReq.dnodeId != pMgmt->pData->dnodeId) {
- terrno = TSDB_CODE_INVALID_OPTION;
- dError("failed to alter mnode since %s, input:%d cur:%d", terrstr(), alterReq.dnodeId, pMgmt->pData->dnodeId);
- return -1;
- } else {
- return mmAlter(pMgmt, &alterReq);
- }
-}
-
SArray *mmGetMsgHandles() {
int32_t code = -1;
SArray *pArray = taosArrayInit(64, sizeof(SMgmtHandle));
@@ -213,6 +197,8 @@ SArray *mmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_MND_SUBSCRIBE, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_MQ_COMMIT_OFFSET, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_MQ_ASK_EP, mmPutNodeMsgToReadQueue, 0) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_MND_MQ_DROP_CGROUP, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_MND_MQ_DROP_CGROUP_RSP, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_VG_CHANGE_RSP, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_VG_DELETE_RSP, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_CREATE_STREAM, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
@@ -233,9 +219,9 @@ SArray *mmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_TASK, mmPutNodeMsgToQueryQueue, 1) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY_HEARTBEAT, mmPutNodeMsgToQueryQueue, 1) == NULL) goto _OVER;
- if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_VNODE_RSP, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
- if (dmSetMgmtHandle(pArray, TDMT_VND_SYNC_VNODE_RSP, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
- if (dmSetMgmtHandle(pArray, TDMT_VND_COMPACT_VNODE_RSP, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_CONFIG_RSP, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_REPLICA_RSP, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_VND_COMPACT_RSP, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_SYNC_TIMEOUT, mmPutNodeMsgToSyncQueue, 1) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_SYNC_PING, mmPutNodeMsgToSyncQueue, 1) == NULL) goto _OVER;
diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmInt.c b/source/dnode/mgmt/mgmt_mnode/src/mmInt.c
index 43113d05af5291295f9f27e7bc767a0617117ba9..1b973f3045d5dd4e2f6e5fcc4e25413068af6af5 100644
--- a/source/dnode/mgmt/mgmt_mnode/src/mmInt.c
+++ b/source/dnode/mgmt/mgmt_mnode/src/mmInt.c
@@ -39,71 +39,38 @@ static int32_t mmRequire(const SMgmtInputOpt *pInput, bool *required) {
}
static void mmBuildOptionForDeploy(SMnodeMgmt *pMgmt, const SMgmtInputOpt *pInput, SMnodeOpt *pOption) {
+ pOption->standby = false;
+ pOption->deploy = true;
pOption->msgCb = pMgmt->msgCb;
+ pOption->dnodeId = pMgmt->pData->dnodeId;
+
pOption->replica = 1;
pOption->selfIndex = 0;
+
SReplica *pReplica = &pOption->replicas[0];
pReplica->id = 1;
pReplica->port = tsServerPort;
tstrncpy(pReplica->fqdn, tsLocalFqdn, TSDB_FQDN_LEN);
- pOption->deploy = true;
-
- pMgmt->selfIndex = pOption->selfIndex;
- pMgmt->replica = pOption->replica;
- memcpy(&pMgmt->replicas, pOption->replicas, sizeof(SReplica) * TSDB_MAX_REPLICA);
}
static void mmBuildOptionForOpen(SMnodeMgmt *pMgmt, SMnodeOpt *pOption) {
- pOption->msgCb = pMgmt->msgCb;
- pOption->selfIndex = pMgmt->selfIndex;
- pOption->replica = pMgmt->replica;
- memcpy(&pOption->replicas, pMgmt->replicas, sizeof(SReplica) * TSDB_MAX_REPLICA);
pOption->deploy = false;
-}
-
-static int32_t mmBuildOptionFromReq(SMnodeMgmt *pMgmt, SMnodeOpt *pOption, SDCreateMnodeReq *pCreate) {
+ pOption->standby = false;
pOption->msgCb = pMgmt->msgCb;
- pOption->replica = pCreate->replica;
- pOption->selfIndex = -1;
- for (int32_t i = 0; i < pCreate->replica; ++i) {
- SReplica *pReplica = &pOption->replicas[i];
- pReplica->id = pCreate->replicas[i].id;
- pReplica->port = pCreate->replicas[i].port;
- memcpy(pReplica->fqdn, pCreate->replicas[i].fqdn, TSDB_FQDN_LEN);
- if (pReplica->id == pMgmt->pData->dnodeId) {
- pOption->selfIndex = i;
+ pOption->dnodeId = pMgmt->pData->dnodeId;
+
+ if (pMgmt->replica > 0) {
+ pOption->standby = true;
+ pOption->replica = 1;
+ pOption->selfIndex = 0;
+ SReplica *pReplica = &pOption->replicas[0];
+ for (int32_t i = 0; i < pMgmt->replica; ++i) {
+ if (pMgmt->replicas[i].id != pMgmt->pData->dnodeId) continue;
+ pReplica->id = pMgmt->replicas[i].id;
+ pReplica->port = pMgmt->replicas[i].port;
+ memcpy(pReplica->fqdn, pMgmt->replicas[i].fqdn, TSDB_FQDN_LEN);
}
}
-
- if (pOption->selfIndex == -1) {
- dError("failed to build mnode options since %s", terrstr());
- return -1;
- }
- pOption->deploy = true;
-
- pMgmt->selfIndex = pOption->selfIndex;
- pMgmt->replica = pOption->replica;
- memcpy(&pMgmt->replicas, pOption->replicas, sizeof(SReplica) * TSDB_MAX_REPLICA);
- return 0;
-}
-
-int32_t mmAlter(SMnodeMgmt *pMgmt, SDAlterMnodeReq *pMsg) {
- SMnodeOpt option = {0};
- if (mmBuildOptionFromReq(pMgmt, &option, pMsg) != 0) {
- return -1;
- }
-
- if (mndAlter(pMgmt->pMnode, &option) != 0) {
- return -1;
- }
-
- bool deployed = true;
- if (mmWriteFile(pMgmt, pMsg, deployed) != 0) {
- dError("failed to write mnode file since %s", terrstr());
- return -1;
- }
-
- return 0;
}
static void mmClose(SMnodeMgmt *pMgmt) {
@@ -177,7 +144,8 @@ static int32_t mmOpen(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) {
}
tmsgReportStartup("mnode-worker", "initialized");
- if (!deployed) {
+ if (!deployed || pMgmt->replica > 0) {
+ pMgmt->replica = 0;
deployed = true;
if (mmWriteFile(pMgmt, NULL, deployed) != 0) {
dError("failed to write mnode file since %s", terrstr());
diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c b/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c
index 45dec1153f16ae96b5012f68a743024bdaf1239d..1de9875d063933fe1f35bb5b5770c1aabc6b8fc3 100644
--- a/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c
+++ b/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c
@@ -32,9 +32,6 @@ static void mmProcessQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
dTrace("msg:%p, get from mnode queue", pMsg);
switch (pMsg->msgType) {
- case TDMT_DND_ALTER_MNODE:
- code = mmProcessAlterReq(pMgmt, pMsg);
- break;
case TDMT_MON_MM_INFO:
code = mmProcessGetMonitorInfoReq(pMgmt, pMsg);
break;
@@ -43,7 +40,7 @@ static void mmProcessQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
break;
default:
pMsg->info.node = pMgmt->pMnode;
- code = mndProcessMsg(pMsg);
+ code = mndProcessRpcMsg(pMsg);
}
if (IsReq(pMsg) && pMsg->info.handle != NULL && code != TSDB_CODE_ACTION_IN_PROGRESS) {
@@ -58,8 +55,19 @@ static void mmProcessQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
static void mmProcessSyncQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
SMnodeMgmt *pMgmt = pInfo->ahandle;
+ dTrace("msg:%p, get from mnode-sync queue", pMsg);
+
pMsg->info.node = pMgmt->pMnode;
- mndProcessSyncMsg(pMsg);
+
+ SMsgHead *pHead = pMsg->pCont;
+ pHead->contLen = ntohl(pHead->contLen);
+ pHead->vgId = ntohl(pHead->vgId);
+
+ int32_t code = mndProcessSyncMsg(pMsg);
+
+ dTrace("msg:%p, is freed, code:0x%x", pMsg, code);
+ rpcFreeCont(pMsg->pCont);
+ taosFreeQitem(pMsg);
}
static int32_t mmPutNodeMsgToWorker(SSingleWorker *pWorker, SRpcMsg *pMsg) {
@@ -111,9 +119,16 @@ int32_t mmPutRpcMsgToReadQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg) {
}
int32_t mmPutRpcMsgToSyncQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg) {
- if (mmAcquire(pMgmt) != 0) return -1;
- int32_t code = mmPutRpcMsgToWorker(&pMgmt->syncWorker, pMsg);
- mmRelease(pMgmt);
+ int32_t code = -1;
+ if (mmAcquire(pMgmt) == 0) {
+ code = mmPutRpcMsgToWorker(&pMgmt->syncWorker, pMsg);
+ mmRelease(pMgmt);
+ }
+
+ if (code != 0) {
+ rpcFreeCont(pMsg->pCont);
+ pMsg->pCont = NULL;
+ }
return code;
}
diff --git a/source/dnode/mgmt/mgmt_qnode/src/qmHandle.c b/source/dnode/mgmt/mgmt_qnode/src/qmHandle.c
index c4b1ab63e46d62720131953bbddc928fc351d31c..864f5b485afdea2c798cbc35a12466ecfa1b69b8 100644
--- a/source/dnode/mgmt/mgmt_qnode/src/qmHandle.c
+++ b/source/dnode/mgmt/mgmt_qnode/src/qmHandle.c
@@ -16,7 +16,19 @@
#define _DEFAULT_SOURCE
#include "qmInt.h"
-void qmGetMonitorInfo(SQnodeMgmt *pMgmt, SMonQmInfo *qmInfo) {}
+void qmGetMonitorInfo(SQnodeMgmt *pMgmt, SMonQmInfo *qmInfo) {
+ SQnodeLoad qload = {0};
+ qndGetLoad(pMgmt->pQnode, &qload);
+
+ qload.dnodeId = pMgmt->pData->dnodeId;
+
+}
+
+void qmGetQnodeLoads(SQnodeMgmt *pMgmt, SQnodeLoad *pInfo) {
+ qndGetLoad(pMgmt->pQnode, pInfo);
+
+ pInfo->dnodeId = pMgmt->pData->dnodeId;
+}
int32_t qmProcessGetMonitorInfoReq(SQnodeMgmt *pMgmt, SRpcMsg *pMsg) {
SMonQmInfo qmInfo = {0};
@@ -101,8 +113,6 @@ SArray *qmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_VND_FETCH_RSP, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY_HEARTBEAT, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER;
- if (dmSetMgmtHandle(pArray, TDMT_VND_RES_READY, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER;
- if (dmSetMgmtHandle(pArray, TDMT_VND_TASKS_STATUS, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_CANCEL_TASK, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_TASK, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER;
diff --git a/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c b/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c
index 35c94b7fbe786434cfb59191c8899949099d0325..e7fc261b67a8a6416cdbafae07552a5c9576bc22 100644
--- a/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c
+++ b/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c
@@ -36,7 +36,7 @@ static void qmProcessQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
code = qmProcessGetMonitorInfoReq(pMgmt, pMsg);
break;
default:
- code = qndProcessQueryMsg(pMgmt->pQnode, pMsg);
+ code = qndProcessQueryMsg(pMgmt->pQnode, pInfo->timestamp, pMsg);
break;
}
diff --git a/source/dnode/mgmt/mgmt_snode/src/smHandle.c b/source/dnode/mgmt/mgmt_snode/src/smHandle.c
index bf1bb145b7548f1e50958e4cf718ebdc627bdfcf..a3aab439debfbd536312a2b5cbc104b4cf0fa2e2 100644
--- a/source/dnode/mgmt/mgmt_snode/src/smHandle.c
+++ b/source/dnode/mgmt/mgmt_snode/src/smHandle.c
@@ -96,7 +96,7 @@ SArray *smGetMsgHandles() {
// Requests handled by SNODE
if (dmSetMgmtHandle(pArray, TDMT_SND_TASK_DEPLOY, smPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER;
- if (dmSetMgmtHandle(pArray, TDMT_SND_TASK_EXEC, smPutNodeMsgToExecQueue, 0) == NULL) goto _OVER;
+ /*if (dmSetMgmtHandle(pArray, TDMT_SND_TASK_EXEC, smPutNodeMsgToExecQueue, 0) == NULL) goto _OVER;*/
code = 0;
_OVER:
diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
index f28209f9828062f8ed27f194914b4ac11848735a..b0c9566184be0f3ac400681b9a9dd2f56f704c64 100644
--- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
+++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
@@ -138,8 +138,9 @@ static void vmGenerateVnodeCfg(SCreateVnodeReq *pCreate, SVnodeCfg *pCfg) {
pCfg->dbId = pCreate->dbUid;
pCfg->szPage = pCreate->pageSize * 1024;
pCfg->szCache = pCreate->pages;
- pCfg->szBuf = pCreate->buffer * 1024 * 1024;
+ pCfg->szBuf = (uint64_t)pCreate->buffer * 1024 * 1024;
pCfg->isWeak = true;
+ pCfg->isTsma = pCreate->isTsma;
pCfg->tsdbCfg.compression = pCreate->compression;
pCfg->tsdbCfg.precision = pCreate->precision;
pCfg->tsdbCfg.days = pCreate->daysPerFile;
@@ -149,20 +150,26 @@ static void vmGenerateVnodeCfg(SCreateVnodeReq *pCreate, SVnodeCfg *pCfg) {
pCfg->tsdbCfg.minRows = pCreate->minRows;
pCfg->tsdbCfg.maxRows = pCreate->maxRows;
for (size_t i = 0; i < taosArrayGetSize(pCreate->pRetensions); ++i) {
- memcpy(&pCfg->tsdbCfg.retentions[i], taosArrayGet(pCreate->pRetensions, i), sizeof(SRetention));
+ SRetention *pRetention = &pCfg->tsdbCfg.retentions[i];
+ memcpy(pRetention, taosArrayGet(pCreate->pRetensions, i), sizeof(SRetention));
+ if (i == 0) {
+ if ((pRetention->freq > 0 && pRetention->keep > 0)) pCfg->isRsma = 1;
+ }
}
+
pCfg->walCfg.vgId = pCreate->vgId;
pCfg->hashBegin = pCreate->hashBegin;
pCfg->hashEnd = pCreate->hashEnd;
pCfg->hashMethod = pCreate->hashMethod;
+ pCfg->standby = pCfg->standby;
pCfg->syncCfg.myIndex = pCreate->selfIndex;
pCfg->syncCfg.replicaNum = pCreate->replica;
memset(&pCfg->syncCfg.nodeInfo, 0, sizeof(pCfg->syncCfg.nodeInfo));
for (int i = 0; i < pCreate->replica; ++i) {
- pCfg->syncCfg.nodeInfo[i].nodePort = pCreate->replicas[i].port;
- snprintf(pCfg->syncCfg.nodeInfo[i].nodeFqdn, sizeof(pCfg->syncCfg.nodeInfo[i].nodeFqdn), "%s",
- pCreate->replicas[i].fqdn);
+ SNodeInfo *pNode = &pCfg->syncCfg.nodeInfo[i];
+ pNode->nodePort = pCreate->replicas[i].port;
+ tstrncpy(pNode->nodeFqdn, pCreate->replicas[i].fqdn, sizeof(pNode->nodeFqdn));
}
}
@@ -173,8 +180,28 @@ static void vmGenerateWrapperCfg(SVnodeMgmt *pMgmt, SCreateVnodeReq *pCreate, SW
snprintf(pCfg->path, sizeof(pCfg->path), "%s%svnode%d", pMgmt->path, TD_DIRSEP, pCreate->vgId);
}
+static int32_t vmTsmaAdjustDays(SVnodeCfg *pCfg, SCreateVnodeReq *pReq) {
+ if (pReq->isTsma) {
+ SMsgHead *smaMsg = pReq->pTsma;
+ uint32_t contLen = (uint32_t)(htonl(smaMsg->contLen) - sizeof(SMsgHead));
+ return smaGetTSmaDays(pCfg, POINTER_SHIFT(smaMsg, sizeof(SMsgHead)), contLen, &pCfg->tsdbCfg.days);
+ }
+ return 0;
+}
+
+static int32_t vmTsmaProcessCreate(SVnode *pVnode, SCreateVnodeReq *pReq) {
+ if (pReq->isTsma) {
+ SMsgHead *smaMsg = pReq->pTsma;
+ uint32_t contLen = (uint32_t)(htonl(smaMsg->contLen) - sizeof(SMsgHead));
+ return vnodeProcessCreateTSma(pVnode, POINTER_SHIFT(smaMsg, sizeof(SMsgHead)), contLen);
+ }
+ return 0;
+}
+
int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
SCreateVnodeReq createReq = {0};
+ SVnodeCfg vnodeCfg = {0};
+ SWrapperCfg wrapperCfg = {0};
int32_t code = -1;
char path[TSDB_FILENAME_LEN] = {0};
@@ -183,12 +210,16 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
return -1;
}
- dDebug("vgId:%d, create vnode req is received", createReq.vgId);
-
- SVnodeCfg vnodeCfg = {0};
+ dDebug("vgId:%d, create vnode req is received, tsma:%d standby:%d", createReq.vgId, createReq.isTsma,
+ createReq.standby);
vmGenerateVnodeCfg(&createReq, &vnodeCfg);
- SWrapperCfg wrapperCfg = {0};
+ if (vmTsmaAdjustDays(&vnodeCfg, &createReq) < 0) {
+ dError("vgId:%d, failed to adjust tsma days since %s", createReq.vgId, terrstr());
+ code = terrno;
+ goto _OVER;
+ }
+
vmGenerateWrapperCfg(pMgmt, &createReq, &wrapperCfg);
SVnodeObj *pVnode = vmAcquireVnode(pMgmt, createReq.vgId);
@@ -197,19 +228,21 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
tFreeSCreateVnodeReq(&createReq);
vmReleaseVnode(pMgmt, pVnode);
terrno = TSDB_CODE_NODE_ALREADY_DEPLOYED;
- return -1;
+ code = terrno;
+ goto _OVER;
}
snprintf(path, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, vnodeCfg.vgId);
if (vnodeCreate(path, &vnodeCfg, pMgmt->pTfs) < 0) {
tFreeSCreateVnodeReq(&createReq);
dError("vgId:%d, failed to create vnode since %s", createReq.vgId, terrstr());
- return -1;
+ code = terrno;
+ goto _OVER;
}
SVnode *pImpl = vnodeOpen(path, pMgmt->pTfs, pMgmt->msgCb);
if (pImpl == NULL) {
- dError("vgId:%d, failed to create vnode since %s", createReq.vgId, terrstr());
+ dError("vgId:%d, failed to open vnode since %s", createReq.vgId, terrstr());
code = terrno;
goto _OVER;
}
@@ -217,6 +250,14 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
code = vmOpenVnode(pMgmt, &wrapperCfg, pImpl);
if (code != 0) {
dError("vgId:%d, failed to open vnode since %s", createReq.vgId, terrstr());
+ code = terrno;
+ goto _OVER;
+ }
+
+ code = vmTsmaProcessCreate(pImpl, &createReq);
+ if (code != 0) {
+ dError("vgId:%d, failed to create tsma since %s", createReq.vgId, terrstr());
+ code = terrno;
goto _OVER;
}
@@ -227,12 +268,17 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
}
code = vmWriteVnodeListToFile(pMgmt);
- if (code != 0) goto _OVER;
+ if (code != 0) {
+ code = terrno;
+ goto _OVER;
+ }
_OVER:
if (code != 0) {
vnodeClose(pImpl);
vnodeDestroy(path, pMgmt->pTfs);
+ } else {
+ dInfo("vgId:%d, vnode is created", createReq.vgId);
}
tFreeSCreateVnodeReq(&createReq);
@@ -292,8 +338,6 @@ SArray *vmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_CONNECT, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_DISCONNECT, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
// if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_SET_CUR, vmPutNodeMsgToWriteQueue, 0)== NULL) goto _OVER;
- if (dmSetMgmtHandle(pArray, TDMT_VND_RES_READY, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
- if (dmSetMgmtHandle(pArray, TDMT_VND_TASKS_STATUS, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_CANCEL_TASK, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_TASK, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_CREATE_STB, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
@@ -310,16 +354,14 @@ SArray *vmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_VND_CONSUME, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_DEPLOY, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY_HEARTBEAT, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
- if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_PIPE_EXEC, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
- if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_MERGE_EXEC, vmPutNodeMsgToMergeQueue, 0) == NULL) goto _OVER;
- if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_WRITE_EXEC, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TRIGGER, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_RUN, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_DISPATCH, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_RECOVER, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
- if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_VNODE, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
- if (dmSetMgmtHandle(pArray, TDMT_VND_COMPACT_VNODE, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_REPLICA, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_CONFIG, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_VND_COMPACT, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_DND_CREATE_VNODE, vmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_DND_DROP_VNODE, vmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER;
diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c
index 6183794bdd9c87da091a64c5333ad42f70dd824e..03db74abd7dfc30140cdd9a36ded80d9ab3072cc 100644
--- a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c
+++ b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c
@@ -91,51 +91,52 @@ static void vmProcessFetchQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
}
static void vmProcessWriteQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
+ int32_t code = 0;
+ SRpcMsg *pMsg = NULL;
SVnodeObj *pVnode = pInfo->ahandle;
- SArray *pArray = taosArrayInit(numOfMsgs, sizeof(SRpcMsg *));
- if (pArray == NULL) {
- dError("failed to process %d msgs in write-queue since %s", numOfMsgs, terrstr());
- return;
- }
+ int64_t sync = vnodeGetSyncHandle(pVnode->pImpl);
+ SArray *pArray = taosArrayInit(numOfMsgs, sizeof(SRpcMsg **));
- for (int32_t i = 0; i < numOfMsgs; ++i) {
- SRpcMsg *pMsg = NULL;
+ for (int32_t m = 0; m < numOfMsgs; m++) {
if (taosGetQitem(qall, (void **)&pMsg) == 0) continue;
+ dTrace("vgId:%d, get msg:%p from vnode-write queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
- dTrace("msg:%p, get from vnode-write queue", pMsg);
if (taosArrayPush(pArray, &pMsg) == NULL) {
- dTrace("msg:%p, failed to process since %s", pMsg, terrstr());
+ dError("vgId:%d, failed to push msg:%p to vnode-write array", pVnode->vgId, pMsg);
vmSendRsp(pMsg, TSDB_CODE_OUT_OF_MEMORY);
}
}
- for (int i = 0; i < taosArrayGetSize(pArray); i++) {
- SRpcMsg *pMsg = *(SRpcMsg **)taosArrayGet(pArray, i);
- SRpcMsg rsp = {.info = pMsg->info};
+ for (int32_t m = 0; m < taosArrayGetSize(pArray); m++) {
+ pMsg = *(SRpcMsg **)taosArrayGet(pArray, m);
+ code = vnodePreprocessReq(pVnode->pImpl, pMsg);
- vnodePreprocessReq(pVnode->pImpl, pMsg);
+ if (code == TSDB_CODE_ACTION_IN_PROGRESS) continue;
+ if (code != 0) {
+ dError("vgId:%d, msg:%p failed to process since %s", pVnode->vgId, pMsg, tstrerror(code));
+ vmSendRsp(pMsg, code);
+ continue;
+ }
- int32_t ret = syncPropose(vnodeGetSyncHandle(pVnode->pImpl), pMsg, false);
- if (ret == TAOS_SYNC_PROPOSE_NOT_LEADER) {
- dTrace("msg:%p, is redirect since not leader, vgId:%d ", pMsg, pVnode->vgId);
- rsp.code = TSDB_CODE_RPC_REDIRECT;
- SEpSet newEpSet;
- syncGetEpSet(vnodeGetSyncHandle(pVnode->pImpl), &newEpSet);
+ code = syncPropose(sync, pMsg, false);
+ if (code == TAOS_SYNC_PROPOSE_SUCCESS) {
+ continue;
+ } else if (code == TAOS_SYNC_PROPOSE_NOT_LEADER) {
+ dTrace("vgId:%d, msg:%p is redirect since not leader", pVnode->vgId, pMsg);
+ SEpSet newEpSet = {0};
+ syncGetEpSet(sync, &newEpSet);
newEpSet.inUse = (newEpSet.inUse + 1) % newEpSet.numOfEps;
+ SRpcMsg rsp = {.code = TSDB_CODE_RPC_REDIRECT, .info = pMsg->info};
tmsgSendRedirectRsp(&rsp, &newEpSet);
- } else if (ret == TAOS_SYNC_PROPOSE_OTHER_ERROR) {
- rsp.code = TSDB_CODE_SYN_INTERNAL_ERROR;
- tmsgSendRsp(&rsp);
- } else if (ret == TAOS_SYNC_PROPOSE_SUCCESS) {
- // send response in applyQ
} else {
- assert(0);
+ dError("vgId:%d, msg:%p failed to process since %s", pVnode->vgId, pMsg, tstrerror(code));
+ vmSendRsp(pMsg, code);
}
}
for (int32_t i = 0; i < numOfMsgs; i++) {
- SRpcMsg *pMsg = *(SRpcMsg **)taosArrayGet(pArray, i);
- dTrace("msg:%p, is freed", pMsg);
+ pMsg = *(SRpcMsg **)taosArrayGet(pArray, i);
+ dTrace("vgId:%d, msg:%p, is freed", pVnode->vgId, pMsg);
rpcFreeCont(pMsg->pCont);
taosFreeQitem(pMsg);
}
diff --git a/source/dnode/mgmt/node_mgmt/inc/dmMgmt.h b/source/dnode/mgmt/node_mgmt/inc/dmMgmt.h
index 27f1140f2379f2db9a5856ff72ad0fbc0f42d9f2..adde0557965fb7651c66a8b4791d4a671db91201 100644
--- a/source/dnode/mgmt/node_mgmt/inc/dmMgmt.h
+++ b/source/dnode/mgmt/node_mgmt/inc/dmMgmt.h
@@ -168,6 +168,7 @@ int32_t dmProcessNodeMsg(SMgmtWrapper *pWrapper, SRpcMsg *pMsg);
void dmSendMonitorReport();
void dmGetVnodeLoads(SMonVloadInfo *pInfo);
void dmGetMnodeLoads(SMonMloadInfo *pInfo);
+void dmGetQnodeLoads(SQnodeLoad *pInfo);
#ifdef __cplusplus
}
diff --git a/source/dnode/mgmt/node_mgmt/inc/dmNodes.h b/source/dnode/mgmt/node_mgmt/inc/dmNodes.h
index 3ac71de530d4dd9dad6ccd6b29b7789f56a85b1e..8c2d57808fc5d8e29c4bef5079f504c8a9e39802 100644
--- a/source/dnode/mgmt/node_mgmt/inc/dmNodes.h
+++ b/source/dnode/mgmt/node_mgmt/inc/dmNodes.h
@@ -37,6 +37,7 @@ void bmGetMonitorInfo(void *pMgmt, SMonBmInfo *pInfo);
void vmGetVnodeLoads(void *pMgmt, SMonVloadInfo *pInfo);
void mmGetMnodeLoads(void *pMgmt, SMonMloadInfo *pInfo);
+void qmGetQnodeLoads(void *pMgmt, SQnodeLoad *pInfo);
#ifdef __cplusplus
}
diff --git a/source/dnode/mgmt/node_mgmt/src/dmEnv.c b/source/dnode/mgmt/node_mgmt/src/dmEnv.c
index 07d0c43360a5de639f5af2b64208d13c79192687..528beb280bfd05aa4030a3351aaf278f31b96e17 100644
--- a/source/dnode/mgmt/node_mgmt/src/dmEnv.c
+++ b/source/dnode/mgmt/node_mgmt/src/dmEnv.c
@@ -50,26 +50,26 @@ static int32_t dmInitMonitor() {
}
int32_t dmInit(int8_t rtype) {
- dInfo("start to init env");
+ dInfo("start to init dnode env");
if (dmCheckRepeatInit(dmInstance()) != 0) return -1;
if (dmInitSystem() != 0) return -1;
if (dmInitMonitor() != 0) return -1;
if (dmInitDnode(dmInstance(), rtype) != 0) return -1;
- dInfo("env is initialized");
+ dInfo("dnode env is initialized");
return 0;
}
static int32_t dmCheckRepeatCleanup(SDnode *pDnode) {
if (atomic_val_compare_exchange_8(&pDnode->once, DND_ENV_READY, DND_ENV_CLEANUP) != DND_ENV_READY) {
- dError("env is already cleaned up");
+ dError("dnode env is already cleaned up");
return -1;
}
return 0;
}
void dmCleanup() {
- dDebug("start to cleanup env");
+ dDebug("start to cleanup dnode env");
SDnode *pDnode = dmInstance();
if (dmCheckRepeatCleanup(pDnode) != 0) return;
dmCleanupDnode(pDnode);
@@ -79,7 +79,7 @@ void dmCleanup() {
udfcClose();
udfStopUdfd();
taosStopCacheRefreshWorker();
- dInfo("env is cleaned up");
+ dInfo("dnode env is cleaned up");
taosCloseLog();
taosCleanupCfg();
@@ -178,6 +178,7 @@ SMgmtInputOpt dmBuildMgmtInputOpt(SMgmtWrapper *pWrapper) {
.sendMonitorReportFp = dmSendMonitorReport,
.getVnodeLoadsFp = dmGetVnodeLoads,
.getMnodeLoadsFp = dmGetMnodeLoads,
+ .getQnodeLoadsFp = dmGetQnodeLoads,
};
opt.msgCb = dmGetMsgcb(pWrapper->pDnode);
diff --git a/source/dnode/mgmt/node_mgmt/src/dmMonitor.c b/source/dnode/mgmt/node_mgmt/src/dmMonitor.c
index 0b74d865fd5680311c483003a58da1785813a275..ecad390ef94a635fdeed8256004fce9978fde822 100644
--- a/source/dnode/mgmt/node_mgmt/src/dmMonitor.c
+++ b/source/dnode/mgmt/node_mgmt/src/dmMonitor.c
@@ -170,3 +170,17 @@ void dmGetMnodeLoads(SMonMloadInfo *pInfo) {
dmReleaseWrapper(pWrapper);
}
}
+
+void dmGetQnodeLoads(SQnodeLoad *pInfo) {
+ SDnode *pDnode = dmInstance();
+ SMgmtWrapper *pWrapper = &pDnode->wrappers[QNODE];
+ if (dmMarkWrapper(pWrapper) == 0) {
+ if (tsMultiProcess) {
+ dmSendLocalRecv(pDnode, TDMT_MON_QM_LOAD, tDeserializeSQnodeLoad, pInfo);
+ } else if (pWrapper->pMgmt != NULL) {
+ qmGetQnodeLoads(pWrapper->pMgmt, pInfo);
+ }
+ dmReleaseWrapper(pWrapper);
+ }
+}
+
diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c
index 6fbfae8b416efc68a0be9b101f1308aeba723752..e5893fd94740fa20fa244bd1957a02a50e39bf08 100644
--- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c
+++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c
@@ -62,8 +62,10 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) {
dmProcessNetTestReq(pDnode, pRpc);
return;
} else if (pRpc->msgType == TDMT_MND_SYSTABLE_RETRIEVE_RSP || pRpc->msgType == TDMT_VND_FETCH_RSP) {
- qWorkerProcessFetchRsp(NULL, NULL, pRpc);
+ qWorkerProcessFetchRsp(NULL, NULL, pRpc, 0);
return;
+ } else if (pRpc->msgType == TDMT_MND_STATUS_RSP && pEpSet != NULL) {
+ dmSetMnodeEpSet(&pDnode->data, pEpSet);
} else {
}
@@ -128,7 +130,7 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) {
_OVER:
if (code != 0) {
- dError("msg:%p, failed to process since %s", pMsg, terrstr());
+ dTrace("msg:%p, failed to process since %s, type:%s", pMsg, terrstr(), TMSG_INFO(pRpc->msgType));
if (terrno != 0) code = terrno;
if (IsReq(pRpc)) {
@@ -204,29 +206,28 @@ static inline void dmSendRsp(SRpcMsg *pMsg) {
}
static void dmBuildMnodeRedirectRsp(SDnode *pDnode, SRpcMsg *pMsg) {
- SMEpSet msg = {0};
- dmGetMnodeEpSetForRedirect(&pDnode->data, pMsg, &msg.epSet);
+ SEpSet epSet = {0};
+ dmGetMnodeEpSetForRedirect(&pDnode->data, pMsg, &epSet);
- int32_t contLen = tSerializeSMEpSet(NULL, 0, &msg);
+ int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet);
pMsg->pCont = rpcMallocCont(contLen);
if (pMsg->pCont == NULL) {
pMsg->code = TSDB_CODE_OUT_OF_MEMORY;
} else {
- tSerializeSMEpSet(pMsg->pCont, contLen, &msg);
+ tSerializeSEpSet(pMsg->pCont, contLen, &epSet);
pMsg->contLen = contLen;
}
}
static inline void dmSendRedirectRsp(SRpcMsg *pMsg, const SEpSet *pNewEpSet) {
SRpcMsg rsp = {.code = TSDB_CODE_RPC_REDIRECT, .info = pMsg->info};
- SMEpSet msg = {.epSet = *pNewEpSet};
- int32_t contLen = tSerializeSMEpSet(NULL, 0, &msg);
+ int32_t contLen = tSerializeSEpSet(NULL, 0, pNewEpSet);
rsp.pCont = rpcMallocCont(contLen);
if (rsp.pCont == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
} else {
- tSerializeSMEpSet(rsp.pCont, contLen, &msg);
+ tSerializeSEpSet(rsp.pCont, contLen, pNewEpSet);
rsp.contLen = contLen;
}
dmSendRsp(&rsp);
diff --git a/source/dnode/mgmt/node_util/inc/dmUtil.h b/source/dnode/mgmt/node_util/inc/dmUtil.h
index 4946669678cd0fd17a22b935aa9e2613e58d73db..c142a6cfd892413f1a69e2e7ce1d41524b1dbb27 100644
--- a/source/dnode/mgmt/node_util/inc/dmUtil.h
+++ b/source/dnode/mgmt/node_util/inc/dmUtil.h
@@ -34,6 +34,7 @@
#include "dnode.h"
#include "mnode.h"
+#include "qnode.h"
#include "monitor.h"
#include "sync.h"
#include "wal.h"
@@ -90,8 +91,9 @@ typedef enum {
typedef int32_t (*ProcessCreateNodeFp)(EDndNodeType ntype, SRpcMsg *pMsg);
typedef int32_t (*ProcessDropNodeFp)(EDndNodeType ntype, SRpcMsg *pMsg);
typedef void (*SendMonitorReportFp)();
-typedef void (*GetVnodeLoadsFp)();
-typedef void (*GetMnodeLoadsFp)();
+typedef void (*GetVnodeLoadsFp)(SMonVloadInfo *pInfo);
+typedef void (*GetMnodeLoadsFp)(SMonMloadInfo *pInfo);
+typedef void (*GetQnodeLoadsFp)(SQnodeLoad *pInfo);
typedef struct {
int32_t dnodeId;
@@ -118,6 +120,7 @@ typedef struct {
SendMonitorReportFp sendMonitorReportFp;
GetVnodeLoadsFp getVnodeLoadsFp;
GetMnodeLoadsFp getMnodeLoadsFp;
+ GetQnodeLoadsFp getQnodeLoadsFp;
} SMgmtInputOpt;
typedef struct {
@@ -180,4 +183,4 @@ void dmSetMnodeEpSet(SDnodeData *pData, SEpSet *pEpSet);
}
#endif
-#endif /*_TD_DM_INT_H_*/
\ No newline at end of file
+#endif /*_TD_DM_INT_H_*/
diff --git a/source/dnode/mgmt/node_util/src/dmEps.c b/source/dnode/mgmt/node_util/src/dmEps.c
index e0af20e41bfef194d90d30316c16042522e7f87d..937c1ab7faa29191ea65f8770d0ffb2c531b3c35 100644
--- a/source/dnode/mgmt/node_util/src/dmEps.c
+++ b/source/dnode/mgmt/node_util/src/dmEps.c
@@ -148,7 +148,6 @@ int32_t dmReadEps(SDnodeData *pData) {
code = 0;
dDebug("succcessed to read file %s", file);
- dmPrintEps(pData);
_OVER:
if (content != NULL) taosMemoryFree(content);
@@ -162,6 +161,7 @@ _OVER:
taosArrayPush(pData->dnodeEps, &dnodeEp);
}
+ dDebug("reset dnode list on startup");
dmResetEps(pData, pData->dnodeEps);
if (dmIsEpChanged(pData, pData->dnodeId, tsLocalEp)) {
@@ -236,11 +236,13 @@ void dmUpdateEps(SDnodeData *pData, SArray *eps) {
int32_t numOfEpsOld = (int32_t)taosArrayGetSize(pData->dnodeEps);
if (numOfEps != numOfEpsOld) {
+ dDebug("new dnode list get from mnode");
dmResetEps(pData, eps);
dmWriteEps(pData);
} else {
int32_t size = numOfEps * sizeof(SDnodeEp);
if (memcmp(pData->dnodeEps->pData, eps->pData, size) != 0) {
+ dDebug("new dnode list get from mnode");
dmResetEps(pData, eps);
dmWriteEps(pData);
}
@@ -282,7 +284,7 @@ static void dmResetEps(SDnodeData *pData, SArray *dnodeEps) {
static void dmPrintEps(SDnodeData *pData) {
int32_t numOfEps = (int32_t)taosArrayGetSize(pData->dnodeEps);
- dDebug("print dnode ep list, num:%d", numOfEps);
+ dDebug("print dnode list, num:%d", numOfEps);
for (int32_t i = 0; i < numOfEps; i++) {
SDnodeEp *pEp = taosArrayGet(pData->dnodeEps, i);
dDebug("dnode:%d, fqdn:%s port:%u is_mnode:%d", pEp->id, pEp->ep.fqdn, pEp->ep.port, pEp->isMnode);
@@ -326,6 +328,7 @@ void dmGetMnodeEpSetForRedirect(SDnodeData *pData, SRpcMsg *pMsg, SEpSet *pEpSet
}
void dmSetMnodeEpSet(SDnodeData *pData, SEpSet *pEpSet) {
+ if (memcmp(pEpSet, &pData->mnodeEps, sizeof(SEpSet)) == 0) return;
taosThreadRwlockWrlock(&pData->lock);
pData->mnodeEps = *pEpSet;
taosThreadRwlockUnlock(&pData->lock);
diff --git a/source/dnode/mgmt/node_util/src/dmFile.c b/source/dnode/mgmt/node_util/src/dmFile.c
index 7ac6fc129d2bb591706d6ed722878359c4993515..78e706f90814950287aed067103690f9c215e8e3 100644
--- a/source/dnode/mgmt/node_util/src/dmFile.c
+++ b/source/dnode/mgmt/node_util/src/dmFile.c
@@ -135,7 +135,7 @@ TdFilePtr dmCheckRunning(const char *dataDir) {
return NULL;
}
- dDebug("file:%s is locked", filepath);
+ dDebug("lock file:%s to prevent repeated starts", filepath);
return pFile;
}
diff --git a/source/dnode/mgmt/test/CMakeLists.txt b/source/dnode/mgmt/test/CMakeLists.txt
index e1656ceb34d222fb13ef524b087349756d46d6ff..6b1919bf1862b5eeca9047de4731dae306ca275a 100644
--- a/source/dnode/mgmt/test/CMakeLists.txt
+++ b/source/dnode/mgmt/test/CMakeLists.txt
@@ -3,7 +3,7 @@ if(${BUILD_TEST})
add_subdirectory(qnode)
add_subdirectory(bnode)
add_subdirectory(snode)
- add_subdirectory(mnode)
+ #add_subdirectory(mnode)
add_subdirectory(vnode)
add_subdirectory(sut)
endif(${BUILD_TEST})
diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h
index 81f4c5ed1ef87431b639d256acde0faa596692fe..83a36f4b0d5509884b2e99e7bd0eb4663a564959 100644
--- a/source/dnode/mnode/impl/inc/mndDef.h
+++ b/source/dnode/mnode/impl/inc/mndDef.h
@@ -54,72 +54,32 @@ typedef enum {
} EAuthOp;
typedef enum {
- TRN_STAGE_PREPARE = 0,
- TRN_STAGE_REDO_LOG = 1,
- TRN_STAGE_REDO_ACTION = 2,
- TRN_STAGE_ROLLBACK = 3,
- TRN_STAGE_UNDO_ACTION = 4,
- TRN_STAGE_UNDO_LOG = 5,
- TRN_STAGE_COMMIT = 6,
- TRN_STAGE_COMMIT_LOG = 7,
- TRN_STAGE_FINISHED = 8
-} ETrnStage;
+ TRN_CONFLICT_NOTHING = 0,
+ TRN_CONFLICT_GLOBAL = 1,
+ TRN_CONFLICT_DB = 2,
+ TRN_CONFLICT_DB_INSIDE = 3,
+} ETrnConflct;
typedef enum {
- TRN_TYPE_BASIC_SCOPE = 1000,
- TRN_TYPE_CREATE_USER = 1001,
- TRN_TYPE_ALTER_USER = 1002,
- TRN_TYPE_DROP_USER = 1003,
- TRN_TYPE_CREATE_FUNC = 1004,
- TRN_TYPE_DROP_FUNC = 1005,
-
- TRN_TYPE_CREATE_SNODE = 1006,
- TRN_TYPE_DROP_SNODE = 1007,
- TRN_TYPE_CREATE_QNODE = 1008,
- TRN_TYPE_DROP_QNODE = 1009,
- TRN_TYPE_CREATE_BNODE = 1010,
- TRN_TYPE_DROP_BNODE = 1011,
- TRN_TYPE_CREATE_MNODE = 1012,
- TRN_TYPE_DROP_MNODE = 1013,
- TRN_TYPE_CREATE_TOPIC = 1014,
- TRN_TYPE_DROP_TOPIC = 1015,
- TRN_TYPE_SUBSCRIBE = 1016,
- TRN_TYPE_REBALANCE = 1017,
- TRN_TYPE_COMMIT_OFFSET = 1018,
- TRN_TYPE_CREATE_STREAM = 1019,
- TRN_TYPE_DROP_STREAM = 1020,
- TRN_TYPE_ALTER_STREAM = 1021,
- TRN_TYPE_CONSUMER_LOST = 1022,
- TRN_TYPE_CONSUMER_RECOVER = 1023,
- TRN_TYPE_BASIC_SCOPE_END,
-
- TRN_TYPE_GLOBAL_SCOPE = 2000,
- TRN_TYPE_CREATE_DNODE = 2001,
- TRN_TYPE_DROP_DNODE = 2002,
- TRN_TYPE_GLOBAL_SCOPE_END,
-
- TRN_TYPE_DB_SCOPE = 3000,
- TRN_TYPE_CREATE_DB = 3001,
- TRN_TYPE_ALTER_DB = 3002,
- TRN_TYPE_DROP_DB = 3003,
- TRN_TYPE_SPLIT_VGROUP = 3004,
- TRN_TYPE_MERGE_VGROUP = 3015,
- TRN_TYPE_DB_SCOPE_END,
-
- TRN_TYPE_STB_SCOPE = 4000,
- TRN_TYPE_CREATE_STB = 4001,
- TRN_TYPE_ALTER_STB = 4002,
- TRN_TYPE_DROP_STB = 4003,
- TRN_TYPE_CREATE_SMA = 4004,
- TRN_TYPE_DROP_SMA = 4005,
- TRN_TYPE_STB_SCOPE_END,
-} ETrnType;
+ TRN_STAGE_PREPARE = 0,
+ TRN_STAGE_REDO_ACTION = 1,
+ TRN_STAGE_ROLLBACK = 2,
+ TRN_STAGE_UNDO_ACTION = 3,
+ TRN_STAGE_COMMIT = 4,
+ TRN_STAGE_COMMIT_ACTION = 5,
+ TRN_STAGE_FINISHED = 6
+} ETrnStage;
typedef enum {
TRN_POLICY_ROLLBACK = 0,
TRN_POLICY_RETRY = 1,
} ETrnPolicy;
+typedef enum {
+ TRN_EXEC_PRARLLEL = 0,
+ TRN_EXEC_SERIAL = 1,
+} ETrnExec;
+
typedef enum {
DND_REASON_ONLINE = 0,
DND_REASON_STATUS_MSG_TIMEOUT,
@@ -147,22 +107,24 @@ typedef struct {
int32_t id;
ETrnStage stage;
ETrnPolicy policy;
- ETrnType type;
+ ETrnConflct conflict;
+ ETrnExec exec;
int32_t code;
int32_t failedTimes;
SRpcHandleInfo rpcInfo;
void* rpcRsp;
int32_t rpcRspLen;
- SArray* redoLogs;
- SArray* undoLogs;
- SArray* commitLogs;
+ int32_t redoActionPos;
SArray* redoActions;
SArray* undoActions;
+ SArray* commitActions;
int64_t createdTime;
int64_t lastExecTime;
- int64_t dbUid;
+ int32_t lastErrorAction;
+ int32_t lastErrorNo;
+ tmsg_t lastErrorMsgType;
+ SEpSet lastErrorEpset;
char dbname[TSDB_DB_FNAME_LEN];
- char lastError[TSDB_TRANS_ERROR_LEN];
int32_t startFunc;
int32_t stopFunc;
int32_t paramLen;
@@ -196,9 +158,8 @@ typedef struct {
int32_t id;
int64_t createdTime;
int64_t updateTime;
- ESyncState role;
- int32_t roleTerm;
- int64_t roleTime;
+ ESyncState state;
+ int64_t stateStartTime;
SDnodeObj* pDnode;
} SMnodeObj;
@@ -207,6 +168,7 @@ typedef struct {
int64_t createdTime;
int64_t updateTime;
SDnodeObj* pDnode;
+ SQnodeLoad load;
} SQnodeObj;
typedef struct {
@@ -293,6 +255,7 @@ typedef struct {
int8_t hashMethod; // default is 1
int32_t numOfRetensions;
SArray* pRetensions;
+ int8_t schemaless;
} SDbCfg;
typedef struct {
@@ -328,8 +291,10 @@ typedef struct {
int64_t compStorage;
int64_t pointsWritten;
int8_t compact;
+ int8_t isTsma;
int8_t replica;
SVnodeGid vnodeGid[TSDB_MAX_REPLICA];
+ void* pTsma;
} SVgObj;
typedef struct {
@@ -364,7 +329,6 @@ typedef struct {
int64_t updateTime;
int64_t uid;
int64_t dbUid;
- int32_t version;
int32_t tagVer;
int32_t colVer;
int32_t nextColId;
@@ -447,19 +411,15 @@ typedef struct {
int64_t uid;
int64_t dbUid;
int32_t version;
- int8_t subType; // db or table
- int8_t withTbName;
- int8_t withSchema;
- int8_t withTag;
+ int8_t subType; // column, db or stable
SRWLatch lock;
- int32_t consumerCnt;
int32_t sqlLen;
int32_t astLen;
char* sql;
char* ast;
char* physicalPlan;
SSchemaWrapper schema;
- int32_t refConsumerCnt;
+ int64_t stbUid;
} SMqTopicObj;
typedef struct {
@@ -518,9 +478,7 @@ typedef struct {
int64_t dbUid;
int32_t vgNum;
int8_t subType;
- int8_t withTbName;
- int8_t withSchema;
- int8_t withTag;
+ int64_t stbUid;
SHashObj* consumerHash; // consumerId -> SMqConsumerEp
SArray* unassignedVgs; // SArray
} SMqSubscribeObj;
@@ -572,7 +530,7 @@ typedef struct {
} SMqRebOutputObj;
typedef struct {
- char name[TSDB_TOPIC_FNAME_LEN];
+ char name[TSDB_STREAM_FNAME_LEN];
char sourceDb[TSDB_DB_FNAME_LEN];
char targetDb[TSDB_DB_FNAME_LEN];
char targetSTbName[TSDB_TABLE_FNAME_LEN];
@@ -587,7 +545,8 @@ typedef struct {
int8_t status;
int8_t createdBy; // STREAM_CREATED_BY__USER or SMA
int32_t fixedSinkVgId; // 0 for shuffle
- int64_t smaId; // 0 for unused
+ SVgObj fixedSinkVg;
+ int64_t smaId; // 0 for unused
int8_t trigger;
int32_t triggerParam;
int64_t waterMark;
diff --git a/source/dnode/mnode/impl/inc/mndInt.h b/source/dnode/mnode/impl/inc/mndInt.h
index 5a1653b937fee8ed4427aa1e4a40b459b110125c..6661347e4206b28d6977b622bc4cd8777b34abb7 100644
--- a/source/dnode/mnode/impl/inc/mndInt.h
+++ b/source/dnode/mnode/impl/inc/mndInt.h
@@ -75,12 +75,12 @@ typedef struct {
} STelemMgmt;
typedef struct {
- SWal *pWal;
- int32_t errCode;
- bool restored;
- sem_t syncSem;
- int64_t sync;
- ESyncState state;
+ SWal *pWal;
+ sem_t syncSem;
+ int64_t sync;
+ bool standby;
+ int32_t errCode;
+ int32_t transId;
} SSyncMgmt;
typedef struct {
@@ -89,33 +89,45 @@ typedef struct {
} SGrantInfo;
typedef struct SMnode {
- int32_t selfId;
- int64_t clusterId;
- TdThread thread;
- bool stopped;
- int8_t replica;
- int8_t selfIndex;
- SReplica replicas[TSDB_MAX_REPLICA];
- char *path;
- int64_t checkTime;
- SSdb *pSdb;
- SMgmtWrapper *pWrapper;
- SArray *pSteps;
- SQHandle *pQuery;
- SShowMgmt showMgmt;
- SProfileMgmt profileMgmt;
- STelemMgmt telemMgmt;
- SSyncMgmt syncMgmt;
- SHashObj *infosMeta;
- SHashObj *perfsMeta;
- SGrantInfo grant;
- MndMsgFp msgFp[TDMT_MAX];
- SMsgCb msgCb;
+ int32_t selfDnodeId;
+ int64_t clusterId;
+ TdThread thread;
+ TdThreadRwlock lock;
+ int32_t rpcRef;
+ int32_t syncRef;
+ bool stopped;
+ bool restored;
+ bool deploy;
+ int8_t replica;
+ int8_t selfIndex;
+ SReplica replicas[TSDB_MAX_REPLICA];
+ char *path;
+ int64_t checkTime;
+ SSdb *pSdb;
+ SArray *pSteps;
+ SQHandle *pQuery;
+ SHashObj *infosMeta;
+ SHashObj *perfsMeta;
+ SShowMgmt showMgmt;
+ SProfileMgmt profileMgmt;
+ STelemMgmt telemMgmt;
+ SSyncMgmt syncMgmt;
+ SGrantInfo grant;
+ MndMsgFp msgFp[TDMT_MAX];
+ SMsgCb msgCb;
} SMnode;
void mndSetMsgHandle(SMnode *pMnode, tmsg_t msgType, MndMsgFp fp);
int64_t mndGenerateUid(char *name, int32_t len);
+int32_t mndAcquireRpcRef(SMnode *pMnode);
+void mndReleaseRpcRef(SMnode *pMnode);
+void mndSetRestore(SMnode *pMnode, bool restored);
+void mndSetStop(SMnode *pMnode);
+bool mndGetStop(SMnode *pMnode);
+int32_t mndAcquireSyncRef(SMnode *pMnode);
+void mndReleaseSyncRef(SMnode *pMnode);
+
#ifdef __cplusplus
}
#endif
diff --git a/source/dnode/mnode/impl/inc/mndMnode.h b/source/dnode/mnode/impl/inc/mndMnode.h
index a5cdfa1061034c25f2162ffe1812ea3ee235bf36..fd62b3ce75a8691c95a9ecf8ec70daae272145c0 100644
--- a/source/dnode/mnode/impl/inc/mndMnode.h
+++ b/source/dnode/mnode/impl/inc/mndMnode.h
@@ -28,7 +28,6 @@ SMnodeObj *mndAcquireMnode(SMnode *pMnode, int32_t mnodeId);
void mndReleaseMnode(SMnode *pMnode, SMnodeObj *pObj);
bool mndIsMnode(SMnode *pMnode, int32_t dnodeId);
void mndGetMnodeEpSet(SMnode *pMnode, SEpSet *pEpSet);
-void mndUpdateMnodeRole(SMnode *pMnode);
#ifdef __cplusplus
}
diff --git a/source/dnode/mnode/impl/inc/mndOffset.h b/source/dnode/mnode/impl/inc/mndOffset.h
index 900181858bd724873ea948d450e830cc83643463..f7569b964875bbffe90c8fc5525fda8f68b688b8 100644
--- a/source/dnode/mnode/impl/inc/mndOffset.h
+++ b/source/dnode/mnode/impl/inc/mndOffset.h
@@ -39,6 +39,7 @@ static FORCE_INLINE int32_t mndMakePartitionKey(char *key, const char *cgroup, c
int32_t mndDropOffsetByDB(SMnode *pMnode, STrans *pTrans, SDbObj *pDb);
int32_t mndDropOffsetByTopic(SMnode *pMnode, STrans *pTrans, const char *topic);
+int32_t mndDropOffsetBySubKey(SMnode *pMnode, STrans *pTrans, const char *subKey);
bool mndOffsetFromTopic(SMqOffsetObj *pOffset, const char *topic);
diff --git a/source/dnode/mnode/impl/inc/mndQnode.h b/source/dnode/mnode/impl/inc/mndQnode.h
index 5d177b3f6db6e2f8c81be3c4461bdea0870ba322..3e38565a4fe67b93d8ba8b9d30160ce54b13dee5 100644
--- a/source/dnode/mnode/impl/inc/mndQnode.h
+++ b/source/dnode/mnode/impl/inc/mndQnode.h
@@ -22,9 +22,15 @@
extern "C" {
#endif
+#define QNODE_LOAD_VALUE(pQnode) (pQnode ? (pQnode->load.numOfQueryInQueue + pQnode->load.numOfFetchInQueue) : 0)
+
int32_t mndInitQnode(SMnode *pMnode);
void mndCleanupQnode(SMnode *pMnode);
+SQnodeObj *mndAcquireQnode(SMnode *pMnode, int32_t qnodeId);
+void mndReleaseQnode(SMnode *pMnode, SQnodeObj *pObj);
+int32_t mndCreateQnodeList(SMnode *pMnode, SArray** pList, int32_t limit);
+
#ifdef __cplusplus
}
#endif
diff --git a/source/dnode/mnode/impl/inc/mndScheduler.h b/source/dnode/mnode/impl/inc/mndScheduler.h
index 33af040915688fd83c4a82af3c89047be5d20dae..05aea3f68c4023ceb68e16ad875f59c666f63171 100644
--- a/source/dnode/mnode/impl/inc/mndScheduler.h
+++ b/source/dnode/mnode/impl/inc/mndScheduler.h
@@ -29,7 +29,8 @@ int32_t mndSchedInitSubEp(SMnode* pMnode, const SMqTopicObj* pTopic, SMqSubscrib
int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream);
-int32_t mndConvertRSmaTask(const char* ast, int8_t triggerType, int64_t watermark, char** pStr, int32_t* pLen);
+int32_t mndConvertRSmaTask(const char* ast, int64_t uid, int8_t triggerType, int64_t watermark, char** pStr,
+ int32_t* pLen, double filesFactor);
#ifdef __cplusplus
}
diff --git a/source/dnode/mnode/impl/inc/mndSubscribe.h b/source/dnode/mnode/impl/inc/mndSubscribe.h
index 50cede62ce424ae855f46ba0f359b5088058e4d1..d91c2bd4c3f69063420f3a775f6183e3eaa3824d 100644
--- a/source/dnode/mnode/impl/inc/mndSubscribe.h
+++ b/source/dnode/mnode/impl/inc/mndSubscribe.h
@@ -33,6 +33,7 @@ int32_t mndMakeSubscribeKey(char *key, const char *cgroup, const char *topicName
int32_t mndDropSubByDB(SMnode *pMnode, STrans *pTrans, SDbObj *pDb);
int32_t mndDropSubByTopic(SMnode *pMnode, STrans *pTrans, const char *topic);
+int32_t mndSetDropSubCommitLogs(SMnode *pMnode, STrans *pTrans, SMqSubscribeObj *pSub);
#ifdef __cplusplus
}
diff --git a/source/dnode/mnode/impl/inc/mndSync.h b/source/dnode/mnode/impl/inc/mndSync.h
index 356f215267fcfd76f5a851202c6290b9433796ee..cb9d70d5ee48f542dbe58100328b7f2284ea2926 100644
--- a/source/dnode/mnode/impl/inc/mndSync.h
+++ b/source/dnode/mnode/impl/inc/mndSync.h
@@ -25,7 +25,7 @@ extern "C" {
int32_t mndInitSync(SMnode *pMnode);
void mndCleanupSync(SMnode *pMnode);
bool mndIsMaster(SMnode *pMnode);
-int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw);
+int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw, int32_t transId);
void mndSyncStart(SMnode *pMnode);
void mndSyncStop(SMnode *pMnode);
diff --git a/source/dnode/mnode/impl/inc/mndTopic.h b/source/dnode/mnode/impl/inc/mndTopic.h
index c5c4800e0295fa48ee4bf9669200f7ce7a31eff8..4aa18ea591a7058d8ecbbdcb901b0ebdcd82181b 100644
--- a/source/dnode/mnode/impl/inc/mndTopic.h
+++ b/source/dnode/mnode/impl/inc/mndTopic.h
@@ -37,6 +37,8 @@ const char *mndTopicGetShowName(const char topic[TSDB_TOPIC_FNAME_LEN]);
int32_t mndSetTopicCommitLogs(SMnode *pMnode, STrans *pTrans, SMqTopicObj *pTopic);
+bool mndCheckColAndTagModifiable(SMnode *pMnode, int64_t suid, const SArray *colIds);
+
#ifdef __cplusplus
}
#endif
diff --git a/source/dnode/mnode/impl/inc/mndTrans.h b/source/dnode/mnode/impl/inc/mndTrans.h
index 84e7a17192b7ba41028989d8bc58e88229731e10..6d1f3710830563e24fe124a3a95582b316ef4e00 100644
--- a/source/dnode/mnode/impl/inc/mndTrans.h
+++ b/source/dnode/mnode/impl/inc/mndTrans.h
@@ -22,24 +22,29 @@
extern "C" {
#endif
+typedef enum {
+ TRANS_START_FUNC_TEST = 1,
+ TRANS_STOP_FUNC_TEST = 2,
+ TRANS_START_FUNC_MQ_REB = 3,
+ TRANS_STOP_FUNC_MQ_REB = 4,
+} ETrnFunc;
+
typedef struct {
- SEpSet epSet;
- tmsg_t msgType;
- int8_t msgSent;
- int8_t msgReceived;
- int32_t errCode;
- int32_t acceptableCode;
- int32_t contLen;
- void *pCont;
+ int32_t id;
+ int32_t errCode;
+ int32_t acceptableCode;
+ int8_t stage;
+ int8_t actionType; // 0-msg, 1-raw
+ int8_t rawWritten;
+ int8_t msgSent;
+ int8_t msgReceived;
+ tmsg_t msgType;
+ SEpSet epSet;
+ int32_t contLen;
+ void *pCont;
+ SSdbRaw *pRaw;
} STransAction;
-typedef enum {
- TEST_TRANS_START_FUNC = 1,
- TEST_TRANS_STOP_FUNC = 2,
- MQ_REB_TRANS_START_FUNC = 3,
- MQ_REB_TRANS_STOP_FUNC = 4,
-} ETrnFuncType;
-
typedef void (*TransCbFp)(SMnode *pMnode, void *param, int32_t paramLen);
int32_t mndInitTrans(SMnode *pMnode);
@@ -47,7 +52,7 @@ void mndCleanupTrans(SMnode *pMnode);
STrans *mndAcquireTrans(SMnode *pMnode, int32_t transId);
void mndReleaseTrans(SMnode *pMnode, STrans *pTrans);
-STrans *mndTransCreate(SMnode *pMnode, ETrnPolicy policy, ETrnType type, const SRpcMsg *pReq);
+STrans *mndTransCreate(SMnode *pMnode, ETrnPolicy policy, ETrnConflct conflict, const SRpcMsg *pReq);
void mndTransDrop(STrans *pTrans);
int32_t mndTransAppendRedolog(STrans *pTrans, SSdbRaw *pRaw);
int32_t mndTransAppendUndolog(STrans *pTrans, SSdbRaw *pRaw);
@@ -55,8 +60,9 @@ int32_t mndTransAppendCommitlog(STrans *pTrans, SSdbRaw *pRaw);
int32_t mndTransAppendRedoAction(STrans *pTrans, STransAction *pAction);
int32_t mndTransAppendUndoAction(STrans *pTrans, STransAction *pAction);
void mndTransSetRpcRsp(STrans *pTrans, void *pCont, int32_t contLen);
-void mndTransSetCb(STrans *pTrans, ETrnFuncType startFunc, ETrnFuncType stopFunc, void *param, int32_t paramLen);
-void mndTransSetDbInfo(STrans *pTrans, SDbObj *pDb);
+void mndTransSetCb(STrans *pTrans, ETrnFunc startFunc, ETrnFunc stopFunc, void *param, int32_t paramLen);
+void mndTransSetDbName(STrans *pTrans, const char *dbname);
+void mndTransSetSerial(STrans *pTrans);
int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans);
void mndTransProcessRsp(SRpcMsg *pRsp);
diff --git a/source/dnode/mnode/impl/inc/mndVgroup.h b/source/dnode/mnode/impl/inc/mndVgroup.h
index 9bf7b6eb8937cee5078ddab38e04810e77734d05..3f4f3f2053bd4fd633488eaf4a4fac71d642df51 100644
--- a/source/dnode/mnode/impl/inc/mndVgroup.h
+++ b/source/dnode/mnode/impl/inc/mndVgroup.h
@@ -30,12 +30,13 @@ SSdbRaw *mndVgroupActionEncode(SVgObj *pVgroup);
SEpSet mndGetVgroupEpset(SMnode *pMnode, const SVgObj *pVgroup);
int32_t mndGetVnodesNum(SMnode *pMnode, int32_t dnodeId);
+int32_t mndAllocSmaVgroup(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup);
int32_t mndAllocVgroup(SMnode *pMnode, SDbObj *pDb, SVgObj **ppVgroups);
SArray *mndBuildDnodesArray(SMnode *pMnode);
int32_t mndAddVnodeToVgroup(SMnode *pMnode, SVgObj *pVgroup, SArray *pArray);
int32_t mndRemoveVnodeFromVgroup(SMnode *pMnode, SVgObj *pVgroup, SArray *pArray, SVnodeGid *del1, SVnodeGid *del2);
-void *mndBuildCreateVnodeReq(SMnode *pMnode, SDnodeObj *pDnode, SDbObj *pDb, SVgObj *pVgroup, int32_t *pContLen);
+void *mndBuildCreateVnodeReq(SMnode *pMnode, SDnodeObj *pDnode, SDbObj *pDb, SVgObj *pVgroup, int32_t *pContLen, bool standby);
void *mndBuildDropVnodeReq(SMnode *pMnode, SDnodeObj *pDnode, SDbObj *pDb, SVgObj *pVgroup, int32_t *pContLen);
void *mndBuildAlterVnodeReq(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup, int32_t *pContLen);
diff --git a/source/dnode/mnode/impl/src/mndAcct.c b/source/dnode/mnode/impl/src/mndAcct.c
index 52b9ac62e67c652a914e560e9551c08606971af4..0ce4a8c76e72ce2f2513819139b00a01c67f5231 100644
--- a/source/dnode/mnode/impl/src/mndAcct.c
+++ b/source/dnode/mnode/impl/src/mndAcct.c
@@ -16,6 +16,7 @@
#define _DEFAULT_SOURCE
#include "mndAcct.h"
#include "mndShow.h"
+#include "mndTrans.h"
#define ACCT_VER_NUMBER 1
#define ACCT_RESERVE_SIZE 128
@@ -31,14 +32,16 @@ static int32_t mndProcessAlterAcctReq(SRpcMsg *pReq);
static int32_t mndProcessDropAcctReq(SRpcMsg *pReq);
int32_t mndInitAcct(SMnode *pMnode) {
- SSdbTable table = {.sdbType = SDB_ACCT,
- .keyType = SDB_KEY_BINARY,
- .deployFp = mndCreateDefaultAcct,
- .encodeFp = (SdbEncodeFp)mndAcctActionEncode,
- .decodeFp = (SdbDecodeFp)mndAcctActionDecode,
- .insertFp = (SdbInsertFp)mndAcctActionInsert,
- .updateFp = (SdbUpdateFp)mndAcctActionUpdate,
- .deleteFp = (SdbDeleteFp)mndAcctActionDelete};
+ SSdbTable table = {
+ .sdbType = SDB_ACCT,
+ .keyType = SDB_KEY_BINARY,
+ .deployFp = mndCreateDefaultAcct,
+ .encodeFp = (SdbEncodeFp)mndAcctActionEncode,
+ .decodeFp = (SdbDecodeFp)mndAcctActionDecode,
+ .insertFp = (SdbInsertFp)mndAcctActionInsert,
+ .updateFp = (SdbUpdateFp)mndAcctActionUpdate,
+ .deleteFp = (SdbDeleteFp)mndAcctActionDelete,
+ };
mndSetMsgHandle(pMnode, TDMT_MND_CREATE_ACCT, mndProcessCreateAcctReq);
mndSetMsgHandle(pMnode, TDMT_MND_ALTER_ACCT, mndProcessAlterAcctReq);
@@ -56,25 +59,48 @@ static int32_t mndCreateDefaultAcct(SMnode *pMnode) {
acctObj.updateTime = acctObj.createdTime;
acctObj.acctId = 1;
acctObj.status = 0;
- acctObj.cfg = (SAcctCfg){.maxUsers = INT32_MAX,
- .maxDbs = INT32_MAX,
- .maxStbs = INT32_MAX,
- .maxTbs = INT32_MAX,
- .maxTimeSeries = INT32_MAX,
- .maxStreams = INT32_MAX,
- .maxFuncs = INT32_MAX,
- .maxConsumers = INT32_MAX,
- .maxConns = INT32_MAX,
- .maxTopics = INT32_MAX,
- .maxStorage = INT64_MAX,
- .accessState = TSDB_VN_ALL_ACCCESS};
+ acctObj.cfg = (SAcctCfg){
+ .maxUsers = INT32_MAX,
+ .maxDbs = INT32_MAX,
+ .maxStbs = INT32_MAX,
+ .maxTbs = INT32_MAX,
+ .maxTimeSeries = INT32_MAX,
+ .maxStreams = INT32_MAX,
+ .maxFuncs = INT32_MAX,
+ .maxConsumers = INT32_MAX,
+ .maxConns = INT32_MAX,
+ .maxTopics = INT32_MAX,
+ .maxStorage = INT64_MAX,
+ .accessState = TSDB_VN_ALL_ACCCESS,
+ };
SSdbRaw *pRaw = mndAcctActionEncode(&acctObj);
if (pRaw == NULL) return -1;
sdbSetRawStatus(pRaw, SDB_STATUS_READY);
- mDebug("acct:%s, will be created while deploy sdb, raw:%p", acctObj.acct, pRaw);
- return sdbWrite(pMnode->pSdb, pRaw);
+ mDebug("acct:%s, will be created when deploying, raw:%p", acctObj.acct, pRaw);
+
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, NULL);
+ if (pTrans == NULL) {
+ mError("acct:%s, failed to create since %s", acctObj.acct, terrstr());
+ return -1;
+ }
+ mDebug("trans:%d, used to create acct:%s", pTrans->id, acctObj.acct);
+
+ if (mndTransAppendCommitlog(pTrans, pRaw) != 0) {
+ mError("trans:%d, failed to commit redo log since %s", pTrans->id, terrstr());
+ mndTransDrop(pTrans);
+ return -1;
+ }
+
+ if (mndTransPrepare(pMnode, pTrans) != 0) {
+ mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr());
+ mndTransDrop(pTrans);
+ return -1;
+ }
+
+ mndTransDrop(pTrans);
+ return 0;
}
static SSdbRaw *mndAcctActionEncode(SAcctObj *pAcct) {
diff --git a/source/dnode/mnode/impl/src/mndBnode.c b/source/dnode/mnode/impl/src/mndBnode.c
index 3316a09462ff1d5ff7c940e623941c7abe72a76c..801f335a8056757c2cbe2d7f1ca6d65a4501003f 100644
--- a/source/dnode/mnode/impl/src/mndBnode.c
+++ b/source/dnode/mnode/impl/src/mndBnode.c
@@ -246,7 +246,7 @@ static int32_t mndCreateBnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode,
bnodeObj.createdTime = taosGetTimestampMs();
bnodeObj.updateTime = bnodeObj.createdTime;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_CREATE_BNODE, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq);
if (pTrans == NULL) goto _OVER;
mDebug("trans:%d, used to create bnode:%d", pTrans->id, pCreate->dnodeId);
@@ -363,7 +363,7 @@ static int32_t mndSetDropBnodeRedoActions(STrans *pTrans, SDnodeObj *pDnode, SBn
static int32_t mndDropBnode(SMnode *pMnode, SRpcMsg *pReq, SBnodeObj *pObj) {
int32_t code = -1;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_DROP_BNODE, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pReq);
if (pTrans == NULL) goto _OVER;
mDebug("trans:%d, used to drop bnode:%d", pTrans->id, pObj->id);
diff --git a/source/dnode/mnode/impl/src/mndCluster.c b/source/dnode/mnode/impl/src/mndCluster.c
index f6f6813b97ece46b82428c02df24d8132cf9b697..bb3377d16ac815489ce0cfbec22307ebb02156d0 100644
--- a/source/dnode/mnode/impl/src/mndCluster.c
+++ b/source/dnode/mnode/impl/src/mndCluster.c
@@ -16,6 +16,7 @@
#define _DEFAULT_SOURCE
#include "mndCluster.h"
#include "mndShow.h"
+#include "mndTrans.h"
#define CLUSTER_VER_NUMBE 1
#define CLUSTER_RESERVE_SIZE 64
@@ -143,6 +144,7 @@ _OVER:
static int32_t mndClusterActionInsert(SSdb *pSdb, SClusterObj *pCluster) {
mTrace("cluster:%" PRId64 ", perform insert action, row:%p", pCluster->id, pCluster);
+ pSdb->pMnode->clusterId = pCluster->id;
return 0;
}
@@ -170,14 +172,36 @@ static int32_t mndCreateDefaultCluster(SMnode *pMnode) {
clusterObj.id = mndGenerateUid(clusterObj.name, TSDB_CLUSTER_ID_LEN);
clusterObj.id = (clusterObj.id >= 0 ? clusterObj.id : -clusterObj.id);
pMnode->clusterId = clusterObj.id;
- mDebug("cluster:%" PRId64 ", name is %s", clusterObj.id, clusterObj.name);
+ mInfo("cluster:%" PRId64 ", name is %s", clusterObj.id, clusterObj.name);
SSdbRaw *pRaw = mndClusterActionEncode(&clusterObj);
if (pRaw == NULL) return -1;
sdbSetRawStatus(pRaw, SDB_STATUS_READY);
- mDebug("cluster:%" PRId64 ", will be created while deploy sdb, raw:%p", clusterObj.id, pRaw);
- return sdbWrite(pMnode->pSdb, pRaw);
+ mDebug("cluster:%" PRId64 ", will be created when deploying, raw:%p", clusterObj.id, pRaw);
+
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, NULL);
+ if (pTrans == NULL) {
+ mError("cluster:%" PRId64 ", failed to create since %s", clusterObj.id, terrstr());
+ return -1;
+ }
+ mDebug("trans:%d, used to create cluster:%" PRId64, pTrans->id, clusterObj.id);
+
+ if (mndTransAppendCommitlog(pTrans, pRaw) != 0) {
+ mError("trans:%d, failed to commit redo log since %s", pTrans->id, terrstr());
+ mndTransDrop(pTrans);
+ return -1;
+ }
+ sdbSetRawStatus(pRaw, SDB_STATUS_READY);
+
+ if (mndTransPrepare(pMnode, pTrans) != 0) {
+ mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr());
+ mndTransDrop(pTrans);
+ return -1;
+ }
+
+ mndTransDrop(pTrans);
+ return 0;
}
static int32_t mndRetrieveClusters(SRpcMsg *pMsg, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows) {
diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c
index 1bb003bab9f526da988945c1025d09aebcffd39d..1f8bf0699322ffdaad5c479b3c8fec3451645527 100644
--- a/source/dnode/mnode/impl/src/mndConsumer.c
+++ b/source/dnode/mnode/impl/src/mndConsumer.c
@@ -97,7 +97,7 @@ static int32_t mndProcessConsumerLostMsg(SRpcMsg *pMsg) {
mndReleaseConsumer(pMnode, pConsumer);
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_CONSUMER_LOST, pMsg);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pMsg);
if (pTrans == NULL) goto FAIL;
if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) goto FAIL;
if (mndTransPrepare(pMnode, pTrans) != 0) goto FAIL;
@@ -121,7 +121,7 @@ static int32_t mndProcessConsumerRecoverMsg(SRpcMsg *pMsg) {
mndReleaseConsumer(pMnode, pConsumer);
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_CONSUMER_RECOVER, pMsg);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pMsg);
if (pTrans == NULL) goto FAIL;
if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) goto FAIL;
if (mndTransPrepare(pMnode, pTrans) != 0) goto FAIL;
@@ -306,6 +306,7 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
SMqTopicObj *pTopic = mndAcquireTopic(pMnode, topic);
ASSERT(pTopic);
taosRLockLatch(&pTopic->lock);
+ tstrncpy(topicEp.db, pTopic->db, TSDB_DB_FNAME_LEN);
topicEp.schema.nCols = pTopic->schema.nCols;
if (topicEp.schema.nCols) {
topicEp.schema.pSchema = taosMemoryCalloc(topicEp.schema.nCols, sizeof(SSchema));
@@ -403,7 +404,7 @@ static int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) {
int32_t newTopicNum = taosArrayGetSize(newSub);
// check topic existance
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_SUBSCRIBE, pMsg);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pMsg);
if (pTrans == NULL) goto SUBSCRIBE_OVER;
for (int32_t i = 0; i < newTopicNum; i++) {
@@ -414,12 +415,16 @@ static int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) {
goto SUBSCRIBE_OVER;
}
+#if 0
// ref topic to prevent drop
// TODO make topic complete
SMqTopicObj topicObj = {0};
memcpy(&topicObj, pTopic, sizeof(SMqTopicObj));
topicObj.refConsumerCnt = pTopic->refConsumerCnt + 1;
+ mInfo("subscribe topic %s by consumer %ld cgroup %s, refcnt %d", pTopic->name, consumerId, cgroup,
+ topicObj.refConsumerCnt);
if (mndSetTopicCommitLogs(pMnode, pTrans, &topicObj) != 0) goto SUBSCRIBE_OVER;
+#endif
mndReleaseTopic(pMnode, pTopic);
}
diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c
index 6921235f8bfdb007811dc92ccff0514a68999824..6d7a638c30818d64e436f2de989c7d2fe1d7b9c5 100644
--- a/source/dnode/mnode/impl/src/mndDb.c
+++ b/source/dnode/mnode/impl/src/mndDb.c
@@ -115,6 +115,7 @@ static SSdbRaw *mndDbActionEncode(SDbObj *pDb) {
SDB_SET_INT8(pRaw, dataPos, pRetension->freqUnit, _OVER)
SDB_SET_INT8(pRaw, dataPos, pRetension->keepUnit, _OVER)
}
+ SDB_SET_INT8(pRaw, dataPos, pDb->cfg.schemaless, _OVER)
SDB_SET_RESERVE(pRaw, dataPos, DB_RESERVE_SIZE, _OVER)
SDB_SET_DATALEN(pRaw, dataPos, _OVER)
@@ -192,6 +193,7 @@ static SSdbRow *mndDbActionDecode(SSdbRaw *pRaw) {
}
}
}
+ SDB_GET_INT8(pRaw, dataPos, &pDb->cfg.schemaless, _OVER)
SDB_GET_RESERVE(pRaw, dataPos, DB_RESERVE_SIZE, _OVER)
taosInitRWLatch(&pDb->lock);
@@ -261,7 +263,7 @@ void mndReleaseDb(SMnode *pMnode, SDbObj *pDb) {
sdbRelease(pSdb, pDb);
}
-static int32_t mndAddCreateVnodeAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, SVnodeGid *pVgid) {
+static int32_t mndAddCreateVnodeAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, SVnodeGid *pVgid, bool standby) {
STransAction action = {0};
SDnodeObj *pDnode = mndAcquireDnode(pMnode, pVgid->dnodeId);
@@ -270,7 +272,7 @@ static int32_t mndAddCreateVnodeAction(SMnode *pMnode, STrans *pTrans, SDbObj *p
mndReleaseDnode(pMnode, pDnode);
int32_t contLen = 0;
- void *pReq = mndBuildCreateVnodeReq(pMnode, pDnode, pDb, pVgroup, &contLen);
+ void *pReq = mndBuildCreateVnodeReq(pMnode, pDnode, pDb, pVgroup, &contLen, standby);
if (pReq == NULL) return -1;
action.pCont = pReq;
@@ -286,7 +288,7 @@ static int32_t mndAddCreateVnodeAction(SMnode *pMnode, STrans *pTrans, SDbObj *p
return 0;
}
-static int32_t mndAddAlterVnodeAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup) {
+static int32_t mndAddAlterVnodeAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, tmsg_t msgType) {
STransAction action = {0};
action.epSet = mndGetVgroupEpset(pMnode, pVgroup);
@@ -296,7 +298,7 @@ static int32_t mndAddAlterVnodeAction(SMnode *pMnode, STrans *pTrans, SDbObj *pD
action.pCont = pReq;
action.contLen = contLen;
- action.msgType = TDMT_VND_ALTER_VNODE;
+ action.msgType = msgType;
if (mndTransAppendRedoAction(pTrans, &action) != 0) {
taosMemoryFree(pReq);
@@ -380,6 +382,7 @@ static int32_t mndCheckDbCfg(SMnode *pMnode, SDbCfg *pCfg) {
if (pCfg->replications < TSDB_MIN_DB_REPLICA || pCfg->replications > TSDB_MAX_DB_REPLICA) return -1;
if (pCfg->replications != 1 && pCfg->replications != 3) return -1;
if (pCfg->strict < TSDB_DB_STRICT_OFF || pCfg->strict > TSDB_DB_STRICT_ON) return -1;
+ if (pCfg->schemaless < TSDB_DB_SCHEMALESS_OFF || pCfg->schemaless > TSDB_DB_SCHEMALESS_ON) return -1;
if (pCfg->cacheLastRow < TSDB_MIN_DB_CACHE_LAST_ROW || pCfg->cacheLastRow > TSDB_MAX_DB_CACHE_LAST_ROW) return -1;
if (pCfg->hashMethod != 1) return -1;
if (pCfg->replications > mndGetDnodeSize(pMnode)) {
@@ -388,7 +391,7 @@ static int32_t mndCheckDbCfg(SMnode *pMnode, SDbCfg *pCfg) {
}
terrno = 0;
- return TSDB_CODE_SUCCESS;
+ return terrno;
}
static void mndSetDefaultDbCfg(SDbCfg *pCfg) {
@@ -411,6 +414,8 @@ static void mndSetDefaultDbCfg(SDbCfg *pCfg) {
if (pCfg->strict < 0) pCfg->strict = TSDB_DEFAULT_DB_STRICT;
if (pCfg->cacheLastRow < 0) pCfg->cacheLastRow = TSDB_DEFAULT_CACHE_LAST_ROW;
if (pCfg->numOfRetensions < 0) pCfg->numOfRetensions = 0;
+ if (pCfg->schemaless < 0) pCfg->schemaless = TSDB_DB_SCHEMALESS_OFF;
+
}
static int32_t mndSetCreateDbRedoLogs(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroups) {
@@ -467,7 +472,7 @@ static int32_t mndSetCreateDbRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj
for (int32_t vn = 0; vn < pVgroup->replica; ++vn) {
SVnodeGid *pVgid = pVgroup->vnodeGid + vn;
- if (mndAddCreateVnodeAction(pMnode, pTrans, pDb, pVgroup, pVgid) != 0) {
+ if (mndAddCreateVnodeAction(pMnode, pTrans, pDb, pVgroup, pVgid, false) != 0) {
return -1;
}
}
@@ -521,6 +526,7 @@ static int32_t mndCreateDb(SMnode *pMnode, SRpcMsg *pReq, SCreateDbReq *pCreate,
.strict = pCreate->strict,
.cacheLastRow = pCreate->cacheLastRow,
.hashMethod = 1,
+ .schemaless = pCreate->schemaless,
};
dbObj.cfg.numOfRetensions = pCreate->numOfRetensions;
@@ -545,12 +551,12 @@ static int32_t mndCreateDb(SMnode *pMnode, SRpcMsg *pReq, SCreateDbReq *pCreate,
}
int32_t code = -1;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_CREATE_DB, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB, pReq);
if (pTrans == NULL) goto _OVER;
mDebug("trans:%d, used to create db:%s", pTrans->id, pCreate->db);
- mndTransSetDbInfo(pTrans, &dbObj);
+ mndTransSetDbName(pTrans, dbObj.name);
if (mndSetCreateDbRedoLogs(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
if (mndSetCreateDbUndoLogs(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
if (mndSetCreateDbCommitLogs(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
@@ -688,29 +694,37 @@ static int32_t mndSetDbCfgFromAlterDbReq(SDbObj *pDb, SAlterDbReq *pAlter) {
static int32_t mndSetAlterDbRedoLogs(SMnode *pMnode, STrans *pTrans, SDbObj *pOld, SDbObj *pNew) {
SSdbRaw *pRedoRaw = mndDbActionEncode(pOld);
if (pRedoRaw == NULL) return -1;
- if (mndTransAppendRedolog(pTrans, pRedoRaw) != 0) return -1;
- if (sdbSetRawStatus(pRedoRaw, SDB_STATUS_READY) != 0) return -1;
+ if (mndTransAppendRedolog(pTrans, pRedoRaw) != 0) {
+ sdbFreeRaw(pRedoRaw);
+ return -1;
+ }
+ sdbSetRawStatus(pRedoRaw, SDB_STATUS_READY);
return 0;
}
static int32_t mndSetAlterDbCommitLogs(SMnode *pMnode, STrans *pTrans, SDbObj *pOld, SDbObj *pNew) {
SSdbRaw *pCommitRaw = mndDbActionEncode(pNew);
if (pCommitRaw == NULL) return -1;
- if (mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) return -1;
- if (sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY) != 0) return -1;
+ if (mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) {
+ sdbFreeRaw(pCommitRaw);
+ return -1;
+ }
+ sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY);
return 0;
}
static int32_t mndBuildAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, SArray *pArray) {
if (pVgroup->replica <= 0 || pVgroup->replica == pDb->cfg.replications) {
- if (mndAddAlterVnodeAction(pMnode, pTrans, pDb, pVgroup) != 0) {
+ if (mndAddAlterVnodeAction(pMnode, pTrans, pDb, pVgroup, TDMT_VND_ALTER_CONFIG) != 0) {
return -1;
}
} else {
SVgObj newVgroup = {0};
memcpy(&newVgroup, pVgroup, sizeof(SVgObj));
+ mndTransSetSerial(pTrans);
+
if (newVgroup.replica < pDb->cfg.replications) {
mInfo("db:%s, vgId:%d, will add 2 vnodes, vn:0 dnode:%d", pVgroup->dbName, pVgroup->vgId,
pVgroup->vnodeGid[0].dnodeId);
@@ -720,9 +734,9 @@ static int32_t mndBuildAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj
return -1;
}
newVgroup.replica = pDb->cfg.replications;
- if (mndAddAlterVnodeAction(pMnode, pTrans, pDb, &newVgroup) != 0) return -1;
- if (mndAddCreateVnodeAction(pMnode, pTrans, pDb, &newVgroup, &newVgroup.vnodeGid[1]) != 0) return -1;
- if (mndAddCreateVnodeAction(pMnode, pTrans, pDb, &newVgroup, &newVgroup.vnodeGid[2]) != 0) return -1;
+ if (mndAddCreateVnodeAction(pMnode, pTrans, pDb, &newVgroup, &newVgroup.vnodeGid[1], true) != 0) return -1;
+ if (mndAddCreateVnodeAction(pMnode, pTrans, pDb, &newVgroup, &newVgroup.vnodeGid[2], true) != 0) return -1;
+ if (mndAddAlterVnodeAction(pMnode, pTrans, pDb, &newVgroup, TDMT_VND_ALTER_REPLICA) != 0) return -1;
} else {
mInfo("db:%s, vgId:%d, will remove 2 vnodes", pVgroup->dbName, pVgroup->vgId);
@@ -733,15 +747,18 @@ static int32_t mndBuildAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj
return -1;
}
newVgroup.replica = pDb->cfg.replications;
- if (mndAddAlterVnodeAction(pMnode, pTrans, pDb, &newVgroup) != 0) return -1;
+ if (mndAddAlterVnodeAction(pMnode, pTrans, pDb, &newVgroup, TDMT_VND_ALTER_REPLICA) != 0) return -1;
if (mndAddDropVnodeAction(pMnode, pTrans, pDb, &newVgroup, &del1, true) != 0) return -1;
if (mndAddDropVnodeAction(pMnode, pTrans, pDb, &newVgroup, &del2, true) != 0) return -1;
}
SSdbRaw *pVgRaw = mndVgroupActionEncode(&newVgroup);
if (pVgRaw == NULL) return -1;
- if (mndTransAppendCommitlog(pTrans, pVgRaw) != 0) return -1;
- if (sdbSetRawStatus(pVgRaw, SDB_STATUS_READY) != 0) return -1;
+ if (mndTransAppendCommitlog(pTrans, pVgRaw) != 0) {
+ sdbFreeRaw(pVgRaw);
+ return -1;
+ }
+ sdbSetRawStatus(pVgRaw, SDB_STATUS_READY);
}
return 0;
@@ -774,18 +791,16 @@ static int32_t mndSetAlterDbRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *
}
static int32_t mndAlterDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pOld, SDbObj *pNew) {
- int32_t code = -1;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_ALTER_DB, pReq);
- if (pTrans == NULL) goto _OVER;
-
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq);
+ if (pTrans == NULL) return -1;
mDebug("trans:%d, used to alter db:%s", pTrans->id, pOld->name);
- mndTransSetDbInfo(pTrans, pOld);
+ int32_t code = -1;
+ mndTransSetDbName(pTrans, pOld->name);
if (mndSetAlterDbRedoLogs(pMnode, pTrans, pOld, pNew) != 0) goto _OVER;
if (mndSetAlterDbCommitLogs(pMnode, pTrans, pOld, pNew) != 0) goto _OVER;
if (mndSetAlterDbRedoActions(pMnode, pTrans, pOld, pNew) != 0) goto _OVER;
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
-
code = 0;
_OVER:
@@ -890,6 +905,7 @@ static int32_t mndProcessGetDbCfgReq(SRpcMsg *pReq) {
cfgRsp.cacheLastRow = pDb->cfg.cacheLastRow;
cfgRsp.numOfRetensions = pDb->cfg.numOfRetensions;
cfgRsp.pRetensions = pDb->cfg.pRetensions;
+ cfgRsp.schemaless = pDb->cfg.schemaless;
int32_t contLen = tSerializeSDbCfgRsp(NULL, 0, &cfgRsp);
void *pRsp = rpcMallocCont(contLen);
@@ -1036,17 +1052,17 @@ static int32_t mndBuildDropDbRsp(SDbObj *pDb, int32_t *pRspLen, void **ppRsp, bo
static int32_t mndDropDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb) {
int32_t code = -1;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_DROP_DB, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq);
if (pTrans == NULL) goto _OVER;
mDebug("trans:%d, used to drop db:%s", pTrans->id, pDb->name);
- mndTransSetDbInfo(pTrans, pDb);
+ mndTransSetDbName(pTrans, pDb->name);
if (mndSetDropDbRedoLogs(pMnode, pTrans, pDb) != 0) goto _OVER;
if (mndSetDropDbCommitLogs(pMnode, pTrans, pDb) != 0) goto _OVER;
- /*if (mndDropOffsetByDB(pMnode, pTrans, pDb) != 0) goto _OVER;*/
- /*if (mndDropSubByDB(pMnode, pTrans, pDb) != 0) goto _OVER;*/
- /*if (mndDropTopicByDB(pMnode, pTrans, pDb) != 0) goto _OVER;*/
+ if (mndDropOffsetByDB(pMnode, pTrans, pDb) != 0) goto _OVER;
+ if (mndDropSubByDB(pMnode, pTrans, pDb) != 0) goto _OVER;
+ if (mndDropTopicByDB(pMnode, pTrans, pDb) != 0) goto _OVER;
if (mndSetDropDbRedoActions(pMnode, pTrans, pDb) != 0) goto _OVER;
SUserObj *pUser = mndAcquireUser(pMnode, pDb->createUser);
@@ -1155,7 +1171,7 @@ static void mndBuildDBVgroupInfo(SDbObj *pDb, SMnode *pMnode, SArray *pVgList) {
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup);
if (pIter == NULL) break;
- if (NULL == pDb || pVgroup->dbUid == pDb->uid) {
+ if ((NULL == pDb || pVgroup->dbUid == pDb->uid) && !pVgroup->isTsma) {
SVgroupInfo vgInfo = {0};
vgInfo.vgId = pVgroup->vgId;
vgInfo.hashBegin = pVgroup->hashBegin;
@@ -1314,7 +1330,7 @@ int32_t mndValidateDbInfo(SMnode *pMnode, SDbVgVersion *pDbs, int32_t numOfDbs,
SDbObj *pDb = mndAcquireDb(pMnode, pDbVgVersion->dbFName);
if (pDb == NULL) {
- mDebug("db:%s, no exist", pDbVgVersion->dbFName);
+ mTrace("db:%s, no exist", pDbVgVersion->dbFName);
memcpy(usedbRsp.db, pDbVgVersion->dbFName, TSDB_DB_FNAME_LEN);
usedbRsp.uid = pDbVgVersion->dbId;
usedbRsp.vgVersion = -1;
@@ -1533,8 +1549,11 @@ static void dumpDbInfoData(SSDataBlock *pBlock, SDbObj *pDb, SShowObj *pShow, in
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, rows, (const char *)&pDb->cfg.numOfStables, false);
- pColInfo = taosArrayGet(pBlock->pDataBlock, cols);
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, rows, (const char *)statusB, false);
+
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols);
+ colDataAppend(pColInfo, rows, (const char *)&pDb->cfg.schemaless, false);
}
}
diff --git a/source/dnode/mnode/impl/src/mndDef.c b/source/dnode/mnode/impl/src/mndDef.c
index 35ba25acd54abf39351e51cfea42152f41b57b9e..b6659e163223914682252b8986b95dcea133d732 100644
--- a/source/dnode/mnode/impl/src/mndDef.c
+++ b/source/dnode/mnode/impl/src/mndDef.c
@@ -395,10 +395,8 @@ SMqSubscribeObj *tCloneSubscribeObj(const SMqSubscribeObj *pSub) {
taosInitRWLatch(&pSubNew->lock);
pSubNew->dbUid = pSub->dbUid;
+ pSubNew->stbUid = pSub->stbUid;
pSubNew->subType = pSub->subType;
- pSubNew->withTbName = pSub->withTbName;
- pSubNew->withSchema = pSub->withSchema;
- pSubNew->withTag = pSub->withTag;
pSubNew->vgNum = pSub->vgNum;
pSubNew->consumerHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
@@ -431,9 +429,7 @@ int32_t tEncodeSubscribeObj(void **buf, const SMqSubscribeObj *pSub) {
tlen += taosEncodeFixedI64(buf, pSub->dbUid);
tlen += taosEncodeFixedI32(buf, pSub->vgNum);
tlen += taosEncodeFixedI8(buf, pSub->subType);
- tlen += taosEncodeFixedI8(buf, pSub->withTbName);
- tlen += taosEncodeFixedI8(buf, pSub->withSchema);
- tlen += taosEncodeFixedI8(buf, pSub->withTag);
+ tlen += taosEncodeFixedI64(buf, pSub->stbUid);
void *pIter = NULL;
int32_t sz = taosHashGetSize(pSub->consumerHash);
@@ -458,9 +454,7 @@ void *tDecodeSubscribeObj(const void *buf, SMqSubscribeObj *pSub) {
buf = taosDecodeFixedI64(buf, &pSub->dbUid);
buf = taosDecodeFixedI32(buf, &pSub->vgNum);
buf = taosDecodeFixedI8(buf, &pSub->subType);
- buf = taosDecodeFixedI8(buf, &pSub->withTbName);
- buf = taosDecodeFixedI8(buf, &pSub->withSchema);
- buf = taosDecodeFixedI8(buf, &pSub->withTag);
+ buf = taosDecodeFixedI64(buf, &pSub->stbUid);
int32_t sz;
buf = taosDecodeFixedI32(buf, &sz);
diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c
index 0cac7fd86b3649928fcd76da2098964426f2a065..aeff018aa82da7216e21bb46270a6bbb8c3ead7a 100644
--- a/source/dnode/mnode/impl/src/mndDnode.c
+++ b/source/dnode/mnode/impl/src/mndDnode.c
@@ -17,6 +17,7 @@
#include "mndDnode.h"
#include "mndAuth.h"
#include "mndMnode.h"
+#include "mndQnode.h"
#include "mndShow.h"
#include "mndTrans.h"
#include "mndUser.h"
@@ -58,14 +59,16 @@ static int32_t mndRetrieveDnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
static void mndCancelGetNextDnode(SMnode *pMnode, void *pIter);
int32_t mndInitDnode(SMnode *pMnode) {
- SSdbTable table = {.sdbType = SDB_DNODE,
- .keyType = SDB_KEY_INT32,
- .deployFp = (SdbDeployFp)mndCreateDefaultDnode,
- .encodeFp = (SdbEncodeFp)mndDnodeActionEncode,
- .decodeFp = (SdbDecodeFp)mndDnodeActionDecode,
- .insertFp = (SdbInsertFp)mndDnodeActionInsert,
- .updateFp = (SdbUpdateFp)mndDnodeActionUpdate,
- .deleteFp = (SdbDeleteFp)mndDnodeActionDelete};
+ SSdbTable table = {
+ .sdbType = SDB_DNODE,
+ .keyType = SDB_KEY_INT32,
+ .deployFp = (SdbDeployFp)mndCreateDefaultDnode,
+ .encodeFp = (SdbEncodeFp)mndDnodeActionEncode,
+ .decodeFp = (SdbDecodeFp)mndDnodeActionDecode,
+ .insertFp = (SdbInsertFp)mndDnodeActionInsert,
+ .updateFp = (SdbUpdateFp)mndDnodeActionUpdate,
+ .deleteFp = (SdbDeleteFp)mndDnodeActionDelete,
+ };
mndSetMsgHandle(pMnode, TDMT_MND_CREATE_DNODE, mndProcessCreateDnodeReq);
mndSetMsgHandle(pMnode, TDMT_MND_DROP_DNODE, mndProcessDropDnodeReq);
@@ -90,13 +93,36 @@ static int32_t mndCreateDefaultDnode(SMnode *pMnode) {
dnodeObj.updateTime = dnodeObj.createdTime;
dnodeObj.port = pMnode->replicas[0].port;
memcpy(&dnodeObj.fqdn, pMnode->replicas[0].fqdn, TSDB_FQDN_LEN);
+ snprintf(dnodeObj.ep, TSDB_EP_LEN, "%s:%u", dnodeObj.fqdn, dnodeObj.port);
SSdbRaw *pRaw = mndDnodeActionEncode(&dnodeObj);
if (pRaw == NULL) return -1;
if (sdbSetRawStatus(pRaw, SDB_STATUS_READY) != 0) return -1;
- mDebug("dnode:%d, will be created while deploy sdb, raw:%p", dnodeObj.id, pRaw);
- return sdbWrite(pMnode->pSdb, pRaw);
+ mDebug("dnode:%d, will be created when deploying, raw:%p", dnodeObj.id, pRaw);
+
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, NULL);
+ if (pTrans == NULL) {
+ mError("dnode:%s, failed to create since %s", dnodeObj.ep, terrstr());
+ return -1;
+ }
+ mDebug("trans:%d, used to create dnode:%s", pTrans->id, dnodeObj.ep);
+
+ if (mndTransAppendCommitlog(pTrans, pRaw) != 0) {
+ mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr());
+ mndTransDrop(pTrans);
+ return -1;
+ }
+ sdbSetRawStatus(pRaw, SDB_STATUS_READY);
+
+ if (mndTransPrepare(pMnode, pTrans) != 0) {
+ mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr());
+ mndTransDrop(pTrans);
+ return -1;
+ }
+
+ mndTransDrop(pTrans);
+ return 0;
}
static SSdbRaw *mndDnodeActionEncode(SDnodeObj *pDnode) {
@@ -230,7 +256,7 @@ int32_t mndGetDnodeSize(SMnode *pMnode) {
bool mndIsDnodeOnline(SMnode *pMnode, SDnodeObj *pDnode, int64_t curMs) {
int64_t interval = TABS(pDnode->lastAccessTime - curMs);
- if (interval > 30000 * tsStatusInterval) {
+ if (interval > 5000 * tsStatusInterval) {
if (pDnode->rebootTime > 0) {
pDnode->offlineReason = DND_REASON_STATUS_MSG_TIMEOUT;
}
@@ -350,9 +376,25 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq) {
mndReleaseVgroup(pMnode, pVgroup);
}
+ SMnodeObj *pObj = mndAcquireMnode(pMnode, pDnode->id);
+ if (pObj != NULL) {
+ if (pObj->state != statusReq.mload.syncState) {
+ pObj->state = statusReq.mload.syncState;
+ pObj->stateStartTime = taosGetTimestampMs();
+ }
+ mndReleaseMnode(pMnode, pObj);
+ }
+
+ SQnodeObj *pQnode = mndAcquireQnode(pMnode, statusReq.qload.dnodeId);
+ if (pQnode != NULL) {
+ pQnode->load = statusReq.qload;
+ mndReleaseQnode(pMnode, pQnode);
+ }
+
+ int64_t dnodeVer = sdbGetTableVer(pMnode->pSdb, SDB_DNODE) + sdbGetTableVer(pMnode->pSdb, SDB_MNODE);
int64_t curMs = taosGetTimestampMs();
bool online = mndIsDnodeOnline(pMnode, pDnode, curMs);
- bool dnodeChanged = (statusReq.dnodeVer != sdbGetTableVer(pMnode->pSdb, SDB_DNODE));
+ bool dnodeChanged = (statusReq.dnodeVer != dnodeVer);
bool reboot = (pDnode->rebootTime != statusReq.rebootTime);
bool needCheck = !online || dnodeChanged || reboot;
@@ -395,7 +437,8 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq) {
if (!online) {
mInfo("dnode:%d, from offline to online", pDnode->id);
} else {
- mDebug("dnode:%d, send dnode eps", pDnode->id);
+ mDebug("dnode:%d, send dnode epset, online:%d ver:% " PRId64 ":%" PRId64 " reboot:%d", pDnode->id, online,
+ statusReq.dnodeVer, dnodeVer, reboot);
}
pDnode->rebootTime = statusReq.rebootTime;
@@ -403,7 +446,7 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq) {
pDnode->numOfSupportVnodes = statusReq.numOfSupportVnodes;
SStatusRsp statusRsp = {0};
- statusRsp.dnodeVer = sdbGetTableVer(pMnode->pSdb, SDB_DNODE);
+ statusRsp.dnodeVer = dnodeVer;
statusRsp.dnodeCfg.dnodeId = pDnode->id;
statusRsp.dnodeCfg.clusterId = pMnode->clusterId;
statusRsp.pDnodeEps = taosArrayInit(mndGetDnodeSize(pMnode), sizeof(SDnodeEp));
@@ -441,7 +484,7 @@ static int32_t mndCreateDnode(SMnode *pMnode, SRpcMsg *pReq, SCreateDnodeReq *pC
memcpy(dnodeObj.fqdn, pCreate->fqdn, TSDB_FQDN_LEN);
snprintf(dnodeObj.ep, TSDB_EP_LEN, "%s:%u", dnodeObj.fqdn, dnodeObj.port);
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_CREATE_DNODE, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_GLOBAL, pReq);
if (pTrans == NULL) {
mError("dnode:%s, failed to create since %s", dnodeObj.ep, terrstr());
return -1;
@@ -517,7 +560,7 @@ CREATE_DNODE_OVER:
}
static int32_t mndDropDnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode) {
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_DROP_DNODE, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_GLOBAL, pReq);
if (pTrans == NULL) {
mError("dnode:%d, failed to drop since %s", pDnode->id, terrstr());
return -1;
@@ -570,7 +613,7 @@ static int32_t mndProcessDropDnodeReq(SRpcMsg *pReq) {
pMObj = mndAcquireMnode(pMnode, dropReq.dnodeId);
if (pMObj != NULL) {
- terrno = TSDB_CODE_MND_MNODE_DEPLOYED;
+ terrno = TSDB_CODE_MND_MNODE_NOT_EXIST;
goto DROP_DNODE_OVER;
}
@@ -701,7 +744,7 @@ static int32_t mndRetrieveDnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
colDataAppend(pColInfo, numOfRows, (const char *)&pDnode->id, false);
char buf[tListLen(pDnode->ep) + VARSTR_HEADER_SIZE] = {0};
- STR_WITH_MAXSIZE_TO_VARSTR(buf, pDnode->ep, pShow->pMeta->pSchemas[cols].bytes);
+ STR_WITH_MAXSIZE_TO_VARSTR(buf, pDnode->ep, pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, buf, false);
diff --git a/source/dnode/mnode/impl/src/mndFunc.c b/source/dnode/mnode/impl/src/mndFunc.c
index 9107dab693d4c9eb6adc6599d03126d5a59a5a69..bf4baebd8584bd8324f3e4e53836bbd8a2002fad 100644
--- a/source/dnode/mnode/impl/src/mndFunc.c
+++ b/source/dnode/mnode/impl/src/mndFunc.c
@@ -215,7 +215,7 @@ static int32_t mndCreateFunc(SMnode *pMnode, SRpcMsg *pReq, SCreateFuncReq *pCre
}
memcpy(func.pCode, pCreate->pCode, func.codeSize);
- pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_CREATE_FUNC, pReq);
+ pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq);
if (pTrans == NULL) goto _OVER;
mDebug("trans:%d, used to create func:%s", pTrans->id, pCreate->name);
@@ -245,7 +245,7 @@ _OVER:
static int32_t mndDropFunc(SMnode *pMnode, SRpcMsg *pReq, SFuncObj *pFunc) {
int32_t code = -1;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_DROP_FUNC, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq);
if (pTrans == NULL) goto _OVER;
mDebug("trans:%d, used to drop user:%s", pTrans->id, pFunc->name);
diff --git a/source/dnode/mnode/impl/src/mnode.c b/source/dnode/mnode/impl/src/mndMain.c
similarity index 65%
rename from source/dnode/mnode/impl/src/mnode.c
rename to source/dnode/mnode/impl/src/mndMain.c
index 5b8c1a3101cad0d6a9c4ee0468eaff547bfd174d..3a3fd7ebdb5ac8f56a64ea5b0169dfeda8cd3b97 100644
--- a/source/dnode/mnode/impl/src/mnode.c
+++ b/source/dnode/mnode/impl/src/mndMain.c
@@ -85,7 +85,7 @@ static void *mndThreadFp(void *param) {
while (1) {
lastTime++;
taosMsleep(100);
- if (pMnode->stopped) break;
+ if (mndGetStop(pMnode)) break;
if (lastTime % (tsTransPullupInterval * 10) == 0) {
mndPullupTrans(pMnode);
@@ -118,7 +118,6 @@ static int32_t mndInitTimer(SMnode *pMnode) {
}
static void mndCleanupTimer(SMnode *pMnode) {
- pMnode->stopped = true;
if (taosCheckPthreadValid(pMnode->thread)) {
taosThreadJoin(pMnode->thread, NULL);
taosThreadClear(&pMnode->thread);
@@ -153,8 +152,14 @@ static int32_t mndInitSdb(SMnode *pMnode) {
return 0;
}
-static int32_t mndDeploySdb(SMnode *pMnode) { return sdbDeploy(pMnode->pSdb); }
-static int32_t mndReadSdb(SMnode *pMnode) { return sdbReadFile(pMnode->pSdb); }
+static int32_t mndOpenSdb(SMnode *pMnode) {
+ if (!pMnode->deploy) {
+ return sdbReadFile(pMnode->pSdb);
+ } else {
+ // return sdbDeploy(pMnode->pSdb);;
+ return 0;
+ }
+}
static void mndCleanupSdb(SMnode *pMnode) {
if (pMnode->pSdb) {
@@ -176,7 +181,7 @@ static int32_t mndAllocStep(SMnode *pMnode, char *name, MndInitFp initFp, MndCle
return 0;
}
-static int32_t mndInitSteps(SMnode *pMnode, bool deploy) {
+static int32_t mndInitSteps(SMnode *pMnode) {
if (mndAllocStep(pMnode, "mnode-sdb", mndInitSdb, mndCleanupSdb) != 0) return -1;
if (mndAllocStep(pMnode, "mnode-trans", mndInitTrans, mndCleanupTrans) != 0) return -1;
if (mndAllocStep(pMnode, "mnode-cluster", mndInitCluster, mndCleanupCluster) != 0) return -1;
@@ -201,11 +206,7 @@ static int32_t mndInitSteps(SMnode *pMnode, bool deploy) {
if (mndAllocStep(pMnode, "mnode-perfs", mndInitPerfs, mndCleanupPerfs) != 0) return -1;
if (mndAllocStep(pMnode, "mnode-db", mndInitDb, mndCleanupDb) != 0) return -1;
if (mndAllocStep(pMnode, "mnode-func", mndInitFunc, mndCleanupFunc) != 0) return -1;
- if (deploy) {
- if (mndAllocStep(pMnode, "mnode-sdb-deploy", mndDeploySdb, NULL) != 0) return -1;
- } else {
- if (mndAllocStep(pMnode, "mnode-sdb-read", mndReadSdb, NULL) != 0) return -1;
- }
+ if (mndAllocStep(pMnode, "mnode-sdb", mndOpenSdb, NULL) != 0) return -1;
if (mndAllocStep(pMnode, "mnode-profile", mndInitProfile, mndCleanupProfile) != 0) return -1;
if (mndAllocStep(pMnode, "mnode-show", mndInitShow, mndCleanupShow) != 0) return -1;
if (mndAllocStep(pMnode, "mnode-query", mndInitQuery, mndCleanupQuery) != 0) return -1;
@@ -262,7 +263,8 @@ static void mndSetOptions(SMnode *pMnode, const SMnodeOpt *pOption) {
pMnode->selfIndex = pOption->selfIndex;
memcpy(&pMnode->replicas, pOption->replicas, sizeof(SReplica) * TSDB_MAX_REPLICA);
pMnode->msgCb = pOption->msgCb;
- pMnode->selfId = pOption->replicas[pOption->selfIndex].id;
+ pMnode->selfDnodeId = pOption->dnodeId;
+ pMnode->syncMgmt.standby = pOption->standby;
}
SMnode *mndOpen(const char *path, const SMnodeOpt *pOption) {
@@ -279,6 +281,7 @@ SMnode *mndOpen(const char *path, const SMnodeOpt *pOption) {
(void)taosParseTime(timestr, &pMnode->checkTime, (int32_t)strlen(timestr), TSDB_TIME_PRECISION_MILLI, 0);
mndSetOptions(pMnode, pOption);
+ pMnode->deploy = pOption->deploy;
pMnode->pSteps = taosArrayInit(24, sizeof(SMnodeStep));
if (pMnode->pSteps == NULL) {
taosMemoryFree(pMnode);
@@ -296,7 +299,7 @@ SMnode *mndOpen(const char *path, const SMnodeOpt *pOption) {
return NULL;
}
- code = mndInitSteps(pMnode, pOption->deploy);
+ code = mndInitSteps(pMnode);
if (code != 0) {
code = terrno;
mError("failed to open mnode since %s", terrstr());
@@ -314,7 +317,6 @@ SMnode *mndOpen(const char *path, const SMnodeOpt *pOption) {
return NULL;
}
- mndUpdateMnodeRole(pMnode);
mDebug("mnode open successfully ");
return pMnode;
}
@@ -329,153 +331,149 @@ void mndClose(SMnode *pMnode) {
}
}
-int32_t mndAlter(SMnode *pMnode, const SMnodeOpt *pOption) {
- mDebug("start to alter mnode");
- mDebug("mnode is altered");
- return 0;
-}
-
int32_t mndStart(SMnode *pMnode) {
mndSyncStart(pMnode);
+ if (pMnode->deploy) {
+ if (sdbDeploy(pMnode->pSdb) != 0) {
+ mError("failed to deploy sdb while start mnode");
+ return -1;
+ }
+ mndSetRestore(pMnode, true);
+ }
return mndInitTimer(pMnode);
}
void mndStop(SMnode *pMnode) {
+ mndSetStop(pMnode);
mndSyncStop(pMnode);
- return mndCleanupTimer(pMnode);
+ mndCleanupTimer(pMnode);
}
int32_t mndProcessSyncMsg(SRpcMsg *pMsg) {
- SMnode *pMnode = pMsg->info.node;
- void *ahandle = pMsg->info.ahandle;
- int32_t ret = TAOS_SYNC_PROPOSE_OTHER_ERROR;
-
- if (syncEnvIsStart()) {
- SSyncNode *pSyncNode = syncNodeAcquire(pMnode->syncMgmt.sync);
- assert(pSyncNode != NULL);
-
- ESyncState state = syncGetMyRole(pMnode->syncMgmt.sync);
- SyncTerm currentTerm = syncGetMyTerm(pMnode->syncMgmt.sync);
-
- SMsgHead *pHead = pMsg->pCont;
-
- char logBuf[512];
- char *syncNodeStr = sync2SimpleStr(pMnode->syncMgmt.sync);
- snprintf(logBuf, sizeof(logBuf), "==vnodeProcessSyncReq== msgType:%d, syncNode: %s", pMsg->msgType, syncNodeStr);
- syncRpcMsgLog2(logBuf, pMsg);
- taosMemoryFree(syncNodeStr);
-
- SRpcMsg *pRpcMsg = pMsg;
-
- if (pRpcMsg->msgType == TDMT_VND_SYNC_TIMEOUT) {
- SyncTimeout *pSyncMsg = syncTimeoutFromRpcMsg2(pRpcMsg);
- assert(pSyncMsg != NULL);
-
- ret = syncNodeOnTimeoutCb(pSyncNode, pSyncMsg);
- syncTimeoutDestroy(pSyncMsg);
+ SMnode *pMnode = pMsg->info.node;
+ SSyncMgmt *pMgmt = &pMnode->syncMgmt;
+ int32_t code = TAOS_SYNC_PROPOSE_OTHER_ERROR;
- } else if (pRpcMsg->msgType == TDMT_VND_SYNC_PING) {
- SyncPing *pSyncMsg = syncPingFromRpcMsg2(pRpcMsg);
- assert(pSyncMsg != NULL);
-
- ret = syncNodeOnPingCb(pSyncNode, pSyncMsg);
- syncPingDestroy(pSyncMsg);
-
- } else if (pRpcMsg->msgType == TDMT_VND_SYNC_PING_REPLY) {
- SyncPingReply *pSyncMsg = syncPingReplyFromRpcMsg2(pRpcMsg);
- assert(pSyncMsg != NULL);
-
- ret = syncNodeOnPingReplyCb(pSyncNode, pSyncMsg);
- syncPingReplyDestroy(pSyncMsg);
-
- } else if (pRpcMsg->msgType == TDMT_VND_SYNC_CLIENT_REQUEST) {
- SyncClientRequest *pSyncMsg = syncClientRequestFromRpcMsg2(pRpcMsg);
- assert(pSyncMsg != NULL);
-
- ret = syncNodeOnClientRequestCb(pSyncNode, pSyncMsg);
- syncClientRequestDestroy(pSyncMsg);
-
- } else if (pRpcMsg->msgType == TDMT_VND_SYNC_REQUEST_VOTE) {
- SyncRequestVote *pSyncMsg = syncRequestVoteFromRpcMsg2(pRpcMsg);
- assert(pSyncMsg != NULL);
+ if (!syncEnvIsStart()) {
+ mError("failed to process sync msg:%p type:%s since syncEnv stop", pMsg, TMSG_INFO(pMsg->msgType));
+ return TAOS_SYNC_PROPOSE_OTHER_ERROR;
+ }
- ret = syncNodeOnRequestVoteCb(pSyncNode, pSyncMsg);
- syncRequestVoteDestroy(pSyncMsg);
+ SSyncNode *pSyncNode = syncNodeAcquire(pMgmt->sync);
+ if (pSyncNode == NULL) {
+ mError("failed to process sync msg:%p type:%s since syncNode is null", pMsg, TMSG_INFO(pMsg->msgType));
+ return TAOS_SYNC_PROPOSE_OTHER_ERROR;
+ }
- } else if (pRpcMsg->msgType == TDMT_VND_SYNC_REQUEST_VOTE_REPLY) {
- SyncRequestVoteReply *pSyncMsg = syncRequestVoteReplyFromRpcMsg2(pRpcMsg);
- assert(pSyncMsg != NULL);
+ if (mndAcquireSyncRef(pMnode) != 0) {
+ mError("failed to process sync msg:%p type:%s since %s", pMsg, TMSG_INFO(pMsg->msgType), terrstr());
+ return TAOS_SYNC_PROPOSE_OTHER_ERROR;
+ }
- ret = syncNodeOnRequestVoteReplyCb(pSyncNode, pSyncMsg);
- syncRequestVoteReplyDestroy(pSyncMsg);
+ char logBuf[512] = {0};
+ char *syncNodeStr = sync2SimpleStr(pMgmt->sync);
+ snprintf(logBuf, sizeof(logBuf), "==vnodeProcessSyncReq== msgType:%d, syncNode: %s", pMsg->msgType, syncNodeStr);
+ syncRpcMsgLog2(logBuf, pMsg);
+ taosMemoryFree(syncNodeStr);
+
+ if (pMsg->msgType == TDMT_VND_SYNC_TIMEOUT) {
+ SyncTimeout *pSyncMsg = syncTimeoutFromRpcMsg2(pMsg);
+ code = syncNodeOnTimeoutCb(pSyncNode, pSyncMsg);
+ syncTimeoutDestroy(pSyncMsg);
+ } else if (pMsg->msgType == TDMT_VND_SYNC_PING) {
+ SyncPing *pSyncMsg = syncPingFromRpcMsg2(pMsg);
+ code = syncNodeOnPingCb(pSyncNode, pSyncMsg);
+ syncPingDestroy(pSyncMsg);
+ } else if (pMsg->msgType == TDMT_VND_SYNC_PING_REPLY) {
+ SyncPingReply *pSyncMsg = syncPingReplyFromRpcMsg2(pMsg);
+ code = syncNodeOnPingReplyCb(pSyncNode, pSyncMsg);
+ syncPingReplyDestroy(pSyncMsg);
+ } else if (pMsg->msgType == TDMT_VND_SYNC_CLIENT_REQUEST) {
+ SyncClientRequest *pSyncMsg = syncClientRequestFromRpcMsg2(pMsg);
+ code = syncNodeOnClientRequestCb(pSyncNode, pSyncMsg);
+ syncClientRequestDestroy(pSyncMsg);
+ } else if (pMsg->msgType == TDMT_VND_SYNC_REQUEST_VOTE) {
+ SyncRequestVote *pSyncMsg = syncRequestVoteFromRpcMsg2(pMsg);
+ code = syncNodeOnRequestVoteCb(pSyncNode, pSyncMsg);
+ syncRequestVoteDestroy(pSyncMsg);
+ } else if (pMsg->msgType == TDMT_VND_SYNC_REQUEST_VOTE_REPLY) {
+ SyncRequestVoteReply *pSyncMsg = syncRequestVoteReplyFromRpcMsg2(pMsg);
+ code = syncNodeOnRequestVoteReplyCb(pSyncNode, pSyncMsg);
+ syncRequestVoteReplyDestroy(pSyncMsg);
+ } else if (pMsg->msgType == TDMT_VND_SYNC_APPEND_ENTRIES) {
+ SyncAppendEntries *pSyncMsg = syncAppendEntriesFromRpcMsg2(pMsg);
+ code = syncNodeOnAppendEntriesCb(pSyncNode, pSyncMsg);
+ syncAppendEntriesDestroy(pSyncMsg);
+ } else if (pMsg->msgType == TDMT_VND_SYNC_APPEND_ENTRIES_REPLY) {
+ SyncAppendEntriesReply *pSyncMsg = syncAppendEntriesReplyFromRpcMsg2(pMsg);
+ code = syncNodeOnAppendEntriesReplyCb(pSyncNode, pSyncMsg);
+ syncAppendEntriesReplyDestroy(pSyncMsg);
+ } else {
+ mError("failed to process msg:%p since invalid type:%s", pMsg, TMSG_INFO(pMsg->msgType));
+ code = TAOS_SYNC_PROPOSE_OTHER_ERROR;
+ }
- } else if (pRpcMsg->msgType == TDMT_VND_SYNC_APPEND_ENTRIES) {
- SyncAppendEntries *pSyncMsg = syncAppendEntriesFromRpcMsg2(pRpcMsg);
- assert(pSyncMsg != NULL);
+ mndReleaseSyncRef(pMnode);
+ return code;
+}
- ret = syncNodeOnAppendEntriesCb(pSyncNode, pSyncMsg);
- syncAppendEntriesDestroy(pSyncMsg);
+static int32_t mndCheckMnodeState(SRpcMsg *pMsg) {
+ if (mndAcquireRpcRef(pMsg->info.node) == 0) return 0;
- } else if (pRpcMsg->msgType == TDMT_VND_SYNC_APPEND_ENTRIES_REPLY) {
- SyncAppendEntriesReply *pSyncMsg = syncAppendEntriesReplyFromRpcMsg2(pRpcMsg);
- assert(pSyncMsg != NULL);
+ if (IsReq(pMsg) && pMsg->msgType != TDMT_MND_MQ_TIMER && pMsg->msgType != TDMT_MND_TELEM_TIMER &&
+ pMsg->msgType != TDMT_MND_TRANS_TIMER) {
+ mError("msg:%p, failed to check mnode state since %s, app:%p type:%s", pMsg, terrstr(), pMsg->info.ahandle,
+ TMSG_INFO(pMsg->msgType));
- ret = syncNodeOnAppendEntriesReplyCb(pSyncNode, pSyncMsg);
- syncAppendEntriesReplyDestroy(pSyncMsg);
+ SEpSet epSet = {0};
+ mndGetMnodeEpSet(pMsg->info.node, &epSet);
+ int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet);
+ pMsg->info.rsp = rpcMallocCont(contLen);
+ if (pMsg->info.rsp != NULL) {
+ tSerializeSEpSet(pMsg->info.rsp, contLen, &epSet);
+ pMsg->info.rspLen = contLen;
+ terrno = TSDB_CODE_RPC_REDIRECT;
} else {
- mError("==mndProcessSyncMsg== error msg type:%d", pRpcMsg->msgType);
- ret = TAOS_SYNC_PROPOSE_OTHER_ERROR;
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
}
-
- syncNodeRelease(pSyncNode);
- } else {
- mError("==mndProcessSyncMsg== error syncEnv stop");
- ret = TAOS_SYNC_PROPOSE_OTHER_ERROR;
}
- return ret;
+ return -1;
}
-int32_t mndProcessMsg(SRpcMsg *pMsg) {
- SMnode *pMnode = pMsg->info.node;
- void *ahandle = pMsg->info.ahandle;
- mTrace("msg:%p, will be processed, type:%s app:%p", pMsg, TMSG_INFO(pMsg->msgType), ahandle);
+static int32_t mndCheckMsgContent(SRpcMsg *pMsg) {
+ if (!IsReq(pMsg)) return 0;
+ if (pMsg->contLen != 0 && pMsg->pCont != NULL) return 0;
- if (IsReq(pMsg)) {
- if (!mndIsMaster(pMnode) && pMsg->msgType != TDMT_MND_TRANS_TIMER && pMsg->msgType != TDMT_MND_MQ_TIMER &&
- pMsg->msgType != TDMT_MND_TELEM_TIMER) {
- terrno = TSDB_CODE_APP_NOT_READY;
- mDebug("msg:%p, failed to process since %s, app:%p", pMsg, terrstr(), ahandle);
- return -1;
- }
-
- if (pMsg->contLen == 0 || pMsg->pCont == NULL) {
- terrno = TSDB_CODE_INVALID_MSG_LEN;
- mError("msg:%p, failed to process since %s, app:%p", pMsg, terrstr(), ahandle);
- return -1;
- }
- }
+ mError("msg:%p, failed to check msg content, app:%p type:%s", pMsg, pMsg->info.ahandle, TMSG_INFO(pMsg->msgType));
+ terrno = TSDB_CODE_INVALID_MSG_LEN;
+ return -1;
+}
+int32_t mndProcessRpcMsg(SRpcMsg *pMsg) {
+ SMnode *pMnode = pMsg->info.node;
MndMsgFp fp = pMnode->msgFp[TMSG_INDEX(pMsg->msgType)];
if (fp == NULL) {
+ mError("msg:%p, failed to get msg handle, app:%p type:%s", pMsg, pMsg->info.ahandle, TMSG_INFO(pMsg->msgType));
terrno = TSDB_CODE_MSG_NOT_PROCESSED;
- mError("msg:%p, failed to process since no msg handle, app:%p", pMsg, ahandle);
return -1;
}
+ if (mndCheckMsgContent(pMsg) != 0) return -1;
+ if (mndCheckMnodeState(pMsg) != 0) return -1;
+
+ mTrace("msg:%p, start to process in mnode, app:%p type:%s", pMsg, pMsg->info.ahandle, TMSG_INFO(pMsg->msgType));
int32_t code = (*fp)(pMsg);
+ mndReleaseRpcRef(pMnode);
+
if (code == TSDB_CODE_ACTION_IN_PROGRESS) {
- terrno = code;
- mTrace("msg:%p, in progress, app:%p", pMsg, ahandle);
- } else if (code != 0) {
- if (terrno != TSDB_CODE_OPS_NOT_SUPPORT) {
- mError("msg:%p, failed to process since %s, app:%p", pMsg, terrstr(), ahandle);
- } else {
- mTrace("msg:%p, failed to process since %s, app:%p", pMsg, terrstr(), ahandle);
- }
+ mTrace("msg:%p, won't response immediately since in progress", pMsg);
+ } else if (code == 0) {
+ mTrace("msg:%p, successfully processed and response", pMsg);
} else {
- mTrace("msg:%p, is processed, app:%p", pMsg, ahandle);
+ mError("msg:%p, failed to process since %s, app:%p type:%s", pMsg, terrstr(), pMsg->info.ahandle,
+ TMSG_INFO(pMsg->msgType));
}
return code;
@@ -504,7 +502,7 @@ int64_t mndGenerateUid(char *name, int32_t len) {
int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgroupInfo *pVgroupInfo,
SMonGrantInfo *pGrantInfo) {
- if (!mndIsMaster(pMnode)) return -1;
+ if (mndAcquireRpcRef(pMnode) != 0) return -1;
SSdb *pSdb = pMnode->pSdb;
int64_t ms = taosGetTimestampMs();
@@ -513,6 +511,7 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr
pClusterInfo->mnodes = taosArrayInit(sdbGetSize(pSdb, SDB_MNODE), sizeof(SMonMnodeDesc));
pVgroupInfo->vgroups = taosArrayInit(sdbGetSize(pSdb, SDB_VGROUP), sizeof(SMonVgroupDesc));
if (pClusterInfo->dnodes == NULL || pClusterInfo->mnodes == NULL || pVgroupInfo->vgroups == NULL) {
+ mndReleaseRpcRef(pMnode);
return -1;
}
@@ -548,15 +547,17 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr
SMonMnodeDesc desc = {0};
desc.mnode_id = pObj->id;
tstrncpy(desc.mnode_ep, pObj->pDnode->ep, sizeof(desc.mnode_ep));
- tstrncpy(desc.role, syncStr(pObj->role), sizeof(desc.role));
- taosArrayPush(pClusterInfo->mnodes, &desc);
- sdbRelease(pSdb, pObj);
- if (pObj->role == TAOS_SYNC_STATE_LEADER) {
+ if (pObj->id == pMnode->selfDnodeId) {
pClusterInfo->first_ep_dnode_id = pObj->id;
tstrncpy(pClusterInfo->first_ep, pObj->pDnode->ep, sizeof(pClusterInfo->first_ep));
- pClusterInfo->master_uptime = (ms - pObj->roleTime) / (86400000.0f);
+ pClusterInfo->master_uptime = (ms - pObj->stateStartTime) / (86400000.0f);
+ tstrncpy(desc.role, syncStr(TAOS_SYNC_STATE_LEADER), sizeof(desc.role));
+ } else {
+ tstrncpy(desc.role, syncStr(pObj->state), sizeof(desc.role));
}
+ taosArrayPush(pClusterInfo->mnodes, &desc);
+ sdbRelease(pSdb, pObj);
}
// vgroup info
@@ -605,10 +606,84 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr
pGrantInfo->timeseries_total = INT32_MAX;
}
+ mndReleaseRpcRef(pMnode);
return 0;
}
int32_t mndGetLoad(SMnode *pMnode, SMnodeLoad *pLoad) {
- pLoad->syncState = pMnode->syncMgmt.state;
+ pLoad->syncState = syncGetMyRole(pMnode->syncMgmt.sync);
return 0;
}
+
+int32_t mndAcquireRpcRef(SMnode *pMnode) {
+ int32_t code = 0;
+ taosThreadRwlockRdlock(&pMnode->lock);
+ if (pMnode->stopped) {
+ terrno = TSDB_CODE_APP_NOT_READY;
+ code = -1;
+ } else if (!mndIsMaster(pMnode)) {
+ code = -1;
+ } else {
+ int32_t ref = atomic_add_fetch_32(&pMnode->rpcRef, 1);
+ mTrace("mnode rpc is acquired, ref:%d", ref);
+ }
+ taosThreadRwlockUnlock(&pMnode->lock);
+ return code;
+}
+
+void mndReleaseRpcRef(SMnode *pMnode) {
+ taosThreadRwlockRdlock(&pMnode->lock);
+ int32_t ref = atomic_sub_fetch_32(&pMnode->rpcRef, 1);
+ mTrace("mnode rpc is released, ref:%d", ref);
+ taosThreadRwlockUnlock(&pMnode->lock);
+}
+
+void mndSetRestore(SMnode *pMnode, bool restored) {
+ if (restored) {
+ taosThreadRwlockWrlock(&pMnode->lock);
+ pMnode->restored = true;
+ taosThreadRwlockUnlock(&pMnode->lock);
+ mTrace("mnode set restored:%d", restored);
+ } else {
+ taosThreadRwlockWrlock(&pMnode->lock);
+ pMnode->restored = false;
+ taosThreadRwlockUnlock(&pMnode->lock);
+ mTrace("mnode set restored:%d", restored);
+ while (1) {
+ if (pMnode->rpcRef <= 0) break;
+ taosMsleep(3);
+ }
+ }
+}
+
+bool mndGetRestored(SMnode *pMnode) { return pMnode->restored; }
+
+void mndSetStop(SMnode *pMnode) {
+ taosThreadRwlockWrlock(&pMnode->lock);
+ pMnode->stopped = true;
+ taosThreadRwlockUnlock(&pMnode->lock);
+ mTrace("mnode set stopped");
+}
+
+bool mndGetStop(SMnode *pMnode) { return pMnode->stopped; }
+
+int32_t mndAcquireSyncRef(SMnode *pMnode) {
+ int32_t code = 0;
+ taosThreadRwlockRdlock(&pMnode->lock);
+ if (pMnode->stopped) {
+ terrno = TSDB_CODE_APP_NOT_READY;
+ code = -1;
+ } else {
+ int32_t ref = atomic_add_fetch_32(&pMnode->syncRef, 1);
+ mTrace("mnode sync is acquired, ref:%d", ref);
+ }
+ taosThreadRwlockUnlock(&pMnode->lock);
+ return code;
+}
+
+void mndReleaseSyncRef(SMnode *pMnode) {
+ taosThreadRwlockRdlock(&pMnode->lock);
+ int32_t ref = atomic_sub_fetch_32(&pMnode->syncRef, 1);
+ mTrace("mnode sync is released, ref:%d", ref);
+ taosThreadRwlockUnlock(&pMnode->lock);
+}
diff --git a/source/dnode/mnode/impl/src/mndMnode.c b/source/dnode/mnode/impl/src/mndMnode.c
index 7f86eb8b3292508c9b903c68fd6306b766ac074f..160fc8d428259927d11391fc89ebae689d7e7c2f 100644
--- a/source/dnode/mnode/impl/src/mndMnode.c
+++ b/source/dnode/mnode/impl/src/mndMnode.c
@@ -18,6 +18,7 @@
#include "mndAuth.h"
#include "mndDnode.h"
#include "mndShow.h"
+#include "mndSync.h"
#include "mndTrans.h"
#include "mndUser.h"
@@ -31,6 +32,7 @@ static int32_t mndMnodeActionInsert(SSdb *pSdb, SMnodeObj *pObj);
static int32_t mndMnodeActionDelete(SSdb *pSdb, SMnodeObj *pObj);
static int32_t mndMnodeActionUpdate(SSdb *pSdb, SMnodeObj *pOld, SMnodeObj *pNew);
static int32_t mndProcessCreateMnodeReq(SRpcMsg *pReq);
+static int32_t mndProcessAlterMnodeReq(SRpcMsg *pReq);
static int32_t mndProcessDropMnodeReq(SRpcMsg *pReq);
static int32_t mndProcessCreateMnodeRsp(SRpcMsg *pRsp);
static int32_t mndProcessAlterMnodeRsp(SRpcMsg *pRsp);
@@ -39,16 +41,19 @@ static int32_t mndRetrieveMnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p
static void mndCancelGetNextMnode(SMnode *pMnode, void *pIter);
int32_t mndInitMnode(SMnode *pMnode) {
- SSdbTable table = {.sdbType = SDB_MNODE,
- .keyType = SDB_KEY_INT32,
- .deployFp = (SdbDeployFp)mndCreateDefaultMnode,
- .encodeFp = (SdbEncodeFp)mndMnodeActionEncode,
- .decodeFp = (SdbDecodeFp)mndMnodeActionDecode,
- .insertFp = (SdbInsertFp)mndMnodeActionInsert,
- .updateFp = (SdbUpdateFp)mndMnodeActionUpdate,
- .deleteFp = (SdbDeleteFp)mndMnodeActionDelete};
+ SSdbTable table = {
+ .sdbType = SDB_MNODE,
+ .keyType = SDB_KEY_INT32,
+ .deployFp = (SdbDeployFp)mndCreateDefaultMnode,
+ .encodeFp = (SdbEncodeFp)mndMnodeActionEncode,
+ .decodeFp = (SdbDecodeFp)mndMnodeActionDecode,
+ .insertFp = (SdbInsertFp)mndMnodeActionInsert,
+ .updateFp = (SdbUpdateFp)mndMnodeActionUpdate,
+ .deleteFp = (SdbDeleteFp)mndMnodeActionDelete,
+ };
mndSetMsgHandle(pMnode, TDMT_MND_CREATE_MNODE, mndProcessCreateMnodeReq);
+ mndSetMsgHandle(pMnode, TDMT_DND_ALTER_MNODE, mndProcessAlterMnodeReq);
mndSetMsgHandle(pMnode, TDMT_MND_DROP_MNODE, mndProcessDropMnodeReq);
mndSetMsgHandle(pMnode, TDMT_DND_CREATE_MNODE_RSP, mndProcessCreateMnodeRsp);
mndSetMsgHandle(pMnode, TDMT_DND_ALTER_MNODE_RSP, mndProcessAlterMnodeRsp);
@@ -75,28 +80,6 @@ void mndReleaseMnode(SMnode *pMnode, SMnodeObj *pObj) {
sdbRelease(pMnode->pSdb, pObj);
}
-void mndUpdateMnodeRole(SMnode *pMnode) {
- SSdb *pSdb = pMnode->pSdb;
- void *pIter = NULL;
- while (1) {
- SMnodeObj *pObj = NULL;
- pIter = sdbFetch(pSdb, SDB_MNODE, pIter, (void **)&pObj);
- if (pIter == NULL) break;
-
- ESyncState lastRole = pObj->role;
- if (pObj->id == 1) {
- pObj->role = TAOS_SYNC_STATE_LEADER;
- } else {
- pObj->role = TAOS_SYNC_STATE_CANDIDATE;
- }
- if (pObj->role != lastRole) {
- pObj->roleTime = taosGetTimestampMs();
- }
-
- sdbRelease(pSdb, pObj);
- }
-}
-
static int32_t mndCreateDefaultMnode(SMnode *pMnode) {
SMnodeObj mnodeObj = {0};
mnodeObj.id = 1;
@@ -107,8 +90,30 @@ static int32_t mndCreateDefaultMnode(SMnode *pMnode) {
if (pRaw == NULL) return -1;
sdbSetRawStatus(pRaw, SDB_STATUS_READY);
- mDebug("mnode:%d, will be created while deploy sdb, raw:%p", mnodeObj.id, pRaw);
- return sdbWrite(pMnode->pSdb, pRaw);
+ mDebug("mnode:%d, will be created when deploying, raw:%p", mnodeObj.id, pRaw);
+
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, NULL);
+ if (pTrans == NULL) {
+ mError("mnode:%d, failed to create since %s", mnodeObj.id, terrstr());
+ return -1;
+ }
+ mDebug("trans:%d, used to create mnode:%d", pTrans->id, mnodeObj.id);
+
+ if (mndTransAppendCommitlog(pTrans, pRaw) != 0) {
+ mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr());
+ mndTransDrop(pTrans);
+ return -1;
+ }
+ sdbSetRawStatus(pRaw, SDB_STATUS_READY);
+
+ if (mndTransPrepare(pMnode, pTrans) != 0) {
+ mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr());
+ mndTransDrop(pTrans);
+ return -1;
+ }
+
+ mndTransDrop(pTrans);
+ return 0;
}
static SSdbRaw *mndMnodeActionEncode(SMnodeObj *pObj) {
@@ -181,7 +186,7 @@ static int32_t mndMnodeActionInsert(SSdb *pSdb, SMnodeObj *pObj) {
return -1;
}
- pObj->role = TAOS_SYNC_STATE_FOLLOWER;
+ pObj->state = TAOS_SYNC_STATE_ERROR;
return 0;
}
@@ -214,23 +219,24 @@ bool mndIsMnode(SMnode *pMnode, int32_t dnodeId) {
}
void mndGetMnodeEpSet(SMnode *pMnode, SEpSet *pEpSet) {
- SSdb *pSdb = pMnode->pSdb;
- pEpSet->numOfEps = 0;
+ SSdb *pSdb = pMnode->pSdb;
+ int32_t totalMnodes = sdbGetSize(pSdb, SDB_MNODE);
+ void *pIter = NULL;
- void *pIter = NULL;
while (1) {
SMnodeObj *pObj = NULL;
pIter = sdbFetch(pSdb, SDB_MNODE, pIter, (void **)&pObj);
if (pIter == NULL) break;
- if (pObj->pDnode == NULL) {
- mError("mnode:%d, no corresponding dnode exists", pObj->id);
- } else {
- if (pObj->role == TAOS_SYNC_STATE_LEADER) {
+
+ if (pObj->id == pMnode->selfDnodeId) {
+ if (mndIsMaster(pMnode)) {
pEpSet->inUse = pEpSet->numOfEps;
+ } else {
+ pEpSet->inUse = (pEpSet->numOfEps + 1) % totalMnodes;
}
- addEpIntoEpSet(pEpSet, pObj->pDnode->fqdn, pObj->pDnode->port);
- sdbRelease(pSdb, pObj);
}
+ addEpIntoEpSet(pEpSet, pObj->pDnode->fqdn, pObj->pDnode->port);
+ sdbRelease(pSdb, pObj);
}
}
@@ -259,75 +265,83 @@ static int32_t mndSetCreateMnodeCommitLogs(SMnode *pMnode, STrans *pTrans, SMnod
}
static int32_t mndSetCreateMnodeRedoActions(SMnode *pMnode, STrans *pTrans, SDnodeObj *pDnode, SMnodeObj *pObj) {
- SSdb *pSdb = pMnode->pSdb;
- void *pIter = NULL;
- int32_t numOfReplicas = 0;
-
+ SSdb *pSdb = pMnode->pSdb;
+ void *pIter = NULL;
+ int32_t numOfReplicas = 0;
+ SDAlterMnodeReq alterReq = {0};
SDCreateMnodeReq createReq = {0};
+ SEpSet alterEpset = {0};
+ SEpSet createEpset = {0};
+
while (1) {
SMnodeObj *pMObj = NULL;
pIter = sdbFetch(pSdb, SDB_MNODE, pIter, (void **)&pMObj);
if (pIter == NULL) break;
- SReplica *pReplica = &createReq.replicas[numOfReplicas];
- pReplica->id = pMObj->id;
- pReplica->port = pMObj->pDnode->port;
- memcpy(pReplica->fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN);
- numOfReplicas++;
+ alterReq.replicas[numOfReplicas].id = pMObj->id;
+ alterReq.replicas[numOfReplicas].port = pMObj->pDnode->port;
+ memcpy(alterReq.replicas[numOfReplicas].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN);
+ alterEpset.eps[numOfReplicas].port = pMObj->pDnode->port;
+ memcpy(alterEpset.eps[numOfReplicas].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN);
+ if (pMObj->state == TAOS_SYNC_STATE_LEADER) {
+ alterEpset.inUse = numOfReplicas;
+ }
+
+ numOfReplicas++;
sdbRelease(pSdb, pMObj);
}
- SReplica *pReplica = &createReq.replicas[numOfReplicas];
- pReplica->id = pDnode->id;
- pReplica->port = pDnode->port;
- memcpy(pReplica->fqdn, pDnode->fqdn, TSDB_FQDN_LEN);
- numOfReplicas++;
+ alterReq.replica = numOfReplicas + 1;
+ alterReq.replicas[numOfReplicas].id = pDnode->id;
+ alterReq.replicas[numOfReplicas].port = pDnode->port;
+ memcpy(alterReq.replicas[numOfReplicas].fqdn, pDnode->fqdn, TSDB_FQDN_LEN);
- createReq.replica = numOfReplicas;
+ alterEpset.numOfEps = numOfReplicas + 1;
+ alterEpset.eps[numOfReplicas].port = pDnode->port;
+ memcpy(alterEpset.eps[numOfReplicas].fqdn, pDnode->fqdn, TSDB_FQDN_LEN);
- while (1) {
- SMnodeObj *pMObj = NULL;
- pIter = sdbFetch(pSdb, SDB_MNODE, pIter, (void **)&pMObj);
- if (pIter == NULL) break;
+ createReq.replica = 1;
+ createReq.replicas[0].id = pDnode->id;
+ createReq.replicas[0].port = pDnode->port;
+ memcpy(createReq.replicas[0].fqdn, pDnode->fqdn, TSDB_FQDN_LEN);
- STransAction action = {0};
+ createEpset.numOfEps = 1;
+ createEpset.eps[0].port = pDnode->port;
+ memcpy(createEpset.eps[0].fqdn, pDnode->fqdn, TSDB_FQDN_LEN);
- createReq.dnodeId = pMObj->id;
+ {
int32_t contLen = tSerializeSDCreateMnodeReq(NULL, 0, &createReq);
void *pReq = taosMemoryMalloc(contLen);
tSerializeSDCreateMnodeReq(pReq, contLen, &createReq);
- action.epSet = mndGetDnodeEpset(pMObj->pDnode);
- action.pCont = pReq;
- action.contLen = contLen;
- action.msgType = TDMT_DND_ALTER_MNODE;
- action.acceptableCode = TSDB_CODE_NODE_ALREADY_DEPLOYED;
+ STransAction action = {
+ .epSet = createEpset,
+ .pCont = pReq,
+ .contLen = contLen,
+ .msgType = TDMT_DND_CREATE_MNODE,
+ .acceptableCode = TSDB_CODE_NODE_ALREADY_DEPLOYED,
+ };
if (mndTransAppendRedoAction(pTrans, &action) != 0) {
taosMemoryFree(pReq);
- sdbCancelFetch(pSdb, pIter);
- sdbRelease(pSdb, pMObj);
return -1;
}
-
- sdbRelease(pSdb, pMObj);
}
{
- STransAction action = {0};
- action.epSet = mndGetDnodeEpset(pDnode);
-
- createReq.dnodeId = pObj->id;
- int32_t contLen = tSerializeSDCreateMnodeReq(NULL, 0, &createReq);
+ int32_t contLen = tSerializeSDCreateMnodeReq(NULL, 0, &alterReq);
void *pReq = taosMemoryMalloc(contLen);
- tSerializeSDCreateMnodeReq(pReq, contLen, &createReq);
+ tSerializeSDCreateMnodeReq(pReq, contLen, &alterReq);
+
+ STransAction action = {
+ .epSet = alterEpset,
+ .pCont = pReq,
+ .contLen = contLen,
+ .msgType = TDMT_DND_ALTER_MNODE,
+ .acceptableCode = 0,
+ };
- action.epSet = mndGetDnodeEpset(pDnode);
- action.pCont = pReq;
- action.contLen = contLen;
- action.msgType = TDMT_DND_CREATE_MNODE;
- action.acceptableCode = TSDB_CODE_NODE_ALREADY_DEPLOYED;
if (mndTransAppendRedoAction(pTrans, &action) != 0) {
taosMemoryFree(pReq);
return -1;
@@ -345,10 +359,11 @@ static int32_t mndCreateMnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode,
mnodeObj.createdTime = taosGetTimestampMs();
mnodeObj.updateTime = mnodeObj.createdTime;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_CREATE_MNODE, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pReq);
if (pTrans == NULL) goto _OVER;
mDebug("trans:%d, used to create mnode:%d", pTrans->id, pCreate->dnodeId);
+ mndTransSetSerial(pTrans);
if (mndSetCreateMnodeRedoLogs(pMnode, pTrans, &mnodeObj) != 0) goto _OVER;
if (mndSetCreateMnodeCommitLogs(pMnode, pTrans, &mnodeObj) != 0) goto _OVER;
if (mndSetCreateMnodeRedoActions(pMnode, pTrans, pDnode, &mnodeObj) != 0) goto _OVER;
@@ -385,12 +400,22 @@ static int32_t mndProcessCreateMnodeReq(SRpcMsg *pReq) {
goto _OVER;
}
+ if (sdbGetSize(pMnode->pSdb, SDB_MNODE) >= 3) {
+ terrno = TSDB_CODE_MND_TOO_MANY_MNODES;
+ goto _OVER;
+ }
+
pDnode = mndAcquireDnode(pMnode, createReq.dnodeId);
if (pDnode == NULL) {
terrno = TSDB_CODE_MND_DNODE_NOT_EXIST;
goto _OVER;
}
+ if (!mndIsDnodeOnline(pMnode, pDnode, taosGetTimestampMs())) {
+ terrno = TSDB_CODE_NODE_OFFLINE;
+ goto _OVER;
+ }
+
pUser = mndAcquireUser(pMnode, pReq->conn.user);
if (pUser == NULL) {
terrno = TSDB_CODE_MND_NO_USER_FROM_CONN;
@@ -433,73 +458,77 @@ static int32_t mndSetDropMnodeCommitLogs(SMnode *pMnode, STrans *pTrans, SMnodeO
}
static int32_t mndSetDropMnodeRedoActions(SMnode *pMnode, STrans *pTrans, SDnodeObj *pDnode, SMnodeObj *pObj) {
- SSdb *pSdb = pMnode->pSdb;
- void *pIter = NULL;
- int32_t numOfReplicas = 0;
-
+ SSdb *pSdb = pMnode->pSdb;
+ void *pIter = NULL;
+ int32_t numOfReplicas = 0;
SDAlterMnodeReq alterReq = {0};
+ SDDropMnodeReq dropReq = {0};
+ SEpSet alterEpset = {0};
+ SEpSet dropEpSet = {0};
+
while (1) {
SMnodeObj *pMObj = NULL;
pIter = sdbFetch(pSdb, SDB_MNODE, pIter, (void **)&pMObj);
if (pIter == NULL) break;
+ if (pMObj->id == pObj->id) {
+ sdbRelease(pSdb, pMObj);
+ continue;
+ }
+
+ alterReq.replicas[numOfReplicas].id = pMObj->id;
+ alterReq.replicas[numOfReplicas].port = pMObj->pDnode->port;
+ memcpy(alterReq.replicas[numOfReplicas].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN);
- if (pMObj->id != pObj->id) {
- SReplica *pReplica = &alterReq.replicas[numOfReplicas];
- pReplica->id = pMObj->id;
- pReplica->port = pMObj->pDnode->port;
- memcpy(pReplica->fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN);
- numOfReplicas++;
+ alterEpset.eps[numOfReplicas].port = pMObj->pDnode->port;
+ memcpy(alterEpset.eps[numOfReplicas].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN);
+ if (pMObj->state == TAOS_SYNC_STATE_LEADER) {
+ alterEpset.inUse = numOfReplicas;
}
+ numOfReplicas++;
sdbRelease(pSdb, pMObj);
}
alterReq.replica = numOfReplicas;
+ alterEpset.numOfEps = numOfReplicas;
- while (1) {
- SMnodeObj *pMObj = NULL;
- pIter = sdbFetch(pSdb, SDB_MNODE, pIter, (void **)&pMObj);
- if (pIter == NULL) break;
- if (pMObj->id != pObj->id) {
- STransAction action = {0};
-
- alterReq.dnodeId = pMObj->id;
- int32_t contLen = tSerializeSDCreateMnodeReq(NULL, 0, &alterReq);
- void *pReq = taosMemoryMalloc(contLen);
- tSerializeSDCreateMnodeReq(pReq, contLen, &alterReq);
-
- action.epSet = mndGetDnodeEpset(pMObj->pDnode);
- action.pCont = pReq;
- action.contLen = contLen;
- action.msgType = TDMT_DND_ALTER_MNODE;
- action.acceptableCode = TSDB_CODE_NODE_ALREADY_DEPLOYED;
-
- if (mndTransAppendRedoAction(pTrans, &action) != 0) {
- taosMemoryFree(pReq);
- sdbCancelFetch(pSdb, pIter);
- sdbRelease(pSdb, pMObj);
- return -1;
- }
- }
+ dropReq.dnodeId = pDnode->id;
+ dropEpSet.numOfEps = 1;
+ dropEpSet.eps[0].port = pDnode->port;
+ memcpy(dropEpSet.eps[0].fqdn, pDnode->fqdn, TSDB_FQDN_LEN);
- sdbRelease(pSdb, pMObj);
+ {
+ int32_t contLen = tSerializeSDCreateMnodeReq(NULL, 0, &alterReq);
+ void *pReq = taosMemoryMalloc(contLen);
+ tSerializeSDCreateMnodeReq(pReq, contLen, &alterReq);
+
+ STransAction action = {
+ .epSet = alterEpset,
+ .pCont = pReq,
+ .contLen = contLen,
+ .msgType = TDMT_DND_ALTER_MNODE,
+ .acceptableCode = 0,
+ };
+
+ if (mndTransAppendRedoAction(pTrans, &action) != 0) {
+ taosMemoryFree(pReq);
+ return -1;
+ }
}
{
- STransAction action = {0};
- action.epSet = mndGetDnodeEpset(pDnode);
-
- SDDropMnodeReq dropReq = {0};
- dropReq.dnodeId = pObj->id;
int32_t contLen = tSerializeSCreateDropMQSBNodeReq(NULL, 0, &dropReq);
void *pReq = taosMemoryMalloc(contLen);
tSerializeSCreateDropMQSBNodeReq(pReq, contLen, &dropReq);
- action.epSet = mndGetDnodeEpset(pDnode);
- action.pCont = pReq;
- action.contLen = contLen;
- action.msgType = TDMT_DND_DROP_MNODE;
- action.acceptableCode = TSDB_CODE_NODE_NOT_DEPLOYED;
+ STransAction action = {
+ .epSet = dropEpSet,
+ .pCont = pReq,
+ .contLen = contLen,
+ .msgType = TDMT_DND_DROP_MNODE,
+ .acceptableCode = TSDB_CODE_NODE_NOT_DEPLOYED,
+ };
+
if (mndTransAppendRedoAction(pTrans, &action) != 0) {
taosMemoryFree(pReq);
return -1;
@@ -512,11 +541,11 @@ static int32_t mndSetDropMnodeRedoActions(SMnode *pMnode, STrans *pTrans, SDnode
static int32_t mndDropMnode(SMnode *pMnode, SRpcMsg *pReq, SMnodeObj *pObj) {
int32_t code = -1;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_DROP_MNODE, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pReq);
if (pTrans == NULL) goto _OVER;
mDebug("trans:%d, used to drop mnode:%d", pTrans->id, pObj->id);
-
+ mndTransSetSerial(pTrans);
if (mndSetDropMnodeRedoLogs(pMnode, pTrans, pObj) != 0) goto _OVER;
if (mndSetDropMnodeCommitLogs(pMnode, pTrans, pObj) != 0) goto _OVER;
if (mndSetDropMnodeRedoActions(pMnode, pTrans, pObj->pDnode, pObj) != 0) goto _OVER;
@@ -553,7 +582,7 @@ static int32_t mndProcessDropMnodeReq(SRpcMsg *pReq) {
goto _OVER;
}
- if (pMnode->selfId == dropReq.dnodeId) {
+ if (pMnode->selfDnodeId == dropReq.dnodeId) {
terrno = TSDB_CODE_MND_CANT_DROP_MASTER;
goto _OVER;
}
@@ -608,10 +637,12 @@ static int32_t mndRetrieveMnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
int32_t numOfRows = 0;
int32_t cols = 0;
SMnodeObj *pObj = NULL;
+ ESdbStatus objStatus;
char *pWrite;
+ int64_t curMs = taosGetTimestampMs();
while (numOfRows < rows) {
- pShow->pIter = sdbFetch(pSdb, SDB_MNODE, pShow->pIter, (void **)&pObj);
+ pShow->pIter = sdbFetchAll(pSdb, SDB_MNODE, pShow->pIter, (void **)&pObj, &objStatus);
if (pShow->pIter == NULL) break;
cols = 0;
@@ -624,15 +655,25 @@ static int32_t mndRetrieveMnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, b1, false);
- const char *roles = syncStr(pObj->role);
- char *b2 = taosMemoryCalloc(1, 12 + VARSTR_HEADER_SIZE);
+ const char *roles = "offline";
+ if (pObj->id == pMnode->selfDnodeId) {
+ roles = syncStr(TAOS_SYNC_STATE_LEADER);
+ }
+ if (pObj->pDnode && mndIsDnodeOnline(pMnode, pObj->pDnode, curMs)) {
+ roles = syncStr(pObj->state);
+ }
+ char b2[12 + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(b2, roles, pShow->pMeta->pSchemas[cols].bytes);
-
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)b2, false);
+ const char *status = "ready";
+ if (objStatus == SDB_STATUS_CREATING) status = "creating";
+ if (objStatus == SDB_STATUS_DROPPING) status = "dropping";
+ char b3[9 + VARSTR_HEADER_SIZE] = {0};
+ STR_WITH_MAXSIZE_TO_VARSTR(b3, status, pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataAppend(pColInfo, numOfRows, (const char *)&pObj->roleTime, false);
+ colDataAppend(pColInfo, numOfRows, (const char *)b3, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)&pObj->createdTime, false);
@@ -650,3 +691,49 @@ static void mndCancelGetNextMnode(SMnode *pMnode, void *pIter) {
SSdb *pSdb = pMnode->pSdb;
sdbCancelFetch(pSdb, pIter);
}
+
+static int32_t mndProcessAlterMnodeReq(SRpcMsg *pReq) {
+ SMnode *pMnode = pReq->info.node;
+ SDAlterMnodeReq alterReq = {0};
+
+ if (tDeserializeSDCreateMnodeReq(pReq->pCont, pReq->contLen, &alterReq) != 0) {
+ terrno = TSDB_CODE_INVALID_MSG;
+ return -1;
+ }
+
+ SSyncCfg cfg = {.replicaNum = alterReq.replica, .myIndex = -1};
+ for (int32_t i = 0; i < alterReq.replica; ++i) {
+ SNodeInfo *pNode = &cfg.nodeInfo[i];
+ tstrncpy(pNode->nodeFqdn, alterReq.replicas[i].fqdn, sizeof(pNode->nodeFqdn));
+ pNode->nodePort = alterReq.replicas[i].port;
+ if (alterReq.replicas[i].id == pMnode->selfDnodeId) cfg.myIndex = i;
+ }
+
+ if (cfg.myIndex == -1) {
+ mError("failed to alter mnode since myindex is -1");
+ return -1;
+ } else {
+ mInfo("start to alter mnode sync, replica:%d myindex:%d", cfg.replicaNum, cfg.myIndex);
+ for (int32_t i = 0; i < alterReq.replica; ++i) {
+ SNodeInfo *pNode = &cfg.nodeInfo[i];
+ mInfo("index:%d, fqdn:%s port:%d", i, pNode->nodeFqdn, pNode->nodePort);
+ }
+ }
+
+ mTrace("trans:-1, sync reconfig will be proposed");
+
+ SSyncMgmt *pMgmt = &pMnode->syncMgmt;
+ pMgmt->standby = 0;
+ int32_t code = syncReconfig(pMgmt->sync, &cfg);
+ if (code != 0) {
+ mError("trans:-1, failed to propose sync reconfig since %s", terrstr());
+ return code;
+ } else {
+ pMgmt->errCode = 0;
+ pMgmt->transId = -1;
+ tsem_wait(&pMgmt->syncSem);
+ mInfo("alter mnode sync result:%s", tstrerror(pMgmt->errCode));
+ terrno = pMgmt->errCode;
+ return pMgmt->errCode;
+ }
+}
diff --git a/source/dnode/mnode/impl/src/mndOffset.c b/source/dnode/mnode/impl/src/mndOffset.c
index dca07f6a6d2910630a939d119b6d21e287112866..00c8bb30d03d87545750b87d9eddab9efb8e821e 100644
--- a/source/dnode/mnode/impl/src/mndOffset.c
+++ b/source/dnode/mnode/impl/src/mndOffset.c
@@ -21,6 +21,7 @@
#include "mndMnode.h"
#include "mndShow.h"
#include "mndStb.h"
+#include "mndTopic.h"
#include "mndTrans.h"
#include "mndUser.h"
#include "mndVgroup.h"
@@ -58,6 +59,12 @@ bool mndOffsetFromTopic(SMqOffsetObj *pOffset, const char *topic) {
return false;
}
+bool mndOffsetFromSubKey(SMqOffsetObj *pOffset, const char *subKey) {
+ int32_t i = 0;
+ while (pOffset->key[i] != ':') i++;
+ if (strcmp(&pOffset->key[i + 1], subKey) == 0) return true;
+ return false;
+}
SSdbRaw *mndOffsetActionEncode(SMqOffsetObj *pOffset) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
void *buf = NULL;
@@ -172,7 +179,7 @@ static int32_t mndProcessCommitOffsetReq(SRpcMsg *pMsg) {
tDecodeSMqCMCommitOffsetReq(&decoder, &commitOffsetReq);
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_COMMIT_OFFSET, pMsg);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pMsg);
for (int32_t i = 0; i < commitOffsetReq.num; i++) {
SMqOffset *pOffset = &commitOffsetReq.offsets[i];
@@ -182,7 +189,15 @@ static int32_t mndProcessCommitOffsetReq(SRpcMsg *pMsg) {
bool create = false;
SMqOffsetObj *pOffsetObj = mndAcquireOffset(pMnode, key);
if (pOffsetObj == NULL) {
+ SMqTopicObj *pTopic = mndAcquireTopic(pMnode, pOffset->topicName);
+ if (pTopic == NULL) {
+ terrno = TSDB_CODE_MND_TOPIC_NOT_EXIST;
+ mError("submit offset to topic %s failed since %s", pOffset->topicName, terrstr());
+ continue;
+ }
pOffsetObj = taosMemoryMalloc(sizeof(SMqOffsetObj));
+ pOffsetObj->dbUid = pTopic->dbUid;
+ mndReleaseTopic(pMnode, pTopic);
memcpy(pOffsetObj->key, key, TSDB_PARTITION_KEY_LEN);
create = true;
}
@@ -303,7 +318,35 @@ int32_t mndDropOffsetByTopic(SMnode *pMnode, STrans *pTrans, const char *topic)
continue;
}
- if (mndSetDropOffsetRedoLogs(pMnode, pTrans, pOffset) < 0) {
+ if (mndSetDropOffsetCommitLogs(pMnode, pTrans, pOffset) < 0) {
+ sdbRelease(pSdb, pOffset);
+ goto END;
+ }
+
+ sdbRelease(pSdb, pOffset);
+ }
+
+ code = 0;
+END:
+ return code;
+}
+
+int32_t mndDropOffsetBySubKey(SMnode *pMnode, STrans *pTrans, const char *subKey) {
+ int32_t code = -1;
+ SSdb *pSdb = pMnode->pSdb;
+
+ void *pIter = NULL;
+ SMqOffsetObj *pOffset = NULL;
+ while (1) {
+ pIter = sdbFetch(pSdb, SDB_OFFSET, pIter, (void **)&pOffset);
+ if (pIter == NULL) break;
+
+ if (!mndOffsetFromSubKey(pOffset, subKey)) {
+ sdbRelease(pSdb, pOffset);
+ continue;
+ }
+
+ if (mndSetDropOffsetCommitLogs(pMnode, pTrans, pOffset) < 0) {
sdbRelease(pSdb, pOffset);
goto END;
}
diff --git a/source/dnode/mnode/impl/src/mndProfile.c b/source/dnode/mnode/impl/src/mndProfile.c
index b9ac82d890a12f2355834f415f4bbca65461873b..bacdf2f3665fc144eec8320e68fc38fab330e34c 100644
--- a/source/dnode/mnode/impl/src/mndProfile.c
+++ b/source/dnode/mnode/impl/src/mndProfile.c
@@ -18,6 +18,7 @@
#include "mndDb.h"
#include "mndDnode.h"
#include "mndMnode.h"
+#include "mndQnode.h"
#include "mndShow.h"
#include "mndStb.h"
#include "mndUser.h"
@@ -379,9 +380,12 @@ static int32_t mndProcessQueryHeartBeat(SMnode *pMnode, SRpcMsg *pMsg, SClientHb
}
rspBasic->connId = pConn->id;
- rspBasic->totalDnodes = 1; // TODO
+ rspBasic->totalDnodes = mndGetDnodeSize(pMnode);
rspBasic->onlineDnodes = 1; // TODO
mndGetMnodeEpSet(pMnode, &rspBasic->epSet);
+
+ mndCreateQnodeList(pMnode, &rspBasic->pQnodeList, -1);
+
mndReleaseConn(pMnode, pConn);
hbRsp.query = rspBasic;
diff --git a/source/dnode/mnode/impl/src/mndQnode.c b/source/dnode/mnode/impl/src/mndQnode.c
index 3dc6200229b8a519fcf193393535500e98f4df20..27881865af11913b4a04c4fc84df115e98823fd1 100644
--- a/source/dnode/mnode/impl/src/mndQnode.c
+++ b/source/dnode/mnode/impl/src/mndQnode.c
@@ -60,7 +60,7 @@ int32_t mndInitQnode(SMnode *pMnode) {
void mndCleanupQnode(SMnode *pMnode) {}
-static SQnodeObj *mndAcquireQnode(SMnode *pMnode, int32_t qnodeId) {
+SQnodeObj *mndAcquireQnode(SMnode *pMnode, int32_t qnodeId) {
SQnodeObj *pObj = sdbAcquire(pMnode->pSdb, SDB_QNODE, &qnodeId);
if (pObj == NULL && terrno == TSDB_CODE_SDB_OBJ_NOT_THERE) {
terrno = TSDB_CODE_MND_QNODE_NOT_EXIST;
@@ -68,7 +68,7 @@ static SQnodeObj *mndAcquireQnode(SMnode *pMnode, int32_t qnodeId) {
return pObj;
}
-static void mndReleaseQnode(SMnode *pMnode, SQnodeObj *pObj) {
+void mndReleaseQnode(SMnode *pMnode, SQnodeObj *pObj) {
SSdb *pSdb = pMnode->pSdb;
sdbRelease(pSdb, pObj);
}
@@ -248,7 +248,7 @@ static int32_t mndCreateQnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode,
qnodeObj.createdTime = taosGetTimestampMs();
qnodeObj.updateTime = qnodeObj.createdTime;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_CREATE_QNODE, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq);
if (pTrans == NULL) goto _OVER;
mDebug("trans:%d, used to create qnode:%d", pTrans->id, pCreate->dnodeId);
@@ -365,7 +365,7 @@ static int32_t mndSetDropQnodeRedoActions(STrans *pTrans, SDnodeObj *pDnode, SQn
static int32_t mndDropQnode(SMnode *pMnode, SRpcMsg *pReq, SQnodeObj *pObj) {
int32_t code = -1;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_DROP_QNODE, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pReq);
if (pTrans == NULL) goto _OVER;
mDebug("trans:%d, used to drop qnode:%d", pTrans->id, pObj->id);
@@ -429,49 +429,62 @@ _OVER:
return code;
}
-static int32_t mndProcessQnodeListReq(SRpcMsg *pReq) {
- int32_t code = -1;
- int32_t numOfRows = 0;
- SMnode *pMnode = pReq->info.node;
- SSdb *pSdb = pMnode->pSdb;
+int32_t mndCreateQnodeList(SMnode *pMnode, SArray** pList, int32_t limit) {
+ SSdb *pSdb = pMnode->pSdb;
+ void *pIter = NULL;
SQnodeObj *pObj = NULL;
- SQnodeListReq qlistReq = {0};
- SQnodeListRsp qlistRsp = {0};
-
- if (tDeserializeSQnodeListReq(pReq->pCont, pReq->contLen, &qlistReq) != 0) {
- mError("failed to parse qnode list req");
- terrno = TSDB_CODE_INVALID_MSG;
- goto _OVER;
- }
+ int32_t numOfRows = 0;
- qlistRsp.addrsList = taosArrayInit(5, sizeof(SQueryNodeAddr));
- if (NULL == qlistRsp.addrsList) {
+ SArray* qnodeList = taosArrayInit(5, sizeof(SQueryNodeLoad));
+ if (NULL == qnodeList) {
mError("failed to alloc epSet while process qnode list req");
terrno = TSDB_CODE_OUT_OF_MEMORY;
- goto _OVER;
+ return terrno;
}
-
- void *pIter = NULL;
+
while (1) {
pIter = sdbFetch(pSdb, SDB_QNODE, pIter, (void **)&pObj);
if (pIter == NULL) break;
- SQueryNodeAddr nodeAddr = {0};
- nodeAddr.nodeId = QNODE_HANDLE;
- nodeAddr.epSet.numOfEps = 1;
- tstrncpy(nodeAddr.epSet.eps[0].fqdn, pObj->pDnode->fqdn, TSDB_FQDN_LEN);
- nodeAddr.epSet.eps[0].port = pObj->pDnode->port;
+ SQueryNodeLoad nodeLoad = {0};
+ nodeLoad.addr.nodeId = QNODE_HANDLE;
+ nodeLoad.addr.epSet.numOfEps = 1;
+ tstrncpy(nodeLoad.addr.epSet.eps[0].fqdn, pObj->pDnode->fqdn, TSDB_FQDN_LEN);
+ nodeLoad.addr.epSet.eps[0].port = pObj->pDnode->port;
+ nodeLoad.load = QNODE_LOAD_VALUE(pObj);
- (void)taosArrayPush(qlistRsp.addrsList, &nodeAddr);
+ (void)taosArrayPush(qnodeList, &nodeLoad);
numOfRows++;
sdbRelease(pSdb, pObj);
- if (qlistReq.rowNum > 0 && numOfRows >= qlistReq.rowNum) {
+ if (limit > 0 && numOfRows >= limit) {
break;
}
}
+ *pList = qnodeList;
+
+ return TSDB_CODE_SUCCESS;
+}
+
+
+static int32_t mndProcessQnodeListReq(SRpcMsg *pReq) {
+ int32_t code = -1;
+ SMnode *pMnode = pReq->info.node;
+ SQnodeListReq qlistReq = {0};
+ SQnodeListRsp qlistRsp = {0};
+
+ if (tDeserializeSQnodeListReq(pReq->pCont, pReq->contLen, &qlistReq) != 0) {
+ mError("failed to parse qnode list req");
+ terrno = TSDB_CODE_INVALID_MSG;
+ goto _OVER;
+ }
+
+ if (mndCreateQnodeList(pMnode, &qlistRsp.qnodeList, qlistReq.rowNum) != 0) {
+ goto _OVER;
+ }
+
int32_t rspLen = tSerializeSQnodeListRsp(NULL, 0, &qlistRsp);
void *pRsp = rpcMallocCont(rspLen);
if (pRsp == NULL) {
diff --git a/source/dnode/mnode/impl/src/mndQuery.c b/source/dnode/mnode/impl/src/mndQuery.c
index 78b70c9a74133b859b4175b195d4a939c37ebccc..97594f2b913334ac17e2bd5e6c8fc95e19a03e9e 100644
--- a/source/dnode/mnode/impl/src/mndQuery.c
+++ b/source/dnode/mnode/impl/src/mndQuery.c
@@ -26,19 +26,19 @@ int32_t mndProcessQueryMsg(SRpcMsg *pMsg) {
mTrace("msg:%p, in query queue is processing", pMsg);
switch (pMsg->msgType) {
case TDMT_VND_QUERY:
- code = qWorkerProcessQueryMsg(&handle, pMnode->pQuery, pMsg);
+ code = qWorkerProcessQueryMsg(&handle, pMnode->pQuery, pMsg, 0);
break;
case TDMT_VND_QUERY_CONTINUE:
- code = qWorkerProcessCQueryMsg(&handle, pMnode->pQuery, pMsg);
+ code = qWorkerProcessCQueryMsg(&handle, pMnode->pQuery, pMsg, 0);
break;
case TDMT_VND_FETCH:
- code = qWorkerProcessFetchMsg(pMnode, pMnode->pQuery, pMsg);
+ code = qWorkerProcessFetchMsg(pMnode, pMnode->pQuery, pMsg, 0);
break;
case TDMT_VND_DROP_TASK:
- code = qWorkerProcessDropMsg(pMnode, pMnode->pQuery, pMsg);
+ code = qWorkerProcessDropMsg(pMnode, pMnode->pQuery, pMsg, 0);
break;
case TDMT_VND_QUERY_HEARTBEAT:
- code = qWorkerProcessHbMsg(pMnode, pMnode->pQuery, pMsg);
+ code = qWorkerProcessHbMsg(pMnode, pMnode->pQuery, pMsg, 0);
break;
default:
terrno = TSDB_CODE_VND_APP_ERROR;
diff --git a/source/dnode/mnode/impl/src/mndScheduler.c b/source/dnode/mnode/impl/src/mndScheduler.c
index 22a5f37334b4f18a422249afa9e870068e0e5f83..b390a7fe4a37bcb057fcc19837a58eb08d277799 100644
--- a/source/dnode/mnode/impl/src/mndScheduler.c
+++ b/source/dnode/mnode/impl/src/mndScheduler.c
@@ -28,13 +28,15 @@
#include "mndTrans.h"
#include "mndUser.h"
#include "mndVgroup.h"
+#include "parser.h"
#include "tcompare.h"
#include "tname.h"
#include "tuuid.h"
extern bool tsStreamSchedV;
-int32_t mndConvertRSmaTask(const char* ast, int8_t triggerType, int64_t watermark, char** pStr, int32_t* pLen) {
+int32_t mndConvertRSmaTask(const char* ast, int64_t uid, int8_t triggerType, int64_t watermark, char** pStr,
+ int32_t* pLen, double filesFactor) {
SNode* pAst = NULL;
SQueryPlan* pPlan = NULL;
terrno = TSDB_CODE_SUCCESS;
@@ -44,6 +46,11 @@ int32_t mndConvertRSmaTask(const char* ast, int8_t triggerType, int64_t watermar
goto END;
}
+ if (qSetSTableIdForRSma(pAst, uid) < 0) {
+ terrno = TSDB_CODE_QRY_INVALID_INPUT;
+ goto END;
+ }
+
SPlanContext cxt = {
.pAstRoot = pAst,
.topicQuery = false,
@@ -51,6 +58,7 @@ int32_t mndConvertRSmaTask(const char* ast, int8_t triggerType, int64_t watermar
.rSmaQuery = true,
.triggerType = triggerType,
.watermark = watermark,
+ .filesFactor = filesFactor,
};
if (qCreateQueryPlan(&cxt, &pPlan, NULL) < 0) {
terrno = TSDB_CODE_QRY_INVALID_INPUT;
@@ -206,6 +214,7 @@ int32_t mndAddShuffledSinkToStream(SMnode* pMnode, STrans* pTrans, SStreamObj* p
} else {
pTask->sinkType = TASK_SINK__TABLE;
pTask->tbSink.stbUid = pStream->targetStbUid;
+ memcpy(pTask->tbSink.stbFullName, pStream->targetSTbName, TSDB_TABLE_FNAME_LEN);
pTask->tbSink.pSchemaWrapper = tCloneSSchemaWrapper(&pStream->outputSchema);
ASSERT(pTask->tbSink.pSchemaWrapper);
}
@@ -229,11 +238,14 @@ int32_t mndAddFixedSinkToStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStr
taosArrayPush(tasks, &pTask);
pTask->nodeId = pStream->fixedSinkVgId;
+#if 0
SVgObj* pVgroup = mndAcquireVgroup(pMnode, pStream->fixedSinkVgId);
if (pVgroup == NULL) {
return -1;
}
pTask->epSet = mndGetVgroupEpset(pMnode, pVgroup);
+#endif
+ pTask->epSet = mndGetVgroupEpset(pMnode, &pStream->fixedSinkVg);
// source
pTask->sourceType = TASK_SOURCE__MERGE;
pTask->inputType = TASK_INPUT_TYPE__DATA_BLOCK;
@@ -248,13 +260,15 @@ int32_t mndAddFixedSinkToStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStr
} else {
pTask->sinkType = TASK_SINK__TABLE;
pTask->tbSink.stbUid = pStream->targetStbUid;
+ memcpy(pTask->tbSink.stbFullName, pStream->targetSTbName, TSDB_TABLE_FNAME_LEN);
pTask->tbSink.pSchemaWrapper = tCloneSSchemaWrapper(&pStream->outputSchema);
}
// dispatch
pTask->dispatchType = TASK_DISPATCH__NONE;
- mndPersistTaskDeployReq(pTrans, pTask, &pTask->epSet, TDMT_VND_TASK_DEPLOY, pVgroup->vgId);
+ /*mndPersistTaskDeployReq(pTrans, pTask, &pTask->epSet, TDMT_VND_TASK_DEPLOY, pVgroup->vgId);*/
+ mndPersistTaskDeployReq(pTrans, pTask, &pTask->epSet, TDMT_VND_TASK_DEPLOY, pStream->fixedSinkVg.vgId);
return 0;
}
@@ -273,7 +287,7 @@ int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream) {
pStream->tasks = taosArrayInit(totLevel, sizeof(void*));
bool hasExtraSink = false;
- if (totLevel == 2) {
+ if (totLevel == 2 || strcmp(pStream->sourceDb, pStream->targetDb) != 0) {
SArray* taskOneLevel = taosArrayInit(0, sizeof(void*));
taosArrayPush(pStream->tasks, &taskOneLevel);
// add extra sink
@@ -325,6 +339,7 @@ int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream) {
} else {
pTask->sinkType = TASK_SINK__TABLE;
pTask->tbSink.stbUid = pStream->targetStbUid;
+ memcpy(pTask->tbSink.stbFullName, pStream->targetSTbName, TSDB_TABLE_FNAME_LEN);
pTask->tbSink.pSchemaWrapper = tCloneSSchemaWrapper(&pStream->outputSchema);
}
#endif
@@ -345,7 +360,8 @@ int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream) {
// one merge only
ASSERT(taosArrayGetSize(pArray) == 1);
SStreamTask* lastLevelTask = taosArrayGetP(pArray, 0);
- pTask->dispatchMsgType = TDMT_VND_TASK_MERGE_EXEC;
+ /*pTask->dispatchMsgType = TDMT_VND_TASK_MERGE_EXEC;*/
+ pTask->dispatchMsgType = TDMT_VND_TASK_DISPATCH;
pTask->dispatchType = TASK_DISPATCH__FIXED;
pTask->fixedEpDispatcher.taskId = lastLevelTask->taskId;
@@ -390,8 +406,9 @@ int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream) {
if (pStream->fixedSinkVgId == 0) {
pTask->dispatchType = TASK_DISPATCH__SHUFFLE;
- pTask->dispatchMsgType = TDMT_VND_TASK_WRITE_EXEC;
- SDbObj* pDb = mndAcquireDb(pMnode, pStream->sourceDb);
+ /*pTask->dispatchMsgType = TDMT_VND_TASK_WRITE_EXEC;*/
+ pTask->dispatchMsgType = TDMT_VND_TASK_DISPATCH;
+ SDbObj* pDb = mndAcquireDb(pMnode, pStream->targetDb);
ASSERT(pDb);
if (mndExtractDbInfo(pMnode, pDb, &pTask->shuffleDispatcher.dbInfo, NULL) < 0) {
sdbRelease(pSdb, pDb);
@@ -420,7 +437,8 @@ int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream) {
}
} else {
pTask->dispatchType = TASK_DISPATCH__FIXED;
- pTask->dispatchMsgType = TDMT_VND_TASK_WRITE_EXEC;
+ /*pTask->dispatchMsgType = TDMT_VND_TASK_WRITE_EXEC;*/
+ pTask->dispatchMsgType = TDMT_VND_TASK_DISPATCH;
SArray* pArray = taosArrayGetP(pStream->tasks, 0);
// one sink only
ASSERT(taosArrayGetSize(pArray) == 1);
@@ -489,7 +507,7 @@ int32_t mndSchedInitSubEp(SMnode* pMnode, const SMqTopicObj* pTopic, SMqSubscrib
SQueryPlan* pPlan = NULL;
SSubplan* plan = NULL;
- if (pTopic->subType == TOPIC_SUB_TYPE__TABLE) {
+ if (pTopic->subType == TOPIC_SUB_TYPE__COLUMN) {
pPlan = qStringToQueryPlan(pTopic->physicalPlan);
if (pPlan == NULL) {
terrno = TSDB_CODE_QRY_INVALID_INPUT;
@@ -535,7 +553,7 @@ int32_t mndSchedInitSubEp(SMnode* pMnode, const SMqTopicObj* pTopic, SMqSubscrib
mDebug("init subscription %s, assign vg: %d", pSub->key, pVgEp->vgId);
- if (pTopic->subType == TOPIC_SUB_TYPE__TABLE) {
+ if (pTopic->subType == TOPIC_SUB_TYPE__COLUMN) {
int32_t msgLen;
plan->execNode.epSet = pVgEp->epSet;
diff --git a/source/dnode/mnode/impl/src/mndShow.c b/source/dnode/mnode/impl/src/mndShow.c
index def6c06896149c4f7c871099d1d9dc7166dc2dd1..6b70825ed46bdfffb703d3c508971a22ec145b82 100644
--- a/source/dnode/mnode/impl/src/mndShow.c
+++ b/source/dnode/mnode/impl/src/mndShow.c
@@ -257,6 +257,7 @@ static int32_t mndProcessRetrieveSysTableReq(SRpcMsg *pReq) {
terrno = rowsRead;
mDebug("show:0x%" PRIx64 ", retrieve completed", pShow->id);
mndReleaseShowObj(pShow, true);
+ blockDataDestroy(pBlock);
return -1;
}
diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c
index b38e901d49dee8e6c93dc629824e51bf44c71e0f..6cb70d1f27895cd64f08fdb383f4072739e03f52 100644
--- a/source/dnode/mnode/impl/src/mndSma.c
+++ b/source/dnode/mnode/impl/src/mndSma.c
@@ -295,9 +295,9 @@ static void *mndBuildVCreateSmaReq(SMnode *pMnode, SVgObj *pVgroup, SSmaObj *pSm
}
static void *mndBuildVDropSmaReq(SMnode *pMnode, SVgObj *pVgroup, SSmaObj *pSma, int32_t *pContLen) {
- SEncoder encoder = {0};
- int32_t contLen;
- SName name = {0};
+ SEncoder encoder = {0};
+ int32_t contLen;
+ SName name = {0};
tNameFromString(&name, pSma->name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
SVDropTSmaReq req = {0};
@@ -354,6 +354,22 @@ static int32_t mndSetCreateSmaCommitLogs(SMnode *pMnode, STrans *pTrans, SSmaObj
return 0;
}
+static int32_t mndSetCreateSmaVgroupRedoLogs(SMnode *pMnode, STrans *pTrans, SVgObj *pVgroup) {
+ SSdbRaw *pVgRaw = mndVgroupActionEncode(pVgroup);
+ if (pVgRaw == NULL) return -1;
+ if (mndTransAppendRedolog(pTrans, pVgRaw) != 0) return -1;
+ if (sdbSetRawStatus(pVgRaw, SDB_STATUS_CREATING) != 0) return -1;
+ return 0;
+}
+
+static int32_t mndSetCreateSmaVgroupCommitLogs(SMnode *pMnode, STrans *pTrans, SVgObj *pVgroup) {
+ SSdbRaw *pVgRaw = mndVgroupActionEncode(pVgroup);
+ if (pVgRaw == NULL) return -1;
+ if (mndTransAppendCommitlog(pTrans, pVgRaw) != 0) return -1;
+ if (sdbSetRawStatus(pVgRaw, SDB_STATUS_READY) != 0) return -1;
+ return 0;
+}
+
static int32_t mndSetCreateSmaRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SSmaObj *pSma) {
SSdb *pSdb = pMnode->pSdb;
SVgObj *pVgroup = NULL;
@@ -393,6 +409,40 @@ static int32_t mndSetCreateSmaRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj
return 0;
}
+static int32_t mndSetCreateSmaVgroupRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup,
+ SSmaObj *pSma) {
+ SVnodeGid *pVgid = pVgroup->vnodeGid + 0;
+ SDnodeObj *pDnode = mndAcquireDnode(pMnode, pVgid->dnodeId);
+ if (pDnode == NULL) return -1;
+
+ STransAction action = {0};
+ action.epSet = mndGetDnodeEpset(pDnode);
+ mndReleaseDnode(pMnode, pDnode);
+
+ // todo add sma info here
+ int32_t smaContLen = 0;
+ void *pSmaReq = mndBuildVCreateSmaReq(pMnode, pVgroup, pSma, &smaContLen);
+ if (pSmaReq == NULL) return -1;
+ pVgroup->pTsma = pSmaReq;
+
+ int32_t contLen = 0;
+ void *pReq = mndBuildCreateVnodeReq(pMnode, pDnode, pDb, pVgroup, &contLen, false);
+ taosMemoryFreeClear(pSmaReq);
+ if (pReq == NULL) return -1;
+
+ action.pCont = pReq;
+ action.contLen = contLen;
+ action.msgType = TDMT_DND_CREATE_VNODE;
+ action.acceptableCode = TSDB_CODE_NODE_ALREADY_DEPLOYED;
+
+ if (mndTransAppendRedoAction(pTrans, &action) != 0) {
+ taosMemoryFree(pReq);
+ return -1;
+ }
+
+ return 0;
+}
+
static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCreate, SDbObj *pDb, SStbObj *pStb) {
SSmaObj smaObj = {0};
memcpy(smaObj.name, pCreate->name, TSDB_TABLE_FNAME_LEN);
@@ -448,20 +498,29 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea
streamObj.version = 1;
streamObj.sql = pCreate->sql;
streamObj.createdBy = STREAM_CREATED_BY__SMA;
- streamObj.fixedSinkVgId = smaObj.dstVgId;
streamObj.smaId = smaObj.uid;
- /*streamObj.physicalPlan = "";*/
+
+ if (mndAllocSmaVgroup(pMnode, pDb, &streamObj.fixedSinkVg) != 0) {
+ mError("sma:%s, failed to create since %s", smaObj.name, terrstr());
+ return -1;
+ }
+ smaObj.dstVgId = streamObj.fixedSinkVg.vgId;
+ streamObj.fixedSinkVgId = smaObj.dstVgId;
int32_t code = -1;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_CREATE_SMA, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq);
if (pTrans == NULL) goto _OVER;
mDebug("trans:%d, used to create sma:%s", pTrans->id, pCreate->name);
- mndTransSetDbInfo(pTrans, pDb);
+ mndTransSetDbName(pTrans, pDb->name);
+ mndTransSetSerial(pTrans);
if (mndSetCreateSmaRedoLogs(pMnode, pTrans, &smaObj) != 0) goto _OVER;
+ if (mndSetCreateSmaVgroupRedoLogs(pMnode, pTrans, &streamObj.fixedSinkVg) != 0) goto _OVER;
if (mndSetCreateSmaCommitLogs(pMnode, pTrans, &smaObj) != 0) goto _OVER;
+ if (mndSetCreateSmaVgroupCommitLogs(pMnode, pTrans, &streamObj.fixedSinkVg) != 0) goto _OVER;
if (mndSetCreateSmaRedoActions(pMnode, pTrans, pDb, &smaObj) != 0) goto _OVER;
+ if (mndSetCreateSmaVgroupRedoActions(pMnode, pTrans, pDb, &streamObj.fixedSinkVg, &smaObj) != 0) goto _OVER;
if (mndAddStreamToTrans(pMnode, &streamObj, pCreate->ast, STREAM_TRIGGER_AT_ONCE, 0, pTrans) != 0) goto _OVER;
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
@@ -480,7 +539,6 @@ static int32_t mndCheckCreateSmaReq(SMCreateSmaReq *pCreate) {
if (pCreate->intervalUnit < 0) return -1;
if (pCreate->slidingUnit < 0) return -1;
if (pCreate->timezone < 0) return -1;
- if (pCreate->dstVgId < 0) return -1;
if (pCreate->interval < 0) return -1;
if (pCreate->offset < 0) return -1;
if (pCreate->sliding < 0) return -1;
@@ -602,6 +660,24 @@ static int32_t mndSetDropSmaCommitLogs(SMnode *pMnode, STrans *pTrans, SSmaObj *
return 0;
}
+static int32_t mndSetDropSmaVgroupRedoLogs(SMnode *pMnode, STrans *pTrans, SVgObj *pVgroup) {
+ SSdbRaw *pVgRaw = mndVgroupActionEncode(pVgroup);
+ if (pVgRaw == NULL) return -1;
+ if (mndTransAppendRedolog(pTrans, pVgRaw) != 0) return -1;
+ if (sdbSetRawStatus(pVgRaw, SDB_STATUS_DROPPING) != 0) return -1;
+
+ return 0;
+}
+
+static int32_t mndSetDropSmaVgroupCommitLogs(SMnode *pMnode, STrans *pTrans, SVgObj *pVgroup) {
+ SSdbRaw *pVgRaw = mndVgroupActionEncode(pVgroup);
+ if (pVgRaw == NULL) return -1;
+ if (mndTransAppendCommitlog(pTrans, pVgRaw) != 0) return -1;
+ if (sdbSetRawStatus(pVgRaw, SDB_STATUS_DROPPED) != 0) return -1;
+
+ return 0;
+}
+
static int32_t mndSetDropSmaRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SSmaObj *pSma) {
SSdb *pSdb = pMnode->pSdb;
SVgObj *pVgroup = NULL;
@@ -643,23 +719,59 @@ static int32_t mndSetDropSmaRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *
return 0;
}
+static int32_t mndSetDropSmaVgroupRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup) {
+ SVnodeGid *pVgid = pVgroup->vnodeGid + 0;
+ SDnodeObj *pDnode = mndAcquireDnode(pMnode, pVgid->dnodeId);
+ if (pDnode == NULL) return -1;
+
+ STransAction action = {0};
+ action.epSet = mndGetDnodeEpset(pDnode);
+ mndReleaseDnode(pMnode, pDnode);
+
+ int32_t contLen = 0;
+ void *pReq = mndBuildDropVnodeReq(pMnode, pDnode, pDb, pVgroup, &contLen);
+ if (pReq == NULL) return -1;
+
+ action.pCont = pReq;
+ action.contLen = contLen;
+ action.msgType = TDMT_DND_DROP_VNODE;
+ action.acceptableCode = TSDB_CODE_NODE_NOT_DEPLOYED;
+
+ if (mndTransAppendRedoAction(pTrans, &action) != 0) {
+ taosMemoryFree(pReq);
+ return -1;
+ }
+
+ return 0;
+}
+
static int32_t mndDropSma(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SSmaObj *pSma) {
int32_t code = -1;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_DROP_SMA, pReq);
+ SVgObj *pVgroup = NULL;
+ STrans *pTrans = NULL;
+
+ pVgroup = mndAcquireVgroup(pMnode, pSma->dstVgId);
+ if (pVgroup == NULL) goto _OVER;
+
+ pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB, pReq);
if (pTrans == NULL) goto _OVER;
mDebug("trans:%d, used to drop sma:%s", pTrans->id, pSma->name);
- mndTransSetDbInfo(pTrans, pDb);
+ mndTransSetDbName(pTrans, pDb->name);
if (mndSetDropSmaRedoLogs(pMnode, pTrans, pSma) != 0) goto _OVER;
+ if (mndSetDropSmaVgroupRedoLogs(pMnode, pTrans, pVgroup) != 0) goto _OVER;
if (mndSetDropSmaCommitLogs(pMnode, pTrans, pSma) != 0) goto _OVER;
+ if (mndSetDropSmaVgroupCommitLogs(pMnode, pTrans, pVgroup) != 0) goto _OVER;
if (mndSetDropSmaRedoActions(pMnode, pTrans, pDb, pSma) != 0) goto _OVER;
+ if (mndSetDropSmaVgroupRedoActions(pMnode, pTrans, pDb, pVgroup) != 0) goto _OVER;
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
code = 0;
_OVER:
mndTransDrop(pTrans);
+ mndReleaseVgroup(pMnode, pVgroup);
return code;
}
@@ -846,6 +958,9 @@ static int32_t mndRetrieveSma(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBloc
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)n1, false);
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataAppend(pColInfo, numOfRows, (const char *)&pSma->dstVgId, false);
+
numOfRows++;
sdbRelease(pSdb, pSma);
}
diff --git a/source/dnode/mnode/impl/src/mndSnode.c b/source/dnode/mnode/impl/src/mndSnode.c
index 87b61f59ecb088692941a9f57ebf89db2cefa054..c6acb4fef4a09ef78c561178f11428cb3004b4f3 100644
--- a/source/dnode/mnode/impl/src/mndSnode.c
+++ b/source/dnode/mnode/impl/src/mndSnode.c
@@ -253,7 +253,7 @@ static int32_t mndCreateSnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode,
snodeObj.createdTime = taosGetTimestampMs();
snodeObj.updateTime = snodeObj.createdTime;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_CREATE_SNODE, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq);
if (pTrans == NULL) goto _OVER;
mDebug("trans:%d, used to create snode:%d", pTrans->id, pCreate->dnodeId);
@@ -372,7 +372,7 @@ static int32_t mndSetDropSnodeRedoActions(STrans *pTrans, SDnodeObj *pDnode, SSn
static int32_t mndDropSnode(SMnode *pMnode, SRpcMsg *pReq, SSnodeObj *pObj) {
int32_t code = -1;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_DROP_SNODE, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pReq);
if (pTrans == NULL) goto _OVER;
mDebug("trans:%d, used to drop snode:%d", pTrans->id, pObj->id);
diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c
index 61f115e2bab32b64ee6a57e967fb0c8e5c287d0f..556837f397e0d9c09546d5fa9400654b44f39401 100644
--- a/source/dnode/mnode/impl/src/mndStb.c
+++ b/source/dnode/mnode/impl/src/mndStb.c
@@ -87,7 +87,6 @@ SSdbRaw *mndStbActionEncode(SStbObj *pStb) {
SDB_SET_INT64(pRaw, dataPos, pStb->updateTime, _OVER)
SDB_SET_INT64(pRaw, dataPos, pStb->uid, _OVER)
SDB_SET_INT64(pRaw, dataPos, pStb->dbUid, _OVER)
- SDB_SET_INT32(pRaw, dataPos, pStb->version, _OVER)
SDB_SET_INT32(pRaw, dataPos, pStb->tagVer, _OVER)
SDB_SET_INT32(pRaw, dataPos, pStb->colVer, _OVER)
SDB_SET_INT32(pRaw, dataPos, pStb->nextColId, _OVER)
@@ -167,7 +166,6 @@ static SSdbRow *mndStbActionDecode(SSdbRaw *pRaw) {
SDB_GET_INT64(pRaw, dataPos, &pStb->updateTime, _OVER)
SDB_GET_INT64(pRaw, dataPos, &pStb->uid, _OVER)
SDB_GET_INT64(pRaw, dataPos, &pStb->dbUid, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &pStb->version, _OVER)
SDB_GET_INT32(pRaw, dataPos, &pStb->tagVer, _OVER)
SDB_GET_INT32(pRaw, dataPos, &pStb->colVer, _OVER)
SDB_GET_INT32(pRaw, dataPos, &pStb->nextColId, _OVER)
@@ -320,7 +318,6 @@ static int32_t mndStbActionUpdate(SSdb *pSdb, SStbObj *pOld, SStbObj *pNew) {
}
pOld->updateTime = pNew->updateTime;
- pOld->version = pNew->version;
pOld->tagVer = pNew->tagVer;
pOld->colVer = pNew->colVer;
pOld->nextColId = pNew->nextColId;
@@ -388,25 +385,26 @@ static void *mndBuildVCreateStbReq(SMnode *pMnode, SVgObj *pVgroup, SStbObj *pSt
req.name = (char *)tNameGetTableName(&name);
req.suid = pStb->uid;
req.rollup = pStb->ast1Len > 0 ? 1 : 0;
- req.schema.nCols = pStb->numOfColumns;
- req.schema.sver = pStb->version;
- req.schema.tagVer = pStb->tagVer;
- req.schema.colVer = pStb->colVer;
- req.schema.pSchema = pStb->pColumns;
+ // todo
+ req.schemaRow.nCols = pStb->numOfColumns;
+ req.schemaRow.version = pStb->colVer;
+ req.schemaRow.pSchema = pStb->pColumns;
req.schemaTag.nCols = pStb->numOfTags;
- req.schemaTag.sver = 1;
+ req.schemaTag.version = pStb->tagVer;
req.schemaTag.pSchema = pStb->pTags;
if (req.rollup) {
req.pRSmaParam.xFilesFactor = pStb->xFilesFactor;
req.pRSmaParam.delay = pStb->delay;
if (pStb->ast1Len > 0) {
- if (mndConvertRSmaTask(pStb->pAst1, 0, 0, &req.pRSmaParam.qmsg1, &req.pRSmaParam.qmsg1Len) != TSDB_CODE_SUCCESS) {
+ if (mndConvertRSmaTask(pStb->pAst1, pStb->uid, 0, 0, &req.pRSmaParam.qmsg1, &req.pRSmaParam.qmsg1Len, req.pRSmaParam.xFilesFactor) !=
+ TSDB_CODE_SUCCESS) {
return NULL;
}
}
if (pStb->ast2Len > 0) {
- if (mndConvertRSmaTask(pStb->pAst2, 0, 0, &req.pRSmaParam.qmsg2, &req.pRSmaParam.qmsg2Len) != TSDB_CODE_SUCCESS) {
+ if (mndConvertRSmaTask(pStb->pAst2, pStb->uid, 0, 0, &req.pRSmaParam.qmsg2, &req.pRSmaParam.qmsg2Len, req.pRSmaParam.xFilesFactor) !=
+ TSDB_CODE_SUCCESS) {
return NULL;
}
}
@@ -664,7 +662,6 @@ int32_t mndBuildStbFromReq(SMnode *pMnode, SStbObj *pDst, SMCreateStbReq *pCreat
pDst->updateTime = pDst->createdTime;
pDst->uid = mndGenerateUid(pCreate->name, TSDB_TABLE_FNAME_LEN);
pDst->dbUid = pDb->uid;
- pDst->version = 1;
pDst->tagVer = 1;
pDst->colVer = 1;
pDst->nextColId = 1;
@@ -738,7 +735,7 @@ static int32_t mndCreateStb(SMnode *pMnode, SRpcMsg *pReq, SMCreateStbReq *pCrea
int32_t code = -1;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_CREATE_STB, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pReq);
if (pTrans == NULL) goto _OVER;
mDebug("trans:%d, used to create stb:%s", pTrans->id, pCreate->name);
@@ -757,7 +754,7 @@ _OVER:
}
int32_t mndAddStbToTrans(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *pStb) {
- mndTransSetDbInfo(pTrans, pDb);
+ mndTransSetDbName(pTrans, pDb->name);
if (mndSetCreateStbRedoLogs(pMnode, pTrans, pDb, pStb) != 0) return -1;
if (mndSetCreateStbUndoLogs(pMnode, pTrans, pDb, pStb) != 0) return -1;
if (mndSetCreateStbCommitLogs(pMnode, pTrans, pDb, pStb) != 0) return -1;
@@ -956,7 +953,6 @@ static int32_t mndAddSuperTableTag(const SStbObj *pOld, SStbObj *pNew, SArray *p
mDebug("stb:%s, start to add tag %s", pNew->name, pSchema->name);
}
- pNew->version++;
pNew->tagVer++;
return 0;
}
@@ -975,7 +971,6 @@ static int32_t mndDropSuperTableTag(const SStbObj *pOld, SStbObj *pNew, const ch
memmove(pNew->pTags + tag, pNew->pTags + tag + 1, sizeof(SSchema) * (pNew->numOfTags - tag - 1));
pNew->numOfTags--;
- pNew->version++;
pNew->tagVer++;
mDebug("stb:%s, start to drop tag %s", pNew->name, tagName);
return 0;
@@ -1016,7 +1011,6 @@ static int32_t mndAlterStbTagName(const SStbObj *pOld, SStbObj *pNew, SArray *pF
SSchema *pSchema = (SSchema *)(pNew->pTags + tag);
memcpy(pSchema->name, newTagName, TSDB_COL_NAME_LEN);
- pNew->version++;
pNew->tagVer++;
mDebug("stb:%s, start to modify tag %s to %s", pNew->name, oldTagName, newTagName);
return 0;
@@ -1046,7 +1040,6 @@ static int32_t mndAlterStbTagBytes(const SStbObj *pOld, SStbObj *pNew, const SFi
}
pTag->bytes = pField->bytes;
- pNew->version++;
pNew->tagVer++;
mDebug("stb:%s, start to modify tag len %s to %d", pNew->name, pField->name, pField->bytes);
@@ -1086,7 +1079,6 @@ static int32_t mndAddSuperTableColumn(const SStbObj *pOld, SStbObj *pNew, SArray
mDebug("stb:%s, start to add column %s", pNew->name, pSchema->name);
}
- pNew->version++;
pNew->colVer++;
return 0;
}
@@ -1115,7 +1107,6 @@ static int32_t mndDropSuperTableColumn(const SStbObj *pOld, SStbObj *pNew, const
memmove(pNew->pColumns + col, pNew->pColumns + col + 1, sizeof(SSchema) * (pNew->numOfColumns - col - 1));
pNew->numOfColumns--;
- pNew->version++;
pNew->colVer++;
mDebug("stb:%s, start to drop col %s", pNew->name, colName);
return 0;
@@ -1154,7 +1145,6 @@ static int32_t mndAlterStbColumnBytes(const SStbObj *pOld, SStbObj *pNew, const
}
pCol->bytes = pField->bytes;
- pNew->version++;
pNew->colVer++;
mDebug("stb:%s, start to modify col len %s to %d", pNew->name, pField->name, pField->bytes);
@@ -1217,13 +1207,125 @@ static int32_t mndSetAlterStbRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj
return 0;
}
+
+static int32_t mndBuildStbSchemaImp(SDbObj *pDb, SStbObj *pStb, const char *tbName, STableMetaRsp *pRsp) {
+ taosRLockLatch(&pStb->lock);
+
+ int32_t totalCols = pStb->numOfColumns + pStb->numOfTags;
+ pRsp->pSchemas = taosMemoryCalloc(totalCols, sizeof(SSchema));
+ if (pRsp->pSchemas == NULL) {
+ taosRUnLockLatch(&pStb->lock);
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return -1;
+ }
+
+ strcpy(pRsp->dbFName, pStb->db);
+ strcpy(pRsp->tbName, tbName);
+ strcpy(pRsp->stbName, tbName);
+ pRsp->dbId = pDb->uid;
+ pRsp->numOfTags = pStb->numOfTags;
+ pRsp->numOfColumns = pStb->numOfColumns;
+ pRsp->precision = pDb->cfg.precision;
+ pRsp->tableType = TSDB_SUPER_TABLE;
+ pRsp->sversion = pStb->colVer;
+ pRsp->tversion = pStb->tagVer;
+ pRsp->suid = pStb->uid;
+ pRsp->tuid = pStb->uid;
+
+ for (int32_t i = 0; i < pStb->numOfColumns; ++i) {
+ SSchema *pSchema = &pRsp->pSchemas[i];
+ SSchema *pSrcSchema = &pStb->pColumns[i];
+ memcpy(pSchema->name, pSrcSchema->name, TSDB_COL_NAME_LEN);
+ pSchema->type = pSrcSchema->type;
+ pSchema->colId = pSrcSchema->colId;
+ pSchema->bytes = pSrcSchema->bytes;
+ }
+
+ for (int32_t i = 0; i < pStb->numOfTags; ++i) {
+ SSchema *pSchema = &pRsp->pSchemas[i + pStb->numOfColumns];
+ SSchema *pSrcSchema = &pStb->pTags[i];
+ memcpy(pSchema->name, pSrcSchema->name, TSDB_COL_NAME_LEN);
+ pSchema->type = pSrcSchema->type;
+ pSchema->colId = pSrcSchema->colId;
+ pSchema->bytes = pSrcSchema->bytes;
+ }
+
+ taosRUnLockLatch(&pStb->lock);
+ return 0;
+}
+
+static int32_t mndBuildStbSchema(SMnode *pMnode, const char *dbFName, const char *tbName, STableMetaRsp *pRsp) {
+ char tbFName[TSDB_TABLE_FNAME_LEN] = {0};
+ snprintf(tbFName, sizeof(tbFName), "%s.%s", dbFName, tbName);
+
+ SDbObj *pDb = mndAcquireDb(pMnode, dbFName);
+ if (pDb == NULL) {
+ terrno = TSDB_CODE_MND_DB_NOT_SELECTED;
+ return -1;
+ }
+
+ SStbObj *pStb = mndAcquireStb(pMnode, tbFName);
+ if (pStb == NULL) {
+ mndReleaseDb(pMnode, pDb);
+ terrno = TSDB_CODE_MND_INVALID_STB;
+ return -1;
+ }
+
+ int32_t code = mndBuildStbSchemaImp(pDb, pStb, tbName, pRsp);
+ mndReleaseDb(pMnode, pDb);
+ mndReleaseStb(pMnode, pStb);
+ return code;
+}
+
+
+static int32_t mndBuildSMAlterStbRsp(SDbObj *pDb, const SMAlterStbReq *pAlter, SStbObj *pObj, void **pCont, int32_t *pLen) {
+ int ret;
+ SEncoder ec = {0};
+ uint32_t contLen = 0;
+ SMAlterStbRsp alterRsp = {0};
+ SName name = {0};
+ tNameFromString(&name, pAlter->name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
+
+ alterRsp.pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp));
+ if (NULL == alterRsp.pMeta) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return -1;
+ }
+
+ ret = mndBuildStbSchemaImp(pDb, pObj, name.tname, alterRsp.pMeta);
+ if (ret) {
+ tFreeSMAlterStbRsp(&alterRsp);
+ return ret;
+ }
+
+ tEncodeSize(tEncodeSMAlterStbRsp, &alterRsp, contLen, ret);
+ if (ret) {
+ tFreeSMAlterStbRsp(&alterRsp);
+ return ret;
+ }
+
+ void* cont = taosMemoryMalloc(contLen);
+ tEncoderInit(&ec, cont, contLen);
+ tEncodeSMAlterStbRsp(&ec, &alterRsp);
+ tEncoderClear(&ec);
+
+ tFreeSMAlterStbRsp(&alterRsp);
+
+ *pCont = cont;
+ *pLen = contLen;
+
+ return 0;
+}
+
static int32_t mndAlterStb(SMnode *pMnode, SRpcMsg *pReq, const SMAlterStbReq *pAlter, SDbObj *pDb, SStbObj *pOld) {
+ bool needRsp = true;
SStbObj stbObj = {0};
taosRLockLatch(&pOld->lock);
memcpy(&stbObj, pOld, sizeof(SStbObj));
stbObj.pColumns = NULL;
stbObj.pTags = NULL;
stbObj.updateTime = taosGetTimestampMs();
+ stbObj.lock = 0;
taosRUnLockLatch(&pOld->lock);
int32_t code = -1;
@@ -1257,9 +1359,11 @@ static int32_t mndAlterStb(SMnode *pMnode, SRpcMsg *pReq, const SMAlterStbReq *p
code = mndAlterStbColumnBytes(pOld, &stbObj, pField0);
break;
case TSDB_ALTER_TABLE_UPDATE_OPTIONS:
+ needRsp = false;
code = mndUpdateStbCommentAndTTL(pOld, &stbObj, pAlter->comment, pAlter->commentLen, pAlter->ttl);
break;
default:
+ needRsp = false;
terrno = TSDB_CODE_OPS_NOT_SUPPORT;
break;
}
@@ -1267,12 +1371,19 @@ static int32_t mndAlterStb(SMnode *pMnode, SRpcMsg *pReq, const SMAlterStbReq *p
if (code != 0) goto _OVER;
code = -1;
- pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_ALTER_STB, pReq);
+ pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB_INSIDE, pReq);
if (pTrans == NULL) goto _OVER;
mDebug("trans:%d, used to alter stb:%s", pTrans->id, pAlter->name);
- mndTransSetDbInfo(pTrans, pDb);
+ mndTransSetDbName(pTrans, pDb->name);
+ if (needRsp) {
+ void* pCont = NULL;
+ int32_t contLen = 0;
+ if (mndBuildSMAlterStbRsp(pDb, pAlter, &stbObj, &pCont, &contLen)) goto _OVER;
+ mndTransSetRpcRsp(pTrans, pCont, contLen);
+ }
+
if (mndSetAlterStbRedoLogs(pMnode, pTrans, pDb, &stbObj) != 0) goto _OVER;
if (mndSetAlterStbCommitLogs(pMnode, pTrans, pDb, &stbObj) != 0) goto _OVER;
if (mndSetAlterStbRedoActions(pMnode, pTrans, pDb, &stbObj) != 0) goto _OVER;
@@ -1315,9 +1426,10 @@ static int32_t mndProcessMAlterStbReq(SRpcMsg *pReq) {
goto _OVER;
}
- if (alterReq.verInBlock > 0 && alterReq.verInBlock <= pStb->version) {
- mDebug("stb:%s, already exist, verInBlock:%d smaller than verInStb:%d, alter success", alterReq.name,
- alterReq.verInBlock, pStb->version);
+ if ((alterReq.tagVer > 0 && alterReq.colVer > 0) &&
+ (alterReq.tagVer <= pStb->tagVer || alterReq.colVer <= pStb->colVer)) {
+ mDebug("stb:%s, already exist, tagVer:%d colVer:%d smaller than in mnode, tagVer:%d colVer:%d, alter success",
+ alterReq.name, alterReq.tagVer, alterReq.colVer, pStb->tagVer, pStb->colVer);
code = 0;
goto _OVER;
}
@@ -1412,11 +1524,11 @@ static int32_t mndSetDropStbRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *
static int32_t mndDropStb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SStbObj *pStb) {
int32_t code = -1;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_DROP_STB, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pReq);
if (pTrans == NULL) goto _OVER;
mDebug("trans:%d, used to drop stb:%s", pTrans->id, pStb->name);
- mndTransSetDbInfo(pTrans, pDb);
+ mndTransSetDbName(pTrans, pDb->name);
if (mndSetDropStbRedoLogs(pMnode, pTrans, pStb) != 0) goto _OVER;
if (mndSetDropStbCommitLogs(pMnode, pTrans, pStb) != 0) goto _OVER;
@@ -1492,74 +1604,6 @@ static int32_t mndProcessVDropStbRsp(SRpcMsg *pRsp) {
return 0;
}
-static int32_t mndBuildStbSchemaImp(SDbObj *pDb, SStbObj *pStb, const char *tbName, STableMetaRsp *pRsp) {
- taosRLockLatch(&pStb->lock);
-
- int32_t totalCols = pStb->numOfColumns + pStb->numOfTags;
- pRsp->pSchemas = taosMemoryCalloc(totalCols, sizeof(SSchema));
- if (pRsp->pSchemas == NULL) {
- taosRUnLockLatch(&pStb->lock);
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- return -1;
- }
-
- strcpy(pRsp->dbFName, pStb->db);
- strcpy(pRsp->tbName, tbName);
- strcpy(pRsp->stbName, tbName);
- pRsp->dbId = pDb->uid;
- pRsp->numOfTags = pStb->numOfTags;
- pRsp->numOfColumns = pStb->numOfColumns;
- pRsp->precision = pDb->cfg.precision;
- pRsp->tableType = TSDB_SUPER_TABLE;
- pRsp->sversion = pStb->version;
- pRsp->suid = pStb->uid;
- pRsp->tuid = pStb->uid;
-
- for (int32_t i = 0; i < pStb->numOfColumns; ++i) {
- SSchema *pSchema = &pRsp->pSchemas[i];
- SSchema *pSrcSchema = &pStb->pColumns[i];
- memcpy(pSchema->name, pSrcSchema->name, TSDB_COL_NAME_LEN);
- pSchema->type = pSrcSchema->type;
- pSchema->colId = pSrcSchema->colId;
- pSchema->bytes = pSrcSchema->bytes;
- }
-
- for (int32_t i = 0; i < pStb->numOfTags; ++i) {
- SSchema *pSchema = &pRsp->pSchemas[i + pStb->numOfColumns];
- SSchema *pSrcSchema = &pStb->pTags[i];
- memcpy(pSchema->name, pSrcSchema->name, TSDB_COL_NAME_LEN);
- pSchema->type = pSrcSchema->type;
- pSchema->colId = pSrcSchema->colId;
- pSchema->bytes = pSrcSchema->bytes;
- }
-
- taosRUnLockLatch(&pStb->lock);
- return 0;
-}
-
-static int32_t mndBuildStbSchema(SMnode *pMnode, const char *dbFName, const char *tbName, STableMetaRsp *pRsp) {
- char tbFName[TSDB_TABLE_FNAME_LEN] = {0};
- snprintf(tbFName, sizeof(tbFName), "%s.%s", dbFName, tbName);
-
- SDbObj *pDb = mndAcquireDb(pMnode, dbFName);
- if (pDb == NULL) {
- terrno = TSDB_CODE_MND_DB_NOT_SELECTED;
- return -1;
- }
-
- SStbObj *pStb = mndAcquireStb(pMnode, tbFName);
- if (pStb == NULL) {
- mndReleaseDb(pMnode, pDb);
- terrno = TSDB_CODE_MND_INVALID_STB;
- return -1;
- }
-
- int32_t code = mndBuildStbSchemaImp(pDb, pStb, tbName, pRsp);
- mndReleaseDb(pMnode, pDb);
- mndReleaseStb(pMnode, pStb);
- return code;
-}
-
static int32_t mndProcessTableMetaReq(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
int32_t code = -1;
@@ -1605,7 +1649,7 @@ static int32_t mndProcessTableMetaReq(SRpcMsg *pReq) {
pReq->info.rspLen = rspLen;
code = 0;
- mDebug("stb:%s.%s, meta is retrieved", infoReq.dbFName, infoReq.tbName);
+ mTrace("%s.%s, meta is retrieved", infoReq.dbFName, infoReq.tbName);
_OVER:
if (code != 0) {
@@ -1638,7 +1682,7 @@ int32_t mndValidateStbInfo(SMnode *pMnode, SSTableMetaVersion *pStbVersions, int
metaRsp.suid = pStbVersion->suid;
}
- if (pStbVersion->sversion != metaRsp.sversion) {
+ if (pStbVersion->sversion != metaRsp.sversion || pStbVersion->tversion != metaRsp.tversion) {
taosArrayPush(batchMetaRsp.pArray, &metaRsp);
} else {
tFreeSTableMetaRsp(&metaRsp);
diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c
index 9de6138689b43b195ea004c6ba6a7fa489d4060f..5ee5b06a578f7c31ab18f66f2de1cdef2aa85a04 100644
--- a/source/dnode/mnode/impl/src/mndStream.c
+++ b/source/dnode/mnode/impl/src/mndStream.c
@@ -393,7 +393,16 @@ static int32_t mndCreateStream(SMnode *pMnode, SRpcMsg *pReq, SCMCreateStreamReq
streamObj.trigger = pCreate->triggerType;
streamObj.waterMark = pCreate->watermark;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_CREATE_STREAM, pReq);
+ if (streamObj.targetSTbName[0]) {
+ pDb = mndAcquireDbByStb(pMnode, streamObj.targetSTbName);
+ if (pDb == NULL) {
+ terrno = TSDB_CODE_MND_DB_NOT_SELECTED;
+ return -1;
+ }
+ tstrncpy(streamObj.targetDb, pDb->name, TSDB_DB_FNAME_LEN);
+ }
+
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq);
if (pTrans == NULL) {
mError("stream:%s, failed to create since %s", pCreate->name, terrstr());
return -1;
@@ -456,7 +465,7 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
goto CREATE_STREAM_OVER;
}
- pDb = mndAcquireDbByStream(pMnode, createStreamReq.name);
+ pDb = mndAcquireDb(pMnode, createStreamReq.sourceDB);
if (pDb == NULL) {
terrno = TSDB_CODE_MND_DB_NOT_SELECTED;
goto CREATE_STREAM_OVER;
diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c
index 3713bd501a42e5e450a0cb2dc80e0ebe14c32417..c7f8415b65db611500d3df1907405a1d07b4b3c2 100644
--- a/source/dnode/mnode/impl/src/mndSubscribe.c
+++ b/source/dnode/mnode/impl/src/mndSubscribe.c
@@ -42,6 +42,7 @@ static int32_t mndSubActionDelete(SSdb *pSdb, SMqSubscribeObj *);
static int32_t mndSubActionUpdate(SSdb *pSdb, SMqSubscribeObj *pOldSub, SMqSubscribeObj *pNewSub);
static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg);
+static int32_t mndProcessDropCgroupReq(SRpcMsg *pMsg);
static int32_t mndProcessSubscribeInternalRsp(SRpcMsg *pMsg);
static int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows);
@@ -75,6 +76,9 @@ int32_t mndInitSubscribe(SMnode *pMnode) {
mndSetMsgHandle(pMnode, TDMT_VND_MQ_VG_CHANGE_RSP, mndProcessSubscribeInternalRsp);
mndSetMsgHandle(pMnode, TDMT_VND_MQ_VG_DELETE_RSP, mndProcessSubscribeInternalRsp);
mndSetMsgHandle(pMnode, TDMT_MND_MQ_DO_REBALANCE, mndProcessRebalanceReq);
+ mndSetMsgHandle(pMnode, TDMT_MND_MQ_DO_REBALANCE, mndProcessRebalanceReq);
+ mndSetMsgHandle(pMnode, TDMT_MND_MQ_DROP_CGROUP, mndProcessDropCgroupReq);
+ mndSetMsgHandle(pMnode, TDMT_MND_MQ_DROP_CGROUP_RSP, mndProcessSubscribeInternalRsp);
mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_SUBSCRIPTIONS, mndRetrieveSubscribe);
mndAddShowFreeIterHandle(pMnode, TSDB_MGMT_TABLE_TOPICS, mndCancelGetNextSubscribe);
@@ -89,10 +93,8 @@ static SMqSubscribeObj *mndCreateSub(SMnode *pMnode, const SMqTopicObj *pTopic,
return NULL;
}
pSub->dbUid = pTopic->dbUid;
+ pSub->stbUid = pTopic->stbUid;
pSub->subType = pTopic->subType;
- pSub->withTbName = pTopic->withTbName;
- pSub->withSchema = pTopic->withSchema;
- pSub->withTag = pTopic->withTag;
ASSERT(pSub->unassignedVgs->size == 0);
ASSERT(taosHashGetSize(pSub->consumerHash) == 0);
@@ -117,9 +119,7 @@ static int32_t mndBuildSubChangeReq(void **pBuf, int32_t *pLen, const SMqSubscri
req.vgId = pRebVg->pVgEp->vgId;
req.qmsg = pRebVg->pVgEp->qmsg;
req.subType = pSub->subType;
- req.withTbName = pSub->withTbName;
- req.withSchema = pSub->withSchema;
- req.withTag = pSub->withTag;
+ req.suid = pSub->stbUid;
strncpy(req.subKey, pSub->key, TSDB_SUBSCRIBE_KEY_LEN);
int32_t tlen = sizeof(SMsgHead) + tEncodeSMqRebVgReq(NULL, &req);
@@ -154,6 +154,7 @@ static int32_t mndPersistSubChangeVgReq(SMnode *pMnode, STrans *pTrans, const SM
int32_t vgId = pRebVg->pVgEp->vgId;
SVgObj *pVgObj = mndAcquireVgroup(pMnode, vgId);
if (pVgObj == NULL) {
+ ASSERT(0);
taosMemoryFree(buf);
return -1;
}
@@ -389,8 +390,8 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR
mInfo("rebalance calculation completed, rebalanced vg:");
for (int32_t i = 0; i < taosArrayGetSize(pOutput->rebVgs); i++) {
SMqRebOutputVg *pOutputRebVg = taosArrayGet(pOutput->rebVgs, i);
- mInfo("vg: %d moved from consumer %ld to consumer %ld", pOutputRebVg->pVgEp->vgId, pOutputRebVg->oldConsumerId,
- pOutputRebVg->newConsumerId);
+ mInfo("vgId:%d, moved from consumer %" PRId64 " to consumer %" PRId64, pOutputRebVg->pVgEp->vgId,
+ pOutputRebVg->oldConsumerId, pOutputRebVg->newConsumerId);
}
// 9. clear
@@ -400,10 +401,9 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR
}
static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOutputObj *pOutput) {
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_REBALANCE, pMsg);
- if (pTrans == NULL) {
- return -1;
- }
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pMsg);
+ if (pTrans == NULL) return -1;
+
// make txn:
// 1. redo action: action to all vg
const SArray *rebVgs = pOutput->rebVgs;
@@ -417,7 +417,7 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOu
// 2. redo log: subscribe and vg assignment
// subscribe
- if (mndSetSubRedoLogs(pMnode, pTrans, pOutput->pSub) != 0) {
+ if (mndSetSubCommitLogs(pMnode, pTrans, pOutput->pSub) != 0) {
goto REB_FAIL;
}
@@ -448,6 +448,7 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOu
taosArrayPush(pConsumerNew->rebNewTopics, &topic);
mndReleaseConsumer(pMnode, pConsumerOld);
if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) {
+ ASSERT(0);
goto REB_FAIL;
}
}
@@ -466,9 +467,11 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOu
taosArrayPush(pConsumerNew->rebRemovedTopics, &topic);
mndReleaseConsumer(pMnode, pConsumerOld);
if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) {
+ ASSERT(0);
goto REB_FAIL;
}
}
+#if 0
if (consumerNum) {
char topic[TSDB_TOPIC_FNAME_LEN];
char cgroup[TSDB_CGROUP_LEN];
@@ -479,17 +482,28 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOu
SMqTopicObj topicObj = {0};
memcpy(&topicObj, pTopic, sizeof(SMqTopicObj));
topicObj.refConsumerCnt = pTopic->refConsumerCnt - consumerNum;
- if (mndSetTopicCommitLogs(pMnode, pTrans, &topicObj) != 0) goto REB_FAIL;
+ // TODO is that correct?
+ pTopic->refConsumerCnt = topicObj.refConsumerCnt;
+ mInfo("subscribe topic %s unref %d consumer cgroup %s, refcnt %d", pTopic->name, consumerNum, cgroup,
+ topicObj.refConsumerCnt);
+ if (mndSetTopicCommitLogs(pMnode, pTrans, &topicObj) != 0) {
+ ASSERT(0);
+ goto REB_FAIL;
+ }
}
}
+#endif
// 4. TODO commit log: modification log
// 5. set cb
- mndTransSetCb(pTrans, MQ_REB_TRANS_START_FUNC, MQ_REB_TRANS_STOP_FUNC, NULL, 0);
+ mndTransSetCb(pTrans, TRANS_START_FUNC_MQ_REB, TRANS_STOP_FUNC_MQ_REB, NULL, 0);
// 6. execution
- if (mndTransPrepare(pMnode, pTrans) != 0) goto REB_FAIL;
+ if (mndTransPrepare(pMnode, pTrans) != 0) {
+ ASSERT(0);
+ goto REB_FAIL;
+ }
mndTransDrop(pTrans);
return 0;
@@ -577,6 +591,63 @@ static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg) {
return 0;
}
+static int32_t mndProcessDropCgroupReq(SRpcMsg *pReq) {
+ SMnode *pMnode = pReq->info.node;
+ SSdb *pSdb = pMnode->pSdb;
+ SMDropCgroupReq dropReq = {0};
+
+ if (tDeserializeSMDropCgroupReq(pReq->pCont, pReq->contLen, &dropReq) != 0) {
+ terrno = TSDB_CODE_INVALID_MSG;
+ return -1;
+ }
+
+ SMqSubscribeObj *pSub = mndAcquireSubscribe(pMnode, dropReq.cgroup, dropReq.topic);
+ if (pSub == NULL) {
+ if (dropReq.igNotExists) {
+ mDebug("cgroup:%s on topic:%s, not exist, ignore not exist is set", dropReq.cgroup, dropReq.topic);
+ return 0;
+ } else {
+ terrno = TSDB_CODE_MND_SUBSCRIBE_NOT_EXIST;
+ mError("topic:%s, cgroup:%s, failed to drop since %s", dropReq.topic, dropReq.cgroup, terrstr());
+ return -1;
+ }
+ }
+
+ if (taosHashGetSize(pSub->consumerHash) != 0) {
+ terrno = TSDB_CODE_MND_CGROUP_USED;
+ mError("cgroup:%s on topic:%s, failed to drop since %s", dropReq.cgroup, dropReq.topic, terrstr());
+ mndReleaseSubscribe(pMnode, pSub);
+ return -1;
+ }
+
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq);
+ if (pTrans == NULL) {
+ mError("cgroup: %s on topic:%s, failed to drop since %s", dropReq.cgroup, dropReq.topic, terrstr());
+ mndReleaseSubscribe(pMnode, pSub);
+ return -1;
+ }
+
+ mDebug("trans:%d, used to drop cgroup:%s on topic %s", pTrans->id, dropReq.cgroup, dropReq.topic);
+
+ if (mndDropOffsetBySubKey(pMnode, pTrans, pSub->key) < 0) {
+ ASSERT(0);
+ mndReleaseSubscribe(pMnode, pSub);
+ return -1;
+ }
+
+ if (mndSetDropSubCommitLogs(pMnode, pTrans, pSub) < 0) {
+ mError("cgroup %s on topic:%s, failed to drop since %s", dropReq.cgroup, dropReq.topic, terrstr());
+ mndReleaseSubscribe(pMnode, pSub);
+ return -1;
+ }
+
+ mndTransPrepare(pMnode, pTrans);
+
+ mndReleaseSubscribe(pMnode, pSub);
+
+ return TSDB_CODE_ACTION_IN_PROGRESS;
+}
+
void mndCleanupSubscribe(SMnode *pMnode) {}
static SSdbRaw *mndSubActionEncode(SMqSubscribeObj *pSub) {
@@ -731,7 +802,7 @@ static int32_t mndSetDropSubRedoLogs(SMnode *pMnode, STrans *pTrans, SMqSubscrib
return 0;
}
-static int32_t mndSetDropSubCommitLogs(SMnode *pMnode, STrans *pTrans, SMqSubscribeObj *pSub) {
+int32_t mndSetDropSubCommitLogs(SMnode *pMnode, STrans *pTrans, SMqSubscribeObj *pSub) {
SSdbRaw *pCommitRaw = mndSubActionEncode(pSub);
if (pCommitRaw == NULL) return -1;
if (mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) return -1;
@@ -882,7 +953,7 @@ static int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
}
// do not show for cleared subscription
-#if 0
+#if 1
int32_t sz = taosArrayGetSize(pSub->unassignedVgs);
for (int32_t i = 0; i < sz; i++) {
SMqVgEp *pVgEp = taosArrayGetP(pSub->unassignedVgs, i);
diff --git a/source/dnode/mnode/impl/src/mndSync.c b/source/dnode/mnode/impl/src/mndSync.c
index e3f7b40526328cd13ac69655980cb674980f2443..245f0938b906300af29bf3f6caf71c834877eaa1 100644
--- a/source/dnode/mnode/impl/src/mndSync.c
+++ b/source/dnode/mnode/impl/src/mndSync.c
@@ -17,28 +17,41 @@
#include "mndSync.h"
#include "mndTrans.h"
-int32_t mndSyncEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) {
- int32_t code = tmsgPutToQueue(msgcb, SYNC_QUEUE, pMsg);
- if (code != 0) {
- rpcFreeCont(pMsg->pCont);
- }
- return code;
+int32_t mndSyncEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) {
+ SMsgHead *pHead = pMsg->pCont;
+ pHead->contLen = htonl(pHead->contLen);
+ pHead->vgId = htonl(pHead->vgId);
+
+ return tmsgPutToQueue(msgcb, SYNC_QUEUE, pMsg);
}
int32_t mndSyncSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) { return tmsgSendReq(pEpSet, pMsg); }
void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
SMnode *pMnode = pFsm->data;
- SSdb *pSdb = pMnode->pSdb;
SSyncMgmt *pMgmt = &pMnode->syncMgmt;
SSdbRaw *pRaw = pMsg->pCont;
- mTrace("ver:%" PRId64 ", apply raw:%p to sdb, role:%s", cbMeta.index, pRaw, syncStr(cbMeta.state));
- sdbWriteWithoutFree(pSdb, pRaw);
- sdbSetApplyIndex(pSdb, cbMeta.index);
- sdbSetApplyTerm(pSdb, cbMeta.term);
- if (cbMeta.state == TAOS_SYNC_STATE_LEADER) {
+ int32_t transId = sdbGetIdFromRaw(pMnode->pSdb, pRaw);
+ pMgmt->errCode = cbMeta.code;
+ mTrace("trans:%d, is proposed, savedTransId:%d code:0x%x, ver:%" PRId64 " term:%" PRId64 " role:%s raw:%p", transId,
+ pMgmt->transId, cbMeta.code, cbMeta.index, cbMeta.term, syncStr(cbMeta.state), pRaw);
+
+ if (pMgmt->errCode == 0) {
+ sdbWriteWithoutFree(pMnode->pSdb, pRaw);
+ sdbSetApplyIndex(pMnode->pSdb, cbMeta.index);
+ sdbSetApplyTerm(pMnode->pSdb, cbMeta.term);
+ }
+
+ if (pMgmt->transId == transId) {
+ if (pMgmt->errCode != 0) {
+ mError("trans:%d, failed to propose since %s", transId, tstrerror(pMgmt->errCode));
+ }
tsem_post(&pMgmt->syncSem);
+ } else {
+ if (cbMeta.index - sdbGetApplyIndex(pMnode->pSdb) > 100) {
+ sdbWriteFile(pMnode->pSdb);
+ }
}
}
@@ -51,8 +64,63 @@ int32_t mndSyncGetSnapshot(struct SSyncFSM *pFsm, SSnapshot *pSnapshot) {
void mndRestoreFinish(struct SSyncFSM *pFsm) {
SMnode *pMnode = pFsm->data;
- mndTransPullup(pMnode);
- pMnode->syncMgmt.restored = true;
+ if (!pMnode->deploy) {
+ mInfo("mnode sync restore finished, and will handle outstanding transactions");
+ mndTransPullup(pMnode);
+ mndSetRestore(pMnode, true);
+ } else {
+ mInfo("mnode sync restore finished, and will set ready after first deploy");
+ }
+}
+
+void mndReConfig(struct SSyncFSM *pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta) {
+ SMnode *pMnode = pFsm->data;
+ SSyncMgmt *pMgmt = &pMnode->syncMgmt;
+
+ pMgmt->errCode = cbMeta.code;
+ mInfo("trans:-1, sync reconfig is proposed, savedTransId:%d code:0x%x, curTerm:%" PRId64 " term:%" PRId64,
+ pMgmt->transId, cbMeta.code, cbMeta.index, cbMeta.term);
+
+ if (pMgmt->transId == -1) {
+ if (pMgmt->errCode != 0) {
+ mError("trans:-1, failed to propose sync reconfig since %s", tstrerror(pMgmt->errCode));
+ }
+ tsem_post(&pMgmt->syncSem);
+ }
+}
+
+int32_t mndSnapshotStartRead(struct SSyncFSM *pFsm, void **ppReader) {
+ mInfo("start to read snapshot from sdb");
+ SMnode *pMnode = pFsm->data;
+ return sdbStartRead(pMnode->pSdb, (SSdbIter **)ppReader);
+}
+
+int32_t mndSnapshotStopRead(struct SSyncFSM *pFsm, void *pReader) {
+ mInfo("stop to read snapshot from sdb");
+ SMnode *pMnode = pFsm->data;
+ return sdbStopRead(pMnode->pSdb, pReader);
+}
+
+int32_t mndSnapshotDoRead(struct SSyncFSM *pFsm, void *pReader, void **ppBuf, int32_t *len) {
+ SMnode *pMnode = pFsm->data;
+ return sdbDoRead(pMnode->pSdb, pReader, ppBuf, len);
+}
+
+int32_t mndSnapshotStartWrite(struct SSyncFSM *pFsm, void **ppWriter) {
+ mInfo("start to apply snapshot to sdb");
+ SMnode *pMnode = pFsm->data;
+ return sdbStartWrite(pMnode->pSdb, (SSdbIter **)ppWriter);
+}
+
+int32_t mndSnapshotStopWrite(struct SSyncFSM *pFsm, void *pWriter, bool isApply) {
+ mInfo("stop to apply snapshot to sdb, apply:%d", isApply);
+ SMnode *pMnode = pFsm->data;
+ return sdbStopWrite(pMnode->pSdb, pWriter, isApply);
+}
+
+int32_t mndSnapshotDoWrite(struct SSyncFSM *pFsm, void *pWriter, void *pBuf, int32_t len) {
+ SMnode *pMnode = pFsm->data;
+ return sdbDoWrite(pMnode->pSdb, pWriter, pBuf, len);
}
SSyncFSM *mndSyncMakeFsm(SMnode *pMnode) {
@@ -61,9 +129,15 @@ SSyncFSM *mndSyncMakeFsm(SMnode *pMnode) {
pFsm->FpCommitCb = mndSyncCommitMsg;
pFsm->FpPreCommitCb = NULL;
pFsm->FpRollBackCb = NULL;
+ pFsm->FpRestoreFinishCb = mndRestoreFinish;
+ pFsm->FpReConfigCb = mndReConfig;
pFsm->FpGetSnapshot = mndSyncGetSnapshot;
- pFsm->FpRestoreFinish = mndRestoreFinish;
- pFsm->FpRestoreSnapshot = NULL;
+ pFsm->FpSnapshotStartRead = mndSnapshotStartRead;
+ pFsm->FpSnapshotStopRead = mndSnapshotStopRead;
+ pFsm->FpSnapshotDoRead = mndSnapshotDoRead;
+ pFsm->FpSnapshotStartWrite = mndSnapshotStartWrite;
+ pFsm->FpSnapshotStopWrite = mndSnapshotStopWrite;
+ pFsm->FpSnapshotDoWrite = mndSnapshotDoWrite;
return pFsm;
}
@@ -92,14 +166,17 @@ int32_t mndInitSync(SMnode *pMnode) {
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s%ssync", pMnode->path, TD_DIRSEP);
syncInfo.pWal = pMgmt->pWal;
syncInfo.pFsm = mndSyncMakeFsm(pMnode);
+ syncInfo.isStandBy = pMgmt->standby;
SSyncCfg *pCfg = &syncInfo.syncCfg;
pCfg->replicaNum = pMnode->replica;
pCfg->myIndex = pMnode->selfIndex;
+ mInfo("start to open mnode sync, replica:%d myindex:%d standby:%d", pCfg->replicaNum, pCfg->myIndex, pMgmt->standby);
for (int32_t i = 0; i < pMnode->replica; ++i) {
SNodeInfo *pNode = &pCfg->nodeInfo[i];
tstrncpy(pNode->nodeFqdn, pMnode->replicas[i].fqdn, sizeof(pNode->nodeFqdn));
pNode->nodePort = pMnode->replicas[i].port;
+ mInfo("index:%d, fqdn:%s port:%d", i, pNode->nodeFqdn, pNode->nodePort);
}
tsem_init(&pMgmt->syncSem, 0, 0);
@@ -126,15 +203,17 @@ void mndCleanupSync(SMnode *pMnode) {
memset(pMgmt, 0, sizeof(SSyncMgmt));
}
-int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw) {
+int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw, int32_t transId) {
SSyncMgmt *pMgmt = &pMnode->syncMgmt;
- pMgmt->errCode = 0;
-
- SRpcMsg rsp = {.code = TDMT_MND_APPLY_MSG, .contLen = sdbGetRawTotalSize(pRaw)};
+ SRpcMsg rsp = {.code = TDMT_MND_APPLY_MSG, .contLen = sdbGetRawTotalSize(pRaw)};
rsp.pCont = rpcMallocCont(rsp.contLen);
if (rsp.pCont == NULL) return -1;
memcpy(rsp.pCont, pRaw, rsp.contLen);
+ pMgmt->errCode = 0;
+ pMgmt->transId = transId;
+ mTrace("trans:%d, will be proposed", pMgmt->transId);
+
const bool isWeak = false;
int32_t code = syncPropose(pMgmt->sync, &rsp, isWeak);
if (code == 0) {
@@ -147,22 +226,42 @@ int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw) {
terrno = TSDB_CODE_APP_ERROR;
}
- if (code != 0) return code;
+ rpcFreeCont(rsp.pCont);
+ if (code != 0) {
+ mError("trans:%d, failed to propose, code:0x%x", pMgmt->transId, code);
+ return code;
+ }
+
return pMgmt->errCode;
}
void mndSyncStart(SMnode *pMnode) {
SSyncMgmt *pMgmt = &pMnode->syncMgmt;
syncSetMsgCb(pMgmt->sync, &pMnode->msgCb);
- syncStart(pMgmt->sync);
- mDebug("sync:%" PRId64 " is started", pMgmt->sync);
+
+ if (pMgmt->standby) {
+ syncStartStandBy(pMgmt->sync);
+ } else {
+ syncStart(pMgmt->sync);
+ }
+ mDebug("mnode sync started, id:%" PRId64 " standby:%d", pMgmt->sync, pMgmt->standby);
}
void mndSyncStop(SMnode *pMnode) {}
bool mndIsMaster(SMnode *pMnode) {
SSyncMgmt *pMgmt = &pMnode->syncMgmt;
- pMgmt->state = syncGetMyRole(pMgmt->sync);
- return (pMgmt->state == TAOS_SYNC_STATE_LEADER) && (pMnode->syncMgmt.restored);
+ ESyncState state = syncGetMyRole(pMgmt->sync);
+ if (state != TAOS_SYNC_STATE_LEADER) {
+ terrno = TSDB_CODE_SYN_NOT_LEADER;
+ return false;
+ }
+
+ if (!pMnode->restored) {
+ terrno = TSDB_CODE_APP_NOT_READY;
+ return false;
+ }
+
+ return true;
}
diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c
index ec3d30ff07b2eeb169cc214cc24fe43be4fe9f15..e0d565c9afb0fafe46c5ff3dd96a83ee9c76372d 100644
--- a/source/dnode/mnode/impl/src/mndTopic.c
+++ b/source/dnode/mnode/impl/src/mndTopic.c
@@ -15,6 +15,7 @@
#include "mndTopic.h"
#include "mndAuth.h"
+#include "mndConsumer.h"
#include "mndDb.h"
#include "mndDnode.h"
#include "mndMnode.h"
@@ -69,6 +70,56 @@ const char *mndTopicGetShowName(const char topic[TSDB_TOPIC_FNAME_LEN]) {
return strchr(topic, '.') + 1;
}
+bool mndCheckColAndTagModifiable(SMnode *pMnode, int64_t suid, const SArray *colAndTagIds) {
+ SSdb *pSdb = pMnode->pSdb;
+ void *pIter = NULL;
+ bool found = false;
+ while (1) {
+ SMqTopicObj *pTopic = NULL;
+ pIter = sdbFetch(pSdb, SDB_TOPIC, pIter, (void **)&pTopic);
+ if (pIter == NULL) break;
+ if (pTopic->subType != TOPIC_SUB_TYPE__COLUMN) {
+ sdbRelease(pSdb, pTopic);
+ continue;
+ }
+
+ SNode *pAst = NULL;
+ if (nodesStringToNode(pTopic->ast, &pAst) != 0) {
+ ASSERT(0);
+ return false;
+ }
+
+ SHashObj *pColHash = NULL;
+ SNodeList *pNodeList;
+ nodesCollectColumns((SSelectStmt *)pAst, SQL_CLAUSE_FROM, NULL, COLLECT_COL_TYPE_ALL, &pNodeList);
+ SNode *pNode = NULL;
+ FOREACH(pNode, pNodeList) {
+ SColumnNode *pCol = (SColumnNode *)pNode;
+ if (pCol->tableId != suid) goto NEXT;
+ if (pColHash == NULL) {
+ pColHash = taosHashInit(0, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK);
+ }
+ if (pCol->colId > 0) {
+ taosHashPut(pColHash, &pCol->colId, sizeof(int16_t), NULL, 0);
+ }
+ }
+
+ for (int32_t i = 0; i < taosArrayGetSize(colAndTagIds); i++) {
+ int16_t *pColId = taosArrayGet(colAndTagIds, i);
+ if (taosHashGet(pColHash, pColId, sizeof(int16_t)) != NULL) {
+ found = true;
+ goto NEXT;
+ }
+ }
+
+ NEXT:
+ sdbRelease(pSdb, pTopic);
+ nodesDestroyNode(pAst);
+ if (found) return false;
+ }
+ return true;
+}
+
SSdbRaw *mndTopicActionEncode(SMqTopicObj *pTopic) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@@ -95,11 +146,8 @@ SSdbRaw *mndTopicActionEncode(SMqTopicObj *pTopic) {
SDB_SET_INT64(pRaw, dataPos, pTopic->dbUid, TOPIC_ENCODE_OVER);
SDB_SET_INT32(pRaw, dataPos, pTopic->version, TOPIC_ENCODE_OVER);
SDB_SET_INT8(pRaw, dataPos, pTopic->subType, TOPIC_ENCODE_OVER);
- SDB_SET_INT8(pRaw, dataPos, pTopic->withTbName, TOPIC_ENCODE_OVER);
- SDB_SET_INT8(pRaw, dataPos, pTopic->withSchema, TOPIC_ENCODE_OVER);
- SDB_SET_INT8(pRaw, dataPos, pTopic->withTag, TOPIC_ENCODE_OVER);
- SDB_SET_INT32(pRaw, dataPos, pTopic->consumerCnt, TOPIC_ENCODE_OVER);
+ SDB_SET_INT64(pRaw, dataPos, pTopic->stbUid, TOPIC_ENCODE_OVER);
SDB_SET_INT32(pRaw, dataPos, pTopic->sqlLen, TOPIC_ENCODE_OVER);
SDB_SET_BINARY(pRaw, dataPos, pTopic->sql, pTopic->sqlLen, TOPIC_ENCODE_OVER);
SDB_SET_INT32(pRaw, dataPos, pTopic->astLen, TOPIC_ENCODE_OVER);
@@ -121,8 +169,6 @@ SSdbRaw *mndTopicActionEncode(SMqTopicObj *pTopic) {
SDB_SET_BINARY(pRaw, dataPos, swBuf, schemaLen, TOPIC_ENCODE_OVER);
}
- SDB_SET_INT32(pRaw, dataPos, pTopic->refConsumerCnt, TOPIC_ENCODE_OVER);
-
SDB_SET_RESERVE(pRaw, dataPos, MND_TOPIC_RESERVE_SIZE, TOPIC_ENCODE_OVER);
SDB_SET_DATALEN(pRaw, dataPos, TOPIC_ENCODE_OVER);
@@ -167,12 +213,8 @@ SSdbRow *mndTopicActionDecode(SSdbRaw *pRaw) {
SDB_GET_INT64(pRaw, dataPos, &pTopic->dbUid, TOPIC_DECODE_OVER);
SDB_GET_INT32(pRaw, dataPos, &pTopic->version, TOPIC_DECODE_OVER);
SDB_GET_INT8(pRaw, dataPos, &pTopic->subType, TOPIC_DECODE_OVER);
- SDB_GET_INT8(pRaw, dataPos, &pTopic->withTbName, TOPIC_DECODE_OVER);
- SDB_GET_INT8(pRaw, dataPos, &pTopic->withSchema, TOPIC_DECODE_OVER);
- SDB_GET_INT8(pRaw, dataPos, &pTopic->withTag, TOPIC_DECODE_OVER);
-
- SDB_GET_INT32(pRaw, dataPos, &pTopic->consumerCnt, TOPIC_DECODE_OVER);
+ SDB_GET_INT64(pRaw, dataPos, &pTopic->stbUid, TOPIC_DECODE_OVER);
SDB_GET_INT32(pRaw, dataPos, &pTopic->sqlLen, TOPIC_DECODE_OVER);
pTopic->sql = taosMemoryCalloc(pTopic->sqlLen, sizeof(char));
if (pTopic->sql == NULL) {
@@ -217,12 +259,10 @@ SSdbRow *mndTopicActionDecode(SSdbRaw *pRaw) {
}
} else {
pTopic->schema.nCols = 0;
- pTopic->schema.sver = 0;
+ pTopic->schema.version = 0;
pTopic->schema.pSchema = NULL;
}
- SDB_GET_INT32(pRaw, dataPos, &pTopic->refConsumerCnt, TOPIC_DECODE_OVER);
-
SDB_GET_RESERVE(pRaw, dataPos, MND_TOPIC_RESERVE_SIZE, TOPIC_DECODE_OVER);
terrno = TSDB_CODE_SUCCESS;
@@ -253,8 +293,6 @@ static int32_t mndTopicActionUpdate(SSdb *pSdb, SMqTopicObj *pOldTopic, SMqTopic
atomic_exchange_64(&pOldTopic->updateTime, pNewTopic->updateTime);
atomic_exchange_32(&pOldTopic->version, pNewTopic->version);
- atomic_store_32(&pOldTopic->refConsumerCnt, pNewTopic->refConsumerCnt);
-
/*taosWLockLatch(&pOldTopic->lock);*/
// TODO handle update
@@ -277,18 +315,6 @@ void mndReleaseTopic(SMnode *pMnode, SMqTopicObj *pTopic) {
sdbRelease(pSdb, pTopic);
}
-#if 0
-static SDbObj *mndAcquireDbByTopic(SMnode *pMnode, char *topicName) {
- SName name = {0};
- tNameFromString(&name, topicName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
-
- char db[TSDB_TOPIC_FNAME_LEN] = {0};
- tNameGetFullDbName(&name, db);
-
- return mndAcquireDb(pMnode, db);
-}
-#endif
-
static SDDropTopicReq *mndBuildDropTopicMsg(SMnode *pMnode, SVgObj *pVgroup, SMqTopicObj *pTopic) {
int32_t contLen = sizeof(SDDropTopicReq);
@@ -307,11 +333,19 @@ static SDDropTopicReq *mndBuildDropTopicMsg(SMnode *pMnode, SVgObj *pVgroup, SMq
}
static int32_t mndCheckCreateTopicReq(SCMCreateTopicReq *pCreate) {
- if (pCreate->name[0] == 0 || pCreate->sql == NULL || pCreate->sql[0] == 0 || pCreate->subscribeDbName[0] == 0) {
- terrno = TSDB_CODE_MND_INVALID_TOPIC;
- return -1;
+ terrno = TSDB_CODE_MND_INVALID_TOPIC;
+
+ if (pCreate->sql == NULL) return -1;
+
+ if (pCreate->subType == TOPIC_SUB_TYPE__COLUMN) {
+ if (pCreate->ast == NULL || pCreate->ast[0] == 0) return -1;
+ } else if (pCreate->subType == TOPIC_SUB_TYPE__TABLE) {
+ if (pCreate->subStbName[0] == 0) return -1;
+ } else if (pCreate->subType == TOPIC_SUB_TYPE__DB) {
+ if (pCreate->subDbName[0] == 0) return -1;
}
+ terrno = TSDB_CODE_SUCCESS;
return 0;
}
@@ -327,14 +361,11 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *
topicObj.version = 1;
topicObj.sql = strdup(pCreate->sql);
topicObj.sqlLen = strlen(pCreate->sql) + 1;
- topicObj.refConsumerCnt = 0;
+ topicObj.subType = pCreate->subType;
- if (pCreate->ast && pCreate->ast[0]) {
+ if (pCreate->subType == TOPIC_SUB_TYPE__COLUMN) {
topicObj.ast = strdup(pCreate->ast);
topicObj.astLen = strlen(pCreate->ast) + 1;
- topicObj.subType = TOPIC_SUB_TYPE__TABLE;
- topicObj.withTbName = pCreate->withTbName;
- topicObj.withSchema = pCreate->withSchema;
SNode *pAst = NULL;
if (nodesStringToNode(pCreate->ast, &pAst) != 0) {
@@ -367,16 +398,18 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *
taosMemoryFree(topicObj.sql);
return -1;
}
- } else {
- topicObj.ast = NULL;
- topicObj.astLen = 0;
- topicObj.physicalPlan = NULL;
- topicObj.subType = TOPIC_SUB_TYPE__DB;
- topicObj.withTbName = 1;
- topicObj.withSchema = 1;
- }
-
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_CREATE_TOPIC, pReq);
+ } else if (pCreate->subType == TOPIC_SUB_TYPE__TABLE) {
+ SStbObj *pStb = mndAcquireStb(pMnode, pCreate->subStbName);
+ topicObj.stbUid = pStb->uid;
+ }
+ /*} else if (pCreate->subType == TOPIC_SUB_TYPE__DB) {*/
+ /*topicObj.ast = NULL;*/
+ /*topicObj.astLen = 0;*/
+ /*topicObj.physicalPlan = NULL;*/
+ /*topicObj.withTbName = 1;*/
+ /*topicObj.withSchema = 1;*/
+
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq);
if (pTrans == NULL) {
mError("topic:%s, failed to create since %s", pCreate->name, terrstr());
taosMemoryFreeClear(topicObj.ast);
@@ -441,7 +474,7 @@ static int32_t mndProcessCreateTopicReq(SRpcMsg *pReq) {
goto CREATE_TOPIC_OVER;
}
- pDb = mndAcquireDb(pMnode, createTopicReq.subscribeDbName);
+ pDb = mndAcquireDb(pMnode, createTopicReq.subDbName);
if (pDb == NULL) {
terrno = TSDB_CODE_MND_DB_NOT_SELECTED;
goto CREATE_TOPIC_OVER;
@@ -492,8 +525,8 @@ static int32_t mndDropTopic(SMnode *pMnode, STrans *pTrans, SRpcMsg *pReq, SMqTo
}
static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) {
- SMnode *pMnode = pReq->info.node;
- /*SSdb *pSdb = pMnode->pSdb;*/
+ SMnode *pMnode = pReq->info.node;
+ SSdb *pSdb = pMnode->pSdb;
SMDropTopicReq dropReq = {0};
if (tDeserializeSMDropTopicReq(pReq->pCont, pReq->contLen, &dropReq) != 0) {
@@ -513,14 +546,38 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) {
}
}
+ void *pIter = NULL;
+ SMqConsumerObj *pConsumer;
+ while (1) {
+ pIter = sdbFetch(pSdb, SDB_CONSUMER, pIter, (void **)&pConsumer);
+ if (pIter == NULL) break;
+
+ if (pConsumer->status == MQ_CONSUMER_STATUS__LOST_REBD) continue;
+ int32_t sz = taosArrayGetSize(pConsumer->assignedTopics);
+ for (int32_t i = 0; i < sz; i++) {
+ char *name = taosArrayGetP(pConsumer->assignedTopics, i);
+ if (strcmp(name, pTopic->name) == 0) {
+ mndReleaseConsumer(pMnode, pConsumer);
+ mndReleaseTopic(pMnode, pTopic);
+ terrno = TSDB_CODE_MND_TOPIC_SUBSCRIBED;
+ mError("topic:%s, failed to drop since subscribed by consumer %ld from cgroup %s", dropReq.name,
+ pConsumer->consumerId, pConsumer->cgroup);
+ return -1;
+ }
+ }
+ sdbRelease(pSdb, pConsumer);
+ }
+
+#if 0
if (pTopic->refConsumerCnt != 0) {
mndReleaseTopic(pMnode, pTopic);
terrno = TSDB_CODE_MND_TOPIC_SUBSCRIBED;
mError("topic:%s, failed to drop since %s", dropReq.name, terrstr());
return -1;
}
+#endif
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_DROP_TOPIC, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq);
if (pTrans == NULL) {
mError("topic:%s, failed to drop since %s", pTopic->name, terrstr());
return -1;
diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c
index d685c8850028b65563da0ee824917611746835a5..bbee59090d1600693478d38fec9ff47082bcc032 100644
--- a/source/dnode/mnode/impl/src/mndTrans.c
+++ b/source/dnode/mnode/impl/src/mndTrans.c
@@ -37,19 +37,18 @@ static int32_t mndTransAppendAction(SArray *pArray, STransAction *pAction);
static void mndTransDropLogs(SArray *pArray);
static void mndTransDropActions(SArray *pArray);
static void mndTransDropData(STrans *pTrans);
-static int32_t mndTransExecuteLogs(SMnode *pMnode, SArray *pArray);
static int32_t mndTransExecuteActions(SMnode *pMnode, STrans *pTrans, SArray *pArray);
static int32_t mndTransExecuteRedoLogs(SMnode *pMnode, STrans *pTrans);
static int32_t mndTransExecuteUndoLogs(SMnode *pMnode, STrans *pTrans);
static int32_t mndTransExecuteRedoActions(SMnode *pMnode, STrans *pTrans);
static int32_t mndTransExecuteUndoActions(SMnode *pMnode, STrans *pTrans);
-static int32_t mndTransExecuteCommitLogs(SMnode *pMnode, STrans *pTrans);
+static int32_t mndTransExecuteCommitActions(SMnode *pMnode, STrans *pTrans);
static bool mndTransPerformPrepareStage(SMnode *pMnode, STrans *pTrans);
static bool mndTransPerformRedoLogStage(SMnode *pMnode, STrans *pTrans);
static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans);
static bool mndTransPerformUndoLogStage(SMnode *pMnode, STrans *pTrans);
static bool mndTransPerformUndoActionStage(SMnode *pMnode, STrans *pTrans);
-static bool mndTransPerformCommitLogStage(SMnode *pMnode, STrans *pTrans);
+static bool mndTransPerformCommitActionStage(SMnode *pMnode, STrans *pTrans);
static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans);
static bool mndTransPerformRollbackStage(SMnode *pMnode, STrans *pTrans);
static bool mndTransPerfromFinishedStage(SMnode *pMnode, STrans *pTrans);
@@ -83,40 +82,30 @@ int32_t mndInitTrans(SMnode *pMnode) {
void mndCleanupTrans(SMnode *pMnode) {}
-static SSdbRaw *mndTransActionEncode(STrans *pTrans) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
+static int32_t mndTransGetActionsSize(SArray *pArray) {
+ int32_t actionNum = taosArrayGetSize(pArray);
+ int32_t rawDataLen = 0;
- int32_t rawDataLen = sizeof(STrans) + TRANS_RESERVE_SIZE;
- int32_t redoLogNum = taosArrayGetSize(pTrans->redoLogs);
- int32_t undoLogNum = taosArrayGetSize(pTrans->undoLogs);
- int32_t commitLogNum = taosArrayGetSize(pTrans->commitLogs);
- int32_t redoActionNum = taosArrayGetSize(pTrans->redoActions);
- int32_t undoActionNum = taosArrayGetSize(pTrans->undoActions);
-
- for (int32_t i = 0; i < redoLogNum; ++i) {
- SSdbRaw *pTmp = taosArrayGetP(pTrans->redoLogs, i);
- rawDataLen += (sdbGetRawTotalSize(pTmp) + sizeof(int32_t));
- }
-
- for (int32_t i = 0; i < undoLogNum; ++i) {
- SSdbRaw *pTmp = taosArrayGetP(pTrans->undoLogs, i);
- rawDataLen += (sdbGetRawTotalSize(pTmp) + sizeof(int32_t));
+ for (int32_t i = 0; i < actionNum; ++i) {
+ STransAction *pAction = taosArrayGet(pArray, i);
+ if (pAction->actionType) {
+ rawDataLen += (sdbGetRawTotalSize(pAction->pRaw) + sizeof(int32_t));
+ } else {
+ rawDataLen += (sizeof(STransAction) + pAction->contLen);
+ }
+ rawDataLen += sizeof(pAction->actionType);
}
- for (int32_t i = 0; i < commitLogNum; ++i) {
- SSdbRaw *pTmp = taosArrayGetP(pTrans->commitLogs, i);
- rawDataLen += (sdbGetRawTotalSize(pTmp) + sizeof(int32_t));
- }
+ return rawDataLen;
+}
- for (int32_t i = 0; i < redoActionNum; ++i) {
- STransAction *pAction = taosArrayGet(pTrans->redoActions, i);
- rawDataLen += (sizeof(STransAction) + pAction->contLen);
- }
+static SSdbRaw *mndTransActionEncode(STrans *pTrans) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
- for (int32_t i = 0; i < undoActionNum; ++i) {
- STransAction *pAction = taosArrayGet(pTrans->undoActions, i);
- rawDataLen += (sizeof(STransAction) + pAction->contLen);
- }
+ int32_t rawDataLen = sizeof(STrans) + TRANS_RESERVE_SIZE;
+ rawDataLen += mndTransGetActionsSize(pTrans->redoActions);
+ rawDataLen += mndTransGetActionsSize(pTrans->undoActions);
+ rawDataLen += mndTransGetActionsSize(pTrans->commitActions);
SSdbRaw *pRaw = sdbAllocRaw(SDB_TRANS, TRANS_VER_NUMBER, rawDataLen);
if (pRaw == NULL) {
@@ -126,66 +115,85 @@ static SSdbRaw *mndTransActionEncode(STrans *pTrans) {
int32_t dataPos = 0;
SDB_SET_INT32(pRaw, dataPos, pTrans->id, _OVER)
-
- ETrnStage stage = pTrans->stage;
- if (stage == TRN_STAGE_REDO_LOG || stage == TRN_STAGE_REDO_ACTION) {
- stage = TRN_STAGE_PREPARE;
- } else if (stage == TRN_STAGE_UNDO_ACTION || stage == TRN_STAGE_UNDO_LOG) {
- stage = TRN_STAGE_ROLLBACK;
- } else if (stage == TRN_STAGE_COMMIT_LOG || stage == TRN_STAGE_FINISHED) {
- stage = TRN_STAGE_COMMIT;
- } else {
- }
-
- SDB_SET_INT16(pRaw, dataPos, stage, _OVER)
+ SDB_SET_INT16(pRaw, dataPos, pTrans->stage, _OVER)
SDB_SET_INT16(pRaw, dataPos, pTrans->policy, _OVER)
- SDB_SET_INT16(pRaw, dataPos, pTrans->type, _OVER)
+ SDB_SET_INT16(pRaw, dataPos, pTrans->conflict, _OVER)
+ SDB_SET_INT16(pRaw, dataPos, pTrans->exec, _OVER)
SDB_SET_INT64(pRaw, dataPos, pTrans->createdTime, _OVER)
- SDB_SET_INT64(pRaw, dataPos, pTrans->dbUid, _OVER)
SDB_SET_BINARY(pRaw, dataPos, pTrans->dbname, TSDB_DB_FNAME_LEN, _OVER)
- SDB_SET_INT32(pRaw, dataPos, redoLogNum, _OVER)
- SDB_SET_INT32(pRaw, dataPos, undoLogNum, _OVER)
- SDB_SET_INT32(pRaw, dataPos, commitLogNum, _OVER)
+ SDB_SET_INT32(pRaw, dataPos, pTrans->redoActionPos, _OVER)
+
+ int32_t redoActionNum = taosArrayGetSize(pTrans->redoActions);
+ int32_t undoActionNum = taosArrayGetSize(pTrans->undoActions);
+ int32_t commitActionNum = taosArrayGetSize(pTrans->commitActions);
SDB_SET_INT32(pRaw, dataPos, redoActionNum, _OVER)
SDB_SET_INT32(pRaw, dataPos, undoActionNum, _OVER)
-
- for (int32_t i = 0; i < redoLogNum; ++i) {
- SSdbRaw *pTmp = taosArrayGetP(pTrans->redoLogs, i);
- int32_t len = sdbGetRawTotalSize(pTmp);
- SDB_SET_INT32(pRaw, dataPos, len, _OVER)
- SDB_SET_BINARY(pRaw, dataPos, (void *)pTmp, len, _OVER)
- }
-
- for (int32_t i = 0; i < undoLogNum; ++i) {
- SSdbRaw *pTmp = taosArrayGetP(pTrans->undoLogs, i);
- int32_t len = sdbGetRawTotalSize(pTmp);
- SDB_SET_INT32(pRaw, dataPos, len, _OVER)
- SDB_SET_BINARY(pRaw, dataPos, (void *)pTmp, len, _OVER)
- }
-
- for (int32_t i = 0; i < commitLogNum; ++i) {
- SSdbRaw *pTmp = taosArrayGetP(pTrans->commitLogs, i);
- int32_t len = sdbGetRawTotalSize(pTmp);
- SDB_SET_INT32(pRaw, dataPos, len, _OVER)
- SDB_SET_BINARY(pRaw, dataPos, (void *)pTmp, len, _OVER)
- }
+ SDB_SET_INT32(pRaw, dataPos, commitActionNum, _OVER)
for (int32_t i = 0; i < redoActionNum; ++i) {
STransAction *pAction = taosArrayGet(pTrans->redoActions, i);
- SDB_SET_BINARY(pRaw, dataPos, (void *)&pAction->epSet, sizeof(SEpSet), _OVER)
- SDB_SET_INT16(pRaw, dataPos, pAction->msgType, _OVER)
+ SDB_SET_INT32(pRaw, dataPos, pAction->id, _OVER)
+ SDB_SET_INT32(pRaw, dataPos, pAction->errCode, _OVER)
SDB_SET_INT32(pRaw, dataPos, pAction->acceptableCode, _OVER)
- SDB_SET_INT32(pRaw, dataPos, pAction->contLen, _OVER)
- SDB_SET_BINARY(pRaw, dataPos, pAction->pCont, pAction->contLen, _OVER)
+ SDB_SET_INT8(pRaw, dataPos, pAction->actionType, _OVER)
+ SDB_SET_INT8(pRaw, dataPos, pAction->stage, _OVER)
+ if (pAction->actionType) {
+ int32_t len = sdbGetRawTotalSize(pAction->pRaw);
+ SDB_SET_INT8(pRaw, dataPos, pAction->rawWritten, _OVER)
+ SDB_SET_INT32(pRaw, dataPos, len, _OVER)
+ SDB_SET_BINARY(pRaw, dataPos, (void *)pAction->pRaw, len, _OVER)
+ } else {
+ SDB_SET_BINARY(pRaw, dataPos, (void *)&pAction->epSet, sizeof(SEpSet), _OVER)
+ SDB_SET_INT16(pRaw, dataPos, pAction->msgType, _OVER)
+ SDB_SET_INT8(pRaw, dataPos, pAction->msgSent, _OVER)
+ SDB_SET_INT8(pRaw, dataPos, pAction->msgReceived, _OVER)
+ SDB_SET_INT32(pRaw, dataPos, pAction->contLen, _OVER)
+ SDB_SET_BINARY(pRaw, dataPos, pAction->pCont, pAction->contLen, _OVER)
+ }
}
for (int32_t i = 0; i < undoActionNum; ++i) {
STransAction *pAction = taosArrayGet(pTrans->undoActions, i);
- SDB_SET_BINARY(pRaw, dataPos, (void *)&pAction->epSet, sizeof(SEpSet), _OVER)
- SDB_SET_INT16(pRaw, dataPos, pAction->msgType, _OVER)
+ SDB_SET_INT32(pRaw, dataPos, pAction->id, _OVER)
+ SDB_SET_INT32(pRaw, dataPos, pAction->errCode, _OVER)
SDB_SET_INT32(pRaw, dataPos, pAction->acceptableCode, _OVER)
- SDB_SET_INT32(pRaw, dataPos, pAction->contLen, _OVER)
- SDB_SET_BINARY(pRaw, dataPos, (void *)pAction->pCont, pAction->contLen, _OVER)
+ SDB_SET_INT8(pRaw, dataPos, pAction->actionType, _OVER)
+ SDB_SET_INT8(pRaw, dataPos, pAction->stage, _OVER)
+ if (pAction->actionType) {
+ int32_t len = sdbGetRawTotalSize(pAction->pRaw);
+ SDB_SET_INT8(pRaw, dataPos, pAction->rawWritten, _OVER)
+ SDB_SET_INT32(pRaw, dataPos, len, _OVER)
+ SDB_SET_BINARY(pRaw, dataPos, (void *)pAction->pRaw, len, _OVER)
+ } else {
+ SDB_SET_BINARY(pRaw, dataPos, (void *)&pAction->epSet, sizeof(SEpSet), _OVER)
+ SDB_SET_INT16(pRaw, dataPos, pAction->msgType, _OVER)
+ SDB_SET_INT8(pRaw, dataPos, pAction->msgSent, _OVER)
+ SDB_SET_INT8(pRaw, dataPos, pAction->msgReceived, _OVER)
+ SDB_SET_INT32(pRaw, dataPos, pAction->contLen, _OVER)
+ SDB_SET_BINARY(pRaw, dataPos, pAction->pCont, pAction->contLen, _OVER)
+ }
+ }
+
+ for (int32_t i = 0; i < commitActionNum; ++i) {
+ STransAction *pAction = taosArrayGet(pTrans->commitActions, i);
+ SDB_SET_INT32(pRaw, dataPos, pAction->id, _OVER)
+ SDB_SET_INT32(pRaw, dataPos, pAction->errCode, _OVER)
+ SDB_SET_INT32(pRaw, dataPos, pAction->acceptableCode, _OVER)
+ SDB_SET_INT8(pRaw, dataPos, pAction->actionType, _OVER)
+ SDB_SET_INT8(pRaw, dataPos, pAction->stage, _OVER)
+ if (pAction->actionType) {
+ int32_t len = sdbGetRawTotalSize(pAction->pRaw);
+ SDB_SET_INT8(pRaw, dataPos, pAction->rawWritten, _OVER)
+ SDB_SET_INT32(pRaw, dataPos, len, _OVER)
+ SDB_SET_BINARY(pRaw, dataPos, (void *)pAction->pRaw, len, _OVER)
+ } else {
+ SDB_SET_BINARY(pRaw, dataPos, (void *)&pAction->epSet, sizeof(SEpSet), _OVER)
+ SDB_SET_INT16(pRaw, dataPos, pAction->msgType, _OVER)
+ SDB_SET_INT8(pRaw, dataPos, pAction->msgSent, _OVER)
+ SDB_SET_INT8(pRaw, dataPos, pAction->msgReceived, _OVER)
+ SDB_SET_INT32(pRaw, dataPos, pAction->contLen, _OVER)
+ SDB_SET_BINARY(pRaw, dataPos, pAction->pCont, pAction->contLen, _OVER)
+ }
}
SDB_SET_INT32(pRaw, dataPos, pTrans->startFunc, _OVER)
@@ -219,11 +227,9 @@ static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw) {
char *pData = NULL;
int32_t dataLen = 0;
int8_t sver = 0;
- int32_t redoLogNum = 0;
- int32_t undoLogNum = 0;
- int32_t commitLogNum = 0;
int32_t redoActionNum = 0;
int32_t undoActionNum = 0;
+ int32_t commitActionNum = 0;
int32_t dataPos = 0;
STransAction action = {0};
@@ -244,86 +250,116 @@ static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw) {
int16_t stage = 0;
int16_t policy = 0;
- int16_t type = 0;
+ int16_t conflict = 0;
+ int16_t exec = 0;
SDB_GET_INT16(pRaw, dataPos, &stage, _OVER)
SDB_GET_INT16(pRaw, dataPos, &policy, _OVER)
- SDB_GET_INT16(pRaw, dataPos, &type, _OVER)
+ SDB_GET_INT16(pRaw, dataPos, &conflict, _OVER)
+ SDB_GET_INT16(pRaw, dataPos, &exec, _OVER)
pTrans->stage = stage;
pTrans->policy = policy;
- pTrans->type = type;
+ pTrans->conflict = conflict;
+ pTrans->exec = exec;
SDB_GET_INT64(pRaw, dataPos, &pTrans->createdTime, _OVER)
- SDB_GET_INT64(pRaw, dataPos, &pTrans->dbUid, _OVER)
SDB_GET_BINARY(pRaw, dataPos, pTrans->dbname, TSDB_DB_FNAME_LEN, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &redoLogNum, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &undoLogNum, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &commitLogNum, _OVER)
+ SDB_GET_INT32(pRaw, dataPos, &pTrans->redoActionPos, _OVER)
SDB_GET_INT32(pRaw, dataPos, &redoActionNum, _OVER)
SDB_GET_INT32(pRaw, dataPos, &undoActionNum, _OVER)
+ SDB_GET_INT32(pRaw, dataPos, &commitActionNum, _OVER)
- pTrans->redoLogs = taosArrayInit(redoLogNum, sizeof(void *));
- pTrans->undoLogs = taosArrayInit(undoLogNum, sizeof(void *));
- pTrans->commitLogs = taosArrayInit(commitLogNum, sizeof(void *));
pTrans->redoActions = taosArrayInit(redoActionNum, sizeof(STransAction));
pTrans->undoActions = taosArrayInit(undoActionNum, sizeof(STransAction));
+ pTrans->commitActions = taosArrayInit(commitActionNum, sizeof(STransAction));
- if (pTrans->redoLogs == NULL) goto _OVER;
- if (pTrans->undoLogs == NULL) goto _OVER;
- if (pTrans->commitLogs == NULL) goto _OVER;
if (pTrans->redoActions == NULL) goto _OVER;
if (pTrans->undoActions == NULL) goto _OVER;
-
- for (int32_t i = 0; i < redoLogNum; ++i) {
- SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER)
- pData = taosMemoryMalloc(dataLen);
- if (pData == NULL) goto _OVER;
- mTrace("raw:%p, is created", pData);
- SDB_GET_BINARY(pRaw, dataPos, pData, dataLen, _OVER);
- if (taosArrayPush(pTrans->redoLogs, &pData) == NULL) goto _OVER;
- pData = NULL;
- }
-
- for (int32_t i = 0; i < undoLogNum; ++i) {
- SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER)
- pData = taosMemoryMalloc(dataLen);
- if (pData == NULL) goto _OVER;
- mTrace("raw:%p, is created", pData);
- SDB_GET_BINARY(pRaw, dataPos, pData, dataLen, _OVER);
- if (taosArrayPush(pTrans->undoLogs, &pData) == NULL) goto _OVER;
- pData = NULL;
- }
-
- for (int32_t i = 0; i < commitLogNum; ++i) {
- SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER)
- pData = taosMemoryMalloc(dataLen);
- if (pData == NULL) goto _OVER;
- mTrace("raw:%p, is created", pData);
- SDB_GET_BINARY(pRaw, dataPos, pData, dataLen, _OVER);
- if (taosArrayPush(pTrans->commitLogs, &pData) == NULL) goto _OVER;
- pData = NULL;
- }
+ if (pTrans->commitActions == NULL) goto _OVER;
for (int32_t i = 0; i < redoActionNum; ++i) {
- SDB_GET_BINARY(pRaw, dataPos, (void *)&action.epSet, sizeof(SEpSet), _OVER);
- SDB_GET_INT16(pRaw, dataPos, &action.msgType, _OVER)
+ SDB_GET_INT32(pRaw, dataPos, &action.id, _OVER)
+ SDB_GET_INT32(pRaw, dataPos, &action.errCode, _OVER)
SDB_GET_INT32(pRaw, dataPos, &action.acceptableCode, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &action.contLen, _OVER)
- action.pCont = taosMemoryMalloc(action.contLen);
- if (action.pCont == NULL) goto _OVER;
- SDB_GET_BINARY(pRaw, dataPos, action.pCont, action.contLen, _OVER);
- if (taosArrayPush(pTrans->redoActions, &action) == NULL) goto _OVER;
- action.pCont = NULL;
+ SDB_GET_INT8(pRaw, dataPos, &action.actionType, _OVER)
+ SDB_GET_INT8(pRaw, dataPos, &action.stage, _OVER)
+ if (action.actionType) {
+ SDB_GET_INT8(pRaw, dataPos, &action.rawWritten, _OVER)
+ SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER)
+ action.pRaw = taosMemoryMalloc(dataLen);
+ if (action.pRaw == NULL) goto _OVER;
+ mTrace("raw:%p, is created", pData);
+ SDB_GET_BINARY(pRaw, dataPos, (void *)action.pRaw, dataLen, _OVER);
+ if (taosArrayPush(pTrans->redoActions, &action) == NULL) goto _OVER;
+ action.pRaw = NULL;
+ } else {
+ SDB_GET_BINARY(pRaw, dataPos, (void *)&action.epSet, sizeof(SEpSet), _OVER);
+ SDB_GET_INT16(pRaw, dataPos, &action.msgType, _OVER)
+ SDB_GET_INT8(pRaw, dataPos, &action.msgSent, _OVER)
+ SDB_GET_INT8(pRaw, dataPos, &action.msgReceived, _OVER)
+ SDB_GET_INT32(pRaw, dataPos, &action.contLen, _OVER)
+ action.pCont = taosMemoryMalloc(action.contLen);
+ if (action.pCont == NULL) goto _OVER;
+ SDB_GET_BINARY(pRaw, dataPos, action.pCont, action.contLen, _OVER);
+ if (taosArrayPush(pTrans->redoActions, &action) == NULL) goto _OVER;
+ action.pCont = NULL;
+ }
}
for (int32_t i = 0; i < undoActionNum; ++i) {
- SDB_GET_BINARY(pRaw, dataPos, (void *)&action.epSet, sizeof(SEpSet), _OVER);
- SDB_GET_INT16(pRaw, dataPos, &action.msgType, _OVER)
+ SDB_GET_INT32(pRaw, dataPos, &action.id, _OVER)
+ SDB_GET_INT32(pRaw, dataPos, &action.errCode, _OVER)
+ SDB_GET_INT32(pRaw, dataPos, &action.acceptableCode, _OVER)
+ SDB_GET_INT8(pRaw, dataPos, &action.actionType, _OVER)
+ SDB_GET_INT8(pRaw, dataPos, &action.stage, _OVER)
+ if (action.actionType) {
+ SDB_GET_INT8(pRaw, dataPos, &action.rawWritten, _OVER)
+ SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER)
+ action.pRaw = taosMemoryMalloc(dataLen);
+ if (action.pRaw == NULL) goto _OVER;
+ mTrace("raw:%p, is created", pData);
+ SDB_GET_BINARY(pRaw, dataPos, (void *)action.pRaw, dataLen, _OVER);
+ if (taosArrayPush(pTrans->undoActions, &action) == NULL) goto _OVER;
+ action.pRaw = NULL;
+ } else {
+ SDB_GET_BINARY(pRaw, dataPos, (void *)&action.epSet, sizeof(SEpSet), _OVER);
+ SDB_GET_INT16(pRaw, dataPos, &action.msgType, _OVER)
+ SDB_GET_INT8(pRaw, dataPos, &action.msgSent, _OVER)
+ SDB_GET_INT8(pRaw, dataPos, &action.msgReceived, _OVER)
+ SDB_GET_INT32(pRaw, dataPos, &action.contLen, _OVER)
+ action.pCont = taosMemoryMalloc(action.contLen);
+ if (action.pCont == NULL) goto _OVER;
+ SDB_GET_BINARY(pRaw, dataPos, action.pCont, action.contLen, _OVER);
+ if (taosArrayPush(pTrans->undoActions, &action) == NULL) goto _OVER;
+ action.pCont = NULL;
+ }
+ }
+
+ for (int32_t i = 0; i < commitActionNum; ++i) {
+ SDB_GET_INT32(pRaw, dataPos, &action.id, _OVER)
+ SDB_GET_INT32(pRaw, dataPos, &action.errCode, _OVER)
SDB_GET_INT32(pRaw, dataPos, &action.acceptableCode, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &action.contLen, _OVER)
- action.pCont = taosMemoryMalloc(action.contLen);
- if (action.pCont == NULL) goto _OVER;
- SDB_GET_BINARY(pRaw, dataPos, action.pCont, action.contLen, _OVER);
- if (taosArrayPush(pTrans->undoActions, &action) == NULL) goto _OVER;
- action.pCont = NULL;
+ SDB_GET_INT8(pRaw, dataPos, &action.actionType, _OVER)
+ SDB_GET_INT8(pRaw, dataPos, &action.stage, _OVER)
+ if (action.actionType) {
+ SDB_GET_INT8(pRaw, dataPos, &action.rawWritten, _OVER)
+ SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER)
+ action.pRaw = taosMemoryMalloc(dataLen);
+ if (action.pRaw == NULL) goto _OVER;
+ mTrace("raw:%p, is created", action.pRaw);
+ SDB_GET_BINARY(pRaw, dataPos, (void *)action.pRaw, dataLen, _OVER);
+ if (taosArrayPush(pTrans->commitActions, &action) == NULL) goto _OVER;
+ action.pRaw = NULL;
+ } else {
+ SDB_GET_BINARY(pRaw, dataPos, (void *)&action.epSet, sizeof(SEpSet), _OVER);
+ SDB_GET_INT16(pRaw, dataPos, &action.msgType, _OVER)
+ SDB_GET_INT8(pRaw, dataPos, &action.msgSent, _OVER)
+ SDB_GET_INT8(pRaw, dataPos, &action.msgReceived, _OVER)
+ SDB_GET_INT32(pRaw, dataPos, &action.contLen, _OVER)
+ action.pCont = taosMemoryMalloc(action.contLen);
+ if (action.pCont == NULL) goto _OVER;
+ SDB_GET_BINARY(pRaw, dataPos, action.pCont, action.contLen, _OVER);
+ if (taosArrayPush(pTrans->commitActions, &action) == NULL) goto _OVER;
+ action.pCont = NULL;
+ }
}
SDB_GET_INT32(pRaw, dataPos, &pTrans->startFunc, _OVER)
@@ -343,7 +379,6 @@ _OVER:
mError("trans:%d, failed to parse from raw:%p since %s", pTrans->id, pRaw, terrstr());
mndTransDropData(pTrans);
taosMemoryFreeClear(pRow);
- taosMemoryFreeClear(pData);
taosMemoryFreeClear(action.pCont);
return NULL;
}
@@ -356,20 +391,16 @@ static const char *mndTransStr(ETrnStage stage) {
switch (stage) {
case TRN_STAGE_PREPARE:
return "prepare";
- case TRN_STAGE_REDO_LOG:
- return "redoLog";
case TRN_STAGE_REDO_ACTION:
return "redoAction";
- case TRN_STAGE_COMMIT:
- return "commit";
- case TRN_STAGE_COMMIT_LOG:
- return "commitLog";
- case TRN_STAGE_UNDO_ACTION:
- return "undoAction";
- case TRN_STAGE_UNDO_LOG:
- return "undoLog";
case TRN_STAGE_ROLLBACK:
return "rollback";
+ case TRN_STAGE_UNDO_ACTION:
+ return "undoAction";
+ case TRN_STAGE_COMMIT:
+ return "commit";
+ case TRN_STAGE_COMMIT_ACTION:
+ return "commitAction";
case TRN_STAGE_FINISHED:
return "finished";
default:
@@ -377,81 +408,6 @@ static const char *mndTransStr(ETrnStage stage) {
}
}
-static const char *mndTransType(ETrnType type) {
- switch (type) {
- case TRN_TYPE_CREATE_USER:
- return "create-user";
- case TRN_TYPE_ALTER_USER:
- return "alter-user";
- case TRN_TYPE_DROP_USER:
- return "drop-user";
- case TRN_TYPE_CREATE_FUNC:
- return "create-func";
- case TRN_TYPE_DROP_FUNC:
- return "drop-func";
- case TRN_TYPE_CREATE_SNODE:
- return "create-snode";
- case TRN_TYPE_DROP_SNODE:
- return "drop-snode";
- case TRN_TYPE_CREATE_QNODE:
- return "create-qnode";
- case TRN_TYPE_DROP_QNODE:
- return "drop-qnode";
- case TRN_TYPE_CREATE_BNODE:
- return "create-bnode";
- case TRN_TYPE_DROP_BNODE:
- return "drop-bnode";
- case TRN_TYPE_CREATE_MNODE:
- return "create-mnode";
- case TRN_TYPE_DROP_MNODE:
- return "drop-mnode";
- case TRN_TYPE_CREATE_TOPIC:
- return "create-topic";
- case TRN_TYPE_DROP_TOPIC:
- return "drop-topic";
- case TRN_TYPE_SUBSCRIBE:
- return "subscribe";
- case TRN_TYPE_REBALANCE:
- return "rebalance";
- case TRN_TYPE_COMMIT_OFFSET:
- return "commit-offset";
- case TRN_TYPE_CREATE_STREAM:
- return "create-stream";
- case TRN_TYPE_DROP_STREAM:
- return "drop-stream";
- case TRN_TYPE_CONSUMER_LOST:
- return "consumer-lost";
- case TRN_TYPE_CONSUMER_RECOVER:
- return "consumer-recover";
- case TRN_TYPE_CREATE_DNODE:
- return "create-qnode";
- case TRN_TYPE_DROP_DNODE:
- return "drop-qnode";
- case TRN_TYPE_CREATE_DB:
- return "create-db";
- case TRN_TYPE_ALTER_DB:
- return "alter-db";
- case TRN_TYPE_DROP_DB:
- return "drop-db";
- case TRN_TYPE_SPLIT_VGROUP:
- return "split-vgroup";
- case TRN_TYPE_MERGE_VGROUP:
- return "merge-vgroup";
- case TRN_TYPE_CREATE_STB:
- return "create-stb";
- case TRN_TYPE_ALTER_STB:
- return "alter-stb";
- case TRN_TYPE_DROP_STB:
- return "drop-stb";
- case TRN_TYPE_CREATE_SMA:
- return "create-sma";
- case TRN_TYPE_DROP_SMA:
- return "drop-sma";
- default:
- return "invalid";
- }
-}
-
static void mndTransTestStartFunc(SMnode *pMnode, void *param, int32_t paramLen) {
mInfo("test trans start, param:%s, len:%d", (char *)param, paramLen);
}
@@ -460,15 +416,15 @@ static void mndTransTestStopFunc(SMnode *pMnode, void *param, int32_t paramLen)
mInfo("test trans stop, param:%s, len:%d", (char *)param, paramLen);
}
-static TransCbFp mndTransGetCbFp(ETrnFuncType ftype) {
+static TransCbFp mndTransGetCbFp(ETrnFunc ftype) {
switch (ftype) {
- case TEST_TRANS_START_FUNC:
+ case TRANS_START_FUNC_TEST:
return mndTransTestStartFunc;
- case TEST_TRANS_STOP_FUNC:
+ case TRANS_STOP_FUNC_TEST:
return mndTransTestStopFunc;
- case MQ_REB_TRANS_START_FUNC:
+ case TRANS_START_FUNC_MQ_REB:
return mndRebCntInc;
- case MQ_REB_TRANS_STOP_FUNC:
+ case TRANS_STOP_FUNC_MQ_REB:
return mndRebCntDec;
default:
return NULL;
@@ -489,11 +445,9 @@ static int32_t mndTransActionInsert(SSdb *pSdb, STrans *pTrans) {
}
static void mndTransDropData(STrans *pTrans) {
- mndTransDropLogs(pTrans->redoLogs);
- mndTransDropLogs(pTrans->undoLogs);
- mndTransDropLogs(pTrans->commitLogs);
mndTransDropActions(pTrans->redoActions);
mndTransDropActions(pTrans->undoActions);
+ mndTransDropActions(pTrans->commitActions);
if (pTrans->rpcRsp != NULL) {
taosMemoryFree(pTrans->rpcRsp);
pTrans->rpcRsp = NULL;
@@ -507,7 +461,7 @@ static void mndTransDropData(STrans *pTrans) {
}
static int32_t mndTransActionDelete(SSdb *pSdb, STrans *pTrans, bool callFunc) {
- mDebug("trans:%d, perform delete action, row:%p stage:%s callfunc:%d", pTrans->id, pTrans, mndTransStr(pTrans->stage),
+ mTrace("trans:%d, perform delete action, row:%p stage:%s callfunc:%d", pTrans->id, pTrans, mndTransStr(pTrans->stage),
callFunc);
if (pTrans->stopFunc > 0 && callFunc) {
TransCbFp fp = mndTransGetCbFp(pTrans->stopFunc);
@@ -520,20 +474,35 @@ static int32_t mndTransActionDelete(SSdb *pSdb, STrans *pTrans, bool callFunc) {
return 0;
}
-static int32_t mndTransActionUpdate(SSdb *pSdb, STrans *pOld, STrans *pNew) {
- if (pNew->stage == TRN_STAGE_COMMIT) {
- pNew->stage = TRN_STAGE_COMMIT_LOG;
- mTrace("trans:%d, stage from %s to %s", pNew->id, mndTransStr(TRN_STAGE_COMMIT), mndTransStr(TRN_STAGE_COMMIT_LOG));
- }
-
- if (pNew->stage == TRN_STAGE_ROLLBACK) {
- pNew->stage = TRN_STAGE_FINISHED;
- mTrace("trans:%d, stage from %s to %s", pNew->id, mndTransStr(TRN_STAGE_ROLLBACK), mndTransStr(TRN_STAGE_FINISHED));
+static void mndTransUpdateActions(SArray *pOldArray, SArray *pNewArray) {
+ for (int32_t i = 0; i < taosArrayGetSize(pOldArray); ++i) {
+ STransAction *pOldAction = taosArrayGet(pOldArray, i);
+ STransAction *pNewAction = taosArrayGet(pNewArray, i);
+ pOldAction->rawWritten = pNewAction->rawWritten;
+ pOldAction->msgSent = pNewAction->msgSent;
+ pOldAction->msgReceived = pNewAction->msgReceived;
+ pOldAction->errCode = pNewAction->errCode;
}
+}
+static int32_t mndTransActionUpdate(SSdb *pSdb, STrans *pOld, STrans *pNew) {
mTrace("trans:%d, perform update action, old row:%p stage:%s, new row:%p stage:%s", pOld->id, pOld,
mndTransStr(pOld->stage), pNew, mndTransStr(pNew->stage));
+ mndTransUpdateActions(pOld->redoActions, pNew->redoActions);
+ mndTransUpdateActions(pOld->undoActions, pNew->undoActions);
+ mndTransUpdateActions(pOld->commitActions, pNew->commitActions);
pOld->stage = pNew->stage;
+ pOld->redoActionPos = pNew->redoActionPos;
+
+ if (pOld->stage == TRN_STAGE_COMMIT) {
+ pOld->stage = TRN_STAGE_COMMIT_ACTION;
+ mTrace("trans:%d, stage from commit to commitAction", pNew->id);
+ }
+
+ if (pOld->stage == TRN_STAGE_ROLLBACK) {
+ pOld->stage = TRN_STAGE_FINISHED;
+ mTrace("trans:%d, stage from rollback to finished", pNew->id);
+ }
return 0;
}
@@ -550,7 +519,7 @@ void mndReleaseTrans(SMnode *pMnode, STrans *pTrans) {
sdbRelease(pSdb, pTrans);
}
-STrans *mndTransCreate(SMnode *pMnode, ETrnPolicy policy, ETrnType type, const SRpcMsg *pReq) {
+STrans *mndTransCreate(SMnode *pMnode, ETrnPolicy policy, ETrnConflct conflict, const SRpcMsg *pReq) {
STrans *pTrans = taosMemoryCalloc(1, sizeof(STrans));
if (pTrans == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@@ -561,41 +530,33 @@ STrans *mndTransCreate(SMnode *pMnode, ETrnPolicy policy, ETrnType type, const S
pTrans->id = sdbGetMaxId(pMnode->pSdb, SDB_TRANS);
pTrans->stage = TRN_STAGE_PREPARE;
pTrans->policy = policy;
- pTrans->type = type;
+ pTrans->conflict = conflict;
+ pTrans->exec = TRN_EXEC_PRARLLEL;
pTrans->createdTime = taosGetTimestampMs();
- pTrans->rpcInfo = pReq->info;
- pTrans->redoLogs = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(void *));
- pTrans->undoLogs = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(void *));
- pTrans->commitLogs = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(void *));
pTrans->redoActions = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(STransAction));
pTrans->undoActions = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(STransAction));
+ pTrans->commitActions = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(STransAction));
- if (pTrans->redoLogs == NULL || pTrans->undoLogs == NULL || pTrans->commitLogs == NULL ||
- pTrans->redoActions == NULL || pTrans->undoActions == NULL) {
+ if (pTrans->redoActions == NULL || pTrans->undoActions == NULL || pTrans->commitActions == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
mError("failed to create transaction since %s", terrstr());
return NULL;
}
- mDebug("trans:%d, local object is created, data:%p", pTrans->id, pTrans);
+ if (pReq != NULL) pTrans->rpcInfo = pReq->info;
+ mTrace("trans:%d, local object is created, data:%p", pTrans->id, pTrans);
return pTrans;
}
-static void mndTransDropLogs(SArray *pArray) {
- int32_t size = taosArrayGetSize(pArray);
- for (int32_t i = 0; i < size; ++i) {
- SSdbRaw *pRaw = taosArrayGetP(pArray, i);
- sdbFreeRaw(pRaw);
- }
-
- taosArrayDestroy(pArray);
-}
-
static void mndTransDropActions(SArray *pArray) {
int32_t size = taosArrayGetSize(pArray);
for (int32_t i = 0; i < size; ++i) {
STransAction *pAction = taosArrayGet(pArray, i);
- taosMemoryFreeClear(pAction->pCont);
+ if (pAction->actionType) {
+ taosMemoryFreeClear(pAction->pRaw);
+ } else {
+ taosMemoryFreeClear(pAction->pCont);
+ }
}
taosArrayDestroy(pArray);
@@ -604,18 +565,15 @@ static void mndTransDropActions(SArray *pArray) {
void mndTransDrop(STrans *pTrans) {
if (pTrans != NULL) {
mndTransDropData(pTrans);
- mDebug("trans:%d, local object is freed, data:%p", pTrans->id, pTrans);
+ mTrace("trans:%d, local object is freed, data:%p", pTrans->id, pTrans);
taosMemoryFreeClear(pTrans);
}
}
-static int32_t mndTransAppendLog(SArray *pArray, SSdbRaw *pRaw) {
- if (pArray == NULL || pRaw == NULL) {
- terrno = TSDB_CODE_INVALID_PARA;
- return -1;
- }
+static int32_t mndTransAppendAction(SArray *pArray, STransAction *pAction) {
+ pAction->id = taosArrayGetSize(pArray);
- void *ptr = taosArrayPush(pArray, &pRaw);
+ void *ptr = taosArrayPush(pArray, pAction);
if (ptr == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
@@ -624,27 +582,28 @@ static int32_t mndTransAppendLog(SArray *pArray, SSdbRaw *pRaw) {
return 0;
}
-int32_t mndTransAppendRedolog(STrans *pTrans, SSdbRaw *pRaw) { return mndTransAppendLog(pTrans->redoLogs, pRaw); }
-
-int32_t mndTransAppendUndolog(STrans *pTrans, SSdbRaw *pRaw) { return mndTransAppendLog(pTrans->undoLogs, pRaw); }
-
-int32_t mndTransAppendCommitlog(STrans *pTrans, SSdbRaw *pRaw) { return mndTransAppendLog(pTrans->commitLogs, pRaw); }
+int32_t mndTransAppendRedolog(STrans *pTrans, SSdbRaw *pRaw) {
+ STransAction action = {.stage = TRN_STAGE_REDO_ACTION, .actionType = true, .pRaw = pRaw};
+ return mndTransAppendAction(pTrans->redoActions, &action);
+}
-static int32_t mndTransAppendAction(SArray *pArray, STransAction *pAction) {
- void *ptr = taosArrayPush(pArray, pAction);
- if (ptr == NULL) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- return -1;
- }
+int32_t mndTransAppendUndolog(STrans *pTrans, SSdbRaw *pRaw) {
+ STransAction action = {.stage = TRN_STAGE_UNDO_ACTION, .actionType = true, .pRaw = pRaw};
+ return mndTransAppendAction(pTrans->undoActions, &action);
+}
- return 0;
+int32_t mndTransAppendCommitlog(STrans *pTrans, SSdbRaw *pRaw) {
+ STransAction action = {.stage = TRN_STAGE_COMMIT_ACTION, .actionType = true, .pRaw = pRaw};
+ return mndTransAppendAction(pTrans->commitActions, &action);
}
int32_t mndTransAppendRedoAction(STrans *pTrans, STransAction *pAction) {
+ pAction->stage = TRN_STAGE_REDO_ACTION;
return mndTransAppendAction(pTrans->redoActions, pAction);
}
int32_t mndTransAppendUndoAction(STrans *pTrans, STransAction *pAction) {
+ pAction->stage = TRN_STAGE_UNDO_ACTION;
return mndTransAppendAction(pTrans->undoActions, pAction);
}
@@ -653,17 +612,16 @@ void mndTransSetRpcRsp(STrans *pTrans, void *pCont, int32_t contLen) {
pTrans->rpcRspLen = contLen;
}
-void mndTransSetCb(STrans *pTrans, ETrnFuncType startFunc, ETrnFuncType stopFunc, void *param, int32_t paramLen) {
+void mndTransSetCb(STrans *pTrans, ETrnFunc startFunc, ETrnFunc stopFunc, void *param, int32_t paramLen) {
pTrans->startFunc = startFunc;
pTrans->stopFunc = stopFunc;
pTrans->param = param;
pTrans->paramLen = paramLen;
}
-void mndTransSetDbInfo(STrans *pTrans, SDbObj *pDb) {
- pTrans->dbUid = pDb->uid;
- memcpy(pTrans->dbname, pDb->name, TSDB_DB_FNAME_LEN);
-}
+void mndTransSetDbName(STrans *pTrans, const char *dbname) { memcpy(pTrans->dbname, dbname, TSDB_DB_FNAME_LEN); }
+
+void mndTransSetSerial(STrans *pTrans) { pTrans->exec = TRN_EXEC_SERIAL; }
static int32_t mndTransSync(SMnode *pMnode, STrans *pTrans) {
SSdbRaw *pRaw = mndTransActionEncode(pTrans);
@@ -673,95 +631,63 @@ static int32_t mndTransSync(SMnode *pMnode, STrans *pTrans) {
}
sdbSetRawStatus(pRaw, SDB_STATUS_READY);
- mDebug("trans:%d, sync to other nodes", pTrans->id);
- int32_t code = mndSyncPropose(pMnode, pRaw);
+ mDebug("trans:%d, sync to other mnodes", pTrans->id);
+ int32_t code = mndSyncPropose(pMnode, pRaw, pTrans->id);
if (code != 0) {
mError("trans:%d, failed to sync since %s", pTrans->id, terrstr());
sdbFreeRaw(pRaw);
return -1;
}
+ sdbFreeRaw(pRaw);
mDebug("trans:%d, sync finished", pTrans->id);
return 0;
}
-static bool mndIsBasicTrans(STrans *pTrans) {
- return pTrans->type > TRN_TYPE_BASIC_SCOPE && pTrans->type < TRN_TYPE_BASIC_SCOPE_END;
-}
-
-static bool mndIsGlobalTrans(STrans *pTrans) {
- return pTrans->type > TRN_TYPE_GLOBAL_SCOPE && pTrans->type < TRN_TYPE_GLOBAL_SCOPE_END;
-}
-
-static bool mndIsDbTrans(STrans *pTrans) {
- return pTrans->type > TRN_TYPE_DB_SCOPE && pTrans->type < TRN_TYPE_DB_SCOPE_END;
-}
-
-static bool mndIsStbTrans(STrans *pTrans) {
- return pTrans->type > TRN_TYPE_STB_SCOPE && pTrans->type < TRN_TYPE_STB_SCOPE_END;
-}
-
-static bool mndCheckTransConflict(SMnode *pMnode, STrans *pNewTrans) {
+static bool mndCheckTransConflict(SMnode *pMnode, STrans *pNew) {
STrans *pTrans = NULL;
void *pIter = NULL;
bool conflict = false;
- if (mndIsBasicTrans(pNewTrans)) return conflict;
+ if (pNew->conflict == TRN_CONFLICT_NOTHING) return conflict;
while (1) {
pIter = sdbFetch(pMnode->pSdb, SDB_TRANS, pIter, (void **)&pTrans);
if (pIter == NULL) break;
- if (mndIsGlobalTrans(pNewTrans)) {
- if (mndIsDbTrans(pTrans) || mndIsStbTrans(pTrans)) {
- mError("trans:%d, can't execute since trans:%d in progress db:%s", pNewTrans->id, pTrans->id, pTrans->dbname);
- conflict = true;
- } else {
- }
+ if (pNew->conflict == TRN_CONFLICT_GLOBAL) conflict = true;
+ if (pNew->conflict == TRN_CONFLICT_DB) {
+ if (pTrans->conflict == TRN_CONFLICT_GLOBAL) conflict = true;
+ if (pTrans->conflict == TRN_CONFLICT_DB && strcmp(pNew->dbname, pTrans->dbname) == 0) conflict = true;
+ if (pTrans->conflict == TRN_CONFLICT_DB_INSIDE && strcmp(pNew->dbname, pTrans->dbname) == 0) conflict = true;
}
-
- else if (mndIsDbTrans(pNewTrans)) {
- if (mndIsGlobalTrans(pTrans)) {
- mError("trans:%d, can't execute since trans:%d in progress", pNewTrans->id, pTrans->id);
- conflict = true;
- } else if (mndIsDbTrans(pTrans) || mndIsStbTrans(pTrans)) {
- if (pNewTrans->dbUid == pTrans->dbUid) {
- mError("trans:%d, can't execute since trans:%d in progress db:%s", pNewTrans->id, pTrans->id, pTrans->dbname);
- conflict = true;
- }
- } else {
- }
- }
-
- else if (mndIsStbTrans(pNewTrans)) {
- if (mndIsGlobalTrans(pTrans)) {
- mError("trans:%d, can't execute since trans:%d in progress", pNewTrans->id, pTrans->id);
- conflict = true;
- } else if (mndIsDbTrans(pTrans)) {
- if (pNewTrans->dbUid == pTrans->dbUid) {
- mError("trans:%d, can't execute since trans:%d in progress db:%s", pNewTrans->id, pTrans->id, pTrans->dbname);
- conflict = true;
- }
- } else {
- }
+ if (pNew->conflict == TRN_CONFLICT_DB_INSIDE) {
+ if (pTrans->conflict == TRN_CONFLICT_GLOBAL) conflict = true;
+ if (pTrans->conflict == TRN_CONFLICT_DB && strcmp(pNew->dbname, pTrans->dbname) == 0) conflict = true;
}
-
+ mError("trans:%d, can't execute since conflict with trans:%d, db:%s", pNew->id, pTrans->id, pTrans->dbname);
sdbRelease(pMnode->pSdb, pTrans);
}
- sdbCancelFetch(pMnode->pSdb, pIter);
- sdbRelease(pMnode->pSdb, pTrans);
return conflict;
}
int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans) {
+ if (pTrans->conflict == TRN_CONFLICT_DB || pTrans->conflict == TRN_CONFLICT_DB_INSIDE) {
+ if (strlen(pTrans->dbname) == 0) {
+ terrno = TSDB_CODE_MND_TRANS_CONFLICT;
+ mError("trans:%d, failed to prepare conflict db not set", pTrans->id);
+ return -1;
+ }
+ }
+
if (mndCheckTransConflict(pMnode, pTrans)) {
terrno = TSDB_CODE_MND_TRANS_CONFLICT;
mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr());
return -1;
}
- if (taosArrayGetSize(pTrans->commitLogs) <= 0) {
+ if (taosArrayGetSize(pTrans->commitActions) <= 0) {
terrno = TSDB_CODE_MND_TRANS_CLOG_IS_NULL;
mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr());
return -1;
@@ -792,8 +718,6 @@ int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans) {
}
static int32_t mndTransCommit(SMnode *pMnode, STrans *pTrans) {
- if (taosArrayGetSize(pTrans->commitLogs) == 0 && taosArrayGetSize(pTrans->redoActions) == 0) return 0;
-
mDebug("trans:%d, commit transaction", pTrans->id);
if (mndTransSync(pMnode, pTrans) != 0) {
mError("trans:%d, failed to commit since %s", pTrans->id, terrstr());
@@ -822,32 +746,35 @@ static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans) {
}
if (pTrans->policy == TRN_POLICY_ROLLBACK) {
- if (pTrans->stage == TRN_STAGE_UNDO_LOG || pTrans->stage == TRN_STAGE_UNDO_ACTION ||
- pTrans->stage == TRN_STAGE_ROLLBACK) {
+ if (pTrans->stage == TRN_STAGE_UNDO_ACTION || pTrans->stage == TRN_STAGE_ROLLBACK) {
if (code == 0) code = TSDB_CODE_MND_TRANS_UNKNOW_ERROR;
sendRsp = true;
}
} else {
- if (pTrans->stage == TRN_STAGE_REDO_ACTION && pTrans->failedTimes > 0) {
+ if (pTrans->stage == TRN_STAGE_REDO_ACTION && pTrans->failedTimes > 3) {
if (code == 0) code = TSDB_CODE_MND_TRANS_UNKNOW_ERROR;
sendRsp = true;
}
}
if (sendRsp && pTrans->rpcInfo.handle != NULL) {
- void *rpcCont = rpcMallocCont(pTrans->rpcRspLen);
- if (rpcCont != NULL) {
- memcpy(rpcCont, pTrans->rpcRsp, pTrans->rpcRspLen);
+ mDebug("trans:%d, send rsp, code:0x%x stage:%s app:%p", pTrans->id, code, mndTransStr(pTrans->stage),
+ pTrans->rpcInfo.ahandle);
+ if (code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
+ code = TSDB_CODE_RPC_INDIRECT_NETWORK_UNAVAIL;
+ }
+ SRpcMsg rspMsg = {.code = code, .info = pTrans->rpcInfo};
+
+ if (pTrans->rpcRspLen != 0) {
+ void *rpcCont = rpcMallocCont(pTrans->rpcRspLen);
+ if (rpcCont != NULL) {
+ memcpy(rpcCont, pTrans->rpcRsp, pTrans->rpcRspLen);
+ rspMsg.pCont = rpcCont;
+ rspMsg.contLen = pTrans->rpcRspLen;
+ }
+ taosMemoryFree(pTrans->rpcRsp);
}
- taosMemoryFree(pTrans->rpcRsp);
- mDebug("trans:%d, send rsp, code:0x%x stage:%d app:%p", pTrans->id, code, pTrans->stage, pTrans->rpcInfo.ahandle);
- SRpcMsg rspMsg = {
- .code = code,
- .pCont = rpcCont,
- .contLen = pTrans->rpcRspLen,
- .info = pTrans->rpcInfo,
- };
tmsgSendRsp(&rspMsg);
pTrans->rpcInfo.handle = NULL;
pTrans->rpcRsp = NULL;
@@ -892,148 +819,150 @@ void mndTransProcessRsp(SRpcMsg *pRsp) {
if (pAction != NULL) {
pAction->msgReceived = 1;
pAction->errCode = pRsp->code;
- if (pAction->errCode != 0) {
- tstrncpy(pTrans->lastError, tstrerror(pAction->errCode), TSDB_TRANS_ERROR_LEN);
- }
}
- mDebug("trans:%d, action:%d response is received, code:0x%x, accept:0x%04x", transId, action, pRsp->code,
- pAction->acceptableCode);
+ mDebug("trans:%d, %s:%d response is received, code:0x%x, accept:0x%x", transId, mndTransStr(pAction->stage), action,
+ pRsp->code, pAction->acceptableCode);
mndTransExecute(pMnode, pTrans);
_OVER:
mndReleaseTrans(pMnode, pTrans);
}
-static int32_t mndTransExecuteLogs(SMnode *pMnode, SArray *pArray) {
- SSdb *pSdb = pMnode->pSdb;
- int32_t arraySize = taosArrayGetSize(pArray);
+static void mndTransResetActions(SMnode *pMnode, STrans *pTrans, SArray *pArray) {
+ int32_t numOfActions = taosArrayGetSize(pArray);
- if (arraySize == 0) return 0;
+ for (int32_t action = 0; action < numOfActions; ++action) {
+ STransAction *pAction = taosArrayGet(pArray, action);
+ if (pAction->msgSent && pAction->msgReceived &&
+ (pAction->errCode == 0 || pAction->errCode == pAction->acceptableCode))
+ continue;
+ if (pAction->rawWritten && (pAction->errCode == 0 || pAction->errCode == pAction->acceptableCode)) continue;
- int32_t code = 0;
- for (int32_t i = 0; i < arraySize; ++i) {
- SSdbRaw *pRaw = taosArrayGetP(pArray, i);
- if (sdbWriteWithoutFree(pSdb, pRaw) != 0) {
- code = ((terrno != 0) ? terrno : -1);
- }
+ pAction->rawWritten = 0;
+ pAction->msgSent = 0;
+ pAction->msgReceived = 0;
+ pAction->errCode = 0;
+ mDebug("trans:%d, %s:%d execute status is reset", pTrans->id, mndTransStr(pAction->stage), action);
}
-
- terrno = code;
- return code;
}
-static int32_t mndTransExecuteRedoLogs(SMnode *pMnode, STrans *pTrans) {
- int32_t code = mndTransExecuteLogs(pMnode, pTrans->redoLogs);
- if (code != 0) {
- mError("failed to execute redoLogs since %s", terrstr());
- }
- return code;
-}
+static int32_t mndTransWriteSingleLog(SMnode *pMnode, STrans *pTrans, STransAction *pAction) {
+ if (pAction->rawWritten) return 0;
-static int32_t mndTransExecuteUndoLogs(SMnode *pMnode, STrans *pTrans) {
- int32_t code = mndTransExecuteLogs(pMnode, pTrans->undoLogs);
- if (code != 0) {
- mError("failed to execute undoLogs since %s, return success", terrstr());
+ int32_t code = sdbWriteWithoutFree(pMnode->pSdb, pAction->pRaw);
+ if (code == 0 || terrno == TSDB_CODE_SDB_OBJ_NOT_THERE) {
+ pAction->rawWritten = true;
+ pAction->errCode = 0;
+ code = 0;
+ mDebug("trans:%d, %s:%d write to sdb", pTrans->id, mndTransStr(pAction->stage), pAction->id);
+ } else {
+ pAction->errCode = (terrno != 0) ? terrno : code;
+ mError("trans:%d, %s:%d failed to write sdb since %s", pTrans->id, mndTransStr(pAction->stage), pAction->id,
+ terrstr());
}
- return 0; // return success in any case
-}
-
-static int32_t mndTransExecuteCommitLogs(SMnode *pMnode, STrans *pTrans) {
- int32_t code = mndTransExecuteLogs(pMnode, pTrans->commitLogs);
- if (code != 0) {
- mError("failed to execute commitLogs since %s", terrstr());
- }
return code;
}
-static void mndTransResetActions(SMnode *pMnode, STrans *pTrans, SArray *pArray) {
- int32_t numOfActions = taosArrayGetSize(pArray);
+static int32_t mndTransSendSingleMsg(SMnode *pMnode, STrans *pTrans, STransAction *pAction) {
+ if (pAction->msgSent) return 0;
+ if (!pMnode->deploy && !mndIsMaster(pMnode)) return -1;
- for (int32_t action = 0; action < numOfActions; ++action) {
- STransAction *pAction = taosArrayGet(pArray, action);
- if (pAction == NULL) continue;
- if (pAction->msgSent && pAction->msgReceived && pAction->errCode == 0) continue;
+ int64_t signature = pTrans->id;
+ signature = (signature << 32);
+ signature += pAction->id;
- pAction->msgSent = 0;
+ SRpcMsg rpcMsg = {.msgType = pAction->msgType, .contLen = pAction->contLen, .info.ahandle = (void *)signature};
+ rpcMsg.pCont = rpcMallocCont(pAction->contLen);
+ if (rpcMsg.pCont == NULL) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return -1;
+ }
+ memcpy(rpcMsg.pCont, pAction->pCont, pAction->contLen);
+
+ int32_t code = tmsgSendReq(&pAction->epSet, &rpcMsg);
+ if (code == 0) {
+ pAction->msgSent = 1;
pAction->msgReceived = 0;
pAction->errCode = 0;
- mDebug("trans:%d, action:%d execute status is reset", pTrans->id, action);
+ mDebug("trans:%d, %s:%d is sent to %s:%u", pTrans->id, mndTransStr(pAction->stage), pAction->id,
+ pAction->epSet.eps[pAction->epSet.inUse].fqdn, pAction->epSet.eps[pAction->epSet.inUse].port);
+ } else {
+ pAction->msgSent = 0;
+ pAction->msgReceived = 0;
+ pAction->errCode = (terrno != 0) ? terrno : code;
+ mError("trans:%d, %s:%d not send since %s", pTrans->id, mndTransStr(pAction->stage), pAction->id, terrstr());
+ }
+
+ return code;
+}
+
+static int32_t mndTransExecSingleAction(SMnode *pMnode, STrans *pTrans, STransAction *pAction) {
+ if (pAction->actionType) {
+ return mndTransWriteSingleLog(pMnode, pTrans, pAction);
+ } else {
+ return mndTransSendSingleMsg(pMnode, pTrans, pAction);
}
}
-static int32_t mndTransSendActionMsg(SMnode *pMnode, STrans *pTrans, SArray *pArray) {
+static int32_t mndTransExecSingleActions(SMnode *pMnode, STrans *pTrans, SArray *pArray) {
int32_t numOfActions = taosArrayGetSize(pArray);
+ int32_t code = 0;
for (int32_t action = 0; action < numOfActions; ++action) {
STransAction *pAction = taosArrayGet(pArray, action);
- if (pAction == NULL) continue;
- if (pAction->msgSent) continue;
-
- int64_t signature = pTrans->id;
- signature = (signature << 32);
- signature += action;
-
- SRpcMsg rpcMsg = {.msgType = pAction->msgType, .contLen = pAction->contLen, .info.ahandle = (void *)signature};
- rpcMsg.pCont = rpcMallocCont(pAction->contLen);
- if (rpcMsg.pCont == NULL) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- return -1;
- }
- memcpy(rpcMsg.pCont, pAction->pCont, pAction->contLen);
-
- if (tmsgSendReq(&pAction->epSet, &rpcMsg) == 0) {
- mDebug("trans:%d, action:%d is sent to %s:%u", pTrans->id, action, pAction->epSet.eps[pAction->epSet.inUse].fqdn,
- pAction->epSet.eps[pAction->epSet.inUse].port);
- pAction->msgSent = 1;
- pAction->msgReceived = 0;
- pAction->errCode = 0;
- } else {
- pAction->msgSent = 0;
- pAction->msgReceived = 0;
- pAction->errCode = terrno;
- mError("trans:%d, action:%d not send since %s", pTrans->id, action, terrstr());
- return -1;
- }
+ code = mndTransExecSingleAction(pMnode, pTrans, pAction);
+ if (code != 0) break;
}
- return 0;
+ return code;
}
static int32_t mndTransExecuteActions(SMnode *pMnode, STrans *pTrans, SArray *pArray) {
int32_t numOfActions = taosArrayGetSize(pArray);
if (numOfActions == 0) return 0;
- if (mndTransSendActionMsg(pMnode, pTrans, pArray) != 0) {
+ if (mndTransExecSingleActions(pMnode, pTrans, pArray) != 0) {
return -1;
}
- int32_t numOfReceived = 0;
- int32_t errCode = 0;
+ int32_t numOfExecuted = 0;
+ int32_t errCode = 0;
+ STransAction *pErrAction = NULL;
for (int32_t action = 0; action < numOfActions; ++action) {
STransAction *pAction = taosArrayGet(pArray, action);
- if (pAction == NULL) continue;
- if (pAction->msgSent && pAction->msgReceived) {
- numOfReceived++;
+ if (pAction->msgReceived || pAction->rawWritten) {
+ numOfExecuted++;
if (pAction->errCode != 0 && pAction->errCode != pAction->acceptableCode) {
errCode = pAction->errCode;
+ pErrAction = pAction;
}
}
}
- if (numOfReceived == numOfActions) {
+ if (numOfExecuted == numOfActions) {
if (errCode == 0) {
+ pTrans->lastErrorAction = 0;
+ pTrans->lastErrorNo = 0;
+ pTrans->lastErrorMsgType = 0;
+ memset(&pTrans->lastErrorEpset, 0, sizeof(pTrans->lastErrorEpset));
mDebug("trans:%d, all %d actions execute successfully", pTrans->id, numOfActions);
return 0;
} else {
mError("trans:%d, all %d actions executed, code:0x%x", pTrans->id, numOfActions, errCode & 0XFFFF);
+ if (pErrAction != NULL) {
+ pTrans->lastErrorMsgType = pErrAction->msgType;
+ pTrans->lastErrorAction = pErrAction->id;
+ pTrans->lastErrorNo = pErrAction->errCode;
+ pTrans->lastErrorEpset = pErrAction->epSet;
+ }
mndTransResetActions(pMnode, pTrans, pArray);
terrno = errCode;
return errCode;
}
} else {
- mDebug("trans:%d, %d of %d actions executed", pTrans->id, numOfReceived, numOfActions);
+ mDebug("trans:%d, %d of %d actions executed", pTrans->id, numOfExecuted, numOfActions);
return TSDB_CODE_ACTION_IN_PROGRESS;
}
}
@@ -1054,35 +983,99 @@ static int32_t mndTransExecuteUndoActions(SMnode *pMnode, STrans *pTrans) {
return code;
}
-static bool mndTransPerformPrepareStage(SMnode *pMnode, STrans *pTrans) {
- bool continueExec = true;
- pTrans->stage = TRN_STAGE_REDO_LOG;
- mDebug("trans:%d, stage from prepare to redoLog", pTrans->id);
- return continueExec;
+static int32_t mndTransExecuteCommitActions(SMnode *pMnode, STrans *pTrans) {
+ int32_t code = mndTransExecuteActions(pMnode, pTrans, pTrans->commitActions);
+ if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
+ mError("failed to execute commitActions since %s", terrstr());
+ }
+ return code;
}
-static bool mndTransPerformRedoLogStage(SMnode *pMnode, STrans *pTrans) {
- bool continueExec = true;
- int32_t code = mndTransExecuteRedoLogs(pMnode, pTrans);
+static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans) {
+ int32_t code = 0;
+ int32_t numOfActions = taosArrayGetSize(pTrans->redoActions);
+ if (numOfActions == 0) return code;
+ if (pTrans->redoActionPos >= numOfActions) return code;
+
+ for (int32_t action = pTrans->redoActionPos; action < numOfActions; ++action) {
+ STransAction *pAction = taosArrayGet(pTrans->redoActions, pTrans->redoActionPos);
+
+ code = mndTransExecSingleAction(pMnode, pTrans, pAction);
+ if (code == 0) {
+ if (pAction->msgSent) {
+ if (pAction->msgReceived) {
+ if (pAction->errCode != 0 && pAction->errCode != pAction->acceptableCode) {
+ code = pAction->errCode;
+ pAction->msgSent = 0;
+ pAction->msgReceived = 0;
+ mDebug("trans:%d, %s:%d execute status is reset", pTrans->id, mndTransStr(pAction->stage), action);
+ }
+ } else {
+ code = TSDB_CODE_ACTION_IN_PROGRESS;
+ }
+ }
+ if (pAction->rawWritten) {
+ if (pAction->errCode != 0 && pAction->errCode != pAction->acceptableCode) {
+ code = pAction->errCode;
+ }
+ }
+ }
- if (code == 0) {
- pTrans->code = 0;
- pTrans->stage = TRN_STAGE_REDO_ACTION;
- mDebug("trans:%d, stage from redoLog to redoAction", pTrans->id);
- } else {
- pTrans->code = terrno;
- pTrans->stage = TRN_STAGE_UNDO_LOG;
- mError("trans:%d, stage from redoLog to undoLog since %s", pTrans->id, terrstr());
+ if (code == 0) {
+ pTrans->lastErrorAction = 0;
+ pTrans->lastErrorNo = 0;
+ pTrans->lastErrorMsgType = 0;
+ memset(&pTrans->lastErrorEpset, 0, sizeof(pTrans->lastErrorEpset));
+ } else {
+ pTrans->lastErrorMsgType = pAction->msgType;
+ pTrans->lastErrorAction = action;
+ pTrans->lastErrorNo = pAction->errCode;
+ pTrans->lastErrorEpset = pAction->epSet;
+ }
+
+ if (code == 0) {
+ pTrans->code = 0;
+ pTrans->redoActionPos++;
+ mDebug("trans:%d, %s:%d is executed and need sync to other mnodes", pTrans->id, mndTransStr(pAction->stage),
+ pAction->id);
+ code = mndTransSync(pMnode, pTrans);
+ if (code != 0) {
+ pTrans->code = terrno;
+ mError("trans:%d, %s:%d is executed and failed to sync to other mnodes since %s", pTrans->id,
+ mndTransStr(pAction->stage), pAction->id, terrstr());
+ break;
+ }
+ } else if (code == TSDB_CODE_ACTION_IN_PROGRESS) {
+ mDebug("trans:%d, %s:%d is in progress and wait it finish", pTrans->id, mndTransStr(pAction->stage), pAction->id);
+ break;
+ } else {
+ terrno = code;
+ pTrans->code = code;
+ mError("trans:%d, %s:%d failed to execute since %s", pTrans->id, mndTransStr(pAction->stage), pAction->id,
+ terrstr());
+ break;
+ }
}
+ return code;
+}
+
+static bool mndTransPerformPrepareStage(SMnode *pMnode, STrans *pTrans) {
+ bool continueExec = true;
+ pTrans->stage = TRN_STAGE_REDO_ACTION;
+ mDebug("trans:%d, stage from prepare to redoAction", pTrans->id);
return continueExec;
}
static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans) {
- if (!mndIsMaster(pMnode)) return false;
-
bool continueExec = true;
- int32_t code = mndTransExecuteRedoActions(pMnode, pTrans);
+ int32_t code = 0;
+
+ if (pTrans->exec == TRN_EXEC_SERIAL) {
+ code = mndTransExecuteRedoActionsSerial(pMnode, pTrans);
+ } else {
+ code = mndTransExecuteRedoActions(pMnode, pTrans);
+ }
if (code == 0) {
pTrans->code = 0;
@@ -1114,8 +1107,8 @@ static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans) {
if (code == 0) {
pTrans->code = 0;
- pTrans->stage = TRN_STAGE_COMMIT_LOG;
- mDebug("trans:%d, stage from commit to commitLog", pTrans->id);
+ pTrans->stage = TRN_STAGE_COMMIT_ACTION;
+ mDebug("trans:%d, stage from commit to commitAction", pTrans->id);
continueExec = true;
} else {
pTrans->code = terrno;
@@ -1134,35 +1127,19 @@ static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans) {
return continueExec;
}
-static bool mndTransPerformCommitLogStage(SMnode *pMnode, STrans *pTrans) {
+static bool mndTransPerformCommitActionStage(SMnode *pMnode, STrans *pTrans) {
bool continueExec = true;
- int32_t code = mndTransExecuteCommitLogs(pMnode, pTrans);
+ int32_t code = mndTransExecuteCommitActions(pMnode, pTrans);
if (code == 0) {
pTrans->code = 0;
pTrans->stage = TRN_STAGE_FINISHED;
- mDebug("trans:%d, stage from commitLog to finished", pTrans->id);
+ mDebug("trans:%d, stage from commitAction to finished", pTrans->id);
continueExec = true;
} else {
pTrans->code = terrno;
pTrans->failedTimes++;
- mError("trans:%d, stage keep on commitLog since %s, failedTimes:%d", pTrans->id, terrstr(), pTrans->failedTimes);
- continueExec = false;
- }
-
- return continueExec;
-}
-
-static bool mndTransPerformUndoLogStage(SMnode *pMnode, STrans *pTrans) {
- bool continueExec = true;
- int32_t code = mndTransExecuteUndoLogs(pMnode, pTrans);
-
- if (code == 0) {
- pTrans->stage = TRN_STAGE_ROLLBACK;
- mDebug("trans:%d, stage from undoLog to rollback", pTrans->id);
- continueExec = true;
- } else {
- mError("trans:%d, stage keep on undoLog since %s", pTrans->id, terrstr());
+ mError("trans:%d, stage keep on commitAction since %s, failedTimes:%d", pTrans->id, terrstr(), pTrans->failedTimes);
continueExec = false;
}
@@ -1170,14 +1147,12 @@ static bool mndTransPerformUndoLogStage(SMnode *pMnode, STrans *pTrans) {
}
static bool mndTransPerformUndoActionStage(SMnode *pMnode, STrans *pTrans) {
- if (!mndIsMaster(pMnode)) return false;
-
bool continueExec = true;
int32_t code = mndTransExecuteUndoActions(pMnode, pTrans);
if (code == 0) {
- pTrans->stage = TRN_STAGE_UNDO_LOG;
- mDebug("trans:%d, stage from undoAction to undoLog", pTrans->id);
+ pTrans->stage = TRN_STAGE_ROLLBACK;
+ mDebug("trans:%d, stage from undoAction to rollback", pTrans->id);
continueExec = true;
} else if (code == TSDB_CODE_ACTION_IN_PROGRESS) {
mDebug("trans:%d, stage keep on undoAction since %s", pTrans->id, tstrerror(code));
@@ -1222,8 +1197,7 @@ static bool mndTransPerfromFinishedStage(SMnode *pMnode, STrans *pTrans) {
mError("trans:%d, failed to write sdb since %s", pTrans->id, terrstr());
}
- mDebug("trans:%d, finished, code:0x%x, failedTimes:%d", pTrans->id, pTrans->code, pTrans->failedTimes);
-
+ mDebug("trans:%d, execute finished, code:0x%x, failedTimes:%d", pTrans->id, pTrans->code, pTrans->failedTimes);
return continueExec;
}
@@ -1236,24 +1210,18 @@ static void mndTransExecute(SMnode *pMnode, STrans *pTrans) {
case TRN_STAGE_PREPARE:
continueExec = mndTransPerformPrepareStage(pMnode, pTrans);
break;
- case TRN_STAGE_REDO_LOG:
- continueExec = mndTransPerformRedoLogStage(pMnode, pTrans);
- break;
case TRN_STAGE_REDO_ACTION:
continueExec = mndTransPerformRedoActionStage(pMnode, pTrans);
break;
- case TRN_STAGE_UNDO_LOG:
- continueExec = mndTransPerformUndoLogStage(pMnode, pTrans);
+ case TRN_STAGE_COMMIT:
+ continueExec = mndTransPerformCommitStage(pMnode, pTrans);
+ break;
+ case TRN_STAGE_COMMIT_ACTION:
+ continueExec = mndTransPerformCommitActionStage(pMnode, pTrans);
break;
case TRN_STAGE_UNDO_ACTION:
continueExec = mndTransPerformUndoActionStage(pMnode, pTrans);
break;
- case TRN_STAGE_COMMIT_LOG:
- continueExec = mndTransPerformCommitLogStage(pMnode, pTrans);
- break;
- case TRN_STAGE_COMMIT:
- continueExec = mndTransPerformCommitStage(pMnode, pTrans);
- break;
case TRN_STAGE_ROLLBACK:
continueExec = mndTransPerformRollbackStage(pMnode, pTrans);
break;
@@ -1285,22 +1253,11 @@ int32_t mndKillTrans(SMnode *pMnode, STrans *pTrans) {
return -1;
}
- int32_t size = taosArrayGetSize(pArray);
-
- for (int32_t i = 0; i < size; ++i) {
+ for (int32_t i = 0; i < taosArrayGetSize(pArray); ++i) {
STransAction *pAction = taosArrayGet(pArray, i);
- if (pAction == NULL) continue;
-
- if (pAction->msgReceived == 0) {
- mInfo("trans:%d, action:%d set processed for kill msg received", pTrans->id, i);
- pAction->msgSent = 1;
- pAction->msgReceived = 1;
- pAction->errCode = 0;
- }
-
if (pAction->errCode != 0) {
- mInfo("trans:%d, action:%d set processed for kill msg received, errCode from %s to success", pTrans->id, i,
- tstrerror(pAction->errCode));
+ mInfo("trans:%d, %s:%d set processed for kill msg received, errCode from %s to success", pTrans->id,
+ mndTransStr(pAction->stage), i, tstrerror(pAction->errCode));
pAction->msgSent = 1;
pAction->msgReceived = 1;
pAction->errCode = 0;
@@ -1336,9 +1293,7 @@ static int32_t mndProcessKillTransReq(SRpcMsg *pReq) {
pTrans = mndAcquireTrans(pMnode, killReq.transId);
if (pTrans == NULL) {
- terrno = TSDB_CODE_MND_TRANS_NOT_EXIST;
- mError("trans:%d, failed to kill since %s", killReq.transId, terrstr());
- return -1;
+ goto _OVER;
}
code = mndKillTrans(pMnode, pTrans);
@@ -1346,9 +1301,9 @@ static int32_t mndProcessKillTransReq(SRpcMsg *pReq) {
_OVER:
if (code != 0) {
mError("trans:%d, failed to kill since %s", killReq.transId, terrstr());
- return -1;
}
+ mndReleaseUser(pMnode, pUser);
mndReleaseTrans(pMnode, pTrans);
return code;
}
@@ -1414,11 +1369,6 @@ static int32_t mndRetrieveTrans(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)dbname, false);
- char type[TSDB_TRANS_TYPE_LEN + VARSTR_HEADER_SIZE] = {0};
- STR_WITH_MAXSIZE_TO_VARSTR(type, mndTransType(pTrans->type), pShow->pMeta->pSchemas[cols].bytes);
- pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataAppend(pColInfo, numOfRows, (const char *)type, false);
-
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)&pTrans->failedTimes, false);
@@ -1426,7 +1376,20 @@ static int32_t mndRetrieveTrans(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl
colDataAppend(pColInfo, numOfRows, (const char *)&pTrans->lastExecTime, false);
char lastError[TSDB_TRANS_ERROR_LEN + VARSTR_HEADER_SIZE] = {0};
- STR_WITH_MAXSIZE_TO_VARSTR(lastError, pTrans->lastError, pShow->pMeta->pSchemas[cols].bytes);
+ char detail[TSDB_TRANS_ERROR_LEN] = {0};
+ if (pTrans->lastErrorNo != 0) {
+ int32_t len = snprintf(detail, sizeof(detail), "action:%d errno:0x%x(%s) ", pTrans->lastErrorAction,
+ pTrans->lastErrorNo & 0xFFFF, tstrerror(pTrans->lastErrorNo));
+ SEpSet epset = pTrans->lastErrorEpset;
+ if (epset.numOfEps > 0) {
+ len += snprintf(detail + len, sizeof(detail) - len, "msgType:%s numOfEps:%d inUse:%d ",
+ TMSG_INFO(pTrans->lastErrorMsgType), epset.numOfEps, epset.inUse);
+ }
+ for (int32_t i = 0; i < pTrans->lastErrorEpset.numOfEps; ++i) {
+ len += snprintf(detail + len, sizeof(detail) - len, "ep:%d-%s:%u ", i, epset.eps[i].fqdn, epset.eps[i].port);
+ }
+ }
+ STR_WITH_MAXSIZE_TO_VARSTR(lastError, detail, pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)lastError, false);
diff --git a/source/dnode/mnode/impl/src/mndUser.c b/source/dnode/mnode/impl/src/mndUser.c
index 5f2147a5fe95f03873f8be2bc89df25e90092a9e..345d756f4399a46b4d4abfa8db1ea74b2271b01e 100644
--- a/source/dnode/mnode/impl/src/mndUser.c
+++ b/source/dnode/mnode/impl/src/mndUser.c
@@ -77,8 +77,30 @@ static int32_t mndCreateDefaultUser(SMnode *pMnode, char *acct, char *user, char
if (pRaw == NULL) return -1;
sdbSetRawStatus(pRaw, SDB_STATUS_READY);
- mDebug("user:%s, will be created while deploy sdb, raw:%p", userObj.user, pRaw);
- return sdbWrite(pMnode->pSdb, pRaw);
+ mDebug("user:%s, will be created when deploying, raw:%p", userObj.user, pRaw);
+
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, NULL);
+ if (pTrans == NULL) {
+ mError("user:%s, failed to create since %s", userObj.user, terrstr());
+ return -1;
+ }
+ mDebug("trans:%d, used to create user:%s", pTrans->id, userObj.user);
+
+ if (mndTransAppendCommitlog(pTrans, pRaw) != 0) {
+ mError("trans:%d, failed to commit redo log since %s", pTrans->id, terrstr());
+ mndTransDrop(pTrans);
+ return -1;
+ }
+ sdbSetRawStatus(pRaw, SDB_STATUS_READY);
+
+ if (mndTransPrepare(pMnode, pTrans) != 0) {
+ mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr());
+ mndTransDrop(pTrans);
+ return -1;
+ }
+
+ mndTransDrop(pTrans);
+ return 0;
}
static int32_t mndCreateDefaultUsers(SMnode *pMnode) {
@@ -265,7 +287,7 @@ static int32_t mndCreateUser(SMnode *pMnode, char *acct, SCreateUserReq *pCreate
userObj.updateTime = userObj.createdTime;
userObj.superUser = pCreate->superUser;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_CREATE_USER, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq);
if (pTrans == NULL) {
mError("user:%s, failed to create since %s", pCreate->user, terrstr());
return -1;
@@ -345,7 +367,7 @@ _OVER:
}
static int32_t mndAlterUser(SMnode *pMnode, SUserObj *pOld, SUserObj *pNew, SRpcMsg *pReq) {
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_ALTER_USER, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq);
if (pTrans == NULL) {
mError("user:%s, failed to alter since %s", pOld->user, terrstr());
return -1;
@@ -552,7 +574,7 @@ _OVER:
}
static int32_t mndDropUser(SMnode *pMnode, SRpcMsg *pReq, SUserObj *pUser) {
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_DROP_USER, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq);
if (pTrans == NULL) {
mError("user:%s, failed to drop since %s", pUser->user, terrstr());
return -1;
diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c
index 62021c6a7edc467bd7cd62fba9ef9eddbef1193b..2577febf6611ffbdbeb8d4021df3292e99c7873b 100644
--- a/source/dnode/mnode/impl/src/mndVgroup.c
+++ b/source/dnode/mnode/impl/src/mndVgroup.c
@@ -51,9 +51,10 @@ int32_t mndInitVgroup(SMnode *pMnode) {
};
mndSetMsgHandle(pMnode, TDMT_DND_CREATE_VNODE_RSP, mndProcessCreateVnodeRsp);
- mndSetMsgHandle(pMnode, TDMT_VND_ALTER_VNODE_RSP, mndProcessAlterVnodeRsp);
+ mndSetMsgHandle(pMnode, TDMT_VND_ALTER_REPLICA_RSP, mndProcessAlterVnodeRsp);
+ mndSetMsgHandle(pMnode, TDMT_VND_ALTER_CONFIG_RSP, mndProcessAlterVnodeRsp);
mndSetMsgHandle(pMnode, TDMT_DND_DROP_VNODE_RSP, mndProcessDropVnodeRsp);
- mndSetMsgHandle(pMnode, TDMT_VND_COMPACT_VNODE_RSP, mndProcessCompactVnodeRsp);
+ mndSetMsgHandle(pMnode, TDMT_VND_COMPACT_RSP, mndProcessCompactVnodeRsp);
mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_VGROUP, mndRetrieveVgroups);
mndAddShowFreeIterHandle(pMnode, TSDB_MGMT_TABLE_VGROUP, mndCancelGetNextVgroup);
@@ -80,6 +81,7 @@ SSdbRaw *mndVgroupActionEncode(SVgObj *pVgroup) {
SDB_SET_INT32(pRaw, dataPos, pVgroup->hashEnd, _OVER)
SDB_SET_BINARY(pRaw, dataPos, pVgroup->dbName, TSDB_DB_FNAME_LEN, _OVER)
SDB_SET_INT64(pRaw, dataPos, pVgroup->dbUid, _OVER)
+ SDB_SET_INT8(pRaw, dataPos, pVgroup->isTsma, _OVER)
SDB_SET_INT8(pRaw, dataPos, pVgroup->replica, _OVER)
for (int8_t i = 0; i < pVgroup->replica; ++i) {
SVnodeGid *pVgid = &pVgroup->vnodeGid[i];
@@ -127,6 +129,7 @@ SSdbRow *mndVgroupActionDecode(SSdbRaw *pRaw) {
SDB_GET_INT32(pRaw, dataPos, &pVgroup->hashEnd, _OVER)
SDB_GET_BINARY(pRaw, dataPos, pVgroup->dbName, TSDB_DB_FNAME_LEN, _OVER)
SDB_GET_INT64(pRaw, dataPos, &pVgroup->dbUid, _OVER)
+ SDB_GET_INT8(pRaw, dataPos, &pVgroup->isTsma, _OVER)
SDB_GET_INT8(pRaw, dataPos, &pVgroup->replica, _OVER)
for (int8_t i = 0; i < pVgroup->replica; ++i) {
SVnodeGid *pVgid = &pVgroup->vnodeGid[i];
@@ -167,6 +170,7 @@ static int32_t mndVgroupActionUpdate(SSdb *pSdb, SVgObj *pOld, SVgObj *pNew) {
pOld->hashBegin = pNew->hashBegin;
pOld->hashEnd = pNew->hashEnd;
pOld->replica = pNew->replica;
+ pOld->isTsma = pNew->isTsma;
memcpy(pOld->vnodeGid, pNew->vnodeGid, TSDB_MAX_REPLICA * sizeof(SVnodeGid));
return 0;
}
@@ -185,10 +189,10 @@ void mndReleaseVgroup(SMnode *pMnode, SVgObj *pVgroup) {
sdbRelease(pSdb, pVgroup);
}
-void *mndBuildCreateVnodeReq(SMnode *pMnode, SDnodeObj *pDnode, SDbObj *pDb, SVgObj *pVgroup, int32_t *pContLen) {
+void *mndBuildCreateVnodeReq(SMnode *pMnode, SDnodeObj *pDnode, SDbObj *pDb, SVgObj *pVgroup, int32_t *pContLen,
+ bool standby) {
SCreateVnodeReq createReq = {0};
createReq.vgId = pVgroup->vgId;
- createReq.dnodeId = pDnode->id;
memcpy(createReq.db, pDb->name, TSDB_DB_FNAME_LEN);
createReq.dbUid = pDb->uid;
createReq.vgVersion = pVgroup->version;
@@ -215,6 +219,9 @@ void *mndBuildCreateVnodeReq(SMnode *pMnode, SDnodeObj *pDnode, SDbObj *pDb, SVg
createReq.hashMethod = pDb->cfg.hashMethod;
createReq.numOfRetensions = pDb->cfg.numOfRetensions;
createReq.pRetensions = pDb->cfg.pRetensions;
+ createReq.standby = standby;
+ createReq.isTsma = pVgroup->isTsma;
+ createReq.pTsma = pVgroup->pTsma;
for (int32_t v = 0; v < pVgroup->replica; ++v) {
SReplica *pReplica = &createReq.replicas[v];
@@ -271,7 +278,6 @@ void *mndBuildAlterVnodeReq(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup, int32_
alterReq.strict = pDb->cfg.strict;
alterReq.cacheLastRow = pDb->cfg.cacheLastRow;
alterReq.replica = pVgroup->replica;
- alterReq.selfIndex = -1;
for (int32_t v = 0; v < pVgroup->replica; ++v) {
SReplica *pReplica = &alterReq.replicas[v];
@@ -287,13 +293,6 @@ void *mndBuildAlterVnodeReq(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup, int32_
mndReleaseDnode(pMnode, pVgidDnode);
}
-#if 0
- if (alterReq.selfIndex == -1) {
- terrno = TSDB_CODE_MND_APP_ERROR;
- return NULL;
- }
-#endif
-
int32_t contLen = tSerializeSAlterVnodeReq(NULL, 0, &alterReq);
if (contLen < 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@@ -426,6 +425,25 @@ static int32_t mndGetAvailableDnode(SMnode *pMnode, SVgObj *pVgroup, SArray *pAr
return 0;
}
+int32_t mndAllocSmaVgroup(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup) {
+ SArray *pArray = mndBuildDnodesArray(pMnode);
+ if (pArray == NULL) return -1;
+
+ pVgroup->vgId = sdbGetMaxId(pMnode->pSdb, SDB_VGROUP);
+ pVgroup->isTsma = 1;
+ pVgroup->createdTime = taosGetTimestampMs();
+ pVgroup->updateTime = pVgroup->createdTime;
+ pVgroup->version = 1;
+ memcpy(pVgroup->dbName, pDb->name, TSDB_DB_FNAME_LEN);
+ pVgroup->dbUid = pDb->uid;
+ pVgroup->replica = 1;
+
+ if (mndGetAvailableDnode(pMnode, pVgroup, pArray) != 0) return -1;
+
+ mInfo("db:%s, sma vgId:%d is alloced", pDb->name, pVgroup->vgId);
+ return 0;
+}
+
int32_t mndAllocVgroup(SMnode *pMnode, SDbObj *pDb, SVgObj **ppVgroups) {
int32_t code = -1;
SArray *pArray = NULL;
@@ -479,7 +497,7 @@ int32_t mndAllocVgroup(SMnode *pMnode, SDbObj *pDb, SVgObj **ppVgroups) {
*ppVgroups = pVgroups;
code = 0;
- mInfo("db:%s, %d vgroups is alloced, replica:%d", pDb->name, pDb->cfg.numOfVgroups, pDb->cfg.replications);
+ mInfo("db:%s, total %d vgroups is alloced, replica:%d", pDb->name, pDb->cfg.numOfVgroups, pDb->cfg.replications);
_OVER:
if (code != 0) taosMemoryFree(pVgroups);
@@ -514,10 +532,10 @@ int32_t mndAddVnodeToVgroup(SMnode *pMnode, SVgObj *pVgroup, SArray *pArray) {
SVnodeGid *pVgid = &pVgroup->vnodeGid[maxPos];
pVgid->dnodeId = pDnode->id;
- pVgid->role = TAOS_SYNC_STATE_FOLLOWER;
+ pVgid->role = TAOS_SYNC_STATE_ERROR;
pDnode->numOfVnodes++;
- mInfo("db:%s, vgId:%d, vn:%d dnode:%d is added", pVgroup->dbName, pVgroup->vgId, maxPos, pVgid->dnodeId);
+ mInfo("db:%s, vgId:%d, vnode_index:%d dnode:%d is added", pVgroup->dbName, pVgroup->vgId, maxPos, pVgid->dnodeId);
maxPos++;
if (maxPos == 3) return 0;
}
@@ -527,14 +545,13 @@ int32_t mndAddVnodeToVgroup(SMnode *pMnode, SVgObj *pVgroup, SArray *pArray) {
}
int32_t mndRemoveVnodeFromVgroup(SMnode *pMnode, SVgObj *pVgroup, SArray *pArray, SVnodeGid *del1, SVnodeGid *del2) {
- int32_t removedNum = 0;
-
taosArraySort(pArray, (__compar_fn_t)mndCompareDnodeVnodes);
for (int32_t i = 0; i < taosArrayGetSize(pArray); ++i) {
SDnodeObj *pDnode = taosArrayGet(pArray, i);
mDebug("dnode:%d, equivalent vnodes:%d", pDnode->id, pDnode->numOfVnodes);
}
+ int32_t removedNum = 0;
for (int32_t d = taosArrayGetSize(pArray) - 1; d >= 0; --d) {
SDnodeObj *pDnode = taosArrayGet(pArray, d);
@@ -640,6 +657,7 @@ static int32_t mndRetrieveVgroups(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p
int32_t numOfRows = 0;
SVgObj *pVgroup = NULL;
int32_t cols = 0;
+ int64_t curMs = taosGetTimestampMs();
SDbObj *pDb = NULL;
if (strlen(pShow->db) > 0) {
@@ -679,12 +697,15 @@ static int32_t mndRetrieveVgroups(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p
if (i < pVgroup->replica) {
colDataAppend(pColInfo, numOfRows, (const char *)&pVgroup->vnodeGid[i].dnodeId, false);
+ bool online = false;
+ SDnodeObj *pDnode = mndAcquireDnode(pMnode, pVgroup->vnodeGid[i].dnodeId);
+ if (pDnode != NULL) {
+ online = mndIsDnodeOnline(pMnode, pDnode, curMs);
+ mndReleaseDnode(pMnode, pDnode);
+ }
+
char buf1[20] = {0};
- SDnodeObj *pDnodeObj = mndAcquireDnode(pMnode, pVgroup->vnodeGid[i].dnodeId);
- ASSERT(pDnodeObj != NULL);
- bool isOffLine = !mndIsDnodeOnline(pMnode, pDnodeObj, taosGetTimestampMs());
- const char *role = isOffLine ? "OFFLINE" : syncStr(pVgroup->vnodeGid[i].role);
-
+ const char *role = online ? syncStr(pVgroup->vnodeGid[i].role) : "offline";
STR_WITH_MAXSIZE_TO_VARSTR(buf1, role, pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
@@ -702,9 +723,12 @@ static int32_t mndRetrieveVgroups(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppendNULL(pColInfo, numOfRows);
- pColInfo = taosArrayGet(pBlock->pDataBlock, cols);
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppendNULL(pColInfo, numOfRows);
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataAppend(pColInfo, numOfRows, (const char *)&pVgroup->isTsma, false);
+
numOfRows++;
sdbRelease(pSdb, pVgroup);
}
diff --git a/source/dnode/mnode/impl/test/acct/CMakeLists.txt b/source/dnode/mnode/impl/test/acct/CMakeLists.txt
index 40f8b0726e28446170a71bbbccde979376448fbb..d72292e34bd605ec91b16788fadd9f1ff1c68cc4 100644
--- a/source/dnode/mnode/impl/test/acct/CMakeLists.txt
+++ b/source/dnode/mnode/impl/test/acct/CMakeLists.txt
@@ -5,7 +5,9 @@ target_link_libraries(
PUBLIC sut
)
-add_test(
- NAME acctTest
- COMMAND acctTest
-)
+if(NOT TD_WINDOWS)
+ add_test(
+ NAME acctTest
+ COMMAND acctTest
+ )
+endif(NOT TD_WINDOWS)
diff --git a/source/dnode/mnode/impl/test/db/CMakeLists.txt b/source/dnode/mnode/impl/test/db/CMakeLists.txt
index 3f6a80835ffa7b2a0a6fcdcff21e1cfd39a02c5f..e28cdd4f61824c04f62513868a9010113140fd31 100644
--- a/source/dnode/mnode/impl/test/db/CMakeLists.txt
+++ b/source/dnode/mnode/impl/test/db/CMakeLists.txt
@@ -5,7 +5,9 @@ target_link_libraries(
PUBLIC sut
)
-add_test(
- NAME dbTest
- COMMAND dbTest
-)
+if(NOT TD_WINDOWS)
+ add_test(
+ NAME dbTest
+ COMMAND dbTest
+ )
+endif(NOT TD_WINDOWS)
diff --git a/source/dnode/mnode/impl/test/func/CMakeLists.txt b/source/dnode/mnode/impl/test/func/CMakeLists.txt
index ecb4f851be9d95a7c894d1e2ef2b3d9ce83067d3..2a8eb0a39d89275ae204e6405de2b774b4412619 100644
--- a/source/dnode/mnode/impl/test/func/CMakeLists.txt
+++ b/source/dnode/mnode/impl/test/func/CMakeLists.txt
@@ -5,7 +5,9 @@ target_link_libraries(
PUBLIC sut
)
-add_test(
- NAME funcTest
- COMMAND funcTest
-)
+if(NOT TD_WINDOWS)
+ add_test(
+ NAME funcTest
+ COMMAND funcTest
+ )
+endif(NOT TD_WINDOWS)
diff --git a/source/dnode/mnode/impl/test/profile/CMakeLists.txt b/source/dnode/mnode/impl/test/profile/CMakeLists.txt
index 8b811ebfed3a56ab139ecfc81f3556af2f9bb032..b6586192b2b4c6e428c2f00fddb11527a1747707 100644
--- a/source/dnode/mnode/impl/test/profile/CMakeLists.txt
+++ b/source/dnode/mnode/impl/test/profile/CMakeLists.txt
@@ -5,7 +5,9 @@ target_link_libraries(
PUBLIC sut
)
-add_test(
- NAME profileTest
- COMMAND profileTest
-)
+if(NOT TD_WINDOWS)
+ add_test(
+ NAME profileTest
+ COMMAND profileTest
+ )
+endif(NOT TD_WINDOWS)
diff --git a/source/dnode/mnode/impl/test/sdb/sdbTest.cpp b/source/dnode/mnode/impl/test/sdb/sdbTest.cpp
index df535c4456615b8b501236f2c7ad1684c2f4ac6f..43be55dd1de822d098475747a7b5b6452f379058 100644
--- a/source/dnode/mnode/impl/test/sdb/sdbTest.cpp
+++ b/source/dnode/mnode/impl/test/sdb/sdbTest.cpp
@@ -492,7 +492,7 @@ TEST_F(MndTestSdb, 01_Write_Str) {
ASSERT_EQ(sdbGetSize(pSdb, SDB_USER), 2);
ASSERT_EQ(sdbGetMaxId(pSdb, SDB_USER), -1);
- ASSERT_EQ(sdbGetTableVer(pSdb, SDB_USER), 2 );
+ ASSERT_EQ(sdbGetTableVer(pSdb, SDB_USER), 2);
sdbSetApplyIndex(pSdb, -1);
ASSERT_EQ(sdbGetApplyIndex(pSdb), -1);
ASSERT_EQ(mnode.insertTimes, 2);
@@ -895,7 +895,35 @@ TEST_F(MndTestSdb, 01_Read_Str) {
ASSERT_EQ(code, TSDB_CODE_SDB_OBJ_CREATING);
}
+ {
+ SSdbIter *pReader = NULL;
+ SSdbIter *pWritter = NULL;
+ void *pBuf = NULL;
+ int32_t len = 0;
+ int32_t code = 0;
+
+ code = sdbStartRead(pSdb, &pReader);
+ ASSERT_EQ(code, 0);
+ code = sdbStartWrite(pSdb, &pWritter);
+ ASSERT_EQ(code, 0);
+
+ while (sdbDoRead(pSdb, pReader, &pBuf, &len) == 0) {
+ if (pBuf != NULL && len != 0) {
+ sdbDoWrite(pSdb, pWritter, pBuf, len);
+ taosMemoryFree(pBuf);
+ } else {
+ break;
+ }
+ }
+
+ sdbStopRead(pSdb, pReader);
+ sdbStopWrite(pSdb, pWritter, true);
+ }
+
+ ASSERT_EQ(sdbGetSize(pSdb, SDB_CONSUMER), 1);
+ ASSERT_EQ(sdbGetTableVer(pSdb, SDB_CONSUMER), 4);
+
sdbCleanup(pSdb);
- ASSERT_EQ(mnode.insertTimes, 5);
- ASSERT_EQ(mnode.deleteTimes, 5);
+ ASSERT_EQ(mnode.insertTimes, 9);
+ ASSERT_EQ(mnode.deleteTimes, 9);
}
\ No newline at end of file
diff --git a/source/dnode/mnode/impl/test/show/CMakeLists.txt b/source/dnode/mnode/impl/test/show/CMakeLists.txt
index 69e93e7086147de77676ea02017a6ce5533acf42..9b4e21501ed478e527adfa69a5a2297e173876e1 100644
--- a/source/dnode/mnode/impl/test/show/CMakeLists.txt
+++ b/source/dnode/mnode/impl/test/show/CMakeLists.txt
@@ -5,7 +5,9 @@ target_link_libraries(
PUBLIC sut
)
-add_test(
- NAME showTest
- COMMAND showTest
-)
+if(NOT TD_WINDOWS)
+ add_test(
+ NAME showTest
+ COMMAND showTest
+ )
+endif(NOT TD_WINDOWS)
diff --git a/source/dnode/mnode/impl/test/sma/CMakeLists.txt b/source/dnode/mnode/impl/test/sma/CMakeLists.txt
index 3f9ec123a80e88371a98fa54c99342726831372d..fd596c5021674bb9d4ec185924129b0fd3bbade8 100644
--- a/source/dnode/mnode/impl/test/sma/CMakeLists.txt
+++ b/source/dnode/mnode/impl/test/sma/CMakeLists.txt
@@ -5,7 +5,9 @@ target_link_libraries(
PUBLIC sut
)
-add_test(
- NAME smaTest
- COMMAND smaTest
-)
+if(NOT TD_WINDOWS)
+ add_test(
+ NAME smaTest
+ COMMAND smaTest
+ )
+endif(NOT TD_WINDOWS)
diff --git a/source/dnode/mnode/impl/test/stb/CMakeLists.txt b/source/dnode/mnode/impl/test/stb/CMakeLists.txt
index d2fe3879979f4f52a215a3d44e25e912be3abb90..857c404c1c299767685fa1572a7f5a0b6463c939 100644
--- a/source/dnode/mnode/impl/test/stb/CMakeLists.txt
+++ b/source/dnode/mnode/impl/test/stb/CMakeLists.txt
@@ -5,7 +5,9 @@ target_link_libraries(
PUBLIC sut
)
-add_test(
- NAME stbTest
- COMMAND stbTest
-)
+if(NOT TD_WINDOWS)
+ add_test(
+ NAME stbTest
+ COMMAND stbTest
+ )
+endif(NOT TD_WINDOWS)
\ No newline at end of file
diff --git a/source/dnode/mnode/impl/test/stb/stb.cpp b/source/dnode/mnode/impl/test/stb/stb.cpp
index 56f1b8240d0ee28a7023a204ed2e10ca9f93cc7e..56b8936cf44f5520d1e72dfce8f0877fa4be6684 100644
--- a/source/dnode/mnode/impl/test/stb/stb.cpp
+++ b/source/dnode/mnode/impl/test/stb/stb.cpp
@@ -277,7 +277,8 @@ void* MndTestStb::BuildAlterStbUpdateColumnBytesReq(const char* stbname, const c
req.numOfFields = 1;
req.pFields = taosArrayInit(1, sizeof(SField));
req.alterType = TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES;
- req.verInBlock = verInBlock;
+ req.tagVer = verInBlock;
+ req.colVer = verInBlock;
SField field = {0};
field.bytes = bytes;
@@ -343,7 +344,7 @@ TEST_F(MndTestStb, 01_Create_Show_Meta_Drop_Restart_Stb) {
EXPECT_EQ(metaRsp.precision, TSDB_TIME_PRECISION_MILLI);
EXPECT_EQ(metaRsp.tableType, TSDB_SUPER_TABLE);
EXPECT_EQ(metaRsp.sversion, 1);
- EXPECT_EQ(metaRsp.tversion, 0);
+ EXPECT_EQ(metaRsp.tversion, 1);
EXPECT_GT(metaRsp.suid, 0);
EXPECT_GT(metaRsp.tuid, 0);
EXPECT_EQ(metaRsp.vgId, 0);
diff --git a/source/dnode/mnode/impl/test/trans/trans2.cpp b/source/dnode/mnode/impl/test/trans/trans2.cpp
index b78f1c7021ef44313a2a6393ecc58294921f2a18..022c82c73d66ab39f9cf07aeb34642278018722d 100644
--- a/source/dnode/mnode/impl/test/trans/trans2.cpp
+++ b/source/dnode/mnode/impl/test/trans/trans2.cpp
@@ -11,6 +11,8 @@
#include
+#if 0
+
#include "mndTrans.h"
#include "mndUser.h"
#include "tcache.h"
@@ -103,7 +105,7 @@ class MndTestTrans2 : public ::testing::Test {
void SetUp() override {}
void TearDown() override {}
- int32_t CreateUserLog(const char *acct, const char *user, ETrnType type, SDbObj *pDb) {
+ int32_t CreateUserLog(const char *acct, const char *user, ETrnConflct conflict, SDbObj *pDb) {
SUserObj userObj = {0};
taosEncryptPass_c((uint8_t *)"taosdata", strlen("taosdata"), userObj.pass);
tstrncpy(userObj.user, user, TSDB_USER_LEN);
@@ -113,7 +115,7 @@ class MndTestTrans2 : public ::testing::Test {
userObj.superUser = 1;
SRpcMsg rpcMsg = {0};
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, type, &rpcMsg);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, conflict, &rpcMsg);
SSdbRaw *pRedoRaw = mndUserActionEncode(&userObj);
mndTransAppendRedolog(pTrans, pRedoRaw);
sdbSetRawStatus(pRedoRaw, SDB_STATUS_READY);
@@ -123,10 +125,10 @@ class MndTestTrans2 : public ::testing::Test {
sdbSetRawStatus(pUndoRaw, SDB_STATUS_DROPPED);
char *param = strdup("====> test log <=====");
- mndTransSetCb(pTrans, TEST_TRANS_START_FUNC, TEST_TRANS_STOP_FUNC, param, strlen(param) + 1);
+ mndTransSetCb(pTrans, TRANS_START_FUNC_TEST, TRANS_STOP_FUNC_TEST, param, strlen(param) + 1);
if (pDb != NULL) {
- mndTransSetDbInfo(pTrans, pDb);
+ mndTransSetDbName(pTrans, pDb->name);
}
int32_t code = mndTransPrepare(pMnode, pTrans);
@@ -135,7 +137,7 @@ class MndTestTrans2 : public ::testing::Test {
return code;
}
- int32_t CreateUserAction(const char *acct, const char *user, bool hasUndoAction, ETrnPolicy policy, ETrnType type,
+ int32_t CreateUserAction(const char *acct, const char *user, bool hasUndoAction, ETrnPolicy policy, ETrnConflct conflict,
SDbObj *pDb) {
SUserObj userObj = {0};
taosEncryptPass_c((uint8_t *)"taosdata", strlen("taosdata"), userObj.pass);
@@ -146,7 +148,7 @@ class MndTestTrans2 : public ::testing::Test {
userObj.superUser = 1;
SRpcMsg rpcMsg = {0};
- STrans *pTrans = mndTransCreate(pMnode, policy, type, &rpcMsg);
+ STrans *pTrans = mndTransCreate(pMnode, policy, conflict, &rpcMsg);
SSdbRaw *pRedoRaw = mndUserActionEncode(&userObj);
mndTransAppendRedolog(pTrans, pRedoRaw);
sdbSetRawStatus(pRedoRaw, SDB_STATUS_READY);
@@ -156,7 +158,7 @@ class MndTestTrans2 : public ::testing::Test {
sdbSetRawStatus(pUndoRaw, SDB_STATUS_DROPPED);
char *param = strdup("====> test action <=====");
- mndTransSetCb(pTrans, TEST_TRANS_START_FUNC, TEST_TRANS_STOP_FUNC, param, strlen(param) + 1);
+ mndTransSetCb(pTrans, TRANS_START_FUNC_TEST, TRANS_STOP_FUNC_TEST, param, strlen(param) + 1);
{
STransAction action = {0};
@@ -199,7 +201,7 @@ class MndTestTrans2 : public ::testing::Test {
}
if (pDb != NULL) {
- mndTransSetDbInfo(pTrans, pDb);
+ mndTransSetDbName(pTrans, pDb->name);
}
int32_t code = mndTransPrepare(pMnode, pTrans);
@@ -218,7 +220,7 @@ class MndTestTrans2 : public ::testing::Test {
userObj.superUser = 1;
SRpcMsg rpcMsg = {0};
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_CREATE_USER, &rpcMsg);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, &rpcMsg);
SSdbRaw *pRedoRaw = mndUserActionEncode(&userObj);
mndTransAppendRedolog(pTrans, pRedoRaw);
sdbSetRawStatus(pRedoRaw, SDB_STATUS_READY);
@@ -228,7 +230,7 @@ class MndTestTrans2 : public ::testing::Test {
sdbSetRawStatus(pUndoRaw, SDB_STATUS_DROPPED);
char *param = strdup("====> test log <=====");
- mndTransSetCb(pTrans, TEST_TRANS_START_FUNC, TEST_TRANS_STOP_FUNC, param, strlen(param) + 1);
+ mndTransSetCb(pTrans, TRANS_START_FUNC_TEST, TRANS_STOP_FUNC_TEST, param, strlen(param) + 1);
int32_t code = mndTransPrepare(pMnode, pTrans);
mndTransDrop(pTrans);
@@ -528,3 +530,5 @@ TEST_F(MndTestTrans2, 04_Conflict) {
mndReleaseUser(pMnode, pUser);
}
}
+
+#endif
\ No newline at end of file
diff --git a/source/dnode/mnode/impl/test/user/CMakeLists.txt b/source/dnode/mnode/impl/test/user/CMakeLists.txt
index b39ea0e73f728cacc648f6eb0723328e028c05f4..ed4d96461742a77fd4a2ba3d0b9cd070c2f00c43 100644
--- a/source/dnode/mnode/impl/test/user/CMakeLists.txt
+++ b/source/dnode/mnode/impl/test/user/CMakeLists.txt
@@ -5,7 +5,9 @@ target_link_libraries(
PUBLIC sut
)
-add_test(
- NAME userTest
- COMMAND userTest
-)
+if(NOT TD_WINDOWS)
+ add_test(
+ NAME userTest
+ COMMAND userTest
+ )
+endif(NOT TD_WINDOWS)
diff --git a/source/dnode/mnode/sdb/CMakeLists.txt b/source/dnode/mnode/sdb/CMakeLists.txt
index e2ebed7a788c58cb6bbe2ba384eeabeb5cf3f2f0..2001a70da217d67e8a3b63137f40fbce9eaf6192 100644
--- a/source/dnode/mnode/sdb/CMakeLists.txt
+++ b/source/dnode/mnode/sdb/CMakeLists.txt
@@ -2,8 +2,7 @@ aux_source_directory(src MNODE_SRC)
add_library(sdb STATIC ${MNODE_SRC})
target_include_directories(
sdb
- PUBLIC "${TD_SOURCE_DIR}/include/dnode/mnode/sdb"
- PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
+ PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)
target_link_libraries(
sdb os common util wal
diff --git a/include/dnode/mnode/sdb/sdb.h b/source/dnode/mnode/sdb/inc/sdb.h
similarity index 83%
rename from include/dnode/mnode/sdb/sdb.h
rename to source/dnode/mnode/sdb/inc/sdb.h
index 94d41a7416679f496e2324e033ef667e262f3b1c..4a00befa1e8ec1b4ef4ff20a51a066ed08cf1883 100644
--- a/include/dnode/mnode/sdb/sdb.h
+++ b/source/dnode/mnode/sdb/inc/sdb.h
@@ -27,6 +27,15 @@
extern "C" {
#endif
+// clang-format off
+#define mFatal(...) { if (mDebugFlag & DEBUG_FATAL) { taosPrintLog("MND FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }}
+#define mError(...) { if (mDebugFlag & DEBUG_ERROR) { taosPrintLog("MND ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }}
+#define mWarn(...) { if (mDebugFlag & DEBUG_WARN) { taosPrintLog("MND WARN ", DEBUG_WARN, 255, __VA_ARGS__); }}
+#define mInfo(...) { if (mDebugFlag & DEBUG_INFO) { taosPrintLog("MND ", DEBUG_INFO, 255, __VA_ARGS__); }}
+#define mDebug(...) { if (mDebugFlag & DEBUG_DEBUG) { taosPrintLog("MND ", DEBUG_DEBUG, mDebugFlag, __VA_ARGS__); }}
+#define mTrace(...) { if (mDebugFlag & DEBUG_TRACE) { taosPrintLog("MND ", DEBUG_TRACE, mDebugFlag, __VA_ARGS__); }}
+// clang-format on
+
#define SDB_GET_VAL(pData, dataPos, val, pos, func, type) \
{ \
if (func(pRaw, dataPos, val) != 0) { \
@@ -44,12 +53,9 @@ extern "C" {
}
#define SDB_GET_INT64(pData, dataPos, val, pos) SDB_GET_VAL(pData, dataPos, val, pos, sdbGetRawInt64, int64_t)
-
#define SDB_GET_INT32(pData, dataPos, val, pos) SDB_GET_VAL(pData, dataPos, val, pos, sdbGetRawInt32, int32_t)
-
#define SDB_GET_INT16(pData, dataPos, val, pos) SDB_GET_VAL(pData, dataPos, val, pos, sdbGetRawInt16, int16_t)
-
-#define SDB_GET_INT8(pData, dataPos, val, pos) SDB_GET_VAL(pData, dataPos, val, pos, sdbGetRawInt8, int8_t)
+#define SDB_GET_INT8(pData, dataPos, val, pos) SDB_GET_VAL(pData, dataPos, val, pos, sdbGetRawInt8, int8_t)
#define SDB_GET_RESERVE(pRaw, dataPos, valLen, pos) \
{ \
@@ -66,12 +72,9 @@ extern "C" {
}
#define SDB_SET_INT64(pRaw, dataPos, val, pos) SDB_SET_VAL(pRaw, dataPos, val, pos, sdbSetRawInt64, int64_t)
-
#define SDB_SET_INT32(pRaw, dataPos, val, pos) SDB_SET_VAL(pRaw, dataPos, val, pos, sdbSetRawInt32, int32_t)
-
#define SDB_SET_INT16(pRaw, dataPos, val, pos) SDB_SET_VAL(pRaw, dataPos, val, pos, sdbSetRawInt16, int16_t)
-
-#define SDB_SET_INT8(pRaw, dataPos, val, pos) SDB_SET_VAL(pRaw, dataPos, val, pos, sdbSetRawInt8, int8_t)
+#define SDB_SET_INT8(pRaw, dataPos, val, pos) SDB_SET_VAL(pRaw, dataPos, val, pos, sdbSetRawInt8, int8_t)
#define SDB_SET_BINARY(pRaw, dataPos, val, valLen, pos) \
{ \
@@ -95,8 +98,16 @@ extern "C" {
}
typedef struct SMnode SMnode;
+typedef struct SSdb SSdb;
typedef struct SSdbRaw SSdbRaw;
typedef struct SSdbRow SSdbRow;
+typedef int32_t (*SdbInsertFp)(SSdb *pSdb, void *pObj);
+typedef int32_t (*SdbUpdateFp)(SSdb *pSdb, void *pSrcObj, void *pDstObj);
+typedef int32_t (*SdbDeleteFp)(SSdb *pSdb, void *pObj, bool callFunc);
+typedef int32_t (*SdbDeployFp)(SMnode *pMnode);
+typedef SSdbRow *(*SdbDecodeFp)(SSdbRaw *pRaw);
+typedef SSdbRaw *(*SdbEncodeFp)(void *pObj);
+typedef bool (*sdbTraverseFp)(SMnode *pMnode, void *pObj, void *p1, void *p2, void *p3);
typedef enum {
SDB_KEY_BINARY = 1,
@@ -136,14 +147,49 @@ typedef enum {
SDB_MAX = 20
} ESdbType;
-typedef struct SSdb SSdb;
-typedef int32_t (*SdbInsertFp)(SSdb *pSdb, void *pObj);
-typedef int32_t (*SdbUpdateFp)(SSdb *pSdb, void *pSrcObj, void *pDstObj);
-typedef int32_t (*SdbDeleteFp)(SSdb *pSdb, void *pObj, bool callFunc);
-typedef int32_t (*SdbDeployFp)(SMnode *pMnode);
-typedef SSdbRow *(*SdbDecodeFp)(SSdbRaw *pRaw);
-typedef SSdbRaw *(*SdbEncodeFp)(void *pObj);
-typedef bool (*sdbTraverseFp)(SMnode *pMnode, void *pObj, void *p1, void *p2, void *p3);
+typedef struct SSdbRaw {
+ int8_t type;
+ int8_t status;
+ int8_t sver;
+ int8_t reserved;
+ int32_t dataLen;
+ char pData[];
+} SSdbRaw;
+
+typedef struct SSdbRow {
+ ESdbType type;
+ ESdbStatus status;
+ int32_t refCount;
+ char pObj[];
+} SSdbRow;
+
+typedef struct SSdb {
+ SMnode *pMnode;
+ char *currDir;
+ char *tmpDir;
+ int64_t lastCommitVer;
+ int64_t lastCommitTerm;
+ int64_t curVer;
+ int64_t curTerm;
+ int64_t tableVer[SDB_MAX];
+ int64_t maxId[SDB_MAX];
+ EKeyType keyTypes[SDB_MAX];
+ SHashObj *hashObjs[SDB_MAX];
+ TdThreadRwlock locks[SDB_MAX];
+ SdbInsertFp insertFps[SDB_MAX];
+ SdbUpdateFp updateFps[SDB_MAX];
+ SdbDeleteFp deleteFps[SDB_MAX];
+ SdbDeployFp deployFps[SDB_MAX];
+ SdbEncodeFp encodeFps[SDB_MAX];
+ SdbDecodeFp decodeFps[SDB_MAX];
+ TdThreadMutex filelock;
+} SSdb;
+
+typedef struct SSdbIter {
+ TdFilePtr file;
+ int64_t total;
+ char *name;
+} SSdbIter;
typedef struct {
ESdbType sdbType;
@@ -255,6 +301,7 @@ void sdbRelease(SSdb *pSdb, void *pObj);
* @return void* The next iterator of the table.
*/
void *sdbFetch(SSdb *pSdb, ESdbType type, void *pIter, void **ppObj);
+void *sdbFetchAll(SSdb *pSdb, ESdbType type, void *pIter, void **ppObj, ESdbStatus *status) ;
/**
* @brief Cancel a traversal
@@ -334,27 +381,19 @@ int32_t sdbGetRawTotalSize(SSdbRaw *pRaw);
SSdbRow *sdbAllocRow(int32_t objSize);
void *sdbGetRowObj(SSdbRow *pRow);
+void sdbFreeRow(SSdb *pSdb, SSdbRow *pRow, bool callFunc);
-typedef struct SSdb {
- SMnode *pMnode;
- char *currDir;
- char *syncDir;
- char *tmpDir;
- int64_t lastCommitVer;
- int64_t curVer;
- int64_t curTerm;
- int64_t tableVer[SDB_MAX];
- int64_t maxId[SDB_MAX];
- EKeyType keyTypes[SDB_MAX];
- SHashObj *hashObjs[SDB_MAX];
- TdThreadRwlock locks[SDB_MAX];
- SdbInsertFp insertFps[SDB_MAX];
- SdbUpdateFp updateFps[SDB_MAX];
- SdbDeleteFp deleteFps[SDB_MAX];
- SdbDeployFp deployFps[SDB_MAX];
- SdbEncodeFp encodeFps[SDB_MAX];
- SdbDecodeFp decodeFps[SDB_MAX];
-} SSdb;
+int32_t sdbStartRead(SSdb *pSdb, SSdbIter **ppIter);
+int32_t sdbStopRead(SSdb *pSdb, SSdbIter *pIter);
+int32_t sdbDoRead(SSdb *pSdb, SSdbIter *pIter, void **ppBuf, int32_t *len);
+
+int32_t sdbStartWrite(SSdb *pSdb, SSdbIter **ppIter);
+int32_t sdbStopWrite(SSdb *pSdb, SSdbIter *pIter, bool isApply);
+int32_t sdbDoWrite(SSdb *pSdb, SSdbIter *pIter, void *pBuf, int32_t len);
+
+const char *sdbTableName(ESdbType type);
+void sdbPrintOper(SSdb *pSdb, SSdbRow *pRow, const char *oper);
+int32_t sdbGetIdFromRaw(SSdb *pSdb, SSdbRaw *pRaw);
#ifdef __cplusplus
}
diff --git a/source/dnode/mnode/sdb/inc/sdbInt.h b/source/dnode/mnode/sdb/inc/sdbInt.h
deleted file mode 100644
index c49d6e8fb287619d9503282dd2e164ed432ce823..0000000000000000000000000000000000000000
--- a/source/dnode/mnode/sdb/inc/sdbInt.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-#ifndef _TD_SDB_INT_H_
-#define _TD_SDB_INT_H_
-
-#include "os.h"
-
-#include "sdb.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-// clang-format off
-#define mFatal(...) { if (mDebugFlag & DEBUG_FATAL) { taosPrintLog("MND FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }}
-#define mError(...) { if (mDebugFlag & DEBUG_ERROR) { taosPrintLog("MND ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }}
-#define mWarn(...) { if (mDebugFlag & DEBUG_WARN) { taosPrintLog("MND WARN ", DEBUG_WARN, 255, __VA_ARGS__); }}
-#define mInfo(...) { if (mDebugFlag & DEBUG_INFO) { taosPrintLog("MND ", DEBUG_INFO, 255, __VA_ARGS__); }}
-#define mDebug(...) { if (mDebugFlag & DEBUG_DEBUG) { taosPrintLog("MND ", DEBUG_DEBUG, mDebugFlag, __VA_ARGS__); }}
-#define mTrace(...) { if (mDebugFlag & DEBUG_TRACE) { taosPrintLog("MND ", DEBUG_TRACE, mDebugFlag, __VA_ARGS__); }}
-// clang-format on
-
-typedef struct SSdbRaw {
- int8_t type;
- int8_t status;
- int8_t sver;
- int8_t reserved;
- int32_t dataLen;
- char pData[];
-} SSdbRaw;
-
-typedef struct SSdbRow {
- ESdbType type;
- ESdbStatus status;
- int32_t refCount;
- char pObj[];
-} SSdbRow;
-
-const char *sdbTableName(ESdbType type);
-void sdbPrintOper(SSdb *pSdb, SSdbRow *pRow, const char *oper);
-
-void sdbFreeRow(SSdb *pSdb, SSdbRow *pRow, bool callFunc);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /*_TD_SDB_INT_H_*/
diff --git a/source/dnode/mnode/sdb/src/sdb.c b/source/dnode/mnode/sdb/src/sdb.c
index 7b90d8acb53083461220ac4cf6ab19c025bf2a72..0526ea5c2d65cee2b57d6312b92b90830bad0b8b 100644
--- a/source/dnode/mnode/sdb/src/sdb.c
+++ b/source/dnode/mnode/sdb/src/sdb.c
@@ -14,7 +14,7 @@
*/
#define _DEFAULT_SOURCE
-#include "sdbInt.h"
+#include "sdb.h"
static int32_t sdbCreateDir(SSdb *pSdb);
@@ -55,7 +55,9 @@ SSdb *sdbInit(SSdbOpt *pOption) {
pSdb->curVer = -1;
pSdb->curTerm = -1;
pSdb->lastCommitVer = -1;
+ pSdb->lastCommitTerm = -1;
pSdb->pMnode = pOption->pMnode;
+ taosThreadMutexInit(&pSdb->filelock, NULL);
mDebug("sdb init successfully");
return pSdb;
}
@@ -69,11 +71,8 @@ void sdbCleanup(SSdb *pSdb) {
taosMemoryFreeClear(pSdb->currDir);
}
- if (pSdb->syncDir != NULL) {
- taosMemoryFreeClear(pSdb->syncDir);
- }
-
if (pSdb->tmpDir != NULL) {
+ taosRemoveDir(pSdb->tmpDir);
taosMemoryFreeClear(pSdb->tmpDir);
}
@@ -104,6 +103,7 @@ void sdbCleanup(SSdb *pSdb) {
mDebug("sdb table:%s is cleaned up", sdbTableName(i));
}
+ taosThreadMutexDestroy(&pSdb->filelock);
taosMemoryFree(pSdb);
mDebug("sdb is cleaned up");
}
diff --git a/source/dnode/mnode/sdb/src/sdbFile.c b/source/dnode/mnode/sdb/src/sdbFile.c
index b000c208c87b0393616cf0fb1d4a0cdbc08782b7..83135491a993e5f8106ed05409255951342c0ac7 100644
--- a/source/dnode/mnode/sdb/src/sdbFile.c
+++ b/source/dnode/mnode/sdb/src/sdbFile.c
@@ -14,7 +14,7 @@
*/
#define _DEFAULT_SOURCE
-#include "sdbInt.h"
+#include "sdb.h"
#include "tchecksum.h"
#include "wal.h"
@@ -22,13 +22,14 @@
#define SDB_RESERVE_SIZE 512
#define SDB_FILE_VER 1
-static int32_t sdbRunDeployFp(SSdb *pSdb) {
+static int32_t sdbDeployData(SSdb *pSdb) {
mDebug("start to deploy sdb");
for (int32_t i = SDB_MAX - 1; i >= 0; --i) {
SdbDeployFp fp = pSdb->deployFps[i];
if (fp == NULL) continue;
+ mDebug("start to deploy sdb:%s", sdbTableName(i));
if ((*fp)(pSdb->pMnode) != 0) {
mError("failed to deploy sdb:%s since %s", sdbTableName(i), terrstr());
return -1;
@@ -39,6 +40,40 @@ static int32_t sdbRunDeployFp(SSdb *pSdb) {
return 0;
}
+static void sdbResetData(SSdb *pSdb) {
+ mDebug("start to reset sdb");
+
+ for (ESdbType i = 0; i < SDB_MAX; ++i) {
+ SHashObj *hash = pSdb->hashObjs[i];
+ if (hash == NULL) continue;
+
+ SSdbRow **ppRow = taosHashIterate(hash, NULL);
+ while (ppRow != NULL) {
+ SSdbRow *pRow = *ppRow;
+ if (pRow == NULL) continue;
+
+ sdbFreeRow(pSdb, pRow, true);
+ ppRow = taosHashIterate(hash, ppRow);
+ }
+ }
+
+ for (ESdbType i = 0; i < SDB_MAX; ++i) {
+ SHashObj *hash = pSdb->hashObjs[i];
+ if (hash == NULL) continue;
+
+ taosHashClear(pSdb->hashObjs[i]);
+ pSdb->tableVer[i] = 0;
+ pSdb->maxId[i] = 0;
+ mDebug("sdb:%s is reset", sdbTableName(i));
+ }
+
+ pSdb->curVer = -1;
+ pSdb->curTerm = -1;
+ pSdb->lastCommitVer = -1;
+ pSdb->lastCommitTerm = -1;
+ mDebug("sdb reset successfully");
+}
+
static int32_t sdbReadFileHead(SSdb *pSdb, TdFilePtr pFile) {
int64_t sver = 0;
int32_t ret = taosReadFile(pFile, &sver, sizeof(int64_t));
@@ -169,35 +204,33 @@ static int32_t sdbWriteFileHead(SSdb *pSdb, TdFilePtr pFile) {
return 0;
}
-int32_t sdbReadFile(SSdb *pSdb) {
+static int32_t sdbReadFileImp(SSdb *pSdb) {
int64_t offset = 0;
int32_t code = 0;
int32_t readLen = 0;
int64_t ret = 0;
+ char file[PATH_MAX] = {0};
+
+ snprintf(file, sizeof(file), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP);
+ mDebug("start to read sdb file:%s", file);
SSdbRaw *pRaw = taosMemoryMalloc(WAL_MAX_SIZE + 100);
if (pRaw == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
- mError("failed read file since %s", terrstr());
+ mError("failed read sdb file since %s", terrstr());
return -1;
}
- char file[PATH_MAX] = {0};
- snprintf(file, sizeof(file), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP);
- mDebug("start to read file:%s", file);
-
TdFilePtr pFile = taosOpenFile(file, TD_FILE_READ);
if (pFile == NULL) {
taosMemoryFree(pRaw);
terrno = TAOS_SYSTEM_ERROR(errno);
- mError("failed to read file:%s since %s", file, terrstr());
+ mError("failed to read sdb file:%s since %s", file, terrstr());
return 0;
}
if (sdbReadFileHead(pSdb, pFile) != 0) {
- mError("failed to read file:%s head since %s", file, terrstr());
- pSdb->curVer = -1;
- pSdb->curTerm = -1;
+ mError("failed to read sdb file:%s head since %s", file, terrstr());
taosMemoryFree(pRaw);
taosCloseFile(&pFile);
return -1;
@@ -213,13 +246,13 @@ int32_t sdbReadFile(SSdb *pSdb) {
if (ret < 0) {
code = TAOS_SYSTEM_ERROR(errno);
- mError("failed to read file:%s since %s", file, tstrerror(code));
+ mError("failed to read sdb file:%s since %s", file, tstrerror(code));
break;
}
if (ret != readLen) {
code = TSDB_CODE_FILE_CORRUPTED;
- mError("failed to read file:%s since %s", file, tstrerror(code));
+ mError("failed to read sdb file:%s since %s", file, tstrerror(code));
break;
}
@@ -227,34 +260,36 @@ int32_t sdbReadFile(SSdb *pSdb) {
ret = taosReadFile(pFile, pRaw->pData, readLen);
if (ret < 0) {
code = TAOS_SYSTEM_ERROR(errno);
- mError("failed to read file:%s since %s", file, tstrerror(code));
+ mError("failed to read sdb file:%s since %s", file, tstrerror(code));
break;
}
if (ret != readLen) {
code = TSDB_CODE_FILE_CORRUPTED;
- mError("failed to read file:%s since %s", file, tstrerror(code));
+ mError("failed to read sdb file:%s since %s", file, tstrerror(code));
break;
}
int32_t totalLen = sizeof(SSdbRaw) + pRaw->dataLen + sizeof(int32_t);
if ((!taosCheckChecksumWhole((const uint8_t *)pRaw, totalLen)) != 0) {
code = TSDB_CODE_CHECKSUM_ERROR;
- mError("failed to read file:%s since %s", file, tstrerror(code));
+ mError("failed to read sdb file:%s since %s", file, tstrerror(code));
break;
}
code = sdbWriteWithoutFree(pSdb, pRaw);
if (code != 0) {
- mError("failed to read file:%s since %s", file, terrstr());
+ mError("failed to read sdb file:%s since %s", file, terrstr());
goto _OVER;
}
}
code = 0;
pSdb->lastCommitVer = pSdb->curVer;
+ pSdb->lastCommitTerm = pSdb->curTerm;
memcpy(pSdb->tableVer, tableVer, sizeof(tableVer));
- mDebug("read file:%s successfully, ver:%" PRId64, file, pSdb->lastCommitVer);
+ mDebug("read sdb file:%s successfully, ver:%" PRId64 " term:%" PRId64, file, pSdb->lastCommitVer,
+ pSdb->lastCommitTerm);
_OVER:
taosCloseFile(&pFile);
@@ -264,6 +299,20 @@ _OVER:
return code;
}
+int32_t sdbReadFile(SSdb *pSdb) {
+ taosThreadMutexLock(&pSdb->filelock);
+
+ sdbResetData(pSdb);
+ int32_t code = sdbReadFileImp(pSdb);
+ if (code != 0) {
+ mError("failed to read sdb file since %s", terrstr());
+ sdbResetData(pSdb);
+ }
+
+ taosThreadMutexUnlock(&pSdb->filelock);
+ return code;
+}
+
static int32_t sdbWriteFileImp(SSdb *pSdb) {
int32_t code = 0;
@@ -272,18 +321,19 @@ static int32_t sdbWriteFileImp(SSdb *pSdb) {
char curfile[PATH_MAX] = {0};
snprintf(curfile, sizeof(curfile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP);
- mDebug("start to write file:%s, current ver:%" PRId64 " term:%" PRId64 ", commit ver:%" PRId64, curfile, pSdb->curVer,
- pSdb->curTerm, pSdb->lastCommitVer);
+ mDebug("start to write sdb file, current ver:%" PRId64 " term:%" PRId64 ", commit ver:%" PRId64 " term:%" PRId64
+ " file:%s",
+ pSdb->curVer, pSdb->curTerm, pSdb->lastCommitVer, pSdb->lastCommitTerm, curfile);
TdFilePtr pFile = taosOpenFile(tmpfile, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC);
if (pFile == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
- mError("failed to open file:%s for write since %s", tmpfile, terrstr());
+ mError("failed to open sdb file:%s for write since %s", tmpfile, terrstr());
return -1;
}
if (sdbWriteFileHead(pSdb, pFile) != 0) {
- mError("failed to write file:%s head since %s", tmpfile, terrstr());
+ mError("failed to write sdb file:%s head since %s", tmpfile, terrstr());
taosCloseFile(&pFile);
return -1;
}
@@ -292,7 +342,7 @@ static int32_t sdbWriteFileImp(SSdb *pSdb) {
SdbEncodeFp encodeFp = pSdb->encodeFps[i];
if (encodeFp == NULL) continue;
- mTrace("write %s to file, total %d rows", sdbTableName(i), sdbGetSize(pSdb, i));
+ mTrace("write %s to sdb file, total %d rows", sdbTableName(i), sdbGetSize(pSdb, i));
SHashObj *hash = pSdb->hashObjs[i];
TdThreadRwlock *pLock = &pSdb->locks[i];
@@ -348,7 +398,7 @@ static int32_t sdbWriteFileImp(SSdb *pSdb) {
code = taosFsyncFile(pFile);
if (code != 0) {
code = TAOS_SYSTEM_ERROR(errno);
- mError("failed to sync file:%s since %s", tmpfile, tstrerror(code));
+ mError("failed to sync sdb file:%s since %s", tmpfile, tstrerror(code));
}
}
@@ -358,15 +408,17 @@ static int32_t sdbWriteFileImp(SSdb *pSdb) {
code = taosRenameFile(tmpfile, curfile);
if (code != 0) {
code = TAOS_SYSTEM_ERROR(errno);
- mError("failed to write file:%s since %s", curfile, tstrerror(code));
+ mError("failed to write sdb file:%s since %s", curfile, tstrerror(code));
}
}
if (code != 0) {
- mError("failed to write file:%s since %s", curfile, tstrerror(code));
+ mError("failed to write sdb file:%s since %s", curfile, tstrerror(code));
} else {
pSdb->lastCommitVer = pSdb->curVer;
- mDebug("write file:%s successfully, ver:%" PRId64 " term:%" PRId64, curfile, pSdb->lastCommitVer, pSdb->curTerm);
+ pSdb->lastCommitTerm = pSdb->curTerm;
+ mDebug("write sdb file successfully, ver:%" PRId64 " term:%" PRId64 " file:%s", pSdb->lastCommitVer,
+ pSdb->lastCommitTerm, curfile);
}
terrno = code;
@@ -378,17 +430,187 @@ int32_t sdbWriteFile(SSdb *pSdb) {
return 0;
}
- return sdbWriteFileImp(pSdb);
+ taosThreadMutexLock(&pSdb->filelock);
+ int32_t code = sdbWriteFileImp(pSdb);
+ if (code != 0) {
+ mError("failed to write sdb file since %s", terrstr());
+ }
+ taosThreadMutexUnlock(&pSdb->filelock);
+ return code;
}
int32_t sdbDeploy(SSdb *pSdb) {
- if (sdbRunDeployFp(pSdb) != 0) {
+ if (sdbDeployData(pSdb) != 0) {
return -1;
}
- if (sdbWriteFileImp(pSdb) != 0) {
+ if (sdbWriteFile(pSdb) != 0) {
return -1;
}
return 0;
}
+
+static SSdbIter *sdbCreateIter(SSdb *pSdb) {
+ SSdbIter *pIter = taosMemoryCalloc(1, sizeof(SSdbIter));
+ if (pIter == NULL) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return NULL;
+ }
+
+ char name[PATH_MAX + 100] = {0};
+ snprintf(name, sizeof(name), "%s%ssdb.data.%" PRIu64, pSdb->tmpDir, TD_DIRSEP, (uint64_t)pIter);
+ pIter->name = strdup(name);
+ if (pIter->name == NULL) {
+ taosMemoryFree(pIter);
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return NULL;
+ }
+
+ return pIter;
+}
+
+static void sdbCloseIter(SSdbIter *pIter) {
+ if (pIter == NULL) return;
+
+ if (pIter->file != NULL) {
+ taosCloseFile(&pIter->file);
+ pIter->file = NULL;
+ }
+
+ if (pIter->name != NULL) {
+ taosRemoveFile(pIter->name);
+ taosMemoryFree(pIter->name);
+ pIter->name = NULL;
+ }
+
+ mInfo("sdbiter:%p, is closed, total:%" PRId64, pIter, pIter->total);
+ taosMemoryFree(pIter);
+}
+
+int32_t sdbStartRead(SSdb *pSdb, SSdbIter **ppIter) {
+ SSdbIter *pIter = sdbCreateIter(pSdb);
+ if (pIter == NULL) return -1;
+
+ char datafile[PATH_MAX] = {0};
+ snprintf(datafile, sizeof(datafile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP);
+
+ taosThreadMutexLock(&pSdb->filelock);
+ if (taosCopyFile(datafile, pIter->name) < 0) {
+ taosThreadMutexUnlock(&pSdb->filelock);
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ mError("failed to copy sdb file %s to %s since %s", datafile, pIter->name, terrstr());
+ sdbCloseIter(pIter);
+ return -1;
+ }
+ taosThreadMutexUnlock(&pSdb->filelock);
+
+ pIter->file = taosOpenFile(pIter->name, TD_FILE_READ);
+ if (pIter->file == NULL) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ mError("failed to open sdb file:%s since %s", pIter->name, terrstr());
+ sdbCloseIter(pIter);
+ return -1;
+ }
+
+ *ppIter = pIter;
+ mInfo("sdbiter:%p, is created to read snapshot, file:%s", pIter, pIter->name);
+ return 0;
+}
+
+int32_t sdbStopRead(SSdb *pSdb, SSdbIter *pIter) {
+ sdbCloseIter(pIter);
+ return 0;
+}
+
+int32_t sdbDoRead(SSdb *pSdb, SSdbIter *pIter, void **ppBuf, int32_t *len) {
+ int32_t maxlen = 100;
+ void *pBuf = taosMemoryCalloc(1, maxlen);
+ if (pBuf == NULL) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return -1;
+ }
+
+ int32_t readlen = taosReadFile(pIter->file, pBuf, maxlen);
+ if (readlen < 0 || readlen > maxlen) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ mError("sdbiter:%p, failed to read snapshot since %s, total:%" PRId64, pIter, terrstr(), pIter->total);
+ *ppBuf = NULL;
+ *len = 0;
+ taosMemoryFree(pBuf);
+ return -1;
+ } else if (readlen == 0) {
+ mInfo("sdbiter:%p, read snapshot to the end, total:%" PRId64, pIter, pIter->total);
+ *ppBuf = NULL;
+ *len = 0;
+ taosMemoryFree(pBuf);
+ return 0;
+ } else { // (readlen <= maxlen)
+ pIter->total += readlen;
+ mInfo("sdbiter:%p, read:%d bytes from snapshot, total:%" PRId64, pIter, readlen, pIter->total);
+ *ppBuf = pBuf;
+ *len = readlen;
+ return 0;
+ }
+}
+
+int32_t sdbStartWrite(SSdb *pSdb, SSdbIter **ppIter) {
+ SSdbIter *pIter = sdbCreateIter(pSdb);
+ if (pIter == NULL) return -1;
+
+ pIter->file = taosOpenFile(pIter->name, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC);
+ if (pIter->file == NULL) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ mError("failed to open %s since %s", pIter->name, terrstr());
+ return -1;
+ }
+
+ *ppIter = pIter;
+ mInfo("sdbiter:%p, is created to write snapshot, file:%s", pIter, pIter->name);
+ return 0;
+}
+
+int32_t sdbStopWrite(SSdb *pSdb, SSdbIter *pIter, bool isApply) {
+ int32_t code = 0;
+
+ if (!isApply) {
+ sdbCloseIter(pIter);
+ mInfo("sdbiter:%p, not apply to sdb", pIter);
+ return 0;
+ }
+
+ taosFsyncFile(pIter->file);
+ taosCloseFile(&pIter->file);
+ pIter->file = NULL;
+
+ char datafile[PATH_MAX] = {0};
+ snprintf(datafile, sizeof(datafile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP);
+ if (taosRenameFile(pIter->name, datafile) != 0) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ mError("sdbiter:%p, failed to rename file %s to %s since %s", pIter, pIter->name, datafile, terrstr());
+ sdbCloseIter(pIter);
+ return -1;
+ }
+
+ sdbCloseIter(pIter);
+ if (sdbReadFile(pSdb) != 0) {
+ mError("sdbiter:%p, failed to read from %s since %s", pIter, datafile, terrstr());
+ return -1;
+ }
+
+ mInfo("sdbiter:%p, successfully applyed to sdb", pIter);
+ return 0;
+}
+
+int32_t sdbDoWrite(SSdb *pSdb, SSdbIter *pIter, void *pBuf, int32_t len) {
+ int32_t writelen = taosWriteFile(pIter->file, pBuf, len);
+ if (writelen != len) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ mError("failed to write len:%d since %s, total:%" PRId64, len, terrstr(), pIter->total);
+ return -1;
+ }
+
+ pIter->total += writelen;
+ mInfo("sdbiter:%p, write:%d bytes to snapshot, total:%" PRId64, pIter, writelen, pIter->total);
+ return 0;
+}
\ No newline at end of file
diff --git a/source/dnode/mnode/sdb/src/sdbHash.c b/source/dnode/mnode/sdb/src/sdbHash.c
index a25c7a5233d79049e22764717e95f95a1f0f3674..162da2bd0aaa3e2400f14cefa0596b5022e7afbe 100644
--- a/source/dnode/mnode/sdb/src/sdbHash.c
+++ b/source/dnode/mnode/sdb/src/sdbHash.c
@@ -14,7 +14,7 @@
*/
#define _DEFAULT_SOURCE
-#include "sdbInt.h"
+#include "sdb.h"
static void sdbCheckRow(SSdb *pSdb, SSdbRow *pRow);
@@ -368,6 +368,34 @@ void *sdbFetch(SSdb *pSdb, ESdbType type, void *pIter, void **ppObj) {
return ppRow;
}
+void *sdbFetchAll(SSdb *pSdb, ESdbType type, void *pIter, void **ppObj, ESdbStatus *status) {
+ *ppObj = NULL;
+
+ SHashObj *hash = sdbGetHash(pSdb, type);
+ if (hash == NULL) return NULL;
+
+ TdThreadRwlock *pLock = &pSdb->locks[type];
+ taosThreadRwlockRdlock(pLock);
+
+ SSdbRow **ppRow = taosHashIterate(hash, pIter);
+ while (ppRow != NULL) {
+ SSdbRow *pRow = *ppRow;
+ if (pRow == NULL) {
+ ppRow = taosHashIterate(hash, ppRow);
+ continue;
+ }
+
+ atomic_add_fetch_32(&pRow->refCount, 1);
+ sdbPrintOper(pSdb, pRow, "fetch");
+ *ppObj = pRow->pObj;
+ *status = pRow->status;
+ break;
+ }
+ taosThreadRwlockUnlock(pLock);
+
+ return ppRow;
+}
+
void sdbCancelFetch(SSdb *pSdb, void *pIter) {
if (pIter == NULL) return;
SSdbRow *pRow = *(SSdbRow **)pIter;
diff --git a/source/dnode/mnode/sdb/src/sdbRaw.c b/source/dnode/mnode/sdb/src/sdbRaw.c
index fd2f20c242bff4bf96fc1289b3996be9d87462af..90643a54a9de42d4f505fdcb4f1d25ef95b80ac7 100644
--- a/source/dnode/mnode/sdb/src/sdbRaw.c
+++ b/source/dnode/mnode/sdb/src/sdbRaw.c
@@ -14,7 +14,17 @@
*/
#define _DEFAULT_SOURCE
-#include "sdbInt.h"
+#include "sdb.h"
+
+int32_t sdbGetIdFromRaw(SSdb *pSdb, SSdbRaw *pRaw) {
+ EKeyType keytype = pSdb->keyTypes[pRaw->type];
+ if (keytype == SDB_KEY_INT32) {
+ int32_t id = *((int32_t *)(pRaw->pData));
+ return id;
+ } else {
+ return -2;
+ }
+}
SSdbRaw *sdbAllocRaw(ESdbType type, int8_t sver, int32_t dataLen) {
SSdbRaw *pRaw = taosMemoryCalloc(1, dataLen + sizeof(SSdbRaw));
diff --git a/source/dnode/mnode/sdb/src/sdbRow.c b/source/dnode/mnode/sdb/src/sdbRow.c
index 43f70cb2453358bf115cc44e65d13a5728c9160f..e57a6b028bf9b134c771e2cf82724951a8c87217 100644
--- a/source/dnode/mnode/sdb/src/sdbRow.c
+++ b/source/dnode/mnode/sdb/src/sdbRow.c
@@ -14,7 +14,7 @@
*/
#define _DEFAULT_SOURCE
-#include "sdbInt.h"
+#include "sdb.h"
SSdbRow *sdbAllocRow(int32_t objSize) {
SSdbRow *pRow = taosMemoryCalloc(1, objSize + sizeof(SSdbRow));
diff --git a/source/dnode/qnode/src/qnode.c b/source/dnode/qnode/src/qnode.c
index 929643fcdf91ef7ba0d6a02b8a07de34f0209d54..438982ac6ae2ca13f2244acd978bdc58c723d6de 100644
--- a/source/dnode/qnode/src/qnode.c
+++ b/source/dnode/qnode/src/qnode.c
@@ -40,46 +40,58 @@ void qndClose(SQnode *pQnode) {
taosMemoryFree(pQnode);
}
-int32_t qndGetLoad(SQnode *pQnode, SQnodeLoad *pLoad) { return 0; }
+int32_t qndGetLoad(SQnode *pQnode, SQnodeLoad *pLoad) {
+ SReadHandle handle = {.pMsgCb = &pQnode->msgCb};
+ SQWorkerStat stat = {0};
+
+ int32_t code = qWorkerGetStat(&handle, pQnode->pQuery, &stat);
+ if (code) {
+ return code;
+ }
+
+ pLoad->numOfQueryInQueue = stat.numOfQueryInQueue;
+ pLoad->numOfFetchInQueue = stat.numOfFetchInQueue;
+ pLoad->timeInQueryQueue = stat.timeInQueryQueue;
+ pLoad->timeInFetchQueue = stat.timeInFetchQueue;
+ pLoad->cacheDataSize = stat.cacheDataSize;
+ pLoad->numOfProcessedQuery = stat.queryProcessed;
+ pLoad->numOfProcessedCQuery = stat.cqueryProcessed;
+ pLoad->numOfProcessedFetch = stat.fetchProcessed;
+ pLoad->numOfProcessedDrop = stat.dropProcessed;
+ pLoad->numOfProcessedHb = stat.hbProcessed;
+
+ return 0;
+}
-int32_t qndProcessQueryMsg(SQnode *pQnode, SRpcMsg *pMsg) {
+int32_t qndProcessQueryMsg(SQnode *pQnode, int64_t ts, SRpcMsg *pMsg) {
int32_t code = -1;
SReadHandle handle = {.pMsgCb = &pQnode->msgCb};
qTrace("message in qnode queue is processing");
switch (pMsg->msgType) {
case TDMT_VND_QUERY:
- code = qWorkerProcessQueryMsg(&handle, pQnode->pQuery, pMsg);
+ code = qWorkerProcessQueryMsg(&handle, pQnode->pQuery, pMsg, ts);
break;
case TDMT_VND_QUERY_CONTINUE:
- code = qWorkerProcessCQueryMsg(&handle, pQnode->pQuery, pMsg);
+ code = qWorkerProcessCQueryMsg(&handle, pQnode->pQuery, pMsg, ts);
break;
case TDMT_VND_FETCH:
- code = qWorkerProcessFetchMsg(pQnode, pQnode->pQuery, pMsg);
+ code = qWorkerProcessFetchMsg(pQnode, pQnode->pQuery, pMsg, ts);
break;
case TDMT_VND_FETCH_RSP:
- code = qWorkerProcessFetchRsp(pQnode, pQnode->pQuery, pMsg);
- break;
- case TDMT_VND_RES_READY:
- code = qWorkerProcessReadyMsg(pQnode, pQnode->pQuery, pMsg);
- break;
- case TDMT_VND_TASKS_STATUS:
- code = qWorkerProcessStatusMsg(pQnode, pQnode->pQuery, pMsg);
+ code = qWorkerProcessFetchRsp(pQnode, pQnode->pQuery, pMsg, ts);
break;
case TDMT_VND_CANCEL_TASK:
- code = qWorkerProcessCancelMsg(pQnode, pQnode->pQuery, pMsg);
+ code = qWorkerProcessCancelMsg(pQnode, pQnode->pQuery, pMsg, ts);
break;
case TDMT_VND_DROP_TASK:
- code = qWorkerProcessDropMsg(pQnode, pQnode->pQuery, pMsg);
+ code = qWorkerProcessDropMsg(pQnode, pQnode->pQuery, pMsg, ts);
break;
- case TDMT_VND_TABLE_META:
- // code = vnodeGetTableMeta(pQnode, pMsg);
- // break;
case TDMT_VND_CONSUME:
// code = tqProcessConsumeReq(pQnode->pTq, pMsg);
// break;
case TDMT_VND_QUERY_HEARTBEAT:
- code = qWorkerProcessHbMsg(pQnode, pQnode->pQuery, pMsg);
+ code = qWorkerProcessHbMsg(pQnode, pQnode->pQuery, pMsg, ts);
break;
default:
qError("unknown msg type:%d in qnode queue", pMsg->msgType);
diff --git a/source/dnode/snode/src/snode.c b/source/dnode/snode/src/snode.c
index ec75ffcae1b2b00bf2881b1fbf6e32c5d4f8a481..37b406466df5e229833d8271c81cdfeb28084a80 100644
--- a/source/dnode/snode/src/snode.c
+++ b/source/dnode/snode/src/snode.c
@@ -103,8 +103,8 @@ void sndProcessUMsg(SSnode *pSnode, SRpcMsg *pMsg) {
tDecoderClear(&decoder);
sndMetaDeployTask(pSnode->pMeta, pTask);
- } else if (pMsg->msgType == TDMT_SND_TASK_EXEC) {
- sndProcessTaskExecReq(pSnode, pMsg);
+ /*} else if (pMsg->msgType == TDMT_SND_TASK_EXEC) {*/
+ /*sndProcessTaskExecReq(pSnode, pMsg);*/
} else {
ASSERT(0);
}
@@ -112,9 +112,9 @@ void sndProcessUMsg(SSnode *pSnode, SRpcMsg *pMsg) {
void sndProcessSMsg(SSnode *pSnode, SRpcMsg *pMsg) {
// operator exec
- if (pMsg->msgType == TDMT_SND_TASK_EXEC) {
- sndProcessTaskExecReq(pSnode, pMsg);
- } else {
- ASSERT(0);
- }
+ /*if (pMsg->msgType == TDMT_SND_TASK_EXEC) {*/
+ /*sndProcessTaskExecReq(pSnode, pMsg);*/
+ /*} else {*/
+ ASSERT(0);
+ /*}*/
}
diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt
index 4141485d28baf839a222676b4e9eb50286156280..05b4a270d6d6b7bdbf524c8c344b909a64a8c852 100644
--- a/source/dnode/vnode/CMakeLists.txt
+++ b/source/dnode/vnode/CMakeLists.txt
@@ -13,6 +13,8 @@ target_sources(
"src/vnd/vnodeModule.c"
"src/vnd/vnodeSvr.c"
"src/vnd/vnodeSync.c"
+ "src/vnd/vnodeSnapshot.c"
+ "src/vnd/vnodeUtil.c"
# meta
"src/meta/metaOpen.c"
@@ -22,6 +24,7 @@ target_sources(
"src/meta/metaQuery.c"
"src/meta/metaCommit.c"
"src/meta/metaEntry.c"
+ "src/meta/metaSnapshot.c"
# sma
"src/sma/sma.c"
@@ -29,10 +32,9 @@ target_sources(
"src/sma/smaEnv.c"
"src/sma/smaOpen.c"
"src/sma/smaRollup.c"
- "src/sma/smaTimeRange.c"
+ "src/sma/smaTimeRange2.c"
# tsdb
- # "src/tsdb/tsdbTDBImpl.c"
"src/tsdb/tsdbCommit.c"
"src/tsdb/tsdbCommit2.c"
"src/tsdb/tsdbFile.c"
@@ -42,15 +44,18 @@ target_sources(
"src/tsdb/tsdbMemTable2.c"
"src/tsdb/tsdbRead.c"
"src/tsdb/tsdbReadImpl.c"
- # "src/tsdb/tsdbSma.c"
"src/tsdb/tsdbWrite.c"
+ "src/tsdb/tsdbSnapshot.c"
# tq
"src/tq/tq.c"
- "src/tq/tqCommit.c"
+ "src/tq/tqExec.c"
+ "src/tq/tqMeta.c"
+ "src/tq/tqRead.c"
"src/tq/tqOffset.c"
"src/tq/tqPush.c"
- "src/tq/tqRead.c"
+ "src/tq/tqSink.c"
+ "src/tq/tqCommit.c"
)
target_include_directories(
vnode
diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h
index 9e33973c05de18139b642d1af2e854f2f6dc712c..35b2b935467507e7b5662a1493fec2c38805abc2 100644
--- a/source/dnode/vnode/inc/vnode.h
+++ b/source/dnode/vnode/inc/vnode.h
@@ -39,9 +39,10 @@ extern "C" {
#endif
// vnode
-typedef struct SVnode SVnode;
-typedef struct STsdbCfg STsdbCfg; // todo: remove
-typedef struct SVnodeCfg SVnodeCfg;
+typedef struct SVnode SVnode;
+typedef struct STsdbCfg STsdbCfg; // todo: remove
+typedef struct SVnodeCfg SVnodeCfg;
+typedef struct SVSnapshotReader SVSnapshotReader;
extern const SVnodeCfg vnodeCfgDefault;
@@ -59,13 +60,15 @@ int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg);
int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo);
int32_t vnodeGetLoad(SVnode *pVnode, SVnodeLoad *pLoad);
int32_t vnodeValidateTableHash(SVnode *pVnode, char *tableFName);
-
int32_t vnodeStart(SVnode *pVnode);
void vnodeStop(SVnode *pVnode);
-
int64_t vnodeGetSyncHandle(SVnode *pVnode);
void vnodeGetSnapshot(SVnode *pVnode, SSnapshot *pSnapshot);
void vnodeGetInfo(SVnode *pVnode, const char **dbname, int32_t *vgId);
+int32_t vnodeSnapshotReaderOpen(SVnode *pVnode, SVSnapshotReader **ppReader, int64_t sver, int64_t ever);
+int32_t vnodeSnapshotReaderClose(SVSnapshotReader *pReader);
+int32_t vnodeSnapshotRead(SVSnapshotReader *pReader, const void **ppData, uint32_t *nData);
+int32_t vnodeProcessCreateTSma(SVnode *pVnode, void *pCont, uint32_t contLen);
// meta
typedef struct SMeta SMeta; // todo: remove
@@ -76,7 +79,19 @@ void metaReaderInit(SMetaReader *pReader, SMeta *pMeta, int32_t flags);
void metaReaderClear(SMetaReader *pReader);
int32_t metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid);
int32_t metaReadNext(SMetaReader *pReader);
-const void *metaGetTableTagVal(SMetaEntry *pEntry, int16_t cid);
+const void *metaGetTableTagVal(SMetaEntry *pEntry, int16_t type, STagVal *tagVal);
+
+typedef struct SMetaFltParam {
+ tb_uid_t suid;
+ int16_t cid;
+ int16_t type;
+ char *val;
+ bool reverse;
+ int (*filterFunc)(void *a, void *b, int16_t type);
+
+} SMetaFltParam;
+
+int32_t metaFilteTableIds(SMeta *pMeta, SMetaFltParam *param, SArray *results);
#if 1 // refact APIs below (TODO)
typedef SVCreateTbReq STbCfg;
@@ -97,24 +112,22 @@ typedef void *tsdbReaderT;
#define BLOCK_LOAD_TABLE_SEQ_ORDER 2
#define BLOCK_LOAD_TABLE_RR_ORDER 3
-tsdbReaderT *tsdbQueryTables(SVnode *pVnode, SQueryTableDataCond *pCond, STableGroupInfo *tableInfoGroup, uint64_t qId,
+tsdbReaderT *tsdbQueryTables(SVnode *pVnode, SQueryTableDataCond *pCond, STableListInfo *tableInfoGroup, uint64_t qId,
uint64_t taskId);
-tsdbReaderT tsdbQueryCacheLast(SVnode *pVnode, SQueryTableDataCond *pCond, STableGroupInfo *groupList, uint64_t qId,
+tsdbReaderT tsdbQueryCacheLast(SVnode *pVnode, SQueryTableDataCond *pCond, STableListInfo *groupList, uint64_t qId,
void *pMemRef);
int32_t tsdbGetFileBlocksDistInfo(tsdbReaderT *pReader, STableBlockDistInfo *pTableBlockInfo);
bool isTsdbCacheLastRow(tsdbReaderT *pReader);
-int32_t tsdbQuerySTableByTagCond(void *pMeta, uint64_t uid, TSKEY skey, const char *pTagCond, size_t len,
- int16_t tagNameRelType, const char *tbnameCond, STableGroupInfo *pGroupInfo,
- SColIndex *pColIndex, int32_t numOfCols, uint64_t reqId, uint64_t taskId);
+int32_t tsdbGetAllTableList(SMeta *pMeta, uint64_t uid, SArray *list);
+int32_t tsdbGetCtbIdList(SMeta *pMeta, int64_t suid, SArray *list);
+void *tsdbGetIdx(SMeta *pMeta);
int64_t tsdbGetNumOfRowsInMemTable(tsdbReaderT *pHandle);
-bool tsdbNextDataBlock(tsdbReaderT pTsdbReadHandle);
-void tsdbRetrieveDataBlockInfo(tsdbReaderT *pTsdbReadHandle, SDataBlockInfo *pBlockInfo);
+
+bool tsdbNextDataBlock(tsdbReaderT pTsdbReadHandle);
+void tsdbRetrieveDataBlockInfo(tsdbReaderT *pTsdbReadHandle, SDataBlockInfo *pBlockInfo);
int32_t tsdbRetrieveDataBlockStatisInfo(tsdbReaderT *pTsdbReadHandle, SColumnDataAgg ***pBlockStatis, bool *allHave);
SArray *tsdbRetrieveDataBlock(tsdbReaderT *pTsdbReadHandle, SArray *pColumnIdList);
-void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond *pCond);
-void tsdbDestroyTableGroup(STableGroupInfo *pGroupList);
-int32_t tsdbGetOneTableGroup(void *pMeta, uint64_t uid, TSKEY startKey, STableGroupInfo *pGroupInfo);
-int32_t tsdbGetTableGroupFromIdList(SVnode *pVnode, SArray *pTableIdList, STableGroupInfo *pGroupInfo);
+void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond *pCond, int32_t tWinIdx);
void tsdbCleanupReadHandle(tsdbReaderT queryHandle);
// tq
@@ -134,6 +147,9 @@ bool tqNextDataBlockFilterOut(STqReadHandle *pHandle, SHashObj *filterOutUids
int32_t tqRetrieveDataBlock(SArray **ppCols, STqReadHandle *pHandle, uint64_t *pGroupId, uint64_t *pUid,
int32_t *pNumOfRows, int16_t *pNumOfCols);
+// sma
+int32_t smaGetTSmaDays(SVnodeCfg *pCfg, void *pCont, uint32_t contLen, int32_t *days);
+
// need to reposition
// structs
@@ -160,12 +176,15 @@ struct SVnodeCfg {
uint64_t szBuf;
bool isHeap;
bool isWeak;
+ int8_t isTsma;
+ int8_t isRsma;
+ int8_t hashMethod;
+ int8_t standby;
STsdbCfg tsdbCfg;
SWalCfg walCfg;
SSyncCfg syncCfg;
uint32_t hashBegin;
uint32_t hashEnd;
- int8_t hashMethod;
};
typedef struct {
@@ -180,7 +199,7 @@ struct SMetaEntry {
char *name;
union {
struct {
- SSchemaWrapper schema;
+ SSchemaWrapper schemaRow;
SSchemaWrapper schemaTag;
} stbEntry;
struct {
@@ -193,7 +212,7 @@ struct SMetaEntry {
int64_t ctime;
int32_t ttlDays;
int32_t ncid; // next column id
- SSchemaWrapper schema;
+ SSchemaWrapper schemaRow;
} ntbEntry;
struct {
STSma *tsma;
diff --git a/source/dnode/vnode/src/inc/meta.h b/source/dnode/vnode/src/inc/meta.h
index 3340bbb91ce0f8ed29b2ef48fc325472676b56e1..b610676c19db7b9cdb9528b3d8044e883d811780 100644
--- a/source/dnode/vnode/src/inc/meta.h
+++ b/source/dnode/vnode/src/inc/meta.h
@@ -16,8 +16,8 @@
#ifndef _TD_VNODE_META_H_
#define _TD_VNODE_META_H_
-#include "vnodeInt.h"
#include "index.h"
+#include "vnodeInt.h"
#ifdef __cplusplus
extern "C" {
@@ -28,12 +28,12 @@ typedef struct SMetaDB SMetaDB;
// metaDebug ==================
// clang-format off
-#define metaFatal(...) do { if (metaDebugFlag & DEBUG_FATAL) { taosPrintLog("META FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} while(0)
-#define metaError(...) do { if (metaDebugFlag & DEBUG_ERROR) { taosPrintLog("META ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} while(0)
-#define metaWarn(...) do { if (metaDebugFlag & DEBUG_WARN) { taosPrintLog("META WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} while(0)
-#define metaInfo(...) do { if (metaDebugFlag & DEBUG_INFO) { taosPrintLog("META ", DEBUG_INFO, 255, __VA_ARGS__); }} while(0)
-#define metaDebug(...) do { if (metaDebugFlag & DEBUG_DEBUG) { taosPrintLog("META ", DEBUG_DEBUG, metaDebugFlag, __VA_ARGS__); }} while(0)
-#define metaTrace(...) do { if (metaDebugFlag & DEBUG_TRACE) { taosPrintLog("META ", DEBUG_TRACE, metaDebugFlag, __VA_ARGS__); }} while(0)
+#define metaFatal(...) do { if (metaDebugFlag & DEBUG_FATAL) { taosPrintLog("MTA FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} while(0)
+#define metaError(...) do { if (metaDebugFlag & DEBUG_ERROR) { taosPrintLog("MTA ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} while(0)
+#define metaWarn(...) do { if (metaDebugFlag & DEBUG_WARN) { taosPrintLog("MTA WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} while(0)
+#define metaInfo(...) do { if (metaDebugFlag & DEBUG_INFO) { taosPrintLog("MTA ", DEBUG_INFO, 255, __VA_ARGS__); }} while(0)
+#define metaDebug(...) do { if (metaDebugFlag & DEBUG_DEBUG) { taosPrintLog("MTA ", DEBUG_DEBUG, metaDebugFlag, __VA_ARGS__); }} while(0)
+#define metaTrace(...) do { if (metaDebugFlag & DEBUG_TRACE) { taosPrintLog("MTA ", DEBUG_TRACE, metaDebugFlag, __VA_ARGS__); }} while(0)
// clang-format on
// metaOpen ==================
@@ -45,8 +45,6 @@ int32_t metaULock(SMeta* pMeta);
int metaEncodeEntry(SEncoder* pCoder, const SMetaEntry* pME);
int metaDecodeEntry(SDecoder* pCoder, SMetaEntry* pME);
-// metaTable ==================
-
// metaQuery ==================
int metaGetTableEntryByVersion(SMetaReader* pReader, int64_t version, tb_uid_t uid);
@@ -118,6 +116,10 @@ typedef struct {
int64_t smaUid;
} SSmaIdxKey;
+// metaTable ==================
+int metaCreateTagIdxKey(tb_uid_t suid, int32_t cid, const void* pTagData, int32_t nTagData, int8_t type, tb_uid_t uid,
+ STagIdxKey** ppTagIdxKey, int32_t* nTagIdxKey);
+
#ifndef META_REFACT
// SMetaDB
int metaOpenDB(SMeta* pMeta);
diff --git a/source/dnode/vnode/src/inc/sma.h b/source/dnode/vnode/src/inc/sma.h
index 0601df61e71317aed596d6f200cb8314156430f5..4ca62f1de9fbe9183d74e9df1dfeca8fbde2e0fb 100644
--- a/source/dnode/vnode/src/inc/sma.h
+++ b/source/dnode/vnode/src/inc/sma.h
@@ -219,12 +219,14 @@ static int32_t tdInitSmaEnv(SSma *pSma, int8_t smaType, const char *path, SDisk
void *tdFreeRSmaInfo(SRSmaInfo *pInfo);
int32_t tdProcessTSmaCreateImpl(SSma *pSma, int64_t version, const char *pMsg);
-int32_t tdUpdateExpiredWindowImpl(SSma *pSma, SSubmitReq *pMsg, int64_t version);
+int32_t tdUpdateExpiredWindowImpl(SSma *pSma, const SSubmitReq *pMsg, int64_t version);
// TODO: This is the basic params, and should wrap the params to a queryHandle.
int32_t tdGetTSmaDataImpl(SSma *pSma, char *pData, int64_t indexUid, TSKEY querySKey, int32_t nMaxResult);
+int32_t tdGetTSmaDaysImpl(SVnodeCfg *pCfg, void *pCont, uint32_t contLen, int32_t *days);
+
#ifdef __cplusplus
}
#endif
-#endif /*_TD_VNODE_SMA_H_*/
\ No newline at end of file
+#endif /*_TD_VNODE_SMA_H_*/
diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h
index ad3f8cc869fe61bc2a265d630863b2bd481c3de7..7cd82b0ac37446fa413715150210dbf951485006 100644
--- a/source/dnode/vnode/src/inc/tq.h
+++ b/source/dnode/vnode/src/inc/tq.h
@@ -44,59 +44,97 @@ extern "C" {
typedef struct STqOffsetCfg STqOffsetCfg;
typedef struct STqOffsetStore STqOffsetStore;
+// tqRead
+
struct STqReadHandle {
int64_t ver;
- SHashObj* tbIdHash;
const SSubmitReq* pMsg;
SSubmitBlk* pBlock;
SSubmitMsgIter msgIter;
SSubmitBlkIter blkIter;
- SMeta* pVnodeMeta;
- SArray* pColIdList; // SArray
- int32_t sver;
- int64_t cachedSchemaUid;
- SSchemaWrapper* pSchemaWrapper;
- STSchema* pSchema;
+
+ SMeta* pVnodeMeta;
+ SHashObj* tbIdHash;
+ SArray* pColIdList; // SArray
+
+ int32_t cachedSchemaVer;
+ int64_t cachedSchemaUid;
+ SSchemaWrapper* pSchemaWrapper;
+ STSchema* pSchema;
};
+// tqPush
+
typedef struct {
- int64_t consumerId;
- int32_t epoch;
- int32_t skipLogNum;
- int64_t reqOffset;
+ // msg info
+ int64_t consumerId;
+ int64_t reqOffset;
+ int64_t processedVer;
+ int32_t epoch;
+ int32_t skipLogNum;
+ // rpc info
+ int64_t reqId;
+ SRpcHandleInfo rpcInfo;
+ tmr_h timerId;
+ int8_t tmrStopped;
+ // exec
+ int8_t inputStatus;
+ int8_t execStatus;
+ SStreamQ inputQ;
SRWLatch lock;
- SRpcMsg* handle;
} STqPushHandle;
+// tqExec
+
typedef struct {
- char subKey[TSDB_SUBSCRIBE_KEY_LEN];
- int64_t consumerId;
- int32_t epoch;
- int8_t subType;
- int8_t withTbName;
- int8_t withSchema;
- int8_t withTag;
- char* qmsg;
- SHashObj* pDropTbUid;
- STqPushHandle pushHandle;
- // SRWLatch lock;
- SWalReadHandle* pWalReader;
- // task number should be the same with fetch thread
+ char* qmsg;
+ qTaskInfo_t task[5];
+} STqExecCol;
+
+typedef struct {
+ int64_t suid;
+} STqExecTb;
+
+typedef struct {
+ SHashObj* pFilterOutTbUid;
+} STqExecDb;
+
+typedef struct {
+ int8_t subType;
+
STqReadHandle* pExecReader[5];
- qTaskInfo_t task[5];
-} STqExec;
+ union {
+ STqExecCol execCol;
+ STqExecTb execTb;
+ STqExecDb execDb;
+ } exec;
+} STqExecHandle;
+
+typedef struct {
+ // info
+ char subKey[TSDB_SUBSCRIBE_KEY_LEN];
+ int64_t consumerId;
+ int32_t epoch;
+
+ // reader
+ SWalReadHandle* pWalReader;
-int32_t tEncodeSTqExec(SEncoder* pEncoder, const STqExec* pExec);
-int32_t tDecodeSTqExec(SDecoder* pDecoder, STqExec* pExec);
+ // push
+ STqPushHandle pushHandle;
+
+ // exec
+ STqExecHandle execHandle;
+} STqHandle;
struct STQ {
char* path;
- SHashObj* pushMgr; // consumerId -> STqExec*
- SHashObj* execs; // subKey -> STqExec
- SHashObj* pStreamTasks;
+ SHashObj* pushMgr; // consumerId -> STqHandle*
+ SHashObj* handles; // subKey -> STqHandle
+ SHashObj* pStreamTasks; // taksId -> SStreamTask
SVnode* pVnode;
SWal* pWal;
- TDB* pTdb;
+ TDB* pMetaStore;
+ TTB* pExecStore;
};
typedef struct {
@@ -106,18 +144,29 @@ typedef struct {
static STqMgmt tqMgmt = {0};
-// init once
-int tqInit();
-void tqCleanUp();
+// tqRead
+int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalHead** pHeadWithCkSum);
-// tqOffset
-STqOffsetStore* STqOffsetOpen(STqOffsetCfg*);
-void STqOffsetClose(STqOffsetStore*);
+// tqExec
+int32_t tqDataExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataBlkRsp* pRsp, int32_t workerId);
+int32_t tqSendPollRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqDataBlkRsp* pRsp);
-int64_t tqOffsetFetch(STqOffsetStore* pStore, const char* subscribeKey);
-int32_t tqOffsetCommit(STqOffsetStore* pStore, const char* subscribeKey, int64_t offset);
-int32_t tqOffsetPersist(STqOffsetStore* pStore, const char* subscribeKey);
-int32_t tqOffsetPersistAll(STqOffsetStore* pStore);
+// tqMeta
+int32_t tqMetaOpen(STQ* pTq);
+int32_t tqMetaClose(STQ* pTq);
+int32_t tqMetaSaveHandle(STQ* pTq, const char* key, const STqHandle* pHandle);
+int32_t tqMetaDeleteHandle(STQ* pTq, const char* key);
+
+// tqSink
+void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data);
+
+// tqOffset
+STqOffsetStore* tqOffsetOpen(STqOffsetCfg*);
+void tqOffsetClose(STqOffsetStore*);
+int64_t tqOffsetFetch(STqOffsetStore* pStore, const char* subscribeKey);
+int32_t tqOffsetCommit(STqOffsetStore* pStore, const char* subscribeKey, int64_t offset);
+int32_t tqOffsetPersist(STqOffsetStore* pStore, const char* subscribeKey);
+int32_t tqOffsetPersistAll(STqOffsetStore* pStore);
#ifdef __cplusplus
}
diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h
index 1195f9e2b397c00e4d02ead5db574d2d8252f1f9..a62b4c4409ae0a4561c5150e4d6bd669698e666c 100644
--- a/source/dnode/vnode/src/inc/tsdb.h
+++ b/source/dnode/vnode/src/inc/tsdb.h
@@ -32,16 +32,29 @@ extern "C" {
#define tsdbTrace(...) do { if (tsdbDebugFlag & DEBUG_TRACE) { taosPrintLog("TSDB ", DEBUG_TRACE, tsdbDebugFlag, __VA_ARGS__); }} while(0)
// clang-format on
+typedef struct TSDBROW TSDBROW;
+typedef struct TSDBKEY TSDBKEY;
+typedef struct SDelOp SDelOp;
+
+static int tsdbKeyCmprFn(const void *p1, const void *p2);
+
+// tsdbMemTable2.c ==============================================================================================
+typedef struct SMemTable SMemTable;
+
+int32_t tsdbMemTableCreate2(STsdb *pTsdb, SMemTable **ppMemTable);
+void tsdbMemTableDestroy2(SMemTable *pMemTable);
+
// tsdbMemTable ================
+typedef struct STsdbRow STsdbRow;
typedef struct STbData STbData;
typedef struct STsdbMemTable STsdbMemTable;
typedef struct SMergeInfo SMergeInfo;
typedef struct STable STable;
int tsdbMemTableCreate(STsdb *pTsdb, STsdbMemTable **ppMemTable);
-void tsdbMemTableDestroy(STsdb *pTsdb, STsdbMemTable *pMemTable);
-int tsdbLoadDataFromCache(STable *pTable, SSkipListIterator *pIter, TSKEY maxKey, int maxRowsToRead, SDataCols *pCols,
- TKEY *filterKeys, int nFilterKeys, bool keepDup, SMergeInfo *pMergeInfo);
+void tsdbMemTableDestroy(STsdbMemTable *pMemTable);
+int tsdbLoadDataFromCache(STsdb *pTsdb, STable *pTable, SSkipListIterator *pIter, TSKEY maxKey, int maxRowsToRead,
+ SDataCols *pCols, TKEY *filterKeys, int nFilterKeys, bool keepDup, SMergeInfo *pMergeInfo);
// tsdbCommit ================
@@ -79,13 +92,14 @@ struct STsdb {
struct STable {
uint64_t tid;
uint64_t uid;
- STSchema *pSchema;
+ STSchema *pSchema; // latest schema
+ STSchema *pCacheSchema; // cached cache
};
#define TABLE_TID(t) (t)->tid
#define TABLE_UID(t) (t)->uid
-int tsdbPrepareCommit(STsdb *pTsdb);
+int tsdbPrepareCommit(STsdb *pTsdb);
typedef enum {
TSDB_FILE_HEAD = 0, // .head
TSDB_FILE_DATA, // .data
@@ -179,8 +193,17 @@ struct STsdbFS {
int tsdbLockRepo(STsdb *pTsdb);
int tsdbUnlockRepo(STsdb *pTsdb);
-static FORCE_INLINE STSchema *tsdbGetTableSchemaImpl(STable *pTable, bool lock, bool copy, int32_t version) {
- return pTable->pSchema;
+static FORCE_INLINE STSchema *tsdbGetTableSchemaImpl(STsdb *pTsdb, STable *pTable, bool lock, bool copy,
+ int32_t version) {
+ if ((version < 0) || (schemaVersion(pTable->pSchema) == version)) {
+ return pTable->pSchema;
+ }
+
+ if (!pTable->pCacheSchema || (schemaVersion(pTable->pCacheSchema) != version)) {
+ taosMemoryFreeClear(pTable->pCacheSchema);
+ pTable->pCacheSchema = metaGetTbTSchema(REPO_META(pTsdb), pTable->uid, version);
+ }
+ return pTable->pCacheSchema;
}
// tsdbMemTable.h
@@ -835,6 +858,42 @@ static FORCE_INLINE int tsdbUnLockFS(STsdbFS *pFs) {
return 0;
}
+struct TSDBROW {
+ int64_t version;
+ STSRow2 tsRow;
+};
+
+struct TSDBKEY {
+ int64_t version;
+ TSKEY ts;
+};
+
+struct SDelOp {
+ int64_t version;
+ TSKEY sKey; // included
+ TSKEY eKey; // included
+ SDelOp *pNext;
+};
+
+static FORCE_INLINE int tsdbKeyCmprFn(const void *p1, const void *p2) {
+ TSDBKEY *pKey1 = (TSDBKEY *)p1;
+ TSDBKEY *pKey2 = (TSDBKEY *)p2;
+
+ if (pKey1->ts < pKey2->ts) {
+ return -1;
+ } else if (pKey1->ts > pKey2->ts) {
+ return 1;
+ }
+
+ if (pKey1->version < pKey2->version) {
+ return -1;
+ } else if (pKey1->version > pKey2->version) {
+ return 1;
+ }
+
+ return 0;
+}
+
#endif
#ifdef __cplusplus
diff --git a/source/dnode/vnode/src/inc/vnd.h b/source/dnode/vnode/src/inc/vnd.h
index eb3382ac4cd46a602a214b09b5a8debeaf15087f..5f4f7e70daf089d22fa4e80978e787d10dc08c09 100644
--- a/source/dnode/vnode/src/inc/vnd.h
+++ b/source/dnode/vnode/src/inc/vnd.h
@@ -81,9 +81,10 @@ int32_t vnodeSyncCommit(SVnode* pVnode);
int32_t vnodeAsyncCommit(SVnode* pVnode);
// vnodeSync.c
-int32_t vnodeSyncOpen(SVnode* pVnode, char* path);
-void vnodeSyncStart(SVnode* pVnode);
-void vnodeSyncClose(SVnode* pVnode);
+int32_t vnodeSyncOpen(SVnode* pVnode, char* path);
+void vnodeSyncStart(SVnode* pVnode);
+void vnodeSyncClose(SVnode* pVnode);
+int32_t vnodeSyncAlter(SVnode* pVnode, SRpcMsg* pMsg);
#ifdef __cplusplus
}
diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h
index 24b3f458b1962089e05d8eadcf2b23b177956109..d3b5f29aac9e1ed96d8fe3569065ecaa8a1a1706 100644
--- a/source/dnode/vnode/src/inc/vnodeInt.h
+++ b/source/dnode/vnode/src/inc/vnodeInt.h
@@ -47,15 +47,17 @@
extern "C" {
#endif
-typedef struct SVnodeInfo SVnodeInfo;
-typedef struct SMeta SMeta;
-typedef struct SSma SSma;
-typedef struct STsdb STsdb;
-typedef struct STQ STQ;
-typedef struct SVState SVState;
-typedef struct SVBufPool SVBufPool;
-typedef struct SQWorker SQHandle;
-typedef struct STsdbKeepCfg STsdbKeepCfg;
+typedef struct SVnodeInfo SVnodeInfo;
+typedef struct SMeta SMeta;
+typedef struct SSma SSma;
+typedef struct STsdb STsdb;
+typedef struct STQ STQ;
+typedef struct SVState SVState;
+typedef struct SVBufPool SVBufPool;
+typedef struct SQWorker SQHandle;
+typedef struct STsdbKeepCfg STsdbKeepCfg;
+typedef struct SMetaSnapshotReader SMetaSnapshotReader;
+typedef struct STsdbSnapshotReader STsdbSnapshotReader;
#define VNODE_META_DIR "meta"
#define VNODE_TSDB_DIR "tsdb"
@@ -67,8 +69,10 @@ typedef struct STsdbKeepCfg STsdbKeepCfg;
#define VNODE_RSMA2_DIR "rsma2"
// vnd.h
-void* vnodeBufPoolMalloc(SVBufPool* pPool, int size);
-void vnodeBufPoolFree(SVBufPool* pPool, void* p);
+void* vnodeBufPoolMalloc(SVBufPool* pPool, int size);
+void vnodeBufPoolFree(SVBufPool* pPool, void* p);
+int32_t vnodeRealloc(void** pp, int32_t size);
+void vnodeFree(void* p);
// meta
typedef struct SMCtbCursor SMCtbCursor;
@@ -83,10 +87,11 @@ int metaAlterSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* p
int metaDropSTable(SMeta* pMeta, int64_t verison, SVDropStbReq* pReq);
int metaCreateTable(SMeta* pMeta, int64_t version, SVCreateTbReq* pReq);
int metaDropTable(SMeta* pMeta, int64_t version, SVDropTbReq* pReq, SArray* tbUids);
-int metaAlterTable(SMeta* pMeta, int64_t version, SVAlterTbReq* pReq);
+int metaAlterTable(SMeta* pMeta, int64_t version, SVAlterTbReq* pReq, STableMetaRsp *pMetaRsp);
SSchemaWrapper* metaGetTableSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver, bool isinline);
STSchema* metaGetTbTSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver);
int metaGetTableEntryByName(SMetaReader* pReader, const char* name);
+tb_uid_t metaGetTableEntryUidByName(SMeta* pMeta, const char* name);
int metaGetTbNum(SMeta* pMeta);
SMCtbCursor* metaOpenCtbCursor(SMeta* pMeta, tb_uid_t uid);
void metaCloseCtbCursor(SMCtbCursor* pCtbCur);
@@ -95,6 +100,10 @@ STSma* metaGetSmaInfoByIndex(SMeta* pMeta, int64_t indexUid);
STSmaWrapper* metaGetSmaInfoByTable(SMeta* pMeta, tb_uid_t uid, bool deepCopy);
SArray* metaGetSmaIdsByTable(SMeta* pMeta, tb_uid_t uid);
SArray* metaGetSmaTbUids(SMeta* pMeta);
+int32_t metaSnapshotReaderOpen(SMeta* pMeta, SMetaSnapshotReader** ppReader, int64_t sver, int64_t ever);
+int32_t metaSnapshotReaderClose(SMetaSnapshotReader* pReader);
+int32_t metaSnapshotRead(SMetaSnapshotReader* pReader, void** ppData, uint32_t* nData);
+void* metaGetIdx(SMeta* pMeta);
int32_t metaCreateTSma(SMeta* pMeta, int64_t version, SSmaCfg* pCfg);
int32_t metaDropTSma(SMeta* pMeta, int64_t indexUid);
@@ -107,13 +116,17 @@ int tsdbCommit(STsdb* pTsdb);
int tsdbScanAndConvertSubmitMsg(STsdb* pTsdb, SSubmitReq* pMsg);
int tsdbInsertData(STsdb* pTsdb, int64_t version, SSubmitReq* pMsg, SSubmitRsp* pRsp);
int tsdbInsertTableData(STsdb* pTsdb, SSubmitMsgIter* pMsgIter, SSubmitBlk* pBlock, SSubmitBlkRsp* pRsp);
-tsdbReaderT* tsdbQueryTables(SVnode* pVnode, SQueryTableDataCond* pCond, STableGroupInfo* groupList, uint64_t qId,
+tsdbReaderT* tsdbQueryTables(SVnode* pVnode, SQueryTableDataCond* pCond, STableListInfo* tableList, uint64_t qId,
uint64_t taskId);
-tsdbReaderT tsdbQueryCacheLastT(STsdb* tsdb, SQueryTableDataCond* pCond, STableGroupInfo* groupList, uint64_t qId,
+tsdbReaderT tsdbQueryCacheLastT(STsdb* tsdb, SQueryTableDataCond* pCond, STableListInfo* tableList, uint64_t qId,
void* pMemRef);
-int32_t tsdbGetTableGroupFromIdListT(STsdb* tsdb, SArray* pTableIdList, STableGroupInfo* pGroupInfo);
+int32_t tsdbSnapshotReaderOpen(STsdb* pTsdb, STsdbSnapshotReader** ppReader, int64_t sver, int64_t ever);
+int32_t tsdbSnapshotReaderClose(STsdbSnapshotReader* pReader);
+int32_t tsdbSnapshotRead(STsdbSnapshotReader* pReader, void** ppData, uint32_t* nData);
// tq
+int tqInit();
+void tqCleanUp();
STQ* tqOpen(const char* path, SVnode* pVnode, SWal* pWal);
void tqClose(STQ*);
int tqPushMsg(STQ*, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver);
@@ -134,11 +147,11 @@ int32_t tqProcessTaskRecoverRsp(STQ* pTq, SRpcMsg* pMsg);
int32_t smaOpen(SVnode* pVnode);
int32_t smaClose(SSma* pSma);
-int32_t tdUpdateExpireWindow(SSma* pSma, SSubmitReq* pMsg, int64_t version);
+int32_t tdUpdateExpireWindow(SSma* pSma, const SSubmitReq* pMsg, int64_t version);
int32_t tdProcessTSmaCreate(SSma* pSma, int64_t version, const char* msg);
int32_t tdProcessTSmaInsert(SSma* pSma, int64_t indexUid, const char* msg);
-int32_t tdProcessRSmaCreate(SSma* pSma, SMeta* pMeta, SVCreateStbReq* pReq, SMsgCb* pMsgCb);
+int32_t tdProcessRSmaCreate(SVnode* pVnode, SVCreateStbReq* pReq);
int32_t tdProcessRSmaSubmit(SSma* pSma, void* pMsg, int32_t inputType);
int32_t tdFetchTbUidList(SSma* pSma, STbUidStore** ppStore, tb_uid_t suid, tb_uid_t uid);
int32_t tdUpdateTbUidList(SSma* pSma, STbUidStore* pUidStore);
@@ -228,6 +241,8 @@ struct SVnode {
#define VND_RSMA1(vnd) ((vnd)->pSma->pRSmaTsdb1)
#define VND_RSMA2(vnd) ((vnd)->pSma->pRSmaTsdb2)
#define VND_RETENTIONS(vnd) (&(vnd)->config.tsdbCfg.retentions)
+#define VND_IS_RSMA(v) ((v)->config.isRsma == 1)
+#define VND_IS_TSMA(v) ((v)->config.isTsma == 1)
struct STbUidStore {
tb_uid_t suid;
@@ -260,11 +275,6 @@ struct SSma {
#define SMA_RSMA_TSDB1(s) ((s)->pRSmaTsdb1)
#define SMA_RSMA_TSDB2(s) ((s)->pRSmaTsdb2)
-static FORCE_INLINE bool vnodeIsRollup(SVnode* pVnode) {
- SRetention* pRetention = &(pVnode->config.tsdbCfg.retentions[0]);
- return (pRetention->freq > 0 && pRetention->keep > 0);
-}
-
// sma
void smaHandleRes(void* pVnode, int64_t smaId, const SArray* data);
diff --git a/source/dnode/vnode/src/meta/metaEntry.c b/source/dnode/vnode/src/meta/metaEntry.c
index 8a4db3100d31695af2889367088c9a0e16bb6236..db99257ea707d68858887d34cdc29077e099eec3 100644
--- a/source/dnode/vnode/src/meta/metaEntry.c
+++ b/source/dnode/vnode/src/meta/metaEntry.c
@@ -24,18 +24,19 @@ int metaEncodeEntry(SEncoder *pCoder, const SMetaEntry *pME) {
if (tEncodeCStr(pCoder, pME->name) < 0) return -1;
if (pME->type == TSDB_SUPER_TABLE) {
- if (tEncodeSSchemaWrapper(pCoder, &pME->stbEntry.schema) < 0) return -1;
+ if (tEncodeSSchemaWrapper(pCoder, &pME->stbEntry.schemaRow) < 0) return -1;
if (tEncodeSSchemaWrapper(pCoder, &pME->stbEntry.schemaTag) < 0) return -1;
} else if (pME->type == TSDB_CHILD_TABLE) {
if (tEncodeI64(pCoder, pME->ctbEntry.ctime) < 0) return -1;
if (tEncodeI32(pCoder, pME->ctbEntry.ttlDays) < 0) return -1;
if (tEncodeI64(pCoder, pME->ctbEntry.suid) < 0) return -1;
- if (tEncodeBinary(pCoder, pME->ctbEntry.pTags, kvRowLen(pME->ctbEntry.pTags)) < 0) return -1;
+ debugCheckTags((STag*)pME->ctbEntry.pTags); // TODO: remove after debug
+ if (tEncodeTag(pCoder, (const STag *)pME->ctbEntry.pTags) < 0) return -1;
} else if (pME->type == TSDB_NORMAL_TABLE) {
if (tEncodeI64(pCoder, pME->ntbEntry.ctime) < 0) return -1;
if (tEncodeI32(pCoder, pME->ntbEntry.ttlDays) < 0) return -1;
if (tEncodeI32v(pCoder, pME->ntbEntry.ncid) < 0) return -1;
- if (tEncodeSSchemaWrapper(pCoder, &pME->ntbEntry.schema) < 0) return -1;
+ if (tEncodeSSchemaWrapper(pCoder, &pME->ntbEntry.schemaRow) < 0) return -1;
} else if (pME->type == TSDB_TSMA_TABLE) {
if (tEncodeTSma(pCoder, pME->smaEntry.tsma) < 0) return -1;
} else {
@@ -47,7 +48,6 @@ int metaEncodeEntry(SEncoder *pCoder, const SMetaEntry *pME) {
}
int metaDecodeEntry(SDecoder *pCoder, SMetaEntry *pME) {
- uint32_t len;
if (tStartDecode(pCoder) < 0) return -1;
if (tDecodeI64(pCoder, &pME->version) < 0) return -1;
@@ -56,18 +56,19 @@ int metaDecodeEntry(SDecoder *pCoder, SMetaEntry *pME) {
if (tDecodeCStr(pCoder, &pME->name) < 0) return -1;
if (pME->type == TSDB_SUPER_TABLE) {
- if (tDecodeSSchemaWrapperEx(pCoder, &pME->stbEntry.schema) < 0) return -1;
+ if (tDecodeSSchemaWrapperEx(pCoder, &pME->stbEntry.schemaRow) < 0) return -1;
if (tDecodeSSchemaWrapperEx(pCoder, &pME->stbEntry.schemaTag) < 0) return -1;
} else if (pME->type == TSDB_CHILD_TABLE) {
if (tDecodeI64(pCoder, &pME->ctbEntry.ctime) < 0) return -1;
if (tDecodeI32(pCoder, &pME->ctbEntry.ttlDays) < 0) return -1;
if (tDecodeI64(pCoder, &pME->ctbEntry.suid) < 0) return -1;
- if (tDecodeBinary(pCoder, &pME->ctbEntry.pTags, &len) < 0) return -1; // (TODO)
+ if (tDecodeTag(pCoder, (STag **)&pME->ctbEntry.pTags) < 0) return -1; // (TODO)
+ debugCheckTags((STag*)pME->ctbEntry.pTags); // TODO: remove after debug
} else if (pME->type == TSDB_NORMAL_TABLE) {
if (tDecodeI64(pCoder, &pME->ntbEntry.ctime) < 0) return -1;
if (tDecodeI32(pCoder, &pME->ntbEntry.ttlDays) < 0) return -1;
if (tDecodeI32v(pCoder, &pME->ntbEntry.ncid) < 0) return -1;
- if (tDecodeSSchemaWrapperEx(pCoder, &pME->ntbEntry.schema) < 0) return -1;
+ if (tDecodeSSchemaWrapperEx(pCoder, &pME->ntbEntry.schemaRow) < 0) return -1;
} else if (pME->type == TSDB_TSMA_TABLE) {
pME->smaEntry.tsma = tDecoderMalloc(pCoder, sizeof(STSma));
if (!pME->smaEntry.tsma) {
diff --git a/source/dnode/vnode/src/meta/metaOpen.c b/source/dnode/vnode/src/meta/metaOpen.c
index f23e7f88056d6a397a5979bda11dd4f080ba0212..86637d28504bf7169c4b3f2ab4ae145af1e10661 100644
--- a/source/dnode/vnode/src/meta/metaOpen.c
+++ b/source/dnode/vnode/src/meta/metaOpen.c
@@ -53,42 +53,42 @@ int metaOpen(SVnode *pVnode, SMeta **ppMeta) {
// open env
ret = tdbOpen(pMeta->path, pVnode->config.szPage, pVnode->config.szCache, &pMeta->pEnv);
if (ret < 0) {
- metaError("vgId:%d failed to open meta env since %s", TD_VID(pVnode), tstrerror(terrno));
+ metaError("vgId:%d, failed to open meta env since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;
}
// open pTbDb
ret = tdbTbOpen("table.db", sizeof(STbDbKey), -1, tbDbKeyCmpr, pMeta->pEnv, &pMeta->pTbDb);
if (ret < 0) {
- metaError("vgId:%d failed to open meta table db since %s", TD_VID(pVnode), tstrerror(terrno));
+ metaError("vgId:%d, failed to open meta table db since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;
}
// open pSkmDb
ret = tdbTbOpen("schema.db", sizeof(SSkmDbKey), -1, skmDbKeyCmpr, pMeta->pEnv, &pMeta->pSkmDb);
if (ret < 0) {
- metaError("vgId:%d failed to open meta schema db since %s", TD_VID(pVnode), tstrerror(terrno));
+ metaError("vgId:%d, failed to open meta schema db since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;
}
// open pUidIdx
ret = tdbTbOpen("uid.idx", sizeof(tb_uid_t), sizeof(int64_t), uidIdxKeyCmpr, pMeta->pEnv, &pMeta->pUidIdx);
if (ret < 0) {
- metaError("vgId:%d failed to open meta uid idx since %s", TD_VID(pVnode), tstrerror(terrno));
+ metaError("vgId:%d, failed to open meta uid idx since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;
}
// open pNameIdx
ret = tdbTbOpen("name.idx", -1, sizeof(tb_uid_t), NULL, pMeta->pEnv, &pMeta->pNameIdx);
if (ret < 0) {
- metaError("vgId:%d failed to open meta name index since %s", TD_VID(pVnode), tstrerror(terrno));
+ metaError("vgId:%d, failed to open meta name index since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;
}
// open pCtbIdx
ret = tdbTbOpen("ctb.idx", sizeof(SCtbIdxKey), 0, ctbIdxKeyCmpr, pMeta->pEnv, &pMeta->pCtbIdx);
if (ret < 0) {
- metaError("vgId:%d failed to open meta child table index since %s", TD_VID(pVnode), tstrerror(terrno));
+ metaError("vgId:%d, failed to open meta child table index since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;
}
@@ -100,14 +100,14 @@ int metaOpen(SVnode *pVnode, SMeta **ppMeta) {
taosMkDir(indexFullPath);
ret = indexOpen(indexOptsCreate(), indexFullPath, (SIndex **)&pMeta->pTagIvtIdx);
if (ret < 0) {
- metaError("vgId:%d failed to open meta tag index since %s", TD_VID(pVnode), tstrerror(terrno));
+ metaError("vgId:%d, failed to open meta tag index since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;
}
#else
ret = tdbTbOpen("tag.idx", -1, 0, tagIdxKeyCmpr, pMeta->pEnv, &pMeta->pTagIdx);
if (ret < 0) {
- metaError("vgId:%d failed to open meta tag index since %s", TD_VID(pVnode), tstrerror(terrno));
+ metaError("vgId:%d, failed to open meta tag index since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;
}
#endif
@@ -115,24 +115,24 @@ int metaOpen(SVnode *pVnode, SMeta **ppMeta) {
// open pTtlIdx
ret = tdbTbOpen("ttl.idx", sizeof(STtlIdxKey), 0, ttlIdxKeyCmpr, pMeta->pEnv, &pMeta->pTtlIdx);
if (ret < 0) {
- metaError("vgId:%d failed to open meta ttl index since %s", TD_VID(pVnode), tstrerror(terrno));
+ metaError("vgId:%d, failed to open meta ttl index since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;
}
// open pSmaIdx
ret = tdbTbOpen("sma.idx", sizeof(SSmaIdxKey), 0, smaIdxKeyCmpr, pMeta->pEnv, &pMeta->pSmaIdx);
if (ret < 0) {
- metaError("vgId:%d failed to open meta sma index since %s", TD_VID(pVnode), tstrerror(terrno));
+ metaError("vgId:%d, failed to open meta sma index since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;
}
// open index
if (metaOpenIdx(pMeta) < 0) {
- metaError("vgId:%d failed to open meta index since %s", TD_VID(pVnode), tstrerror(terrno));
+ metaError("vgId:%d, failed to open meta index since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;
}
- metaDebug("vgId:%d meta is opened", TD_VID(pVnode));
+ metaDebug("vgId:%d, meta is opened", TD_VID(pVnode));
*ppMeta = pMeta;
return 0;
diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c
index c19190e68a6bd54a106a9de1278d8870989864dc..ca31e1b32ca3b1972f05f1c3854767b50ff13e2b 100644
--- a/source/dnode/vnode/src/meta/metaQuery.c
+++ b/source/dnode/vnode/src/meta/metaQuery.c
@@ -31,7 +31,7 @@ void metaReaderClear(SMetaReader *pReader) {
}
int metaGetTableEntryByVersion(SMetaReader *pReader, int64_t version, tb_uid_t uid) {
- SMeta *pMeta = pReader->pMeta;
+ SMeta * pMeta = pReader->pMeta;
STbDbKey tbDbKey = {.version = version, .uid = uid};
// query table.db
@@ -54,7 +54,7 @@ _err:
}
int metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid) {
- SMeta *pMeta = pReader->pMeta;
+ SMeta * pMeta = pReader->pMeta;
int64_t version;
// query uid.idx
@@ -68,7 +68,7 @@ int metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid) {
}
int metaGetTableEntryByName(SMetaReader *pReader, const char *name) {
- SMeta *pMeta = pReader->pMeta;
+ SMeta * pMeta = pReader->pMeta;
tb_uid_t uid;
// query name.idx
@@ -81,6 +81,19 @@ int metaGetTableEntryByName(SMetaReader *pReader, const char *name) {
return metaGetTableEntryByUid(pReader, uid);
}
+tb_uid_t metaGetTableEntryUidByName(SMeta *pMeta, const char *name) {
+ void * pData = NULL;
+ int nData = 0;
+ tb_uid_t uid = 0;
+
+ if (tdbTbGet(pMeta->pNameIdx, name, strlen(name) + 1, &pData, &nData) == 0) {
+ uid = *(tb_uid_t *)pData;
+ tdbFree(pData);
+ }
+
+ return 0;
+}
+
int metaReadNext(SMetaReader *pReader) {
SMeta *pMeta = pReader->pMeta;
@@ -121,7 +134,7 @@ void metaCloseTbCursor(SMTbCursor *pTbCur) {
int metaTbCursorNext(SMTbCursor *pTbCur) {
int ret;
- void *pBuf;
+ void * pBuf;
STbCfg tbCfg;
for (;;) {
@@ -142,52 +155,61 @@ int metaTbCursorNext(SMTbCursor *pTbCur) {
}
SSchemaWrapper *metaGetTableSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver, bool isinline) {
- void *pKey = NULL;
- void *pVal = NULL;
- int kLen = 0;
- int vLen = 0;
- int ret;
- SSkmDbKey skmDbKey;
- SSchemaWrapper *pSW = NULL;
- SSchema *pSchema = NULL;
- void *pBuf;
- SDecoder coder = {0};
-
- // fetch
- skmDbKey.uid = uid;
- skmDbKey.sver = sver;
- pKey = &skmDbKey;
- kLen = sizeof(skmDbKey);
+ void * pData = NULL;
+ int nData = 0;
+ int64_t version;
+ SSchemaWrapper schema = {0};
+ SSchemaWrapper *pSchema = NULL;
+ SDecoder dc = {0};
+
metaRLock(pMeta);
- ret = tdbTbGet(pMeta->pSkmDb, pKey, kLen, &pVal, &vLen);
- metaULock(pMeta);
- if (ret < 0) {
- return NULL;
- }
+ if (sver < 0) {
+ if (tdbTbGet(pMeta->pUidIdx, &uid, sizeof(uid), &pData, &nData) < 0) {
+ goto _err;
+ }
- // decode
- pBuf = pVal;
- pSW = taosMemoryMalloc(sizeof(SSchemaWrapper));
+ version = *(int64_t *)pData;
- tDecoderInit(&coder, pVal, vLen);
- tDecodeSSchemaWrapper(&coder, pSW);
- pSchema = taosMemoryMalloc(sizeof(SSchema) * pSW->nCols);
- memcpy(pSchema, pSW->pSchema, sizeof(SSchema) * pSW->nCols);
- tDecoderClear(&coder);
+ tdbTbGet(pMeta->pTbDb, &(STbDbKey){.uid = uid, .version = version}, sizeof(STbDbKey), &pData, &nData);
- pSW->pSchema = pSchema;
+ SMetaEntry me = {0};
+ tDecoderInit(&dc, pData, nData);
+ metaDecodeEntry(&dc, &me);
+ if (me.type == TSDB_SUPER_TABLE) {
+ pSchema = tCloneSSchemaWrapper(&me.stbEntry.schemaRow);
+ } else if (me.type == TSDB_NORMAL_TABLE) {
+ pSchema = tCloneSSchemaWrapper(&me.ntbEntry.schemaRow);
+ } else {
+ ASSERT(0);
+ }
+ tDecoderClear(&dc);
+ } else {
+ if (tdbTbGet(pMeta->pSkmDb, &(SSkmDbKey){.uid = uid, .sver = sver}, sizeof(SSkmDbKey), &pData, &nData) < 0) {
+ goto _err;
+ }
- tdbFree(pVal);
+ tDecoderInit(&dc, pData, nData);
+ tDecodeSSchemaWrapper(&dc, &schema);
+ pSchema = tCloneSSchemaWrapper(&schema);
+ tDecoderClear(&dc);
+ }
- return pSW;
+ metaULock(pMeta);
+ tdbFree(pData);
+ return pSchema;
+
+_err:
+ metaULock(pMeta);
+ tdbFree(pData);
+ return NULL;
}
struct SMCtbCursor {
- SMeta *pMeta;
- TBC *pCur;
+ SMeta * pMeta;
+ TBC * pCur;
tb_uid_t suid;
- void *pKey;
- void *pVal;
+ void * pKey;
+ void * pVal;
int kLen;
int vLen;
};
@@ -259,10 +281,10 @@ tb_uid_t metaCtbCursorNext(SMCtbCursor *pCtbCur) {
STSchema *metaGetTbTSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver) {
tb_uid_t quid;
SMetaReader mr = {0};
- STSchema *pTSchema = NULL;
+ STSchema * pTSchema = NULL;
SSchemaWrapper *pSW = NULL;
STSchemaBuilder sb = {0};
- SSchema *pSchema;
+ SSchema * pSchema;
metaReaderInit(&mr, pMeta, 0);
metaGetTableEntryByUid(&mr, uid);
@@ -278,7 +300,7 @@ STSchema *metaGetTbTSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver) {
pSW = metaGetTableSchema(pMeta, quid, sver, 0);
if (!pSW) return NULL;
- tdInitTSchemaBuilder(&sb, sver);
+ tdInitTSchemaBuilder(&sb, pSW->version);
for (int i = 0; i < pSW->nCols; i++) {
pSchema = pSW->pSchema + i;
tdAddColToSchema(&sb, pSchema->type, pSchema->flags, pSchema->colId, pSchema->bytes);
@@ -299,11 +321,11 @@ int metaGetTbNum(SMeta *pMeta) {
}
typedef struct {
- SMeta *pMeta;
- TBC *pCur;
+ SMeta * pMeta;
+ TBC * pCur;
tb_uid_t uid;
- void *pKey;
- void *pVal;
+ void * pKey;
+ void * pVal;
int kLen;
int vLen;
} SMSmaCursor;
@@ -375,7 +397,7 @@ tb_uid_t metaSmaCursorNext(SMSmaCursor *pSmaCur) {
STSmaWrapper *metaGetSmaInfoByTable(SMeta *pMeta, tb_uid_t uid, bool deepCopy) {
STSmaWrapper *pSW = NULL;
- SArray *pSmaIds = NULL;
+ SArray * pSmaIds = NULL;
if (!(pSmaIds = metaGetSmaIdsByTable(pMeta, uid))) {
return NULL;
@@ -399,11 +421,11 @@ STSmaWrapper *metaGetSmaInfoByTable(SMeta *pMeta, tb_uid_t uid, bool deepCopy) {
metaReaderInit(&mr, pMeta, 0);
int64_t smaId;
int smaIdx = 0;
- STSma *pTSma = NULL;
+ STSma * pTSma = NULL;
for (int i = 0; i < pSW->number; ++i) {
smaId = *(tb_uid_t *)taosArrayGet(pSmaIds, i);
if (metaGetTableEntryByUid(&mr, smaId) < 0) {
- metaWarn("vgId:%d no entry for tbId: %" PRIi64 ", smaId: %" PRIi64, TD_VID(pMeta->pVnode), uid, smaId);
+ metaWarn("vgId:%d, no entry for tbId: %" PRIi64 ", smaId: %" PRIi64, TD_VID(pMeta->pVnode), uid, smaId);
continue;
}
pTSma = pSW->tSma + smaIdx;
@@ -442,16 +464,16 @@ STSmaWrapper *metaGetSmaInfoByTable(SMeta *pMeta, tb_uid_t uid, bool deepCopy) {
_err:
metaReaderClear(&mr);
taosArrayDestroy(pSmaIds);
- tdFreeTSmaWrapper(pSW, deepCopy);
+ tFreeTSmaWrapper(pSW, deepCopy);
return NULL;
}
STSma *metaGetSmaInfoByIndex(SMeta *pMeta, int64_t indexUid) {
- STSma *pTSma = NULL;
+ STSma * pTSma = NULL;
SMetaReader mr = {0};
metaReaderInit(&mr, pMeta, 0);
if (metaGetTableEntryByUid(&mr, indexUid) < 0) {
- metaWarn("vgId:%d failed to get table entry for smaId: %" PRIi64, TD_VID(pMeta->pVnode), indexUid);
+ metaWarn("vgId:%d, failed to get table entry for smaId: %" PRIi64, TD_VID(pMeta->pVnode), indexUid);
metaReaderClear(&mr);
return NULL;
}
@@ -469,7 +491,7 @@ STSma *metaGetSmaInfoByIndex(SMeta *pMeta, int64_t indexUid) {
}
SArray *metaGetSmaIdsByTable(SMeta *pMeta, tb_uid_t uid) {
- SArray *pUids = NULL;
+ SArray * pUids = NULL;
SSmaIdxKey *pSmaIdxKey = NULL;
SMSmaCursor *pCur = metaOpenSmaCursor(pMeta, uid);
@@ -507,7 +529,7 @@ SArray *metaGetSmaIdsByTable(SMeta *pMeta, tb_uid_t uid) {
}
SArray *metaGetSmaTbUids(SMeta *pMeta) {
- SArray *pUids = NULL;
+ SArray * pUids = NULL;
SSmaIdxKey *pSmaIdxKey = NULL;
tb_uid_t lastUid = 0;
@@ -551,7 +573,101 @@ SArray *metaGetSmaTbUids(SMeta *pMeta) {
#endif
-const void *metaGetTableTagVal(SMetaEntry *pEntry, int16_t cid) {
+const void *metaGetTableTagVal(SMetaEntry *pEntry, int16_t type, STagVal *val) {
ASSERT(pEntry->type == TSDB_CHILD_TABLE);
- return tdGetKVRowValOfCol((const SKVRow)pEntry->ctbEntry.pTags, cid);
+ STag *tag = (STag *)pEntry->ctbEntry.pTags;
+ if (type == TSDB_DATA_TYPE_JSON){
+ if(tag->nTag == 0){
+ return NULL;
+ }
+ return tag;
+ }
+ bool find = tTagGet(tag, val);
+
+ if(!find){
+ return NULL;
+ }
+ return val;
+}
+
+typedef struct {
+ SMeta * pMeta;
+ TBC * pCur;
+ tb_uid_t suid;
+ int16_t cid;
+ int16_t type;
+ void * pKey;
+ void * pVal;
+ int32_t kLen;
+ int32_t vLen;
+} SIdxCursor;
+
+int32_t metaFilteTableIds(SMeta *pMeta, SMetaFltParam *param, SArray *pUids) {
+ SIdxCursor *pCursor = NULL;
+
+ char *tagData = param->val;
+
+ int32_t ret = 0, valid = 0;
+ pCursor = (SIdxCursor *)taosMemoryCalloc(1, sizeof(SIdxCursor));
+ pCursor->pMeta = pMeta;
+ pCursor->suid = param->suid;
+ pCursor->cid = param->cid;
+ pCursor->type = param->type;
+
+ metaRLock(pMeta);
+ ret = tdbTbcOpen(pMeta->pTagIdx, &pCursor->pCur, NULL);
+ if (ret < 0) {
+ goto END;
+ }
+ STagIdxKey *pKey = NULL;
+ int32_t nKey = 0;
+
+ int32_t nTagData = 0;
+ if(IS_VAR_DATA_TYPE(param->type)){
+ nTagData = strlen(param->val);
+ }else{
+ nTagData = tDataTypes[param->type].bytes;
+ }
+ ret = metaCreateTagIdxKey(pCursor->suid, pCursor->cid, param->val, nTagData, pCursor->type,
+ param->reverse ? INT64_MAX : INT64_MIN, &pKey, &nKey);
+ if (ret != 0) {
+ goto END;
+ }
+ int cmp = 0;
+ if (tdbTbcMoveTo(pCursor->pCur, pKey, nKey, &cmp) < 0) {
+ goto END;
+ }
+ void * entryKey = NULL, *entryVal = NULL;
+ int32_t nEntryKey, nEntryVal;
+ while (1) {
+ valid = tdbTbcGet(pCursor->pCur, (const void **)&entryKey, &nEntryKey, (const void **)&entryVal, &nEntryVal);
+ if (valid < 0) {
+ break;
+ }
+ STagIdxKey *p = entryKey;
+ if (p != NULL) {
+ int32_t cmp = (*param->filterFunc)(p->data, pKey->data, pKey->type);
+ if (cmp == 0) {
+ // match
+ tb_uid_t tuid = *(tb_uid_t *)(p->data + tDataTypes[pCursor->type].bytes);
+ taosArrayPush(pUids, &tuid);
+ } else if (cmp == 1) {
+ // not match but should continue to iter
+ } else {
+ // not match and no more result
+ break;
+ }
+ }
+ valid = param->reverse ? tdbTbcMoveToPrev(pCursor->pCur) : tdbTbcMoveToNext(pCursor->pCur);
+ if (valid < 0) {
+ break;
+ }
+ }
+END:
+ if (pCursor->pMeta) metaULock(pCursor->pMeta);
+ if (pCursor->pCur) tdbTbcClose(pCursor->pCur);
+
+ taosMemoryFree(pCursor);
+
+ return ret;
}
\ No newline at end of file
diff --git a/source/dnode/vnode/src/meta/metaSma.c b/source/dnode/vnode/src/meta/metaSma.c
index 75595d83a64941e0caf6f2f399345c09a226286e..fde9d30346da782129739592ab3c34bfdb964379 100644
--- a/source/dnode/vnode/src/meta/metaSma.c
+++ b/source/dnode/vnode/src/meta/metaSma.c
@@ -57,12 +57,12 @@ int32_t metaCreateTSma(SMeta *pMeta, int64_t version, SSmaCfg *pCfg) {
if (metaHandleSmaEntry(pMeta, &me) < 0) goto _err;
- metaDebug("vgId:%d tsma is created, name:%s uid: %" PRId64, TD_VID(pMeta->pVnode), pCfg->indexName, pCfg->indexUid);
+ metaDebug("vgId:%d, tsma is created, name:%s uid: %" PRId64, TD_VID(pMeta->pVnode), pCfg->indexName, pCfg->indexUid);
return 0;
_err:
- metaError("vgId:%d failed to create tsma: %s uid: %" PRId64 " since %s", TD_VID(pMeta->pVnode), pCfg->indexName,
+ metaError("vgId:%d, failed to create tsma: %s uid: %" PRId64 " since %s", TD_VID(pMeta->pVnode), pCfg->indexName,
pCfg->indexUid, tstrerror(terrno));
return -1;
}
diff --git a/source/dnode/vnode/src/meta/metaSnapshot.c b/source/dnode/vnode/src/meta/metaSnapshot.c
new file mode 100644
index 0000000000000000000000000000000000000000..5757039d55d410808b4eeb57d2e09286b7939004
--- /dev/null
+++ b/source/dnode/vnode/src/meta/metaSnapshot.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include "meta.h"
+
+struct SMetaSnapshotReader {
+ SMeta* pMeta;
+ TBC* pTbc;
+ int64_t sver;
+ int64_t ever;
+};
+
+int32_t metaSnapshotReaderOpen(SMeta* pMeta, SMetaSnapshotReader** ppReader, int64_t sver, int64_t ever) {
+ int32_t code = 0;
+ int32_t c = 0;
+ SMetaSnapshotReader* pMetaReader = NULL;
+
+ pMetaReader = (SMetaSnapshotReader*)taosMemoryCalloc(1, sizeof(*pMetaReader));
+ if (pMetaReader == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ pMetaReader->pMeta = pMeta;
+ pMetaReader->sver = sver;
+ pMetaReader->ever = ever;
+ code = tdbTbcOpen(pMeta->pTbDb, &pMetaReader->pTbc, NULL);
+ if (code) {
+ goto _err;
+ }
+
+ code = tdbTbcMoveTo(pMetaReader->pTbc, &(STbDbKey){.version = sver, .uid = INT64_MIN}, sizeof(STbDbKey), &c);
+ if (code) {
+ goto _err;
+ }
+
+ *ppReader = pMetaReader;
+ return code;
+
+_err:
+ *ppReader = NULL;
+ return code;
+}
+
+int32_t metaSnapshotReaderClose(SMetaSnapshotReader* pReader) {
+ if (pReader) {
+ tdbTbcClose(pReader->pTbc);
+ taosMemoryFree(pReader);
+ }
+ return 0;
+}
+
+int32_t metaSnapshotRead(SMetaSnapshotReader* pReader, void** ppData, uint32_t* nDatap) {
+ const void* pKey = NULL;
+ const void* pData = NULL;
+ int32_t nKey = 0;
+ int32_t nData = 0;
+ int32_t code = 0;
+
+ for (;;) {
+ code = tdbTbcGet(pReader->pTbc, &pKey, &nKey, &pData, &nData);
+ if (code || ((STbDbKey*)pData)->version > pReader->ever) {
+ return TSDB_CODE_VND_READ_END;
+ }
+
+ if (((STbDbKey*)pData)->version < pReader->sver) {
+ continue;
+ }
+
+ break;
+ }
+
+ // copy the data
+ if (vnodeRealloc(ppData, nData) < 0) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ return code;
+ }
+
+ memcpy(*ppData, pData, nData);
+ *nDatap = nData;
+ return code;
+}
\ No newline at end of file
diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c
index a792343380aa5799d0ae303eb5b37ace65419f04..3b88a833958d466e15762648c98012251cfc5796 100644
--- a/source/dnode/vnode/src/meta/metaTable.c
+++ b/source/dnode/vnode/src/meta/metaTable.c
@@ -23,6 +23,25 @@ static int metaUpdateTtlIdx(SMeta *pMeta, const SMetaEntry *pME);
static int metaSaveToSkmDb(SMeta *pMeta, const SMetaEntry *pME);
static int metaUpdateCtbIdx(SMeta *pMeta, const SMetaEntry *pME);
static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry);
+static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type);
+
+static int metaUpdateMetaRsp(tb_uid_t uid, char* tbName, SSchemaWrapper *pSchema, STableMetaRsp *pMetaRsp) {
+ pMetaRsp->pSchemas = taosMemoryMalloc(pSchema->nCols * sizeof(SSchema));
+ if (NULL == pMetaRsp->pSchemas) {
+ terrno = TSDB_CODE_VND_OUT_OF_MEMORY;
+ return -1;
+ }
+
+ strcpy(pMetaRsp->tbName, tbName);
+ pMetaRsp->numOfColumns = pSchema->nCols;
+ pMetaRsp->tableType = TSDB_NORMAL_TABLE;
+ pMetaRsp->sversion = pSchema->version;
+ pMetaRsp->tuid = uid;
+
+ memcpy(pMetaRsp->pSchemas, pSchema->pSchema, pSchema->nCols * sizeof(SSchema));
+
+ return 0;
+}
int metaCreateSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
SMetaEntry me = {0};
@@ -55,80 +74,87 @@ int metaCreateSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
me.type = TSDB_SUPER_TABLE;
me.uid = pReq->suid;
me.name = pReq->name;
- me.stbEntry.schema = pReq->schema;
+ me.stbEntry.schemaRow = pReq->schemaRow;
me.stbEntry.schemaTag = pReq->schemaTag;
if (metaHandleEntry(pMeta, &me) < 0) goto _err;
- metaDebug("vgId:%d super table is created, name:%s uid: %" PRId64, TD_VID(pMeta->pVnode), pReq->name, pReq->suid);
+ metaDebug("vgId:%d, super table is created, name:%s uid: %" PRId64, TD_VID(pMeta->pVnode), pReq->name, pReq->suid);
return 0;
_err:
- metaError("vgId:%d failed to create super table: %s uid: %" PRId64 " since %s", TD_VID(pMeta->pVnode), pReq->name,
+ metaError("vgId:%d, failed to create super table: %s uid: %" PRId64 " since %s", TD_VID(pMeta->pVnode), pReq->name,
pReq->suid, tstrerror(terrno));
return -1;
}
int metaDropSTable(SMeta *pMeta, int64_t verison, SVDropStbReq *pReq) {
- TBC *pNameIdxc = NULL;
- TBC *pUidIdxc = NULL;
- TBC *pCtbIdxc = NULL;
- SCtbIdxKey *pCtbIdxKey;
- const void *pKey = NULL;
- int nKey;
- const void *pData = NULL;
- int nData;
- int c, ret;
-
- // prepare uid idx cursor
- tdbTbcOpen(pMeta->pUidIdx, &pUidIdxc, &pMeta->txn);
- ret = tdbTbcMoveTo(pUidIdxc, &pReq->suid, sizeof(tb_uid_t), &c);
- if (ret < 0 || c != 0) {
- terrno = TSDB_CODE_VND_TB_NOT_EXIST;
- tdbTbcClose(pUidIdxc);
- goto _err;
- }
-
- // prepare name idx cursor
- tdbTbcOpen(pMeta->pNameIdx, &pNameIdxc, &pMeta->txn);
- ret = tdbTbcMoveTo(pNameIdxc, pReq->name, strlen(pReq->name) + 1, &c);
- if (ret < 0 || c != 0) {
- ASSERT(0);
+ void *pKey = NULL;
+ int nKey = 0;
+ void *pData = NULL;
+ int nData = 0;
+ int c = 0;
+ int rc = 0;
+
+ // check if super table exists
+ rc = tdbTbGet(pMeta->pNameIdx, pReq->name, strlen(pReq->name) + 1, &pData, &nData);
+ if (rc < 0 || *(tb_uid_t *)pData != pReq->suid) {
+ terrno = TSDB_CODE_VND_TABLE_NOT_EXIST;
+ return -1;
}
- tdbTbcDelete(pUidIdxc);
- tdbTbcDelete(pNameIdxc);
- tdbTbcClose(pUidIdxc);
- tdbTbcClose(pNameIdxc);
+ // drop all child tables
+ TBC *pCtbIdxc = NULL;
+ SArray *pArray = taosArrayInit(8, sizeof(tb_uid_t));
- // loop to drop each child table
tdbTbcOpen(pMeta->pCtbIdx, &pCtbIdxc, &pMeta->txn);
- ret = tdbTbcMoveTo(pCtbIdxc, &(SCtbIdxKey){.suid = pReq->suid, .uid = INT64_MIN}, sizeof(SCtbIdxKey), &c);
- if (ret < 0 || (c < 0 && tdbTbcMoveToNext(pCtbIdxc) < 0)) {
+ rc = tdbTbcMoveTo(pCtbIdxc, &(SCtbIdxKey){.suid = pReq->suid, .uid = INT64_MIN}, sizeof(SCtbIdxKey), &c);
+ if (rc < 0) {
tdbTbcClose(pCtbIdxc);
- goto _exit;
+ metaWLock(pMeta);
+ goto _drop_super_table;
}
for (;;) {
- tdbTbcGet(pCtbIdxc, &pKey, &nKey, NULL, NULL);
- pCtbIdxKey = (SCtbIdxKey *)pKey;
+ rc = tdbTbcNext(pCtbIdxc, &pKey, &nKey, NULL, NULL);
+ if (rc < 0) break;
- if (pCtbIdxKey->suid > pReq->suid) break;
+ if (((SCtbIdxKey *)pKey)->suid < pReq->suid) {
+ continue;
+ } else if (((SCtbIdxKey *)pKey)->suid > pReq->suid) {
+ break;
+ }
+
+ taosArrayPush(pArray, &(((SCtbIdxKey *)pKey)->uid));
+ }
- // drop the child table (TODO)
+ tdbTbcClose(pCtbIdxc);
- if (tdbTbcMoveToNext(pCtbIdxc) < 0) break;
+ metaWLock(pMeta);
+
+ for (int32_t iChild = 0; iChild < taosArrayGetSize(pArray); iChild++) {
+ tb_uid_t uid = *(tb_uid_t *)taosArrayGet(pArray, iChild);
+ metaDropTableByUid(pMeta, uid, NULL);
}
+ taosArrayDestroy(pArray);
+
+ // drop super table
+_drop_super_table:
+ tdbTbGet(pMeta->pUidIdx, &pReq->suid, sizeof(tb_uid_t), &pData, &nData);
+ tdbTbDelete(pMeta->pTbDb, &(STbDbKey){.version = *(int64_t *)pData, .uid = pReq->suid}, sizeof(STbDbKey),
+ &pMeta->txn);
+ tdbTbDelete(pMeta->pNameIdx, pReq->name, strlen(pReq->name) + 1, &pMeta->txn);
+ tdbTbDelete(pMeta->pUidIdx, &pReq->suid, sizeof(tb_uid_t), &pMeta->txn);
+
+ metaULock(pMeta);
+
_exit:
- metaDebug("vgId:%d super table %s uid:%" PRId64 " is dropped", TD_VID(pMeta->pVnode), pReq->name, pReq->suid);
+ tdbFree(pKey);
+ tdbFree(pData);
+ metaDebug("vgId:%d, super table %s uid:%" PRId64 " is dropped", TD_VID(pMeta->pVnode), pReq->name, pReq->suid);
return 0;
-
-_err:
- metaError("vgId:%d failed to drop super table %s uid:%" PRId64 " since %s", TD_VID(pMeta->pVnode), pReq->name,
- pReq->suid, tstrerror(terrno));
- return -1;
}
int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
@@ -174,15 +200,13 @@ int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
nStbEntry.type = TSDB_SUPER_TABLE;
nStbEntry.uid = pReq->suid;
nStbEntry.name = pReq->name;
- nStbEntry.stbEntry.schema = pReq->schema;
+ nStbEntry.stbEntry.schemaRow = pReq->schemaRow;
nStbEntry.stbEntry.schemaTag = pReq->schemaTag;
metaWLock(pMeta);
// compare two entry
- if (oStbEntry.stbEntry.schema.sver != pReq->schema.sver) {
- if (oStbEntry.stbEntry.schema.nCols != pReq->schema.nCols) {
- metaSaveToSkmDb(pMeta, &nStbEntry);
- }
+ if (oStbEntry.stbEntry.schemaRow.version != pReq->schemaRow.version) {
+ metaSaveToSkmDb(pMeta, &nStbEntry);
}
// if (oStbEntry.stbEntry.schemaTag.sver != pReq->schemaTag.sver) {
@@ -239,145 +263,86 @@ int metaCreateTable(SMeta *pMeta, int64_t version, SVCreateTbReq *pReq) {
} else {
me.ntbEntry.ctime = pReq->ctime;
me.ntbEntry.ttlDays = pReq->ttl;
- me.ntbEntry.schema = pReq->ntb.schema;
- me.ntbEntry.ncid = me.ntbEntry.schema.pSchema[me.ntbEntry.schema.nCols - 1].colId + 1;
+ me.ntbEntry.schemaRow = pReq->ntb.schemaRow;
+ me.ntbEntry.ncid = me.ntbEntry.schemaRow.pSchema[me.ntbEntry.schemaRow.nCols - 1].colId + 1;
}
if (metaHandleEntry(pMeta, &me) < 0) goto _err;
- metaDebug("vgId:%d table %s uid %" PRId64 " is created, type:%" PRId8, TD_VID(pMeta->pVnode), pReq->name, pReq->uid,
+ metaDebug("vgId:%d, table %s uid %" PRId64 " is created, type:%" PRId8, TD_VID(pMeta->pVnode), pReq->name, pReq->uid,
pReq->type);
return 0;
_err:
- metaError("vgId:%d failed to create table:%s type:%s since %s", TD_VID(pMeta->pVnode), pReq->name,
+ metaError("vgId:%d, failed to create table:%s type:%s since %s", TD_VID(pMeta->pVnode), pReq->name,
pReq->type == TSDB_CHILD_TABLE ? "child table" : "normal table", tstrerror(terrno));
return -1;
}
int metaDropTable(SMeta *pMeta, int64_t version, SVDropTbReq *pReq, SArray *tbUids) {
- TBC *pTbDbc = NULL;
- TBC *pUidIdxc = NULL;
- TBC *pNameIdxc = NULL;
- const void *pData;
- int nData;
- tb_uid_t uid;
- int64_t tver;
- SMetaEntry me = {0};
- SDecoder coder = {0};
- int8_t type;
- int64_t ctime;
- tb_uid_t suid;
- int c = 0, ret;
-
- // search & delete the name idx
- tdbTbcOpen(pMeta->pNameIdx, &pNameIdxc, &pMeta->txn);
- ret = tdbTbcMoveTo(pNameIdxc, pReq->name, strlen(pReq->name) + 1, &c);
- if (ret < 0 || !tdbTbcIsValid(pNameIdxc) || c) {
- tdbTbcClose(pNameIdxc);
+ void *pData = NULL;
+ int nData = 0;
+ int rc = 0;
+ tb_uid_t uid;
+ int type;
+
+ rc = tdbTbGet(pMeta->pNameIdx, pReq->name, strlen(pReq->name) + 1, &pData, &nData);
+ if (rc < 0) {
terrno = TSDB_CODE_VND_TABLE_NOT_EXIST;
return -1;
}
-
- ret = tdbTbcGet(pNameIdxc, NULL, NULL, &pData, &nData);
- if (ret < 0) {
- ASSERT(0);
- return -1;
- }
-
uid = *(tb_uid_t *)pData;
- tdbTbcDelete(pNameIdxc);
- tdbTbcClose(pNameIdxc);
-
- // search & delete uid idx
- tdbTbcOpen(pMeta->pUidIdx, &pUidIdxc, &pMeta->txn);
- ret = tdbTbcMoveTo(pUidIdxc, &uid, sizeof(uid), &c);
- if (ret < 0 || c != 0) {
- ASSERT(0);
- return -1;
- }
-
- ret = tdbTbcGet(pUidIdxc, NULL, NULL, &pData, &nData);
- if (ret < 0) {
- ASSERT(0);
- return -1;
- }
-
- tver = *(int64_t *)pData;
- tdbTbcDelete(pUidIdxc);
- tdbTbcClose(pUidIdxc);
+ metaWLock(pMeta);
+ metaDropTableByUid(pMeta, uid, &type);
+ metaULock(pMeta);
- // search and get meta entry
- tdbTbcOpen(pMeta->pTbDb, &pTbDbc, &pMeta->txn);
- ret = tdbTbcMoveTo(pTbDbc, &(STbDbKey){.uid = uid, .version = tver}, sizeof(STbDbKey), &c);
- if (ret < 0 || c != 0) {
- ASSERT(0);
- return -1;
+ if (type == TSDB_CHILD_TABLE && tbUids) {
+ taosArrayPush(tbUids, &uid);
}
- ret = tdbTbcGet(pTbDbc, NULL, NULL, &pData, &nData);
- if (ret < 0) {
- ASSERT(0);
- return -1;
- }
+ tdbFree(pData);
+ return 0;
+}
- // decode entry
- void *pDataCopy = taosMemoryMalloc(nData); // remove the copy (todo)
- memcpy(pDataCopy, pData, nData);
- tDecoderInit(&coder, pDataCopy, nData);
- ret = metaDecodeEntry(&coder, &me);
- if (ret < 0) {
- ASSERT(0);
- return -1;
- }
+static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) {
+ void *pData = NULL;
+ int nData = 0;
+ int rc = 0;
+ int64_t version;
+ SMetaEntry e = {0};
+ SDecoder dc = {0};
- type = me.type;
- if (type == TSDB_CHILD_TABLE) {
- ctime = me.ctbEntry.ctime;
- suid = me.ctbEntry.suid;
- taosArrayPush(tbUids, &me.uid);
- } else if (type == TSDB_NORMAL_TABLE) {
- ctime = me.ntbEntry.ctime;
- suid = 0;
- } else {
- ASSERT(0);
- }
+ rc = tdbTbGet(pMeta->pUidIdx, &uid, sizeof(uid), &pData, &nData);
+ version = *(int64_t *)pData;
- taosMemoryFree(pDataCopy);
- tDecoderClear(&coder);
- tdbTbcClose(pTbDbc);
+ tdbTbGet(pMeta->pTbDb, &(STbDbKey){.version = version, .uid = uid}, sizeof(STbDbKey), &pData, &nData);
- if (type == TSDB_CHILD_TABLE) {
- // remove the pCtbIdx
- TBC *pCtbIdxc = NULL;
- tdbTbcOpen(pMeta->pCtbIdx, &pCtbIdxc, &pMeta->txn);
+ tDecoderInit(&dc, pData, nData);
+ metaDecodeEntry(&dc, &e);
- ret = tdbTbcMoveTo(pCtbIdxc, &(SCtbIdxKey){.suid = suid, .uid = uid}, sizeof(SCtbIdxKey), &c);
- if (ret < 0 || c != 0) {
- ASSERT(0);
- return -1;
- }
+ if (type) *type = e.type;
- tdbTbcDelete(pCtbIdxc);
- tdbTbcClose(pCtbIdxc);
-
- // remove tags from pTagIdx (todo)
- } else if (type == TSDB_NORMAL_TABLE) {
- // remove from pSkmDb
- } else {
- ASSERT(0);
+ tdbTbDelete(pMeta->pTbDb, &(STbDbKey){.version = version, .uid = uid}, sizeof(STbDbKey), &pMeta->txn);
+ tdbTbDelete(pMeta->pNameIdx, e.name, strlen(e.name) + 1, &pMeta->txn);
+ tdbTbDelete(pMeta->pUidIdx, &uid, sizeof(uid), &pMeta->txn);
+ if (e.type == TSDB_CHILD_TABLE) {
+ tdbTbDelete(pMeta->pCtbIdx, &(SCtbIdxKey){.suid = e.ctbEntry.suid, .uid = uid}, sizeof(SCtbIdxKey), &pMeta->txn);
+ } else if (e.type == TSDB_NORMAL_TABLE) {
+ // drop schema.db (todo)
+ // drop ttl.idx (todo)
+ } else if (e.type == TSDB_SUPER_TABLE) {
+ // drop schema.db (todo)
}
- // remove from ttl (todo)
- if (ctime > 0) {
- }
+ tDecoderClear(&dc);
+ tdbFree(pData);
return 0;
}
-static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAlterTbReq) {
- void *pVal = NULL;
+static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAlterTbReq, STableMetaRsp *pMetaRsp) {
+ void * pVal = NULL;
int nVal = 0;
const void *pData = NULL;
int nData = 0;
@@ -432,7 +397,7 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
}
// search the column to add/drop/update
- pSchema = &entry.ntbEntry.schema;
+ pSchema = &entry.ntbEntry.schemaRow;
int32_t iCol = 0;
for (;;) {
pColumn = NULL;
@@ -453,16 +418,16 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
terrno = TSDB_CODE_VND_COL_ALREADY_EXISTS;
goto _err;
}
- pSchema->sver++;
+ pSchema->version++;
pSchema->nCols++;
pNewSchema = taosMemoryMalloc(sizeof(SSchema) * pSchema->nCols);
memcpy(pNewSchema, pSchema->pSchema, sizeof(SSchema) * (pSchema->nCols - 1));
pSchema->pSchema = pNewSchema;
- pSchema->pSchema[entry.ntbEntry.schema.nCols - 1].bytes = pAlterTbReq->bytes;
- pSchema->pSchema[entry.ntbEntry.schema.nCols - 1].type = pAlterTbReq->type;
- pSchema->pSchema[entry.ntbEntry.schema.nCols - 1].flags = pAlterTbReq->flags;
- pSchema->pSchema[entry.ntbEntry.schema.nCols - 1].colId = entry.ntbEntry.ncid++;
- strcpy(pSchema->pSchema[entry.ntbEntry.schema.nCols - 1].name, pAlterTbReq->colName);
+ pSchema->pSchema[entry.ntbEntry.schemaRow.nCols - 1].bytes = pAlterTbReq->bytes;
+ pSchema->pSchema[entry.ntbEntry.schemaRow.nCols - 1].type = pAlterTbReq->type;
+ pSchema->pSchema[entry.ntbEntry.schemaRow.nCols - 1].flags = pAlterTbReq->flags;
+ pSchema->pSchema[entry.ntbEntry.schemaRow.nCols - 1].colId = entry.ntbEntry.ncid++;
+ strcpy(pSchema->pSchema[entry.ntbEntry.schemaRow.nCols - 1].name, pAlterTbReq->colName);
break;
case TSDB_ALTER_TABLE_DROP_COLUMN:
if (pColumn == NULL) {
@@ -473,7 +438,7 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
terrno = TSDB_CODE_VND_INVALID_TABLE_ACTION;
goto _err;
}
- pSchema->sver++;
+ pSchema->version++;
tlen = (pSchema->nCols - iCol - 1) * sizeof(SSchema);
if (tlen) {
memmove(pColumn, pColumn + 1, tlen);
@@ -489,7 +454,7 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
terrno = TSDB_CODE_VND_INVALID_TABLE_ACTION;
goto _err;
}
- pSchema->sver++;
+ pSchema->version++;
pColumn->bytes = pAlterTbReq->colModBytes;
break;
case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME:
@@ -497,7 +462,7 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
terrno = TSDB_CODE_VND_TABLE_COL_NOT_EXISTS;
goto _err;
}
- pSchema->sver++;
+ pSchema->version++;
strcpy(pColumn->name, pAlterTbReq->colNewName);
break;
}
@@ -516,6 +481,8 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
metaULock(pMeta);
+ metaUpdateMetaRsp(uid, pAlterTbReq->tbName, pSchema, pMetaRsp);
+
if (pNewSchema) taosMemoryFree(pNewSchema);
tDecoderClear(&dc);
tdbTbcClose(pTbDbc);
@@ -608,37 +575,47 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA
// TODO : need to update tag index
}
ctbEntry.version = version;
- if(pTagSchema->nCols == 1 && pTagSchema->pSchema[0].type == TSDB_DATA_TYPE_JSON){
+ if (pTagSchema->nCols == 1 && pTagSchema->pSchema[0].type == TSDB_DATA_TYPE_JSON) {
ctbEntry.ctbEntry.pTags = taosMemoryMalloc(pAlterTbReq->nTagVal);
- if(ctbEntry.ctbEntry.pTags == NULL){
+ if (ctbEntry.ctbEntry.pTags == NULL) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ memcpy((void *)ctbEntry.ctbEntry.pTags, pAlterTbReq->pTagVal, pAlterTbReq->nTagVal);
+ } else {
+ const STag *pOldTag = (const STag *)ctbEntry.ctbEntry.pTags;
+ STag *pNewTag = NULL;
+ SArray *pTagArray = taosArrayInit(pTagSchema->nCols, sizeof(STagVal));
+ if (!pTagArray) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
- memcpy((void*)ctbEntry.ctbEntry.pTags, pAlterTbReq->pTagVal, pAlterTbReq->nTagVal);
- }else{
- SKVRowBuilder kvrb = {0};
- const SKVRow pOldTag = (const SKVRow)ctbEntry.ctbEntry.pTags;
- SKVRow pNewTag = NULL;
-
- tdInitKVRowBuilder(&kvrb);
for (int32_t i = 0; i < pTagSchema->nCols; i++) {
SSchema *pCol = &pTagSchema->pSchema[i];
if (iCol == i) {
- tdAddColToKVRow(&kvrb, pCol->colId, pAlterTbReq->pTagVal, pAlterTbReq->nTagVal);
+ STagVal val = {0};
+ val.type = pCol->type;
+ val.cid = pCol->colId;
+ if (IS_VAR_DATA_TYPE(pCol->type)) {
+ val.pData = pAlterTbReq->pTagVal;
+ val.nData = pAlterTbReq->nTagVal;
+ } else {
+ memcpy(&val.i64, pAlterTbReq->pTagVal, pAlterTbReq->nTagVal);
+ }
+ taosArrayPush(pTagArray, &val);
} else {
- void *p = tdGetKVRowValOfCol(pOldTag, pCol->colId);
- if (p) {
- if (IS_VAR_DATA_TYPE(pCol->type)) {
- tdAddColToKVRow(&kvrb, pCol->colId, p, varDataTLen(p));
- } else {
- tdAddColToKVRow(&kvrb, pCol->colId, p, pCol->bytes);
- }
+ STagVal val = {.cid = pCol->colId};
+ if (tTagGet(pOldTag, &val)) {
+ taosArrayPush(pTagArray, &val);
}
}
}
-
- ctbEntry.ctbEntry.pTags = tdGetKVRowFromBuilder(&kvrb);
- tdDestroyKVRowBuilder(&kvrb);
+ if ((terrno = tTagNew(pTagArray, pTagSchema->version, false, &pNewTag)) < 0) {
+ taosArrayDestroy(pTagArray);
+ goto _err;
+ }
+ ctbEntry.ctbEntry.pTags = (uint8_t *)pNewTag;
+ taosArrayDestroy(pTagArray);
}
// save to table.db
@@ -649,7 +626,7 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA
tDecoderClear(&dc1);
tDecoderClear(&dc2);
- if (ctbEntry.ctbEntry.pTags) taosMemoryFree((void*)ctbEntry.ctbEntry.pTags);
+ if (ctbEntry.ctbEntry.pTags) taosMemoryFree((void *)ctbEntry.ctbEntry.pTags);
if (ctbEntry.pBuf) taosMemoryFree(ctbEntry.pBuf);
if (stbEntry.pBuf) tdbFree(stbEntry.pBuf);
tdbTbcClose(pTbDbc);
@@ -672,13 +649,13 @@ static int metaUpdateTableOptions(SMeta *pMeta, int64_t version, SVAlterTbReq *p
return 0;
}
-int metaAlterTable(SMeta *pMeta, int64_t version, SVAlterTbReq *pReq) {
+int metaAlterTable(SMeta *pMeta, int64_t version, SVAlterTbReq *pReq, STableMetaRsp *pMetaRsp) {
switch (pReq->action) {
case TSDB_ALTER_TABLE_ADD_COLUMN:
case TSDB_ALTER_TABLE_DROP_COLUMN:
case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES:
case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME:
- return metaAlterTableColumn(pMeta, version, pReq);
+ return metaAlterTableColumn(pMeta, version, pReq, pMetaRsp);
case TSDB_ALTER_TABLE_UPDATE_TAG_VAL:
return metaUpdateTableTagVal(pMeta, version, pReq);
case TSDB_ALTER_TABLE_UPDATE_OPTIONS:
@@ -774,17 +751,17 @@ static int metaUpdateCtbIdx(SMeta *pMeta, const SMetaEntry *pME) {
return tdbTbInsert(pMeta->pCtbIdx, &ctbIdxKey, sizeof(ctbIdxKey), NULL, 0, &pMeta->txn);
}
-static int metaCreateTagIdxKey(tb_uid_t suid, int32_t cid, const void *pTagData, int8_t type, tb_uid_t uid,
- STagIdxKey **ppTagIdxKey, int32_t *nTagIdxKey) {
- int32_t nTagData = 0;
+int metaCreateTagIdxKey(tb_uid_t suid, int32_t cid, const void *pTagData, int32_t nTagData, int8_t type, tb_uid_t uid,
+ STagIdxKey **ppTagIdxKey, int32_t *nTagIdxKey) {
+ // int32_t nTagData = 0;
- if (pTagData) {
- if (IS_VAR_DATA_TYPE(type)) {
- nTagData = varDataTLen(pTagData);
- } else {
- nTagData = tDataTypes[type].bytes;
- }
- }
+ // if (pTagData) {
+ // if (IS_VAR_DATA_TYPE(type)) {
+ // nTagData = varDataTLen(pTagData);
+ // } else {
+ // nTagData = tDataTypes[type].bytes;
+ // }
+ // }
*nTagIdxKey = sizeof(STagIdxKey) + nTagData + sizeof(tb_uid_t);
*ppTagIdxKey = (STagIdxKey *)taosMemoryMalloc(*nTagIdxKey);
@@ -816,6 +793,7 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) {
int32_t nTagIdxKey;
const SSchema *pTagColumn; // = &stbEntry.stbEntry.schema.pSchema[0];
const void *pTagData = NULL; //
+ int32_t nTagData = 0;
SDecoder dc = {0};
// get super table
@@ -828,7 +806,21 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) {
metaDecodeEntry(&dc, &stbEntry);
pTagColumn = &stbEntry.stbEntry.schemaTag.pSchema[0];
- pTagData = tdGetKVRowValOfCol((const SKVRow)pCtbEntry->ctbEntry.pTags, pTagColumn->colId);
+
+ STagVal tagVal = {.cid = pTagColumn->colId};
+ if (pTagColumn->type != TSDB_DATA_TYPE_JSON) {
+ tTagGet((const STag *)pCtbEntry->ctbEntry.pTags, &tagVal);
+ if (IS_VAR_DATA_TYPE(pTagColumn->type)) {
+ pTagData = tagVal.pData;
+ nTagData = (int32_t)tagVal.nData;
+ } else {
+ pTagData = &(tagVal.i64);
+ nTagData = tDataTypes[pTagColumn->type].bytes;
+ }
+ } else {
+ // pTagData = pCtbEntry->ctbEntry.pTags;
+ // nTagData = ((const STag *)pCtbEntry->ctbEntry.pTags)->len;
+ }
// update tag index
#ifdef USE_INVERTED_INDEX
@@ -843,8 +835,8 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) {
int ret = indexPut((SIndex *)pMeta->pTagIvtIdx, tmGroup, tuid);
indexMultiTermDestroy(tmGroup);
#else
- if (metaCreateTagIdxKey(pCtbEntry->ctbEntry.suid, pTagColumn->colId, pTagData, pTagColumn->type, pCtbEntry->uid,
- &pTagIdxKey, &nTagIdxKey) < 0) {
+ if (metaCreateTagIdxKey(pCtbEntry->ctbEntry.suid, pTagColumn->colId, pTagData, nTagData, pTagColumn->type,
+ pCtbEntry->uid, &pTagIdxKey, &nTagIdxKey) < 0) {
return -1;
}
tdbTbInsert(pMeta->pTagIdx, pTagIdxKey, nTagIdxKey, NULL, 0, &pMeta->txn);
@@ -864,15 +856,15 @@ static int metaSaveToSkmDb(SMeta *pMeta, const SMetaEntry *pME) {
const SSchemaWrapper *pSW;
if (pME->type == TSDB_SUPER_TABLE) {
- pSW = &pME->stbEntry.schema;
+ pSW = &pME->stbEntry.schemaRow;
} else if (pME->type == TSDB_NORMAL_TABLE) {
- pSW = &pME->ntbEntry.schema;
+ pSW = &pME->ntbEntry.schemaRow;
} else {
ASSERT(0);
}
skmDbKey.uid = pME->uid;
- skmDbKey.sver = pSW->sver;
+ skmDbKey.sver = pSW->version;
// encode schema
int32_t ret = 0;
@@ -933,3 +925,11 @@ _err:
metaULock(pMeta);
return -1;
}
+// refactor later
+void *metaGetIdx(SMeta *pMeta) {
+#ifdef USE_INVERTED_INDEX
+ return pMeta->pTagIvtIdx;
+#else
+ return pMeta->pTagIdx;
+#endif
+}
diff --git a/source/dnode/vnode/src/sma/sma.c b/source/dnode/vnode/src/sma/sma.c
index 0e7ce385a1c2aa225d21201af2fcc7f0ffd72d79..04f65275d7d20ab41564a8e8c6e67c908b3bc649 100644
--- a/source/dnode/vnode/src/sma/sma.c
+++ b/source/dnode/vnode/src/sma/sma.c
@@ -15,13 +15,12 @@
#include "sma.h"
-
// TODO: Who is responsible for resource allocate and release?
int32_t tdProcessTSmaInsert(SSma* pSma, int64_t indexUid, const char* msg) {
int32_t code = TSDB_CODE_SUCCESS;
if ((code = tdProcessTSmaInsertImpl(pSma, indexUid, msg)) < 0) {
- smaWarn("vgId:%d insert tsma data failed since %s", SMA_VID(pSma), tstrerror(terrno));
+ smaWarn("vgId:%d, insert tsma data failed since %s", SMA_VID(pSma), tstrerror(terrno));
}
// TODO: destroy SSDataBlocks(msg)
return code;
@@ -31,16 +30,16 @@ int32_t tdProcessTSmaCreate(SSma* pSma, int64_t version, const char* msg) {
int32_t code = TSDB_CODE_SUCCESS;
if ((code = tdProcessTSmaCreateImpl(pSma, version, msg)) < 0) {
- smaWarn("vgId:%d create tsma failed since %s", SMA_VID(pSma), tstrerror(terrno));
+ smaWarn("vgId:%d, create tsma failed since %s", SMA_VID(pSma), tstrerror(terrno));
}
// TODO: destroy SSDataBlocks(msg)
return code;
}
-int32_t tdUpdateExpireWindow(SSma* pSma, SSubmitReq* pMsg, int64_t version) {
+int32_t tdUpdateExpireWindow(SSma* pSma, const SSubmitReq* pMsg, int64_t version) {
int32_t code = TSDB_CODE_SUCCESS;
if ((code = tdUpdateExpiredWindowImpl(pSma, pMsg, version)) < 0) {
- smaWarn("vgId:%d update expired sma window failed since %s", SMA_VID(pSma), tstrerror(terrno));
+ smaWarn("vgId:%d, update expired sma window failed since %s", SMA_VID(pSma), tstrerror(terrno));
}
return code;
}
@@ -48,7 +47,15 @@ int32_t tdUpdateExpireWindow(SSma* pSma, SSubmitReq* pMsg, int64_t version) {
int32_t tdGetTSmaData(SSma* pSma, char* pData, int64_t indexUid, TSKEY querySKey, int32_t nMaxResult) {
int32_t code = TSDB_CODE_SUCCESS;
if ((code = tdGetTSmaDataImpl(pSma, pData, indexUid, querySKey, nMaxResult)) < 0) {
- smaWarn("vgId:%d get tSma data failed since %s", SMA_VID(pSma), tstrerror(terrno));
+ smaWarn("vgId:%d, get tSma data failed since %s", SMA_VID(pSma), tstrerror(terrno));
+ }
+ return code;
+}
+
+int32_t smaGetTSmaDays(SVnodeCfg* pCfg, void* pCont, uint32_t contLen, int32_t *days) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ if ((code = tdGetTSmaDaysImpl(pCfg, pCont, contLen, days)) < 0) {
+ smaWarn("vgId:%d get tSma days failed since %s", pCfg->vgId, tstrerror(terrno));
}
return code;
}
diff --git a/source/dnode/vnode/src/sma/smaEnv.c b/source/dnode/vnode/src/sma/smaEnv.c
index 8285b74e509f53a8ed3a9d2e5745d2f56135087e..179f573e8d72c3ae6938edb31e61ef6b9ec8a675 100644
--- a/source/dnode/vnode/src/sma/smaEnv.c
+++ b/source/dnode/vnode/src/sma/smaEnv.c
@@ -222,7 +222,7 @@ int32_t tdRefSmaStat(SSma *pSma, SSmaStat *pStat) {
if (!pStat) return 0;
int ref = T_REF_INC(pStat);
- smaDebug("vgId:%d ref sma stat:%p, val:%d", SMA_VID(pSma), pStat, ref);
+ smaDebug("vgId:%d, ref sma stat:%p, val:%d", SMA_VID(pSma), pStat, ref);
return 0;
}
@@ -230,7 +230,7 @@ int32_t tdUnRefSmaStat(SSma *pSma, SSmaStat *pStat) {
if (!pStat) return 0;
int ref = T_REF_DEC(pStat);
- smaDebug("vgId:%d unref sma stat:%p, val:%d", SMA_VID(pSma), pStat, ref);
+ smaDebug("vgId:%d, unref sma stat:%p, val:%d", SMA_VID(pSma), pStat, ref);
return 0;
}
@@ -278,7 +278,7 @@ static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType) {
void *tdFreeSmaStatItem(SSmaStatItem *pSmaStatItem) {
if (pSmaStatItem) {
- tdDestroyTSma(pSmaStatItem->pTSma);
+ tDestroyTSma(pSmaStatItem->pTSma);
taosMemoryFreeClear(pSmaStatItem->pTSma);
taosHashCleanup(pSmaStatItem->expiredWindows);
taosMemoryFreeClear(pSmaStatItem);
@@ -321,7 +321,7 @@ int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType) {
int32_t tdLockSma(SSma *pSma) {
int code = taosThreadMutexLock(&pSma->mutex);
if (code != 0) {
- smaError("vgId:%d failed to lock td since %s", SMA_VID(pSma), strerror(errno));
+ smaError("vgId:%d, failed to lock td since %s", SMA_VID(pSma), strerror(errno));
terrno = TAOS_SYSTEM_ERROR(code);
return -1;
}
@@ -334,7 +334,7 @@ int32_t tdUnLockSma(SSma *pSma) {
pSma->locked = false;
int code = taosThreadMutexUnlock(&pSma->mutex);
if (code != 0) {
- smaError("vgId:%d failed to unlock td since %s", SMA_VID(pSma), strerror(errno));
+ smaError("vgId:%d, failed to unlock td since %s", SMA_VID(pSma), strerror(errno));
terrno = TAOS_SYSTEM_ERROR(code);
return -1;
}
@@ -376,7 +376,7 @@ int32_t tdCheckAndInitSmaEnv(SSma *pSma, int8_t smaType) {
if (did.level < 0 || did.id < 0) {
tdUnLockSma(pSma);
- smaError("vgId:%d init sma env failed since invalid did(%d,%d)", SMA_VID(pSma), did.level, did.id);
+ smaError("vgId:%d, init sma env failed since invalid did(%d,%d)", SMA_VID(pSma), did.level, did.id);
return TSDB_CODE_FAILED;
}
diff --git a/source/dnode/vnode/src/sma/smaOpen.c b/source/dnode/vnode/src/sma/smaOpen.c
index 2a74fe78cbc66a3873857347df010190554e1e76..dde6578054ac43965b9c2300dd2d118baea1d25e 100644
--- a/source/dnode/vnode/src/sma/smaOpen.c
+++ b/source/dnode/vnode/src/sma/smaOpen.c
@@ -104,7 +104,7 @@ int32_t smaOpen(SVnode *pVnode) {
taosThreadMutexInit(&pSma->mutex, NULL);
pSma->locked = false;
- if (vnodeIsRollup(pVnode)) {
+ if (VND_IS_RSMA(pVnode)) {
STsdbKeepCfg keepCfg = {0};
for (int i = 0; i < TSDB_RETENTION_MAX; ++i) {
if (i == TSDB_RETENTION_L0) {
diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c
index 88af049d0bd298e58e51286e0980fd13a7872734..80c8d20572bc9ef6658d3bc46116874e9ff68a42 100644
--- a/source/dnode/vnode/src/sma/smaRollup.c
+++ b/source/dnode/vnode/src/sma/smaRollup.c
@@ -18,7 +18,7 @@
static FORCE_INLINE int32_t tdUidStorePut(STbUidStore *pStore, tb_uid_t suid, tb_uid_t *uid);
static FORCE_INLINE int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids);
static FORCE_INLINE int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType, qTaskInfo_t *taskInfo,
- STSchema *pTSchema, tb_uid_t suid, tb_uid_t uid, int8_t level);
+ STSchema *pTSchema, tb_uid_t suid, int8_t level);
struct SRSmaInfo {
void *taskInfo[TSDB_RETENTION_L2]; // qTaskInfo_t
@@ -58,30 +58,30 @@ static FORCE_INLINE int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SA
if (!suid || !tbUids) {
terrno = TSDB_CODE_INVALID_PTR;
- smaError("vgId:%d failed to get rsma info for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr(terrno));
+ smaError("vgId:%d, failed to get rsma info for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr(terrno));
return TSDB_CODE_FAILED;
}
pRSmaInfo = taosHashGet(SMA_STAT_INFO_HASH(pStat), suid, sizeof(tb_uid_t));
if (!pRSmaInfo || !(pRSmaInfo = *(SRSmaInfo **)pRSmaInfo)) {
- smaError("vgId:%d failed to get rsma info for uid:%" PRIi64, SMA_VID(pSma), *suid);
+ smaError("vgId:%d, failed to get rsma info for uid:%" PRIi64, SMA_VID(pSma), *suid);
terrno = TSDB_CODE_TDB_INVALID_SMA_STAT;
return TSDB_CODE_FAILED;
}
if (pRSmaInfo->taskInfo[0] && (qUpdateQualifiedTableId(pRSmaInfo->taskInfo[0], tbUids, true) != 0)) {
- smaError("vgId:%d update tbUidList failed for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr(terrno));
+ smaError("vgId:%d, update tbUidList failed for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr(terrno));
return TSDB_CODE_FAILED;
} else {
- smaDebug("vgId:%d update tbUidList succeed for qTaskInfo:%p with suid:%" PRIi64 ", uid:%" PRIi64, SMA_VID(pSma),
+ smaDebug("vgId:%d, update tbUidList succeed for qTaskInfo:%p with suid:%" PRIi64 ", uid:%" PRIi64, SMA_VID(pSma),
pRSmaInfo->taskInfo[0], *suid, *(int64_t *)taosArrayGet(tbUids, 0));
}
if (pRSmaInfo->taskInfo[1] && (qUpdateQualifiedTableId(pRSmaInfo->taskInfo[1], tbUids, true) != 0)) {
- smaError("vgId:%d update tbUidList failed for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr(terrno));
+ smaError("vgId:%d, update tbUidList failed for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr(terrno));
return TSDB_CODE_FAILED;
} else {
- smaDebug("vgId:%d update tbUidList succeed for qTaskInfo:%p with suid:%" PRIi64 ", uid:%" PRIi64, SMA_VID(pSma),
+ smaDebug("vgId:%d, update tbUidList succeed for qTaskInfo:%p with suid:%" PRIi64 ", uid:%" PRIi64, SMA_VID(pSma),
pRSmaInfo->taskInfo[1], *suid, *(int64_t *)taosArrayGet(tbUids, 0));
}
@@ -165,16 +165,19 @@ int32_t tdFetchTbUidList(SSma *pSma, STbUidStore **ppStore, tb_uid_t suid, tb_ui
* @param pReq
* @return int32_t
*/
-int32_t tdProcessRSmaCreate(SSma *pSma, SMeta *pMeta, SVCreateStbReq *pReq, SMsgCb *pMsgCb) {
+int32_t tdProcessRSmaCreate(SVnode *pVnode, SVCreateStbReq *pReq) {
+ SSma *pSma = pVnode->pSma;
+ SMeta *pMeta = pVnode->pMeta;
+ SMsgCb *pMsgCb = &pVnode->msgCb;
if (!pReq->rollup) {
- smaTrace("vgId:%d return directly since no rollup for stable %s %" PRIi64, SMA_VID(pSma), pReq->name, pReq->suid);
+ smaTrace("vgId:%d, return directly since no rollup for stable %s %" PRIi64, SMA_VID(pSma), pReq->name, pReq->suid);
return TSDB_CODE_SUCCESS;
}
SRSmaParam *param = &pReq->pRSmaParam;
if ((param->qmsg1Len == 0) && (param->qmsg2Len == 0)) {
- smaWarn("vgId:%d no qmsg1/qmsg2 for rollup stable %s %" PRIi64, SMA_VID(pSma), pReq->name, pReq->suid);
+ smaWarn("vgId:%d, no qmsg1/qmsg2 for rollup stable %s %" PRIi64, SMA_VID(pSma), pReq->name, pReq->suid);
return TSDB_CODE_SUCCESS;
}
@@ -189,7 +192,7 @@ int32_t tdProcessRSmaCreate(SSma *pSma, SMeta *pMeta, SVCreateStbReq *pReq, SMsg
pRSmaInfo = taosHashGet(SMA_STAT_INFO_HASH(pStat), &pReq->suid, sizeof(tb_uid_t));
if (pRSmaInfo) {
- smaWarn("vgId:%d rsma info already exists for stb: %s, %" PRIi64, SMA_VID(pSma), pReq->name, pReq->suid);
+ smaWarn("vgId:%d, rsma info already exists for stb: %s, %" PRIi64, SMA_VID(pSma), pReq->name, pReq->suid);
return TSDB_CODE_SUCCESS;
}
@@ -210,6 +213,7 @@ int32_t tdProcessRSmaCreate(SSma *pSma, SMeta *pMeta, SVCreateStbReq *pReq, SMsg
.reader = pReadHandle,
.meta = pMeta,
.pMsgCb = pMsgCb,
+ .vnode = pVnode,
};
if (param->qmsg1) {
@@ -234,7 +238,7 @@ int32_t tdProcessRSmaCreate(SSma *pSma, SMeta *pMeta, SVCreateStbReq *pReq, SMsg
TSDB_CODE_SUCCESS) {
return TSDB_CODE_FAILED;
} else {
- smaDebug("vgId:%d register rsma info succeed for suid:%" PRIi64, SMA_VID(pSma), pReq->suid);
+ smaDebug("vgId:%d, register rsma info succeed for suid:%" PRIi64, SMA_VID(pSma), pReq->suid);
}
return TSDB_CODE_SUCCESS;
@@ -364,17 +368,17 @@ static int32_t tdFetchSubmitReqSuids(SSubmitReq *pMsg, STbUidStore *pStore) {
}
static FORCE_INLINE int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType, qTaskInfo_t *taskInfo,
- STSchema *pTSchema, tb_uid_t suid, tb_uid_t uid, int8_t level) {
+ STSchema *pTSchema, tb_uid_t suid, int8_t level) {
SArray *pResult = NULL;
if (!taskInfo) {
- smaDebug("vgId:%d no qTaskInfo to execute rsma %" PRIi8 " task for suid:%" PRIu64, SMA_VID(pSma), level, suid);
+ smaDebug("vgId:%d, no qTaskInfo to execute rsma %" PRIi8 " task for suid:%" PRIu64, SMA_VID(pSma), level, suid);
return TSDB_CODE_SUCCESS;
}
- smaDebug("vgId:%d execute rsma %" PRIi8 " task for qTaskInfo:%p suid:%" PRIu64, SMA_VID(pSma), level, taskInfo, suid);
+ smaDebug("vgId:%d, execute rsma %" PRIi8 " task for qTaskInfo:%p suid:%" PRIu64, SMA_VID(pSma), level, taskInfo, suid);
- qSetStreamInput(taskInfo, pMsg, inputType);
+ qSetStreamInput(taskInfo, pMsg, inputType, true);
while (1) {
SSDataBlock *output = NULL;
uint64_t ts;
@@ -399,7 +403,7 @@ static FORCE_INLINE int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int3
blockDebugShowData(pResult);
STsdb *sinkTsdb = (level == TSDB_RETENTION_L1 ? pSma->pRSmaTsdb1 : pSma->pRSmaTsdb2);
SSubmitReq *pReq = NULL;
- if (buildSubmitReqFromDataBlock(&pReq, pResult, pTSchema, SMA_VID(pSma), uid, suid) != 0) {
+ if (buildSubmitReqFromDataBlock(&pReq, pResult, pTSchema, SMA_VID(pSma), suid) != 0) {
taosArrayDestroy(pResult);
return TSDB_CODE_FAILED;
}
@@ -410,7 +414,7 @@ static FORCE_INLINE int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int3
}
taosMemoryFreeClear(pReq);
} else {
- smaWarn("vgId:%d no rsma % " PRIi8 " data generated since %s", SMA_VID(pSma), level, tstrerror(terrno));
+ smaDebug("vgId:%d, no rsma % " PRIi8 " data generated since %s", SMA_VID(pSma), level, tstrerror(terrno));
}
taosArrayDestroy(pResult);
@@ -418,38 +422,36 @@ static FORCE_INLINE int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int3
return TSDB_CODE_SUCCESS;
}
-static int32_t tdExecuteRSma(SSma *pSma, const void *pMsg, int32_t inputType, tb_uid_t suid, tb_uid_t uid) {
+static int32_t tdExecuteRSma(SSma *pSma, const void *pMsg, int32_t inputType, tb_uid_t suid) {
SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
if (!pEnv) {
// only applicable when rsma env exists
return TSDB_CODE_SUCCESS;
}
- ASSERT(uid != 0); // TODO: remove later
-
SSmaStat *pStat = SMA_ENV_STAT(pEnv);
SRSmaInfo *pRSmaInfo = NULL;
pRSmaInfo = taosHashGet(SMA_STAT_INFO_HASH(pStat), &suid, sizeof(tb_uid_t));
if (!pRSmaInfo || !(pRSmaInfo = *(SRSmaInfo **)pRSmaInfo)) {
- smaDebug("vgId:%d no rsma info for suid:%" PRIu64, SMA_VID(pSma), suid);
+ smaDebug("vgId:%d, no rsma info for suid:%" PRIu64, SMA_VID(pSma), suid);
return TSDB_CODE_SUCCESS;
}
if (!pRSmaInfo->taskInfo[0]) {
- smaDebug("vgId:%d no rsma qTaskInfo for suid:%" PRIu64, SMA_VID(pSma), suid);
+ smaDebug("vgId:%d, no rsma qTaskInfo for suid:%" PRIu64, SMA_VID(pSma), suid);
return TSDB_CODE_SUCCESS;
}
if (inputType == STREAM_DATA_TYPE_SUBMIT_BLOCK) {
// TODO: use the proper schema instead of 0, and cache STSchema in cache
- STSchema *pTSchema = metaGetTbTSchema(SMA_META(pSma), suid, 1);
+ STSchema *pTSchema = metaGetTbTSchema(SMA_META(pSma), suid, -1);
if (!pTSchema) {
terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION;
return TSDB_CODE_FAILED;
}
- tdExecuteRSmaImpl(pSma, pMsg, inputType, pRSmaInfo->taskInfo[0], pTSchema, suid, uid, TSDB_RETENTION_L1);
- tdExecuteRSmaImpl(pSma, pMsg, inputType, pRSmaInfo->taskInfo[1], pTSchema, suid, uid, TSDB_RETENTION_L2);
+ tdExecuteRSmaImpl(pSma, pMsg, inputType, pRSmaInfo->taskInfo[0], pTSchema, suid, TSDB_RETENTION_L1);
+ tdExecuteRSmaImpl(pSma, pMsg, inputType, pRSmaInfo->taskInfo[1], pTSchema, suid, TSDB_RETENTION_L2);
taosMemoryFree(pTSchema);
}
@@ -468,12 +470,12 @@ int32_t tdProcessRSmaSubmit(SSma *pSma, void *pMsg, int32_t inputType) {
tdFetchSubmitReqSuids(pMsg, &uidStore);
if (uidStore.suid != 0) {
- tdExecuteRSma(pSma, pMsg, inputType, uidStore.suid, uidStore.uid);
+ tdExecuteRSma(pSma, pMsg, inputType, uidStore.suid);
void *pIter = taosHashIterate(uidStore.uidHash, NULL);
while (pIter) {
tb_uid_t *pTbSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL);
- tdExecuteRSma(pSma, pMsg, inputType, *pTbSuid, 0);
+ tdExecuteRSma(pSma, pMsg, inputType, *pTbSuid);
pIter = taosHashIterate(uidStore.uidHash, pIter);
}
diff --git a/source/dnode/vnode/src/sma/smaTimeRange.c b/source/dnode/vnode/src/sma/smaTimeRange.c
index f771e73c8aa4210fd01b5c871877cbdaeb0fb2bc..b72be06455d8181dca8a27ea1c58cfa72ddef39f 100644
--- a/source/dnode/vnode/src/sma/smaTimeRange.c
+++ b/source/dnode/vnode/src/sma/smaTimeRange.c
@@ -326,13 +326,13 @@ int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) {
if (!pDataBlocks) {
terrno = TSDB_CODE_INVALID_PTR;
- smaWarn("vgId:%d insert tSma data failed since pDataBlocks is NULL", SMA_VID(pSma));
+ smaWarn("vgId:%d, insert tSma data failed since pDataBlocks is NULL", SMA_VID(pSma));
return terrno;
}
if (taosArrayGetSize(pDataBlocks) <= 0) {
terrno = TSDB_CODE_INVALID_PARA;
- smaWarn("vgId:%d insert tSma data failed since pDataBlocks is empty", SMA_VID(pSma));
+ smaWarn("vgId:%d, insert tSma data failed since pDataBlocks is empty", SMA_VID(pSma));
return TSDB_CODE_FAILED;
}
@@ -487,11 +487,11 @@ int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) {
smaCloseDBF(&tSmaH.dFile);
}
tdSetTSmaDataFile(&tSmaH, indexUid, fid);
- smaDebug("@@@ vgId:%d write to DBF %s, days:%d, interval:%" PRIi64 ", storageLevel:%" PRIi32
+ smaDebug("vgId:%d, write to DBF %s, days:%d, interval:%" PRIi64 ", storageLevel:%" PRIi32
" queryKey:%" PRIi64,
SMA_VID(pSma), tSmaH.dFile.path, minutePerFile, tSmaH.interval, storageLevel, testSkey);
if (smaOpenDBF(pEnv->dbEnv, &tSmaH.dFile) != 0) {
- smaWarn("vgId:%d open DB file %s failed since %s", SMA_VID(pSma),
+ smaWarn("vgId:%d, open DB file %s failed since %s", SMA_VID(pSma),
tSmaH.dFile.path ? tSmaH.dFile.path : "path is NULL", tstrerror(terrno));
tdDestroyTSmaWriteH(&tSmaH);
tdUnRefSmaStat(pSma, pStat);
@@ -501,7 +501,7 @@ int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) {
}
if (tdInsertTSmaBlocks(&tSmaH, &smaKey, SMA_KEY_LEN, dataBuf, tlen, &pEnv->txn) != 0) {
- smaWarn("vgId:%d insert tsma data blocks fail for index %" PRIi64 ", skey %" PRIi64 ", groupId %" PRIi64
+ smaWarn("vgId:%d, insert tsma data blocks fail for index %" PRIi64 ", skey %" PRIi64 ", groupId %" PRIi64
" since %s",
SMA_VID(pSma), indexUid, skey, groupId, tstrerror(terrno));
tdSmaEndCommit(pEnv);
@@ -510,14 +510,14 @@ int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) {
return TSDB_CODE_FAILED;
}
- smaDebug("vgId:%d insert tsma data blocks success for index %" PRIi64 ", skey %" PRIi64 ", groupId %" PRIi64,
+ smaDebug("vgId:%d, insert tsma data blocks success for index %" PRIi64 ", skey %" PRIi64 ", groupId %" PRIi64,
SMA_VID(pSma), indexUid, skey, groupId);
// TODO:tsdbEndTSmaCommit();
// Step 3: reset the SSmaStat
tdResetExpiredWindow(pSma, pStat, indexUid, skey);
} else {
- smaWarn("vgId:%d invalid data skey:%" PRIi64 ", tlen %" PRIi32 " during insert tSma data for %" PRIi64,
+ smaWarn("vgId:%d, invalid data skey:%" PRIi64 ", tlen %" PRIi32 " during insert tSma data for %" PRIi64,
SMA_VID(pSma), skey, tlen, indexUid);
}
}
@@ -532,7 +532,7 @@ int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) {
int32_t tdDropTSmaData(SSma *pSma, int64_t indexUid) {
int32_t code = TSDB_CODE_SUCCESS;
if ((code = tdDropTSmaDataImpl(pSma, indexUid)) < 0) {
- smaWarn("vgId:%d drop tSma data failed since %s", SMA_VID(pSma), tstrerror(terrno));
+ smaWarn("vgId:%d, drop tSma data failed since %s", SMA_VID(pSma), tstrerror(terrno));
}
return code;
}
@@ -553,11 +553,11 @@ static int32_t tdInsertTSmaBlocks(STSmaWriteH *pSmaH, void *smaKey, int32_t keyL
// TODO: insert tsma data blocks into B+Tree(TTB)
if (smaSaveSmaToDB(pDBFile, smaKey, keyLen, pData, dataLen, txn) != 0) {
- smaWarn("vgId:%d insert tsma data blocks into %s: smaKey %" PRIx64 "-%" PRIx64 ", dataLen %" PRIu32 " fail",
+ smaWarn("vgId:%d, insert tsma data blocks into %s: smaKey %" PRIx64 "-%" PRIx64 ", dataLen %" PRIu32 " fail",
SMA_VID(pSmaH->pSma), pDBFile->path, *(int64_t *)smaKey, *(int64_t *)POINTER_SHIFT(smaKey, 8), dataLen);
return TSDB_CODE_FAILED;
}
- smaDebug("vgId:%d insert tsma data blocks into %s: smaKey %" PRIx64 "-%" PRIx64 ", dataLen %" PRIu32 " succeed",
+ smaDebug("vgId:%d, insert tsma data blocks into %s: smaKey %" PRIx64 "-%" PRIx64 ", dataLen %" PRIu32 " succeed",
SMA_VID(pSmaH->pSma), pDBFile->path, *(int64_t *)smaKey, *(int64_t *)POINTER_SHIFT(smaKey, 8), dataLen);
#ifdef _TEST_SMA_PRINT_DEBUG_LOG_
@@ -565,7 +565,7 @@ static int32_t tdInsertTSmaBlocks(STSmaWriteH *pSmaH, void *smaKey, int32_t keyL
void *data = tdGetSmaDataByKey(pDBFile, smaKey, keyLen, &valueSize);
ASSERT(data != NULL);
for (uint32_t v = 0; v < valueSize; v += 8) {
- smaWarn("vgId:%d insert sma data val[%d] %" PRIi64, REPO_ID(pSmaH->pTsdb), v, *(int64_t *)POINTER_SHIFT(data, v));
+ smaWarn("vgId:%d, insert sma data val[%d] %" PRIi64, REPO_ID(pSmaH->pTsdb), v, *(int64_t *)POINTER_SHIFT(data, v));
}
#endif
return TSDB_CODE_SUCCESS;
@@ -594,11 +594,11 @@ static int32_t tdResetExpiredWindow(SSma *pSma, SSmaStat *pStat, int64_t indexUi
if (taosHashRemove(pItem->expiredWindows, &skey, sizeof(TSKEY)) != 0) {
// error handling
tdUnRefSmaStat(pSma, pStat);
- smaWarn("vgId:%d remove skey %" PRIi64 " from expired window for sma index %" PRIi64 " fail", SMA_VID(pSma), skey,
+ smaWarn("vgId:%d, remove skey %" PRIi64 " from expired window for sma index %" PRIi64 " fail", SMA_VID(pSma), skey,
indexUid);
return TSDB_CODE_FAILED;
}
- smaDebug("vgId:%d remove skey %" PRIi64 " from expired window for sma index %" PRIi64 " succeed", SMA_VID(pSma),
+ smaDebug("vgId:%d, remove skey %" PRIi64 " from expired window for sma index %" PRIi64 " succeed", SMA_VID(pSma),
skey, indexUid);
// TODO: use a standalone interface to received state upate notification from stream computing module.
/**
@@ -612,7 +612,7 @@ static int32_t tdResetExpiredWindow(SSma *pSma, SSmaStat *pStat, int64_t indexUi
} else {
// error handling
tdUnRefSmaStat(pSma, pStat);
- smaWarn("vgId:%d expired window %" PRIi64 " not exists for sma index %" PRIi64, SMA_VID(pSma), skey, indexUid);
+ smaWarn("vgId:%d, expired window %" PRIi64 " not exists for sma index %" PRIi64, SMA_VID(pSma), skey, indexUid);
return TSDB_CODE_FAILED;
}
@@ -632,19 +632,19 @@ static int32_t tdDropTSmaDataImpl(SSma *pSma, int64_t indexUid) {
// clear local cache
if (pEnv) {
- smaDebug("vgId:%d drop tSma local cache for %" PRIi64, SMA_VID(pSma), indexUid);
+ smaDebug("vgId:%d, drop tSma local cache for %" PRIi64, SMA_VID(pSma), indexUid);
SSmaStatItem *pItem = taosHashGet(SMA_ENV_STAT_ITEMS(pEnv), &indexUid, sizeof(indexUid));
if ((pItem) || ((pItem = *(SSmaStatItem **)pItem))) {
if (tdSmaStatIsDropped(pItem)) {
- smaDebug("vgId:%d tSma stat is already dropped for %" PRIi64, SMA_VID(pSma), indexUid);
+ smaDebug("vgId:%d, tSma stat is already dropped for %" PRIi64, SMA_VID(pSma), indexUid);
return TSDB_CODE_TDB_INVALID_ACTION; // TODO: duplicate drop msg would be intercepted by mnode
}
tdWLockSmaEnv(pEnv);
if (tdSmaStatIsDropped(pItem)) {
tdUnLockSmaEnv(pEnv);
- smaDebug("vgId:%d tSma stat is already dropped for %" PRIi64, SMA_VID(pSma), indexUid);
+ smaDebug("vgId:%d, tSma stat is already dropped for %" PRIi64, SMA_VID(pSma), indexUid);
return TSDB_CODE_TDB_INVALID_ACTION; // TODO: duplicate drop msg would be intercepted by mnode
}
tdSmaStatSetDropped(pItem);
@@ -654,19 +654,19 @@ static int32_t tdDropTSmaDataImpl(SSma *pSma, int64_t indexUid) {
int32_t refVal = INT32_MAX;
while (true) {
if ((refVal = T_REF_VAL_GET(SMA_ENV_STAT(pEnv))) <= 0) {
- smaDebug("vgId:%d drop index %" PRIi64 " since refVal=%d", SMA_VID(pSma), indexUid, refVal);
+ smaDebug("vgId:%d, drop index %" PRIi64 " since refVal=%d", SMA_VID(pSma), indexUid, refVal);
break;
}
- smaDebug("vgId:%d wait 1s to drop index %" PRIi64 " since refVal=%d", SMA_VID(pSma), indexUid, refVal);
+ smaDebug("vgId:%d, wait 1s to drop index %" PRIi64 " since refVal=%d", SMA_VID(pSma), indexUid, refVal);
taosSsleep(1);
if (++nSleep > SMA_DROP_EXPIRED_TIME) {
- smaDebug("vgId:%d drop index %" PRIi64 " after wait %d (refVal=%d)", SMA_VID(pSma), indexUid, nSleep, refVal);
+ smaDebug("vgId:%d, drop index %" PRIi64 " after wait %d (refVal=%d)", SMA_VID(pSma), indexUid, nSleep, refVal);
break;
};
}
tdFreeSmaStatItem(pItem);
- smaDebug("vgId:%d getTSmaDataImpl failed since no index %" PRIi64 " in local cache", SMA_VID(pSma), indexUid);
+ smaDebug("vgId:%d, getTSmaDataImpl failed since no index %" PRIi64 " in local cache", SMA_VID(pSma), indexUid);
}
}
// clear sma data files
@@ -690,7 +690,7 @@ int32_t tdGetTSmaDataImpl(SSma *pSma, char *pData, int64_t indexUid, TSKEY query
if (!pEnv) {
terrno = TSDB_CODE_INVALID_PTR;
- smaWarn("vgId:%d getTSmaDataImpl failed since pTSmaEnv is NULL", SMA_VID(pSma));
+ smaWarn("vgId:%d, getTSmaDataImpl failed since pTSmaEnv is NULL", SMA_VID(pSma));
return TSDB_CODE_FAILED;
}
@@ -703,7 +703,7 @@ int32_t tdGetTSmaDataImpl(SSma *pSma, char *pData, int64_t indexUid, TSKEY query
// it's NULL.
tdUnRefSmaStat(pSma, pStat);
terrno = TSDB_CODE_TDB_INVALID_ACTION;
- smaDebug("vgId:%d getTSmaDataImpl failed since no index %" PRIi64, SMA_VID(pSma), indexUid);
+ smaDebug("vgId:%d, getTSmaDataImpl failed since no index %" PRIi64, SMA_VID(pSma), indexUid);
return TSDB_CODE_FAILED;
}
@@ -722,17 +722,17 @@ int32_t tdGetTSmaDataImpl(SSma *pSma, char *pData, int64_t indexUid, TSKEY query
if (!tdSmaStatIsOK(pItem, &smaStat)) { // TODO: multiple check for large scale sma query
tdUnRefSmaStat(pSma, pStat);
terrno = TSDB_CODE_TDB_INVALID_SMA_STAT;
- smaWarn("vgId:%d getTSmaDataImpl failed from index %" PRIi64 " since %s %" PRIi8, SMA_VID(pSma), indexUid,
+ smaWarn("vgId:%d, getTSmaDataImpl failed from index %" PRIi64 " since %s %" PRIi8, SMA_VID(pSma), indexUid,
tstrerror(terrno), smaStat);
return TSDB_CODE_FAILED;
}
if (taosHashGet(pItem->expiredWindows, &querySKey, sizeof(TSKEY))) {
// TODO: mark this window as expired.
- smaDebug("vgId:%d skey %" PRIi64 " of window exists in expired window for index %" PRIi64, SMA_VID(pSma), querySKey,
+ smaDebug("vgId:%d, skey %" PRIi64 " of window exists in expired window for index %" PRIi64, SMA_VID(pSma), querySKey,
indexUid);
} else {
- smaDebug("vgId:%d skey %" PRIi64 " of window not in expired window for index %" PRIi64, SMA_VID(pSma), querySKey,
+ smaDebug("vgId:%d, skey %" PRIi64 " of window not in expired window for index %" PRIi64, SMA_VID(pSma), querySKey,
indexUid);
}
@@ -750,7 +750,7 @@ int32_t tdGetTSmaDataImpl(SSma *pSma, char *pData, int64_t indexUid, TSKEY query
smaDebug("### vgId:%d read from DBF %s days:%d, interval:%" PRIi64 ", storageLevel:%" PRIi8 " queryKey:%" PRIi64,
SMA_VID(pSma), tReadH.dFile.path, tReadH.days, tReadH.interval, tReadH.storageLevel, querySKey);
if (smaOpenDBF(pEnv->dbEnv, &tReadH.dFile) != 0) {
- smaWarn("vgId:%d open DBF %s failed since %s", SMA_VID(pSma), tReadH.dFile.path, tstrerror(terrno));
+ smaWarn("vgId:%d, open DBF %s failed since %s", SMA_VID(pSma), tReadH.dFile.path, tstrerror(terrno));
return TSDB_CODE_FAILED;
}
@@ -759,13 +759,13 @@ int32_t tdGetTSmaDataImpl(SSma *pSma, char *pData, int64_t indexUid, TSKEY query
int64_t queryGroupId = 0;
tdEncodeTSmaKey(queryGroupId, querySKey, (void **)&pSmaKey);
- smaDebug("vgId:%d get sma data from %s: smaKey %" PRIx64 "-%" PRIx64 ", keyLen %d", SMA_VID(pSma), tReadH.dFile.path,
+ smaDebug("vgId:%d, get sma data from %s: smaKey %" PRIx64 "-%" PRIx64 ", keyLen %d", SMA_VID(pSma), tReadH.dFile.path,
*(int64_t *)smaKey, *(int64_t *)POINTER_SHIFT(smaKey, 8), SMA_KEY_LEN);
void *result = NULL;
int32_t valueSize = 0;
if (!(result = smaGetSmaDataByKey(&tReadH.dFile, smaKey, SMA_KEY_LEN, &valueSize))) {
- smaWarn("vgId:%d get sma data failed from smaIndex %" PRIi64 ", smaKey %" PRIx64 "-%" PRIx64 " since %s",
+ smaWarn("vgId:%d, get sma data failed from smaIndex %" PRIi64 ", smaKey %" PRIx64 "-%" PRIx64 " since %s",
SMA_VID(pSma), indexUid, *(int64_t *)smaKey, *(int64_t *)POINTER_SHIFT(smaKey, 8), tstrerror(terrno));
smaCloseDBF(&tReadH.dFile);
return TSDB_CODE_FAILED;
@@ -774,7 +774,7 @@ int32_t tdGetTSmaDataImpl(SSma *pSma, char *pData, int64_t indexUid, TSKEY query
#ifdef _TEST_SMA_PRINT_DEBUG_LOG_
for (uint32_t v = 0; v < valueSize; v += 8) {
- smaWarn("vgId:%d get sma data v[%d]=%" PRIi64, SMA_VID(pSma), v, *(int64_t *)POINTER_SHIFT(result, v));
+ smaWarn("vgId:%d, get sma data v[%d]=%" PRIi64, SMA_VID(pSma), v, *(int64_t *)POINTER_SHIFT(result, v));
}
#endif
taosMemoryFreeClear(result); // TODO: fill the result to output
@@ -828,7 +828,7 @@ int32_t tdDropTSma(SSma *pSma, char *pMsg) {
// TODO: send msg to stream computing to drop tSma
// if ((send msg to stream computing) < 0) {
- // tdDestroyTSma(&vCreateSmaReq);
+ // tDestroyTSma(&vCreateSmaReq);
// return -1;
// }
//
@@ -888,7 +888,7 @@ static int32_t tdSetExpiredWindow(SSma *pSma, SHashObj *pItemsHash, int64_t inde
terrno = TSDB_CODE_TDB_NO_SMA_INDEX_IN_META;
taosHashCleanup(pItem->expiredWindows);
taosMemoryFree(pItem);
- smaWarn("vgId:%d set expire window, get tsma meta failed for smaIndex %" PRIi64 " since %s", SMA_VID(pSma),
+ smaWarn("vgId:%d, set expire window, get tsma meta failed for smaIndex %" PRIi64 " since %s", SMA_VID(pSma),
indexUid, tstrerror(terrno));
return TSDB_CODE_FAILED;
}
@@ -915,12 +915,12 @@ static int32_t tdSetExpiredWindow(SSma *pSma, SHashObj *pItemsHash, int64_t inde
taosHashCleanup(pItem->expiredWindows);
taosMemoryFreeClear(pItem->pTSma);
taosHashRemove(pItemsHash, &indexUid, sizeof(indexUid));
- smaWarn("vgId:%d smaIndex %" PRIi64 ", put skey %" PRIi64 " to expire window fail", SMA_VID(pSma), indexUid,
+ smaWarn("vgId:%d, smaIndex %" PRIi64 ", put skey %" PRIi64 " to expire window fail", SMA_VID(pSma), indexUid,
winSKey);
return TSDB_CODE_FAILED;
}
- smaDebug("vgId:%d smaIndex %" PRIi64 ", put skey %" PRIi64 " to expire window succeed", SMA_VID(pSma), indexUid,
+ smaDebug("vgId:%d, smaIndex %" PRIi64 ", put skey %" PRIi64 " to expire window succeed", SMA_VID(pSma), indexUid,
winSKey);
return TSDB_CODE_SUCCESS;
}
@@ -932,21 +932,21 @@ static int32_t tdSetExpiredWindow(SSma *pSma, SHashObj *pItemsHash, int64_t inde
* @param msg SSubmitReq
* @return int32_t
*/
-int32_t tdUpdateExpiredWindowImpl(SSma *pSma, SSubmitReq *pMsg, int64_t version) {
+int32_t tdUpdateExpiredWindowImpl(SSma *pSma, const SSubmitReq *pMsg, int64_t version) {
// no time-range-sma, just return success
if (atomic_load_16(&SMA_TSMA_NUM(pSma)) <= 0) {
- smaTrace("vgId:%d not update expire window since no tSma", SMA_VID(pSma));
+ smaTrace("vgId:%d, not update expire window since no tSma", SMA_VID(pSma));
return TSDB_CODE_SUCCESS;
}
if (!SMA_META(pSma)) {
terrno = TSDB_CODE_INVALID_PTR;
- smaError("vgId:%d update expire window failed since no meta ptr", SMA_VID(pSma));
+ smaError("vgId:%d, update expire window failed since no meta ptr", SMA_VID(pSma));
return TSDB_CODE_FAILED;
}
if (tdCheckAndInitSmaEnv(pSma, TSDB_SMA_TYPE_TIME_RANGE) < 0) {
- smaError("vgId:%d init sma env failed since %s", SMA_VID(pSma), terrstr(terrno));
+ smaError("vgId:%d, init sma env failed since %s", SMA_VID(pSma), terrstr(terrno));
terrno = TSDB_CODE_TDB_INIT_FAILED;
return TSDB_CODE_FAILED;
}
@@ -982,25 +982,25 @@ int32_t tdUpdateExpiredWindowImpl(SSma *pSma, SSubmitReq *pMsg, int64_t version)
SSubmitBlkIter blkIter = {0};
if (tInitSubmitBlkIter(&msgIter, pBlock, &blkIter) < 0) {
- pSW = tdFreeTSmaWrapper(pSW, false);
+ pSW = tFreeTSmaWrapper(pSW, false);
break;
}
while (true) {
STSRow *row = tGetSubmitBlkNext(&blkIter);
if (!row) {
- pSW = tdFreeTSmaWrapper(pSW, false);
+ pSW = tFreeTSmaWrapper(pSW, false);
break;
}
if (!pSW || (pTSma && (pTSma->tableUid != msgIter.suid))) {
if (pSW) {
- pSW = tdFreeTSmaWrapper(pSW, false);
+ pSW = tFreeTSmaWrapper(pSW, false);
}
if (!(pSW = metaGetSmaInfoByTable(SMA_META(pSma), msgIter.suid, false))) {
break;
}
if ((pSW->number) <= 0 || !pSW->tSma) {
- pSW = tdFreeTSmaWrapper(pSW, false);
+ pSW = tFreeTSmaWrapper(pSW, false);
break;
}
@@ -1020,12 +1020,12 @@ int32_t tdUpdateExpiredWindowImpl(SSma *pSma, SSubmitReq *pMsg, int64_t version)
if (lastWinSKey != winSKey) {
lastWinSKey = winSKey;
if (tdSetExpiredWindow(pSma, pItemsHash, pTSma->indexUid, winSKey, version) < 0) {
- pSW = tdFreeTSmaWrapper(pSW, false);
+ pSW = tFreeTSmaWrapper(pSW, false);
tdUnRefSmaStat(pSma, pStat);
return TSDB_CODE_FAILED;
}
} else {
- smaDebug("vgId:%d smaIndex %" PRIi64 ", put skey %" PRIi64 " to expire window ignore as duplicated",
+ smaDebug("vgId:%d, smaIndex %" PRIi64 ", put skey %" PRIi64 " to expire window ignore as duplicated",
SMA_VID(pSma), pTSma->indexUid, winSKey);
}
}
diff --git a/source/dnode/vnode/src/sma/smaTimeRange2.c b/source/dnode/vnode/src/sma/smaTimeRange2.c
new file mode 100644
index 0000000000000000000000000000000000000000..5ef171c7991c47494efa265274852c48e0bac6b7
--- /dev/null
+++ b/source/dnode/vnode/src/sma/smaTimeRange2.c
@@ -0,0 +1,1084 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include "sma.h"
+#include "tsdb.h"
+
+typedef STsdbCfg STSmaKeepCfg;
+
+#undef _TEST_SMA_PRINT_DEBUG_LOG_
+#define SMA_STORAGE_MINUTES_MAX 86400
+#define SMA_STORAGE_MINUTES_DAY 1440
+#define SMA_STORAGE_MINUTES_MIN 1440
+#define SMA_STORAGE_TSDB_MINUTES 86400
+#define SMA_STORAGE_TSDB_TIMES 10
+#define SMA_STORAGE_SPLIT_FACTOR 14400 // least records in tsma file TODO: the feasible value?
+#define SMA_KEY_LEN 16 // TSKEY+groupId 8+8
+#define SMA_DROP_EXPIRED_TIME 10 // default is 10 seconds
+
+#define SMA_STATE_ITEM_HASH_SLOT 32
+
+typedef struct {
+ SSma *pSma;
+ SDBFile dFile;
+ const SArray *pDataBlocks; // sma data
+ int64_t interval; // interval with the precision of DB
+} STSmaWriteH;
+
+typedef struct {
+ int32_t iter;
+ int32_t fid;
+} SmaFsIter;
+
+typedef struct {
+ STsdb *pTsdb;
+ SSma *pSma;
+ SDBFile dFile;
+ int64_t interval; // interval with the precision of DB
+ int32_t blockSize; // size of SMA block item
+ int32_t days;
+ int8_t storageLevel;
+ SmaFsIter smaFsIter;
+} STSmaReadH;
+
+typedef enum {
+ SMA_STORAGE_LEVEL_TSDB = 0, // use days of self-defined e.g. vnode${N}/tsdb/tsma/sma_index_uid/v2f200.tsma
+ SMA_STORAGE_LEVEL_DFILESET = 1 // use days of TS data e.g. vnode${N}/tsdb/tsma/sma_index_uid/v2f1906.tsma
+} ESmaStorageLevel;
+
+// static func
+
+static int64_t tdGetIntervalByPrecision(int64_t interval, uint8_t intervalUnit, int8_t precision, bool adjusted);
+static int32_t tdGetSmaStorageLevel(STSmaKeepCfg *pCfg, int64_t interval);
+static int32_t tdInitTSmaWriteH(STSmaWriteH *pSmaH, SSma *pSma, const SArray *pDataBlocks, int64_t interval,
+ int8_t intervalUnit);
+static int32_t tdInitTSmaReadH(STSmaReadH *pSmaH, SSma *pSma, int64_t interval, int8_t intervalUnit);
+static void tdDestroyTSmaWriteH(STSmaWriteH *pSmaH);
+static int32_t tdGetTSmaDays(SSma *pSma, int64_t interval, int32_t storageLevel);
+static int32_t tdSetTSmaDataFile(STSmaWriteH *pSmaH, int64_t indexUid, int32_t fid);
+static int32_t tdInitTSmaFile(STSmaReadH *pSmaH, int64_t indexUid, TSKEY skey);
+static bool tdSetAndOpenTSmaFile(STSmaReadH *pReadH, TSKEY *queryKey);
+static int32_t tdInsertTSmaBlocks(STSmaWriteH *pSmaH, void *smaKey, int32_t keyLen, void *pData, int32_t dataLen,
+ TXN *txn);
+// expired window
+
+static int32_t tdSetExpiredWindow(SSma *pSma, SHashObj *pItemsHash, int64_t indexUid, int64_t winSKey, int64_t version);
+static int32_t tdResetExpiredWindow(SSma *pSma, SSmaStat *pStat, int64_t indexUid, TSKEY skey);
+static int32_t tdDropTSmaDataImpl(SSma *pSma, int64_t indexUid);
+
+/**
+ * @brief Judge the tsma file split days
+ *
+ * @param pCfg
+ * @param pCont
+ * @param contLen
+ * @param days unit is minute
+ * @return int32_t
+ */
+int32_t tdGetTSmaDaysImpl(SVnodeCfg *pCfg, void *pCont, uint32_t contLen, int32_t *days) {
+ SDecoder coder = {0};
+ tDecoderInit(&coder, pCont, contLen);
+
+ STSma tsma = {0};
+ if (tDecodeSVCreateTSmaReq(&coder, &tsma) < 0) {
+ terrno = TSDB_CODE_MSG_DECODE_ERROR;
+ goto _err;
+ }
+ STsdbCfg *pTsdbCfg = &pCfg->tsdbCfg;
+ int64_t mInterval = convertTimeFromPrecisionToUnit(tsma.interval, pTsdbCfg->precision, TIME_UNIT_MINUTE);
+ int64_t records = pTsdbCfg->days / mInterval;
+
+ if (records >= SMA_STORAGE_SPLIT_FACTOR) {
+ *days = pTsdbCfg->days;
+ } else {
+ int64_t daysPerFile = mInterval * SMA_STORAGE_MINUTES_DAY * 2;
+
+ if (daysPerFile > SMA_STORAGE_MINUTES_MAX) {
+ *days = SMA_STORAGE_MINUTES_MAX;
+ } else {
+ *days = (int32_t)daysPerFile;
+ }
+
+ if(*days < pTsdbCfg->days) {
+ *days = pTsdbCfg->days;
+ }
+ }
+ tDecoderClear(&coder);
+ return 0;
+_err:
+ tDecoderClear(&coder);
+ return -1;
+}
+
+// read data
+
+// implementation
+
+/**
+ * @brief
+ *
+ * @param pSmaH
+ * @param pSma
+ * @param interval
+ * @param intervalUnit
+ * @return int32_t
+ */
+static int32_t tdInitTSmaReadH(STSmaReadH *pSmaH, SSma *pSma, int64_t interval, int8_t intervalUnit) {
+ STSmaKeepCfg *pCfg = SMA_TSDB_CFG(pSma);
+ pSmaH->pSma = pSma;
+ pSmaH->interval = tdGetIntervalByPrecision(interval, intervalUnit, SMA_TSDB_CFG(pSma)->precision, true);
+ pSmaH->storageLevel = tdGetSmaStorageLevel(pCfg, interval);
+ pSmaH->days = tdGetTSmaDays(pSma, pSmaH->interval, pSmaH->storageLevel);
+ return TSDB_CODE_SUCCESS;
+}
+
+/**
+ * @brief Init of tSma FS
+ *
+ * @param pReadH
+ * @param indexUid
+ * @param skey
+ * @return int32_t
+ */
+static int32_t tdInitTSmaFile(STSmaReadH *pSmaH, int64_t indexUid, TSKEY skey) {
+ SSma *pSma = pSmaH->pSma;
+
+ int32_t fid = (int32_t)(TSDB_KEY_FID(skey, pSmaH->days, SMA_TSDB_CFG(pSma)->precision));
+ char tSmaFile[TSDB_FILENAME_LEN] = {0};
+ snprintf(tSmaFile, TSDB_FILENAME_LEN, "%" PRIi64 "%sv%df%d.tsma", indexUid, TD_DIRSEP, SMA_VID(pSma), fid);
+ pSmaH->dFile.path = strdup(tSmaFile);
+ pSmaH->smaFsIter.iter = 0;
+ pSmaH->smaFsIter.fid = fid;
+ return TSDB_CODE_SUCCESS;
+}
+
+/**
+ * @brief Set and open tSma file if it has key locates in queryWin.
+ *
+ * @param pReadH
+ * @param param
+ * @param queryWin
+ * @return true
+ * @return false
+ */
+static bool tdSetAndOpenTSmaFile(STSmaReadH *pReadH, TSKEY *queryKey) {
+ // SArray *smaFs = pReadH->pTsdb->fs->cstatus->sf;
+ // int32_t nSmaFs = taosArrayGetSize(smaFs);
+
+ smaCloseDBF(&pReadH->dFile);
+
+#if 0
+ while (pReadH->smaFsIter.iter < nSmaFs) {
+ void *pSmaFile = taosArrayGet(smaFs, pReadH->smaFsIter.iter);
+ if (pSmaFile) { // match(indexName, queryWindow)
+ // TODO: select the file by index_name ...
+ pReadH->dFile = pSmaFile;
+ ++pReadH->smaFsIter.iter;
+ break;
+ }
+ ++pReadH->smaFsIter.iter;
+ }
+
+ if (pReadH->pDFile) {
+ tdDebug("vg%d: smaFile %s matched", REPO_ID(pReadH->pTsdb), "[pSmaFile dir]");
+ return true;
+ }
+#endif
+
+ return false;
+}
+
+/**
+ * @brief Approximate value for week/month/year.
+ *
+ * @param interval
+ * @param intervalUnit
+ * @param precision
+ * @param adjusted Interval already adjusted according to DB precision
+ * @return int64_t
+ */
+static int64_t tdGetIntervalByPrecision(int64_t interval, uint8_t intervalUnit, int8_t precision, bool adjusted) {
+ if (adjusted) {
+ return interval;
+ }
+
+ switch (intervalUnit) {
+ case TIME_UNIT_YEAR: // approximate value
+ interval *= 365 * 86400 * 1e3;
+ break;
+ case TIME_UNIT_MONTH: // approximate value
+ interval *= 30 * 86400 * 1e3;
+ break;
+ case TIME_UNIT_WEEK: // approximate value
+ interval *= 7 * 86400 * 1e3;
+ break;
+ case TIME_UNIT_DAY: // the interval for tSma calculation must <= day
+ interval *= 86400 * 1e3;
+ break;
+ case TIME_UNIT_HOUR:
+ interval *= 3600 * 1e3;
+ break;
+ case TIME_UNIT_MINUTE:
+ interval *= 60 * 1e3;
+ break;
+ case TIME_UNIT_SECOND:
+ interval *= 1e3;
+ break;
+ default:
+ break;
+ }
+
+ switch (precision) {
+ case TSDB_TIME_PRECISION_MILLI:
+ if (TIME_UNIT_MICROSECOND == intervalUnit) { // us
+ return interval / 1e3;
+ } else if (TIME_UNIT_NANOSECOND == intervalUnit) { // nano second
+ return interval / 1e6;
+ } else { // ms
+ return interval;
+ }
+ break;
+ case TSDB_TIME_PRECISION_MICRO:
+ if (TIME_UNIT_MICROSECOND == intervalUnit) { // us
+ return interval;
+ } else if (TIME_UNIT_NANOSECOND == intervalUnit) { // ns
+ return interval / 1e3;
+ } else { // ms
+ return interval * 1e3;
+ }
+ break;
+ case TSDB_TIME_PRECISION_NANO:
+ if (TIME_UNIT_MICROSECOND == intervalUnit) { // us
+ return interval * 1e3;
+ } else if (TIME_UNIT_NANOSECOND == intervalUnit) { // ns
+ return interval;
+ } else { // ms
+ return interval * 1e6;
+ }
+ break;
+ default: // ms
+ if (TIME_UNIT_MICROSECOND == intervalUnit) { // us
+ return interval / 1e3;
+ } else if (TIME_UNIT_NANOSECOND == intervalUnit) { // ns
+ return interval / 1e6;
+ } else { // ms
+ return interval;
+ }
+ break;
+ }
+ return interval;
+}
+
+static int32_t tdInitTSmaWriteH(STSmaWriteH *pSmaH, SSma *pSma, const SArray *pDataBlocks, int64_t interval,
+ int8_t intervalUnit) {
+ pSmaH->pSma = pSma;
+ pSmaH->interval = tdGetIntervalByPrecision(interval, intervalUnit, SMA_TSDB_CFG(pSma)->precision, true);
+ pSmaH->pDataBlocks = pDataBlocks;
+ pSmaH->dFile.fid = SMA_IVLD_FID;
+ return TSDB_CODE_SUCCESS;
+}
+
+static void tdDestroyTSmaWriteH(STSmaWriteH *pSmaH) {
+ if (pSmaH) {
+ smaCloseDBF(&pSmaH->dFile);
+ }
+}
+
+static int32_t tdSetTSmaDataFile(STSmaWriteH *pSmaH, int64_t indexUid, int32_t fid) {
+ SSma *pSma = pSmaH->pSma;
+ ASSERT(!pSmaH->dFile.path && !pSmaH->dFile.pDB);
+
+ pSmaH->dFile.fid = fid;
+ char tSmaFile[TSDB_FILENAME_LEN] = {0};
+ snprintf(tSmaFile, TSDB_FILENAME_LEN, "%" PRIi64 "%sv%df%d.tsma", indexUid, TD_DIRSEP, SMA_VID(pSma), fid);
+ pSmaH->dFile.path = strdup(tSmaFile);
+
+ return TSDB_CODE_SUCCESS;
+}
+
+/**
+ * @brief
+ *
+ * @param pSma
+ * @param interval Interval calculated by DB's precision
+ * @param storageLevel
+ * @return int32_t
+ */
+static int32_t tdGetTSmaDays(SSma *pSma, int64_t interval, int32_t storageLevel) {
+ STsdbCfg *pCfg = SMA_TSDB_CFG(pSma);
+ int32_t daysPerFile = pCfg->days; // unit is minute
+
+ if (storageLevel == SMA_STORAGE_LEVEL_TSDB) {
+ int32_t minutes = SMA_STORAGE_TSDB_TIMES * (interval / tsTickPerMin[pCfg->precision]);
+ if (minutes > SMA_STORAGE_TSDB_MINUTES) {
+ daysPerFile = SMA_STORAGE_TSDB_MINUTES;
+ }
+ }
+
+ return daysPerFile;
+}
+
+/**
+ * @brief Judge the tSma storage level
+ *
+ * @param pCfg
+ * @param interval
+ * @return int32_t
+ */
+static int32_t tdGetSmaStorageLevel(STSmaKeepCfg *pCfg, int64_t interval) {
+ int64_t mInterval = convertTimeFromPrecisionToUnit(interval, pCfg->precision, TIME_UNIT_MINUTE);
+ if (pCfg->days / mInterval >= SMA_STORAGE_SPLIT_FACTOR) {
+ return SMA_STORAGE_LEVEL_DFILESET;
+ }
+ return SMA_STORAGE_LEVEL_TSDB;
+}
+
+/**
+ * @brief Insert/Update Time-range-wise SMA data.
+ * - If interval < SMA_STORAGE_SPLIT_HOURS(e.g. 24), save the SMA data as a part of DFileSet to e.g.
+ * v3f1900.tsma.${sma_index_name}. The days is the same with that for TS data files.
+ * - If interval >= SMA_STORAGE_SPLIT_HOURS, save the SMA data to e.g. vnode3/tsma/v3f632.tsma.${sma_index_name}. The
+ * days is 30 times of the interval, and the minimum days is SMA_STORAGE_TSDB_DAYS(30d).
+ * - The destination file of one data block for some interval is determined by its start TS key.
+ *
+ * @param pSma
+ * @param msg
+ * @return int32_t
+ */
+int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) {
+ STsdbCfg *pCfg = SMA_TSDB_CFG(pSma);
+ const SArray *pDataBlocks = (const SArray *)msg;
+ int64_t testSkey = TSKEY_INITIAL_VAL;
+
+ // TODO: destroy SSDataBlocks(msg)
+
+ // For super table aggregation, the sma data is stored in vgroup calculated from the hash value of stable name. Thus
+ // the sma data would arrive ahead of the update-expired-window msg.
+ if (tdCheckAndInitSmaEnv(pSma, TSDB_SMA_TYPE_TIME_RANGE) != TSDB_CODE_SUCCESS) {
+ terrno = TSDB_CODE_TDB_INIT_FAILED;
+ return TSDB_CODE_FAILED;
+ }
+
+ if (!pDataBlocks) {
+ terrno = TSDB_CODE_INVALID_PTR;
+ smaWarn("vgId:%d insert tSma data failed since pDataBlocks is NULL", SMA_VID(pSma));
+ return terrno;
+ }
+
+ if (taosArrayGetSize(pDataBlocks) <= 0) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ smaWarn("vgId:%d insert tSma data failed since pDataBlocks is empty", SMA_VID(pSma));
+ return TSDB_CODE_FAILED;
+ }
+
+ SSmaEnv *pEnv = SMA_TSMA_ENV(pSma);
+ SSmaStat *pStat = SMA_ENV_STAT(pEnv);
+ SSmaStatItem *pItem = NULL;
+
+ tdRefSmaStat(pSma, pStat);
+
+ if (pStat && SMA_STAT_ITEMS(pStat)) {
+ pItem = taosHashGet(SMA_STAT_ITEMS(pStat), &indexUid, sizeof(indexUid));
+ }
+
+ if (!pItem || !(pItem = *(SSmaStatItem **)pItem) || tdSmaStatIsDropped(pItem)) {
+ terrno = TSDB_CODE_TDB_INVALID_SMA_STAT;
+ tdUnRefSmaStat(pSma, pStat);
+ return TSDB_CODE_FAILED;
+ }
+
+ STSma *pTSma = pItem->pTSma;
+ STSmaWriteH tSmaH = {0};
+
+ if (tdInitTSmaWriteH(&tSmaH, pSma, pDataBlocks, pTSma->interval, pTSma->intervalUnit) != 0) {
+ return TSDB_CODE_FAILED;
+ }
+
+ char rPath[TSDB_FILENAME_LEN] = {0};
+ char aPath[TSDB_FILENAME_LEN] = {0};
+ snprintf(rPath, TSDB_FILENAME_LEN, "%s%s%" PRIi64, SMA_ENV_PATH(pEnv), TD_DIRSEP, indexUid);
+ tfsAbsoluteName(SMA_TFS(pSma), SMA_ENV_DID(pEnv), rPath, aPath);
+ if (!taosCheckExistFile(aPath)) {
+ if (tfsMkdirRecurAt(SMA_TFS(pSma), rPath, SMA_ENV_DID(pEnv)) != TSDB_CODE_SUCCESS) {
+ tdUnRefSmaStat(pSma, pStat);
+ return TSDB_CODE_FAILED;
+ }
+ }
+
+ // Step 1: Judge the storage level and days
+ int32_t storageLevel = tdGetSmaStorageLevel(pCfg, tSmaH.interval);
+ int32_t minutePerFile = tdGetTSmaDays(pSma, tSmaH.interval, storageLevel);
+
+ char smaKey[SMA_KEY_LEN] = {0}; // key: skey + groupId
+ char dataBuf[512] = {0}; // val: aggr data // TODO: handle 512 buffer?
+ void *pDataBuf = NULL;
+ int32_t sz = taosArrayGetSize(pDataBlocks);
+ for (int32_t i = 0; i < sz; ++i) {
+ SSDataBlock *pDataBlock = taosArrayGet(pDataBlocks, i);
+ int32_t colNum = pDataBlock->info.numOfCols;
+ int32_t rows = pDataBlock->info.rows;
+ int32_t rowSize = pDataBlock->info.rowSize;
+ int64_t groupId = pDataBlock->info.groupId;
+ for (int32_t j = 0; j < rows; ++j) {
+ printf("|");
+ TSKEY skey = TSKEY_INITIAL_VAL; // the start key of TS window by interval
+ void *pSmaKey = &smaKey;
+ bool isStartKey = false;
+
+ int32_t tlen = 0; // reset the len
+ pDataBuf = &dataBuf; // reset the buf
+ for (int32_t k = 0; k < colNum; ++k) {
+ SColumnInfoData *pColInfoData = taosArrayGet(pDataBlock->pDataBlock, k);
+ void *var = POINTER_SHIFT(pColInfoData->pData, j * pColInfoData->info.bytes);
+ switch (pColInfoData->info.type) {
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ if (!isStartKey) {
+ isStartKey = true;
+ skey = *(TSKEY *)var;
+ testSkey = skey;
+ printf("= skey %" PRIi64 " groupId = %" PRIi64 "|", skey, groupId);
+ tdEncodeTSmaKey(groupId, skey, &pSmaKey);
+ } else {
+ printf(" %" PRIi64 " |", *(int64_t *)var);
+ tlen += taosEncodeFixedI64(&pDataBuf, *(int64_t *)var);
+ break;
+ }
+ break;
+ case TSDB_DATA_TYPE_BOOL:
+ case TSDB_DATA_TYPE_UTINYINT:
+ printf(" %15d |", *(uint8_t *)var);
+ tlen += taosEncodeFixedU8(&pDataBuf, *(uint8_t *)var);
+ break;
+ case TSDB_DATA_TYPE_TINYINT:
+ printf(" %15d |", *(int8_t *)var);
+ tlen += taosEncodeFixedI8(&pDataBuf, *(int8_t *)var);
+ break;
+ case TSDB_DATA_TYPE_SMALLINT:
+ printf(" %15d |", *(int16_t *)var);
+ tlen += taosEncodeFixedI16(&pDataBuf, *(int16_t *)var);
+ break;
+ case TSDB_DATA_TYPE_USMALLINT:
+ printf(" %15d |", *(uint16_t *)var);
+ tlen += taosEncodeFixedU16(&pDataBuf, *(uint16_t *)var);
+ break;
+ case TSDB_DATA_TYPE_INT:
+ printf(" %15d |", *(int32_t *)var);
+ tlen += taosEncodeFixedI32(&pDataBuf, *(int32_t *)var);
+ break;
+ case TSDB_DATA_TYPE_FLOAT:
+ printf(" %15f |", *(float *)var);
+ tlen += taosEncodeBinary(&pDataBuf, var, sizeof(float));
+ break;
+ case TSDB_DATA_TYPE_UINT:
+ printf(" %15u |", *(uint32_t *)var);
+ tlen += taosEncodeFixedU32(&pDataBuf, *(uint32_t *)var);
+ break;
+ case TSDB_DATA_TYPE_BIGINT:
+ printf(" %15ld |", *(int64_t *)var);
+ tlen += taosEncodeFixedI64(&pDataBuf, *(int64_t *)var);
+ break;
+ case TSDB_DATA_TYPE_DOUBLE:
+ printf(" %15lf |", *(double *)var);
+ tlen += taosEncodeBinary(&pDataBuf, var, sizeof(double));
+ case TSDB_DATA_TYPE_UBIGINT:
+ printf(" %15lu |", *(uint64_t *)var);
+ tlen += taosEncodeFixedU64(&pDataBuf, *(uint64_t *)var);
+ break;
+ case TSDB_DATA_TYPE_NCHAR: {
+ char tmpChar[100] = {0};
+ strncpy(tmpChar, varDataVal(var), varDataLen(var));
+ printf(" %s |", tmpChar);
+ tlen += taosEncodeBinary(&pDataBuf, varDataVal(var), varDataLen(var));
+ break;
+ }
+ case TSDB_DATA_TYPE_VARCHAR: { // TSDB_DATA_TYPE_BINARY
+ char tmpChar[100] = {0};
+ strncpy(tmpChar, varDataVal(var), varDataLen(var));
+ printf(" %s |", tmpChar);
+ tlen += taosEncodeBinary(&pDataBuf, varDataVal(var), varDataLen(var));
+ break;
+ }
+ case TSDB_DATA_TYPE_VARBINARY:
+ // TODO: add binary/varbinary
+ TASSERT(0);
+ default:
+ printf("the column type %" PRIi16 " is undefined\n", pColInfoData->info.type);
+ TASSERT(0);
+ break;
+ }
+ }
+ printf("\n");
+ // if ((tlen > 0) && (skey != TSKEY_INITIAL_VAL)) {
+ if (tlen > 0) {
+ int32_t fid = (int32_t)(TSDB_KEY_FID(skey, minutePerFile, pCfg->precision));
+
+ // Step 2: Set the DFile for storage of SMA index, and iterate/split the TSma data and store to B+Tree index
+ // file
+ // - Set and open the DFile or the B+Tree file
+ // TODO: tsdbStartTSmaCommit();
+ if (fid != tSmaH.dFile.fid) {
+ if (tSmaH.dFile.fid != SMA_IVLD_FID) {
+ tdSmaEndCommit(pEnv);
+ smaCloseDBF(&tSmaH.dFile);
+ }
+ tdSetTSmaDataFile(&tSmaH, indexUid, fid);
+ smaDebug("@@@ vgId:%d write to DBF %s, days:%d, interval:%" PRIi64 ", storageLevel:%" PRIi32
+ " queryKey:%" PRIi64,
+ SMA_VID(pSma), tSmaH.dFile.path, minutePerFile, tSmaH.interval, storageLevel, testSkey);
+ if (smaOpenDBF(pEnv->dbEnv, &tSmaH.dFile) != 0) {
+ smaWarn("vgId:%d open DB file %s failed since %s", SMA_VID(pSma),
+ tSmaH.dFile.path ? tSmaH.dFile.path : "path is NULL", tstrerror(terrno));
+ tdDestroyTSmaWriteH(&tSmaH);
+ tdUnRefSmaStat(pSma, pStat);
+ return TSDB_CODE_FAILED;
+ }
+ tdSmaBeginCommit(pEnv);
+ }
+
+ if (tdInsertTSmaBlocks(&tSmaH, &smaKey, SMA_KEY_LEN, dataBuf, tlen, &pEnv->txn) != 0) {
+ smaWarn("vgId:%d insert tsma data blocks fail for index %" PRIi64 ", skey %" PRIi64 ", groupId %" PRIi64
+ " since %s",
+ SMA_VID(pSma), indexUid, skey, groupId, tstrerror(terrno));
+ tdSmaEndCommit(pEnv);
+ tdDestroyTSmaWriteH(&tSmaH);
+ tdUnRefSmaStat(pSma, pStat);
+ return TSDB_CODE_FAILED;
+ }
+
+ smaDebug("vgId:%d insert tsma data blocks success for index %" PRIi64 ", skey %" PRIi64 ", groupId %" PRIi64,
+ SMA_VID(pSma), indexUid, skey, groupId);
+ // TODO:tsdbEndTSmaCommit();
+
+ // Step 3: reset the SSmaStat
+ tdResetExpiredWindow(pSma, pStat, indexUid, skey);
+ } else {
+ smaWarn("vgId:%d invalid data skey:%" PRIi64 ", tlen %" PRIi32 " during insert tSma data for %" PRIi64,
+ SMA_VID(pSma), skey, tlen, indexUid);
+ }
+ }
+ }
+ tdSmaEndCommit(pEnv); // TODO: not commit for every insert
+ tdDestroyTSmaWriteH(&tSmaH);
+ tdUnRefSmaStat(pSma, pStat);
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t tdDropTSmaData(SSma *pSma, int64_t indexUid) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ if ((code = tdDropTSmaDataImpl(pSma, indexUid)) < 0) {
+ smaWarn("vgId:%d drop tSma data failed since %s", SMA_VID(pSma), tstrerror(terrno));
+ }
+ return code;
+}
+
+/**
+ * @brief Insert TSma data blocks to DB File build by B+Tree
+ *
+ * @param pSmaH
+ * @param smaKey tableUid-colId-skeyOfWindow(8-2-8)
+ * @param keyLen
+ * @param pData
+ * @param dataLen
+ * @return int32_t
+ */
+static int32_t tdInsertTSmaBlocks(STSmaWriteH *pSmaH, void *smaKey, int32_t keyLen, void *pData, int32_t dataLen,
+ TXN *txn) {
+ SDBFile *pDBFile = &pSmaH->dFile;
+
+ // TODO: insert tsma data blocks into B+Tree(TTB)
+ if (smaSaveSmaToDB(pDBFile, smaKey, keyLen, pData, dataLen, txn) != 0) {
+ smaWarn("vgId:%d insert tsma data blocks into %s: smaKey %" PRIx64 "-%" PRIx64 ", dataLen %" PRIu32 " fail",
+ SMA_VID(pSmaH->pSma), pDBFile->path, *(int64_t *)smaKey, *(int64_t *)POINTER_SHIFT(smaKey, 8), dataLen);
+ return TSDB_CODE_FAILED;
+ }
+ smaDebug("vgId:%d insert tsma data blocks into %s: smaKey %" PRIx64 "-%" PRIx64 ", dataLen %" PRIu32 " succeed",
+ SMA_VID(pSmaH->pSma), pDBFile->path, *(int64_t *)smaKey, *(int64_t *)POINTER_SHIFT(smaKey, 8), dataLen);
+
+#ifdef _TEST_SMA_PRINT_DEBUG_LOG_
+ uint32_t valueSize = 0;
+ void *data = tdGetSmaDataByKey(pDBFile, smaKey, keyLen, &valueSize);
+ ASSERT(data != NULL);
+ for (uint32_t v = 0; v < valueSize; v += 8) {
+ smaWarn("vgId:%d insert sma data val[%d] %" PRIi64, REPO_ID(pSmaH->pTsdb), v, *(int64_t *)POINTER_SHIFT(data, v));
+ }
+#endif
+ return TSDB_CODE_SUCCESS;
+}
+
+/**
+ * @brief When sma data received from stream computing, make the relative expired window valid.
+ *
+ * @param pSma
+ * @param pStat
+ * @param indexUid
+ * @param skey
+ * @return int32_t
+ */
+static int32_t tdResetExpiredWindow(SSma *pSma, SSmaStat *pStat, int64_t indexUid, TSKEY skey) {
+ SSmaStatItem *pItem = NULL;
+
+ tdRefSmaStat(pSma, pStat);
+
+ if (pStat && SMA_STAT_ITEMS(pStat)) {
+ pItem = taosHashGet(SMA_STAT_ITEMS(pStat), &indexUid, sizeof(indexUid));
+ }
+ if ((pItem) && ((pItem = *(SSmaStatItem **)pItem))) {
+ // pItem resides in hash buffer all the time unless drop sma index
+ // TODO: multithread protect
+ if (taosHashRemove(pItem->expiredWindows, &skey, sizeof(TSKEY)) != 0) {
+ // error handling
+ tdUnRefSmaStat(pSma, pStat);
+ smaWarn("vgId:%d remove skey %" PRIi64 " from expired window for sma index %" PRIi64 " fail", SMA_VID(pSma), skey,
+ indexUid);
+ return TSDB_CODE_FAILED;
+ }
+ smaDebug("vgId:%d remove skey %" PRIi64 " from expired window for sma index %" PRIi64 " succeed", SMA_VID(pSma),
+ skey, indexUid);
+ // TODO: use a standalone interface to received state upate notification from stream computing module.
+ /**
+ * @brief state
+ * - When SMA env init in TSDB, its status is TSDB_SMA_STAT_OK.
+ * - In startup phase of stream computing module, it should notify the SMA env in TSDB to expired if needed(e.g.
+ * when batch data caculation not finised)
+ * - When TSDB_SMA_STAT_OK, the stream computing module should also notify that to the SMA env in TSDB.
+ */
+ pItem->state = TSDB_SMA_STAT_OK;
+ } else {
+ // error handling
+ tdUnRefSmaStat(pSma, pStat);
+ smaWarn("vgId:%d expired window %" PRIi64 " not exists for sma index %" PRIi64, SMA_VID(pSma), skey, indexUid);
+ return TSDB_CODE_FAILED;
+ }
+
+ tdUnRefSmaStat(pSma, pStat);
+ return TSDB_CODE_SUCCESS;
+}
+
+/**
+ * @brief Drop tSma data and local cache
+ * - insert/query reference
+ * @param pSma
+ * @param msg
+ * @return int32_t
+ */
+static int32_t tdDropTSmaDataImpl(SSma *pSma, int64_t indexUid) {
+ SSmaEnv *pEnv = atomic_load_ptr(&SMA_TSMA_ENV(pSma));
+
+ // clear local cache
+ if (pEnv) {
+ smaDebug("vgId:%d drop tSma local cache for %" PRIi64, SMA_VID(pSma), indexUid);
+
+ SSmaStatItem *pItem = taosHashGet(SMA_ENV_STAT_ITEMS(pEnv), &indexUid, sizeof(indexUid));
+ if ((pItem) || ((pItem = *(SSmaStatItem **)pItem))) {
+ if (tdSmaStatIsDropped(pItem)) {
+ smaDebug("vgId:%d tSma stat is already dropped for %" PRIi64, SMA_VID(pSma), indexUid);
+ return TSDB_CODE_TDB_INVALID_ACTION; // TODO: duplicate drop msg would be intercepted by mnode
+ }
+
+ tdWLockSmaEnv(pEnv);
+ if (tdSmaStatIsDropped(pItem)) {
+ tdUnLockSmaEnv(pEnv);
+ smaDebug("vgId:%d tSma stat is already dropped for %" PRIi64, SMA_VID(pSma), indexUid);
+ return TSDB_CODE_TDB_INVALID_ACTION; // TODO: duplicate drop msg would be intercepted by mnode
+ }
+ tdSmaStatSetDropped(pItem);
+ tdUnLockSmaEnv(pEnv);
+
+ int32_t nSleep = 0;
+ int32_t refVal = INT32_MAX;
+ while (true) {
+ if ((refVal = T_REF_VAL_GET(SMA_ENV_STAT(pEnv))) <= 0) {
+ smaDebug("vgId:%d drop index %" PRIi64 " since refVal=%d", SMA_VID(pSma), indexUid, refVal);
+ break;
+ }
+ smaDebug("vgId:%d wait 1s to drop index %" PRIi64 " since refVal=%d", SMA_VID(pSma), indexUid, refVal);
+ taosSsleep(1);
+ if (++nSleep > SMA_DROP_EXPIRED_TIME) {
+ smaDebug("vgId:%d drop index %" PRIi64 " after wait %d (refVal=%d)", SMA_VID(pSma), indexUid, nSleep, refVal);
+ break;
+ };
+ }
+
+ tdFreeSmaStatItem(pItem);
+ smaDebug("vgId:%d getTSmaDataImpl failed since no index %" PRIi64 " in local cache", SMA_VID(pSma), indexUid);
+ }
+ }
+ // clear sma data files
+ // TODO:
+ return TSDB_CODE_SUCCESS;
+}
+
+/**
+ * @brief
+ *
+ * @param pSma Return the data between queryWin and fill the pData.
+ * @param pData
+ * @param indexUid
+ * @param pQuerySKey
+ * @param nMaxResult The query invoker should control the nMaxResult need to return to avoid OOM.
+ * @return int32_t
+ */
+int32_t tdGetTSmaDataImpl(SSma *pSma, char *pData, int64_t indexUid, TSKEY querySKey, int32_t nMaxResult) {
+ SSmaEnv *pEnv = atomic_load_ptr(&SMA_TSMA_ENV(pSma));
+ SSmaStat *pStat = NULL;
+
+ if (!pEnv) {
+ terrno = TSDB_CODE_INVALID_PTR;
+ smaWarn("vgId:%d getTSmaDataImpl failed since pTSmaEnv is NULL", SMA_VID(pSma));
+ return TSDB_CODE_FAILED;
+ }
+
+ pStat = SMA_ENV_STAT(pEnv);
+
+ tdRefSmaStat(pSma, pStat);
+ SSmaStatItem *pItem = taosHashGet(SMA_ENV_STAT_ITEMS(pEnv), &indexUid, sizeof(indexUid));
+ if (!pItem || !(pItem = *(SSmaStatItem **)pItem)) {
+ // Normally pItem should not be NULL, mark all windows as expired and notify query module to fetch raw TS data if
+ // it's NULL.
+ tdUnRefSmaStat(pSma, pStat);
+ terrno = TSDB_CODE_TDB_INVALID_ACTION;
+ smaDebug("vgId:%d getTSmaDataImpl failed since no index %" PRIi64, SMA_VID(pSma), indexUid);
+ return TSDB_CODE_FAILED;
+ }
+
+#if 0
+ int32_t nQueryWin = taosArrayGetSize(pQuerySKey);
+ for (int32_t n = 0; n < nQueryWin; ++n) {
+ TSKEY skey = taosArrayGet(pQuerySKey, n);
+ if (taosHashGet(pItem->expiredWindows, &skey, sizeof(TSKEY))) {
+ // TODO: mark this window as expired.
+ }
+ }
+#endif
+
+#if 1
+ int8_t smaStat = 0;
+ if (!tdSmaStatIsOK(pItem, &smaStat)) { // TODO: multiple check for large scale sma query
+ tdUnRefSmaStat(pSma, pStat);
+ terrno = TSDB_CODE_TDB_INVALID_SMA_STAT;
+ smaWarn("vgId:%d getTSmaDataImpl failed from index %" PRIi64 " since %s %" PRIi8, SMA_VID(pSma), indexUid,
+ tstrerror(terrno), smaStat);
+ return TSDB_CODE_FAILED;
+ }
+
+ if (taosHashGet(pItem->expiredWindows, &querySKey, sizeof(TSKEY))) {
+ // TODO: mark this window as expired.
+ smaDebug("vgId:%d skey %" PRIi64 " of window exists in expired window for index %" PRIi64, SMA_VID(pSma), querySKey,
+ indexUid);
+ } else {
+ smaDebug("vgId:%d skey %" PRIi64 " of window not in expired window for index %" PRIi64, SMA_VID(pSma), querySKey,
+ indexUid);
+ }
+
+ STSma *pTSma = pItem->pTSma;
+#endif
+
+#if 1
+ STSmaReadH tReadH = {0};
+ tdInitTSmaReadH(&tReadH, pSma, pTSma->interval, pTSma->intervalUnit);
+ smaCloseDBF(&tReadH.dFile);
+
+ tdUnRefSmaStat(pSma, pStat);
+
+ tdInitTSmaFile(&tReadH, indexUid, querySKey);
+ smaDebug("### vgId:%d read from DBF %s days:%d, interval:%" PRIi64 ", storageLevel:%" PRIi8 " queryKey:%" PRIi64,
+ SMA_VID(pSma), tReadH.dFile.path, tReadH.days, tReadH.interval, tReadH.storageLevel, querySKey);
+ if (smaOpenDBF(pEnv->dbEnv, &tReadH.dFile) != 0) {
+ smaWarn("vgId:%d open DBF %s failed since %s", SMA_VID(pSma), tReadH.dFile.path, tstrerror(terrno));
+ return TSDB_CODE_FAILED;
+ }
+
+ char smaKey[SMA_KEY_LEN] = {0};
+ void *pSmaKey = &smaKey;
+ int64_t queryGroupId = 0;
+ tdEncodeTSmaKey(queryGroupId, querySKey, (void **)&pSmaKey);
+
+ smaDebug("vgId:%d get sma data from %s: smaKey %" PRIx64 "-%" PRIx64 ", keyLen %d", SMA_VID(pSma), tReadH.dFile.path,
+ *(int64_t *)smaKey, *(int64_t *)POINTER_SHIFT(smaKey, 8), SMA_KEY_LEN);
+
+ void *result = NULL;
+ int32_t valueSize = 0;
+ if (!(result = smaGetSmaDataByKey(&tReadH.dFile, smaKey, SMA_KEY_LEN, &valueSize))) {
+ smaWarn("vgId:%d get sma data failed from smaIndex %" PRIi64 ", smaKey %" PRIx64 "-%" PRIx64 " since %s",
+ SMA_VID(pSma), indexUid, *(int64_t *)smaKey, *(int64_t *)POINTER_SHIFT(smaKey, 8), tstrerror(terrno));
+ smaCloseDBF(&tReadH.dFile);
+ return TSDB_CODE_FAILED;
+ }
+#endif
+
+#ifdef _TEST_SMA_PRINT_DEBUG_LOG_
+ for (uint32_t v = 0; v < valueSize; v += 8) {
+ smaWarn("vgId:%d get sma data v[%d]=%" PRIi64, SMA_VID(pSma), v, *(int64_t *)POINTER_SHIFT(result, v));
+ }
+#endif
+ taosMemoryFreeClear(result); // TODO: fill the result to output
+
+#if 0
+ int32_t nResult = 0;
+ int64_t lastKey = 0;
+
+ while (true) {
+ if (nResult >= nMaxResult) {
+ break;
+ }
+
+ // set and open the file according to the STSma param
+ if (tdSetAndOpenTSmaFile(&tReadH, queryWin)) {
+ char bTree[100] = "\0";
+ while (strncmp(bTree, "has more nodes", 100) == 0) {
+ if (nResult >= nMaxResult) {
+ break;
+ }
+ // tdGetDataFromBTree(bTree, queryWin, lastKey)
+ // fill the pData
+ ++nResult;
+ }
+ }
+ }
+#endif
+ // read data from file and fill the result
+ smaCloseDBF(&tReadH.dFile);
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t tdProcessTSmaCreateImpl(SSma *pSma, int64_t version, const char *pMsg) {
+ SSmaCfg *pCfg = (SSmaCfg *)pMsg;
+
+ if (metaCreateTSma(SMA_META(pSma), version, pCfg) < 0) {
+ return -1;
+ }
+
+ tdTSmaAdd(pSma, 1);
+ return 0;
+}
+
+int32_t tdDropTSma(SSma *pSma, char *pMsg) {
+#if 0
+ SVDropTSmaReq vDropSmaReq = {0};
+ if (!tDeserializeSVDropTSmaReq(pMsg, &vDropSmaReq)) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return -1;
+ }
+
+ // TODO: send msg to stream computing to drop tSma
+ // if ((send msg to stream computing) < 0) {
+ // tDestroyTSma(&vCreateSmaReq);
+ // return -1;
+ // }
+ //
+
+ if (metaDropTSma(SMA_META(pSma), vDropSmaReq.indexUid) < 0) {
+ // TODO: handle error
+ return -1;
+ }
+
+ if (tdDropTSmaData(pSma, vDropSmaReq.indexUid) < 0) {
+ // TODO: handle error
+ return -1;
+ }
+
+ tdTSmaSub(pSma, 1);
+#endif
+
+ // TODO: return directly or go on follow steps?
+ return TSDB_CODE_SUCCESS;
+}
+
+static SSmaStatItem *tdNewSmaStatItem(int8_t state) {
+ SSmaStatItem *pItem = NULL;
+
+ pItem = (SSmaStatItem *)taosMemoryCalloc(1, sizeof(SSmaStatItem));
+ if (!pItem) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return NULL;
+ }
+
+ pItem->state = state;
+ pItem->expiredWindows = taosHashInit(SMA_STATE_ITEM_HASH_SLOT, taosGetDefaultHashFunction(TSDB_DATA_TYPE_TIMESTAMP),
+ true, HASH_ENTRY_LOCK);
+ if (!pItem->expiredWindows) {
+ taosMemoryFreeClear(pItem);
+ return NULL;
+ }
+
+ return pItem;
+}
+
+static int32_t tdSetExpiredWindow(SSma *pSma, SHashObj *pItemsHash, int64_t indexUid, int64_t winSKey,
+ int64_t version) {
+ SSmaStatItem *pItem = taosHashGet(pItemsHash, &indexUid, sizeof(indexUid));
+ if (!pItem) {
+ // TODO: use TSDB_SMA_STAT_EXPIRED and update by stream computing later
+ pItem = tdNewSmaStatItem(TSDB_SMA_STAT_OK); // TODO use the real state
+ if (!pItem) {
+ // Response to stream computing: OOM
+ // For query, if the indexUid not found, the TSDB should tell query module to query raw TS data.
+ return TSDB_CODE_FAILED;
+ }
+
+ // cache smaMeta
+ STSma *pTSma = metaGetSmaInfoByIndex(SMA_META(pSma), indexUid);
+ if (!pTSma) {
+ terrno = TSDB_CODE_TDB_NO_SMA_INDEX_IN_META;
+ taosHashCleanup(pItem->expiredWindows);
+ taosMemoryFree(pItem);
+ smaWarn("vgId:%d set expire window, get tsma meta failed for smaIndex %" PRIi64 " since %s", SMA_VID(pSma),
+ indexUid, tstrerror(terrno));
+ return TSDB_CODE_FAILED;
+ }
+ pItem->pTSma = pTSma;
+
+ if (taosHashPut(pItemsHash, &indexUid, sizeof(indexUid), &pItem, sizeof(pItem)) != 0) {
+ // If error occurs during put smaStatItem, free the resources of pItem
+ taosHashCleanup(pItem->expiredWindows);
+ taosMemoryFree(pItem);
+ return TSDB_CODE_FAILED;
+ }
+ } else if (!(pItem = *(SSmaStatItem **)pItem)) {
+ terrno = TSDB_CODE_INVALID_PTR;
+ return TSDB_CODE_FAILED;
+ }
+
+ if (taosHashPut(pItem->expiredWindows, &winSKey, sizeof(TSKEY), &version, sizeof(version)) != 0) {
+ // If error occurs during taosHashPut expired windows, remove the smaIndex from pSma->pSmaStat, thus TSDB would
+ // tell query module to query raw TS data.
+ // N.B.
+ // 1) It is assumed to be extemely little probability event of fail to taosHashPut.
+ // 2) This would solve the inconsistency to some extent, but not completely, unless we record all expired
+ // windows failed to put into hash table.
+ taosHashCleanup(pItem->expiredWindows);
+ taosMemoryFreeClear(pItem->pTSma);
+ taosHashRemove(pItemsHash, &indexUid, sizeof(indexUid));
+ smaWarn("vgId:%d smaIndex %" PRIi64 ", put skey %" PRIi64 " to expire window fail", SMA_VID(pSma), indexUid,
+ winSKey);
+ return TSDB_CODE_FAILED;
+ }
+
+ smaDebug("vgId:%d smaIndex %" PRIi64 ", put skey %" PRIi64 " to expire window succeed", SMA_VID(pSma), indexUid,
+ winSKey);
+ return TSDB_CODE_SUCCESS;
+}
+
+/**
+ * @brief Update expired window according to msg from stream computing module.
+ *
+ * @param pSma
+ * @param msg SSubmitReq
+ * @return int32_t
+ */
+int32_t tdUpdateExpiredWindowImpl(SSma *pSma, const SSubmitReq *pMsg, int64_t version) {
+ // no time-range-sma, just return success
+ if (atomic_load_16(&SMA_TSMA_NUM(pSma)) <= 0) {
+ smaTrace("vgId:%d not update expire window since no tSma", SMA_VID(pSma));
+ return TSDB_CODE_SUCCESS;
+ }
+
+ if (!SMA_META(pSma)) {
+ terrno = TSDB_CODE_INVALID_PTR;
+ smaError("vgId:%d update expire window failed since no meta ptr", SMA_VID(pSma));
+ return TSDB_CODE_FAILED;
+ }
+
+ if (tdCheckAndInitSmaEnv(pSma, TSDB_SMA_TYPE_TIME_RANGE) < 0) {
+ smaError("vgId:%d init sma env failed since %s", SMA_VID(pSma), terrstr(terrno));
+ terrno = TSDB_CODE_TDB_INIT_FAILED;
+ return TSDB_CODE_FAILED;
+ }
+
+ // Firstly, assume that tSma can only be created on super table/normal table.
+ // getActiveTimeWindow
+
+ SSmaEnv *pEnv = SMA_TSMA_ENV(pSma);
+ SSmaStat *pStat = SMA_ENV_STAT(pEnv);
+ SHashObj *pItemsHash = SMA_ENV_STAT_ITEMS(pEnv);
+
+ TASSERT(pEnv && pStat && pItemsHash);
+
+ // basic procedure
+ // TODO: optimization
+ tdRefSmaStat(pSma, pStat);
+
+ SSubmitMsgIter msgIter = {0};
+ SSubmitBlk *pBlock = NULL;
+ SInterval interval = {0};
+ TSKEY lastWinSKey = INT64_MIN;
+
+ if (tInitSubmitMsgIter(pMsg, &msgIter) < 0) {
+ return TSDB_CODE_FAILED;
+ }
+
+ while (true) {
+ tGetSubmitMsgNext(&msgIter, &pBlock);
+ if (!pBlock) break;
+
+ STSmaWrapper *pSW = NULL;
+ STSma *pTSma = NULL;
+
+ SSubmitBlkIter blkIter = {0};
+ if (tInitSubmitBlkIter(&msgIter, pBlock, &blkIter) < 0) {
+ pSW = tFreeTSmaWrapper(pSW, false);
+ break;
+ }
+
+ while (true) {
+ STSRow *row = tGetSubmitBlkNext(&blkIter);
+ if (!row) {
+ pSW = tFreeTSmaWrapper(pSW, false);
+ break;
+ }
+ if (!pSW || (pTSma && (pTSma->tableUid != msgIter.suid))) {
+ if (pSW) {
+ pSW = tFreeTSmaWrapper(pSW, false);
+ }
+ if (!(pSW = metaGetSmaInfoByTable(SMA_META(pSma), msgIter.suid, false))) {
+ break;
+ }
+ if ((pSW->number) <= 0 || !pSW->tSma) {
+ pSW = tFreeTSmaWrapper(pSW, false);
+ break;
+ }
+
+ pTSma = pSW->tSma;
+
+ interval.interval = pTSma->interval;
+ interval.intervalUnit = pTSma->intervalUnit;
+ interval.offset = pTSma->offset;
+ interval.precision = SMA_TSDB_CFG(pSma)->precision;
+ interval.sliding = pTSma->sliding;
+ interval.slidingUnit = pTSma->slidingUnit;
+ }
+
+ // TODO: process multiple tsma for one table uid
+ TSKEY winSKey = taosTimeTruncate(TD_ROW_KEY(row), &interval, interval.precision);
+
+ if (lastWinSKey != winSKey) {
+ lastWinSKey = winSKey;
+ if (tdSetExpiredWindow(pSma, pItemsHash, pTSma->indexUid, winSKey, version) < 0) {
+ pSW = tFreeTSmaWrapper(pSW, false);
+ tdUnRefSmaStat(pSma, pStat);
+ return TSDB_CODE_FAILED;
+ }
+ } else {
+ smaDebug("vgId:%d smaIndex %" PRIi64 ", put skey %" PRIi64 " to expire window ignore as duplicated",
+ SMA_VID(pSma), pTSma->indexUid, winSKey);
+ }
+ }
+ }
+
+ tdUnRefSmaStat(pSma, pStat);
+
+ return TSDB_CODE_SUCCESS;
+}
\ No newline at end of file
diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c
index bd48ed9b4c3cf0f91bec701e7167964b4473ad07..310b59b2e82c20bfbbb6dc5256f3393c67ae3ca3 100644
--- a/source/dnode/vnode/src/tq/tq.c
+++ b/source/dnode/vnode/src/tq/tq.c
@@ -55,247 +55,67 @@ STQ* tqOpen(const char* path, SVnode* pVnode, SWal* pWal) {
pTq->path = strdup(path);
pTq->pVnode = pVnode;
pTq->pWal = pWal;
- if (tdbOpen(path, 4096, 1, &pTq->pTdb) < 0) {
- ASSERT(0);
- }
- pTq->execs = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK);
+ pTq->handles = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK);
pTq->pStreamTasks = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
pTq->pushMgr = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK);
+ if (tqMetaOpen(pTq) < 0) {
+ ASSERT(0);
+ }
+
return pTq;
}
void tqClose(STQ* pTq) {
if (pTq) {
taosMemoryFreeClear(pTq->path);
- taosHashCleanup(pTq->execs);
+ taosHashCleanup(pTq->handles);
taosHashCleanup(pTq->pStreamTasks);
taosHashCleanup(pTq->pushMgr);
- tdbClose(pTq->pTdb);
+ tqMetaClose(pTq);
taosMemoryFree(pTq);
}
// TODO
}
-int32_t tEncodeSTqExec(SEncoder* pEncoder, const STqExec* pExec) {
- if (tStartEncode(pEncoder) < 0) return -1;
- if (tEncodeCStr(pEncoder, pExec->subKey) < 0) return -1;
- if (tEncodeI64(pEncoder, pExec->consumerId) < 0) return -1;
- if (tEncodeI32(pEncoder, pExec->epoch) < 0) return -1;
- if (tEncodeI8(pEncoder, pExec->subType) < 0) return -1;
- if (tEncodeI8(pEncoder, pExec->withTbName) < 0) return -1;
- if (tEncodeI8(pEncoder, pExec->withSchema) < 0) return -1;
- if (tEncodeI8(pEncoder, pExec->withTag) < 0) return -1;
- if (pExec->subType == TOPIC_SUB_TYPE__TABLE) {
- if (tEncodeCStr(pEncoder, pExec->qmsg) < 0) return -1;
- // TODO encode modified exec
- }
- tEndEncode(pEncoder);
- return pEncoder->pos;
-}
-
-int32_t tDecodeSTqExec(SDecoder* pDecoder, STqExec* pExec) {
- if (tStartDecode(pDecoder) < 0) return -1;
- if (tDecodeCStrTo(pDecoder, pExec->subKey) < 0) return -1;
- if (tDecodeI64(pDecoder, &pExec->consumerId) < 0) return -1;
- if (tDecodeI32(pDecoder, &pExec->epoch) < 0) return -1;
- if (tDecodeI8(pDecoder, &pExec->subType) < 0) return -1;
- if (tDecodeI8(pDecoder, &pExec->withTbName) < 0) return -1;
- if (tDecodeI8(pDecoder, &pExec->withSchema) < 0) return -1;
- if (tDecodeI8(pDecoder, &pExec->withTag) < 0) return -1;
- if (pExec->subType == TOPIC_SUB_TYPE__TABLE) {
- if (tDecodeCStrAlloc(pDecoder, &pExec->qmsg) < 0) return -1;
- // TODO decode modified exec
- }
- tEndDecode(pDecoder);
- return 0;
-}
-int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) {
- void* pIter = NULL;
- while (1) {
- pIter = taosHashIterate(pTq->execs, pIter);
- if (pIter == NULL) break;
- STqExec* pExec = (STqExec*)pIter;
- if (pExec->subType == TOPIC_SUB_TYPE__DB) {
- if (!isAdd) {
- int32_t sz = taosArrayGetSize(tbUidList);
- for (int32_t i = 0; i < sz; i++) {
- int64_t tbUid = *(int64_t*)taosArrayGet(tbUidList, i);
- taosHashPut(pExec->pDropTbUid, &tbUid, sizeof(int64_t), NULL, 0);
- }
- }
- } else {
- for (int32_t i = 0; i < 5; i++) {
- int32_t code = qUpdateQualifiedTableId(pExec->task[i], tbUidList, isAdd);
- ASSERT(code == 0);
- }
- }
- }
- return 0;
-}
-
-int32_t tqPushMsgNew(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver, SRpcHandleInfo handleInfo) {
- if (msgType != TDMT_VND_SUBMIT) return 0;
- void* pIter = NULL;
- STqExec* pExec = NULL;
- SSubmitReq* pReq = (SSubmitReq*)msg;
- int32_t workerId = 4;
- int64_t fetchOffset = ver;
-
- while (1) {
- pIter = taosHashIterate(pTq->pushMgr, pIter);
- if (pIter == NULL) break;
- pExec = *(STqExec**)pIter;
-
- taosWLockLatch(&pExec->pushHandle.lock);
-
- SRpcMsg* pMsg = atomic_load_ptr(&pExec->pushHandle.handle);
- ASSERT(pMsg);
-
- SMqDataBlkRsp rsp = {0};
- rsp.reqOffset = pExec->pushHandle.reqOffset;
- rsp.blockData = taosArrayInit(0, sizeof(void*));
- rsp.blockDataLen = taosArrayInit(0, sizeof(int32_t));
-
- if (pExec->subType == TOPIC_SUB_TYPE__TABLE) {
- qTaskInfo_t task = pExec->task[workerId];
- ASSERT(task);
- qSetStreamInput(task, pReq, STREAM_DATA_TYPE_SUBMIT_BLOCK);
- while (1) {
- SSDataBlock* pDataBlock = NULL;
- uint64_t ts = 0;
- if (qExecTask(task, &pDataBlock, &ts) < 0) {
- ASSERT(0);
- }
- if (pDataBlock == NULL) break;
-
- ASSERT(pDataBlock->info.rows != 0);
- ASSERT(pDataBlock->info.numOfCols != 0);
-
- int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pDataBlock);
- void* buf = taosMemoryCalloc(1, dataStrLen);
- SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)buf;
- pRetrieve->useconds = ts;
- pRetrieve->precision = TSDB_DEFAULT_PRECISION;
- pRetrieve->compressed = 0;
- pRetrieve->completed = 1;
- pRetrieve->numOfRows = htonl(pDataBlock->info.rows);
-
- // TODO enable compress
- int32_t actualLen = 0;
- blockCompressEncode(pDataBlock, pRetrieve->data, &actualLen, pDataBlock->info.numOfCols, false);
- actualLen += sizeof(SRetrieveTableRsp);
- ASSERT(actualLen <= dataStrLen);
- taosArrayPush(rsp.blockDataLen, &actualLen);
- taosArrayPush(rsp.blockData, &buf);
- rsp.blockNum++;
- }
- } else if (pExec->subType == TOPIC_SUB_TYPE__DB) {
- STqReadHandle* pReader = pExec->pExecReader[workerId];
- tqReadHandleSetMsg(pReader, pReq, 0);
- while (tqNextDataBlock(pReader)) {
- SSDataBlock block = {0};
- if (tqRetrieveDataBlock(&block.pDataBlock, pReader, &block.info.groupId, &block.info.uid, &block.info.rows,
- &block.info.numOfCols) < 0) {
- ASSERT(0);
- }
- int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(&block);
- void* buf = taosMemoryCalloc(1, dataStrLen);
- SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)buf;
- /*pRetrieve->useconds = 0;*/
- pRetrieve->precision = TSDB_DEFAULT_PRECISION;
- pRetrieve->compressed = 0;
- pRetrieve->completed = 1;
- pRetrieve->numOfRows = htonl(block.info.rows);
-
- // TODO enable compress
- int32_t actualLen = 0;
- blockCompressEncode(&block, pRetrieve->data, &actualLen, block.info.numOfCols, false);
- actualLen += sizeof(SRetrieveTableRsp);
- ASSERT(actualLen <= dataStrLen);
- taosArrayPush(rsp.blockDataLen, &actualLen);
- taosArrayPush(rsp.blockData, &buf);
- rsp.blockNum++;
- }
- } else {
- ASSERT(0);
- }
-
- if (rsp.blockNum == 0) {
- taosWUnLockLatch(&pExec->pushHandle.lock);
- continue;
- }
-
- ASSERT(taosArrayGetSize(rsp.blockData) == rsp.blockNum);
- ASSERT(taosArrayGetSize(rsp.blockDataLen) == rsp.blockNum);
-
- rsp.rspOffset = fetchOffset;
-
- int32_t tlen = sizeof(SMqRspHead) + tEncodeSMqDataBlkRsp(NULL, &rsp);
- void* buf = rpcMallocCont(tlen);
- if (buf == NULL) {
- pMsg->code = -1;
- return -1;
- }
-
- ((SMqRspHead*)buf)->mqMsgType = TMQ_MSG_TYPE__POLL_RSP;
- ((SMqRspHead*)buf)->epoch = pExec->pushHandle.epoch;
- ((SMqRspHead*)buf)->consumerId = pExec->pushHandle.consumerId;
-
- void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead));
- tEncodeSMqDataBlkRsp(&abuf, &rsp);
-
- SRpcMsg resp = {.info = handleInfo, .pCont = buf, .contLen = tlen, .code = 0};
- tmsgSendRsp(&resp);
-
- atomic_store_ptr(&pExec->pushHandle.handle, NULL);
- taosWUnLockLatch(&pExec->pushHandle.lock);
-
- tqDebug("vg %d offset %ld from consumer %ld (epoch %d) send rsp, block num: %d, reqOffset: %ld, rspOffset: %ld",
- TD_VID(pTq->pVnode), fetchOffset, pExec->pushHandle.consumerId, pExec->pushHandle.epoch, rsp.blockNum,
- rsp.reqOffset, rsp.rspOffset);
-
- // TODO destroy
- taosArrayDestroy(rsp.blockData);
- taosArrayDestroy(rsp.blockDataLen);
+int32_t tqSendPollRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqDataBlkRsp* pRsp) {
+ int32_t tlen = sizeof(SMqRspHead) + tEncodeSMqDataBlkRsp(NULL, pRsp);
+ void* buf = rpcMallocCont(tlen);
+ if (buf == NULL) {
+ return -1;
}
- return 0;
-}
-
-int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver) {
- if (msgType == TDMT_VND_SUBMIT) {
- if (taosHashGetSize(pTq->pStreamTasks) == 0) return 0;
+ ((SMqRspHead*)buf)->mqMsgType = TMQ_MSG_TYPE__POLL_RSP;
+ ((SMqRspHead*)buf)->epoch = pReq->epoch;
+ ((SMqRspHead*)buf)->consumerId = pReq->consumerId;
- if (tdUpdateExpireWindow(pTq->pVnode->pSma, msg, ver) != 0) {
- // TODO handle sma error
- }
- void* data = taosMemoryMalloc(msgLen);
- if (data == NULL) {
- return -1;
- }
- memcpy(data, msg, msgLen);
+ void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead));
+ tEncodeSMqDataBlkRsp(&abuf, pRsp);
- tqProcessStreamTrigger(pTq, data);
- }
+ SRpcMsg resp = {
+ .info = pMsg->info,
+ .pCont = buf,
+ .contLen = tlen,
+ .code = 0,
+ };
+ tmsgSendRsp(&resp);
- return 0;
-}
+ tqDebug("vg %d from consumer %ld (epoch %d) send rsp, block num: %d, reqOffset: %ld, rspOffset: %ld",
+ TD_VID(pTq->pVnode), pReq->consumerId, pReq->epoch, pRsp->blockNum, pRsp->reqOffset, pRsp->rspOffset);
-int tqCommit(STQ* pTq) {
- // do nothing
return 0;
}
int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
SMqPollReq* pReq = pMsg->pCont;
int64_t consumerId = pReq->consumerId;
- int64_t waitTime = pReq->waitTime;
+ int64_t timeout = pReq->timeout;
int32_t reqEpoch = pReq->epoch;
int64_t fetchOffset;
+ int32_t code = 0;
// get offset to fetch message
if (pReq->currentOffset == TMQ_CONF__RESET_OFFSET__EARLIEAST) {
@@ -309,12 +129,12 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
tqDebug("tmq poll: consumer %ld (epoch %d) recv poll req in vg %d, req %ld %ld", consumerId, pReq->epoch,
TD_VID(pTq->pVnode), pReq->currentOffset, fetchOffset);
- STqExec* pExec = taosHashGet(pTq->execs, pReq->subKey, strlen(pReq->subKey));
- ASSERT(pExec);
+ STqHandle* pHandle = taosHashGet(pTq->handles, pReq->subKey, strlen(pReq->subKey));
+ ASSERT(pHandle);
- int32_t consumerEpoch = atomic_load_32(&pExec->epoch);
+ int32_t consumerEpoch = atomic_load_32(&pHandle->epoch);
while (consumerEpoch < reqEpoch) {
- consumerEpoch = atomic_val_compare_exchange_32(&pExec->epoch, consumerEpoch, reqEpoch);
+ consumerEpoch = atomic_val_compare_exchange_32(&pHandle->epoch, consumerEpoch, reqEpoch);
}
SWalHead* pHeadWithCkSum = taosMemoryMalloc(sizeof(SWalHead) + 2048);
@@ -322,240 +142,103 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
return -1;
}
- walSetReaderCapacity(pExec->pWalReader, 2048);
+ walSetReaderCapacity(pHandle->pWalReader, 2048);
SMqDataBlkRsp rsp = {0};
rsp.reqOffset = pReq->currentOffset;
- rsp.withSchema = pExec->withSchema;
rsp.blockData = taosArrayInit(0, sizeof(void*));
rsp.blockDataLen = taosArrayInit(0, sizeof(int32_t));
- rsp.blockSchema = taosArrayInit(0, sizeof(void*));
- rsp.blockTbName = taosArrayInit(0, sizeof(void*));
- int8_t withTbName = pExec->withTbName;
- if (pReq->withTbName != -1) {
- withTbName = pReq->withTbName;
+ rsp.withTbName = pReq->withTbName;
+ if (rsp.withTbName) {
+ rsp.blockTbName = taosArrayInit(0, sizeof(void*));
+ }
+ if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
+ rsp.withSchema = false;
+ rsp.withTag = false;
+ } else {
+ rsp.withSchema = true;
+ rsp.withTag = false;
+ rsp.blockSchema = taosArrayInit(0, sizeof(void*));
}
- rsp.withTbName = withTbName;
while (1) {
- consumerEpoch = atomic_load_32(&pExec->epoch);
+ consumerEpoch = atomic_load_32(&pHandle->epoch);
if (consumerEpoch > reqEpoch) {
- tqDebug("tmq poll: consumer %ld (epoch %d) vg %d offset %ld, found new consumer epoch %d discard req epoch %d",
- consumerId, pReq->epoch, TD_VID(pTq->pVnode), fetchOffset, consumerEpoch, reqEpoch);
+ tqWarn("tmq poll: consumer %ld (epoch %d) vg %d offset %ld, found new consumer epoch %d, discard req epoch %d",
+ consumerId, pReq->epoch, TD_VID(pTq->pVnode), fetchOffset, consumerEpoch, reqEpoch);
break;
}
- taosThreadMutexLock(&pExec->pWalReader->mutex);
-
- if (walFetchHead(pExec->pWalReader, fetchOffset, pHeadWithCkSum) < 0) {
- tqDebug("tmq poll: consumer %ld (epoch %d) vg %d offset %ld, no more log to return", consumerId, pReq->epoch,
- TD_VID(pTq->pVnode), fetchOffset);
- taosThreadMutexUnlock(&pExec->pWalReader->mutex);
+ if (tqFetchLog(pTq, pHandle, &fetchOffset, &pHeadWithCkSum) < 0) {
+ // TODO add push mgr
break;
}
- if (pHeadWithCkSum->head.msgType != TDMT_VND_SUBMIT) {
- ASSERT(walSkipFetchBody(pExec->pWalReader, pHeadWithCkSum) == 0);
- } else {
- ASSERT(walFetchBody(pExec->pWalReader, &pHeadWithCkSum) == 0);
- }
-
SWalReadHead* pHead = &pHeadWithCkSum->head;
- taosThreadMutexUnlock(&pExec->pWalReader->mutex);
-
-#if 0
- SWalReadHead* pHead;
- if (walReadWithHandle_s(pExec->pWalReader, fetchOffset, &pHead) < 0) {
- // TODO: no more log, set timer to wait blocking time
- // if data inserted during waiting, launch query and
- // response to user
- tqDebug("tmq poll: consumer %ld (epoch %d) vg %d offset %ld, no more log to return", consumerId, pReq->epoch,
- TD_VID(pTq->pVnode), fetchOffset);
-
-#if 0
- // add to pushMgr
- taosWLockLatch(&pExec->pushHandle.lock);
-
- pExec->pushHandle.consumerId = consumerId;
- pExec->pushHandle.epoch = reqEpoch;
- pExec->pushHandle.reqOffset = rsp.reqOffset;
- pExec->pushHandle.skipLogNum = rsp.skipLogNum;
- pExec->pushHandle.handle = pMsg;
-
- taosWUnLockLatch(&pExec->pushHandle.lock);
-
- // TODO add timer
-
- // TODO: the pointer will always be valid?
- taosHashPut(pTq->pushMgr, &consumerId, sizeof(int64_t), &pExec, sizeof(void*));
- taosArrayDestroy(rsp.blockData);
- taosArrayDestroy(rsp.blockDataLen);
- return 0;
-#endif
-
- break;
- }
-#endif
-
tqDebug("tmq poll: consumer %ld (epoch %d) iter log, vg %d offset %ld msgType %d", consumerId, pReq->epoch,
TD_VID(pTq->pVnode), fetchOffset, pHead->msgType);
if (pHead->msgType == TDMT_VND_SUBMIT) {
SSubmitReq* pCont = (SSubmitReq*)&pHead->body;
- // table subscribe
- if (pExec->subType == TOPIC_SUB_TYPE__TABLE) {
- qTaskInfo_t task = pExec->task[workerId];
- ASSERT(task);
- qSetStreamInput(task, pCont, STREAM_DATA_TYPE_SUBMIT_BLOCK);
- while (1) {
- SSDataBlock* pDataBlock = NULL;
- uint64_t ts = 0;
- if (qExecTask(task, &pDataBlock, &ts) < 0) {
- ASSERT(0);
- }
- if (pDataBlock == NULL) break;
-
- ASSERT(pDataBlock->info.rows != 0);
- ASSERT(pDataBlock->info.numOfCols != 0);
-
- int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pDataBlock);
- void* buf = taosMemoryCalloc(1, dataStrLen);
- SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)buf;
- pRetrieve->useconds = ts;
- pRetrieve->precision = TSDB_DEFAULT_PRECISION;
- pRetrieve->compressed = 0;
- pRetrieve->completed = 1;
- pRetrieve->numOfRows = htonl(pDataBlock->info.rows);
-
- // TODO enable compress
- int32_t actualLen = 0;
- blockCompressEncode(pDataBlock, pRetrieve->data, &actualLen, pDataBlock->info.numOfCols, false);
- actualLen += sizeof(SRetrieveTableRsp);
- ASSERT(actualLen <= dataStrLen);
- taosArrayPush(rsp.blockDataLen, &actualLen);
- taosArrayPush(rsp.blockData, &buf);
-
- if (pExec->withSchema) {
- SSchemaWrapper* pSW = tCloneSSchemaWrapper(pExec->pExecReader[workerId]->pSchemaWrapper);
- taosArrayPush(rsp.blockSchema, &pSW);
- }
-
- if (withTbName) {
- SMetaReader mr = {0};
- metaReaderInit(&mr, pTq->pVnode->pMeta, 0);
- int64_t uid = pExec->pExecReader[workerId]->msgIter.uid;
- if (metaGetTableEntryByUid(&mr, uid) < 0) {
- ASSERT(0);
- }
- char* tbName = strdup(mr.me.name);
- taosArrayPush(rsp.blockTbName, &tbName);
- metaReaderClear(&mr);
- }
-
- rsp.blockNum++;
- }
- // db subscribe
- } else if (pExec->subType == TOPIC_SUB_TYPE__DB) {
- rsp.withSchema = 1;
- STqReadHandle* pReader = pExec->pExecReader[workerId];
- tqReadHandleSetMsg(pReader, pCont, 0);
- while (tqNextDataBlockFilterOut(pReader, pExec->pDropTbUid)) {
- SSDataBlock block = {0};
- if (tqRetrieveDataBlock(&block.pDataBlock, pReader, &block.info.groupId, &block.info.uid, &block.info.rows,
- &block.info.numOfCols) < 0) {
- if (terrno == TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND) continue;
- ASSERT(0);
- }
- int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(&block);
- void* buf = taosMemoryCalloc(1, dataStrLen);
- SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)buf;
- /*pRetrieve->useconds = 0;*/
- pRetrieve->precision = TSDB_DEFAULT_PRECISION;
- pRetrieve->compressed = 0;
- pRetrieve->completed = 1;
- pRetrieve->numOfRows = htonl(block.info.rows);
-
- // TODO enable compress
- int32_t actualLen = 0;
- blockCompressEncode(&block, pRetrieve->data, &actualLen, block.info.numOfCols, false);
- actualLen += sizeof(SRetrieveTableRsp);
- ASSERT(actualLen <= dataStrLen);
- taosArrayPush(rsp.blockDataLen, &actualLen);
- taosArrayPush(rsp.blockData, &buf);
- if (withTbName) {
- SMetaReader mr = {0};
- metaReaderInit(&mr, pTq->pVnode->pMeta, 0);
- if (metaGetTableEntryByUid(&mr, block.info.uid) < 0) {
- ASSERT(0);
- }
- char* tbName = strdup(mr.me.name);
- taosArrayPush(rsp.blockTbName, &tbName);
- metaReaderClear(&mr);
- }
-
- SSchemaWrapper* pSW = tCloneSSchemaWrapper(pExec->pExecReader[workerId]->pSchemaWrapper);
- taosArrayPush(rsp.blockSchema, &pSW);
-
- rsp.blockNum++;
- }
- } else {
- ASSERT(0);
+
+ if (tqDataExec(pTq, &pHandle->execHandle, pCont, &rsp, workerId) < 0) {
+ /*ASSERT(0);*/
}
+ } else {
+ // TODO
+ ASSERT(0);
}
// TODO batch optimization:
// TODO continue scan until meeting batch requirement
- if (rsp.blockNum != 0) break;
- rsp.skipLogNum++;
- fetchOffset++;
+ if (rsp.blockNum > 0 /* threshold */) {
+ break;
+ } else {
+ fetchOffset++;
+ }
}
taosMemoryFree(pHeadWithCkSum);
+
ASSERT(taosArrayGetSize(rsp.blockData) == rsp.blockNum);
ASSERT(taosArrayGetSize(rsp.blockDataLen) == rsp.blockNum);
-
- if (rsp.blockNum != 0)
- rsp.rspOffset = fetchOffset;
- else
- rsp.rspOffset = fetchOffset - 1;
-
- int32_t tlen = sizeof(SMqRspHead) + tEncodeSMqDataBlkRsp(NULL, &rsp);
- void* buf = rpcMallocCont(tlen);
- if (buf == NULL) {
- pMsg->code = -1;
- return -1;
+ if (rsp.withSchema) {
+ ASSERT(taosArrayGetSize(rsp.blockSchema) == rsp.blockNum);
}
- ((SMqRspHead*)buf)->mqMsgType = TMQ_MSG_TYPE__POLL_RSP;
- ((SMqRspHead*)buf)->epoch = pReq->epoch;
- ((SMqRspHead*)buf)->consumerId = consumerId;
-
- void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead));
- tEncodeSMqDataBlkRsp(&abuf, &rsp);
+ rsp.rspOffset = fetchOffset;
- SRpcMsg resp = {.info = pMsg->info, .pCont = buf, .contLen = tlen, .code = 0};
- tmsgSendRsp(&resp);
-
- tqDebug("vg %d offset %ld from consumer %ld (epoch %d) send rsp, block num: %d, reqOffset: %ld, rspOffset: %ld",
- TD_VID(pTq->pVnode), fetchOffset, consumerId, pReq->epoch, rsp.blockNum, rsp.reqOffset, rsp.rspOffset);
+ if (tqSendPollRsp(pTq, pMsg, pReq, &rsp) < 0) {
+ code = -1;
+ }
- // TODO destroy
+ // TODO wrap in destroy func
taosArrayDestroy(rsp.blockData);
taosArrayDestroy(rsp.blockDataLen);
- taosArrayDestroyP(rsp.blockSchema, (FDelete)tDeleteSSchemaWrapper);
- taosArrayDestroyP(rsp.blockTbName, (FDelete)taosMemoryFree);
- return 0;
+ if (rsp.withSchema) {
+ taosArrayDestroyP(rsp.blockSchema, (FDelete)tDeleteSSchemaWrapper);
+ }
+
+ if (rsp.withTbName) {
+ taosArrayDestroyP(rsp.blockTbName, (FDelete)taosMemoryFree);
+ }
+
+ return code;
}
int32_t tqProcessVgDeleteReq(STQ* pTq, char* msg, int32_t msgLen) {
SMqVDeleteReq* pReq = (SMqVDeleteReq*)msg;
- int32_t code = taosHashRemove(pTq->execs, pReq->subKey, strlen(pReq->subKey));
+ int32_t code = taosHashRemove(pTq->handles, pReq->subKey, strlen(pReq->subKey));
ASSERT(code == 0);
+
+ if (tqMetaDeleteHandle(pTq, pReq->subKey) < 0) {
+ ASSERT(0);
+ }
return 0;
}
@@ -564,80 +247,67 @@ int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) {
SMqRebVgReq req = {0};
tDecodeSMqRebVgReq(msg, &req);
// todo lock
- STqExec* pExec = taosHashGet(pTq->execs, req.subKey, strlen(req.subKey));
- if (pExec == NULL) {
+ STqHandle* pHandle = taosHashGet(pTq->handles, req.subKey, strlen(req.subKey));
+ if (pHandle == NULL) {
ASSERT(req.oldConsumerId == -1);
ASSERT(req.newConsumerId != -1);
- STqExec exec = {0};
- pExec = &exec;
+ STqHandle tqHandle = {0};
+ pHandle = &tqHandle;
/*taosInitRWLatch(&pExec->lock);*/
- memcpy(pExec->subKey, req.subKey, TSDB_SUBSCRIBE_KEY_LEN);
- pExec->consumerId = req.newConsumerId;
- pExec->epoch = -1;
+ memcpy(pHandle->subKey, req.subKey, TSDB_SUBSCRIBE_KEY_LEN);
+ pHandle->consumerId = req.newConsumerId;
+ pHandle->epoch = -1;
- pExec->subType = req.subType;
- pExec->withTbName = req.withTbName;
- pExec->withSchema = req.withSchema;
- pExec->withTag = req.withTag;
+ pHandle->execHandle.subType = req.subType;
- pExec->qmsg = req.qmsg;
- req.qmsg = NULL;
-
- pExec->pWalReader = walOpenReadHandle(pTq->pVnode->pWal);
- if (pExec->subType == TOPIC_SUB_TYPE__TABLE) {
+ pHandle->pWalReader = walOpenReadHandle(pTq->pVnode->pWal);
+ for (int32_t i = 0; i < 5; i++) {
+ pHandle->execHandle.pExecReader[i] = tqInitSubmitMsgScanner(pTq->pVnode->pMeta);
+ }
+ if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
+ pHandle->execHandle.exec.execCol.qmsg = req.qmsg;
+ req.qmsg = NULL;
for (int32_t i = 0; i < 5; i++) {
- pExec->pExecReader[i] = tqInitSubmitMsgScanner(pTq->pVnode->pMeta);
-
SReadHandle handle = {
- .reader = pExec->pExecReader[i],
+ .reader = pHandle->execHandle.pExecReader[i],
.meta = pTq->pVnode->pMeta,
.pMsgCb = &pTq->pVnode->msgCb,
};
- pExec->task[i] = qCreateStreamExecTaskInfo(pExec->qmsg, &handle);
- ASSERT(pExec->task[i]);
+ pHandle->execHandle.exec.execCol.task[i] =
+ qCreateStreamExecTaskInfo(pHandle->execHandle.exec.execCol.qmsg, &handle);
+ ASSERT(pHandle->execHandle.exec.execCol.task[i]);
+ }
+ } else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__DB) {
+ pHandle->execHandle.exec.execDb.pFilterOutTbUid =
+ taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
+ } else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
+ pHandle->execHandle.exec.execTb.suid = req.suid;
+ SArray* tbUidList = taosArrayInit(0, sizeof(int64_t));
+ tsdbGetCtbIdList(pTq->pVnode->pMeta, req.suid, tbUidList);
+ tqDebug("vg %d, tq try get suid: %ld", pTq->pVnode->config.vgId, req.suid);
+ for (int32_t i = 0; i < taosArrayGetSize(tbUidList); i++) {
+ int64_t tbUid = *(int64_t*)taosArrayGet(tbUidList, i);
+ tqDebug("vg %d, idx %d, uid: %ld", pTq->pVnode->config.vgId, i, tbUid);
}
- } else {
for (int32_t i = 0; i < 5; i++) {
- pExec->pExecReader[i] = tqInitSubmitMsgScanner(pTq->pVnode->pMeta);
+ tqReadHandleSetTbUidList(pHandle->execHandle.pExecReader[i], tbUidList);
}
- pExec->pDropTbUid = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
+ taosArrayDestroy(tbUidList);
}
- taosHashPut(pTq->execs, req.subKey, strlen(req.subKey), pExec, sizeof(STqExec));
- return 0;
+ taosHashPut(pTq->handles, req.subKey, strlen(req.subKey), pHandle, sizeof(STqHandle));
} else {
- /*if (req.newConsumerId != -1) {*/
- /*taosWLockLatch(&pExec->lock);*/
- ASSERT(pExec->consumerId == req.oldConsumerId);
+ /*ASSERT(pExec->consumerId == req.oldConsumerId);*/
// TODO handle qmsg and exec modification
- atomic_store_32(&pExec->epoch, -1);
- atomic_store_64(&pExec->consumerId, req.newConsumerId);
- atomic_add_fetch_32(&pExec->epoch, 1);
- /*taosWUnLockLatch(&pExec->lock);*/
- return 0;
- /*} else {*/
- // TODO
- /*taosHashRemove(pTq->tqMetaNew, req.subKey, strlen(req.subKey));*/
- /*return 0;*/
- /*}*/
+ atomic_store_32(&pHandle->epoch, -1);
+ atomic_store_64(&pHandle->consumerId, req.newConsumerId);
+ atomic_add_fetch_32(&pHandle->epoch, 1);
}
-}
-void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data) {
- const SArray* pRes = (const SArray*)data;
- SVnode* pVnode = (SVnode*)vnode;
-
- ASSERT(pTask->tbSink.pTSchema);
- SSubmitReq* pReq = tdBlockToSubmit(pRes, pTask->tbSink.pTSchema, true, pTask->tbSink.stbUid, pVnode->config.vgId);
- /*tPrintFixedSchemaSubmitReq(pReq, pTask->tbSink.pTSchema);*/
- // build write msg
- SRpcMsg msg = {
- .msgType = TDMT_VND_SUBMIT,
- .pCont = pReq,
- .contLen = ntohl(pReq->length),
- };
-
- ASSERT(tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) == 0);
+ if (tqMetaSaveHandle(pTq, req.subKey, pHandle) < 0) {
+ // TODO
+ }
+ return 0;
}
int32_t tqProcessTaskDeploy(STQ* pTq, char* msg, int32_t msgLen) {
@@ -708,27 +378,15 @@ FAIL:
}
int32_t tqProcessStreamTrigger(STQ* pTq, SSubmitReq* pReq) {
- void* pIter = NULL;
- bool failed = false;
+ void* pIter = NULL;
+ bool failed = false;
+ SStreamDataSubmit* pSubmit = NULL;
- SStreamDataSubmit* pSubmit = taosAllocateQitem(sizeof(SStreamDataSubmit), DEF_QITEM);
+ pSubmit = streamDataSubmitNew(pReq);
if (pSubmit == NULL) {
failed = true;
- goto SET_TASK_FAIL;
- }
- pSubmit->dataRef = taosMemoryMalloc(sizeof(int32_t));
- if (pSubmit->dataRef == NULL) {
- failed = true;
- goto SET_TASK_FAIL;
}
- pSubmit->type = STREAM_INPUT__DATA_SUBMIT;
- /*pSubmit->sourceVer = ver;*/
- /*pSubmit->sourceVg = pTq->pVnode->config.vgId;*/
- pSubmit->data = pReq;
- *pSubmit->dataRef = 1;
-
-SET_TASK_FAIL:
while (1) {
pIter = taosHashIterate(pTq->pStreamTasks, pIter);
if (pIter == NULL) break;
@@ -742,8 +400,12 @@ SET_TASK_FAIL:
continue;
}
- streamDataSubmitRefInc(pSubmit);
- taosWriteQitem(pTask->inputQ, pSubmit);
+ SStreamDataSubmit* pSubmitClone = streamSubmitRefClone(pSubmit);
+ if (pSubmitClone == NULL) {
+ atomic_store_8(&pTask->inputStatus, TASK_INPUT_STATUS__FAILED);
+ continue;
+ }
+ taosWriteQitem(pTask->inputQ, pSubmitClone);
int8_t execStatus = atomic_load_8(&pTask->status);
if (execStatus == TASK_STATUS__IDLE || execStatus == TASK_STATUS__CLOSING) {
@@ -766,18 +428,12 @@ SET_TASK_FAIL:
}
}
- if (!failed) {
+ if (pSubmit) {
streamDataSubmitRefDec(pSubmit);
- return 0;
- } else {
- if (pSubmit) {
- if (pSubmit->dataRef) {
- taosMemoryFree(pSubmit->dataRef);
- }
- taosFreeQitem(pSubmit);
- }
- return -1;
+ taosFreeQitem(pSubmit);
}
+
+ return failed ? -1 : 0;
}
int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) {
diff --git a/source/dnode/vnode/src/tq/tqCommit.c b/source/dnode/vnode/src/tq/tqCommit.c
index e31566f3faca14b0955b851f654247355f500630..7b116bff2e942bf1a461458ea443548e708756eb 100644
--- a/source/dnode/vnode/src/tq/tqCommit.c
+++ b/source/dnode/vnode/src/tq/tqCommit.c
@@ -14,3 +14,8 @@
*/
#include "tq.h"
+
+int tqCommit(STQ* pTq) {
+ // do nothing
+ return 0;
+}
diff --git a/source/dnode/vnode/src/tq/tqExec.c b/source/dnode/vnode/src/tq/tqExec.c
new file mode 100644
index 0000000000000000000000000000000000000000..b8fec34b57f49ed732f3a2f3820ec50b367937fb
--- /dev/null
+++ b/source/dnode/vnode/src/tq/tqExec.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include "tq.h"
+
+static int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataBlkRsp* pRsp) {
+ int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock);
+ void* buf = taosMemoryCalloc(1, dataStrLen);
+ if (buf == NULL) return -1;
+
+ SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)buf;
+ pRetrieve->useconds = 0;
+ pRetrieve->precision = TSDB_DEFAULT_PRECISION;
+ pRetrieve->compressed = 0;
+ pRetrieve->completed = 1;
+ pRetrieve->numOfRows = htonl(pBlock->info.rows);
+
+ // TODO enable compress
+ int32_t actualLen = 0;
+ blockCompressEncode(pBlock, pRetrieve->data, &actualLen, pBlock->info.numOfCols, false);
+ actualLen += sizeof(SRetrieveTableRsp);
+ ASSERT(actualLen <= dataStrLen);
+ taosArrayPush(pRsp->blockDataLen, &actualLen);
+ taosArrayPush(pRsp->blockData, &buf);
+ return 0;
+}
+
+static int32_t tqAddBlockSchemaToRsp(const STqExecHandle* pExec, int32_t workerId, SMqDataBlkRsp* pRsp) {
+ SSchemaWrapper* pSW = tCloneSSchemaWrapper(pExec->pExecReader[workerId]->pSchemaWrapper);
+ taosArrayPush(pRsp->blockSchema, &pSW);
+ return 0;
+}
+
+static int32_t tqAddTbNameToRsp(const STQ* pTq, const STqExecHandle* pExec, SMqDataBlkRsp* pRsp, int32_t workerId) {
+ SMetaReader mr = {0};
+ metaReaderInit(&mr, pTq->pVnode->pMeta, 0);
+ int64_t uid = pExec->pExecReader[workerId]->msgIter.uid;
+ if (metaGetTableEntryByUid(&mr, uid) < 0) {
+ ASSERT(0);
+ return -1;
+ }
+ char* tbName = strdup(mr.me.name);
+ taosArrayPush(pRsp->blockTbName, &tbName);
+ metaReaderClear(&mr);
+ return 0;
+}
+
+int32_t tqDataExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataBlkRsp* pRsp, int32_t workerId) {
+ if (pExec->subType == TOPIC_SUB_TYPE__COLUMN) {
+ qTaskInfo_t task = pExec->exec.execCol.task[workerId];
+ ASSERT(task);
+ qSetStreamInput(task, pReq, STREAM_DATA_TYPE_SUBMIT_BLOCK, false);
+ while (1) {
+ SSDataBlock* pDataBlock = NULL;
+ uint64_t ts = 0;
+ if (qExecTask(task, &pDataBlock, &ts) < 0) {
+ ASSERT(0);
+ }
+ if (pDataBlock == NULL) break;
+
+ ASSERT(pDataBlock->info.rows != 0);
+ ASSERT(pDataBlock->info.numOfCols != 0);
+
+ tqAddBlockDataToRsp(pDataBlock, pRsp);
+ if (pRsp->withTbName) {
+ tqAddTbNameToRsp(pTq, pExec, pRsp, workerId);
+ }
+ pRsp->blockNum++;
+ }
+ } else if (pExec->subType == TOPIC_SUB_TYPE__TABLE) {
+ pRsp->withSchema = 1;
+ STqReadHandle* pReader = pExec->pExecReader[workerId];
+ tqReadHandleSetMsg(pReader, pReq, 0);
+ while (tqNextDataBlock(pReader)) {
+ SSDataBlock block = {0};
+ if (tqRetrieveDataBlock(&block.pDataBlock, pReader, &block.info.groupId, &block.info.uid, &block.info.rows,
+ &block.info.numOfCols) < 0) {
+ if (terrno == TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND) continue;
+ ASSERT(0);
+ }
+ tqAddBlockDataToRsp(&block, pRsp);
+ if (pRsp->withTbName) {
+ tqAddTbNameToRsp(pTq, pExec, pRsp, workerId);
+ }
+ tqAddBlockSchemaToRsp(pExec, workerId, pRsp);
+ pRsp->blockNum++;
+ }
+ } else if (pExec->subType == TOPIC_SUB_TYPE__DB) {
+ pRsp->withSchema = 1;
+ STqReadHandle* pReader = pExec->pExecReader[workerId];
+ tqReadHandleSetMsg(pReader, pReq, 0);
+ while (tqNextDataBlockFilterOut(pReader, pExec->exec.execDb.pFilterOutTbUid)) {
+ SSDataBlock block = {0};
+ if (tqRetrieveDataBlock(&block.pDataBlock, pReader, &block.info.groupId, &block.info.uid, &block.info.rows,
+ &block.info.numOfCols) < 0) {
+ if (terrno == TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND) continue;
+ ASSERT(0);
+ }
+ tqAddBlockDataToRsp(&block, pRsp);
+ if (pRsp->withTbName) {
+ tqAddTbNameToRsp(pTq, pExec, pRsp, workerId);
+ }
+ tqAddBlockSchemaToRsp(pExec, workerId, pRsp);
+ pRsp->blockNum++;
+ }
+ }
+ if (pRsp->blockNum == 0) {
+ pRsp->skipLogNum++;
+ return -1;
+ }
+ return 0;
+}
diff --git a/source/dnode/vnode/src/tq/tqMeta.c b/source/dnode/vnode/src/tq/tqMeta.c
new file mode 100644
index 0000000000000000000000000000000000000000..9447c4007b87cd9dd256c555df1ac4eb431edaee
--- /dev/null
+++ b/source/dnode/vnode/src/tq/tqMeta.c
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+#include "tdbInt.h"
+#include "tq.h"
+
+static int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle) {
+ if (tStartEncode(pEncoder) < 0) return -1;
+ if (tEncodeCStr(pEncoder, pHandle->subKey) < 0) return -1;
+ if (tEncodeI64(pEncoder, pHandle->consumerId) < 0) return -1;
+ if (tEncodeI32(pEncoder, pHandle->epoch) < 0) return -1;
+ if (tEncodeI8(pEncoder, pHandle->execHandle.subType) < 0) return -1;
+ if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
+ if (tEncodeCStr(pEncoder, pHandle->execHandle.exec.execCol.qmsg) < 0) return -1;
+ }
+ tEndEncode(pEncoder);
+ return pEncoder->pos;
+}
+
+static int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle) {
+ if (tStartDecode(pDecoder) < 0) return -1;
+ if (tDecodeCStrTo(pDecoder, pHandle->subKey) < 0) return -1;
+ if (tDecodeI64(pDecoder, &pHandle->consumerId) < 0) return -1;
+ if (tDecodeI32(pDecoder, &pHandle->epoch) < 0) return -1;
+ if (tDecodeI8(pDecoder, &pHandle->execHandle.subType) < 0) return -1;
+ if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
+ if (tDecodeCStrAlloc(pDecoder, &pHandle->execHandle.exec.execCol.qmsg) < 0) return -1;
+ }
+ tEndDecode(pDecoder);
+ return 0;
+}
+
+int tqExecKeyCompare(const void* pKey1, int32_t kLen1, const void* pKey2, int32_t kLen2) {
+ return strcmp(pKey1, pKey2);
+}
+
+int32_t tqMetaOpen(STQ* pTq) {
+ if (tdbOpen(pTq->path, 16 * 1024, 1, &pTq->pMetaStore) < 0) {
+ ASSERT(0);
+ }
+
+ if (tdbTbOpen("handles", -1, -1, tqExecKeyCompare, pTq->pMetaStore, &pTq->pExecStore) < 0) {
+ ASSERT(0);
+ }
+
+ TXN txn;
+
+ if (tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, 0) < 0) {
+ ASSERT(0);
+ }
+
+ TBC* pCur;
+ if (tdbTbcOpen(pTq->pExecStore, &pCur, &txn) < 0) {
+ ASSERT(0);
+ }
+
+ void* pKey;
+ int kLen;
+ void* pVal;
+ int vLen;
+
+ tdbTbcMoveToFirst(pCur);
+ SDecoder decoder;
+
+ while (tdbTbcNext(pCur, &pKey, &kLen, &pVal, &vLen) == 0) {
+ STqHandle handle;
+ tDecoderInit(&decoder, (uint8_t*)pVal, vLen);
+ tDecodeSTqHandle(&decoder, &handle);
+ handle.pWalReader = walOpenReadHandle(pTq->pVnode->pWal);
+ for (int32_t i = 0; i < 5; i++) {
+ handle.execHandle.pExecReader[i] = tqInitSubmitMsgScanner(pTq->pVnode->pMeta);
+ }
+ if (handle.execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
+ for (int32_t i = 0; i < 5; i++) {
+ SReadHandle reader = {
+ .reader = handle.execHandle.pExecReader[i],
+ .meta = pTq->pVnode->pMeta,
+ .pMsgCb = &pTq->pVnode->msgCb,
+ };
+ handle.execHandle.exec.execCol.task[i] =
+ qCreateStreamExecTaskInfo(handle.execHandle.exec.execCol.qmsg, &reader);
+ ASSERT(handle.execHandle.exec.execCol.task[i]);
+ }
+ } else {
+ handle.execHandle.exec.execDb.pFilterOutTbUid =
+ taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
+ }
+ taosHashPut(pTq->handles, pKey, kLen, &handle, sizeof(STqHandle));
+ }
+
+ if (tdbTxnClose(&txn) < 0) {
+ ASSERT(0);
+ }
+ return 0;
+}
+
+int32_t tqMetaClose(STQ* pTq) {
+ tdbClose(pTq->pMetaStore);
+ return 0;
+}
+
+int32_t tqMetaSaveHandle(STQ* pTq, const char* key, const STqHandle* pHandle) {
+ int32_t code;
+ int32_t vlen;
+ tEncodeSize(tEncodeSTqHandle, pHandle, vlen, code);
+ ASSERT(code == 0);
+
+ void* buf = taosMemoryCalloc(1, vlen);
+ if (buf == NULL) {
+ ASSERT(0);
+ }
+
+ SEncoder encoder;
+ tEncoderInit(&encoder, buf, vlen);
+
+ if (tEncodeSTqHandle(&encoder, pHandle) < 0) {
+ ASSERT(0);
+ }
+
+ TXN txn;
+
+ if (tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) {
+ ASSERT(0);
+ }
+
+ if (tdbBegin(pTq->pMetaStore, &txn) < 0) {
+ ASSERT(0);
+ }
+
+ if (tdbTbUpsert(pTq->pExecStore, key, (int)strlen(key), buf, vlen, &txn) < 0) {
+ ASSERT(0);
+ }
+
+ if (tdbCommit(pTq->pMetaStore, &txn) < 0) {
+ ASSERT(0);
+ }
+
+ tEncoderClear(&encoder);
+ taosMemoryFree(buf);
+ return 0;
+}
+
+int32_t tqMetaDeleteHandle(STQ* pTq, const char* key) {
+ TXN txn;
+
+ if (tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) {
+ ASSERT(0);
+ }
+
+ if (tdbBegin(pTq->pMetaStore, &txn) < 0) {
+ ASSERT(0);
+ }
+
+ if (tdbTbDelete(pTq->pExecStore, key, (int)strlen(key), &txn) < 0) {
+ /*ASSERT(0);*/
+ }
+
+ if (tdbCommit(pTq->pMetaStore, &txn) < 0) {
+ ASSERT(0);
+ }
+
+ return 0;
+}
diff --git a/source/dnode/vnode/src/tq/tqOffset.c b/source/dnode/vnode/src/tq/tqOffset.c
index 90f512611b1100bc79a6e85784ad87ebe10380c2..4d83a67579f89c24bde1c4724fdaacd1666bcfdd 100644
--- a/source/dnode/vnode/src/tq/tqOffset.c
+++ b/source/dnode/vnode/src/tq/tqOffset.c
@@ -30,7 +30,7 @@ struct STqOffsetStore {
SHashObj* pHash; // SHashObj
};
-STqOffsetStore* STqOffsetOpen(STqOffsetCfg* pCfg) {
+STqOffsetStore* tqOffsetOpen(STqOffsetCfg* pCfg) {
STqOffsetStore* pStore = taosMemoryMalloc(sizeof(STqOffsetStore));
if (pStore == NULL) {
return NULL;
diff --git a/source/dnode/vnode/src/tq/tqPush.c b/source/dnode/vnode/src/tq/tqPush.c
index f2f48bbc8a69a022d0fc6b8a88c5a9a55d0b4ad6..d94c3e387a09dd891825a6d6ed11b96a248d9605 100644
--- a/source/dnode/vnode/src/tq/tqPush.c
+++ b/source/dnode/vnode/src/tq/tqPush.c
@@ -12,3 +12,244 @@
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see .
*/
+
+#include "tq.h"
+
+void tqTmrRspFunc(void* param, void* tmrId) {
+ STqHandle* pHandle = (STqHandle*)param;
+ atomic_store_8(&pHandle->pushHandle.tmrStopped, 1);
+}
+
+static int32_t tqLoopExecFromQueue(STQ* pTq, STqHandle* pHandle, SStreamDataSubmit** ppSubmit, SMqDataBlkRsp* pRsp) {
+ SStreamDataSubmit* pSubmit = *ppSubmit;
+ while (pSubmit != NULL) {
+ ASSERT(pSubmit->ver == pHandle->pushHandle.processedVer + 1);
+ if (tqDataExec(pTq, &pHandle->execHandle, pSubmit->data, pRsp, 0) < 0) {
+ /*ASSERT(0);*/
+ }
+ // update processed
+ atomic_store_64(&pHandle->pushHandle.processedVer, pSubmit->ver);
+ streamQSetSuccess(&pHandle->pushHandle.inputQ);
+ streamDataSubmitRefDec(pSubmit);
+ if (pRsp->blockNum > 0) {
+ *ppSubmit = pSubmit;
+ return 0;
+ } else {
+ pSubmit = streamQNextItem(&pHandle->pushHandle.inputQ);
+ }
+ }
+ *ppSubmit = pSubmit;
+ return -1;
+}
+
+int32_t tqExecFromInputQ(STQ* pTq, STqHandle* pHandle) {
+ SMqDataBlkRsp rsp = {0};
+ // 1. guard and set status executing
+ int8_t execStatus =
+ atomic_val_compare_exchange_8(&pHandle->pushHandle.execStatus, TASK_STATUS__IDLE, TASK_STATUS__EXECUTING);
+ if (execStatus == TASK_STATUS__IDLE) {
+ SStreamDataSubmit* pSubmit = NULL;
+ // 2. check processedVer
+ // 2.1. if not missed, get msg from queue
+ // 2.2. if missed, scan wal
+ pSubmit = streamQNextItem(&pHandle->pushHandle.inputQ);
+ while (pHandle->pushHandle.processedVer <= pSubmit->ver) {
+ // read from wal
+ }
+ while (pHandle->pushHandle.processedVer > pSubmit->ver + 1) {
+ streamQSetSuccess(&pHandle->pushHandle.inputQ);
+ streamDataSubmitRefDec(pSubmit);
+ pSubmit = streamQNextItem(&pHandle->pushHandle.inputQ);
+ if (pSubmit == NULL) break;
+ }
+ // 3. exec, after each success, update processed ver
+ // first run
+ if (tqLoopExecFromQueue(pTq, pHandle, &pSubmit, &rsp) == 0) {
+ goto SEND_RSP;
+ }
+ // set exec status closing
+ atomic_store_8(&pHandle->pushHandle.execStatus, TASK_STATUS__CLOSING);
+ // second run
+ if (tqLoopExecFromQueue(pTq, pHandle, &pSubmit, &rsp) == 0) {
+ goto SEND_RSP;
+ }
+ // set exec status idle
+ atomic_store_8(&pHandle->pushHandle.execStatus, TASK_STATUS__IDLE);
+ }
+SEND_RSP:
+ // 4. if get result
+ // 4.1 set exec input status blocked and exec status idle
+ atomic_store_8(&pHandle->pushHandle.execStatus, TASK_STATUS__IDLE);
+ // 4.2 rpc send
+ rsp.rspOffset = pHandle->pushHandle.processedVer;
+ /*if (tqSendPollRsp(pTq, pMsg, pReq, &rsp) < 0) {*/
+ /*return -1;*/
+ /*}*/
+ // 4.3 clear rpc info
+ memset(&pHandle->pushHandle.rpcInfo, 0, sizeof(SRpcHandleInfo));
+ return 0;
+}
+
+int32_t tqOpenPushHandle(STQ* pTq, STqHandle* pHandle) {
+ memset(&pHandle->pushHandle, 0, sizeof(STqPushHandle));
+ pHandle->pushHandle.inputQ.queue = taosOpenQueue();
+ pHandle->pushHandle.inputQ.qall = taosAllocateQall();
+ if (pHandle->pushHandle.inputQ.queue == NULL || pHandle->pushHandle.inputQ.qall == NULL) {
+ if (pHandle->pushHandle.inputQ.queue) {
+ taosCloseQueue(pHandle->pushHandle.inputQ.queue);
+ }
+ if (pHandle->pushHandle.inputQ.qall) {
+ taosFreeQall(pHandle->pushHandle.inputQ.qall);
+ }
+ return -1;
+ }
+ return 0;
+}
+
+int32_t tqPreparePush(STQ* pTq, STqHandle* pHandle, int64_t reqId, const SRpcHandleInfo* pInfo, int64_t processedVer,
+ int64_t timeout) {
+ memcpy(&pHandle->pushHandle.rpcInfo, pInfo, sizeof(SRpcHandleInfo));
+ atomic_store_64(&pHandle->pushHandle.reqId, reqId);
+ atomic_store_64(&pHandle->pushHandle.processedVer, processedVer);
+ atomic_store_8(&pHandle->pushHandle.inputStatus, TASK_INPUT_STATUS__NORMAL);
+ atomic_store_8(&pHandle->pushHandle.tmrStopped, 0);
+ taosTmrReset(tqTmrRspFunc, (int32_t)timeout, pHandle, tqMgmt.timer, &pHandle->pushHandle.timerId);
+ return 0;
+}
+
+int32_t tqEnqueue(STqHandle* pHandle, SStreamDataSubmit* pSubmit) {
+ int8_t inputStatus = atomic_load_8(&pHandle->pushHandle.inputStatus);
+ if (inputStatus == TASK_INPUT_STATUS__NORMAL) {
+ SStreamDataSubmit* pSubmitClone = streamSubmitRefClone(pSubmit);
+ if (pSubmitClone == NULL) {
+ return -1;
+ }
+ taosWriteQitem(pHandle->pushHandle.inputQ.queue, pSubmitClone);
+ return 0;
+ }
+ return -1;
+}
+
+int32_t tqSendExecReq(STQ* pTq, STqHandle* pHandle) {
+ //
+ return 0;
+}
+
+int32_t tqEnqueueAll(STQ* pTq, SSubmitReq* pReq) {
+ void* pIter = NULL;
+ SStreamDataSubmit* pSubmit = streamDataSubmitNew(pReq);
+ if (pSubmit == NULL) {
+ return -1;
+ }
+
+ while (1) {
+ pIter = taosHashIterate(pTq->handles, pIter);
+ if (pIter == NULL) break;
+ STqHandle* pHandle = (STqHandle*)pIter;
+ if (tqEnqueue(pHandle, pSubmit) < 0) {
+ continue;
+ }
+ int8_t execStatus = atomic_load_8(&pHandle->pushHandle.execStatus);
+ if (execStatus == TASK_STATUS__IDLE || execStatus == TASK_STATUS__CLOSING) {
+ tqSendExecReq(pTq, pHandle);
+ }
+ }
+
+ streamDataSubmitRefDec(pSubmit);
+
+ return 0;
+}
+
+int32_t tqPushMsgNew(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver, SRpcHandleInfo handleInfo) {
+ if (msgType != TDMT_VND_SUBMIT) return 0;
+ void* pIter = NULL;
+ STqHandle* pHandle = NULL;
+ SSubmitReq* pReq = (SSubmitReq*)msg;
+ int32_t workerId = 4;
+ int64_t fetchOffset = ver;
+
+ while (1) {
+ pIter = taosHashIterate(pTq->pushMgr, pIter);
+ if (pIter == NULL) break;
+ pHandle = *(STqHandle**)pIter;
+
+ taosWLockLatch(&pHandle->pushHandle.lock);
+
+ SMqDataBlkRsp rsp = {0};
+ rsp.reqOffset = pHandle->pushHandle.reqOffset;
+ rsp.blockData = taosArrayInit(0, sizeof(void*));
+ rsp.blockDataLen = taosArrayInit(0, sizeof(int32_t));
+
+ if (msgType == TDMT_VND_SUBMIT) {
+ tqDataExec(pTq, &pHandle->execHandle, pReq, &rsp, workerId);
+ } else {
+ // TODO
+ ASSERT(0);
+ }
+
+ if (rsp.blockNum == 0) {
+ taosWUnLockLatch(&pHandle->pushHandle.lock);
+ continue;
+ }
+
+ ASSERT(taosArrayGetSize(rsp.blockData) == rsp.blockNum);
+ ASSERT(taosArrayGetSize(rsp.blockDataLen) == rsp.blockNum);
+
+ rsp.rspOffset = fetchOffset;
+
+ int32_t tlen = sizeof(SMqRspHead) + tEncodeSMqDataBlkRsp(NULL, &rsp);
+ void* buf = rpcMallocCont(tlen);
+ if (buf == NULL) {
+ // todo free
+ return -1;
+ }
+
+ ((SMqRspHead*)buf)->mqMsgType = TMQ_MSG_TYPE__POLL_RSP;
+ ((SMqRspHead*)buf)->epoch = pHandle->pushHandle.epoch;
+ ((SMqRspHead*)buf)->consumerId = pHandle->pushHandle.consumerId;
+
+ void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead));
+ tEncodeSMqDataBlkRsp(&abuf, &rsp);
+
+ SRpcMsg resp = {
+ .info = pHandle->pushHandle.rpcInfo,
+ .pCont = buf,
+ .contLen = tlen,
+ .code = 0,
+ };
+ tmsgSendRsp(&resp);
+
+ memset(&pHandle->pushHandle.rpcInfo, 0, sizeof(SRpcHandleInfo));
+ taosWUnLockLatch(&pHandle->pushHandle.lock);
+
+ tqDebug("vg %d offset %ld from consumer %ld (epoch %d) send rsp, block num: %d, reqOffset: %ld, rspOffset: %ld",
+ TD_VID(pTq->pVnode), fetchOffset, pHandle->pushHandle.consumerId, pHandle->pushHandle.epoch, rsp.blockNum,
+ rsp.reqOffset, rsp.rspOffset);
+
+ // TODO destroy
+ taosArrayDestroy(rsp.blockData);
+ taosArrayDestroy(rsp.blockDataLen);
+ }
+
+ return 0;
+}
+
+int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver) {
+ if (msgType == TDMT_VND_SUBMIT) {
+ if (taosHashGetSize(pTq->pStreamTasks) == 0) return 0;
+
+ if (tdUpdateExpireWindow(pTq->pVnode->pSma, msg, ver) != 0) {
+ // TODO handle sma error
+ }
+ void* data = taosMemoryMalloc(msgLen);
+ if (data == NULL) {
+ return -1;
+ }
+ memcpy(data, msg, msgLen);
+
+ tqProcessStreamTrigger(pTq, data);
+ }
+
+ return 0;
+}
+
diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c
index be8d786de2e7c015f938e87431129f5b2d067d00..8909a00c72faf0e7ea9df06819c571af28921da8 100644
--- a/source/dnode/vnode/src/tq/tqRead.c
+++ b/source/dnode/vnode/src/tq/tqRead.c
@@ -15,6 +15,48 @@
#include "tq.h"
+int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalHead** ppHeadWithCkSum) {
+ int32_t code = 0;
+ taosThreadMutexLock(&pHandle->pWalReader->mutex);
+ int64_t offset = *fetchOffset;
+
+ while (1) {
+ if (walFetchHead(pHandle->pWalReader, offset, *ppHeadWithCkSum) < 0) {
+ tqDebug("tmq poll: consumer %ld (epoch %d) vg %d offset %ld, no more log to return", pHandle->consumerId,
+ pHandle->epoch, TD_VID(pTq->pVnode), offset);
+ *fetchOffset = offset - 1;
+ code = -1;
+ goto END;
+ }
+
+ if ((*ppHeadWithCkSum)->head.msgType == TDMT_VND_SUBMIT) {
+ code = walFetchBody(pHandle->pWalReader, ppHeadWithCkSum);
+
+ if (code < 0) {
+ ASSERT(0);
+ *fetchOffset = offset;
+ code = -1;
+ goto END;
+ }
+ *fetchOffset = offset;
+ code = 0;
+ goto END;
+ } else {
+ code = walSkipFetchBody(pHandle->pWalReader, *ppHeadWithCkSum);
+ if (code < 0) {
+ ASSERT(0);
+ *fetchOffset = offset;
+ code = -1;
+ goto END;
+ }
+ offset++;
+ }
+ }
+END:
+ taosThreadMutexUnlock(&pHandle->pWalReader->mutex);
+ return code;
+}
+
STqReadHandle* tqInitSubmitMsgScanner(SMeta* pMeta) {
STqReadHandle* pReadHandle = taosMemoryMalloc(sizeof(STqReadHandle));
if (pReadHandle == NULL) {
@@ -24,7 +66,7 @@ STqReadHandle* tqInitSubmitMsgScanner(SMeta* pMeta) {
pReadHandle->pMsg = NULL;
pReadHandle->ver = -1;
pReadHandle->pColIdList = NULL;
- pReadHandle->sver = -1;
+ pReadHandle->cachedSchemaVer = -1;
pReadHandle->cachedSchemaUid = -1;
pReadHandle->pSchema = NULL;
pReadHandle->pSchemaWrapper = NULL;
@@ -83,16 +125,16 @@ bool tqNextDataBlockFilterOut(STqReadHandle* pHandle, SHashObj* filterOutUids) {
int32_t tqRetrieveDataBlock(SArray** ppCols, STqReadHandle* pHandle, uint64_t* pGroupId, uint64_t* pUid,
int32_t* pNumOfRows, int16_t* pNumOfCols) {
- /*int32_t sversion = pHandle->pBlock->sversion;*/
- // TODO set to real sversion
*pUid = 0;
- int32_t sversion = 1;
- if (pHandle->sver != sversion || pHandle->cachedSchemaUid != pHandle->msgIter.suid) {
+ // TODO set to real sversion
+ /*int32_t sversion = 1;*/
+ int32_t sversion = htonl(pHandle->pBlock->sversion);
+ if (pHandle->cachedSchemaVer != sversion || pHandle->cachedSchemaUid != pHandle->msgIter.suid) {
pHandle->pSchema = metaGetTbTSchema(pHandle->pVnodeMeta, pHandle->msgIter.uid, sversion);
if (pHandle->pSchema == NULL) {
tqWarn("cannot found tsschema for table: uid: %ld (suid: %ld), version %d, possibly dropped table",
- pHandle->msgIter.uid, pHandle->msgIter.suid, pHandle->sver);
+ pHandle->msgIter.uid, pHandle->msgIter.suid, pHandle->cachedSchemaVer);
/*ASSERT(0);*/
terrno = TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND;
return -1;
@@ -102,12 +144,12 @@ int32_t tqRetrieveDataBlock(SArray** ppCols, STqReadHandle* pHandle, uint64_t* p
pHandle->pSchemaWrapper = metaGetTableSchema(pHandle->pVnodeMeta, pHandle->msgIter.suid, sversion, true);
if (pHandle->pSchemaWrapper == NULL) {
tqWarn("cannot found schema wrapper for table: suid: %ld, version %d, possibly dropped table",
- pHandle->msgIter.suid, pHandle->sver);
+ pHandle->msgIter.suid, pHandle->cachedSchemaVer);
/*ASSERT(0);*/
terrno = TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND;
return -1;
}
- pHandle->sver = sversion;
+ pHandle->cachedSchemaVer = sversion;
pHandle->cachedSchemaUid = pHandle->msgIter.suid;
}
@@ -256,3 +298,38 @@ int tqReadHandleRemoveTbUidList(STqReadHandle* pHandle, const SArray* tbUidList)
return 0;
}
+
+int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) {
+ void* pIter = NULL;
+ while (1) {
+ pIter = taosHashIterate(pTq->handles, pIter);
+ if (pIter == NULL) break;
+ STqHandle* pExec = (STqHandle*)pIter;
+ if (pExec->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
+ for (int32_t i = 0; i < 5; i++) {
+ int32_t code = qUpdateQualifiedTableId(pExec->execHandle.exec.execCol.task[i], tbUidList, isAdd);
+ ASSERT(code == 0);
+ }
+ } else if (pExec->execHandle.subType == TOPIC_SUB_TYPE__DB) {
+ if (!isAdd) {
+ int32_t sz = taosArrayGetSize(tbUidList);
+ for (int32_t i = 0; i < sz; i++) {
+ int64_t tbUid = *(int64_t*)taosArrayGet(tbUidList, i);
+ taosHashPut(pExec->execHandle.exec.execDb.pFilterOutTbUid, &tbUid, sizeof(int64_t), NULL, 0);
+ }
+ }
+ } else {
+ // tq update id
+ }
+ }
+ while (1) {
+ pIter = taosHashIterate(pTq->pStreamTasks, pIter);
+ if (pIter == NULL) break;
+ SStreamTask* pTask = (SStreamTask*)pIter;
+ if (pTask->inputType == STREAM_INPUT__DATA_SUBMIT) {
+ int32_t code = qUpdateQualifiedTableId(pTask->exec.executor, tbUidList, isAdd);
+ ASSERT(code == 0);
+ }
+ }
+ return 0;
+}
diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c
new file mode 100644
index 0000000000000000000000000000000000000000..5c0bf971fb8702ffbb73ed92feb8c97d1f4032d1
--- /dev/null
+++ b/source/dnode/vnode/src/tq/tqSink.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include "tq.h"
+
+void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data) {
+ const SArray* pRes = (const SArray*)data;
+ SVnode* pVnode = (SVnode*)vnode;
+
+ ASSERT(pTask->tbSink.pTSchema);
+ SSubmitReq* pReq = tdBlockToSubmit(pRes, pTask->tbSink.pTSchema, true, pTask->tbSink.stbUid,
+ pTask->tbSink.stbFullName, pVnode->config.vgId);
+ /*tPrintFixedSchemaSubmitReq(pReq, pTask->tbSink.pTSchema);*/
+ // build write msg
+ SRpcMsg msg = {
+ .msgType = TDMT_VND_SUBMIT,
+ .pCont = pReq,
+ .contLen = ntohl(pReq->length),
+ };
+
+ ASSERT(tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) == 0);
+}
diff --git a/source/dnode/vnode/src/tsdb/tsdbCommit.c b/source/dnode/vnode/src/tsdb/tsdbCommit.c
index 93ec6028f86e10dfcf91db7a79ed03e64d2f55db..3990fd7165aa87d82b1ed6b06e12aa47e02f1f32 100644
--- a/source/dnode/vnode/src/tsdb/tsdbCommit.c
+++ b/source/dnode/vnode/src/tsdb/tsdbCommit.c
@@ -84,8 +84,8 @@ static int tsdbMergeBlockData(SCommitH *pCommith, SCommitIter *pIter, SDataCols
static void tsdbResetCommitTable(SCommitH *pCommith);
static void tsdbCloseCommitFile(SCommitH *pCommith, bool hasError);
static bool tsdbCanAddSubBlock(SCommitH *pCommith, SBlock *pBlock, SMergeInfo *pInfo);
-static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIter *pCommitIter, SDataCols *pTarget,
- TSKEY maxKey, int maxRows, int8_t update);
+static void tsdbLoadAndMergeFromCache(STsdb *pTsdb, SDataCols *pDataCols, int *iter, SCommitIter *pCommitIter,
+ SDataCols *pTarget, TSKEY maxKey, int maxRows, int8_t update);
int tsdbWriteBlockIdx(SDFile *pHeadf, SArray *pIdxA, void **ppBuf);
int tsdbApplyRtnOnFSet(STsdb *pRepo, SDFileSet *pSet, SRtn *pRtn) {
@@ -108,7 +108,7 @@ int tsdbApplyRtnOnFSet(STsdb *pRepo, SDFileSet *pSet, SRtn *pRtn) {
tsdbInitDFileSet(pRepo, &nSet, did, pSet->fid, FS_TXN_VERSION(pfs));
if (tsdbCopyDFileSet(pSet, &nSet) < 0) {
- tsdbError("vgId:%d failed to copy FSET %d from level %d to level %d since %s", REPO_ID(pRepo), pSet->fid,
+ tsdbError("vgId:%d, failed to copy FSET %d from level %d to level %d since %s", REPO_ID(pRepo), pSet->fid,
TSDB_FSET_LEVEL(pSet), did.level, tstrerror(terrno));
return -1;
}
@@ -117,7 +117,7 @@ int tsdbApplyRtnOnFSet(STsdb *pRepo, SDFileSet *pSet, SRtn *pRtn) {
return -1;
}
- tsdbInfo("vgId:%d FSET %d is copied from level %d disk id %d to level %d disk id %d", REPO_ID(pRepo), pSet->fid,
+ tsdbInfo("vgId:%d, FSET %d is copied from level %d disk id %d to level %d disk id %d", REPO_ID(pRepo), pSet->fid,
TSDB_FSET_LEVEL(pSet), TSDB_FSET_ID(pSet), did.level, did.id);
} else {
// On a correct level
@@ -158,7 +158,7 @@ int tsdbCommit(STsdb *pRepo) {
tsdbSeekCommitIter(&commith, commith.rtn.minKey);
while ((pSet = tsdbFSIterNext(&(commith.fsIter)))) {
if (pSet->fid < commith.rtn.minFid) {
- tsdbInfo("vgId:%d FSET %d on level %d disk id %d expires, remove it", REPO_ID(pRepo), pSet->fid,
+ tsdbInfo("vgId:%d, FSET %d on level %d disk id %d expires, remove it", REPO_ID(pRepo), pSet->fid,
TSDB_FSET_LEVEL(pSet), TSDB_FSET_ID(pSet));
} else {
break;
@@ -224,23 +224,23 @@ void tsdbGetRtnSnap(STsdb *pRepo, SRtn *pRtn) {
pRtn->minFid = (int)(TSDB_KEY_FID(minKey, pCfg->days, pCfg->precision));
pRtn->midFid = (int)(TSDB_KEY_FID(midKey, pCfg->days, pCfg->precision));
pRtn->maxFid = (int)(TSDB_KEY_FID(maxKey, pCfg->days, pCfg->precision));
- tsdbDebug("vgId:%d now:%" PRId64 " minKey:%" PRId64 " minFid:%d, midFid:%d, maxFid:%d", REPO_ID(pRepo), now, minKey,
+ tsdbDebug("vgId:%d, now:%" PRId64 " minKey:%" PRId64 " minFid:%d, midFid:%d, maxFid:%d", REPO_ID(pRepo), now, minKey,
pRtn->minFid, pRtn->midFid, pRtn->maxFid);
}
static void tsdbStartCommit(STsdb *pRepo) {
STsdbMemTable *pMem = pRepo->imem;
- tsdbInfo("vgId:%d start to commit", REPO_ID(pRepo));
+ tsdbInfo("vgId:%d, start to commit", REPO_ID(pRepo));
tsdbStartFSTxn(pRepo, 0, 0);
}
static void tsdbEndCommit(STsdb *pTsdb, int eno) {
tsdbEndFSTxn(pTsdb);
- tsdbMemTableDestroy(pTsdb, pTsdb->imem);
+ tsdbMemTableDestroy(pTsdb->imem);
pTsdb->imem = NULL;
- tsdbInfo("vgId:%d commit over, %s", REPO_ID(pTsdb), (eno == TSDB_CODE_SUCCESS) ? "succeed" : "failed");
+ tsdbInfo("vgId:%d, commit over, %s", REPO_ID(pTsdb), (eno == TSDB_CODE_SUCCESS) ? "succeed" : "failed");
}
static int tsdbInitCommitH(SCommitH *pCommith, STsdb *pRepo) {
@@ -301,7 +301,8 @@ static void tsdbSeekCommitIter(SCommitH *pCommith, TSKEY key) {
SCommitIter *pIter = pCommith->iters + i;
if (pIter->pTable == NULL || pIter->pIter == NULL) continue;
- tsdbLoadDataFromCache(pIter->pTable, pIter->pIter, key - 1, INT32_MAX, NULL, NULL, 0, true, NULL);
+ tsdbLoadDataFromCache(TSDB_COMMIT_REPO(pCommith), pIter->pTable, pIter->pIter, key - 1, INT32_MAX, NULL, NULL, 0,
+ true, NULL);
}
}
@@ -412,7 +413,7 @@ static int tsdbCommitToFile(SCommitH *pCommith, SDFileSet *pSet, int fid) {
if (tsdbWriteBlockIdx(TSDB_COMMIT_HEAD_FILE(pCommith), pCommith->aBlkIdx, (void **)(&(TSDB_COMMIT_BUF(pCommith)))) <
0) {
- tsdbError("vgId:%d failed to write SBlockIdx part to FSET %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno));
+ tsdbError("vgId:%d, failed to write SBlockIdx part to FSET %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno));
tsdbCloseCommitFile(pCommith, true);
// revert the file change
tsdbApplyDFileSetChange(TSDB_COMMIT_WRITE_FSET(pCommith), pSet);
@@ -420,7 +421,7 @@ static int tsdbCommitToFile(SCommitH *pCommith, SDFileSet *pSet, int fid) {
}
if (tsdbUpdateDFileSetHeader(&(pCommith->wSet)) < 0) {
- tsdbError("vgId:%d failed to update FSET %d header since %s", REPO_ID(pRepo), fid, tstrerror(terrno));
+ tsdbError("vgId:%d, failed to update FSET %d header since %s", REPO_ID(pRepo), fid, tstrerror(terrno));
tsdbCloseCommitFile(pCommith, true);
// revert the file change
tsdbApplyDFileSetChange(TSDB_COMMIT_WRITE_FSET(pCommith), pSet);
@@ -465,7 +466,7 @@ static int tsdbCreateCommitIters(SCommitH *pCommith) {
pTbData = (STbData *)pNode->pData;
pCommitIter = pCommith->iters + i;
- pTSchema = metaGetTbTSchema(REPO_META(pRepo), pTbData->uid, 1); // TODO: schema version
+ pTSchema = metaGetTbTSchema(REPO_META(pRepo), pTbData->uid, -1);
if (pTSchema) {
pCommitIter->pIter = tSkipListCreateIter(pTbData->pData);
@@ -474,7 +475,8 @@ static int tsdbCreateCommitIters(SCommitH *pCommith) {
pCommitIter->pTable = (STable *)taosMemoryMalloc(sizeof(STable));
pCommitIter->pTable->uid = pTbData->uid;
pCommitIter->pTable->tid = pTbData->uid;
- pCommitIter->pTable->pSchema = pTSchema; // metaGetTbTSchema(REPO_META(pRepo), pTbData->uid, 0);
+ pCommitIter->pTable->pSchema = pTSchema;
+ pCommitIter->pTable->pCacheSchema = NULL;
}
}
tSkipListDestroyIter(pSlIter);
@@ -489,6 +491,7 @@ static void tsdbDestroyCommitIters(SCommitH *pCommith) {
tSkipListDestroyIter(pCommith->iters[i].pIter);
if (pCommith->iters[i].pTable) {
tdFreeSchema(pCommith->iters[i].pTable->pSchema);
+ tdFreeSchema(pCommith->iters[i].pTable->pCacheSchema);
taosMemoryFreeClear(pCommith->iters[i].pTable);
}
}
@@ -528,7 +531,7 @@ static int tsdbSetAndOpenCommitFile(SCommitH *pCommith, SDFileSet *pSet, int fid
return -1;
}
- tsdbDebug("vgId:%d FSET %d at level %d disk id %d is opened to read to commit", REPO_ID(pRepo), TSDB_FSET_FID(pSet),
+ tsdbDebug("vgId:%d, FSET %d at level %d disk id %d is opened to read to commit", REPO_ID(pRepo), TSDB_FSET_FID(pSet),
TSDB_FSET_LEVEL(pSet), TSDB_FSET_ID(pSet));
} else {
pCommith->isRFileSet = false;
@@ -540,7 +543,7 @@ static int tsdbSetAndOpenCommitFile(SCommitH *pCommith, SDFileSet *pSet, int fid
tsdbInitDFileSet(pRepo, pWSet, did, fid, FS_TXN_VERSION(REPO_FS(pRepo)));
if (tsdbCreateDFileSet(pRepo, pWSet, true) < 0) {
- tsdbError("vgId:%d failed to create FSET %d at level %d disk id %d since %s", REPO_ID(pRepo),
+ tsdbError("vgId:%d, failed to create FSET %d at level %d disk id %d since %s", REPO_ID(pRepo),
TSDB_FSET_FID(pWSet), TSDB_FSET_LEVEL(pWSet), TSDB_FSET_ID(pWSet), tstrerror(terrno));
if (pCommith->isRFileSet) {
tsdbCloseAndUnsetFSet(&(pCommith->readh));
@@ -551,7 +554,7 @@ static int tsdbSetAndOpenCommitFile(SCommitH *pCommith, SDFileSet *pSet, int fid
pCommith->isDFileSame = false;
pCommith->isLFileSame = false;
- tsdbDebug("vgId:%d FSET %d at level %d disk id %d is created to commit", REPO_ID(pRepo), TSDB_FSET_FID(pWSet),
+ tsdbDebug("vgId:%d, FSET %d at level %d disk id %d is created to commit", REPO_ID(pRepo), TSDB_FSET_FID(pWSet),
TSDB_FSET_LEVEL(pWSet), TSDB_FSET_ID(pWSet));
} else {
did.level = TSDB_FSET_LEVEL(pSet);
@@ -564,7 +567,7 @@ static int tsdbSetAndOpenCommitFile(SCommitH *pCommith, SDFileSet *pSet, int fid
SDFile *pWHeadf = TSDB_COMMIT_HEAD_FILE(pCommith);
tsdbInitDFile(pRepo, pWHeadf, did, fid, FS_TXN_VERSION(REPO_FS(pRepo)), TSDB_FILE_HEAD);
if (tsdbCreateDFile(pRepo, pWHeadf, true, TSDB_FILE_HEAD) < 0) {
- tsdbError("vgId:%d failed to create file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWHeadf),
+ tsdbError("vgId:%d, failed to create file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWHeadf),
tstrerror(terrno));
if (pCommith->isRFileSet) {
@@ -579,7 +582,7 @@ static int tsdbSetAndOpenCommitFile(SCommitH *pCommith, SDFileSet *pSet, int fid
tsdbInitDFileEx(pWDataf, pRDataf);
// if (tsdbOpenDFile(pWDataf, O_WRONLY) < 0) {
if (tsdbOpenDFile(pWDataf, TD_FILE_WRITE) < 0) {
- tsdbError("vgId:%d failed to open file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWDataf),
+ tsdbError("vgId:%d, failed to open file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWDataf),
tstrerror(terrno));
tsdbCloseDFileSet(pWSet);
@@ -600,7 +603,7 @@ static int tsdbSetAndOpenCommitFile(SCommitH *pCommith, SDFileSet *pSet, int fid
// if (tsdbOpenDFile(pWLastf, O_WRONLY) < 0) {
if (tsdbOpenDFile(pWLastf, TD_FILE_WRITE) < 0) {
- tsdbError("vgId:%d failed to open file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWLastf),
+ tsdbError("vgId:%d, failed to open file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWLastf),
tstrerror(terrno));
tsdbCloseDFileSet(pWSet);
@@ -615,7 +618,7 @@ static int tsdbSetAndOpenCommitFile(SCommitH *pCommith, SDFileSet *pSet, int fid
pCommith->isLFileSame = false;
if (tsdbCreateDFile(pRepo, pWLastf, true, TSDB_FILE_LAST) < 0) {
- tsdbError("vgId:%d failed to create file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWLastf),
+ tsdbError("vgId:%d, failed to create file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWLastf),
tstrerror(terrno));
tsdbCloseDFileSet(pWSet);
@@ -632,11 +635,11 @@ static int tsdbSetAndOpenCommitFile(SCommitH *pCommith, SDFileSet *pSet, int fid
SDFile *pWSmadF = TSDB_COMMIT_SMAD_FILE(pCommith);
if (!taosCheckExistFile(TSDB_FILE_FULL_NAME(pRSmadF))) {
- tsdbDebug("vgId:%d create data file %s as not exist", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pRSmadF));
+ tsdbDebug("vgId:%d, create data file %s as not exist", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pRSmadF));
tsdbInitDFile(pRepo, pWSmadF, did, fid, FS_TXN_VERSION(REPO_FS(pRepo)), TSDB_FILE_SMAD);
if (tsdbCreateDFile(pRepo, pWSmadF, true, TSDB_FILE_SMAD) < 0) {
- tsdbError("vgId:%d failed to create file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWSmadF),
+ tsdbError("vgId:%d, failed to create file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWSmadF),
tstrerror(terrno));
tsdbCloseDFileSet(pWSet);
@@ -649,7 +652,7 @@ static int tsdbSetAndOpenCommitFile(SCommitH *pCommith, SDFileSet *pSet, int fid
} else {
tsdbInitDFileEx(pWSmadF, pRSmadF);
if (tsdbOpenDFile(pWSmadF, O_RDWR) < 0) {
- tsdbError("vgId:%d failed to open file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWSmadF),
+ tsdbError("vgId:%d, failed to open file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWSmadF),
tstrerror(terrno));
tsdbCloseDFileSet(pWSet);
@@ -668,7 +671,7 @@ static int tsdbSetAndOpenCommitFile(SCommitH *pCommith, SDFileSet *pSet, int fid
if ((pCommith->isLFileSame) && taosCheckExistFile(TSDB_FILE_FULL_NAME(pRSmalF))) {
tsdbInitDFileEx(pWSmalF, pRSmalF);
if (tsdbOpenDFile(pWSmalF, O_RDWR) < 0) {
- tsdbError("vgId:%d failed to open file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWSmalF),
+ tsdbError("vgId:%d, failed to open file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWSmalF),
tstrerror(terrno));
tsdbCloseDFileSet(pWSet);
@@ -679,11 +682,11 @@ static int tsdbSetAndOpenCommitFile(SCommitH *pCommith, SDFileSet *pSet, int fid
}
}
} else {
- tsdbDebug("vgId:%d create data file %s as not exist", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pRSmalF));
+ tsdbDebug("vgId:%d, create data file %s as not exist", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pRSmalF));
tsdbInitDFile(pRepo, pWSmalF, did, fid, FS_TXN_VERSION(REPO_FS(pRepo)), TSDB_FILE_SMAL);
if (tsdbCreateDFile(pRepo, pWSmalF, true, TSDB_FILE_SMAL) < 0) {
- tsdbError("vgId:%d failed to create file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWSmalF),
+ tsdbError("vgId:%d, failed to create file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWSmalF),
tstrerror(terrno));
tsdbCloseDFileSet(pWSet);
@@ -884,7 +887,7 @@ static int tsdbCommitToTable(SCommitH *pCommith, int tid) {
}
if (tsdbWriteBlockInfo(pCommith) < 0) {
- tsdbError("vgId:%d failed to write SBlockInfo part into file %s since %s", TSDB_COMMIT_REPO_ID(pCommith),
+ tsdbError("vgId:%d, failed to write SBlockInfo part into file %s since %s", TSDB_COMMIT_REPO_ID(pCommith),
TSDB_FILE_FULL_NAME(TSDB_COMMIT_HEAD_FILE(pCommith)), tstrerror(terrno));
return -1;
}
@@ -913,7 +916,7 @@ static int tsdbMoveBlkIdx(SCommitH *pCommith, SBlockIdx *pIdx) {
while (bidx < nBlocks) {
if (!pTSchema && !tsdbCommitIsSameFile(pCommith, bidx)) {
// Set commit table
- pTSchema = metaGetTbTSchema(REPO_META(pTsdb), pIdx->uid, 1); // TODO: schema version
+ pTSchema = metaGetTbTSchema(REPO_META(pTsdb), pIdx->uid, -1); // TODO: schema version
if (!pTSchema) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
@@ -926,7 +929,7 @@ static int tsdbMoveBlkIdx(SCommitH *pCommith, SBlockIdx *pIdx) {
}
if (tsdbMoveBlock(pCommith, bidx) < 0) {
- tsdbError("vgId:%d failed to move block into file %s since %s", TSDB_COMMIT_REPO_ID(pCommith),
+ tsdbError("vgId:%d, failed to move block into file %s since %s", TSDB_COMMIT_REPO_ID(pCommith),
TSDB_FILE_FULL_NAME(TSDB_COMMIT_HEAD_FILE(pCommith)), tstrerror(terrno));
taosMemoryFreeClear(pTSchema);
return -1;
@@ -936,7 +939,7 @@ static int tsdbMoveBlkIdx(SCommitH *pCommith, SBlockIdx *pIdx) {
}
if (tsdbWriteBlockInfo(pCommith) < 0) {
- tsdbError("vgId:%d failed to write SBlockInfo part into file %s since %s", TSDB_COMMIT_REPO_ID(pCommith),
+ tsdbError("vgId:%d, failed to write SBlockInfo part into file %s since %s", TSDB_COMMIT_REPO_ID(pCommith),
TSDB_FILE_FULL_NAME(TSDB_COMMIT_HEAD_FILE(pCommith)), tstrerror(terrno));
taosMemoryFreeClear(pTSchema);
return -1;
@@ -947,7 +950,7 @@ static int tsdbMoveBlkIdx(SCommitH *pCommith, SBlockIdx *pIdx) {
}
static int tsdbSetCommitTable(SCommitH *pCommith, STable *pTable) {
- STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1);
+ STSchema *pSchema = tsdbGetTableSchemaImpl(TSDB_COMMIT_REPO(pCommith), pTable, false, false, -1);
pCommith->pTable = pTable;
@@ -1206,7 +1209,7 @@ int tsdbWriteBlockImpl(STsdb *pRepo, STable *pTable, SDFile *pDFile, SDFile *pDF
pBlock->blkVer = SBlockVerLatest;
pBlock->aggrOffset = (uint64_t)offsetAggr;
- tsdbDebug("vgId:%d uid:%" PRId64 " a block of data is written to file %s, offset %" PRId64
+ tsdbDebug("vgId:%d, uid:%" PRId64 " a block of data is written to file %s, offset %" PRId64
" numOfRows %d len %d numOfCols %" PRId16 " keyFirst %" PRId64 " keyLast %" PRId64,
REPO_ID(pRepo), TABLE_UID(pTable), TSDB_FILE_FULL_NAME(pDFile), offset, rowsToWrite, pBlock->len,
pBlock->numOfCols, pBlock->keyFirst, pBlock->keyLast);
@@ -1254,8 +1257,8 @@ static int tsdbCommitMemData(SCommitH *pCommith, SCommitIter *pIter, TSKEY keyLi
SBlock block;
while (true) {
- tsdbLoadDataFromCache(pIter->pTable, pIter->pIter, keyLimit, defaultRows, pCommith->pDataCols, NULL, 0,
- pCfg->update, &mInfo);
+ tsdbLoadDataFromCache(TSDB_COMMIT_REPO(pCommith), pIter->pTable, pIter->pIter, keyLimit, defaultRows,
+ pCommith->pDataCols, NULL, 0, pCfg->update, &mInfo);
if (pCommith->pDataCols->numOfRows <= 0) break;
@@ -1298,8 +1301,9 @@ static int tsdbMergeMemData(SCommitH *pCommith, SCommitIter *pIter, int bidx) {
SSkipListIterator titer = *(pIter->pIter);
if (tsdbLoadBlockDataCols(&(pCommith->readh), pBlock, NULL, &colId, 1, false) < 0) return -1;
- tsdbLoadDataFromCache(pIter->pTable, &titer, keyLimit, INT32_MAX, NULL, pCommith->readh.pDCols[0]->cols[0].pData,
- pCommith->readh.pDCols[0]->numOfRows, pCfg->update, &mInfo);
+ tsdbLoadDataFromCache(TSDB_COMMIT_REPO(pCommith), pIter->pTable, &titer, keyLimit, INT32_MAX, NULL,
+ pCommith->readh.pDCols[0]->cols[0].pData, pCommith->readh.pDCols[0]->numOfRows, pCfg->update,
+ &mInfo);
if (mInfo.nOperations == 0) {
// no new data to insert (all updates denied)
@@ -1313,9 +1317,9 @@ static int tsdbMergeMemData(SCommitH *pCommith, SCommitIter *pIter, int bidx) {
*(pIter->pIter) = titer;
} else if (tsdbCanAddSubBlock(pCommith, pBlock, &mInfo)) {
// Add a sub-block
- tsdbLoadDataFromCache(pIter->pTable, pIter->pIter, keyLimit, INT32_MAX, pCommith->pDataCols,
- pCommith->readh.pDCols[0]->cols[0].pData, pCommith->readh.pDCols[0]->numOfRows, pCfg->update,
- &mInfo);
+ tsdbLoadDataFromCache(TSDB_COMMIT_REPO(pCommith), pIter->pTable, pIter->pIter, keyLimit, INT32_MAX,
+ pCommith->pDataCols, pCommith->readh.pDCols[0]->cols[0].pData,
+ pCommith->readh.pDCols[0]->numOfRows, pCfg->update, &mInfo);
if (pBlock->last) {
pDFile = TSDB_COMMIT_LAST_FILE(pCommith);
} else {
@@ -1420,8 +1424,8 @@ static int tsdbMergeBlockData(SCommitH *pCommith, SCommitIter *pIter, SDataCols
int biter = 0;
while (true) {
- tsdbLoadAndMergeFromCache(pCommith->readh.pDCols[0], &biter, pIter, pCommith->pDataCols, keyLimit, defaultRows,
- pCfg->update);
+ tsdbLoadAndMergeFromCache(TSDB_COMMIT_REPO(pCommith), pCommith->readh.pDCols[0], &biter, pIter, pCommith->pDataCols,
+ keyLimit, defaultRows, pCfg->update);
if (pCommith->pDataCols->numOfRows == 0) break;
@@ -1445,8 +1449,8 @@ static int tsdbMergeBlockData(SCommitH *pCommith, SCommitIter *pIter, SDataCols
return 0;
}
-static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIter *pCommitIter, SDataCols *pTarget,
- TSKEY maxKey, int maxRows, int8_t update) {
+static void tsdbLoadAndMergeFromCache(STsdb *pTsdb, SDataCols *pDataCols, int *iter, SCommitIter *pCommitIter,
+ SDataCols *pTarget, TSKEY maxKey, int maxRows, int8_t update) {
TSKEY key1 = INT64_MAX;
TSKEY key2 = INT64_MAX;
TSKEY lastKey = TSKEY_INITIAL_VAL;
@@ -1487,7 +1491,7 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt
++(*iter);
} else if (key1 > key2) {
if (pSchema == NULL || schemaVersion(pSchema) != TD_ROW_SVER(row)) {
- pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, TD_ROW_SVER(row));
+ pSchema = tsdbGetTableSchemaImpl(pTsdb, pCommitIter->pTable, false, false, TD_ROW_SVER(row));
ASSERT(pSchema != NULL);
}
@@ -1527,7 +1531,7 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt
if (TD_SUPPORT_UPDATE(update)) {
// copy mem data(Multi-Version)
if (pSchema == NULL || schemaVersion(pSchema) != TD_ROW_SVER(row)) {
- pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, TD_ROW_SVER(row));
+ pSchema = tsdbGetTableSchemaImpl(pTsdb, pCommitIter->pTable, false, false, TD_ROW_SVER(row));
ASSERT(pSchema != NULL);
}
diff --git a/source/dnode/vnode/src/tsdb/tsdbDelete.c b/source/dnode/vnode/src/tsdb/tsdbDelete.c
new file mode 100644
index 0000000000000000000000000000000000000000..6dea4a4e57392be988126c579648f39a8270b9bf
--- /dev/null
+++ b/source/dnode/vnode/src/tsdb/tsdbDelete.c
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
\ No newline at end of file
diff --git a/source/dnode/vnode/src/tsdb/tsdbFS.c b/source/dnode/vnode/src/tsdb/tsdbFS.c
index 6dfd73158ea15b3f36b23158b0de54a7a904725c..c0ca2f9594e4f30038ad2ad6636c97263805de00 100644
--- a/source/dnode/vnode/src/tsdb/tsdbFS.c
+++ b/source/dnode/vnode/src/tsdb/tsdbFS.c
@@ -260,7 +260,7 @@ int tsdbOpenFS(STsdb *pRepo) {
tsdbGetRtnSnap(pRepo, &pRepo->rtn);
if (taosCheckExistFile(current)) {
if (tsdbOpenFSFromCurrent(pRepo) < 0) {
- tsdbError("vgId:%d failed to open FS since %s", REPO_ID(pRepo), tstrerror(terrno));
+ tsdbError("vgId:%d, failed to open FS since %s", REPO_ID(pRepo), tstrerror(terrno));
return -1;
}
@@ -271,19 +271,19 @@ int tsdbOpenFS(STsdb *pRepo) {
} else {
// should skip expired fileset inside of the function
if (tsdbRestoreCurrent(pRepo) < 0) {
- tsdbError("vgId:%d failed to restore current file since %s", REPO_ID(pRepo), tstrerror(terrno));
+ tsdbError("vgId:%d, failed to restore current file since %s", REPO_ID(pRepo), tstrerror(terrno));
return -1;
}
}
if (tsdbScanAndTryFixFS(pRepo) < 0) {
- tsdbError("vgId:%d failed to scan and fix FS since %s", REPO_ID(pRepo), tstrerror(terrno));
+ tsdbError("vgId:%d, failed to scan and fix FS since %s", REPO_ID(pRepo), tstrerror(terrno));
return -1;
}
// // Load meta cache if has meta file
// if ((!(pRepo->state & TSDB_STATE_BAD_META)) && tsdbLoadMetaCache(pRepo, true) < 0) {
- // tsdbError("vgId:%d failed to open FS while loading meta cache since %s", REPO_ID(pRepo), tstrerror(terrno));
+ // tsdbError("vgId:%d, failed to open FS while loading meta cache since %s", REPO_ID(pRepo), tstrerror(terrno));
// return -1;
// }
@@ -607,7 +607,7 @@ static int tsdbOpenFSFromCurrent(STsdb *pRepo) {
// current file exists, try to recover
pFile = taosOpenFile(current, TD_FILE_READ);
if (pFile == NULL) {
- tsdbError("vgId:%d failed to open file %s since %s", REPO_ID(pRepo), current, strerror(errno));
+ tsdbError("vgId:%d, failed to open file %s since %s", REPO_ID(pRepo), current, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
@@ -618,20 +618,20 @@ static int tsdbOpenFSFromCurrent(STsdb *pRepo) {
int nread = (int)taosReadFile(pFile, buffer, TSDB_FILE_HEAD_SIZE);
if (nread < 0) {
- tsdbError("vgId:%d failed to read %d bytes from file %s since %s", REPO_ID(pRepo), TSDB_FILENAME_LEN, current,
+ tsdbError("vgId:%d, failed to read %d bytes from file %s since %s", REPO_ID(pRepo), TSDB_FILENAME_LEN, current,
strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
if (nread < TSDB_FILE_HEAD_SIZE) {
- tsdbError("vgId:%d failed to read header of file %s, read bytes:%d", REPO_ID(pRepo), current, nread);
+ tsdbError("vgId:%d, failed to read header of file %s, read bytes:%d", REPO_ID(pRepo), current, nread);
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
goto _err;
}
if (!taosCheckChecksumWhole((uint8_t *)buffer, TSDB_FILE_HEAD_SIZE)) {
- tsdbError("vgId:%d header of file %s failed checksum check", REPO_ID(pRepo), current);
+ tsdbError("vgId:%d, header of file %s failed checksum check", REPO_ID(pRepo), current);
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
goto _err;
}
@@ -652,19 +652,19 @@ static int tsdbOpenFSFromCurrent(STsdb *pRepo) {
nread = (int)taosReadFile(pFile, buffer, fsheader.len);
if (nread < 0) {
- tsdbError("vgId:%d failed to read file %s since %s", REPO_ID(pRepo), current, strerror(errno));
+ tsdbError("vgId:%d, failed to read file %s since %s", REPO_ID(pRepo), current, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
if (nread < fsheader.len) {
- tsdbError("vgId:%d failed to read %d bytes from file %s", REPO_ID(pRepo), fsheader.len, current);
+ tsdbError("vgId:%d, failed to read %d bytes from file %s", REPO_ID(pRepo), fsheader.len, current);
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
goto _err;
}
if (!taosCheckChecksumWhole((uint8_t *)buffer, fsheader.len)) {
- tsdbError("vgId:%d file %s is corrupted since wrong checksum", REPO_ID(pRepo), current);
+ tsdbError("vgId:%d, file %s is corrupted since wrong checksum", REPO_ID(pRepo), current);
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
goto _err;
}
@@ -694,7 +694,7 @@ static int tsdbScanAndTryFixFS(STsdb *pRepo) {
SFSStatus *pStatus = pfs->cstatus;
// if (tsdbScanAndTryFixMFile(pRepo) < 0) {
- // tsdbError("vgId:%d failed to fix MFile since %s", REPO_ID(pRepo), tstrerror(terrno));
+ // tsdbError("vgId:%d, failed to fix MFile since %s", REPO_ID(pRepo), tstrerror(terrno));
// return -1;
// }
@@ -704,7 +704,7 @@ static int tsdbScanAndTryFixFS(STsdb *pRepo) {
SDFileSet *pSet = (SDFileSet *)taosArrayGet(pStatus->df, i);
if (tsdbScanAndTryFixDFileSet(pRepo, pSet) < 0) {
- tsdbError("vgId:%d failed to fix MFile since %s", REPO_ID(pRepo), tstrerror(terrno));
+ tsdbError("vgId:%d, failed to fix MFile since %s", REPO_ID(pRepo), tstrerror(terrno));
return -1;
}
}
@@ -724,7 +724,7 @@ static int tsdbScanRootDir(STsdb *pRepo) {
tsdbGetRootDir(REPO_ID(pRepo), pRepo->dir, rootDir);
STfsDir *tdir = tfsOpendir(REPO_TFS(pRepo), rootDir);
if (tdir == NULL) {
- tsdbError("vgId:%d failed to open directory %s since %s", REPO_ID(pRepo), rootDir, tstrerror(terrno));
+ tsdbError("vgId:%d, failed to open directory %s since %s", REPO_ID(pRepo), rootDir, tstrerror(terrno));
return -1;
}
@@ -741,7 +741,7 @@ static int tsdbScanRootDir(STsdb *pRepo) {
// }
(void)tfsRemoveFile(pf);
- tsdbDebug("vgId:%d invalid file %s is removed", REPO_ID(pRepo), pf->aname);
+ tsdbDebug("vgId:%d, invalid file %s is removed", REPO_ID(pRepo), pf->aname);
}
tfsClosedir(tdir);
@@ -758,7 +758,7 @@ static int tsdbScanDataDir(STsdb *pRepo) {
tsdbGetDataDir(REPO_ID(pRepo), pRepo->dir, dataDir);
STfsDir *tdir = tfsOpendir(REPO_TFS(pRepo), dataDir);
if (tdir == NULL) {
- tsdbError("vgId:%d failed to open directory %s since %s", REPO_ID(pRepo), dataDir, tstrerror(terrno));
+ tsdbError("vgId:%d, failed to open directory %s since %s", REPO_ID(pRepo), dataDir, tstrerror(terrno));
return -1;
}
@@ -767,7 +767,7 @@ static int tsdbScanDataDir(STsdb *pRepo) {
if (!tsdbIsTFileInFS(pfs, pf)) {
(void)tfsRemoveFile(pf);
- tsdbDebug("vgId:%d invalid file %s is removed", REPO_ID(pRepo), pf->aname);
+ tsdbDebug("vgId:%d, invalid file %s is removed", REPO_ID(pRepo), pf->aname);
}
}
@@ -811,7 +811,7 @@ static int tsdbRestoreDFileSet(STsdb *pRepo) {
fArray = taosArrayInit(1024, sizeof(STfsFile));
if (fArray == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
- tsdbError("vgId:%d failed to restore DFileSet while open directory %s since %s", REPO_ID(pRepo), dataDir,
+ tsdbError("vgId:%d, failed to restore DFileSet while open directory %s since %s", REPO_ID(pRepo), dataDir,
tstrerror(terrno));
regfree(®ex);
return -1;
@@ -819,7 +819,7 @@ static int tsdbRestoreDFileSet(STsdb *pRepo) {
tdir = tfsOpendir(REPO_TFS(pRepo), dataDir);
if (tdir == NULL) {
- tsdbError("vgId:%d failed to restore DFileSet while open directory %s since %s", REPO_ID(pRepo), dataDir,
+ tsdbError("vgId:%d, failed to restore DFileSet while open directory %s since %s", REPO_ID(pRepo), dataDir,
tstrerror(terrno));
taosArrayDestroy(fArray);
regfree(®ex);
@@ -840,12 +840,12 @@ static int tsdbRestoreDFileSet(STsdb *pRepo) {
}
} else if (code == REG_NOMATCH) {
// Not match
- tsdbInfo("vgId:%d invalid file %s exists, remove it", REPO_ID(pRepo), pf->aname);
+ tsdbInfo("vgId:%d, invalid file %s exists, remove it", REPO_ID(pRepo), pf->aname);
(void)tfsRemoveFile(pf);
continue;
} else {
// Has other error
- tsdbError("vgId:%d failed to restore DFileSet Array while run regexec since %s", REPO_ID(pRepo), strerror(code));
+ tsdbError("vgId:%d, failed to restore DFileSet Array while run regexec since %s", REPO_ID(pRepo), strerror(code));
terrno = TAOS_SYSTEM_ERROR(code);
tfsClosedir(tdir);
taosArrayDestroy(fArray);
@@ -876,7 +876,7 @@ static int tsdbRestoreDFileSet(STsdb *pRepo) {
SDFile *pDFile = TSDB_DFILE_IN_SET(&fset, ftype);
if (index >= taosArrayGetSize(fArray)) {
- tsdbError("vgId:%d incomplete DFileSet, fid:%d", REPO_ID(pRepo), fset.fid);
+ tsdbError("vgId:%d, incomplete DFileSet, fid:%d", REPO_ID(pRepo), fset.fid);
taosArrayDestroy(fArray);
return -1;
}
@@ -902,14 +902,14 @@ static int tsdbRestoreDFileSet(STsdb *pRepo) {
fset.fid = tfid;
} else {
if (tfid != fset.fid) {
- tsdbError("vgId:%d incomplete dFileSet, fid:%d", REPO_ID(pRepo), fset.fid);
+ tsdbError("vgId:%d, incomplete dFileSet, fid:%d", REPO_ID(pRepo), fset.fid);
taosArrayDestroy(fArray);
return -1;
}
}
if (ttype != ftype) {
- tsdbError("vgId:%d incomplete dFileSet, fid:%d", REPO_ID(pRepo), fset.fid);
+ tsdbError("vgId:%d, incomplete dFileSet, fid:%d", REPO_ID(pRepo), fset.fid);
taosArrayDestroy(fArray);
return -1;
}
@@ -918,14 +918,14 @@ static int tsdbRestoreDFileSet(STsdb *pRepo) {
// if (tsdbOpenDFile(pDFile, O_RDONLY) < 0) {
if (tsdbOpenDFile(pDFile, TD_FILE_READ) < 0) {
- tsdbError("vgId:%d failed to open DFile %s since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile),
+ tsdbError("vgId:%d, failed to open DFile %s since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile),
tstrerror(terrno));
taosArrayDestroy(fArray);
return -1;
}
if (tsdbLoadDFileHeader(pDFile, &(pDFile->info)) < 0) {
- tsdbError("vgId:%d failed to load DFile %s header since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile),
+ tsdbError("vgId:%d, failed to load DFile %s header since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile),
tstrerror(terrno));
taosArrayDestroy(fArray);
return -1;
@@ -943,7 +943,7 @@ static int tsdbRestoreDFileSet(STsdb *pRepo) {
if (pDFile->info.size != file_size) {
int64_t tfsize = pDFile->info.size;
pDFile->info.size = file_size;
- tsdbInfo("vgId:%d file %s header size is changed from %" PRId64 " to %" PRId64, REPO_ID(pRepo),
+ tsdbInfo("vgId:%d, file %s header size is changed from %" PRId64 " to %" PRId64, REPO_ID(pRepo),
TSDB_FILE_FULL_NAME(pDFile), tfsize, pDFile->info.size);
}
}
@@ -952,7 +952,7 @@ static int tsdbRestoreDFileSet(STsdb *pRepo) {
index++;
}
- tsdbInfo("vgId:%d FSET %d is restored", REPO_ID(pRepo), fset.fid);
+ tsdbInfo("vgId:%d, FSET %d is restored", REPO_ID(pRepo), fset.fid);
taosArrayPush(pfs->cstatus->df, &fset);
}
@@ -965,18 +965,18 @@ static int tsdbRestoreDFileSet(STsdb *pRepo) {
static int tsdbRestoreCurrent(STsdb *pRepo) {
// // Loop to recover mfile
// if (tsdbRestoreMeta(pRepo) < 0) {
- // tsdbError("vgId:%d failed to restore current since %s", REPO_ID(pRepo), tstrerror(terrno));
+ // tsdbError("vgId:%d, failed to restore current since %s", REPO_ID(pRepo), tstrerror(terrno));
// return -1;
// }
// Loop to recover dfile set
if (tsdbRestoreDFileSet(pRepo) < 0) {
- tsdbError("vgId:%d failed to restore DFileSet since %s", REPO_ID(pRepo), tstrerror(terrno));
+ tsdbError("vgId:%d, failed to restore DFileSet since %s", REPO_ID(pRepo), tstrerror(terrno));
return -1;
}
if (tsdbSaveFSStatus(pRepo, pRepo->fs->cstatus) < 0) {
- tsdbError("vgId:%d failed to restore current since %s", REPO_ID(pRepo), tstrerror(terrno));
+ tsdbError("vgId:%d, failed to restore current since %s", REPO_ID(pRepo), tstrerror(terrno));
return -1;
}
@@ -1024,11 +1024,11 @@ static void tsdbScanAndTryFixDFilesHeader(STsdb *pRepo, int32_t *nExpired) {
if (fset.fid < pRepo->rtn.minFid) {
++*nExpired;
}
- tsdbDebug("vgId:%d scan DFileSet %d header", REPO_ID(pRepo), fset.fid);
+ tsdbDebug("vgId:%d, scan DFileSet %d header", REPO_ID(pRepo), fset.fid);
// if (tsdbOpenDFileSet(&fset, O_RDWR) < 0) {
if (tsdbOpenDFileSet(&fset, TD_FILE_WRITE | TD_FILE_READ) < 0) {
- tsdbError("vgId:%d failed to open DFileSet %d since %s, continue", REPO_ID(pRepo), fset.fid, tstrerror(terrno));
+ tsdbError("vgId:%d, failed to open DFileSet %d since %s, continue", REPO_ID(pRepo), fset.fid, tstrerror(terrno));
continue;
}
@@ -1038,14 +1038,14 @@ static void tsdbScanAndTryFixDFilesHeader(STsdb *pRepo, int32_t *nExpired) {
if ((tsdbLoadDFileHeader(pDFile, &info) < 0) || pDFile->info.size != info.size ||
pDFile->info.magic != info.magic) {
if (tsdbUpdateDFileHeader(pDFile) < 0) {
- tsdbError("vgId:%d failed to update DFile header of %s since %s, continue", REPO_ID(pRepo),
+ tsdbError("vgId:%d, failed to update DFile header of %s since %s, continue", REPO_ID(pRepo),
TSDB_FILE_FULL_NAME(pDFile), tstrerror(terrno));
} else {
- tsdbInfo("vgId:%d DFile header of %s is updated", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile));
+ tsdbInfo("vgId:%d, DFile header of %s is updated", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile));
TSDB_FILE_FSYNC(pDFile);
}
} else {
- tsdbDebug("vgId:%d DFile header of %s is correct", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile));
+ tsdbDebug("vgId:%d, DFile header of %s is correct", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile));
}
}
diff --git a/source/dnode/vnode/src/tsdb/tsdbFile.c b/source/dnode/vnode/src/tsdb/tsdbFile.c
index 04be2a48deb36ea343a55fa66a74a03ad79acce9..4198a94655835dcf6a33dfbf399767add97b6365 100644
--- a/source/dnode/vnode/src/tsdb/tsdbFile.c
+++ b/source/dnode/vnode/src/tsdb/tsdbFile.c
@@ -181,7 +181,7 @@ static int tsdbScanAndTryFixDFile(STsdb *pRepo, SDFile *pDFile) {
tsdbInitDFileEx(&df, pDFile);
if (!taosCheckExistFile(TSDB_FILE_FULL_NAME(pDFile))) {
- tsdbError("vgId:%d data file %s not exit, report to upper layer to fix it", REPO_ID(pRepo),
+ tsdbError("vgId:%d, data file %s not exit, report to upper layer to fix it", REPO_ID(pRepo),
TSDB_FILE_FULL_NAME(pDFile));
// pRepo->state |= TSDB_STATE_BAD_DATA;
TSDB_FILE_SET_STATE(pDFile, TSDB_FILE_STATE_BAD);
@@ -211,17 +211,17 @@ static int tsdbScanAndTryFixDFile(STsdb *pRepo, SDFile *pDFile) {
}
tsdbCloseDFile(&df);
- tsdbInfo("vgId:%d file %s is truncated from %" PRId64 " to %" PRId64, REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile),
+ tsdbInfo("vgId:%d, file %s is truncated from %" PRId64 " to %" PRId64, REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile),
file_size, pDFile->info.size);
} else if (pDFile->info.size > file_size) {
- tsdbError("vgId:%d data file %s has wrong size %" PRId64 " expected %" PRId64 ", report to upper layer to fix it",
+ tsdbError("vgId:%d, data file %s has wrong size %" PRId64 " expected %" PRId64 ", report to upper layer to fix it",
REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile), file_size, pDFile->info.size);
// pRepo->state |= TSDB_STATE_BAD_DATA;
TSDB_FILE_SET_STATE(pDFile, TSDB_FILE_STATE_BAD);
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
return 0;
} else {
- tsdbDebug("vgId:%d file %s passes the scan", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile));
+ tsdbDebug("vgId:%d, file %s passes the scan", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile));
}
return 0;
diff --git a/source/dnode/vnode/src/tsdb/tsdbMemTable.c b/source/dnode/vnode/src/tsdb/tsdbMemTable.c
index d8426db12719f4bc27915c07b6ec9e5235b5e47c..350e7235413cbca9f0dd0bdb1df0a938b389a5f8 100644
--- a/source/dnode/vnode/src/tsdb/tsdbMemTable.c
+++ b/source/dnode/vnode/src/tsdb/tsdbMemTable.c
@@ -20,7 +20,8 @@ static void tsdbFreeTbData(STbData *pTbData);
static char *tsdbGetTsTupleKey(const void *data);
static int tsdbTbDataComp(const void *arg1, const void *arg2);
static char *tsdbTbDataGetUid(const void *arg);
-static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema **ppSchema, STSRow *row, bool merge);
+static int tsdbAppendTableRowToCols(STsdb *pTsdb, STable *pTable, SDataCols *pCols, STSchema **ppSchema, STSRow *row,
+ bool merge);
int tsdbMemTableCreate(STsdb *pTsdb, STsdbMemTable **ppMemTable) {
STsdbMemTable *pMemTable;
@@ -59,7 +60,7 @@ int tsdbMemTableCreate(STsdb *pTsdb, STsdbMemTable **ppMemTable) {
return 0;
}
-void tsdbMemTableDestroy(STsdb *pTsdb, STsdbMemTable *pMemTable) {
+void tsdbMemTableDestroy(STsdbMemTable *pMemTable) {
if (pMemTable) {
taosHashCleanup(pMemTable->pHashIdx);
SSkipListIterator *pIter = tSkipListCreateIter(pMemTable->pSlIdx);
@@ -88,8 +89,8 @@ void tsdbMemTableDestroy(STsdb *pTsdb, STsdbMemTable *pMemTable) {
*
* The function tries to procceed AS MUCH AS POSSIBLE.
*/
-int tsdbLoadDataFromCache(STable *pTable, SSkipListIterator *pIter, TSKEY maxKey, int maxRowsToRead, SDataCols *pCols,
- TKEY *filterKeys, int nFilterKeys, bool keepDup, SMergeInfo *pMergeInfo) {
+int tsdbLoadDataFromCache(STsdb *pTsdb, STable *pTable, SSkipListIterator *pIter, TSKEY maxKey, int maxRowsToRead,
+ SDataCols *pCols, TKEY *filterKeys, int nFilterKeys, bool keepDup, SMergeInfo *pMergeInfo) {
ASSERT(maxRowsToRead > 0 && nFilterKeys >= 0);
if (pIter == NULL) return 0;
STSchema *pSchema = NULL;
@@ -141,69 +142,6 @@ int tsdbLoadDataFromCache(STable *pTable, SSkipListIterator *pIter, TSKEY maxKey
} else {
fKey = tdGetKey(filterKeys[filterIter]);
}
-#if 0
- } else if (fKey > rowKey) {
- if (isRowDel) {
- pMergeInfo->rowsDeleteFailed++;
- } else {
- if (pMergeInfo->rowsInserted - pMergeInfo->rowsDeleteSucceed >= maxRowsToRead) break;
- if (pCols && pMergeInfo->nOperations >= pCols->maxPoints) break;
-
- pMergeInfo->rowsInserted++;
- pMergeInfo->nOperations++;
- pMergeInfo->keyFirst = TMIN(pMergeInfo->keyFirst, rowKey);
- pMergeInfo->keyLast = TMAX(pMergeInfo->keyLast, rowKey);
- tsdbAppendTableRowToCols(pTable, pCols, &pSchema, row);
- }
-
- tSkipListIterNext(pIter);
- row = tsdbNextIterRow(pIter);
- if (row == NULL || TD_ROW_KEY(row) > maxKey) {
- rowKey = INT64_MAX;
- isRowDel = false;
- } else {
- rowKey = TD_ROW_KEY(row);
- isRowDel = TD_ROW_IS_DELETED(row);
- }
- } else {
- if (isRowDel) {
- ASSERT(!keepDup);
- if (pCols && pMergeInfo->nOperations >= pCols->maxPoints) break;
- pMergeInfo->rowsDeleteSucceed++;
- pMergeInfo->nOperations++;
- tsdbAppendTableRowToCols(pTable, pCols, &pSchema, row);
- } else {
- if (keepDup) {
- if (pCols && pMergeInfo->nOperations >= pCols->maxPoints) break;
- pMergeInfo->rowsUpdated++;
- pMergeInfo->nOperations++;
- pMergeInfo->keyFirst = TMIN(pMergeInfo->keyFirst, rowKey);
- pMergeInfo->keyLast = TMAX(pMergeInfo->keyLast, rowKey);
- tsdbAppendTableRowToCols(pTable, pCols, &pSchema, row);
- } else {
- pMergeInfo->keyFirst = TMIN(pMergeInfo->keyFirst, fKey);
- pMergeInfo->keyLast = TMAX(pMergeInfo->keyLast, fKey);
- }
- }
-
- tSkipListIterNext(pIter);
- row = tsdbNextIterRow(pIter);
- if (row == NULL || TD_ROW_KEY(row) > maxKey) {
- rowKey = INT64_MAX;
- isRowDel = false;
- } else {
- rowKey = TD_ROW_KEY(row);
- isRowDel = TD_ROW_IS_DELETED(row);
- }
-
- filterIter++;
- if (filterIter >= nFilterKeys) {
- fKey = INT64_MAX;
- } else {
- fKey = tdGetKey(filterKeys[filterIter]);
- }
- }
-#endif
#if 1
} else if (fKey > rowKey) {
if (isRowDel) {
@@ -222,12 +160,12 @@ int tsdbLoadDataFromCache(STable *pTable, SSkipListIterator *pIter, TSKEY maxKey
if (lastKey != TSKEY_INITIAL_VAL) {
++pCols->numOfRows;
}
- tsdbAppendTableRowToCols(pTable, pCols, &pSchema, row, false);
+ tsdbAppendTableRowToCols(pTsdb, pTable, pCols, &pSchema, row, false);
}
lastKey = rowKey;
} else {
if (keepDup) {
- tsdbAppendTableRowToCols(pTable, pCols, &pSchema, row, true);
+ tsdbAppendTableRowToCols(pTsdb, pTable, pCols, &pSchema, row, true);
} else {
// discard
}
@@ -249,7 +187,7 @@ int tsdbLoadDataFromCache(STable *pTable, SSkipListIterator *pIter, TSKEY maxKey
if (pCols && pMergeInfo->nOperations >= pCols->maxPoints) break;
pMergeInfo->rowsDeleteSucceed++;
pMergeInfo->nOperations++;
- tsdbAppendTableRowToCols(pTable, pCols, &pSchema, row, false);
+ tsdbAppendTableRowToCols(pTsdb, pTable, pCols, &pSchema, row, false);
} else {
if (keepDup) {
if (pCols && pMergeInfo->nOperations >= pCols->maxPoints) break;
@@ -262,11 +200,11 @@ int tsdbLoadDataFromCache(STable *pTable, SSkipListIterator *pIter, TSKEY maxKey
if (lastKey != TSKEY_INITIAL_VAL) {
++pCols->numOfRows;
}
- tsdbAppendTableRowToCols(pTable, pCols, &pSchema, row, false);
+ tsdbAppendTableRowToCols(pTsdb, pTable, pCols, &pSchema, row, false);
}
lastKey = rowKey;
} else {
- tsdbAppendTableRowToCols(pTable, pCols, &pSchema, row, true);
+ tsdbAppendTableRowToCols(pTsdb, pTable, pCols, &pSchema, row, true);
}
} else {
pMergeInfo->keyFirst = TMIN(pMergeInfo->keyFirst, fKey);
@@ -320,13 +258,13 @@ int tsdbInsertTableData(STsdb *pTsdb, SSubmitMsgIter *pMsgIter, SSubmitBlk *pBlo
terrno = TSDB_CODE_PAR_TABLE_NOT_EXIST;
return -1;
}
- strcat(pRsp->tblFName, mr.me.name);
-
+ if (pRsp->tblFName) strcat(pRsp->tblFName, mr.me.name);
+
if (mr.me.type == TSDB_NORMAL_TABLE) {
- sverNew = mr.me.ntbEntry.schema.sver;
+ sverNew = mr.me.ntbEntry.schemaRow.version;
} else {
metaGetTableEntryByUid(&mr, mr.me.ctbEntry.suid);
- sverNew = mr.me.stbEntry.schema.sver;
+ sverNew = mr.me.stbEntry.schemaRow.version;
}
metaReaderClear(&mr);
@@ -431,10 +369,12 @@ static char *tsdbTbDataGetUid(const void *arg) {
STbData *pTbData = (STbData *)arg;
return (char *)(&(pTbData->uid));
}
-static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema **ppSchema, STSRow *row, bool merge) {
+
+static int tsdbAppendTableRowToCols(STsdb *pTsdb, STable *pTable, SDataCols *pCols, STSchema **ppSchema, STSRow *row,
+ bool merge) {
if (pCols) {
if (*ppSchema == NULL || schemaVersion(*ppSchema) != TD_ROW_SVER(row)) {
- *ppSchema = tsdbGetTableSchemaImpl(pTable, false, false, TD_ROW_SVER(row));
+ *ppSchema = tsdbGetTableSchemaImpl(pTsdb, pTable, false, false, TD_ROW_SVER(row));
if (*ppSchema == NULL) {
ASSERT(false);
return -1;
diff --git a/source/dnode/vnode/src/tsdb/tsdbMemTable2.c b/source/dnode/vnode/src/tsdb/tsdbMemTable2.c
index 025b2ab580163cf3e9b9031b24f1b07881d3ec61..af0fde6d62582456dc163ecdc047ef940c5e26a4 100644
--- a/source/dnode/vnode/src/tsdb/tsdbMemTable2.c
+++ b/source/dnode/vnode/src/tsdb/tsdbMemTable2.c
@@ -15,52 +15,308 @@
#include "tsdb.h"
-typedef struct SMemTable SMemTable;
-typedef struct SMemData SMemData;
-typedef struct SMemSkipList SMemSkipList;
-typedef struct SMemSkipListNode SMemSkipListNode;
-typedef struct SMemSkipListCurosr SMemSkipListCurosr;
-
-#define SL_MAX_LEVEL 5
-
-struct SMemTable {
- STsdb *pTsdb;
- TSKEY minKey;
- TSKEY maxKey;
- int64_t minVer;
- int64_t maxVer;
- int64_t nRows;
- int32_t nHash;
- int32_t nBucket;
- SMemData **pBuckets;
- SMemSkipListCurosr *pSlc;
-};
+typedef struct SMemData SMemData;
+typedef struct SMemSkipList SMemSkipList;
+typedef struct SMemSkipListNode SMemSkipListNode;
struct SMemSkipListNode {
int8_t level;
- SMemSkipListNode *forwards[1]; // Windows does not allow 0
+ SMemSkipListNode *forwards[0];
};
struct SMemSkipList {
- uint32_t seed;
- int8_t maxLevel;
- int8_t level;
- int32_t size;
- SMemSkipListNode pHead[1]; // Windows does not allow 0
+ uint32_t seed;
+ int32_t size;
+ int8_t maxLevel;
+ int8_t level;
+ SMemSkipListNode *pHead;
+ SMemSkipListNode *pTail;
};
struct SMemData {
- SMemData *pHashNext;
tb_uid_t suid;
tb_uid_t uid;
- TSKEY minKey;
- TSKEY maxKey;
- int64_t minVer;
- int64_t maxVer;
- int64_t nRows;
+ TSDBKEY minKey;
+ TSDBKEY maxKey;
+ SDelOp *delOpHead;
+ SDelOp *delOpTail;
SMemSkipList sl;
};
+struct SMemTable {
+ STsdb *pTsdb;
+ int32_t nRef;
+ TSDBKEY minKey;
+ TSDBKEY maxKey;
+ int64_t nRows;
+ SArray *pArray; // SArray
+};
+
+#define SL_NODE_SIZE(l) (sizeof(SMemSkipListNode) + sizeof(SMemSkipListNode *) * (l)*2)
+#define SL_NODE_HALF_SIZE(l) (sizeof(SMemSkipListNode) + sizeof(SMemSkipListNode *) * (l))
+#define SL_NODE_FORWARD(n, l) ((n)->forwards[l])
+#define SL_NODE_BACKWARD(n, l) ((n)->forwards[(n)->level + (l)])
+#define SL_NODE_DATA(n) (&SL_NODE_BACKWARD(n, (n)->level))
+
+#define SL_HEAD_FORWARD(sl, l) SL_NODE_FORWARD((sl)->pHead, l)
+#define SL_TAIL_BACKWARD(sl, l) SL_NODE_FORWARD((sl)->pTail, l)
+
+static int32_t tsdbGetOrCreateMemData(SMemTable *pMemTable, tb_uid_t suid, tb_uid_t uid, SMemData **ppMemData);
+static int memDataPCmprFn(const void *p1, const void *p2);
+static int32_t tPutTSDBRow(uint8_t *p, TSDBROW *pRow);
+static int32_t tGetTSDBRow(uint8_t *p, TSDBROW *pRow);
+static int8_t tsdbMemSkipListRandLevel(SMemSkipList *pSl);
+
+// SMemTable ==============================================
+int32_t tsdbMemTableCreate2(STsdb *pTsdb, SMemTable **ppMemTable) {
+ int32_t code = 0;
+ SMemTable *pMemTable = NULL;
+
+ pMemTable = (SMemTable *)taosMemoryCalloc(1, sizeof(*pMemTable));
+ if (pMemTable == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ pMemTable->pTsdb = pTsdb;
+ pMemTable->nRef = 1;
+ pMemTable->minKey = (TSDBKEY){.version = INT64_MAX, .ts = TSKEY_MAX};
+ pMemTable->maxKey = (TSDBKEY){.version = -1, .ts = TSKEY_MIN};
+ pMemTable->nRows = 0;
+ pMemTable->pArray = taosArrayInit(512, sizeof(SMemData *));
+ if (pMemTable->pArray == NULL) {
+ taosMemoryFree(pMemTable);
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+
+ *ppMemTable = pMemTable;
+ return code;
+
+_err:
+ *ppMemTable = NULL;
+ return code;
+}
+
+void tsdbMemTableDestroy2(SMemTable *pMemTable) {
+ taosArrayDestroyEx(pMemTable->pArray, NULL /*TODO*/);
+ taosMemoryFree(pMemTable);
+}
+
+int32_t tsdbInsertTableData2(STsdb *pTsdb, int64_t version, SVSubmitBlk *pSubmitBlk) {
+ int32_t code = 0;
+ SMemTable *pMemTable = (SMemTable *)pTsdb->mem; // TODO
+ SMemData *pMemData;
+ TSDBROW row = {.version = version};
+
+ ASSERT(pMemTable);
+
+ {
+ // check if table exists (todo)
+ }
+
+ code = tsdbGetOrCreateMemData(pMemTable, pSubmitBlk->suid, pSubmitBlk->uid, &pMemData);
+ if (code) {
+ tsdbError("vgId:%d, failed to create/get table data since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
+ goto _err;
+ }
+
+ // do insert
+ int32_t nt;
+ uint8_t *pt;
+ int32_t n = 0;
+ uint8_t *p = pSubmitBlk->pData;
+ SVBufPool *pPool = pTsdb->pVnode->inUse;
+ int8_t level;
+ SMemSkipListNode *pNode;
+ while (n < pSubmitBlk->nData) {
+ nt = tGetTSRow(p + n, &row.tsRow);
+ n += nt;
+
+ ASSERT(n <= pSubmitBlk->nData);
+
+ // build the node
+ level = tsdbMemSkipListRandLevel(&pMemData->sl);
+ pNode = (SMemSkipListNode *)vnodeBufPoolMalloc(pPool, SL_NODE_SIZE(level) + nt + sizeof(version));
+ if (pNode == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ pNode->level = level;
+ tPutTSDBRow((uint8_t *)SL_NODE_DATA(pNode), &row);
+
+ // put the node (todo)
+
+ // set info
+ if (tsdbKeyCmprFn(&row, &pMemData->minKey) < 0) pMemData->minKey = *(TSDBKEY *)&row;
+ if (tsdbKeyCmprFn(&row, &pMemData->maxKey) > 0) pMemData->maxKey = *(TSDBKEY *)&row;
+ }
+
+ if (tsdbKeyCmprFn(&pMemTable->minKey, &pMemData->minKey) < 0) pMemTable->minKey = pMemData->minKey;
+ if (tsdbKeyCmprFn(&pMemTable->maxKey, &pMemData->maxKey) > 0) pMemTable->maxKey = pMemData->maxKey;
+
+ return code;
+
+_err:
+ return code;
+}
+
+int32_t tsdbDeleteTableData2(STsdb *pTsdb, int64_t version, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKEY eKey) {
+ int32_t code = 0;
+ SMemTable *pMemTable = (SMemTable *)pTsdb->mem; // TODO
+ SMemData *pMemData;
+ SVBufPool *pPool = pTsdb->pVnode->inUse;
+
+ ASSERT(pMemTable);
+
+ {
+ // check if table exists (todo)
+ }
+
+ code = tsdbGetOrCreateMemData(pMemTable, suid, uid, &pMemData);
+ if (code) {
+ goto _err;
+ }
+
+ // do delete
+ SDelOp *pDelOp = (SDelOp *)vnodeBufPoolMalloc(pPool, sizeof(*pDelOp));
+ if (pDelOp == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ pDelOp->version = version;
+ pDelOp->sKey = sKey;
+ pDelOp->eKey = eKey;
+ pDelOp->pNext = NULL;
+ if (pMemData->delOpHead == NULL) {
+ ASSERT(pMemData->delOpTail == NULL);
+ pMemData->delOpHead = pMemData->delOpTail = pDelOp;
+ } else {
+ pMemData->delOpTail->pNext = pDelOp;
+ pMemData->delOpTail = pDelOp;
+ }
+
+ {
+ // update the state of pMemTable, pMemData, last and lastrow (todo)
+ }
+
+ tsdbDebug("vgId:%d, delete data from table suid:%" PRId64 " uid:%" PRId64 " sKey:%" PRId64 " eKey:%" PRId64
+ " since %s",
+ TD_VID(pTsdb->pVnode), suid, uid, sKey, eKey, tstrerror(code));
+ return code;
+
+_err:
+ tsdbError("vgId:%d, failed to delete data from table suid:%" PRId64 " uid:%" PRId64 " sKey:%" PRId64 " eKey:%" PRId64
+ " since %s",
+ TD_VID(pTsdb->pVnode), suid, uid, sKey, eKey, tstrerror(code));
+ return code;
+}
+
+static int32_t tsdbGetOrCreateMemData(SMemTable *pMemTable, tb_uid_t suid, tb_uid_t uid, SMemData **ppMemData) {
+ int32_t code = 0;
+ int32_t idx = 0;
+ SMemData *pMemDataT = &(SMemData){.suid = suid, .uid = uid};
+ SMemData *pMemData = NULL;
+ SVBufPool *pPool = pMemTable->pTsdb->pVnode->inUse;
+ int8_t maxLevel = pMemTable->pTsdb->pVnode->config.tsdbCfg.slLevel;
+
+ // get
+ idx = taosArraySearchIdx(pMemTable->pArray, &pMemDataT, memDataPCmprFn, TD_GE);
+ if (idx >= 0) {
+ pMemData = (SMemData *)taosArrayGet(pMemTable->pArray, idx);
+ if (memDataPCmprFn(&pMemDataT, &pMemData) == 0) goto _exit;
+ }
+
+ // create
+ pMemData = vnodeBufPoolMalloc(pPool, sizeof(*pMemData) + SL_NODE_HALF_SIZE(maxLevel) * 2);
+ if (pMemData == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ pMemData->suid = suid;
+ pMemData->uid = uid;
+ pMemData->minKey = (TSDBKEY){.version = INT64_MAX, .ts = TSKEY_MAX};
+ pMemData->maxKey = (TSDBKEY){.version = -1, .ts = TSKEY_MIN};
+ pMemData->delOpHead = pMemData->delOpTail = NULL;
+ pMemData->sl.seed = taosRand();
+ pMemData->sl.size = 0;
+ pMemData->sl.maxLevel = maxLevel;
+ pMemData->sl.level = 0;
+ pMemData->sl.pHead = (SMemSkipListNode *)&pMemData[1];
+ pMemData->sl.pTail = (SMemSkipListNode *)POINTER_SHIFT(pMemData->sl.pHead, SL_NODE_HALF_SIZE(maxLevel));
+
+ for (int8_t iLevel = 0; iLevel < pMemData->sl.maxLevel; iLevel++) {
+ SL_HEAD_FORWARD(&pMemData->sl, iLevel) = pMemData->sl.pTail;
+ SL_TAIL_BACKWARD(&pMemData->sl, iLevel) = pMemData->sl.pHead;
+ }
+
+ if (idx < 0) idx = 0;
+ if (taosArrayInsert(pMemTable->pArray, idx, &pMemData) == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+
+_exit:
+ *ppMemData = pMemData;
+ return code;
+
+_err:
+ *ppMemData = NULL;
+ return code;
+}
+
+static int memDataPCmprFn(const void *p1, const void *p2) {
+ SMemData *pMemData1 = *(SMemData **)p1;
+ SMemData *pMemData2 = *(SMemData **)p2;
+
+ if (pMemData1->suid < pMemData2->suid) {
+ return -1;
+ } else if (pMemData1->suid > pMemData2->suid) {
+ return 1;
+ }
+
+ if (pMemData1->uid < pMemData2->uid) {
+ return -1;
+ } else if (pMemData1->uid > pMemData2->uid) {
+ return 1;
+ }
+
+ return 0;
+}
+
+static int32_t tPutTSDBRow(uint8_t *p, TSDBROW *pRow) {
+ int32_t n = 0;
+
+ n += tPutI64(p ? p + n : p, pRow->version);
+ n += tPutTSRow(p ? p + n : p, &pRow->tsRow);
+
+ return n;
+}
+
+static int32_t tGetTSDBRow(uint8_t *p, TSDBROW *pRow) {
+ int32_t n = 0;
+
+ n += tGetI64(p + n, &pRow->version);
+ n += tGetTSRow(p + n, &pRow->tsRow);
+
+ return n;
+}
+
+static FORCE_INLINE int8_t tsdbMemSkipListRandLevel(SMemSkipList *pSl) {
+ int8_t level = 1;
+ int8_t tlevel = TMIN(pSl->maxLevel, pSl->level + 1);
+ const uint32_t factor = 4;
+
+ while ((taosRandR(&pSl->seed) % factor) == 0 && level < tlevel) {
+ level++;
+ }
+
+ return level;
+}
+
+#if 0 //====================================================================================
+
+#define SL_MAX_LEVEL 5
+
struct SMemSkipListCurosr {
SMemSkipList *pSl;
SMemSkipListNode *pNodes[SL_MAX_LEVEL];
@@ -74,12 +330,6 @@ typedef struct {
#define HASH_BUCKET(SUID, UID, NBUCKET) (TABS((SUID) + (UID)) % (NBUCKET))
-#define SL_NODE_SIZE(l) (sizeof(SMemSkipListNode) + sizeof(SMemSkipListNode *) * (l)*2)
-#define SL_NODE_HALF_SIZE(l) (sizeof(SMemSkipListNode) + sizeof(SMemSkipListNode *) * (l))
-#define SL_NODE_FORWARD(n, l) ((n)->forwards[l])
-#define SL_NODE_BACKWARD(n, l) ((n)->forwards[(n)->level + (l)])
-#define SL_NODE_DATA(n) (&SL_NODE_BACKWARD(n, (n)->level))
-
#define SL_HEAD_NODE(sl) ((sl)->pHead)
#define SL_TAIL_NODE(sl) ((SMemSkipListNode *)&SL_NODE_FORWARD(SL_HEAD_NODE(sl), (sl)->maxLevel))
#define SL_HEAD_NODE_FORWARD(n, l) SL_NODE_FORWARD(n, l)
@@ -99,50 +349,7 @@ static int32_t tsdbMemSkipListCursorMoveToNext(SMemSkipListCurosr *pSlc);
static int32_t tsdbMemSkipListCursorMoveToPrev(SMemSkipListCurosr *pSlc);
static SMemSkipListNode *tsdbMemSkipListNodeCreate(SVBufPool *pPool, SMemSkipList *pSl, const STsdbRow *pTRow);
-// SMemTable
-int32_t tsdbMemTableCreate2(STsdb *pTsdb, SMemTable **ppMemTb) {
- SMemTable *pMemTb = NULL;
-
- pMemTb = taosMemoryCalloc(1, sizeof(*pMemTb));
- if (pMemTb == NULL) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- return -1;
- }
-
- pMemTb->pTsdb = pTsdb;
- pMemTb->minKey = TSKEY_MAX;
- pMemTb->maxKey = TSKEY_MIN;
- pMemTb->minVer = -1;
- pMemTb->maxVer = -1;
- pMemTb->nRows = 0;
- pMemTb->nHash = 0;
- pMemTb->nBucket = 1024;
- pMemTb->pBuckets = taosMemoryCalloc(pMemTb->nBucket, sizeof(*pMemTb->pBuckets));
- if (pMemTb->pBuckets == NULL) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- taosMemoryFree(pMemTb);
- return -1;
- }
- if (tsdbMemSkipListCursorCreate(pTsdb->pVnode->config.tsdbCfg.slLevel, &pMemTb->pSlc) < 0) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- taosMemoryFree(pMemTb->pBuckets);
- taosMemoryFree(pMemTb);
- }
-
- *ppMemTb = pMemTb;
- return 0;
-}
-
-int32_t tsdbMemTableDestroy2(STsdb *pTsdb, SMemTable *pMemTb) {
- if (pMemTb) {
- // loop to destroy the contents (todo)
- tsdbMemSkipListCursorDestroy(pMemTb->pSlc);
- taosMemoryFree(pMemTb->pBuckets);
- taosMemoryFree(pMemTb);
- }
- return 0;
-}
-
+// SMemTable ========================
int32_t tsdbInsertData2(SMemTable *pMemTb, int64_t version, const SVSubmitBlk *pSubmitBlk) {
SMemData *pMemData;
STsdb *pTsdb = pMemTb->pTsdb;
@@ -253,18 +460,6 @@ int32_t tsdbInsertData2(SMemTable *pMemTb, int64_t version, const SVSubmitBlk *p
return 0;
}
-static FORCE_INLINE int8_t tsdbMemSkipListRandLevel(SMemSkipList *pSl) {
- int8_t level = 1;
- int8_t tlevel = TMIN(pSl->maxLevel, pSl->level + 1);
- const uint32_t factor = 4;
-
- while ((taosRandR(&pSl->seed) % factor) == 0 && level < tlevel) {
- level++;
- }
-
- return level;
-}
-
static FORCE_INLINE int32_t tsdbEncodeRow(SEncoder *pEncoder, const STsdbRow *pRow) {
if (tEncodeI64(pEncoder, pRow->version) < 0) return -1;
if (tEncodeBinary(pEncoder, (const uint8_t *)pRow->pRow, pRow->szRow) < 0) return -1;
@@ -377,4 +572,5 @@ static SMemSkipListNode *tsdbMemSkipListNodeCreate(SVBufPool *pPool, SMemSkipLis
}
return pNode;
-}
\ No newline at end of file
+}
+#endif
\ No newline at end of file
diff --git a/source/dnode/vnode/src/tsdb/tsdbOpen.c b/source/dnode/vnode/src/tsdb/tsdbOpen.c
index fa54c811ffc158339fda4b34cad47ba7c4f2fdac..943263e1a3c65ebf980b353e6a1b69ba52868a22 100644
--- a/source/dnode/vnode/src/tsdb/tsdbOpen.c
+++ b/source/dnode/vnode/src/tsdb/tsdbOpen.c
@@ -74,7 +74,7 @@ int tsdbOpen(SVnode *pVnode, STsdb **ppTsdb, const char *dir, STsdbKeepCfg *pKee
goto _err;
}
- tsdbDebug("vgId:%d tsdb is opened for %s, days:%d, keep:%d,%d,%d", TD_VID(pVnode), pTsdb->path, pTsdb->keepCfg.days,
+ tsdbDebug("vgId:%d, tsdb is opened for %s, days:%d, keep:%d,%d,%d", TD_VID(pVnode), pTsdb->path, pTsdb->keepCfg.days,
pTsdb->keepCfg.keep0, pTsdb->keepCfg.keep1, pTsdb->keepCfg.keep2);
*ppTsdb = pTsdb;
@@ -99,7 +99,7 @@ int tsdbClose(STsdb **pTsdb) {
int tsdbLockRepo(STsdb *pTsdb) {
int code = taosThreadMutexLock(&pTsdb->mutex);
if (code != 0) {
- tsdbError("vgId:%d failed to lock tsdb since %s", REPO_ID(pTsdb), strerror(errno));
+ tsdbError("vgId:%d, failed to lock tsdb since %s", REPO_ID(pTsdb), strerror(errno));
terrno = TAOS_SYSTEM_ERROR(code);
return -1;
}
@@ -112,7 +112,7 @@ int tsdbUnlockRepo(STsdb *pTsdb) {
pTsdb->repoLocked = false;
int code = taosThreadMutexUnlock(&pTsdb->mutex);
if (code != 0) {
- tsdbError("vgId:%d failed to unlock tsdb since %s", REPO_ID(pTsdb), strerror(errno));
+ tsdbError("vgId:%d, failed to unlock tsdb since %s", REPO_ID(pTsdb), strerror(errno));
terrno = TAOS_SYSTEM_ERROR(code);
return -1;
}
diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c
index ee216cb2ab61ecddd538aae2a05ca27f4b98aa96..5f2ea80078fa676ab29752ec33dd07883e3e7802 100644
--- a/source/dnode/vnode/src/tsdb/tsdbRead.c
+++ b/source/dnode/vnode/src/tsdb/tsdbRead.c
@@ -14,6 +14,7 @@
*/
#include "tsdb.h"
+#include "vnode.h"
#define EXTRA_BYTES 2
#define ASCENDING_TRAVERSE(o) (o == TSDB_ORDER_ASC)
@@ -140,16 +141,8 @@ typedef struct STsdbReadHandle {
STSchema* pSchema;
} STsdbReadHandle;
-typedef struct STableGroupSupporter {
- int32_t numOfCols;
- SColIndex* pCols;
- SSchema* pTagSchema;
-} STableGroupSupporter;
-
-int32_t tsdbQueryTableList(void* pMeta, SArray* pRes, void* filterInfo);
-
-static STimeWindow updateLastrowForEachGroup(STableGroupInfo* groupList);
-static int32_t checkForCachedLastRow(STsdbReadHandle* pTsdbReadHandle, STableGroupInfo* groupList);
+static STimeWindow updateLastrowForEachGroup(STableListInfo* pList);
+static int32_t checkForCachedLastRow(STsdbReadHandle* pTsdbReadHandle, STableListInfo* pList);
static int32_t checkForCachedLast(STsdbReadHandle* pTsdbReadHandle);
// static int32_t tsdbGetCachedLastRow(STable* pTable, STSRow** pRes, TSKEY* lastKey);
@@ -213,12 +206,6 @@ int64_t tsdbGetNumOfRowsInMemTable(tsdbReaderT* pHandle) {
return rows;
}
- // STableData* pMem = NULL;
- // STableData* pIMem = NULL;
-
- // SMemTable* pMemT = pMemRef->snapshot.mem;
- // SMemTable* pIMemT = pMemRef->snapshot.imem;
-
size_t size = taosArrayGetSize(pTsdbReadHandle->pTableCheckInfo);
for (int32_t i = 0; i < size; ++i) {
STableCheckInfo* pCheckInfo = taosArrayGet(pTsdbReadHandle->pTableCheckInfo, i);
@@ -235,41 +222,34 @@ int64_t tsdbGetNumOfRowsInMemTable(tsdbReaderT* pHandle) {
return rows;
}
-static SArray* createCheckInfoFromTableGroup(STsdbReadHandle* pTsdbReadHandle, STableGroupInfo* pGroupList) {
- size_t numOfGroup = taosArrayGetSize(pGroupList->pGroupList);
- assert(numOfGroup >= 1);
+static SArray* createCheckInfoFromTableGroup(STsdbReadHandle* pTsdbReadHandle, STableListInfo* pTableList) {
+ size_t tableSize = taosArrayGetSize(pTableList->pTableList);
+ assert(tableSize >= 1);
// allocate buffer in order to load data blocks from file
- SArray* pTableCheckInfo = taosArrayInit(pGroupList->numOfTables, sizeof(STableCheckInfo));
+ SArray* pTableCheckInfo = taosArrayInit(tableSize, sizeof(STableCheckInfo));
if (pTableCheckInfo == NULL) {
return NULL;
}
// todo apply the lastkey of table check to avoid to load header file
- for (int32_t i = 0; i < numOfGroup; ++i) {
- SArray* group = *(SArray**)taosArrayGet(pGroupList->pGroupList, i);
+ for (int32_t j = 0; j < tableSize; ++j) {
+ STableKeyInfo* pKeyInfo = (STableKeyInfo*)taosArrayGet(pTableList->pTableList, j);
- size_t gsize = taosArrayGetSize(group);
- assert(gsize > 0);
-
- for (int32_t j = 0; j < gsize; ++j) {
- STableKeyInfo* pKeyInfo = (STableKeyInfo*)taosArrayGet(group, j);
-
- STableCheckInfo info = {.lastKey = pKeyInfo->lastKey, .tableId = pKeyInfo->uid};
- if (ASCENDING_TRAVERSE(pTsdbReadHandle->order)) {
- if (info.lastKey == INT64_MIN || info.lastKey < pTsdbReadHandle->window.skey) {
- info.lastKey = pTsdbReadHandle->window.skey;
- }
-
- assert(info.lastKey >= pTsdbReadHandle->window.skey && info.lastKey <= pTsdbReadHandle->window.ekey);
- } else {
+ STableCheckInfo info = {.lastKey = pKeyInfo->lastKey, .tableId = pKeyInfo->uid};
+ if (ASCENDING_TRAVERSE(pTsdbReadHandle->order)) {
+ if (info.lastKey == INT64_MIN || info.lastKey < pTsdbReadHandle->window.skey) {
info.lastKey = pTsdbReadHandle->window.skey;
}
- taosArrayPush(pTableCheckInfo, &info);
- tsdbDebug("%p check table uid:%" PRId64 " from lastKey:%" PRId64 " %s", pTsdbReadHandle, info.tableId,
- info.lastKey, pTsdbReadHandle->idStr);
+ assert(info.lastKey >= pTsdbReadHandle->window.skey && info.lastKey <= pTsdbReadHandle->window.ekey);
+ } else {
+ info.lastKey = pTsdbReadHandle->window.skey;
}
+
+ taosArrayPush(pTableCheckInfo, &info);
+ tsdbDebug("%p check table uid:%" PRId64 " from lastKey:%" PRId64 " %s", pTsdbReadHandle, info.tableId, info.lastKey,
+ pTsdbReadHandle->idStr);
}
// TODO group table according to the tag value.
@@ -326,34 +306,34 @@ static int64_t getEarliestValidTimestamp(STsdb* pTsdb) {
return now - (tsTickPerMin[pCfg->precision] * pCfg->keep2) + 1; // needs to add one tick
}
-static void setQueryTimewindow(STsdbReadHandle* pTsdbReadHandle, SQueryTableDataCond* pCond) {
- pTsdbReadHandle->window = pCond->twindow;
+static void setQueryTimewindow(STsdbReadHandle* pTsdbReadHandle, SQueryTableDataCond* pCond, int32_t tWinIdx) {
+ pTsdbReadHandle->window = pCond->twindows[tWinIdx];
bool updateTs = false;
int64_t startTs = getEarliestValidTimestamp(pTsdbReadHandle->pTsdb);
if (ASCENDING_TRAVERSE(pTsdbReadHandle->order)) {
if (startTs > pTsdbReadHandle->window.skey) {
pTsdbReadHandle->window.skey = startTs;
- pCond->twindow.skey = startTs;
+ pCond->twindows[tWinIdx].skey = startTs;
updateTs = true;
}
} else {
if (startTs > pTsdbReadHandle->window.ekey) {
pTsdbReadHandle->window.ekey = startTs;
- pCond->twindow.ekey = startTs;
+ pCond->twindows[tWinIdx].ekey = startTs;
updateTs = true;
}
}
if (updateTs) {
tsdbDebug("%p update the query time window, old:%" PRId64 " - %" PRId64 ", new:%" PRId64 " - %" PRId64 ", %s",
- pTsdbReadHandle, pCond->twindow.skey, pCond->twindow.ekey, pTsdbReadHandle->window.skey,
- pTsdbReadHandle->window.ekey, pTsdbReadHandle->idStr);
+ pTsdbReadHandle, pCond->twindows[tWinIdx].skey, pCond->twindows[tWinIdx].ekey,
+ pTsdbReadHandle->window.skey, pTsdbReadHandle->window.ekey, pTsdbReadHandle->idStr);
}
}
static STsdb* getTsdbByRetentions(SVnode* pVnode, STsdbReadHandle* pReadHandle, TSKEY winSKey, SRetention* retentions) {
- if (vnodeIsRollup(pVnode)) {
+ if (VND_IS_RSMA(pVnode)) {
int level = 0;
int64_t now = taosGetTimestamp(pVnode->config.tsdbCfg.precision);
@@ -372,13 +352,16 @@ static STsdb* getTsdbByRetentions(SVnode* pVnode, STsdbReadHandle* pReadHandle,
}
if (level == TSDB_RETENTION_L0) {
- tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, TSDB_RETENTION_L0);
+ tsdbDebug("vgId:%d, read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle,
+ TSDB_RETENTION_L0);
return VND_RSMA0(pVnode);
} else if (level == TSDB_RETENTION_L1) {
- tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, TSDB_RETENTION_L1);
+ tsdbDebug("vgId:%d, read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle,
+ TSDB_RETENTION_L1);
return VND_RSMA1(pVnode);
} else {
- tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, TSDB_RETENTION_L2);
+ tsdbDebug("vgId:%d, read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle,
+ TSDB_RETENTION_L2);
return VND_RSMA2(pVnode);
}
}
@@ -391,7 +374,7 @@ static STsdbReadHandle* tsdbQueryTablesImpl(SVnode* pVnode, SQueryTableDataCond*
goto _end;
}
- STsdb* pTsdb = getTsdbByRetentions(pVnode, pReadHandle, pCond->twindow.skey, pVnode->config.tsdbCfg.retentions);
+ STsdb* pTsdb = getTsdbByRetentions(pVnode, pReadHandle, pCond->twindows[0].skey, pVnode->config.tsdbCfg.retentions);
pReadHandle->order = pCond->order;
pReadHandle->pTsdb = pTsdb;
@@ -417,11 +400,11 @@ static STsdbReadHandle* tsdbQueryTablesImpl(SVnode* pVnode, SQueryTableDataCond*
}
assert(pCond != NULL);
- setQueryTimewindow(pReadHandle, pCond);
+ setQueryTimewindow(pReadHandle, pCond, 0);
if (pCond->numOfCols > 0) {
int32_t rowLen = 0;
- for(int32_t i = 0; i < pCond->numOfCols; ++i) {
+ for (int32_t i = 0; i < pCond->numOfCols; ++i) {
rowLen += pCond->colList[i].bytes;
}
@@ -456,10 +439,10 @@ static STsdbReadHandle* tsdbQueryTablesImpl(SVnode* pVnode, SQueryTableDataCond*
}
pReadHandle->suppInfo.defaultLoadColumn = getDefaultLoadColumns(pReadHandle, true);
- pReadHandle->suppInfo.slotIds =
- taosMemoryMalloc(sizeof(int32_t) * taosArrayGetSize(pReadHandle->suppInfo.defaultLoadColumn));
- pReadHandle->suppInfo.plist =
- taosMemoryCalloc(taosArrayGetSize(pReadHandle->suppInfo.defaultLoadColumn), POINTER_BYTES);
+
+ size_t size = taosArrayGetSize(pReadHandle->suppInfo.defaultLoadColumn);
+ pReadHandle->suppInfo.slotIds = taosMemoryCalloc(size, sizeof(int32_t));
+ pReadHandle->suppInfo.plist = taosMemoryCalloc(size, POINTER_BYTES);
}
pReadHandle->pDataCols = tdNewDataCols(1000, pVnode->config.tsdbCfg.maxRows);
@@ -480,7 +463,40 @@ _end:
return NULL;
}
-tsdbReaderT* tsdbQueryTables(SVnode* pVnode, SQueryTableDataCond* pCond, STableGroupInfo* groupList, uint64_t qId,
+static int32_t setCurrentSchema(SVnode* pVnode, STsdbReadHandle* pTsdbReadHandle) {
+ STableCheckInfo* pCheckInfo = taosArrayGet(pTsdbReadHandle->pTableCheckInfo, 0);
+
+ int32_t sversion = 1;
+
+ SMetaReader mr = {0};
+ metaReaderInit(&mr, pVnode->pMeta, 0);
+ int32_t code = metaGetTableEntryByUid(&mr, pCheckInfo->tableId);
+ if (code != TSDB_CODE_SUCCESS) {
+ terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
+ metaReaderClear(&mr);
+ return terrno;
+ }
+
+ if (mr.me.type == TSDB_CHILD_TABLE) {
+ tb_uid_t suid = mr.me.ctbEntry.suid;
+ code = metaGetTableEntryByUid(&mr, suid);
+ if (code != TSDB_CODE_SUCCESS) {
+ terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
+ metaReaderClear(&mr);
+ return terrno;
+ }
+ sversion = mr.me.stbEntry.schemaRow.version;
+ } else {
+ ASSERT(mr.me.type == TSDB_NORMAL_TABLE);
+ sversion = mr.me.ntbEntry.schemaRow.version;
+ }
+
+ metaReaderClear(&mr);
+ pTsdbReadHandle->pSchema = metaGetTbTSchema(pVnode->pMeta, pCheckInfo->tableId, sversion);
+ return TSDB_CODE_SUCCESS;
+}
+
+tsdbReaderT* tsdbQueryTables(SVnode* pVnode, SQueryTableDataCond* pCond, STableListInfo* tableList, uint64_t qId,
uint64_t taskId) {
STsdbReadHandle* pTsdbReadHandle = tsdbQueryTablesImpl(pVnode, pCond, qId, taskId);
if (pTsdbReadHandle == NULL) {
@@ -492,16 +508,19 @@ tsdbReaderT* tsdbQueryTables(SVnode* pVnode, SQueryTableDataCond* pCond, STableG
}
// todo apply the lastkey of table check to avoid to load header file
- pTsdbReadHandle->pTableCheckInfo = createCheckInfoFromTableGroup(pTsdbReadHandle, groupList);
+ pTsdbReadHandle->pTableCheckInfo = createCheckInfoFromTableGroup(pTsdbReadHandle, tableList);
if (pTsdbReadHandle->pTableCheckInfo == NULL) {
// tsdbCleanupReadHandle(pTsdbReadHandle);
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return NULL;
}
- STableCheckInfo* pCheckInfo = taosArrayGet(pTsdbReadHandle->pTableCheckInfo, 0);
+ int32_t code = setCurrentSchema(pVnode, pTsdbReadHandle);
+ if (code != TSDB_CODE_SUCCESS) {
+ terrno = code;
+ return NULL;
+ }
- pTsdbReadHandle->pSchema = metaGetTbTSchema(pVnode->pMeta, pCheckInfo->tableId, 1);
int32_t numOfCols = taosArrayGetSize(pTsdbReadHandle->suppInfo.defaultLoadColumn);
int16_t* ids = pTsdbReadHandle->suppInfo.defaultLoadColumn->pData;
@@ -522,14 +541,14 @@ tsdbReaderT* tsdbQueryTables(SVnode* pVnode, SQueryTableDataCond* pCond, STableG
}
}
- tsdbDebug("%p total numOfTable:%" PRIzu " in this query, group %" PRIzu " %s", pTsdbReadHandle,
- taosArrayGetSize(pTsdbReadHandle->pTableCheckInfo), taosArrayGetSize(groupList->pGroupList),
+ tsdbDebug("%p total numOfTable:%" PRIzu " in this query, table %" PRIzu " %s", pTsdbReadHandle,
+ taosArrayGetSize(pTsdbReadHandle->pTableCheckInfo), taosArrayGetSize(tableList->pTableList),
pTsdbReadHandle->idStr);
return (tsdbReaderT)pTsdbReadHandle;
}
-void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond* pCond) {
+void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond* pCond, int32_t tWinIdx) {
STsdbReadHandle* pTsdbReadHandle = queryHandle;
if (emptyQueryTimewindow(pTsdbReadHandle)) {
@@ -542,7 +561,7 @@ void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond* pCond) {
}
pTsdbReadHandle->order = pCond->order;
- pTsdbReadHandle->window = pCond->twindow;
+ setQueryTimewindow(pTsdbReadHandle, pCond, tWinIdx);
pTsdbReadHandle->type = TSDB_QUERY_TYPE_ALL;
pTsdbReadHandle->cur.fid = -1;
pTsdbReadHandle->cur.win = TSWINDOW_INITIALIZER;
@@ -567,11 +586,12 @@ void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond* pCond) {
resetCheckInfo(pTsdbReadHandle);
}
-void tsdbResetQueryHandleForNewTable(tsdbReaderT queryHandle, SQueryTableDataCond* pCond, STableGroupInfo* groupList) {
+void tsdbResetQueryHandleForNewTable(tsdbReaderT queryHandle, SQueryTableDataCond* pCond, STableListInfo* tableList,
+ int32_t tWinIdx) {
STsdbReadHandle* pTsdbReadHandle = queryHandle;
pTsdbReadHandle->order = pCond->order;
- pTsdbReadHandle->window = pCond->twindow;
+ pTsdbReadHandle->window = pCond->twindows[tWinIdx];
pTsdbReadHandle->type = TSDB_QUERY_TYPE_ALL;
pTsdbReadHandle->cur.fid = -1;
pTsdbReadHandle->cur.win = TSWINDOW_INITIALIZER;
@@ -609,27 +629,27 @@ void tsdbResetQueryHandleForNewTable(tsdbReaderT queryHandle, SQueryTableDataCon
// pTsdbReadHandle->next = doFreeColumnInfoData(pTsdbReadHandle->next);
}
-tsdbReaderT tsdbQueryLastRow(SVnode* pVnode, SQueryTableDataCond* pCond, STableGroupInfo* groupList, uint64_t qId,
+tsdbReaderT tsdbQueryLastRow(SVnode* pVnode, SQueryTableDataCond* pCond, STableListInfo* pList, uint64_t qId,
uint64_t taskId) {
- pCond->twindow = updateLastrowForEachGroup(groupList);
+ pCond->twindows[0] = updateLastrowForEachGroup(pList);
// no qualified table
- if (groupList->numOfTables == 0) {
+ if (taosArrayGetSize(pList->pTableList) == 0) {
return NULL;
}
- STsdbReadHandle* pTsdbReadHandle = (STsdbReadHandle*)tsdbQueryTables(pVnode, pCond, groupList, qId, taskId);
+ STsdbReadHandle* pTsdbReadHandle = (STsdbReadHandle*)tsdbQueryTables(pVnode, pCond, pList, qId, taskId);
if (pTsdbReadHandle == NULL) {
return NULL;
}
- int32_t code = checkForCachedLastRow(pTsdbReadHandle, groupList);
+ int32_t code = checkForCachedLastRow(pTsdbReadHandle, pList);
if (code != TSDB_CODE_SUCCESS) { // set the numOfTables to be 0
terrno = code;
return NULL;
}
- assert(pCond->order == TSDB_ORDER_ASC && pCond->twindow.skey <= pCond->twindow.ekey);
+ assert(pCond->order == TSDB_ORDER_ASC && pCond->twindows[0].skey <= pCond->twindows[0].ekey);
if (pTsdbReadHandle->cachelastrow) {
pTsdbReadHandle->type = TSDB_QUERY_TYPE_LAST;
}
@@ -669,60 +689,60 @@ SArray* tsdbGetQueriedTableList(tsdbReaderT* pHandle) {
}
// leave only one table for each group
-static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGroupList) {
- assert(pGroupList);
- size_t numOfGroup = taosArrayGetSize(pGroupList->pGroupList);
-
- STableGroupInfo* pNew = taosMemoryCalloc(1, sizeof(STableGroupInfo));
- pNew->pGroupList = taosArrayInit(numOfGroup, POINTER_BYTES);
-
- for (int32_t i = 0; i < numOfGroup; ++i) {
- SArray* oneGroup = taosArrayGetP(pGroupList->pGroupList, i);
- size_t numOfTables = taosArrayGetSize(oneGroup);
-
- SArray* px = taosArrayInit(4, sizeof(STableKeyInfo));
- for (int32_t j = 0; j < numOfTables; ++j) {
- STableKeyInfo* pInfo = (STableKeyInfo*)taosArrayGet(oneGroup, j);
- // if (window->skey <= pInfo->lastKey && ((STable*)pInfo->pTable)->lastKey != TSKEY_INITIAL_VAL) {
- // taosArrayPush(px, pInfo);
- // pNew->numOfTables += 1;
- // break;
- // }
- }
-
- // there are no data in this group
- if (taosArrayGetSize(px) == 0) {
- taosArrayDestroy(px);
- } else {
- taosArrayPush(pNew->pGroupList, &px);
- }
- }
-
- return pNew;
-}
-
-tsdbReaderT tsdbQueryRowsInExternalWindow(SVnode* pVnode, SQueryTableDataCond* pCond, STableGroupInfo* groupList,
- uint64_t qId, uint64_t taskId) {
- STableGroupInfo* pNew = trimTableGroup(&pCond->twindow, groupList);
-
- if (pNew->numOfTables == 0) {
- tsdbDebug("update query time range to invalidate time window");
-
- assert(taosArrayGetSize(pNew->pGroupList) == 0);
- bool asc = ASCENDING_TRAVERSE(pCond->order);
- if (asc) {
- pCond->twindow.ekey = pCond->twindow.skey - 1;
- } else {
- pCond->twindow.skey = pCond->twindow.ekey - 1;
- }
- }
-
- STsdbReadHandle* pTsdbReadHandle = (STsdbReadHandle*)tsdbQueryTables(pVnode, pCond, pNew, qId, taskId);
- pTsdbReadHandle->loadExternalRow = true;
- pTsdbReadHandle->currentLoadExternalRows = true;
+// static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGroupList) {
+// assert(pGroupList);
+// size_t numOfGroup = taosArrayGetSize(pGroupList->pGroupList);
+//
+// STableGroupInfo* pNew = taosMemoryCalloc(1, sizeof(STableGroupInfo));
+// pNew->pGroupList = taosArrayInit(numOfGroup, POINTER_BYTES);
+//
+// for (int32_t i = 0; i < numOfGroup; ++i) {
+// SArray* oneGroup = taosArrayGetP(pGroupList->pGroupList, i);
+// size_t numOfTables = taosArrayGetSize(oneGroup);
+//
+// SArray* px = taosArrayInit(4, sizeof(STableKeyInfo));
+// for (int32_t j = 0; j < numOfTables; ++j) {
+// STableKeyInfo* pInfo = (STableKeyInfo*)taosArrayGet(oneGroup, j);
+// // if (window->skey <= pInfo->lastKey && ((STable*)pInfo->pTable)->lastKey != TSKEY_INITIAL_VAL) {
+// // taosArrayPush(px, pInfo);
+// // pNew->numOfTables += 1;
+// // break;
+// // }
+// }
+//
+// // there are no data in this group
+// if (taosArrayGetSize(px) == 0) {
+// taosArrayDestroy(px);
+// } else {
+// taosArrayPush(pNew->pGroupList, &px);
+// }
+// }
+//
+// return pNew;
+//}
- return pTsdbReadHandle;
-}
+// tsdbReaderT tsdbQueryRowsInExternalWindow(SVnode* pVnode, SQueryTableDataCond* pCond, STableGroupInfo* groupList,
+// uint64_t qId, uint64_t taskId) {
+// STableGroupInfo* pNew = trimTableGroup(&pCond->twindow, groupList);
+//
+// if (pNew->numOfTables == 0) {
+// tsdbDebug("update query time range to invalidate time window");
+//
+// assert(taosArrayGetSize(pNew->pGroupList) == 0);
+// bool asc = ASCENDING_TRAVERSE(pCond->order);
+// if (asc) {
+// pCond->twindow.ekey = pCond->twindow.skey - 1;
+// } else {
+// pCond->twindow.skey = pCond->twindow.ekey - 1;
+// }
+// }
+//
+// STsdbReadHandle* pTsdbReadHandle = (STsdbReadHandle*)tsdbQueryTables(pVnode, pCond, pNew, qId, taskId);
+// pTsdbReadHandle->loadExternalRow = true;
+// pTsdbReadHandle->currentLoadExternalRows = true;
+//
+// return pTsdbReadHandle;
+//}
static bool initTableMemIterator(STsdbReadHandle* pHandle, STableCheckInfo* pCheckInfo) {
if (pCheckInfo->initBuf) {
@@ -1308,7 +1328,6 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock*
if ((ascScan && (key != TSKEY_INITIAL_VAL && key <= binfo.window.ekey)) ||
(!ascScan && (key != TSKEY_INITIAL_VAL && key >= binfo.window.skey))) {
-
bool cacheDataInFileBlockHole = (ascScan && (key != TSKEY_INITIAL_VAL && key < binfo.window.skey)) ||
(!ascScan && (key != TSKEY_INITIAL_VAL && key > binfo.window.ekey));
if (cacheDataInFileBlockHole) {
@@ -1351,7 +1370,7 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock*
pTsdbReadHandle->realNumOfRows = binfo.rows;
cur->rows = binfo.rows;
- cur->win = binfo.window;
+ cur->win = binfo.window;
cur->mixBlock = false;
cur->blockCompleted = true;
@@ -1362,9 +1381,9 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock*
cur->lastKey = binfo.window.skey - 1;
cur->pos = -1;
}
- } else { // partially copy to dest buffer
+ } else { // partially copy to dest buffer
// make sure to only load once
- bool firstTimeExtract = ((cur->pos == 0 && ascScan) || (cur->pos == binfo.rows -1 && (!ascScan)));
+ bool firstTimeExtract = ((cur->pos == 0 && ascScan) || (cur->pos == binfo.rows - 1 && (!ascScan)));
if (pTsdbReadHandle->outputCapacity < binfo.rows && firstTimeExtract) {
code = doLoadFileDataBlock(pTsdbReadHandle, pBlock, pCheckInfo, cur->slot);
if (code != TSDB_CODE_SUCCESS) {
@@ -1377,7 +1396,7 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock*
}
if (pTsdbReadHandle->outputCapacity >= binfo.rows) {
- ASSERT(cur->blockCompleted);
+ ASSERT(cur->blockCompleted || cur->mixBlock);
}
if (cur->rows == binfo.rows) {
@@ -1873,7 +1892,7 @@ static void copyAllRemainRowsFromFileBlock(STsdbReadHandle* pTsdbReadHandle, STa
bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order);
- int32_t step = ascScan? 1 : -1;
+ int32_t step = ascScan ? 1 : -1;
int32_t start = cur->pos;
int32_t end = endPos;
@@ -1888,8 +1907,8 @@ static void copyAllRemainRowsFromFileBlock(STsdbReadHandle* pTsdbReadHandle, STa
// the time window should always be ascending order: skey <= ekey
cur->win = (STimeWindow){.skey = tsArray[start], .ekey = tsArray[end]};
cur->mixBlock = (numOfRows != pBlockInfo->rows);
- cur->lastKey = tsArray[endPos] + step;
- cur->blockCompleted = (ascScan? (endPos == pBlockInfo->rows - 1):(endPos == 0));
+ cur->lastKey = tsArray[endPos] + step;
+ cur->blockCompleted = (ascScan ? (endPos == pBlockInfo->rows - 1) : (endPos == 0));
// The value of pos may be -1 or pBlockInfo->rows, and it is invalid in both cases.
int32_t pos = endPos + step;
@@ -1905,7 +1924,7 @@ int32_t getEndPosInDataBlock(STsdbReadHandle* pTsdbReadHandle, SDataBlockInfo* p
// NOTE: reverse the order to find the end position in data block
int32_t endPos = -1;
bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order);
- int32_t order = ascScan? TSDB_ORDER_DESC : TSDB_ORDER_ASC;
+ int32_t order = ascScan ? TSDB_ORDER_DESC : TSDB_ORDER_ASC;
SQueryFilePos* cur = &pTsdbReadHandle->cur;
SDataCols* pCols = pTsdbReadHandle->rhelper.pDCols[0];
@@ -1965,7 +1984,7 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
assert(pCols->numOfRows == pBlock->numOfRows && tsArray[0] == pBlock->keyFirst &&
tsArray[pBlock->numOfRows - 1] == pBlock->keyLast);
- bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order);
+ bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order);
int32_t step = ascScan ? 1 : -1;
// for search the endPos, so the order needs to reverse
@@ -1976,8 +1995,9 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
STimeWindow* pWin = &blockInfo.window;
tsdbDebug("%p uid:%" PRIu64 " start merge data block, file block range:%" PRIu64 "-%" PRIu64
- " rows:%d, start:%d, end:%d, %s", pTsdbReadHandle, pCheckInfo->tableId, pWin->skey, pWin->ekey, blockInfo.rows,
- cur->pos, endPos, pTsdbReadHandle->idStr);
+ " rows:%d, start:%d, end:%d, %s",
+ pTsdbReadHandle, pCheckInfo->tableId, pWin->skey, pWin->ekey, blockInfo.rows, cur->pos, endPos,
+ pTsdbReadHandle->idStr);
// compared with the data from in-memory buffer, to generate the correct timestamp array list
int32_t numOfRows = 0;
@@ -2096,8 +2116,9 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
}
// still assign data into current row
- numOfRows += mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols,
- pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastKeyAppend);
+ numOfRows +=
+ mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols,
+ pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastKeyAppend);
if (cur->win.skey == TSKEY_INITIAL_VAL) {
cur->win.skey = key;
@@ -2162,8 +2183,7 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
* if cache is empty, load remain file block data. In contrast, if there are remain data in cache, do NOT
* copy them all to result buffer, since it may be overlapped with file data block.
*/
- if (node == NULL ||
- ((TD_ROW_KEY((STSRow*)SL_GET_NODE_DATA(node)) > pTsdbReadHandle->window.ekey) && ascScan) ||
+ if (node == NULL || ((TD_ROW_KEY((STSRow*)SL_GET_NODE_DATA(node)) > pTsdbReadHandle->window.ekey) && ascScan) ||
((TD_ROW_KEY((STSRow*)SL_GET_NODE_DATA(node)) < pTsdbReadHandle->window.ekey) && !ascScan)) {
// no data in cache or data in cache is greater than the ekey of time window, load data from file block
if (cur->win.skey == TSKEY_INITIAL_VAL) {
@@ -2184,7 +2204,7 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
}
cur->blockCompleted = (((pos > endPos || cur->lastKey > pTsdbReadHandle->window.ekey) && ascScan) ||
- ((pos < endPos || cur->lastKey < pTsdbReadHandle->window.ekey) && !ascScan));
+ ((pos < endPos || cur->lastKey < pTsdbReadHandle->window.ekey) && !ascScan));
if (!ascScan) {
TSWAP(cur->win.skey, cur->win.ekey);
@@ -2803,7 +2823,13 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int
return numOfRows;
}
-static int32_t getAllTableList(SMeta* pMeta, uint64_t uid, SArray* list) {
+void* tsdbGetIdx(SMeta* pMeta) {
+ if (pMeta == NULL) {
+ return NULL;
+ }
+ return metaGetIdx(pMeta);
+}
+int32_t tsdbGetAllTableList(SMeta* pMeta, uint64_t uid, SArray* list) {
SMCtbCursor* pCur = metaOpenCtbCursor(pMeta, uid);
while (1) {
@@ -2820,6 +2846,22 @@ static int32_t getAllTableList(SMeta* pMeta, uint64_t uid, SArray* list) {
return TSDB_CODE_SUCCESS;
}
+int32_t tsdbGetCtbIdList(SMeta* pMeta, int64_t suid, SArray* list) {
+ SMCtbCursor* pCur = metaOpenCtbCursor(pMeta, suid);
+
+ while (1) {
+ tb_uid_t id = metaCtbCursorNext(pCur);
+ if (id == 0) {
+ break;
+ }
+
+ taosArrayPush(list, &id);
+ }
+
+ metaCloseCtbCursor(pCur);
+ return TSDB_CODE_SUCCESS;
+}
+
static void destroyHelper(void* param) {
if (param == NULL) {
return;
@@ -3340,8 +3382,8 @@ bool isTsdbCacheLastRow(tsdbReaderT* pReader) {
return ((STsdbReadHandle*)pReader)->cachelastrow > TSDB_CACHED_TYPE_NONE;
}
-int32_t checkForCachedLastRow(STsdbReadHandle* pTsdbReadHandle, STableGroupInfo* groupList) {
- assert(pTsdbReadHandle != NULL && groupList != NULL);
+int32_t checkForCachedLastRow(STsdbReadHandle* pTsdbReadHandle, STableListInfo* tableList) {
+ assert(pTsdbReadHandle != NULL && tableList != NULL);
// TSKEY key = TSKEY_INITIAL_VAL;
//
@@ -3388,68 +3430,68 @@ int32_t checkForCachedLast(STsdbReadHandle* pTsdbReadHandle) {
return code;
}
-STimeWindow updateLastrowForEachGroup(STableGroupInfo* groupList) {
+STimeWindow updateLastrowForEachGroup(STableListInfo* pList) {
STimeWindow window = {INT64_MAX, INT64_MIN};
- int32_t totalNumOfTable = 0;
- SArray* emptyGroup = taosArrayInit(16, sizeof(int32_t));
-
- // NOTE: starts from the buffer in case of descending timestamp order check data blocks
- size_t numOfGroups = taosArrayGetSize(groupList->pGroupList);
- for (int32_t j = 0; j < numOfGroups; ++j) {
- SArray* pGroup = taosArrayGetP(groupList->pGroupList, j);
- TSKEY key = TSKEY_INITIAL_VAL;
-
- STableKeyInfo keyInfo = {0};
-
- size_t numOfTables = taosArrayGetSize(pGroup);
- for (int32_t i = 0; i < numOfTables; ++i) {
- STableKeyInfo* pInfo = (STableKeyInfo*)taosArrayGet(pGroup, i);
-
- // if the lastKey equals to INT64_MIN, there is no data in this table
- TSKEY lastKey = 0; //((STable*)(pInfo->pTable))->lastKey;
- if (key < lastKey) {
- key = lastKey;
-
- // keyInfo.pTable = pInfo->pTable;
- keyInfo.lastKey = key;
- pInfo->lastKey = key;
-
- if (key < window.skey) {
- window.skey = key;
- }
-
- if (key > window.ekey) {
- window.ekey = key;
- }
- }
- }
-
- // more than one table in each group, only one table left for each group
- // if (keyInfo.pTable != NULL) {
- // totalNumOfTable++;
- // if (taosArrayGetSize(pGroup) == 1) {
- // // do nothing
- // } else {
- // taosArrayClear(pGroup);
- // taosArrayPush(pGroup, &keyInfo);
- // }
- // } else { // mark all the empty groups, and remove it later
- // taosArrayDestroy(pGroup);
- // taosArrayPush(emptyGroup, &j);
- // }
- }
-
- // window does not being updated, so set the original
- if (window.skey == INT64_MAX && window.ekey == INT64_MIN) {
- window = TSWINDOW_INITIALIZER;
- assert(totalNumOfTable == 0 && taosArrayGetSize(groupList->pGroupList) == numOfGroups);
- }
-
- taosArrayRemoveBatch(groupList->pGroupList, TARRAY_GET_START(emptyGroup), (int32_t)taosArrayGetSize(emptyGroup));
- taosArrayDestroy(emptyGroup);
-
- groupList->numOfTables = totalNumOfTable;
+ // int32_t totalNumOfTable = 0;
+ // SArray* emptyGroup = taosArrayInit(16, sizeof(int32_t));
+ //
+ // // NOTE: starts from the buffer in case of descending timestamp order check data blocks
+ // size_t numOfGroups = taosArrayGetSize(groupList->pGroupList);
+ // for (int32_t j = 0; j < numOfGroups; ++j) {
+ // SArray* pGroup = taosArrayGetP(groupList->pGroupList, j);
+ // TSKEY key = TSKEY_INITIAL_VAL;
+ //
+ // STableKeyInfo keyInfo = {0};
+ //
+ // size_t numOfTables = taosArrayGetSize(pGroup);
+ // for (int32_t i = 0; i < numOfTables; ++i) {
+ // STableKeyInfo* pInfo = (STableKeyInfo*)taosArrayGet(pGroup, i);
+ //
+ // // if the lastKey equals to INT64_MIN, there is no data in this table
+ // TSKEY lastKey = 0; //((STable*)(pInfo->pTable))->lastKey;
+ // if (key < lastKey) {
+ // key = lastKey;
+ //
+ // // keyInfo.pTable = pInfo->pTable;
+ // keyInfo.lastKey = key;
+ // pInfo->lastKey = key;
+ //
+ // if (key < window.skey) {
+ // window.skey = key;
+ // }
+ //
+ // if (key > window.ekey) {
+ // window.ekey = key;
+ // }
+ // }
+ // }
+ //
+ // // more than one table in each group, only one table left for each group
+ // // if (keyInfo.pTable != NULL) {
+ // // totalNumOfTable++;
+ // // if (taosArrayGetSize(pGroup) == 1) {
+ // // // do nothing
+ // // } else {
+ // // taosArrayClear(pGroup);
+ // // taosArrayPush(pGroup, &keyInfo);
+ // // }
+ // // } else { // mark all the empty groups, and remove it later
+ // // taosArrayDestroy(pGroup);
+ // // taosArrayPush(emptyGroup, &j);
+ // // }
+ // }
+ //
+ // // window does not being updated, so set the original
+ // if (window.skey == INT64_MAX && window.ekey == INT64_MIN) {
+ // window = TSWINDOW_INITIALIZER;
+ // assert(totalNumOfTable == 0 && taosArrayGetSize(groupList->pGroupList) == numOfGroups);
+ // }
+ //
+ // taosArrayRemoveBatch(groupList->pGroupList, TARRAY_GET_START(emptyGroup), (int32_t)taosArrayGetSize(emptyGroup));
+ // taosArrayDestroy(emptyGroup);
+ //
+ // groupList->numOfTables = totalNumOfTable;
return window;
}
@@ -3480,7 +3522,6 @@ void tsdbRetrieveDataBlockInfo(tsdbReaderT* pTsdbReadHandle, SDataBlockInfo* pDa
pDataBlockInfo->rows = cur->rows;
pDataBlockInfo->window = cur->win;
- // ASSERT(pDataBlockInfo->numOfCols >= (int32_t)(QH_GET_NUM_OF_COLS(pHandle));
}
/*
@@ -3514,7 +3555,7 @@ int32_t tsdbRetrieveDataBlockStatisInfo(tsdbReaderT* pTsdbReadHandle, SColumnDat
return TSDB_CODE_SUCCESS;
}
- tsdbDebug("vgId:%d succeed to load block statis part for uid %" PRIu64, REPO_ID(pHandle->pTsdb),
+ tsdbDebug("vgId:%d, succeed to load block statis part for uid %" PRIu64, REPO_ID(pHandle->pTsdb),
TSDB_READ_TABLE_UID(&pHandle->rhelper));
int16_t* colIds = pHandle->suppInfo.defaultLoadColumn->pData;
@@ -3546,9 +3587,9 @@ int32_t tsdbRetrieveDataBlockStatisInfo(tsdbReaderT* pTsdbReadHandle, SColumnDat
if (IS_BSMA_ON(&(pHandle->pSchema->columns[slotIds[i]]))) {
if (pHandle->suppInfo.pstatis[i].numOfNull == -1) { // set the column data are all NULL
pHandle->suppInfo.pstatis[i].numOfNull = pBlockInfo->compBlock->numOfRows;
- } else {
- pHandle->suppInfo.plist[i] = &pHandle->suppInfo.pstatis[i];
}
+
+ pHandle->suppInfo.plist[i] = &pHandle->suppInfo.pstatis[i];
} else {
*allHave = false;
}
@@ -3597,108 +3638,6 @@ SArray* tsdbRetrieveDataBlock(tsdbReaderT* pTsdbReadHandle, SArray* pIdList) {
}
}
}
-#if 0
-void filterPrepare(void* expr, void* param) {
- tExprNode* pExpr = (tExprNode*)expr;
- if (pExpr->_node.info != NULL) {
- return;
- }
-
- pExpr->_node.info = taosMemoryCalloc(1, sizeof(tQueryInfo));
-
- STSchema* pTSSchema = (STSchema*) param;
- tQueryInfo* pInfo = pExpr->_node.info;
- tVariant* pCond = pExpr->_node.pRight->pVal;
- SSchema* pSchema = pExpr->_node.pLeft->pSchema;
-
- pInfo->sch = *pSchema;
- pInfo->optr = pExpr->_node.optr;
- pInfo->compare = getComparFunc(pInfo->sch.type, pInfo->optr);
- pInfo->indexed = pTSSchema->columns->colId == pInfo->sch.colId;
-
- if (pInfo->optr == TSDB_RELATION_IN) {
- int dummy = -1;
- SHashObj *pObj = NULL;
- if (pInfo->sch.colId == TSDB_TBNAME_COLUMN_INDEX) {
- pObj = taosHashInit(256, taosGetDefaultHashFunction(pInfo->sch.type), true, false);
- SArray *arr = (SArray *)(pCond->arr);
- for (size_t i = 0; i < taosArrayGetSize(arr); i++) {
- char* p = taosArrayGetP(arr, i);
- strntolower_s(varDataVal(p), varDataVal(p), varDataLen(p));
- taosHashPut(pObj, varDataVal(p), varDataLen(p), &dummy, sizeof(dummy));
- }
- } else {
- buildFilterSetFromBinary((void **)&pObj, pCond->pz, pCond->nLen);
- }
- pInfo->q = (char *)pObj;
- } else if (pCond != NULL) {
- uint32_t size = pCond->nLen * TSDB_NCHAR_SIZE;
- if (size < (uint32_t)pSchema->bytes) {
- size = pSchema->bytes;
- }
- // to make sure tonchar does not cause invalid write, since the '\0' needs at least sizeof(TdUcs4) space.
- pInfo->q = taosMemoryCalloc(1, size + TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE);
- tVariantDump(pCond, pInfo->q, pSchema->type, true);
- }
-}
-
-#endif
-
-static int32_t tableGroupComparFn(const void* p1, const void* p2, const void* param) {
-#if 0
- STableGroupSupporter* pTableGroupSupp = (STableGroupSupporter*) param;
- STable* pTable1 = ((STableKeyInfo*) p1)->uid;
- STable* pTable2 = ((STableKeyInfo*) p2)->uid;
-
- for (int32_t i = 0; i < pTableGroupSupp->numOfCols; ++i) {
- SColIndex* pColIndex = &pTableGroupSupp->pCols[i];
- int32_t colIndex = pColIndex->colIndex;
-
- assert(colIndex >= TSDB_TBNAME_COLUMN_INDEX);
-
- char * f1 = NULL;
- char * f2 = NULL;
- int32_t type = 0;
- int32_t bytes = 0;
-
- if (colIndex == TSDB_TBNAME_COLUMN_INDEX) {
- f1 = (char*) TABLE_NAME(pTable1);
- f2 = (char*) TABLE_NAME(pTable2);
- type = TSDB_DATA_TYPE_BINARY;
- bytes = tGetTbnameColumnSchema()->bytes;
- } else {
- if (pTableGroupSupp->pTagSchema && colIndex < pTableGroupSupp->pTagSchema->numOfCols) {
- STColumn* pCol = schemaColAt(pTableGroupSupp->pTagSchema, colIndex);
- bytes = pCol->bytes;
- type = pCol->type;
- f1 = tdGetKVRowValOfCol(pTable1->tagVal, pCol->colId);
- f2 = tdGetKVRowValOfCol(pTable2->tagVal, pCol->colId);
- }
- }
-
- // this tags value may be NULL
- if (f1 == NULL && f2 == NULL) {
- continue;
- }
-
- if (f1 == NULL) {
- return -1;
- }
-
- if (f2 == NULL) {
- return 1;
- }
-
- int32_t ret = doCompare(f1, f2, type, bytes);
- if (ret == 0) {
- continue;
- } else {
- return ret;
- }
- }
-#endif
- return 0;
-}
static int tsdbCheckInfoCompar(const void* key1, const void* key2) {
if (((STableCheckInfo*)key1)->tableId < ((STableCheckInfo*)key2)->tableId) {
@@ -3711,320 +3650,6 @@ static int tsdbCheckInfoCompar(const void* key1, const void* key2) {
}
}
-void createTableGroupImpl(SArray* pGroups, SArray* pTableList, size_t numOfTables, TSKEY skey,
- STableGroupSupporter* pSupp, __ext_compar_fn_t compareFn) {
- STable* pTable = taosArrayGetP(pTableList, 0);
- SArray* g = taosArrayInit(16, sizeof(STableKeyInfo));
-
- STableKeyInfo info = {.lastKey = skey};
- taosArrayPush(g, &info);
-
- for (int32_t i = 1; i < numOfTables; ++i) {
- STable** prev = taosArrayGet(pTableList, i - 1);
- STable** p = taosArrayGet(pTableList, i);
-
- int32_t ret = compareFn(prev, p, pSupp);
- assert(ret == 0 || ret == -1);
-
- if (ret == 0) {
- STableKeyInfo info1 = {.lastKey = skey};
- taosArrayPush(g, &info1);
- } else {
- taosArrayPush(pGroups, &g); // current group is ended, start a new group
- g = taosArrayInit(16, sizeof(STableKeyInfo));
-
- STableKeyInfo info1 = {.lastKey = skey};
- taosArrayPush(g, &info1);
- }
- }
-
- taosArrayPush(pGroups, &g);
-}
-
-SArray* createTableGroup(SArray* pTableList, SSchemaWrapper* pTagSchema, SColIndex* pCols, int32_t numOfOrderCols,
- TSKEY skey) {
- assert(pTableList != NULL);
- SArray* pTableGroup = taosArrayInit(1, POINTER_BYTES);
-
- size_t size = taosArrayGetSize(pTableList);
- if (size == 0) {
- tsdbDebug("no qualified tables");
- return pTableGroup;
- }
-
- if (numOfOrderCols == 0 || size == 1) { // no group by tags clause or only one table
- SArray* sa = taosArrayDup(pTableList);
- if (sa == NULL) {
- taosArrayDestroy(pTableGroup);
- return NULL;
- }
-
- taosArrayPush(pTableGroup, &sa);
- tsdbDebug("all %" PRIzu " tables belong to one group", size);
- } else {
- STableGroupSupporter sup = {0};
- sup.numOfCols = numOfOrderCols;
- sup.pTagSchema = pTagSchema->pSchema;
- sup.pCols = pCols;
-
- taosqsort(pTableList->pData, size, sizeof(STableKeyInfo), &sup, tableGroupComparFn);
- createTableGroupImpl(pTableGroup, pTableList, size, skey, &sup, tableGroupComparFn);
- }
-
- return pTableGroup;
-}
-
-// static bool tableFilterFp(const void* pNode, void* param) {
-// tQueryInfo* pInfo = (tQueryInfo*) param;
-//
-// STable* pTable = (STable*)(SL_GET_NODE_DATA((SSkipListNode*)pNode));
-//
-// char* val = NULL;
-// if (pInfo->sch.colId == TSDB_TBNAME_COLUMN_INDEX) {
-// val = (char*) TABLE_NAME(pTable);
-// } else {
-// val = tdGetKVRowValOfCol(pTable->tagVal, pInfo->sch.colId);
-// }
-//
-// if (pInfo->optr == TSDB_RELATION_ISNULL || pInfo->optr == TSDB_RELATION_NOTNULL) {
-// if (pInfo->optr == TSDB_RELATION_ISNULL) {
-// return (val == NULL) || isNull(val, pInfo->sch.type);
-// } else if (pInfo->optr == TSDB_RELATION_NOTNULL) {
-// return (val != NULL) && (!isNull(val, pInfo->sch.type));
-// }
-// } else if (pInfo->optr == TSDB_RELATION_IN) {
-// int type = pInfo->sch.type;
-// if (type == TSDB_DATA_TYPE_BOOL || IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_TIMESTAMP) {
-// int64_t v;
-// GET_TYPED_DATA(v, int64_t, pInfo->sch.type, val);
-// return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v));
-// } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
-// uint64_t v;
-// GET_TYPED_DATA(v, uint64_t, pInfo->sch.type, val);
-// return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v));
-// }
-// else if (type == TSDB_DATA_TYPE_DOUBLE || type == TSDB_DATA_TYPE_FLOAT) {
-// double v;
-// GET_TYPED_DATA(v, double, pInfo->sch.type, val);
-// return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v));
-// } else if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR){
-// return NULL != taosHashGet((SHashObj *)pInfo->q, varDataVal(val), varDataLen(val));
-// }
-//
-// }
-//
-// int32_t ret = 0;
-// if (val == NULL) { //the val is possible to be null, so check it out carefully
-// ret = -1; // val is missing in table tags value pairs
-// } else {
-// ret = pInfo->compare(val, pInfo->q);
-// }
-//
-// switch (pInfo->optr) {
-// case TSDB_RELATION_EQUAL: {
-// return ret == 0;
-// }
-// case TSDB_RELATION_NOT_EQUAL: {
-// return ret != 0;
-// }
-// case TSDB_RELATION_GREATER_EQUAL: {
-// return ret >= 0;
-// }
-// case TSDB_RELATION_GREATER: {
-// return ret > 0;
-// }
-// case TSDB_RELATION_LESS_EQUAL: {
-// return ret <= 0;
-// }
-// case TSDB_RELATION_LESS: {
-// return ret < 0;
-// }
-// case TSDB_RELATION_LIKE: {
-// return ret == 0;
-// }
-// case TSDB_RELATION_MATCH: {
-// return ret == 0;
-// }
-// case TSDB_RELATION_NMATCH: {
-// return ret == 0;
-// }
-// case TSDB_RELATION_IN: {
-// return ret == 1;
-// }
-//
-// default:
-// assert(false);
-// }
-//
-// return true;
-//}
-
-// static void getTableListfromSkipList(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, SExprTraverseSupp
-// *param);
-
-// static int32_t doQueryTableList(STable* pSTable, SArray* pRes, tExprNode* pExpr) {
-// // // query according to the expression tree
-// SExprTraverseSupp supp = {
-// .nodeFilterFn = (__result_filter_fn_t)tableFilterFp,
-// .setupInfoFn = filterPrepare,
-// .pExtInfo = pSTable->tagSchema,
-// };
-//
-// getTableListfromSkipList(pExpr, pSTable->pIndex, pRes, &supp);
-// tExprTreeDestroy(pExpr, destroyHelper);
-// return TSDB_CODE_SUCCESS;
-//}
-
-int32_t tsdbQuerySTableByTagCond(void* pMeta, uint64_t uid, TSKEY skey, const char* pTagCond, size_t len,
- int16_t tagNameRelType, const char* tbnameCond, STableGroupInfo* pGroupInfo,
- SColIndex* pColIndex, int32_t numOfCols, uint64_t reqId, uint64_t taskId) {
- SMetaReader mr = {0};
-
- metaReaderInit(&mr, (SMeta*)pMeta, 0);
-
- if (metaGetTableEntryByUid(&mr, uid) < 0) {
- tsdbError("%p failed to get stable, uid:%" PRIu64 ", TID:0x%" PRIx64 " QID:0x%" PRIx64, pMeta, uid, taskId, reqId);
- metaReaderClear(&mr);
- terrno = TSDB_CODE_PAR_TABLE_NOT_EXIST;
- goto _error;
- } else {
- tsdbDebug("%p succeed to get stable, uid:%" PRIu64 ", TID:0x%" PRIx64 " QID:0x%" PRIx64, pMeta, uid, taskId, reqId);
- }
-
- if (mr.me.type != TSDB_SUPER_TABLE) {
- tsdbError("%p query normal tag not allowed, uid:%" PRIu64 ", TID:0x%" PRIx64 " QID:0x%" PRIx64, pMeta, uid, taskId,
- reqId);
- terrno = TSDB_CODE_OPS_NOT_SUPPORT; // basically, this error is caused by invalid sql issued by client
- metaReaderClear(&mr);
- goto _error;
- }
-
- metaReaderClear(&mr);
-
- // NOTE: not add ref count for super table
- SArray* res = taosArrayInit(8, sizeof(STableKeyInfo));
- SSchemaWrapper* pTagSchema = metaGetTableSchema(pMeta, uid, 1, true);
-
- // no tags and tbname condition, all child tables of this stable are involved
- if (tbnameCond == NULL && (pTagCond == NULL || len == 0)) {
- int32_t ret = getAllTableList(pMeta, uid, res);
- if (ret != TSDB_CODE_SUCCESS) {
- goto _error;
- }
-
- pGroupInfo->numOfTables = (uint32_t)taosArrayGetSize(res);
- pGroupInfo->pGroupList = createTableGroup(res, pTagSchema, pColIndex, numOfCols, skey);
-
- tsdbDebug("%p no table name/tag condition, all tables qualified, numOfTables:%u, group:%zu, TID:0x%" PRIx64
- " QID:0x%" PRIx64,
- pMeta, pGroupInfo->numOfTables, taosArrayGetSize(pGroupInfo->pGroupList), taskId, reqId);
-
- taosArrayDestroy(res);
- return ret;
- }
-
- int32_t ret = TSDB_CODE_SUCCESS;
-
- SFilterInfo* filterInfo = NULL;
- ret = filterInitFromNode((SNode*)pTagCond, &filterInfo, 0);
- if (ret != TSDB_CODE_SUCCESS) {
- terrno = ret;
- return ret;
- }
- ret = tsdbQueryTableList(pMeta, res, filterInfo);
- pGroupInfo->numOfTables = (uint32_t)taosArrayGetSize(res);
- pGroupInfo->pGroupList = createTableGroup(res, pTagSchema, pColIndex, numOfCols, skey);
-
- // tsdbDebug("%p stable tid:%d, uid:%" PRIu64 " query, numOfTables:%u, belong to %" PRIzu " groups", tsdb,
- // pTable->tableId, pTable->uid, pGroupInfo->numOfTables, taosArrayGetSize(pGroupInfo->pGroupList));
-
- taosArrayDestroy(res);
- return ret;
-
-_error:
- return terrno;
-}
-
-int32_t tsdbQueryTableList(void* pMeta, SArray* pRes, void* filterInfo) {
- // impl later
-
- return TSDB_CODE_SUCCESS;
-}
-int32_t tsdbGetOneTableGroup(void* pMeta, uint64_t uid, TSKEY startKey, STableGroupInfo* pGroupInfo) {
- SMetaReader mr = {0};
-
- metaReaderInit(&mr, (SMeta*)pMeta, 0);
-
- if (metaGetTableEntryByUid(&mr, uid) < 0) {
- terrno = TSDB_CODE_PAR_TABLE_NOT_EXIST;
- goto _error;
- }
-
- metaReaderClear(&mr);
-
- pGroupInfo->numOfTables = 1;
- pGroupInfo->pGroupList = taosArrayInit(1, POINTER_BYTES);
-
- SArray* group = taosArrayInit(1, sizeof(STableKeyInfo));
-
- STableKeyInfo info = {.lastKey = startKey, .uid = uid};
- taosArrayPush(group, &info);
-
- taosArrayPush(pGroupInfo->pGroupList, &group);
- return TSDB_CODE_SUCCESS;
-
-_error:
- metaReaderClear(&mr);
- return terrno;
-}
-
-#if 0
-int32_t tsdbGetTableGroupFromIdListT(STsdb* tsdb, SArray* pTableIdList, STableGroupInfo* pGroupInfo) {
- if (tsdbRLockRepoMeta(tsdb) < 0) {
- return terrno;
- }
-
- assert(pTableIdList != NULL);
- size_t size = taosArrayGetSize(pTableIdList);
- pGroupInfo->pGroupList = taosArrayInit(1, POINTER_BYTES);
- SArray* group = taosArrayInit(1, sizeof(STableKeyInfo));
-
- for(int32_t i = 0; i < size; ++i) {
- STableIdInfo *id = taosArrayGet(pTableIdList, i);
-
- STable* pTable = tsdbGetTableByUid(tsdbGetMeta(tsdb), id->uid);
- if (pTable == NULL) {
- tsdbWarn("table uid:%"PRIu64", tid:%d has been drop already", id->uid, id->tid);
- continue;
- }
-
- if (pTable->type == TSDB_SUPER_TABLE) {
- tsdbError("direct query on super tale is not allowed, table uid:%"PRIu64", tid:%d", id->uid, id->tid);
- terrno = TSDB_CODE_QRY_INVALID_MSG;
- tsdbUnlockRepoMeta(tsdb);
- taosArrayDestroy(group);
- return terrno;
- }
-
- STableKeyInfo info = {.pTable = pTable, .lastKey = id->key};
- taosArrayPush(group, &info);
- }
-
- if (tsdbUnlockRepoMeta(tsdb) < 0) {
- taosArrayDestroy(group);
- return terrno;
- }
-
- pGroupInfo->numOfTables = (uint32_t) taosArrayGetSize(group);
- if (pGroupInfo->numOfTables > 0) {
- taosArrayPush(pGroupInfo->pGroupList, &group);
- } else {
- taosArrayDestroy(group);
- }
-
- return TSDB_CODE_SUCCESS;
-}
-#endif
static void* doFreeColumnInfoData(SArray* pColumnInfoData) {
if (pColumnInfoData == NULL) {
return NULL;
@@ -4093,287 +3718,3 @@ void tsdbCleanupReadHandle(tsdbReaderT queryHandle) {
taosMemoryFreeClear(pTsdbReadHandle);
}
-
-#if 0
-void tsdbDestroyTableGroup(STableGroupInfo *pGroupList) {
- assert(pGroupList != NULL);
-
- size_t numOfGroup = taosArrayGetSize(pGroupList->pGroupList);
-
- for(int32_t i = 0; i < numOfGroup; ++i) {
- SArray* p = taosArrayGetP(pGroupList->pGroupList, i);
-
- size_t numOfTables = taosArrayGetSize(p);
- for(int32_t j = 0; j < numOfTables; ++j) {
- STable* pTable = taosArrayGetP(p, j);
- if (pTable != NULL) { // in case of handling retrieve data from tsdb
- tsdbUnRefTable(pTable);
- }
- //assert(pTable != NULL);
- }
-
- taosArrayDestroy(p);
- }
-
- taosHashCleanup(pGroupList->map);
- taosArrayDestroy(pGroupList->pGroupList);
- pGroupList->numOfTables = 0;
-}
-
-static void applyFilterToSkipListNode(SSkipList *pSkipList, tExprNode *pExpr, SArray *pResult, SExprTraverseSupp *param) {
- SSkipListIterator* iter = tSkipListCreateIter(pSkipList);
-
- // Scan each node in the skiplist by using iterator
- while (tSkipListIterNext(iter)) {
- SSkipListNode *pNode = tSkipListIterGet(iter);
- if (exprTreeApplyFilter(pExpr, pNode, param)) {
- taosArrayPush(pResult, &(SL_GET_NODE_DATA(pNode)));
- }
- }
-
- tSkipListDestroyIter(iter);
-}
-
-typedef struct {
- char* v;
- int32_t optr;
-} SEndPoint;
-
-typedef struct {
- SEndPoint* start;
- SEndPoint* end;
-} SQueryCond;
-
-// todo check for malloc failure
-static int32_t setQueryCond(tQueryInfo *queryColInfo, SQueryCond* pCond) {
- int32_t optr = queryColInfo->optr;
-
- if (optr == TSDB_RELATION_GREATER || optr == TSDB_RELATION_GREATER_EQUAL ||
- optr == TSDB_RELATION_EQUAL || optr == TSDB_RELATION_NOT_EQUAL) {
- pCond->start = taosMemoryCalloc(1, sizeof(SEndPoint));
- pCond->start->optr = queryColInfo->optr;
- pCond->start->v = queryColInfo->q;
- } else if (optr == TSDB_RELATION_LESS || optr == TSDB_RELATION_LESS_EQUAL) {
- pCond->end = taosMemoryCalloc(1, sizeof(SEndPoint));
- pCond->end->optr = queryColInfo->optr;
- pCond->end->v = queryColInfo->q;
- } else if (optr == TSDB_RELATION_IN) {
- pCond->start = taosMemoryCalloc(1, sizeof(SEndPoint));
- pCond->start->optr = queryColInfo->optr;
- pCond->start->v = queryColInfo->q;
- } else if (optr == TSDB_RELATION_LIKE) {
- assert(0);
- } else if (optr == TSDB_RELATION_MATCH) {
- assert(0);
- } else if (optr == TSDB_RELATION_NMATCH) {
- assert(0);
- }
-
- return TSDB_CODE_SUCCESS;
-}
-
-static void queryIndexedColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArray* result) {
- SSkipListIterator* iter = NULL;
-
- SQueryCond cond = {0};
- if (setQueryCond(pQueryInfo, &cond) != TSDB_CODE_SUCCESS) {
- //todo handle error
- }
-
- if (cond.start != NULL) {
- iter = tSkipListCreateIterFromVal(pSkipList, (char*) cond.start->v, pSkipList->type, TSDB_ORDER_ASC);
- } else {
- iter = tSkipListCreateIterFromVal(pSkipList, (char*)(cond.end ? cond.end->v: NULL), pSkipList->type, TSDB_ORDER_DESC);
- }
-
- if (cond.start != NULL) {
- int32_t optr = cond.start->optr;
-
- if (optr == TSDB_RELATION_EQUAL) { // equals
- while(tSkipListIterNext(iter)) {
- SSkipListNode* pNode = tSkipListIterGet(iter);
-
- int32_t ret = pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v);
- if (ret != 0) {
- break;
- }
-
- STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL};
- taosArrayPush(result, &info);
- }
- } else if (optr == TSDB_RELATION_GREATER || optr == TSDB_RELATION_GREATER_EQUAL) { // greater equal
- bool comp = true;
- int32_t ret = 0;
-
- while(tSkipListIterNext(iter)) {
- SSkipListNode* pNode = tSkipListIterGet(iter);
-
- if (comp) {
- ret = pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v);
- assert(ret >= 0);
- }
-
- if (ret == 0 && optr == TSDB_RELATION_GREATER) {
- continue;
- } else {
- STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL};
- taosArrayPush(result, &info);
- comp = false;
- }
- }
- } else if (optr == TSDB_RELATION_NOT_EQUAL) { // not equal
- bool comp = true;
-
- while(tSkipListIterNext(iter)) {
- SSkipListNode* pNode = tSkipListIterGet(iter);
- comp = comp && (pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v) == 0);
- if (comp) {
- continue;
- }
-
- STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL};
- taosArrayPush(result, &info);
- }
-
- tSkipListDestroyIter(iter);
-
- comp = true;
- iter = tSkipListCreateIterFromVal(pSkipList, (char*) cond.start->v, pSkipList->type, TSDB_ORDER_DESC);
- while(tSkipListIterNext(iter)) {
- SSkipListNode* pNode = tSkipListIterGet(iter);
- comp = comp && (pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v) == 0);
- if (comp) {
- continue;
- }
-
- STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL};
- taosArrayPush(result, &info);
- }
-
- } else if (optr == TSDB_RELATION_IN) {
- while(tSkipListIterNext(iter)) {
- SSkipListNode* pNode = tSkipListIterGet(iter);
-
- int32_t ret = pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v);
- if (ret != 0) {
- break;
- }
-
- STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL};
- taosArrayPush(result, &info);
- }
-
- } else {
- assert(0);
- }
- } else {
- int32_t optr = cond.end ? cond.end->optr : TSDB_RELATION_INVALID;
- if (optr == TSDB_RELATION_LESS || optr == TSDB_RELATION_LESS_EQUAL) {
- bool comp = true;
- int32_t ret = 0;
-
- while (tSkipListIterNext(iter)) {
- SSkipListNode *pNode = tSkipListIterGet(iter);
-
- if (comp) {
- ret = pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.end->v);
- assert(ret <= 0);
- }
-
- if (ret == 0 && optr == TSDB_RELATION_LESS) {
- continue;
- } else {
- STableKeyInfo info = {.pTable = (void *)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL};
- taosArrayPush(result, &info);
- comp = false; // no need to compare anymore
- }
- }
- } else {
- assert(pQueryInfo->optr == TSDB_RELATION_ISNULL || pQueryInfo->optr == TSDB_RELATION_NOTNULL);
-
- while (tSkipListIterNext(iter)) {
- SSkipListNode *pNode = tSkipListIterGet(iter);
-
- bool isnull = isNull(SL_GET_NODE_KEY(pSkipList, pNode), pQueryInfo->sch.type);
- if ((pQueryInfo->optr == TSDB_RELATION_ISNULL && isnull) ||
- (pQueryInfo->optr == TSDB_RELATION_NOTNULL && (!isnull))) {
- STableKeyInfo info = {.pTable = (void *)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL};
- taosArrayPush(result, &info);
- }
- }
- }
- }
-
- taosMemoryFree(cond.start);
- taosMemoryFree(cond.end);
- tSkipListDestroyIter(iter);
-}
-
-static void queryIndexlessColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArray* res, __result_filter_fn_t filterFp) {
- SSkipListIterator* iter = tSkipListCreateIter(pSkipList);
-
- while (tSkipListIterNext(iter)) {
- bool addToResult = false;
-
- SSkipListNode *pNode = tSkipListIterGet(iter);
-
- char *pData = SL_GET_NODE_DATA(pNode);
- tstr *name = (tstr*) tsdbGetTableName((void*) pData);
-
- // todo speed up by using hash
- if (pQueryInfo->sch.colId == TSDB_TBNAME_COLUMN_INDEX) {
- if (pQueryInfo->optr == TSDB_RELATION_IN) {
- addToResult = pQueryInfo->compare(name, pQueryInfo->q);
- } else if (pQueryInfo->optr == TSDB_RELATION_LIKE ||
- pQueryInfo->optr == TSDB_RELATION_MATCH ||
- pQueryInfo->optr == TSDB_RELATION_NMATCH) {
- addToResult = !pQueryInfo->compare(name, pQueryInfo->q);
- }
- } else {
- addToResult = filterFp(pNode, pQueryInfo);
- }
-
- if (addToResult) {
- STableKeyInfo info = {.pTable = (void*)pData, .lastKey = TSKEY_INITIAL_VAL};
- taosArrayPush(res, &info);
- }
- }
-
- tSkipListDestroyIter(iter);
-}
-
-// Apply the filter expression to each node in the skiplist to acquire the qualified nodes in skip list
-//void getTableListfromSkipList(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, SExprTraverseSupp *param) {
-// if (pExpr == NULL) {
-// return;
-// }
-//
-// tExprNode *pLeft = pExpr->_node.pLeft;
-// tExprNode *pRight = pExpr->_node.pRight;
-//
-// // column project
-// if (pLeft->nodeType != TSQL_NODE_EXPR && pRight->nodeType != TSQL_NODE_EXPR) {
-// assert(pLeft->nodeType == TSQL_NODE_COL && (pRight->nodeType == TSQL_NODE_VALUE || pRight->nodeType == TSQL_NODE_DUMMY));
-//
-// param->setupInfoFn(pExpr, param->pExtInfo);
-//
-// tQueryInfo *pQueryInfo = pExpr->_node.info;
-// if (pQueryInfo->indexed && (pQueryInfo->optr != TSDB_RELATION_LIKE
-// && pQueryInfo->optr != TSDB_RELATION_MATCH && pQueryInfo->optr != TSDB_RELATION_NMATCH
-// && pQueryInfo->optr != TSDB_RELATION_IN)) {
-// queryIndexedColumn(pSkipList, pQueryInfo, result);
-// } else {
-// queryIndexlessColumn(pSkipList, pQueryInfo, result, param->nodeFilterFn);
-// }
-//
-// return;
-// }
-//
-// // The value of hasPK is always 0.
-// uint8_t weight = pLeft->_node.hasPK + pRight->_node.hasPK;
-// assert(weight == 0 && pSkipList != NULL && taosArrayGetSize(result) == 0);
-//
-// //apply the hierarchical filter expression to every node in skiplist to find the qualified nodes
-// applyFilterToSkipListNode(pSkipList, pExpr, result, param);
-//}
-#endif
diff --git a/source/dnode/vnode/src/tsdb/tsdbReadImpl.c b/source/dnode/vnode/src/tsdb/tsdbReadImpl.c
index f66037b16d76a79743d626010d32ce3820716e70..a6f2ff139437a45edbc9c42e41d1f1de16555d97 100644
--- a/source/dnode/vnode/src/tsdb/tsdbReadImpl.c
+++ b/source/dnode/vnode/src/tsdb/tsdbReadImpl.c
@@ -87,7 +87,7 @@ int tsdbSetAndOpenReadFSet(SReadH *pReadh, SDFileSet *pSet) {
TSDB_FSET_SET_CLOSED(TSDB_READ_FSET(pReadh));
// if (tsdbOpenDFileSet(TSDB_READ_FSET(pReadh), O_RDONLY) < 0) {
if (tsdbOpenDFileSet(TSDB_READ_FSET(pReadh), TD_FILE_READ) < 0) {
- tsdbError("vgId:%d failed to open file set %d since %s", TSDB_READ_REPO_ID(pReadh), TSDB_FSET_FID(pSet),
+ tsdbError("vgId:%d, failed to open file set %d since %s", TSDB_READ_REPO_ID(pReadh), TSDB_FSET_FID(pSet),
tstrerror(terrno));
return -1;
}
@@ -107,7 +107,7 @@ int tsdbLoadBlockIdx(SReadH *pReadh) {
if (pHeadf->info.offset <= 0) return 0;
if (tsdbSeekDFile(pHeadf, pHeadf->info.offset, SEEK_SET) < 0) {
- tsdbError("vgId:%d failed to load SBlockIdx part while seek file %s since %s, offset:%u len :%u",
+ tsdbError("vgId:%d, failed to load SBlockIdx part while seek file %s since %s, offset:%u len :%u",
TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pHeadf), tstrerror(terrno), pHeadf->info.offset,
pHeadf->info.len);
return -1;
@@ -117,7 +117,7 @@ int tsdbLoadBlockIdx(SReadH *pReadh) {
int64_t nread = tsdbReadDFile(pHeadf, TSDB_READ_BUF(pReadh), pHeadf->info.len);
if (nread < 0) {
- tsdbError("vgId:%d failed to load SBlockIdx part while read file %s since %s, offset:%u len :%u",
+ tsdbError("vgId:%d, failed to load SBlockIdx part while read file %s since %s, offset:%u len :%u",
TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pHeadf), tstrerror(terrno), pHeadf->info.offset,
pHeadf->info.len);
return -1;
@@ -125,14 +125,14 @@ int tsdbLoadBlockIdx(SReadH *pReadh) {
if (nread < pHeadf->info.len) {
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
- tsdbError("vgId:%d SBlockIdx part in file %s is corrupted, offset:%u expected bytes:%u read bytes: %" PRId64,
+ tsdbError("vgId:%d, SBlockIdx part in file %s is corrupted, offset:%u expected bytes:%u read bytes: %" PRId64,
TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pHeadf), pHeadf->info.offset, pHeadf->info.len, nread);
return -1;
}
if (!taosCheckChecksumWhole((uint8_t *)TSDB_READ_BUF(pReadh), pHeadf->info.len)) {
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
- tsdbError("vgId:%d SBlockIdx part in file %s is corrupted since wrong checksum, offset:%u len :%u",
+ tsdbError("vgId:%d, SBlockIdx part in file %s is corrupted since wrong checksum, offset:%u len :%u",
TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pHeadf), pHeadf->info.offset, pHeadf->info.len);
return -1;
}
@@ -157,7 +157,7 @@ int tsdbLoadBlockIdx(SReadH *pReadh) {
}
int tsdbSetReadTable(SReadH *pReadh, STable *pTable) {
- STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1);
+ STSchema *pSchema = tsdbGetTableSchemaImpl(TSDB_READ_REPO(pReadh), pTable, false, false, -1);
pReadh->pTable = pTable;
@@ -209,7 +209,7 @@ int tsdbLoadBlockInfo(SReadH *pReadh, void *pTarget) {
SBlockIdx *pBlkIdx = pReadh->pBlkIdx;
if (tsdbSeekDFile(pHeadf, pBlkIdx->offset, SEEK_SET) < 0) {
- tsdbError("vgId:%d failed to load SBlockInfo part while seek file %s since %s, offset:%u len:%u",
+ tsdbError("vgId:%d, failed to load SBlockInfo part while seek file %s since %s, offset:%u len:%u",
TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pHeadf), tstrerror(terrno), pBlkIdx->offset, pBlkIdx->len);
return -1;
}
@@ -218,21 +218,21 @@ int tsdbLoadBlockInfo(SReadH *pReadh, void *pTarget) {
int64_t nread = tsdbReadDFile(pHeadf, (void *)(pReadh->pBlkInfo), pBlkIdx->len);
if (nread < 0) {
- tsdbError("vgId:%d failed to load SBlockInfo part while read file %s since %s, offset:%u len :%u",
+ tsdbError("vgId:%d, failed to load SBlockInfo part while read file %s since %s, offset:%u len :%u",
TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pHeadf), tstrerror(terrno), pBlkIdx->offset, pBlkIdx->len);
return -1;
}
if (nread < pBlkIdx->len) {
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
- tsdbError("vgId:%d SBlockInfo part in file %s is corrupted, offset:%u expected bytes:%u read bytes:%" PRId64,
+ tsdbError("vgId:%d, SBlockInfo part in file %s is corrupted, offset:%u expected bytes:%u read bytes:%" PRId64,
TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pHeadf), pBlkIdx->offset, pBlkIdx->len, nread);
return -1;
}
if (!taosCheckChecksumWhole((uint8_t *)(pReadh->pBlkInfo), pBlkIdx->len)) {
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
- tsdbError("vgId:%d SBlockInfo part in file %s is corrupted since wrong checksum, offset:%u len :%u",
+ tsdbError("vgId:%d, SBlockInfo part in file %s is corrupted since wrong checksum, offset:%u len :%u",
TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pHeadf), pBlkIdx->offset, pBlkIdx->len);
return -1;
}
@@ -467,7 +467,7 @@ int tsdbLoadBlockStatis(SReadH *pReadh, SBlock *pBlock) {
ASSERT(pBlock->numOfSubBlocks <= 1);
if (!pBlock->aggrStat) {
- tsdbDebug("vgId:%d no need to load block statis part for uid %" PRIu64 " since not exist", REPO_ID(pReadh->pRepo),
+ tsdbDebug("vgId:%d, no need to load block statis part for uid %" PRIu64 " since not exist", REPO_ID(pReadh->pRepo),
TSDB_READ_TABLE_UID(pReadh));
return TSDB_STATIS_NONE;
}
@@ -475,7 +475,7 @@ int tsdbLoadBlockStatis(SReadH *pReadh, SBlock *pBlock) {
SDFile *pDFileAggr = pBlock->last ? TSDB_READ_SMAL_FILE(pReadh) : TSDB_READ_SMAD_FILE(pReadh);
if (tsdbSeekDFile(pDFileAggr, pBlock->aggrOffset, SEEK_SET) < 0) {
- tsdbError("vgId:%d failed to load block statis part for uid %" PRIu64 " while seek file %s to offset %" PRIu64
+ tsdbError("vgId:%d, failed to load block statis part for uid %" PRIu64 " while seek file %s to offset %" PRIu64
" since %s",
TSDB_READ_REPO_ID(pReadh), TSDB_READ_TABLE_UID(pReadh), TSDB_FILE_FULL_NAME(pDFileAggr),
(uint64_t)pBlock->aggrOffset, tstrerror(terrno));
@@ -487,7 +487,7 @@ int tsdbLoadBlockStatis(SReadH *pReadh, SBlock *pBlock) {
int64_t nreadAggr = tsdbReadDFile(pDFileAggr, (void *)(pReadh->pAggrBlkData), sizeAggr);
if (nreadAggr < 0) {
- tsdbError("vgId:%d failed to load block statis part for uid %" PRIu64
+ tsdbError("vgId:%d, failed to load block statis part for uid %" PRIu64
" while read file %s since %s, offset:%" PRIu64 " len :%" PRIzu,
TSDB_READ_REPO_ID(pReadh), TSDB_READ_TABLE_UID(pReadh), TSDB_FILE_FULL_NAME(pDFileAggr),
tstrerror(terrno), (uint64_t)pBlock->aggrOffset, sizeAggr);
@@ -496,7 +496,7 @@ int tsdbLoadBlockStatis(SReadH *pReadh, SBlock *pBlock) {
if (nreadAggr < sizeAggr) {
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
- tsdbError("vgId:%d block statis part for uid %" PRIu64 " in file %s is corrupted, offset:%" PRIu64
+ tsdbError("vgId:%d, block statis part for uid %" PRIu64 " in file %s is corrupted, offset:%" PRIu64
" expected bytes:%" PRIzu " read bytes: %" PRId64,
TSDB_READ_REPO_ID(pReadh), TSDB_READ_TABLE_UID(pReadh), TSDB_FILE_FULL_NAME(pDFileAggr),
(uint64_t)pBlock->aggrOffset, sizeAggr, nreadAggr);
@@ -505,7 +505,7 @@ int tsdbLoadBlockStatis(SReadH *pReadh, SBlock *pBlock) {
if (!taosCheckChecksumWhole((uint8_t *)(pReadh->pAggrBlkData), (uint32_t)sizeAggr)) {
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
- tsdbError("vgId:%d block statis part for uid %" PRIu64
+ tsdbError("vgId:%d, block statis part for uid %" PRIu64
"in file %s is corrupted since wrong checksum, offset:%" PRIu64 " len :%" PRIzu,
TSDB_READ_REPO_ID(pReadh), TSDB_READ_TABLE_UID(pReadh), TSDB_FILE_FULL_NAME(pDFileAggr),
(uint64_t)pBlock->aggrOffset, sizeAggr);
@@ -518,7 +518,7 @@ static int tsdbLoadBlockOffset(SReadH *pReadh, SBlock *pBlock) {
ASSERT(pBlock->numOfSubBlocks <= 1);
SDFile *pDFile = (pBlock->last) ? TSDB_READ_LAST_FILE(pReadh) : TSDB_READ_DATA_FILE(pReadh);
if (tsdbSeekDFile(pDFile, pBlock->offset, SEEK_SET) < 0) {
- tsdbError("vgId:%d failed to load block head part while seek file %s to offset %" PRId64 " since %s",
+ tsdbError("vgId:%d, failed to load block head part while seek file %s to offset %" PRId64 " since %s",
TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), (int64_t)pBlock->offset, tstrerror(terrno));
return -1;
}
@@ -528,14 +528,14 @@ static int tsdbLoadBlockOffset(SReadH *pReadh, SBlock *pBlock) {
int64_t nread = tsdbReadDFile(pDFile, (void *)(pReadh->pBlkData), size);
if (nread < 0) {
- tsdbError("vgId:%d failed to load block head part while read file %s since %s, offset:%" PRId64 " len :%" PRIzu,
+ tsdbError("vgId:%d, failed to load block head part while read file %s since %s, offset:%" PRId64 " len :%" PRIzu,
TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), tstrerror(terrno), (int64_t)pBlock->offset, size);
return -1;
}
if (nread < size) {
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
- tsdbError("vgId:%d block head part in file %s is corrupted, offset:%" PRId64 " expected bytes:%" PRIzu
+ tsdbError("vgId:%d, block head part in file %s is corrupted, offset:%" PRId64 " expected bytes:%" PRIzu
" read bytes: %" PRId64,
TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), (int64_t)pBlock->offset, size, nread);
return -1;
@@ -543,7 +543,7 @@ static int tsdbLoadBlockOffset(SReadH *pReadh, SBlock *pBlock) {
if (!taosCheckChecksumWhole((uint8_t *)(pReadh->pBlkData), (uint32_t)size)) {
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
- tsdbError("vgId:%d block head part in file %s is corrupted since wrong checksum, offset:%" PRId64 " len :%" PRIzu,
+ tsdbError("vgId:%d, block head part in file %s is corrupted since wrong checksum, offset:%" PRId64 " len :%" PRIzu,
TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), (int64_t)pBlock->offset, size);
return -1;
}
@@ -671,14 +671,14 @@ static int tsdbLoadBlockDataImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *pDat
SBlockData *pBlockData = (SBlockData *)TSDB_READ_BUF(pReadh);
if (tsdbSeekDFile(pDFile, pBlock->offset, SEEK_SET) < 0) {
- tsdbError("vgId:%d failed to load block data part while seek file %s to offset %" PRId64 " since %s",
+ tsdbError("vgId:%d, failed to load block data part while seek file %s to offset %" PRId64 " since %s",
TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), (int64_t)pBlock->offset, tstrerror(terrno));
return -1;
}
int64_t nread = tsdbReadDFile(pDFile, TSDB_READ_BUF(pReadh), pBlock->len);
if (nread < 0) {
- tsdbError("vgId:%d failed to load block data part while read file %s since %s, offset:%" PRId64 " len :%d",
+ tsdbError("vgId:%d, failed to load block data part while read file %s since %s, offset:%" PRId64 " len :%d",
TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), tstrerror(terrno), (int64_t)pBlock->offset,
pBlock->len);
return -1;
@@ -686,7 +686,7 @@ static int tsdbLoadBlockDataImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *pDat
if (nread < pBlock->len) {
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
- tsdbError("vgId:%d block data part in file %s is corrupted, offset:%" PRId64
+ tsdbError("vgId:%d, block data part in file %s is corrupted, offset:%" PRId64
" expected bytes:%d read bytes: %" PRId64,
TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), (int64_t)pBlock->offset, pBlock->len, nread);
return -1;
@@ -695,7 +695,7 @@ static int tsdbLoadBlockDataImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *pDat
int32_t tsize = (int32_t)tsdbBlockStatisSize(pBlock->numOfCols, (uint32_t)pBlock->blkVer);
if (!taosCheckChecksumWhole((uint8_t *)TSDB_READ_BUF(pReadh), tsize)) {
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
- tsdbError("vgId:%d block head part in file %s is corrupted since wrong checksum, offset:%" PRId64 " len :%d",
+ tsdbError("vgId:%d, block head part in file %s is corrupted since wrong checksum, offset:%" PRId64 " len :%d",
TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), (int64_t)pBlock->offset, tsize);
return -1;
}
@@ -750,7 +750,7 @@ static int tsdbLoadBlockDataImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *pDat
pBlockCol ? pBlockCol->blen : 0, pBlock->algorithm, pBlock->numOfRows,
tLenBitmap, pDataCols->maxPoints, TSDB_READ_COMP_BUF(pReadh),
(int)taosTSizeof(TSDB_READ_COMP_BUF(pReadh))) < 0) {
- tsdbError("vgId:%d file %s is broken at column %d block offset %" PRId64 " column offset %u",
+ tsdbError("vgId:%d, file %s is broken at column %d block offset %" PRId64 " column offset %u",
TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), tcolId, (int64_t)pBlock->offset, toffset);
return -1;
}
@@ -945,21 +945,21 @@ static int tsdbLoadColData(SReadH *pReadh, SDFile *pDFile, SBlock *pBlock, SBloc
int64_t offset = pBlock->offset + tsdbBlockStatisSize(pBlock->numOfCols, (uint32_t)pBlock->blkVer) +
tsdbGetBlockColOffset(pBlockCol);
if (tsdbSeekDFile(pDFile, offset, SEEK_SET) < 0) {
- tsdbError("vgId:%d failed to load block column data while seek file %s to offset %" PRId64 " since %s",
+ tsdbError("vgId:%d, failed to load block column data while seek file %s to offset %" PRId64 " since %s",
TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), offset, tstrerror(terrno));
return -1;
}
int64_t nread = tsdbReadDFile(pDFile, TSDB_READ_BUF(pReadh), pBlockCol->len);
if (nread < 0) {
- tsdbError("vgId:%d failed to load block column data while read file %s since %s, offset:%" PRId64 " len :%d",
+ tsdbError("vgId:%d, failed to load block column data while read file %s since %s, offset:%" PRId64 " len :%d",
TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), tstrerror(terrno), offset, pBlockCol->len);
return -1;
}
if (nread < pBlockCol->len) {
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
- tsdbError("vgId:%d block column data in file %s is corrupted, offset:%" PRId64 " expected bytes:%d" PRIzu
+ tsdbError("vgId:%d, block column data in file %s is corrupted, offset:%" PRId64 " expected bytes:%d" PRIzu
" read bytes: %" PRId64,
TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), offset, pBlockCol->len, nread);
return -1;
@@ -968,7 +968,7 @@ static int tsdbLoadColData(SReadH *pReadh, SDFile *pDFile, SBlock *pBlock, SBloc
if (tsdbCheckAndDecodeColumnData(pDataCol, pReadh->pBuf, pBlockCol->len, pBlockCol->blen, pBlock->algorithm,
pBlock->numOfRows, tLenBitmap, pCfg->maxRows, pReadh->pCBuf,
(int32_t)taosTSizeof(pReadh->pCBuf)) < 0) {
- tsdbError("vgId:%d file %s is broken at column %d offset %" PRId64, REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile),
+ tsdbError("vgId:%d, file %s is broken at column %d offset %" PRId64, REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile),
pBlockCol->colId, offset);
return -1;
}
diff --git a/source/dnode/vnode/src/tsdb/tsdbSma.c b/source/dnode/vnode/src/tsdb/tsdbSma.c
deleted file mode 100644
index 18cf18dbad32bb1a780d098c0343c8c7894f700b..0000000000000000000000000000000000000000
--- a/source/dnode/vnode/src/tsdb/tsdbSma.c
+++ /dev/null
@@ -1,2203 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-#include "tsdbSma.h"
-#include "tsdb.h"
-
-static const char *TSDB_SMA_DNAME[] = {
- "", // TSDB_SMA_TYPE_BLOCK
- "tsma", // TSDB_SMA_TYPE_TIME_RANGE
- "rsma", // TSDB_SMA_TYPE_ROLLUP
-};
-
-#undef _TEST_SMA_PRINT_DEBUG_LOG_
-#define SMA_STORAGE_TSDB_DAYS 30
-#define SMA_STORAGE_TSDB_TIMES 10
-#define SMA_STORAGE_SPLIT_HOURS 24
-#define SMA_KEY_LEN 16 // TSKEY+groupId 8+8
-#define SMA_DROP_EXPIRED_TIME 10 // default is 10 seconds
-
-#define SMA_STATE_HASH_SLOT 4
-#define SMA_STATE_ITEM_HASH_SLOT 32
-
-#define SMA_TEST_INDEX_NAME "smaTestIndexName" // TODO: just for test
-#define SMA_TEST_INDEX_UID 2000000001 // TODO: just for test
-
-typedef struct SRSmaInfo SRSmaInfo;
-typedef enum {
- SMA_STORAGE_LEVEL_TSDB = 0, // use days of self-defined e.g. vnode${N}/tsdb/tsma/sma_index_uid/v2f200.tsma
- SMA_STORAGE_LEVEL_DFILESET = 1 // use days of TS data e.g. vnode${N}/tsdb/tsma/sma_index_uid/v2f1906.tsma
-} ESmaStorageLevel;
-
-typedef struct SPoolMem {
- int64_t size;
- struct SPoolMem *prev;
- struct SPoolMem *next;
-} SPoolMem;
-
-struct SSmaEnv {
- TdThreadRwlock lock;
- int8_t type;
- TXN txn;
- SPoolMem *pPool;
- SDiskID did;
- TDB *dbEnv; // TODO: If it's better to put it in smaIndex level?
- char *path; // relative path
- SSmaStat *pStat;
-};
-
-#define SMA_ENV_LOCK(env) ((env)->lock)
-#define SMA_ENV_TYPE(env) ((env)->type)
-#define SMA_ENV_DID(env) ((env)->did)
-#define SMA_ENV_ENV(env) ((env)->dbEnv)
-#define SMA_ENV_PATH(env) ((env)->path)
-#define SMA_ENV_STAT(env) ((env)->pStat)
-#define SMA_ENV_STAT_ITEMS(env) ((env)->pStat->smaStatItems)
-
-typedef struct {
- STsdb *pTsdb;
- SDBFile dFile;
- const SArray *pDataBlocks; // sma data
- int32_t interval; // interval with the precision of DB
-} STSmaWriteH;
-
-typedef struct {
- int32_t iter;
- int32_t fid;
-} SmaFsIter;
-
-typedef struct {
- STsdb *pTsdb;
- SDBFile dFile;
- int32_t interval; // interval with the precision of DB
- int32_t blockSize; // size of SMA block item
- int8_t storageLevel;
- int8_t days;
- SmaFsIter smaFsIter;
-} STSmaReadH;
-
-typedef struct {
- /**
- * @brief The field 'state' is here to demonstrate if one smaIndex is ready to provide service.
- * - TSDB_SMA_STAT_OK: 1) The sma calculation of history data is finished; 2) Or recevied information from
- * Streaming Module or TSDB local persistence.
- * - TSDB_SMA_STAT_EXPIRED: 1) If sma calculation of history TS data is not finished; 2) Or if the TSDB is open,
- * without information about its previous state.
- * - TSDB_SMA_STAT_DROPPED: 1)sma dropped
- * N.B. only applicable to tsma
- */
- int8_t state; // ETsdbSmaStat
- SHashObj *expiredWindows; // key: skey of time window, value: N/A
- STSma *pSma; // cache schema
-} SSmaStatItem;
-
-#define RSMA_TASK_INFO_HASH_SLOT 8
-struct SRSmaInfo {
- void *taskInfo[TSDB_RETENTION_L2]; // qTaskInfo_t
-};
-
-struct SSmaStat {
- union {
- SHashObj *smaStatItems; // key: indexUid, value: SSmaStatItem for tsma
- SHashObj *rsmaInfoHash; // key: stbUid, value: SRSmaInfo;
- };
- T_REF_DECLARE()
-};
-#define SMA_STAT_ITEMS(s) ((s)->smaStatItems)
-#define SMA_STAT_INFO_HASH(s) ((s)->rsmaInfoHash)
-
-static FORCE_INLINE void tsdbFreeTaskHandle(qTaskInfo_t *taskHandle) {
- // Note: free/kill may in RC
- qTaskInfo_t otaskHandle = atomic_load_ptr(taskHandle);
- if (otaskHandle && atomic_val_compare_exchange_ptr(taskHandle, otaskHandle, NULL)) {
- qDestroyTask(otaskHandle);
- }
-}
-
-static FORCE_INLINE void *tsdbFreeRSmaInfo(SRSmaInfo *pInfo) {
- for (int32_t i = 0; i < TSDB_RETENTION_MAX; ++i) {
- if (pInfo->taskInfo[i]) {
- tsdbFreeTaskHandle(pInfo->taskInfo[i]);
- }
- }
- return NULL;
-}
-
-// declaration of static functions
-
-// expired window
-static int32_t tsdbUpdateExpiredWindowImpl(STsdb *pTsdb, SSubmitReq *pMsg, int64_t version);
-static int32_t tsdbSetExpiredWindow(STsdb *pTsdb, SHashObj *pItemsHash, int64_t indexUid, int64_t winSKey,
- int64_t version);
-static int32_t tsdbInitSmaStat(SSmaStat **pSmaStat, int8_t smaType);
-static void *tsdbFreeSmaStatItem(SSmaStatItem *pSmaStatItem);
-static int32_t tsdbDestroySmaState(SSmaStat *pSmaStat, int8_t smaType);
-static SSmaEnv *tsdbNewSmaEnv(const STsdb *pTsdb, int8_t smaType, const char *path, SDiskID did);
-static int32_t tsdbInitSmaEnv(STsdb *pTsdb, int8_t smaType, const char *path, SDiskID did, SSmaEnv **pEnv);
-static int32_t tsdbResetExpiredWindow(STsdb *pTsdb, SSmaStat *pStat, int64_t indexUid, TSKEY skey);
-static int32_t tsdbRefSmaStat(STsdb *pTsdb, SSmaStat *pStat);
-static int32_t tsdbUnRefSmaStat(STsdb *pTsdb, SSmaStat *pStat);
-
-// read data
-// TODO: This is the basic params, and should wrap the params to a queryHandle.
-static int32_t tsdbGetTSmaDataImpl(STsdb *pTsdb, char *pData, int64_t indexUid, TSKEY querySKey, int32_t nMaxResult);
-
-// insert data
-static int32_t tsdbInitTSmaWriteH(STSmaWriteH *pSmaH, STsdb *pTsdb, const SArray *pDataBlocks, int64_t interval,
- int8_t intervalUnit);
-static void tsdbDestroyTSmaWriteH(STSmaWriteH *pSmaH);
-static int32_t tsdbInitTSmaReadH(STSmaReadH *pSmaH, STsdb *pTsdb, int64_t interval, int8_t intervalUnit);
-static int32_t tsdbGetSmaStorageLevel(int64_t interval, int8_t intervalUnit);
-static int32_t tsdbSetRSmaDataFile(STSmaWriteH *pSmaH, int32_t fid);
-static int32_t tsdbInsertTSmaBlocks(STSmaWriteH *pSmaH, void *smaKey, int32_t keyLen, void *pData, int32_t dataLen,
- TXN *txn);
-static int64_t tsdbGetIntervalByPrecision(int64_t interval, uint8_t intervalUnit, int8_t precision, bool adjusted);
-static int32_t tsdbGetTSmaDays(STsdb *pTsdb, int64_t interval, int32_t storageLevel);
-static int32_t tsdbSetTSmaDataFile(STSmaWriteH *pSmaH, int64_t indexUid, int32_t fid);
-static int32_t tsdbInitTSmaFile(STSmaReadH *pSmaH, int64_t indexUid, TSKEY skey);
-static bool tsdbSetAndOpenTSmaFile(STSmaReadH *pReadH, TSKEY *queryKey);
-static void tsdbGetSmaDir(int32_t vgId, ETsdbSmaType smaType, char dirName[]);
-static int32_t tsdbInsertTSmaDataImpl(STsdb *pTsdb, int64_t indexUid, const char *msg);
-static int32_t tsdbInsertRSmaDataImpl(STsdb *pTsdb, const char *msg);
-
-static FORCE_INLINE int32_t tsdbUidStorePut(STbUidStore *pStore, tb_uid_t suid, tb_uid_t *uid);
-static FORCE_INLINE int32_t tsdbUpdateTbUidListImpl(STsdb *pTsdb, tb_uid_t *suid, SArray *tbUids);
-static FORCE_INLINE int32_t tsdbExecuteRSmaImpl(STsdb *pTsdb, const void *pMsg, int32_t inputType,
- qTaskInfo_t *taskInfo, STSchema *pTSchema, tb_uid_t suid, tb_uid_t uid,
- int8_t level);
-// mgmt interface
-static int32_t tsdbDropTSmaDataImpl(STsdb *pTsdb, int64_t indexUid);
-
-// Pool Memory
-static SPoolMem *openPool();
-static void clearPool(SPoolMem *pPool);
-static void closePool(SPoolMem *pPool);
-static void *poolMalloc(void *arg, size_t size);
-static void poolFree(void *arg, void *ptr);
-
-static int tsdbSmaBeginCommit(SSmaEnv *pEnv);
-static int tsdbSmaEndCommit(SSmaEnv *pEnv);
-
-// implementation
-static FORCE_INLINE int16_t tsdbTSmaAdd(STsdb *pTsdb, int16_t n) {
- return atomic_add_fetch_16(&REPO_TSMA_NUM(pTsdb), n);
-}
-static FORCE_INLINE int16_t tsdbTSmaSub(STsdb *pTsdb, int16_t n) {
- return atomic_sub_fetch_16(&REPO_TSMA_NUM(pTsdb), n);
-}
-
-static FORCE_INLINE int32_t tsdbRLockSma(SSmaEnv *pEnv) {
- int code = taosThreadRwlockRdlock(&(pEnv->lock));
- if (code != 0) {
- terrno = TAOS_SYSTEM_ERROR(code);
- return -1;
- }
- return 0;
-}
-
-static FORCE_INLINE int32_t tsdbWLockSma(SSmaEnv *pEnv) {
- int code = taosThreadRwlockWrlock(&(pEnv->lock));
- if (code != 0) {
- terrno = TAOS_SYSTEM_ERROR(code);
- return -1;
- }
- return 0;
-}
-
-static FORCE_INLINE int32_t tsdbUnLockSma(SSmaEnv *pEnv) {
- int code = taosThreadRwlockUnlock(&(pEnv->lock));
- if (code != 0) {
- terrno = TAOS_SYSTEM_ERROR(code);
- return -1;
- }
- return 0;
-}
-
-static SPoolMem *openPool() {
- SPoolMem *pPool = (SPoolMem *)taosMemoryMalloc(sizeof(*pPool));
-
- pPool->prev = pPool->next = pPool;
- pPool->size = 0;
-
- return pPool;
-}
-
-static void clearPool(SPoolMem *pPool) {
- if (!pPool) return;
-
- SPoolMem *pMem;
-
- do {
- pMem = pPool->next;
-
- if (pMem == pPool) break;
-
- pMem->next->prev = pMem->prev;
- pMem->prev->next = pMem->next;
- pPool->size -= pMem->size;
-
- taosMemoryFree(pMem);
- } while (1);
-
- assert(pPool->size == 0);
-}
-
-static void closePool(SPoolMem *pPool) {
- if (pPool) {
- clearPool(pPool);
- taosMemoryFree(pPool);
- }
-}
-
-static void *poolMalloc(void *arg, size_t size) {
- void *ptr = NULL;
- SPoolMem *pPool = (SPoolMem *)arg;
- SPoolMem *pMem;
-
- pMem = (SPoolMem *)taosMemoryMalloc(sizeof(*pMem) + size);
- if (!pMem) {
- assert(0);
- }
-
- pMem->size = sizeof(*pMem) + size;
- pMem->next = pPool->next;
- pMem->prev = pPool;
-
- pPool->next->prev = pMem;
- pPool->next = pMem;
- pPool->size += pMem->size;
-
- ptr = (void *)(&pMem[1]);
- return ptr;
-}
-
-static void poolFree(void *arg, void *ptr) {
- SPoolMem *pPool = (SPoolMem *)arg;
- SPoolMem *pMem;
-
- pMem = &(((SPoolMem *)ptr)[-1]);
-
- pMem->next->prev = pMem->prev;
- pMem->prev->next = pMem->next;
- pPool->size -= pMem->size;
-
- taosMemoryFree(pMem);
-}
-
-int32_t tsdbInitSma(STsdb *pTsdb) {
- // tSma
- int32_t numOfTSma = taosArrayGetSize(metaGetSmaTbUids(REPO_META(pTsdb), false));
- if (numOfTSma > 0) {
- atomic_store_16(&REPO_TSMA_NUM(pTsdb), (int16_t)numOfTSma);
- }
- // TODO: rSma
- return TSDB_CODE_SUCCESS;
-}
-
-static FORCE_INLINE int8_t tsdbSmaStat(SSmaStatItem *pStatItem) {
- if (pStatItem) {
- return atomic_load_8(&pStatItem->state);
- }
- return TSDB_SMA_STAT_UNKNOWN;
-}
-
-static FORCE_INLINE bool tsdbSmaStatIsOK(SSmaStatItem *pStatItem, int8_t *state) {
- if (!pStatItem) {
- return false;
- }
-
- if (state) {
- *state = atomic_load_8(&pStatItem->state);
- return *state == TSDB_SMA_STAT_OK;
- }
- return atomic_load_8(&pStatItem->state) == TSDB_SMA_STAT_OK;
-}
-
-static FORCE_INLINE bool tsdbSmaStatIsExpired(SSmaStatItem *pStatItem) {
- return pStatItem ? (atomic_load_8(&pStatItem->state) & TSDB_SMA_STAT_EXPIRED) : true;
-}
-
-static FORCE_INLINE bool tsdbSmaStatIsDropped(SSmaStatItem *pStatItem) {
- return pStatItem ? (atomic_load_8(&pStatItem->state) & TSDB_SMA_STAT_DROPPED) : true;
-}
-
-static FORCE_INLINE void tsdbSmaStatSetOK(SSmaStatItem *pStatItem) {
- if (pStatItem) {
- atomic_store_8(&pStatItem->state, TSDB_SMA_STAT_OK);
- }
-}
-
-static FORCE_INLINE void tsdbSmaStatSetExpired(SSmaStatItem *pStatItem) {
- if (pStatItem) {
- atomic_or_fetch_8(&pStatItem->state, TSDB_SMA_STAT_EXPIRED);
- }
-}
-
-static FORCE_INLINE void tsdbSmaStatSetDropped(SSmaStatItem *pStatItem) {
- if (pStatItem) {
- atomic_or_fetch_8(&pStatItem->state, TSDB_SMA_STAT_DROPPED);
- }
-}
-
-static void tsdbGetSmaDir(int32_t vgId, ETsdbSmaType smaType, char dirName[]) {
- snprintf(dirName, TSDB_FILENAME_LEN, "vnode%svnode%d%s%s", TD_DIRSEP, vgId, TD_DIRSEP, TSDB_SMA_DNAME[smaType]);
-}
-
-static SSmaEnv *tsdbNewSmaEnv(const STsdb *pTsdb, int8_t smaType, const char *path, SDiskID did) {
- SSmaEnv *pEnv = NULL;
-
- pEnv = (SSmaEnv *)taosMemoryCalloc(1, sizeof(SSmaEnv));
- if (!pEnv) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- return NULL;
- }
-
- SMA_ENV_TYPE(pEnv) = smaType;
-
- int code = taosThreadRwlockInit(&(pEnv->lock), NULL);
- if (code) {
- terrno = TAOS_SYSTEM_ERROR(code);
- taosMemoryFree(pEnv);
- return NULL;
- }
-
- ASSERT(path && (strlen(path) > 0));
- SMA_ENV_PATH(pEnv) = strdup(path);
- if (!SMA_ENV_PATH(pEnv)) {
- tsdbFreeSmaEnv(pEnv);
- return NULL;
- }
-
- SMA_ENV_DID(pEnv) = did;
-
- if (tsdbInitSmaStat(&SMA_ENV_STAT(pEnv), smaType) != TSDB_CODE_SUCCESS) {
- tsdbFreeSmaEnv(pEnv);
- return NULL;
- }
-
- char aname[TSDB_FILENAME_LEN] = {0};
- tfsAbsoluteName(REPO_TFS(pTsdb), did, path, aname);
- if (tsdbOpenDBEnv(&pEnv->dbEnv, aname) != TSDB_CODE_SUCCESS) {
- tsdbFreeSmaEnv(pEnv);
- return NULL;
- }
-
- if (!(pEnv->pPool = openPool())) {
- tsdbFreeSmaEnv(pEnv);
- return NULL;
- }
-
- return pEnv;
-}
-
-static int32_t tsdbInitSmaEnv(STsdb *pTsdb, int8_t smaType, const char *path, SDiskID did, SSmaEnv **pEnv) {
- if (!pEnv) {
- terrno = TSDB_CODE_INVALID_PTR;
- return TSDB_CODE_FAILED;
- }
-
- if (!(*pEnv)) {
- if (!(*pEnv = tsdbNewSmaEnv(pTsdb, smaType, path, did))) {
- return TSDB_CODE_FAILED;
- }
- }
-
- return TSDB_CODE_SUCCESS;
-}
-
-/**
- * @brief Release resources allocated for its member fields, not including itself.
- *
- * @param pSmaEnv
- * @return int32_t
- */
-void tsdbDestroySmaEnv(SSmaEnv *pSmaEnv) {
- if (pSmaEnv) {
- tsdbDestroySmaState(pSmaEnv->pStat, SMA_ENV_TYPE(pSmaEnv));
- taosMemoryFreeClear(pSmaEnv->pStat);
- taosMemoryFreeClear(pSmaEnv->path);
- taosThreadRwlockDestroy(&(pSmaEnv->lock));
- tsdbCloseDBEnv(pSmaEnv->dbEnv);
- closePool(pSmaEnv->pPool);
- }
-}
-
-void *tsdbFreeSmaEnv(SSmaEnv *pSmaEnv) {
- tsdbDestroySmaEnv(pSmaEnv);
- taosMemoryFreeClear(pSmaEnv);
- return NULL;
-}
-
-static int32_t tsdbRefSmaStat(STsdb *pTsdb, SSmaStat *pStat) {
- if (!pStat) return 0;
-
- int ref = T_REF_INC(pStat);
- tsdbDebug("vgId:%d ref sma stat:%p, val:%d", REPO_ID(pTsdb), pStat, ref);
- return 0;
-}
-
-static int32_t tsdbUnRefSmaStat(STsdb *pTsdb, SSmaStat *pStat) {
- if (!pStat) return 0;
-
- int ref = T_REF_DEC(pStat);
- tsdbDebug("vgId:%d unref sma stat:%p, val:%d", REPO_ID(pTsdb), pStat, ref);
- return 0;
-}
-
-static int32_t tsdbInitSmaStat(SSmaStat **pSmaStat, int8_t smaType) {
- ASSERT(pSmaStat != NULL);
-
- if (*pSmaStat) { // no lock
- return TSDB_CODE_SUCCESS;
- }
-
- /**
- * 1. Lazy mode utilized when init SSmaStat to update expired window(or hungry mode when tsdbNew).
- * 2. Currently, there is mutex lock when init SSmaEnv, thus no need add lock on SSmaStat, and please add lock if
- * tsdbInitSmaStat invoked in other multithread environment later.
- */
- if (!(*pSmaStat)) {
- *pSmaStat = (SSmaStat *)taosMemoryCalloc(1, sizeof(SSmaStat));
- if (!(*pSmaStat)) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- return TSDB_CODE_FAILED;
- }
-
- if (smaType == TSDB_SMA_TYPE_ROLLUP) {
- SMA_STAT_INFO_HASH(*pSmaStat) = taosHashInit(
- RSMA_TASK_INFO_HASH_SLOT, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK);
-
- if (!SMA_STAT_INFO_HASH(*pSmaStat)) {
- taosMemoryFreeClear(*pSmaStat);
- return TSDB_CODE_FAILED;
- }
- } else if (smaType == TSDB_SMA_TYPE_TIME_RANGE) {
- SMA_STAT_ITEMS(*pSmaStat) =
- taosHashInit(SMA_STATE_HASH_SLOT, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);
-
- if (!SMA_STAT_ITEMS(*pSmaStat)) {
- taosMemoryFreeClear(*pSmaStat);
- return TSDB_CODE_FAILED;
- }
- } else {
- ASSERT(0);
- }
- }
- return TSDB_CODE_SUCCESS;
-}
-
-static SSmaStatItem *tsdbNewSmaStatItem(int8_t state) {
- SSmaStatItem *pItem = NULL;
-
- pItem = (SSmaStatItem *)taosMemoryCalloc(1, sizeof(SSmaStatItem));
- if (pItem) {
- pItem->state = state;
- pItem->expiredWindows = taosHashInit(SMA_STATE_ITEM_HASH_SLOT, taosGetDefaultHashFunction(TSDB_DATA_TYPE_TIMESTAMP),
- true, HASH_ENTRY_LOCK);
- if (!pItem->expiredWindows) {
- taosMemoryFreeClear(pItem);
- }
- }
- return pItem;
-}
-
-static void *tsdbFreeSmaStatItem(SSmaStatItem *pSmaStatItem) {
- if (pSmaStatItem) {
- tdDestroyTSma(pSmaStatItem->pSma);
- taosMemoryFreeClear(pSmaStatItem->pSma);
- taosHashCleanup(pSmaStatItem->expiredWindows);
- taosMemoryFreeClear(pSmaStatItem);
- }
- return NULL;
-}
-
-/**
- * @brief Release resources allocated for its member fields, not including itself.
- *
- * @param pSmaStat
- * @return int32_t
- */
-int32_t tsdbDestroySmaState(SSmaStat *pSmaStat, int8_t smaType) {
- if (pSmaStat) {
- // TODO: use taosHashSetFreeFp when taosHashSetFreeFp is ready.
- if (smaType == TSDB_SMA_TYPE_TIME_RANGE) {
- void *item = taosHashIterate(SMA_STAT_ITEMS(pSmaStat), NULL);
- while (item) {
- SSmaStatItem *pItem = *(SSmaStatItem **)item;
- tsdbFreeSmaStatItem(pItem);
- item = taosHashIterate(SMA_STAT_ITEMS(pSmaStat), item);
- }
- taosHashCleanup(SMA_STAT_ITEMS(pSmaStat));
- } else if (smaType == TSDB_SMA_TYPE_ROLLUP) {
- void *infoHash = taosHashIterate(SMA_STAT_INFO_HASH(pSmaStat), NULL);
- while (infoHash) {
- SRSmaInfo *pInfoHash = *(SRSmaInfo **)infoHash;
- tsdbFreeRSmaInfo(pInfoHash);
- infoHash = taosHashIterate(SMA_STAT_INFO_HASH(pSmaStat), infoHash);
- }
- taosHashCleanup(SMA_STAT_INFO_HASH(pSmaStat));
- } else {
- ASSERT(0);
- }
- }
- return TSDB_CODE_SUCCESS;
-}
-
-static int32_t tsdbCheckAndInitSmaEnv(STsdb *pTsdb, int8_t smaType) {
- SSmaEnv *pEnv = NULL;
-
- // return if already init
- switch (smaType) {
- case TSDB_SMA_TYPE_TIME_RANGE:
- if ((pEnv = (SSmaEnv *)atomic_load_ptr(&REPO_TSMA_ENV(pTsdb)))) {
- return TSDB_CODE_SUCCESS;
- }
- break;
- case TSDB_SMA_TYPE_ROLLUP:
- if ((pEnv = (SSmaEnv *)atomic_load_ptr(&REPO_RSMA_ENV(pTsdb)))) {
- return TSDB_CODE_SUCCESS;
- }
- break;
- default:
- terrno = TSDB_CODE_INVALID_PARA;
- return TSDB_CODE_FAILED;
- }
-
- // init sma env
- tsdbLockRepo(pTsdb);
- pEnv = (smaType == TSDB_SMA_TYPE_TIME_RANGE) ? atomic_load_ptr(&REPO_TSMA_ENV(pTsdb))
- : atomic_load_ptr(&REPO_RSMA_ENV(pTsdb));
- if (!pEnv) {
- char rname[TSDB_FILENAME_LEN] = {0};
-
- SDiskID did = {0};
- tfsAllocDisk(REPO_TFS(pTsdb), TFS_PRIMARY_LEVEL, &did);
- if (did.level < 0 || did.id < 0) {
- tsdbUnlockRepo(pTsdb);
- return TSDB_CODE_FAILED;
- }
- tsdbGetSmaDir(REPO_ID(pTsdb), smaType, rname);
-
- if (tfsMkdirRecurAt(REPO_TFS(pTsdb), rname, did) != TSDB_CODE_SUCCESS) {
- tsdbUnlockRepo(pTsdb);
- return TSDB_CODE_FAILED;
- }
-
- if (tsdbInitSmaEnv(pTsdb, smaType, rname, did, &pEnv) != TSDB_CODE_SUCCESS) {
- tsdbUnlockRepo(pTsdb);
- return TSDB_CODE_FAILED;
- }
-
- (smaType == TSDB_SMA_TYPE_TIME_RANGE) ? atomic_store_ptr(&REPO_TSMA_ENV(pTsdb), pEnv)
- : atomic_store_ptr(&REPO_RSMA_ENV(pTsdb), pEnv);
- }
- tsdbUnlockRepo(pTsdb);
-
- return TSDB_CODE_SUCCESS;
-};
-
-static int32_t tsdbSetExpiredWindow(STsdb *pTsdb, SHashObj *pItemsHash, int64_t indexUid, int64_t winSKey,
- int64_t version) {
- SSmaStatItem *pItem = taosHashGet(pItemsHash, &indexUid, sizeof(indexUid));
- if (!pItem) {
- // TODO: use TSDB_SMA_STAT_EXPIRED and update by stream computing later
- pItem = tsdbNewSmaStatItem(TSDB_SMA_STAT_OK); // TODO use the real state
- if (!pItem) {
- // Response to stream computing: OOM
- // For query, if the indexUid not found, the TSDB should tell query module to query raw TS data.
- return TSDB_CODE_FAILED;
- }
-
- // cache smaMeta
- STSma *pSma = metaGetSmaInfoByIndex(REPO_META(pTsdb), indexUid, true);
- if (!pSma) {
- terrno = TSDB_CODE_TDB_NO_SMA_INDEX_IN_META;
- taosHashCleanup(pItem->expiredWindows);
- taosMemoryFree(pItem);
- tsdbWarn("vgId:%d update expired window failed for smaIndex %" PRIi64 " since %s", REPO_ID(pTsdb), indexUid,
- tstrerror(terrno));
- return TSDB_CODE_FAILED;
- }
- pItem->pSma = pSma;
-
- if (taosHashPut(pItemsHash, &indexUid, sizeof(indexUid), &pItem, sizeof(pItem)) != 0) {
- // If error occurs during put smaStatItem, free the resources of pItem
- taosHashCleanup(pItem->expiredWindows);
- taosMemoryFree(pItem);
- return TSDB_CODE_FAILED;
- }
- } else if (!(pItem = *(SSmaStatItem **)pItem)) {
- terrno = TSDB_CODE_INVALID_PTR;
- return TSDB_CODE_FAILED;
- }
-
- if (taosHashPut(pItem->expiredWindows, &winSKey, sizeof(TSKEY), &version, sizeof(version)) != 0) {
- // If error occurs during taosHashPut expired windows, remove the smaIndex from pTsdb->pSmaStat, thus TSDB would
- // tell query module to query raw TS data.
- // N.B.
- // 1) It is assumed to be extemely little probability event of fail to taosHashPut.
- // 2) This would solve the inconsistency to some extent, but not completely, unless we record all expired
- // windows failed to put into hash table.
- taosHashCleanup(pItem->expiredWindows);
- taosMemoryFreeClear(pItem->pSma);
- taosHashRemove(pItemsHash, &indexUid, sizeof(indexUid));
- tsdbWarn("vgId:%d smaIndex %" PRIi64 ", put skey %" PRIi64 " to expire window fail", REPO_ID(pTsdb), indexUid,
- winSKey);
- return TSDB_CODE_FAILED;
- }
-
- tsdbDebug("vgId:%d smaIndex %" PRIi64 ", put skey %" PRIi64 " to expire window succeed", REPO_ID(pTsdb), indexUid,
- winSKey);
- return TSDB_CODE_SUCCESS;
-}
-
-/**
- * @brief Update expired window according to msg from stream computing module.
- *
- * @param pTsdb
- * @param msg SSubmitReq
- * @return int32_t
- */
-int32_t tsdbUpdateExpiredWindowImpl(STsdb *pTsdb, SSubmitReq *pMsg, int64_t version) {
- // no time-range-sma, just return success
- if (atomic_load_16(&REPO_TSMA_NUM(pTsdb)) <= 0) {
- tsdbTrace("vgId:%d not update expire window since no tSma", REPO_ID(pTsdb));
- return TSDB_CODE_SUCCESS;
- }
-
- if (!REPO_META(pTsdb)) {
- terrno = TSDB_CODE_INVALID_PTR;
- return TSDB_CODE_FAILED;
- }
-
- if (tsdbCheckAndInitSmaEnv(pTsdb, TSDB_SMA_TYPE_TIME_RANGE) != TSDB_CODE_SUCCESS) {
- terrno = TSDB_CODE_TDB_INIT_FAILED;
- return TSDB_CODE_FAILED;
- }
-
- // Firstly, assume that tSma can only be created on super table/normal table.
- // getActiveTimeWindow
-
- SSmaEnv *pEnv = REPO_TSMA_ENV(pTsdb);
- SSmaStat *pStat = SMA_ENV_STAT(pEnv);
- SHashObj *pItemsHash = SMA_ENV_STAT_ITEMS(pEnv);
-
- TASSERT(pEnv && pStat && pItemsHash);
-
- // basic procedure
- // TODO: optimization
- tsdbRefSmaStat(pTsdb, pStat);
-
- SSubmitMsgIter msgIter = {0};
- SSubmitBlk *pBlock = NULL;
- SInterval interval = {0};
- TSKEY lastWinSKey = INT64_MIN;
-
- if (tInitSubmitMsgIter(pMsg, &msgIter) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_FAILED;
- }
-
- while (true) {
- tGetSubmitMsgNext(&msgIter, &pBlock);
- if (!pBlock) break;
-
- STSmaWrapper *pSW = NULL;
- STSma *pTSma = NULL;
-
- SSubmitBlkIter blkIter = {0};
- if (tInitSubmitBlkIter(&msgIter, pBlock, &blkIter) != TSDB_CODE_SUCCESS) {
- pSW = tdFreeTSmaWrapper(pSW);
- break;
- }
-
- while (true) {
- STSRow *row = tGetSubmitBlkNext(&blkIter);
- if (!row) {
- tdFreeTSmaWrapper(pSW);
- break;
- }
- if (!pSW || (pTSma->tableUid != pBlock->suid)) {
- if (pSW) {
- pSW = tdFreeTSmaWrapper(pSW);
- }
- if (!(pSW = metaGetSmaInfoByTable(REPO_META(pTsdb), pBlock->suid))) {
- break;
- }
- if ((pSW->number) <= 0 || !pSW->tSma) {
- pSW = tdFreeTSmaWrapper(pSW);
- break;
- }
-
- pTSma = pSW->tSma;
-
- interval.interval = pTSma->interval;
- interval.intervalUnit = pTSma->intervalUnit;
- interval.offset = pTSma->offset;
- interval.precision = REPO_CFG(pTsdb)->precision;
- interval.sliding = pTSma->sliding;
- interval.slidingUnit = pTSma->slidingUnit;
- }
-
- TSKEY winSKey = taosTimeTruncate(TD_ROW_KEY(row), &interval, interval.precision);
-
- if (lastWinSKey != winSKey) {
- lastWinSKey = winSKey;
- tsdbSetExpiredWindow(pTsdb, pItemsHash, pTSma->indexUid, winSKey, version);
- } else {
- tsdbDebug("vgId:%d smaIndex %" PRIi64 ", put skey %" PRIi64 " to expire window ignore as duplicated",
- REPO_ID(pTsdb), pTSma->indexUid, winSKey);
- }
- }
- }
-
- tsdbUnRefSmaStat(pTsdb, pStat);
-
- return TSDB_CODE_SUCCESS;
-}
-
-/**
- * @brief When sma data received from stream computing, make the relative expired window valid.
- *
- * @param pTsdb
- * @param pStat
- * @param indexUid
- * @param skey
- * @return int32_t
- */
-static int32_t tsdbResetExpiredWindow(STsdb *pTsdb, SSmaStat *pStat, int64_t indexUid, TSKEY skey) {
- SSmaStatItem *pItem = NULL;
-
- tsdbRefSmaStat(pTsdb, pStat);
-
- if (pStat && SMA_STAT_ITEMS(pStat)) {
- pItem = taosHashGet(SMA_STAT_ITEMS(pStat), &indexUid, sizeof(indexUid));
- }
- if ((pItem) && ((pItem = *(SSmaStatItem **)pItem))) {
- // pItem resides in hash buffer all the time unless drop sma index
- // TODO: multithread protect
- if (taosHashRemove(pItem->expiredWindows, &skey, sizeof(TSKEY)) != 0) {
- // error handling
- tsdbUnRefSmaStat(pTsdb, pStat);
- tsdbWarn("vgId:%d remove skey %" PRIi64 " from expired window for sma index %" PRIi64 " fail", REPO_ID(pTsdb),
- skey, indexUid);
- return TSDB_CODE_FAILED;
- }
- tsdbDebug("vgId:%d remove skey %" PRIi64 " from expired window for sma index %" PRIi64 " succeed", REPO_ID(pTsdb),
- skey, indexUid);
- // TODO: use a standalone interface to received state upate notification from stream computing module.
- /**
- * @brief state
- * - When SMA env init in TSDB, its status is TSDB_SMA_STAT_OK.
- * - In startup phase of stream computing module, it should notify the SMA env in TSDB to expired if needed(e.g.
- * when batch data caculation not finised)
- * - When TSDB_SMA_STAT_OK, the stream computing module should also notify that to the SMA env in TSDB.
- */
- pItem->state = TSDB_SMA_STAT_OK;
- } else {
- // error handling
- tsdbUnRefSmaStat(pTsdb, pStat);
- tsdbWarn("vgId:%d expired window %" PRIi64 " not exists for sma index %" PRIi64, REPO_ID(pTsdb), skey, indexUid);
- return TSDB_CODE_FAILED;
- }
-
- tsdbUnRefSmaStat(pTsdb, pStat);
- return TSDB_CODE_SUCCESS;
-}
-
-/**
- * @brief Judge the tSma storage level
- *
- * @param interval
- * @param intervalUnit
- * @return int32_t
- */
-static int32_t tsdbGetSmaStorageLevel(int64_t interval, int8_t intervalUnit) {
- // TODO: configurable for SMA_STORAGE_SPLIT_HOURS?
- switch (intervalUnit) {
- case TIME_UNIT_HOUR:
- if (interval < SMA_STORAGE_SPLIT_HOURS) {
- return SMA_STORAGE_LEVEL_DFILESET;
- }
- break;
- case TIME_UNIT_MINUTE:
- if (interval < 60 * SMA_STORAGE_SPLIT_HOURS) {
- return SMA_STORAGE_LEVEL_DFILESET;
- }
- break;
- case TIME_UNIT_SECOND:
- if (interval < 3600 * SMA_STORAGE_SPLIT_HOURS) {
- return SMA_STORAGE_LEVEL_DFILESET;
- }
- break;
- case TIME_UNIT_MILLISECOND:
- if (interval < 3600 * 1e3 * SMA_STORAGE_SPLIT_HOURS) {
- return SMA_STORAGE_LEVEL_DFILESET;
- }
- break;
- case TIME_UNIT_MICROSECOND:
- if (interval < 3600 * 1e6 * SMA_STORAGE_SPLIT_HOURS) {
- return SMA_STORAGE_LEVEL_DFILESET;
- }
- break;
- case TIME_UNIT_NANOSECOND:
- if (interval < 3600 * 1e9 * SMA_STORAGE_SPLIT_HOURS) {
- return SMA_STORAGE_LEVEL_DFILESET;
- }
- break;
- default:
- break;
- }
- return SMA_STORAGE_LEVEL_TSDB;
-}
-
-/**
- * @brief Insert TSma data blocks to DB File build by B+Tree
- *
- * @param pSmaH
- * @param smaKey tableUid-colId-skeyOfWindow(8-2-8)
- * @param keyLen
- * @param pData
- * @param dataLen
- * @return int32_t
- */
-static int32_t tsdbInsertTSmaBlocks(STSmaWriteH *pSmaH, void *smaKey, int32_t keyLen, void *pData, int32_t dataLen,
- TXN *txn) {
- SDBFile *pDBFile = &pSmaH->dFile;
-
- // TODO: insert tsma data blocks into B+Tree(TTB)
- if (tsdbSaveSmaToDB(pDBFile, smaKey, keyLen, pData, dataLen, txn) != 0) {
- tsdbWarn("vgId:%d insert tsma data blocks into %s: smaKey %" PRIx64 "-%" PRIx64 ", dataLen %" PRIu32 " fail",
- REPO_ID(pSmaH->pTsdb), pDBFile->path, *(int64_t *)smaKey, *(int64_t *)POINTER_SHIFT(smaKey, 8), dataLen);
- return TSDB_CODE_FAILED;
- }
- tsdbDebug("vgId:%d insert tsma data blocks into %s: smaKey %" PRIx64 "-%" PRIx64 ", dataLen %" PRIu32 " succeed",
- REPO_ID(pSmaH->pTsdb), pDBFile->path, *(int64_t *)smaKey, *(int64_t *)POINTER_SHIFT(smaKey, 8), dataLen);
-
-#ifdef _TEST_SMA_PRINT_DEBUG_LOG_
- uint32_t valueSize = 0;
- void *data = tsdbGetSmaDataByKey(pDBFile, smaKey, keyLen, &valueSize);
- ASSERT(data != NULL);
- for (uint32_t v = 0; v < valueSize; v += 8) {
- tsdbWarn("vgId:%d insert sma data val[%d] %" PRIi64, REPO_ID(pSmaH->pTsdb), v, *(int64_t *)POINTER_SHIFT(data, v));
- }
-#endif
- return TSDB_CODE_SUCCESS;
-}
-
-/**
- * @brief Approximate value for week/month/year.
- *
- * @param interval
- * @param intervalUnit
- * @param precision
- * @param adjusted Interval already adjusted according to DB precision
- * @return int64_t
- */
-static int64_t tsdbGetIntervalByPrecision(int64_t interval, uint8_t intervalUnit, int8_t precision, bool adjusted) {
- if (adjusted) {
- return interval;
- }
-
- switch (intervalUnit) {
- case TIME_UNIT_YEAR: // approximate value
- interval *= 365 * 86400 * 1e3;
- break;
- case TIME_UNIT_MONTH: // approximate value
- interval *= 30 * 86400 * 1e3;
- break;
- case TIME_UNIT_WEEK: // approximate value
- interval *= 7 * 86400 * 1e3;
- break;
- case TIME_UNIT_DAY: // the interval for tSma calculation must <= day
- interval *= 86400 * 1e3;
- break;
- case TIME_UNIT_HOUR:
- interval *= 3600 * 1e3;
- break;
- case TIME_UNIT_MINUTE:
- interval *= 60 * 1e3;
- break;
- case TIME_UNIT_SECOND:
- interval *= 1e3;
- break;
- default:
- break;
- }
-
- switch (precision) {
- case TSDB_TIME_PRECISION_MILLI:
- if (TIME_UNIT_MICROSECOND == intervalUnit) { // us
- return interval / 1e3;
- } else if (TIME_UNIT_NANOSECOND == intervalUnit) { // nano second
- return interval / 1e6;
- } else { // ms
- return interval;
- }
- break;
- case TSDB_TIME_PRECISION_MICRO:
- if (TIME_UNIT_MICROSECOND == intervalUnit) { // us
- return interval;
- } else if (TIME_UNIT_NANOSECOND == intervalUnit) { // ns
- return interval / 1e3;
- } else { // ms
- return interval * 1e3;
- }
- break;
- case TSDB_TIME_PRECISION_NANO:
- if (TIME_UNIT_MICROSECOND == intervalUnit) { // us
- return interval * 1e3;
- } else if (TIME_UNIT_NANOSECOND == intervalUnit) { // ns
- return interval;
- } else { // ms
- return interval * 1e6;
- }
- break;
- default: // ms
- if (TIME_UNIT_MICROSECOND == intervalUnit) { // us
- return interval / 1e3;
- } else if (TIME_UNIT_NANOSECOND == intervalUnit) { // ns
- return interval / 1e6;
- } else { // ms
- return interval;
- }
- break;
- }
- return interval;
-}
-
-static int32_t tsdbInitTSmaWriteH(STSmaWriteH *pSmaH, STsdb *pTsdb, const SArray *pDataBlocks, int64_t interval,
- int8_t intervalUnit) {
- pSmaH->pTsdb = pTsdb;
- pSmaH->interval = tsdbGetIntervalByPrecision(interval, intervalUnit, REPO_CFG(pTsdb)->precision, true);
- pSmaH->pDataBlocks = pDataBlocks;
- pSmaH->dFile.fid = TSDB_IVLD_FID;
- return TSDB_CODE_SUCCESS;
-}
-
-static void tsdbDestroyTSmaWriteH(STSmaWriteH *pSmaH) {
- if (pSmaH) {
- tsdbCloseDBF(&pSmaH->dFile);
- }
-}
-
-static int32_t tsdbSetTSmaDataFile(STSmaWriteH *pSmaH, int64_t indexUid, int32_t fid) {
- STsdb *pTsdb = pSmaH->pTsdb;
- ASSERT(!pSmaH->dFile.path && !pSmaH->dFile.pDB);
-
- pSmaH->dFile.fid = fid;
- char tSmaFile[TSDB_FILENAME_LEN] = {0};
- snprintf(tSmaFile, TSDB_FILENAME_LEN, "%" PRIi64 "%sv%df%d.tsma", indexUid, TD_DIRSEP, REPO_ID(pTsdb), fid);
- pSmaH->dFile.path = strdup(tSmaFile);
-
- return TSDB_CODE_SUCCESS;
-}
-
-/**
- * @brief
- *
- * @param pTsdb
- * @param interval Interval calculated by DB's precision
- * @param storageLevel
- * @return int32_t
- */
-static int32_t tsdbGetTSmaDays(STsdb *pTsdb, int64_t interval, int32_t storageLevel) {
- STsdbKeepCfg *pCfg = REPO_KEEP_CFG(pTsdb);
- int32_t daysPerFile = pCfg->days;
-
- if (storageLevel == SMA_STORAGE_LEVEL_TSDB) {
- int32_t days = SMA_STORAGE_TSDB_TIMES * (interval / tsTickPerMin[pCfg->precision]);
- daysPerFile = days > SMA_STORAGE_TSDB_DAYS ? days : SMA_STORAGE_TSDB_DAYS;
- }
-
- return daysPerFile;
-}
-
-static int tsdbSmaBeginCommit(SSmaEnv *pEnv) {
- TXN *pTxn = &pEnv->txn;
- // start a new txn
- tdbTxnOpen(pTxn, 0, poolMalloc, poolFree, pEnv->pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
- if (tdbBegin(pEnv->dbEnv, pTxn) != 0) {
- tsdbWarn("tsdbSma tdb begin commit fail");
- return -1;
- }
- return 0;
-}
-
-static int tsdbSmaEndCommit(SSmaEnv *pEnv) {
- TXN *pTxn = &pEnv->txn;
-
- // Commit current txn
- if (tdbCommit(pEnv->dbEnv, pTxn) != 0) {
- tsdbWarn("tsdbSma tdb end commit fail");
- return -1;
- }
- tdbTxnClose(pTxn);
- clearPool(pEnv->pPool);
- return 0;
-}
-
-/**
- * @brief Insert/Update Time-range-wise SMA data.
- * - If interval < SMA_STORAGE_SPLIT_HOURS(e.g. 24), save the SMA data as a part of DFileSet to e.g.
- * v3f1900.tsma.${sma_index_name}. The days is the same with that for TS data files.
- * - If interval >= SMA_STORAGE_SPLIT_HOURS, save the SMA data to e.g. vnode3/tsma/v3f632.tsma.${sma_index_name}. The
- * days is 30 times of the interval, and the minimum days is SMA_STORAGE_TSDB_DAYS(30d).
- * - The destination file of one data block for some interval is determined by its start TS key.
- *
- * @param pTsdb
- * @param msg
- * @return int32_t
- */
-static int32_t tsdbInsertTSmaDataImpl(STsdb *pTsdb, int64_t indexUid, const char *msg) {
- STsdbCfg *pCfg = REPO_CFG(pTsdb);
- const SArray *pDataBlocks = (const SArray *)msg;
-
- // TODO: destroy SSDataBlocks(msg)
-
- // For super table aggregation, the sma data is stored in vgroup calculated from the hash value of stable name. Thus
- // the sma data would arrive ahead of the update-expired-window msg.
- if (tsdbCheckAndInitSmaEnv(pTsdb, TSDB_SMA_TYPE_TIME_RANGE) != TSDB_CODE_SUCCESS) {
- terrno = TSDB_CODE_TDB_INIT_FAILED;
- return TSDB_CODE_FAILED;
- }
-
- if (!pDataBlocks) {
- terrno = TSDB_CODE_INVALID_PTR;
- tsdbWarn("vgId:%d insert tSma data failed since pDataBlocks is NULL", REPO_ID(pTsdb));
- return terrno;
- }
-
- if (taosArrayGetSize(pDataBlocks) <= 0) {
- terrno = TSDB_CODE_INVALID_PARA;
- tsdbWarn("vgId:%d insert tSma data failed since pDataBlocks is empty", REPO_ID(pTsdb));
- return TSDB_CODE_FAILED;
- }
-
- SSmaEnv *pEnv = REPO_TSMA_ENV(pTsdb);
- SSmaStat *pStat = SMA_ENV_STAT(pEnv);
- SSmaStatItem *pItem = NULL;
-
- tsdbRefSmaStat(pTsdb, pStat);
-
- if (pStat && SMA_STAT_ITEMS(pStat)) {
- pItem = taosHashGet(SMA_STAT_ITEMS(pStat), &indexUid, sizeof(indexUid));
- }
-
- if (!pItem || !(pItem = *(SSmaStatItem **)pItem) || tsdbSmaStatIsDropped(pItem)) {
- terrno = TSDB_CODE_TDB_INVALID_SMA_STAT;
- tsdbUnRefSmaStat(pTsdb, pStat);
- return TSDB_CODE_FAILED;
- }
-
- STSma *pSma = pItem->pSma;
- STSmaWriteH tSmaH = {0};
-
- if (tsdbInitTSmaWriteH(&tSmaH, pTsdb, pDataBlocks, pSma->interval, pSma->intervalUnit) != 0) {
- return TSDB_CODE_FAILED;
- }
-
- char rPath[TSDB_FILENAME_LEN] = {0};
- char aPath[TSDB_FILENAME_LEN] = {0};
- snprintf(rPath, TSDB_FILENAME_LEN, "%s%s%" PRIi64, SMA_ENV_PATH(pEnv), TD_DIRSEP, indexUid);
- tfsAbsoluteName(REPO_TFS(pTsdb), SMA_ENV_DID(pEnv), rPath, aPath);
- if (!taosCheckExistFile(aPath)) {
- if (tfsMkdirRecurAt(REPO_TFS(pTsdb), rPath, SMA_ENV_DID(pEnv)) != TSDB_CODE_SUCCESS) {
- tsdbUnRefSmaStat(pTsdb, pStat);
- return TSDB_CODE_FAILED;
- }
- }
-
- // Step 1: Judge the storage level and days
- int32_t storageLevel = tsdbGetSmaStorageLevel(pSma->interval, pSma->intervalUnit);
- int32_t daysPerFile = tsdbGetTSmaDays(pTsdb, tSmaH.interval, storageLevel);
-
- char smaKey[SMA_KEY_LEN] = {0}; // key: skey + groupId
- char dataBuf[512] = {0}; // val: aggr data // TODO: handle 512 buffer?
- void *pDataBuf = NULL;
- int32_t sz = taosArrayGetSize(pDataBlocks);
- for (int32_t i = 0; i < sz; ++i) {
- SSDataBlock *pDataBlock = taosArrayGet(pDataBlocks, i);
- int32_t colNum = pDataBlock->info.numOfCols;
- int32_t rows = pDataBlock->info.rows;
- int32_t rowSize = pDataBlock->info.rowSize;
- int64_t groupId = pDataBlock->info.groupId;
- for (int32_t j = 0; j < rows; ++j) {
- printf("|");
- TSKEY skey = TSKEY_INITIAL_VAL; // the start key of TS window by interval
- void *pSmaKey = &smaKey;
- bool isStartKey = false;
-
- int32_t tlen = 0; // reset the len
- pDataBuf = &dataBuf; // reset the buf
- for (int32_t k = 0; k < colNum; ++k) {
- SColumnInfoData *pColInfoData = taosArrayGet(pDataBlock->pDataBlock, k);
- void *var = POINTER_SHIFT(pColInfoData->pData, j * pColInfoData->info.bytes);
- switch (pColInfoData->info.type) {
- case TSDB_DATA_TYPE_TIMESTAMP:
- if (!isStartKey) {
- isStartKey = true;
- skey = *(TSKEY *)var;
- printf("= skey %" PRIi64 " groupId = %" PRIi64 "|", skey, groupId);
- tsdbEncodeTSmaKey(groupId, skey, &pSmaKey);
- } else {
- printf(" %" PRIi64 " |", *(int64_t *)var);
- tlen += taosEncodeFixedI64(&pDataBuf, *(int64_t *)var);
- break;
- }
- break;
- case TSDB_DATA_TYPE_BOOL:
- case TSDB_DATA_TYPE_UTINYINT:
- printf(" %15d |", *(uint8_t *)var);
- tlen += taosEncodeFixedU8(&pDataBuf, *(uint8_t *)var);
- break;
- case TSDB_DATA_TYPE_TINYINT:
- printf(" %15d |", *(int8_t *)var);
- tlen += taosEncodeFixedI8(&pDataBuf, *(int8_t *)var);
- break;
- case TSDB_DATA_TYPE_SMALLINT:
- printf(" %15d |", *(int16_t *)var);
- tlen += taosEncodeFixedI16(&pDataBuf, *(int16_t *)var);
- break;
- case TSDB_DATA_TYPE_USMALLINT:
- printf(" %15d |", *(uint16_t *)var);
- tlen += taosEncodeFixedU16(&pDataBuf, *(uint16_t *)var);
- break;
- case TSDB_DATA_TYPE_INT:
- printf(" %15d |", *(int32_t *)var);
- tlen += taosEncodeFixedI32(&pDataBuf, *(int32_t *)var);
- break;
- case TSDB_DATA_TYPE_FLOAT:
- printf(" %15f |", *(float *)var);
- tlen += taosEncodeBinary(&pDataBuf, var, sizeof(float));
- break;
- case TSDB_DATA_TYPE_UINT:
- printf(" %15u |", *(uint32_t *)var);
- tlen += taosEncodeFixedU32(&pDataBuf, *(uint32_t *)var);
- break;
- case TSDB_DATA_TYPE_BIGINT:
- printf(" %15ld |", *(int64_t *)var);
- tlen += taosEncodeFixedI64(&pDataBuf, *(int64_t *)var);
- break;
- case TSDB_DATA_TYPE_DOUBLE:
- printf(" %15lf |", *(double *)var);
- tlen += taosEncodeBinary(&pDataBuf, var, sizeof(double));
- case TSDB_DATA_TYPE_UBIGINT:
- printf(" %15lu |", *(uint64_t *)var);
- tlen += taosEncodeFixedU64(&pDataBuf, *(uint64_t *)var);
- break;
- case TSDB_DATA_TYPE_NCHAR: {
- char tmpChar[100] = {0};
- strncpy(tmpChar, varDataVal(var), varDataLen(var));
- printf(" %s |", tmpChar);
- tlen += taosEncodeBinary(&pDataBuf, varDataVal(var), varDataLen(var));
- break;
- }
- case TSDB_DATA_TYPE_VARCHAR: { // TSDB_DATA_TYPE_BINARY
- char tmpChar[100] = {0};
- strncpy(tmpChar, varDataVal(var), varDataLen(var));
- printf(" %s |", tmpChar);
- tlen += taosEncodeBinary(&pDataBuf, varDataVal(var), varDataLen(var));
- break;
- }
- case TSDB_DATA_TYPE_VARBINARY:
- // TODO: add binary/varbinary
- TASSERT(0);
- default:
- printf("the column type %" PRIi16 " is undefined\n", pColInfoData->info.type);
- TASSERT(0);
- break;
- }
- }
- // if ((tlen > 0) && (skey != TSKEY_INITIAL_VAL)) {
- if (tlen > 0) {
- int32_t fid = (int32_t)(TSDB_KEY_FID(skey, daysPerFile, pCfg->precision));
-
- // Step 2: Set the DFile for storage of SMA index, and iterate/split the TSma data and store to B+Tree index
- // file
- // - Set and open the DFile or the B+Tree file
- // TODO: tsdbStartTSmaCommit();
- if (fid != tSmaH.dFile.fid) {
- if (tSmaH.dFile.fid != TSDB_IVLD_FID) {
- tsdbSmaEndCommit(pEnv);
- tsdbCloseDBF(&tSmaH.dFile);
- }
- tsdbSetTSmaDataFile(&tSmaH, indexUid, fid);
- if (tsdbOpenDBF(pEnv->dbEnv, &tSmaH.dFile) != 0) {
- tsdbWarn("vgId:%d open DB file %s failed since %s", REPO_ID(pTsdb),
- tSmaH.dFile.path ? tSmaH.dFile.path : "path is NULL", tstrerror(terrno));
- tsdbDestroyTSmaWriteH(&tSmaH);
- tsdbUnRefSmaStat(pTsdb, pStat);
- return TSDB_CODE_FAILED;
- }
- tsdbSmaBeginCommit(pEnv);
- }
-
- if (tsdbInsertTSmaBlocks(&tSmaH, &smaKey, SMA_KEY_LEN, dataBuf, tlen, &pEnv->txn) != 0) {
- tsdbWarn("vgId:%d insert tsma data blocks fail for index %" PRIi64 ", skey %" PRIi64 ", groupId %" PRIi64
- " since %s",
- REPO_ID(pTsdb), indexUid, skey, groupId, tstrerror(terrno));
- tsdbSmaEndCommit(pEnv);
- tsdbDestroyTSmaWriteH(&tSmaH);
- tsdbUnRefSmaStat(pTsdb, pStat);
- return TSDB_CODE_FAILED;
- }
- tsdbDebug("vgId:%d insert tsma data blocks success for index %" PRIi64 ", skey %" PRIi64 ", groupId %" PRIi64,
- REPO_ID(pTsdb), indexUid, skey, groupId);
- // TODO:tsdbEndTSmaCommit();
-
- // Step 3: reset the SSmaStat
- tsdbResetExpiredWindow(pTsdb, pStat, indexUid, skey);
- } else {
- tsdbWarn("vgId:%d invalid data skey:%" PRIi64 ", tlen %" PRIi32 " during insert tSma data for %" PRIi64,
- REPO_ID(pTsdb), skey, tlen, indexUid);
- }
-
- printf("\n");
- }
- }
- tsdbSmaEndCommit(pEnv); // TODO: not commit for every insert
- tsdbDestroyTSmaWriteH(&tSmaH);
- tsdbUnRefSmaStat(pTsdb, pStat);
-
- return TSDB_CODE_SUCCESS;
-}
-
-/**
- * @brief Drop tSma data and local cache
- * - insert/query reference
- * @param pTsdb
- * @param msg
- * @return int32_t
- */
-static int32_t tsdbDropTSmaDataImpl(STsdb *pTsdb, int64_t indexUid) {
- SSmaEnv *pEnv = atomic_load_ptr(&REPO_TSMA_ENV(pTsdb));
-
- // clear local cache
- if (pEnv) {
- tsdbDebug("vgId:%d drop tSma local cache for %" PRIi64, REPO_ID(pTsdb), indexUid);
-
- SSmaStatItem *pItem = taosHashGet(SMA_ENV_STAT_ITEMS(pEnv), &indexUid, sizeof(indexUid));
- if ((pItem) || ((pItem = *(SSmaStatItem **)pItem))) {
- if (tsdbSmaStatIsDropped(pItem)) {
- tsdbDebug("vgId:%d tSma stat is already dropped for %" PRIi64, REPO_ID(pTsdb), indexUid);
- return TSDB_CODE_TDB_INVALID_ACTION; // TODO: duplicate drop msg would be intercepted by mnode
- }
-
- tsdbWLockSma(pEnv);
- if (tsdbSmaStatIsDropped(pItem)) {
- tsdbUnLockSma(pEnv);
- tsdbDebug("vgId:%d tSma stat is already dropped for %" PRIi64, REPO_ID(pTsdb), indexUid);
- return TSDB_CODE_TDB_INVALID_ACTION; // TODO: duplicate drop msg would be intercepted by mnode
- }
- tsdbSmaStatSetDropped(pItem);
- tsdbUnLockSma(pEnv);
-
- int32_t nSleep = 0;
- int32_t refVal = INT32_MAX;
- while (true) {
- if ((refVal = T_REF_VAL_GET(SMA_ENV_STAT(pEnv))) <= 0) {
- tsdbDebug("vgId:%d drop index %" PRIi64 " since refVal=%d", REPO_ID(pTsdb), indexUid, refVal);
- break;
- }
- tsdbDebug("vgId:%d wait 1s to drop index %" PRIi64 " since refVal=%d", REPO_ID(pTsdb), indexUid, refVal);
- taosSsleep(1);
- if (++nSleep > SMA_DROP_EXPIRED_TIME) {
- tsdbDebug("vgId:%d drop index %" PRIi64 " after wait %d (refVal=%d)", REPO_ID(pTsdb), indexUid, nSleep,
- refVal);
- break;
- };
- }
-
- tsdbFreeSmaStatItem(pItem);
- tsdbDebug("vgId:%d getTSmaDataImpl failed since no index %" PRIi64 " in local cache", REPO_ID(pTsdb), indexUid);
- }
- }
- // clear sma data files
- // TODO:
- return TSDB_CODE_SUCCESS;
-}
-
-static int32_t tsdbSetRSmaDataFile(STSmaWriteH *pSmaH, int32_t fid) {
- STsdb *pTsdb = pSmaH->pTsdb;
-
- char tSmaFile[TSDB_FILENAME_LEN] = {0};
- snprintf(tSmaFile, TSDB_FILENAME_LEN, "v%df%d.rsma", REPO_ID(pTsdb), fid);
- pSmaH->dFile.path = strdup(tSmaFile);
-
- return TSDB_CODE_SUCCESS;
-}
-
-static int32_t tsdbInsertRSmaDataImpl(STsdb *pTsdb, const char *msg) {
- STsdbCfg *pCfg = REPO_CFG(pTsdb);
- const SArray *pDataBlocks = (const SArray *)msg;
- SSmaEnv *pEnv = atomic_load_ptr(&REPO_RSMA_ENV(pTsdb));
- int64_t indexUid = SMA_TEST_INDEX_UID;
-
- if (!pEnv) {
- terrno = TSDB_CODE_INVALID_PTR;
- tsdbWarn("vgId:%d insert rSma data failed since pTSmaEnv is NULL", REPO_ID(pTsdb));
- return terrno;
- }
-
- if (!pDataBlocks) {
- terrno = TSDB_CODE_INVALID_PTR;
- tsdbWarn("vgId:%d insert rSma data failed since pDataBlocks is NULL", REPO_ID(pTsdb));
- return terrno;
- }
-
- if (taosArrayGetSize(pDataBlocks) <= 0) {
- terrno = TSDB_CODE_INVALID_PARA;
- tsdbWarn("vgId:%d insert rSma data failed since pDataBlocks is empty", REPO_ID(pTsdb));
- return TSDB_CODE_FAILED;
- }
-
- SSmaStat *pStat = SMA_ENV_STAT(pEnv);
- SSmaStatItem *pItem = NULL;
-
- tsdbRefSmaStat(pTsdb, pStat);
-
- if (pStat && SMA_STAT_ITEMS(pStat)) {
- pItem = taosHashGet(SMA_STAT_ITEMS(pStat), &indexUid, sizeof(indexUid));
- }
-
- if (!pItem || !(pItem = *(SSmaStatItem **)pItem) || tsdbSmaStatIsDropped(pItem)) {
- terrno = TSDB_CODE_TDB_INVALID_SMA_STAT;
- tsdbUnRefSmaStat(pTsdb, pStat);
- return TSDB_CODE_FAILED;
- }
-
- STSma *pSma = pItem->pSma;
-
- STSmaWriteH tSmaH = {0};
-
- if (tsdbInitTSmaWriteH(&tSmaH, pTsdb, pDataBlocks, pSma->interval, pSma->intervalUnit) != 0) {
- return TSDB_CODE_FAILED;
- }
-
- char rPath[TSDB_FILENAME_LEN] = {0};
- char aPath[TSDB_FILENAME_LEN] = {0};
- snprintf(rPath, TSDB_FILENAME_LEN, "%s%s%" PRIi64, SMA_ENV_PATH(pEnv), TD_DIRSEP, indexUid);
- tfsAbsoluteName(REPO_TFS(pTsdb), SMA_ENV_DID(pEnv), rPath, aPath);
- if (!taosCheckExistFile(aPath)) {
- if (tfsMkdirRecurAt(REPO_TFS(pTsdb), rPath, SMA_ENV_DID(pEnv)) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_FAILED;
- }
- }
-
- // Step 1: Judge the storage level and days
- int32_t storageLevel = tsdbGetSmaStorageLevel(pSma->interval, pSma->intervalUnit);
- int32_t daysPerFile = tsdbGetTSmaDays(pTsdb, tSmaH.interval, storageLevel);
-#if 0
- int32_t fid = (int32_t)(TSDB_KEY_FID(pData->skey, daysPerFile, pCfg->precision));
-
- // Step 2: Set the DFile for storage of SMA index, and iterate/split the TSma data and store to B+Tree index file
- // - Set and open the DFile or the B+Tree file
- // TODO: tsdbStartTSmaCommit();
- tsdbSetTSmaDataFile(&tSmaH, pData, indexUid, fid);
- if (tsdbOpenDBF(pTsdb->pTSmaEnv->dbEnv, &tSmaH.dFile) != 0) {
- tsdbWarn("vgId:%d open DB file %s failed since %s", REPO_ID(pTsdb),
- tSmaH.dFile.path ? tSmaH.dFile.path : "path is NULL", tstrerror(terrno));
- tsdbDestroyTSmaWriteH(&tSmaH);
- return TSDB_CODE_FAILED;
- }
-
- if (tsdbInsertTSmaDataSection(&tSmaH, pData) != 0) {
- tsdbWarn("vgId:%d insert tSma data section failed since %s", REPO_ID(pTsdb), tstrerror(terrno));
- tsdbDestroyTSmaWriteH(&tSmaH);
- return TSDB_CODE_FAILED;
- }
- // TODO:tsdbEndTSmaCommit();
-
- // Step 3: reset the SSmaStat
- tsdbResetExpiredWindow(pTsdb, SMA_ENV_STAT(pTsdb->pTSmaEnv), pData->indexUid, pData->skey);
-#endif
-
- tsdbDestroyTSmaWriteH(&tSmaH);
- tsdbUnRefSmaStat(pTsdb, pStat);
- return TSDB_CODE_SUCCESS;
-}
-
-/**
- * @brief
- *
- * @param pSmaH
- * @param pTsdb
- * @param interval
- * @param intervalUnit
- * @return int32_t
- */
-static int32_t tsdbInitTSmaReadH(STSmaReadH *pSmaH, STsdb *pTsdb, int64_t interval, int8_t intervalUnit) {
- pSmaH->pTsdb = pTsdb;
- pSmaH->interval = tsdbGetIntervalByPrecision(interval, intervalUnit, REPO_CFG(pTsdb)->precision, true);
- pSmaH->storageLevel = tsdbGetSmaStorageLevel(interval, intervalUnit);
- pSmaH->days = tsdbGetTSmaDays(pTsdb, pSmaH->interval, pSmaH->storageLevel);
- return TSDB_CODE_SUCCESS;
-}
-
-/**
- * @brief Init of tSma FS
- *
- * @param pReadH
- * @param indexUid
- * @param skey
- * @return int32_t
- */
-static int32_t tsdbInitTSmaFile(STSmaReadH *pSmaH, int64_t indexUid, TSKEY skey) {
- STsdb *pTsdb = pSmaH->pTsdb;
-
- int32_t fid = (int32_t)(TSDB_KEY_FID(skey, pSmaH->days, REPO_CFG(pTsdb)->precision));
- char tSmaFile[TSDB_FILENAME_LEN] = {0};
- snprintf(tSmaFile, TSDB_FILENAME_LEN, "%" PRIi64 "%sv%df%d.tsma", indexUid, TD_DIRSEP, REPO_ID(pTsdb), fid);
- pSmaH->dFile.path = strdup(tSmaFile);
- pSmaH->smaFsIter.iter = 0;
- pSmaH->smaFsIter.fid = fid;
- return TSDB_CODE_SUCCESS;
-}
-
-/**
- * @brief Set and open tSma file if it has key locates in queryWin.
- *
- * @param pReadH
- * @param param
- * @param queryWin
- * @return true
- * @return false
- */
-static bool tsdbSetAndOpenTSmaFile(STSmaReadH *pReadH, TSKEY *queryKey) {
- SArray *smaFs = pReadH->pTsdb->fs->cstatus->sf;
- int32_t nSmaFs = taosArrayGetSize(smaFs);
-
- tsdbCloseDBF(&pReadH->dFile);
-
-#if 0
- while (pReadH->smaFsIter.iter < nSmaFs) {
- void *pSmaFile = taosArrayGet(smaFs, pReadH->smaFsIter.iter);
- if (pSmaFile) { // match(indexName, queryWindow)
- // TODO: select the file by index_name ...
- pReadH->dFile = pSmaFile;
- ++pReadH->smaFsIter.iter;
- break;
- }
- ++pReadH->smaFsIter.iter;
- }
-
- if (pReadH->pDFile) {
- tsdbDebug("vg%d: smaFile %s matched", REPO_ID(pReadH->pTsdb), "[pSmaFile dir]");
- return true;
- }
-#endif
-
- return false;
-}
-
-/**
- * @brief
- *
- * @param pTsdb Return the data between queryWin and fill the pData.
- * @param pData
- * @param indexUid
- * @param pQuerySKey
- * @param nMaxResult The query invoker should control the nMaxResult need to return to avoid OOM.
- * @return int32_t
- */
-static int32_t tsdbGetTSmaDataImpl(STsdb *pTsdb, char *pData, int64_t indexUid, TSKEY querySKey, int32_t nMaxResult) {
- SSmaEnv *pEnv = atomic_load_ptr(&REPO_TSMA_ENV(pTsdb));
- SSmaStat *pStat = NULL;
-
- if (!pEnv) {
- terrno = TSDB_CODE_INVALID_PTR;
- tsdbWarn("vgId:%d getTSmaDataImpl failed since pTSmaEnv is NULL", REPO_ID(pTsdb));
- return TSDB_CODE_FAILED;
- }
-
- pStat = SMA_ENV_STAT(pEnv);
-
- tsdbRefSmaStat(pTsdb, pStat);
- SSmaStatItem *pItem = taosHashGet(SMA_ENV_STAT_ITEMS(pEnv), &indexUid, sizeof(indexUid));
- if (!pItem || !(pItem = *(SSmaStatItem **)pItem)) {
- // Normally pItem should not be NULL, mark all windows as expired and notify query module to fetch raw TS data if
- // it's NULL.
- tsdbUnRefSmaStat(pTsdb, pStat);
- terrno = TSDB_CODE_TDB_INVALID_ACTION;
- tsdbDebug("vgId:%d getTSmaDataImpl failed since no index %" PRIi64, REPO_ID(pTsdb), indexUid);
- return TSDB_CODE_FAILED;
- }
-
-#if 0
- int32_t nQueryWin = taosArrayGetSize(pQuerySKey);
- for (int32_t n = 0; n < nQueryWin; ++n) {
- TSKEY skey = taosArrayGet(pQuerySKey, n);
- if (taosHashGet(pItem->expiredWindows, &skey, sizeof(TSKEY))) {
- // TODO: mark this window as expired.
- }
- }
-#endif
-
-#if 1
- int8_t smaStat = 0;
- if (!tsdbSmaStatIsOK(pItem, &smaStat)) { // TODO: multiple check for large scale sma query
- tsdbUnRefSmaStat(pTsdb, pStat);
- terrno = TSDB_CODE_TDB_INVALID_SMA_STAT;
- tsdbWarn("vgId:%d getTSmaDataImpl failed from index %" PRIi64 " since %s %" PRIi8, REPO_ID(pTsdb), indexUid,
- tstrerror(terrno), smaStat);
- return TSDB_CODE_FAILED;
- }
-
- if (taosHashGet(pItem->expiredWindows, &querySKey, sizeof(TSKEY))) {
- // TODO: mark this window as expired.
- tsdbDebug("vgId:%d skey %" PRIi64 " of window exists in expired window for index %" PRIi64, REPO_ID(pTsdb),
- querySKey, indexUid);
- } else {
- tsdbDebug("vgId:%d skey %" PRIi64 " of window not in expired window for index %" PRIi64, REPO_ID(pTsdb), querySKey,
- indexUid);
- }
-
- STSma *pTSma = pItem->pSma;
-#endif
-
- STSmaReadH tReadH = {0};
- tsdbInitTSmaReadH(&tReadH, pTsdb, pTSma->interval, pTSma->intervalUnit);
- tsdbCloseDBF(&tReadH.dFile);
-
- tsdbUnRefSmaStat(pTsdb, pStat);
-
- tsdbInitTSmaFile(&tReadH, indexUid, querySKey);
- if (tsdbOpenDBF(pEnv->dbEnv, &tReadH.dFile) != 0) {
- tsdbWarn("vgId:%d open DBF %s failed since %s", REPO_ID(pTsdb), tReadH.dFile.path, tstrerror(terrno));
- return TSDB_CODE_FAILED;
- }
-
- char smaKey[SMA_KEY_LEN] = {0};
- void *pSmaKey = &smaKey;
- int64_t queryGroupId = 1;
- tsdbEncodeTSmaKey(queryGroupId, querySKey, (void **)&pSmaKey);
-
- tsdbDebug("vgId:%d get sma data from %s: smaKey %" PRIx64 "-%" PRIx64 ", keyLen %d", REPO_ID(pTsdb),
- tReadH.dFile.path, *(int64_t *)smaKey, *(int64_t *)POINTER_SHIFT(smaKey, 8), SMA_KEY_LEN);
-
- void *result = NULL;
- int32_t valueSize = 0;
- if (!(result = tsdbGetSmaDataByKey(&tReadH.dFile, smaKey, SMA_KEY_LEN, &valueSize))) {
- tsdbWarn("vgId:%d get sma data failed from smaIndex %" PRIi64 ", smaKey %" PRIx64 "-%" PRIx64 " since %s",
- REPO_ID(pTsdb), indexUid, *(int64_t *)smaKey, *(int64_t *)POINTER_SHIFT(smaKey, 8), tstrerror(terrno));
- tsdbCloseDBF(&tReadH.dFile);
- return TSDB_CODE_FAILED;
- }
-
-#ifdef _TEST_SMA_PRINT_DEBUG_LOG_
- for (uint32_t v = 0; v < valueSize; v += 8) {
- tsdbWarn("vgId:%d get sma data v[%d]=%" PRIi64, REPO_ID(pTsdb), v, *(int64_t *)POINTER_SHIFT(result, v));
- }
-#endif
- taosMemoryFreeClear(result); // TODO: fill the result to output
-
-#if 0
- int32_t nResult = 0;
- int64_t lastKey = 0;
-
- while (true) {
- if (nResult >= nMaxResult) {
- break;
- }
-
- // set and open the file according to the STSma param
- if (tsdbSetAndOpenTSmaFile(&tReadH, queryWin)) {
- char bTree[100] = "\0";
- while (strncmp(bTree, "has more nodes", 100) == 0) {
- if (nResult >= nMaxResult) {
- break;
- }
- // tsdbGetDataFromBTree(bTree, queryWin, lastKey)
- // fill the pData
- ++nResult;
- }
- }
- }
-#endif
- // read data from file and fill the result
- tsdbCloseDBF(&tReadH.dFile);
- return TSDB_CODE_SUCCESS;
-}
-
-int32_t tsdbCreateTSma(STsdb *pTsdb, char *pMsg) {
- SSmaCfg vCreateSmaReq = {0};
- if (!tDeserializeSVCreateTSmaReq(pMsg, &vCreateSmaReq)) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- tsdbWarn("vgId:%d tsma create msg received but deserialize failed since %s", REPO_ID(pTsdb), terrstr(terrno));
- return -1;
- }
-
- tsdbDebug("vgId:%d tsma create msg %s:%" PRIi64 " for table %" PRIi64 " received", REPO_ID(pTsdb),
- vCreateSmaReq.tSma.indexName, vCreateSmaReq.tSma.indexUid, vCreateSmaReq.tSma.tableUid);
-
- // record current timezone of server side
- vCreateSmaReq.tSma.timezoneInt = tsTimezone;
-
- if (metaCreateTSma(REPO_META(pTsdb), &vCreateSmaReq) < 0) {
- // TODO: handle error
- tsdbWarn("vgId:%d tsma %s:%" PRIi64 " create failed for table %" PRIi64 " since %s", REPO_ID(pTsdb),
- vCreateSmaReq.tSma.indexName, vCreateSmaReq.tSma.indexUid, vCreateSmaReq.tSma.tableUid, terrstr(terrno));
- tdDestroyTSma(&vCreateSmaReq.tSma);
- return -1;
- }
-
- tsdbTSmaAdd(pTsdb, 1);
-
- tdDestroyTSma(&vCreateSmaReq.tSma);
- // TODO: return directly or go on follow steps?
- return TSDB_CODE_SUCCESS;
-}
-
-int32_t tsdbDropTSma(STsdb *pTsdb, char *pMsg) {
- SVDropTSmaReq vDropSmaReq = {0};
- if (!tDeserializeSVDropTSmaReq(pMsg, &vDropSmaReq)) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- return -1;
- }
-
- // TODO: send msg to stream computing to drop tSma
- // if ((send msg to stream computing) < 0) {
- // tdDestroyTSma(&vCreateSmaReq);
- // return -1;
- // }
- //
-
- if (metaDropTSma(REPO_META(pTsdb), vDropSmaReq.indexUid) < 0) {
- // TODO: handle error
- return -1;
- }
-
- if (tsdbDropTSmaData(pTsdb, vDropSmaReq.indexUid) < 0) {
- // TODO: handle error
- return -1;
- }
-
- tsdbTSmaSub(pTsdb, 1);
-
- // TODO: return directly or go on follow steps?
- return TSDB_CODE_SUCCESS;
-}
-
-/**
- * @brief Check and init qTaskInfo_t, only applicable to stable with SRSmaParam.
- *
- * @param pTsdb
- * @param pMeta
- * @param pReq
- * @return int32_t
- */
-int32_t tsdbRegisterRSma(STsdb *pTsdb, SMeta *pMeta, SVCreateStbReq *pReq, SMsgCb *pMsgCb) {
- if (!pReq->rollup) {
- tsdbDebug("vgId:%d return directly since no rollup for stable %s %" PRIi64, REPO_ID(pTsdb), pReq->name, pReq->suid);
- return TSDB_CODE_SUCCESS;
- }
-
- SRSmaParam *param = &pReq->pRSmaParam;
-
- if ((param->qmsg1Len == 0) && (param->qmsg2Len == 0)) {
- tsdbWarn("vgId:%d no qmsg1/qmsg2 for rollup stable %s %" PRIi64, REPO_ID(pTsdb), pReq->name, pReq->suid);
- return TSDB_CODE_SUCCESS;
- }
-
- if (tsdbCheckAndInitSmaEnv(pTsdb, TSDB_SMA_TYPE_ROLLUP) != TSDB_CODE_SUCCESS) {
- terrno = TSDB_CODE_TDB_INIT_FAILED;
- return TSDB_CODE_FAILED;
- }
-
- SSmaEnv *pEnv = REPO_RSMA_ENV(pTsdb);
- SSmaStat *pStat = SMA_ENV_STAT(pEnv);
- SRSmaInfo *pRSmaInfo = NULL;
-
- pRSmaInfo = taosHashGet(SMA_STAT_INFO_HASH(pStat), &pReq->suid, sizeof(tb_uid_t));
- if (pRSmaInfo) {
- tsdbWarn("vgId:%d rsma info already exists for stb: %s, %" PRIi64, REPO_ID(pTsdb), pReq->name, pReq->suid);
- return TSDB_CODE_SUCCESS;
- }
-
- pRSmaInfo = (SRSmaInfo *)taosMemoryCalloc(1, sizeof(SRSmaInfo));
- if (!pRSmaInfo) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- return TSDB_CODE_FAILED;
- }
-
- STqReadHandle *pReadHandle = tqInitSubmitMsgScanner(pMeta);
- if (!pReadHandle) {
- taosMemoryFree(pRSmaInfo);
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- return TSDB_CODE_FAILED;
- }
-
- SReadHandle handle = {
- .reader = pReadHandle,
- .meta = pMeta,
- .pMsgCb = pMsgCb,
- };
-
- if (param->qmsg1) {
- pRSmaInfo->taskInfo[0] = qCreateStreamExecTaskInfo(param->qmsg1, &handle);
- if (!pRSmaInfo->taskInfo[0]) {
- taosMemoryFree(pRSmaInfo);
- taosMemoryFree(pReadHandle);
- return TSDB_CODE_FAILED;
- }
- }
-
- if (param->qmsg2) {
- pRSmaInfo->taskInfo[1] = qCreateStreamExecTaskInfo(param->qmsg2, &handle);
- if (!pRSmaInfo->taskInfo[1]) {
- taosMemoryFree(pRSmaInfo);
- taosMemoryFree(pReadHandle);
- return TSDB_CODE_FAILED;
- }
- }
-
- if (taosHashPut(SMA_STAT_INFO_HASH(pStat), &pReq->suid, sizeof(tb_uid_t), &pRSmaInfo, sizeof(pRSmaInfo)) !=
- TSDB_CODE_SUCCESS) {
- return TSDB_CODE_FAILED;
- } else {
- tsdbDebug("vgId:%d register rsma info succeed for suid:%" PRIi64, REPO_ID(pTsdb), pReq->suid);
- }
-
- return TSDB_CODE_SUCCESS;
-}
-
-/**
- * @brief store suid/[uids], prefer to use array and then hash
- *
- * @param pStore
- * @param suid
- * @param uid
- * @return int32_t
- */
-static int32_t tsdbUidStorePut(STbUidStore *pStore, tb_uid_t suid, tb_uid_t *uid) {
- // prefer to store suid/uids in array
- if ((suid == pStore->suid) || (pStore->suid == 0)) {
- if (pStore->suid == 0) {
- pStore->suid = suid;
- }
- if (uid) {
- if (!pStore->tbUids) {
- if (!(pStore->tbUids = taosArrayInit(1, sizeof(tb_uid_t)))) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- return TSDB_CODE_FAILED;
- }
- }
- if (!taosArrayPush(pStore->tbUids, uid)) {
- return TSDB_CODE_FAILED;
- }
- }
- } else {
- // store other suid/uids in hash when multiple stable/table included in 1 batch of request
- if (!pStore->uidHash) {
- pStore->uidHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_ENTRY_LOCK);
- if (!pStore->uidHash) {
- return TSDB_CODE_FAILED;
- }
- }
- if (uid) {
- SArray *uidArray = taosHashGet(pStore->uidHash, &suid, sizeof(tb_uid_t));
- if (uidArray && ((uidArray = *(SArray **)uidArray))) {
- taosArrayPush(uidArray, uid);
- } else {
- SArray *pUidArray = taosArrayInit(1, sizeof(tb_uid_t));
- if (!pUidArray) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- return TSDB_CODE_FAILED;
- }
- if (!taosArrayPush(pUidArray, uid)) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- return TSDB_CODE_FAILED;
- }
- if (taosHashPut(pStore->uidHash, &suid, sizeof(suid), &pUidArray, sizeof(pUidArray)) != 0) {
- return TSDB_CODE_FAILED;
- }
- }
- } else {
- if (taosHashPut(pStore->uidHash, &suid, sizeof(suid), NULL, 0) != 0) {
- return TSDB_CODE_FAILED;
- }
- }
- }
- return TSDB_CODE_SUCCESS;
-}
-
-void tsdbUidStoreDestory(STbUidStore *pStore) {
- if (pStore) {
- if (pStore->uidHash) {
- if (pStore->tbUids) {
- // When pStore->tbUids not NULL, the pStore->uidHash has k/v; otherwise pStore->uidHash only has keys.
- void *pIter = taosHashIterate(pStore->uidHash, NULL);
- while (pIter) {
- SArray *arr = *(SArray **)pIter;
- taosArrayDestroy(arr);
- pIter = taosHashIterate(pStore->uidHash, pIter);
- }
- }
- taosHashCleanup(pStore->uidHash);
- }
- taosArrayDestroy(pStore->tbUids);
- }
-}
-
-void *tsdbUidStoreFree(STbUidStore *pStore) {
- if (pStore) {
- tsdbUidStoreDestory(pStore);
- taosMemoryFree(pStore);
- }
- return NULL;
-}
-
-/**
- * @brief fetch suid/uids when create child tables of rollup SMA
- *
- * @param pTsdb
- * @param ppStore
- * @param suid
- * @param uid
- * @return int32_t
- */
-int32_t tsdbFetchTbUidList(STsdb *pTsdb, STbUidStore **ppStore, tb_uid_t suid, tb_uid_t uid) {
- SSmaEnv *pEnv = REPO_RSMA_ENV((STsdb *)pTsdb);
-
- // only applicable to rollup SMA ctables
- if (!pEnv) {
- return TSDB_CODE_SUCCESS;
- }
-
- SSmaStat *pStat = SMA_ENV_STAT(pEnv);
- SHashObj *infoHash = NULL;
- if (!pStat || !(infoHash = SMA_STAT_INFO_HASH(pStat))) {
- terrno = TSDB_CODE_TDB_INVALID_SMA_STAT;
- return TSDB_CODE_FAILED;
- }
-
- // info cached when create rsma stable and return directly for non-rsma ctables
- if (!taosHashGet(infoHash, &suid, sizeof(tb_uid_t))) {
- return TSDB_CODE_SUCCESS;
- }
-
- ASSERT(ppStore != NULL);
-
- if (!(*ppStore)) {
- if (tsdbUidStoreInit(ppStore) != 0) {
- return TSDB_CODE_FAILED;
- }
- }
-
- if (tsdbUidStorePut(*ppStore, suid, &uid) != 0) {
- *ppStore = tsdbUidStoreFree(*ppStore);
- return TSDB_CODE_FAILED;
- }
-
- return TSDB_CODE_SUCCESS;
-}
-
-static FORCE_INLINE int32_t tsdbUpdateTbUidListImpl(STsdb *pTsdb, tb_uid_t *suid, SArray *tbUids) {
- SSmaEnv *pEnv = REPO_RSMA_ENV(pTsdb);
- SSmaStat *pStat = SMA_ENV_STAT(pEnv);
- SRSmaInfo *pRSmaInfo = NULL;
-
- if (!suid || !tbUids) {
- terrno = TSDB_CODE_INVALID_PTR;
- tsdbError("vgId:%d failed to get rsma info for uid:%" PRIi64 " since %s", REPO_ID(pTsdb), *suid, terrstr(terrno));
- return TSDB_CODE_FAILED;
- }
-
- pRSmaInfo = taosHashGet(SMA_STAT_INFO_HASH(pStat), suid, sizeof(tb_uid_t));
- if (!pRSmaInfo || !(pRSmaInfo = *(SRSmaInfo **)pRSmaInfo)) {
- tsdbError("vgId:%d failed to get rsma info for uid:%" PRIi64, REPO_ID(pTsdb), *suid);
- terrno = TSDB_CODE_TDB_INVALID_SMA_STAT;
- return TSDB_CODE_FAILED;
- }
-
- if (pRSmaInfo->taskInfo[0] && (qUpdateQualifiedTableId(pRSmaInfo->taskInfo[0], tbUids, true) != 0)) {
- tsdbError("vgId:%d update tbUidList failed for uid:%" PRIi64 " since %s", REPO_ID(pTsdb), *suid, terrstr(terrno));
- return TSDB_CODE_FAILED;
- } else {
- tsdbDebug("vgId:%d update tbUidList succeed for qTaskInfo:%p with suid:%" PRIi64 ", uid:%" PRIi64, REPO_ID(pTsdb),
- pRSmaInfo->taskInfo[0], *suid, *(int64_t *)taosArrayGet(tbUids, 0));
- }
-
- if (pRSmaInfo->taskInfo[1] && (qUpdateQualifiedTableId(pRSmaInfo->taskInfo[1], tbUids, true) != 0)) {
- tsdbError("vgId:%d update tbUidList failed for uid:%" PRIi64 " since %s", REPO_ID(pTsdb), *suid, terrstr(terrno));
- return TSDB_CODE_FAILED;
- } else {
- tsdbDebug("vgId:%d update tbUidList succeed for qTaskInfo:%p with suid:%" PRIi64 ", uid:%" PRIi64, REPO_ID(pTsdb),
- pRSmaInfo->taskInfo[1], *suid, *(int64_t *)taosArrayGet(tbUids, 0));
- }
-
- return TSDB_CODE_SUCCESS;
-}
-
-int32_t tsdbUpdateTbUidList(STsdb *pTsdb, STbUidStore *pStore) {
- if (!pStore || (taosArrayGetSize(pStore->tbUids) == 0)) {
- return TSDB_CODE_SUCCESS;
- }
-
- if (tsdbUpdateTbUidListImpl(pTsdb, &pStore->suid, pStore->tbUids) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_FAILED;
- }
-
- void *pIter = taosHashIterate(pStore->uidHash, NULL);
- while (pIter) {
- tb_uid_t *pTbSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL);
- SArray *pTbUids = *(SArray **)pIter;
-
- if (tsdbUpdateTbUidListImpl(pTsdb, pTbSuid, pTbUids) != TSDB_CODE_SUCCESS) {
- taosHashCancelIterate(pStore->uidHash, pIter);
- return TSDB_CODE_FAILED;
- }
-
- pIter = taosHashIterate(pStore->uidHash, pIter);
- }
- return TSDB_CODE_SUCCESS;
-}
-
-static int32_t tsdbProcessSubmitReq(STsdb *pTsdb, int64_t version, void *pReq) {
- if (!pReq) {
- terrno = TSDB_CODE_INVALID_PTR;
- return TSDB_CODE_FAILED;
- }
-
- SSubmitReq *pSubmitReq = (SSubmitReq *)pReq;
-
- if (tsdbInsertData(pTsdb, version, pSubmitReq, NULL) < 0) {
- return TSDB_CODE_FAILED;
- }
-
- return TSDB_CODE_SUCCESS;
-}
-
-static int32_t tsdbFetchSubmitReqSuids(SSubmitReq *pMsg, STbUidStore *pStore) {
- ASSERT(pMsg != NULL);
- SSubmitMsgIter msgIter = {0};
- SSubmitBlk *pBlock = NULL;
- SSubmitBlkIter blkIter = {0};
- STSRow *row = NULL;
-
- terrno = TSDB_CODE_SUCCESS;
-
- if (tInitSubmitMsgIter(pMsg, &msgIter) < 0) return -1;
- while (true) {
- if (tGetSubmitMsgNext(&msgIter, &pBlock) < 0) return -1;
-
- if (!pBlock) break;
- tsdbUidStorePut(pStore, msgIter.suid, NULL);
- pStore->uid = msgIter.uid; // TODO: remove, just for debugging
- }
-
- if (terrno != TSDB_CODE_SUCCESS) return -1;
- return 0;
-}
-
-static FORCE_INLINE int32_t tsdbExecuteRSmaImpl(STsdb *pTsdb, const void *pMsg, int32_t inputType,
- qTaskInfo_t *taskInfo, STSchema *pTSchema, tb_uid_t suid, tb_uid_t uid,
- int8_t level) {
- SArray *pResult = NULL;
-
- if (!taskInfo) {
- tsdbDebug("vgId:%d no qTaskInfo to execute rsma %" PRIi8 " task for suid:%" PRIu64, REPO_ID(pTsdb), level, suid);
- return TSDB_CODE_SUCCESS;
- }
-
- tsdbDebug("vgId:%d execute rsma %" PRIi8 " task for qTaskInfo:%p suid:%" PRIu64, REPO_ID(pTsdb), level, taskInfo,
- suid);
-
- qSetStreamInput(taskInfo, pMsg, inputType);
- while (1) {
- SSDataBlock *output = NULL;
- uint64_t ts;
- if (qExecTask(taskInfo, &output, &ts) < 0) {
- ASSERT(false);
- }
- if (!output) {
- break;
- }
- if (!pResult) {
- pResult = taosArrayInit(0, sizeof(SSDataBlock));
- if (!pResult) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- return TSDB_CODE_FAILED;
- }
- }
-
- taosArrayPush(pResult, output);
- }
-
- if (taosArrayGetSize(pResult) > 0) {
- blockDebugShowData(pResult);
- STsdb *sinkTsdb = (level == TSDB_RETENTION_L1 ? pTsdb->pVnode->pRSma1 : pTsdb->pVnode->pRSma2);
- SSubmitReq *pReq = NULL;
- if (buildSubmitReqFromDataBlock(&pReq, pResult, pTSchema, TD_VID(pTsdb->pVnode), uid, suid) != 0) {
- taosArrayDestroy(pResult);
- return TSDB_CODE_FAILED;
- }
- if (tsdbProcessSubmitReq(sinkTsdb, INT64_MAX, pReq) != 0) {
- taosArrayDestroy(pResult);
- taosMemoryFreeClear(pReq);
- return TSDB_CODE_FAILED;
- }
- taosMemoryFreeClear(pReq);
- } else {
- tsdbWarn("vgId:%d no rsma % " PRIi8 " data generated since %s", REPO_ID(pTsdb), level, tstrerror(terrno));
- }
-
- taosArrayDestroy(pResult);
-
- return TSDB_CODE_SUCCESS;
-}
-
-static int32_t tsdbExecuteRSma(STsdb *pTsdb, const void *pMsg, int32_t inputType, tb_uid_t suid, tb_uid_t uid) {
- SSmaEnv *pEnv = REPO_RSMA_ENV(pTsdb);
- if (!pEnv) {
- // only applicable when rsma env exists
- return TSDB_CODE_SUCCESS;
- }
-
- ASSERT(uid != 0); // TODO: remove later
-
- SSmaStat *pStat = SMA_ENV_STAT(pEnv);
- SRSmaInfo *pRSmaInfo = NULL;
-
- pRSmaInfo = taosHashGet(SMA_STAT_INFO_HASH(pStat), &suid, sizeof(tb_uid_t));
-
- if (!pRSmaInfo || !(pRSmaInfo = *(SRSmaInfo **)pRSmaInfo)) {
- tsdbDebug("vgId:%d no rsma info for suid:%" PRIu64, REPO_ID(pTsdb), suid);
- return TSDB_CODE_SUCCESS;
- }
- if (!pRSmaInfo->taskInfo[0]) {
- tsdbDebug("vgId:%d no rsma qTaskInfo for suid:%" PRIu64, REPO_ID(pTsdb), suid);
- return TSDB_CODE_SUCCESS;
- }
-
- if (inputType == STREAM_DATA_TYPE_SUBMIT_BLOCK) {
- // TODO: use the proper schema instead of 0, and cache STSchema in cache
- STSchema *pTSchema = metaGetTbTSchema(pTsdb->pVnode->pMeta, suid, 1);
- if (!pTSchema) {
- terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION;
- return TSDB_CODE_FAILED;
- }
- tsdbExecuteRSmaImpl(pTsdb, pMsg, inputType, pRSmaInfo->taskInfo[0], pTSchema, suid, uid, TSDB_RETENTION_L1);
- tsdbExecuteRSmaImpl(pTsdb, pMsg, inputType, pRSmaInfo->taskInfo[1], pTSchema, suid, uid, TSDB_RETENTION_L2);
- taosMemoryFree(pTSchema);
- }
-
- return TSDB_CODE_SUCCESS;
-}
-
-int32_t tsdbTriggerRSma(STsdb *pTsdb, void *pMsg, int32_t inputType) {
- SSmaEnv *pEnv = REPO_RSMA_ENV(pTsdb);
- if (!pEnv) {
- // only applicable when rsma env exists
- return TSDB_CODE_SUCCESS;
- }
-
- if (inputType == STREAM_DATA_TYPE_SUBMIT_BLOCK) {
- STbUidStore uidStore = {0};
- tsdbFetchSubmitReqSuids(pMsg, &uidStore);
-
- if (uidStore.suid != 0) {
- tsdbExecuteRSma(pTsdb, pMsg, inputType, uidStore.suid, uidStore.uid);
-
- void *pIter = taosHashIterate(uidStore.uidHash, NULL);
- while (pIter) {
- tb_uid_t *pTbSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL);
- tsdbExecuteRSma(pTsdb, pMsg, inputType, *pTbSuid, 0);
- pIter = taosHashIterate(uidStore.uidHash, pIter);
- }
-
- tsdbUidStoreDestory(&uidStore);
- }
- }
- return TSDB_CODE_SUCCESS;
-}
-
-#if 0
-/**
- * @brief Get the start TS key of the last data block of one interval/sliding.
- *
- * @param pTsdb
- * @param param
- * @param result
- * @return int32_t
- * 1) Return 0 and fill the result if the check procedure is normal;
- * 2) Return -1 if error occurs during the check procedure.
- */
-int32_t tsdbGetTSmaStatus(STsdb *pTsdb, void *smaIndex, void *result) {
- const char *procedure = "";
- if (strncmp(procedure, "get the start TS key of the last data block", 100) != 0) {
- return -1;
- }
- // fill the result
- return TSDB_CODE_SUCCESS;
-}
-
-/**
- * @brief Remove the tSma data files related to param between pWin.
- *
- * @param pTsdb
- * @param param
- * @param pWin
- * @return int32_t
- */
-int32_t tsdbRemoveTSmaData(STsdb *pTsdb, void *smaIndex, STimeWindow *pWin) {
- // for ("tSmaFiles of param-interval-sliding between pWin") {
- // // remove the tSmaFile
- // }
- return TSDB_CODE_SUCCESS;
-}
-#endif
-
-// TODO: Who is responsible for resource allocate and release?
-int32_t tsdbInsertTSmaData(STsdb *pTsdb, int64_t indexUid, const char *msg) {
- int32_t code = TSDB_CODE_SUCCESS;
- if ((code = tsdbInsertTSmaDataImpl(pTsdb, indexUid, msg)) < 0) {
- tsdbWarn("vgId:%d insert tSma data failed since %s", REPO_ID(pTsdb), tstrerror(terrno));
- }
- // TODO: destroy SSDataBlocks(msg)
- return code;
-}
-
-int32_t tsdbUpdateSmaWindow(STsdb *pTsdb, SSubmitReq *pMsg, int64_t version) {
- int32_t code = TSDB_CODE_SUCCESS;
- if ((code = tsdbUpdateExpiredWindowImpl(pTsdb, pMsg, version)) < 0) {
- tsdbWarn("vgId:%d update expired sma window failed since %s", REPO_ID(pTsdb), tstrerror(terrno));
- }
- return code;
-}
-
-int32_t tsdbInsertRSmaData(STsdb *pTsdb, char *msg) {
- int32_t code = TSDB_CODE_SUCCESS;
- if ((code = tsdbInsertRSmaDataImpl(pTsdb, msg)) < 0) {
- tsdbWarn("vgId:%d insert rSma data failed since %s", REPO_ID(pTsdb), tstrerror(terrno));
- }
- return code;
-}
-
-int32_t tsdbGetTSmaData(STsdb *pTsdb, char *pData, int64_t indexUid, TSKEY querySKey, int32_t nMaxResult) {
- int32_t code = TSDB_CODE_SUCCESS;
- if ((code = tsdbGetTSmaDataImpl(pTsdb, pData, indexUid, querySKey, nMaxResult)) < 0) {
- tsdbWarn("vgId:%d get tSma data failed since %s", REPO_ID(pTsdb), tstrerror(terrno));
- }
- return code;
-}
-
-int32_t tsdbDropTSmaData(STsdb *pTsdb, int64_t indexUid) {
- int32_t code = TSDB_CODE_SUCCESS;
- if ((code = tsdbDropTSmaDataImpl(pTsdb, indexUid)) < 0) {
- tsdbWarn("vgId:%d drop tSma data failed since %s", REPO_ID(pTsdb), tstrerror(terrno));
- }
- return code;
-}
\ No newline at end of file
diff --git a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c
new file mode 100644
index 0000000000000000000000000000000000000000..79989a55601b99e681c573cae1f5c26e38cd7421
--- /dev/null
+++ b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include "tsdb.h"
+
+struct STsdbSnapshotReader {
+ STsdb* pTsdb;
+ // TODO
+};
+
+int32_t tsdbSnapshotReaderOpen(STsdb* pTsdb, STsdbSnapshotReader** ppReader, int64_t sver, int64_t ever) {
+ // TODO
+ return 0;
+}
+
+int32_t tsdbSnapshotReaderClose(STsdbSnapshotReader* pReader) {
+ // TODO
+ return 0;
+}
+
+int32_t tsdbSnapshotRead(STsdbSnapshotReader* pReader, void** ppData, uint32_t* nData) {
+ // TODO
+ return 0;
+}
diff --git a/source/dnode/vnode/src/tsdb/tsdbTDBImpl.c b/source/dnode/vnode/src/tsdb/tsdbTDBImpl.c
deleted file mode 100644
index a553f32bee0ad4d0df24ca844ad2616e5c4157ae..0000000000000000000000000000000000000000
--- a/source/dnode/vnode/src/tsdb/tsdbTDBImpl.c
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-#define ALLOW_FORBID_FUNC
-
-#include "tsdb.h"
-
-int32_t tsdbOpenDBEnv(TDB **ppEnv, const char *path) {
- int ret = 0;
-
- if (path == NULL) return -1;
-
- ret = tdbOpen(path, 4096, 256, ppEnv); // use as param
-
- if (ret != 0) {
- tsdbError("Failed to create tsdb db env, ret = %d", ret);
- return -1;
- }
-
- return 0;
-}
-
-int32_t tsdbCloseDBEnv(TDB *pEnv) { return tdbClose(pEnv); }
-
-static inline int tsdbSmaKeyCmpr(const void *arg1, int len1, const void *arg2, int len2) {
- const SSmaKey *pKey1 = (const SSmaKey *)arg1;
- const SSmaKey *pKey2 = (const SSmaKey *)arg2;
-
- ASSERT(len1 == len2 && len1 == sizeof(SSmaKey));
-
- if (pKey1->skey < pKey2->skey) {
- return -1;
- } else if (pKey1->skey > pKey2->skey) {
- return 1;
- }
- if (pKey1->groupId < pKey2->groupId) {
- return -1;
- } else if (pKey1->groupId > pKey2->groupId) {
- return 1;
- }
-
- return 0;
-}
-
-static int32_t tsdbOpenDBDb(TTB **ppDB, TDB *pEnv, const char *pFName) {
- int ret;
- tdb_cmpr_fn_t compFunc;
-
- // Create a database
- compFunc = tsdbSmaKeyCmpr;
- ret = tdbTbOpen(pFName, -1, -1, compFunc, pEnv, ppDB);
-
- return 0;
-}
-
-static int32_t tsdbCloseDBDb(TTB *pDB) { return tdbTbClose(pDB); }
-
-int32_t tsdbOpenDBF(TDB *pEnv, SDBFile *pDBF) {
- // TEnv is shared by a group of SDBFile
- if (!pEnv || !pDBF) {
- terrno = TSDB_CODE_INVALID_PTR;
- return -1;
- }
-
- // Open DBF
- if (tsdbOpenDBDb(&(pDBF->pDB), pEnv, pDBF->path) < 0) {
- terrno = TSDB_CODE_TDB_INIT_FAILED;
- tsdbCloseDBDb(pDBF->pDB);
- return -1;
- }
-
- return 0;
-}
-
-int32_t tsdbCloseDBF(SDBFile *pDBF) {
- int32_t ret = 0;
- if (pDBF->pDB) {
- ret = tsdbCloseDBDb(pDBF->pDB);
- pDBF->pDB = NULL;
- }
- taosMemoryFreeClear(pDBF->path);
- return ret;
-}
-
-int32_t tsdbSaveSmaToDB(SDBFile *pDBF, void *pKey, int32_t keyLen, void *pVal, int32_t valLen, TXN *txn) {
- int32_t ret;
-
- ret = tdbTbInsert(pDBF->pDB, pKey, keyLen, pVal, valLen, txn);
- if (ret < 0) {
- tsdbError("Failed to create insert sma data into db, ret = %d", ret);
- return -1;
- }
-
- return 0;
-}
-
-void *tsdbGetSmaDataByKey(SDBFile *pDBF, const void *pKey, int32_t keyLen, int32_t *valLen) {
- void *pVal = NULL;
- int ret;
-
- ret = tdbTbGet(pDBF->pDB, pKey, keyLen, &pVal, valLen);
-
- if (ret < 0) {
- tsdbError("Failed to get sma data from db, ret = %d", ret);
- return NULL;
- }
-
- ASSERT(*valLen >= 0);
-
- // TODO: lock?
- // TODO: Would the key/value be destoryed during return the data?
- // TODO: How about the key is updated while value length is changed? The original value buffer would be freed
- // automatically?
-
- return pVal;
-}
\ No newline at end of file
diff --git a/source/dnode/vnode/src/tsdb/tsdbWrite.c b/source/dnode/vnode/src/tsdb/tsdbWrite.c
index aab4da26a37119c6e0044849b1494ede2f33e552..6faf6bd1679c36dd0c9fdc0bed538f74cafc13cd 100644
--- a/source/dnode/vnode/src/tsdb/tsdbWrite.c
+++ b/source/dnode/vnode/src/tsdb/tsdbWrite.c
@@ -28,7 +28,7 @@ int tsdbInsertData(STsdb *pTsdb, int64_t version, SSubmitReq *pMsg, SSubmitRsp *
// scan and convert
if (tsdbScanAndConvertSubmitMsg(pTsdb, pMsg) < 0) {
if (terrno != TSDB_CODE_TDB_TABLE_RECONFIGURE) {
- tsdbError("vgId:%d failed to insert data since %s", REPO_ID(pTsdb), tstrerror(terrno));
+ tsdbError("vgId:%d, failed to insert data since %s", REPO_ID(pTsdb), tstrerror(terrno));
}
return -1;
}
@@ -59,7 +59,7 @@ static FORCE_INLINE int tsdbCheckRowRange(STsdb *pTsdb, STable *pTable, STSRow *
TSKEY now) {
TSKEY rowKey = TD_ROW_KEY(row);
if (rowKey < minKey || rowKey > maxKey) {
- tsdbError("vgId:%d table %s tid %d uid %" PRIu64 " timestamp is out of range! now %" PRId64 " minKey %" PRId64
+ tsdbError("vgId:%d, table %s tid %d uid %" PRIu64 " timestamp is out of range! now %" PRId64 " minKey %" PRId64
" maxKey %" PRId64 " row key %" PRId64,
REPO_ID(pTsdb), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), now, minKey, maxKey,
rowKey);
@@ -75,7 +75,7 @@ static FORCE_INLINE int tsdbCheckRowRange(STsdb *pTsdb, tb_uid_t uid, STSRow *ro
TSKEY now) {
TSKEY rowKey = TD_ROW_KEY(row);
if (rowKey < minKey || rowKey > maxKey) {
- tsdbError("vgId:%d table uid %" PRIu64 " timestamp is out of range! now %" PRId64 " minKey %" PRId64
+ tsdbError("vgId:%d, table uid %" PRIu64 " timestamp is out of range! now %" PRId64 " minKey %" PRId64
" maxKey %" PRId64 " row key %" PRId64,
REPO_ID(pTsdb), uid, now, minKey, maxKey, rowKey);
terrno = TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE;
@@ -115,7 +115,7 @@ int tsdbScanAndConvertSubmitMsg(STsdb *pTsdb, SSubmitReq *pMsg) {
#if 0
if (pBlock->tid <= 0 || pBlock->tid >= pMeta->maxTables) {
- tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pTsdb), pBlock->uid,
+ tsdbError("vgId:%d, failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pTsdb), pBlock->uid,
pBlock->tid);
terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
return -1;
@@ -123,14 +123,14 @@ int tsdbScanAndConvertSubmitMsg(STsdb *pTsdb, SSubmitReq *pMsg) {
STable *pTable = pMeta->tables[pBlock->tid];
if (pTable == NULL || TABLE_UID(pTable) != pBlock->uid) {
- tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pTsdb), pBlock->uid,
+ tsdbError("vgId:%d, failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pTsdb), pBlock->uid,
pBlock->tid);
terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
return -1;
}
if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) {
- tsdbError("vgId:%d invalid action trying to insert a super table %s", REPO_ID(pTsdb), TABLE_CHAR_NAME(pTable));
+ tsdbError("vgId:%d, invalid action trying to insert a super table %s", REPO_ID(pTsdb), TABLE_CHAR_NAME(pTable));
terrno = TSDB_CODE_TDB_INVALID_ACTION;
return -1;
}
diff --git a/source/dnode/vnode/src/vnd/vnodeBufPool.c b/source/dnode/vnode/src/vnd/vnodeBufPool.c
index 9122913cda69d05889e1f575a5da4b61ef4a03a9..9ca4dd6efb981acdf2ff271635b7e146052c7a40 100644
--- a/source/dnode/vnode/src/vnd/vnodeBufPool.c
+++ b/source/dnode/vnode/src/vnd/vnodeBufPool.c
@@ -30,7 +30,7 @@ int vnodeOpenBufPool(SVnode *pVnode, int64_t size) {
// create pool
ret = vnodeBufPoolCreate(size, &pPool);
if (ret < 0) {
- vError("vgId:%d failed to open vnode buffer pool since %s", TD_VID(pVnode), tstrerror(terrno));
+ vError("vgId:%d, failed to open vnode buffer pool since %s", TD_VID(pVnode), tstrerror(terrno));
vnodeCloseBufPool(pVnode);
return -1;
}
@@ -40,7 +40,7 @@ int vnodeOpenBufPool(SVnode *pVnode, int64_t size) {
pVnode->pPool = pPool;
}
- vDebug("vgId:%d vnode buffer pool is opened, pool size: %" PRId64, TD_VID(pVnode), size);
+ vDebug("vgId:%d, vnode buffer pool is opened, pool size: %" PRId64, TD_VID(pVnode), size);
return 0;
}
@@ -53,7 +53,7 @@ int vnodeCloseBufPool(SVnode *pVnode) {
vnodeBufPoolDestroy(pPool);
}
- vDebug("vgId:%d vnode buffer pool is closed", TD_VID(pVnode));
+ vDebug("vgId:%d, vnode buffer pool is closed", TD_VID(pVnode));
return 0;
}
diff --git a/source/dnode/vnode/src/vnd/vnodeCfg.c b/source/dnode/vnode/src/vnd/vnodeCfg.c
index a66ecc493d7cbef19370349568398d084dc5bc27..e8fa2ed3c140312d3f64d42fbf5449178c67a772 100644
--- a/source/dnode/vnode/src/vnd/vnodeCfg.c
+++ b/source/dnode/vnode/src/vnd/vnodeCfg.c
@@ -56,6 +56,8 @@ int vnodeEncodeConfig(const void *pObj, SJson *pJson) {
if (tjsonAddIntegerToObject(pJson, "szBuf", pCfg->szBuf) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "isHeap", pCfg->isHeap) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "isWeak", pCfg->isWeak) < 0) return -1;
+ if (tjsonAddIntegerToObject(pJson, "isTsma", pCfg->isTsma) < 0) return -1;
+ if (tjsonAddIntegerToObject(pJson, "isRsma", pCfg->isRsma) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "precision", pCfg->tsdbCfg.precision) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "update", pCfg->tsdbCfg.update) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "compression", pCfg->tsdbCfg.compression) < 0) return -1;
@@ -130,6 +132,10 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) {
if(code < 0) return -1;
tjsonGetNumberValue(pJson, "isWeak", pCfg->isWeak, code);
if(code < 0) return -1;
+ tjsonGetNumberValue(pJson, "isTsma", pCfg->isTsma, code);
+ if(code < 0) return -1;
+ tjsonGetNumberValue(pJson, "isRsma", pCfg->isRsma, code);
+ if(code < 0) return -1;
tjsonGetNumberValue(pJson, "precision", pCfg->tsdbCfg.precision, code);
if(code < 0) return -1;
tjsonGetNumberValue(pJson, "update", pCfg->tsdbCfg.update, code);
diff --git a/source/dnode/vnode/src/vnd/vnodeCommit.c b/source/dnode/vnode/src/vnd/vnodeCommit.c
index b4fbd01c633c87ae8d14c707a9bfba2cbb0511a0..3715866bb88f3ae030d5f65c7ce938e69120f466 100644
--- a/source/dnode/vnode/src/vnd/vnodeCommit.c
+++ b/source/dnode/vnode/src/vnd/vnodeCommit.c
@@ -42,29 +42,29 @@ int vnodeBegin(SVnode *pVnode) {
// begin meta
if (metaBegin(pVnode->pMeta) < 0) {
- vError("vgId:%d failed to begin meta since %s", TD_VID(pVnode), tstrerror(terrno));
+ vError("vgId:%d, failed to begin meta since %s", TD_VID(pVnode), tstrerror(terrno));
return -1;
}
// begin tsdb
if (pVnode->pSma) {
if (tsdbBegin(VND_RSMA0(pVnode)) < 0) {
- vError("vgId:%d failed to begin rsma0 since %s", TD_VID(pVnode), tstrerror(terrno));
+ vError("vgId:%d, failed to begin rsma0 since %s", TD_VID(pVnode), tstrerror(terrno));
return -1;
}
if (tsdbBegin(VND_RSMA1(pVnode)) < 0) {
- vError("vgId:%d failed to begin rsma1 since %s", TD_VID(pVnode), tstrerror(terrno));
+ vError("vgId:%d, failed to begin rsma1 since %s", TD_VID(pVnode), tstrerror(terrno));
return -1;
}
if (tsdbBegin(VND_RSMA2(pVnode)) < 0) {
- vError("vgId:%d failed to begin rsma2 since %s", TD_VID(pVnode), tstrerror(terrno));
+ vError("vgId:%d, failed to begin rsma2 since %s", TD_VID(pVnode), tstrerror(terrno));
return -1;
}
} else {
if (tsdbBegin(pVnode->pTsdb) < 0) {
- vError("vgId:%d failed to begin tsdb since %s", TD_VID(pVnode), tstrerror(terrno));
+ vError("vgId:%d, failed to begin tsdb since %s", TD_VID(pVnode), tstrerror(terrno));
return -1;
}
}
@@ -110,7 +110,7 @@ int vnodeSaveInfo(const char *dir, const SVnodeInfo *pInfo) {
// free info binary
taosMemoryFree(data);
- vInfo("vgId:%d vnode info is saved, fname: %s", pInfo->config.vgId, fname);
+ vInfo("vgId:%d, vnode info is saved, fname: %s", pInfo->config.vgId, fname);
return 0;
@@ -132,7 +132,7 @@ int vnodeCommitInfo(const char *dir, const SVnodeInfo *pInfo) {
return -1;
}
- vInfo("vgId:%d vnode info is committed", pInfo->config.vgId);
+ vInfo("vgId:%d, vnode info is committed", pInfo->config.vgId);
return 0;
}
@@ -210,7 +210,7 @@ int vnodeCommit(SVnode *pVnode) {
SVnodeInfo info = {0};
char dir[TSDB_FILENAME_LEN];
- vInfo("vgId:%d start to commit, version: %" PRId64, TD_VID(pVnode), pVnode->state.applied);
+ vInfo("vgId:%d, start to commit, version: %" PRId64, TD_VID(pVnode), pVnode->state.applied);
pVnode->onCommit = pVnode->inUse;
pVnode->inUse = NULL;
@@ -230,7 +230,7 @@ int vnodeCommit(SVnode *pVnode) {
return -1;
}
- if(vnodeIsRollup(pVnode)) {
+ if (VND_IS_RSMA(pVnode)) {
if (tsdbCommit(VND_RSMA0(pVnode)) < 0) {
ASSERT(0);
return -1;
@@ -250,7 +250,6 @@ int vnodeCommit(SVnode *pVnode) {
}
}
-
if (tqCommit(pVnode->pTq) < 0) {
ASSERT(0);
return -1;
@@ -269,7 +268,7 @@ int vnodeCommit(SVnode *pVnode) {
pVnode->pPool = pVnode->onCommit;
pVnode->onCommit = NULL;
- vInfo("vgId:%d commit over", TD_VID(pVnode));
+ vInfo("vgId:%d, commit over", TD_VID(pVnode));
return 0;
}
diff --git a/source/dnode/vnode/src/vnd/vnodeModule.c b/source/dnode/vnode/src/vnd/vnodeModule.c
index efae74b55a95525c105c7a8c3de3e887a0f3b2d2..d0aede145eb8640e1e9031160d5ab7573d4a74e8 100644
--- a/source/dnode/vnode/src/vnd/vnodeModule.c
+++ b/source/dnode/vnode/src/vnd/vnodeModule.c
@@ -69,6 +69,9 @@ int vnodeInit(int nthreads) {
if (walInit() < 0) {
return -1;
}
+ if (tqInit() < 0) {
+ return -1;
+ }
return 0;
}
@@ -94,6 +97,9 @@ void vnodeCleanup() {
taosMemoryFreeClear(vnodeGlobal.threads);
taosThreadCondDestroy(&(vnodeGlobal.hasTask));
taosThreadMutexDestroy(&(vnodeGlobal.mutex));
+
+ walCleanUp();
+ tqCleanUp();
}
int vnodeScheduleTask(int (*execute)(void*), void* arg) {
@@ -155,4 +161,4 @@ static void* loop(void* arg) {
}
return NULL;
-}
\ No newline at end of file
+}
diff --git a/source/dnode/vnode/src/vnd/vnodeOpen.c b/source/dnode/vnode/src/vnd/vnodeOpen.c
index a90bb7afcb6847ba0cb803d7a7e58720159bf10f..a85b8306165326bc07f643718e9b67201e668de6 100644
--- a/source/dnode/vnode/src/vnd/vnodeOpen.c
+++ b/source/dnode/vnode/src/vnd/vnodeOpen.c
@@ -23,13 +23,13 @@ int vnodeCreate(const char *path, SVnodeCfg *pCfg, STfs *pTfs) {
// check config
if (vnodeCheckCfg(pCfg) < 0) {
- vError("vgId:%d failed to create vnode since: %s", pCfg->vgId, tstrerror(terrno));
+ vError("vgId:%d, failed to create vnode since: %s", pCfg->vgId, tstrerror(terrno));
return -1;
}
// create vnode env
if (tfsMkdir(pTfs, path) < 0) {
- vError("vgId:%d failed to create vnode since: %s", pCfg->vgId, tstrerror(terrno));
+ vError("vgId:%d, failed to create vnode since: %s", pCfg->vgId, tstrerror(terrno));
return -1;
}
@@ -39,11 +39,11 @@ int vnodeCreate(const char *path, SVnodeCfg *pCfg, STfs *pTfs) {
info.state.applied = -1;
if (vnodeSaveInfo(dir, &info) < 0 || vnodeCommitInfo(dir, &info) < 0) {
- vError("vgId:%d failed to save vnode config since %s", pCfg->vgId, tstrerror(terrno));
+ vError("vgId:%d, failed to save vnode config since %s", pCfg->vgId, tstrerror(terrno));
return -1;
}
- vInfo("vgId:%d vnode is created", pCfg->vgId);
+ vInfo("vgId:%d, vnode is created", pCfg->vgId);
return 0;
}
@@ -70,7 +70,7 @@ SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb) {
pVnode = (SVnode *)taosMemoryCalloc(1, sizeof(*pVnode) + strlen(path) + 1);
if (pVnode == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
- vError("vgId:%d failed to open vnode since %s", info.config.vgId, tstrerror(terrno));
+ vError("vgId:%d, failed to open vnode since %s", info.config.vgId, tstrerror(terrno));
return NULL;
}
@@ -86,25 +86,25 @@ SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb) {
// open buffer pool
if (vnodeOpenBufPool(pVnode, pVnode->config.isHeap ? 0 : pVnode->config.szBuf / 3) < 0) {
- vError("vgId:%d failed to open vnode buffer pool since %s", TD_VID(pVnode), tstrerror(terrno));
+ vError("vgId:%d, failed to open vnode buffer pool since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;
}
// open meta
if (metaOpen(pVnode, &pVnode->pMeta) < 0) {
- vError("vgId:%d failed to open vnode meta since %s", TD_VID(pVnode), tstrerror(terrno));
+ vError("vgId:%d, failed to open vnode meta since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;
}
// open tsdb
- if (!vnodeIsRollup(pVnode) && tsdbOpen(pVnode, &VND_TSDB(pVnode), VNODE_TSDB_DIR, TSDB_TYPE_TSDB) < 0) {
- vError("vgId:%d failed to open vnode tsdb since %s", TD_VID(pVnode), tstrerror(terrno));
+ if (!VND_IS_RSMA(pVnode) && tsdbOpen(pVnode, &VND_TSDB(pVnode), VNODE_TSDB_DIR, NULL) < 0) {
+ vError("vgId:%d, failed to open vnode tsdb since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;
}
// open sma
if (smaOpen(pVnode)) {
- vError("vgId:%d failed to open vnode sma since %s", TD_VID(pVnode), tstrerror(terrno));
+ vError("vgId:%d, failed to open vnode sma since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;
}
@@ -113,7 +113,7 @@ SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb) {
taosRealPath(tdir, NULL, sizeof(tdir));
pVnode->pWal = walOpen(tdir, &(pVnode->config.walCfg));
if (pVnode->pWal == NULL) {
- vError("vgId:%d failed to open vnode wal since %s", TD_VID(pVnode), tstrerror(terrno));
+ vError("vgId:%d, failed to open vnode wal since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;
}
@@ -122,27 +122,27 @@ SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb) {
taosRealPath(tdir, NULL, sizeof(tdir));
pVnode->pTq = tqOpen(tdir, pVnode, pVnode->pWal);
if (pVnode->pTq == NULL) {
- vError("vgId:%d failed to open vnode tq since %s", TD_VID(pVnode), tstrerror(terrno));
+ vError("vgId:%d, failed to open vnode tq since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;
}
// open query
if (vnodeQueryOpen(pVnode)) {
- vError("vgId:%d failed to open vnode query since %s", TD_VID(pVnode), tstrerror(terrno));
+ vError("vgId:%d, failed to open vnode query since %s", TD_VID(pVnode), tstrerror(terrno));
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
// vnode begin
if (vnodeBegin(pVnode) < 0) {
- vError("vgId:%d failed to begin since %s", TD_VID(pVnode), tstrerror(terrno));
+ vError("vgId:%d, failed to begin since %s", TD_VID(pVnode), tstrerror(terrno));
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
// open sync
if (vnodeSyncOpen(pVnode, dir)) {
- vError("vgId:%d failed to open sync since %s", TD_VID(pVnode), tstrerror(terrno));
+ vError("vgId:%d, failed to open sync since %s", TD_VID(pVnode), tstrerror(terrno));
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
@@ -189,4 +189,4 @@ void vnodeStop(SVnode *pVnode) {}
int64_t vnodeGetSyncHandle(SVnode *pVnode) { return pVnode->sync; }
-void vnodeGetSnapshot(SVnode *pVnode, SSnapshot *pSnapshot) { pSnapshot->lastApplyIndex = pVnode->state.committed; }
\ No newline at end of file
+void vnodeGetSnapshot(SVnode *pVnode, SSnapshot *pSnapshot) { pSnapshot->lastApplyIndex = pVnode->state.committed; }
diff --git a/source/dnode/vnode/src/vnd/vnodeQuery.c b/source/dnode/vnode/src/vnd/vnodeQuery.c
index 3b47b9025492a7ad53514750b9e88dbf01f52d49..9afe25fbf10e866805ba3a9f096071690584376f 100644
--- a/source/dnode/vnode/src/vnd/vnodeQuery.c
+++ b/source/dnode/vnode/src/vnd/vnodeQuery.c
@@ -64,7 +64,7 @@ int vnodeGetTableMeta(SVnode *pVnode, SRpcMsg *pMsg) {
if (mer1.me.type == TSDB_SUPER_TABLE) {
strcpy(metaRsp.stbName, mer1.me.name);
- schema = mer1.me.stbEntry.schema;
+ schema = mer1.me.stbEntry.schemaRow;
schemaTag = mer1.me.stbEntry.schemaTag;
metaRsp.suid = mer1.me.uid;
} else if (mer1.me.type == TSDB_CHILD_TABLE) {
@@ -73,10 +73,10 @@ int vnodeGetTableMeta(SVnode *pVnode, SRpcMsg *pMsg) {
strcpy(metaRsp.stbName, mer2.me.name);
metaRsp.suid = mer2.me.uid;
- schema = mer2.me.stbEntry.schema;
+ schema = mer2.me.stbEntry.schemaRow;
schemaTag = mer2.me.stbEntry.schemaTag;
} else if (mer1.me.type == TSDB_NORMAL_TABLE) {
- schema = mer1.me.ntbEntry.schema;
+ schema = mer1.me.ntbEntry.schemaRow;
} else {
ASSERT(0);
}
@@ -84,7 +84,7 @@ int vnodeGetTableMeta(SVnode *pVnode, SRpcMsg *pMsg) {
metaRsp.numOfTags = schemaTag.nCols;
metaRsp.numOfColumns = schema.nCols;
metaRsp.precision = pVnode->config.tsdbCfg.precision;
- metaRsp.sversion = schema.sver;
+ metaRsp.sversion = schema.version;
metaRsp.pSchemas = (SSchema *)taosMemoryMalloc(sizeof(SSchema) * (metaRsp.numOfColumns + metaRsp.numOfTags));
memcpy(metaRsp.pSchemas, schema.pSchema, sizeof(SSchema) * schema.nCols);
@@ -147,16 +147,10 @@ void vnodeGetInfo(SVnode *pVnode, const char **dbname, int32_t *vgId) {
}
// wrapper of tsdb read interface
-tsdbReaderT tsdbQueryCacheLast(SVnode *pVnode, SQueryTableDataCond *pCond, STableGroupInfo *groupList, uint64_t qId,
+tsdbReaderT tsdbQueryCacheLast(SVnode *pVnode, SQueryTableDataCond *pCond, STableListInfo* tableList, uint64_t qId,
void *pMemRef) {
#if 0
return tsdbQueryCacheLastT(pVnode->pTsdb, pCond, groupList, qId, pMemRef);
#endif
return 0;
-}
-int32_t tsdbGetTableGroupFromIdList(SVnode *pVnode, SArray *pTableIdList, STableGroupInfo *pGroupInfo) {
-#if 0
- return tsdbGetTableGroupFromIdListT(pVnode->pTsdb, pTableIdList, pGroupInfo);
-#endif
- return 0;
}
\ No newline at end of file
diff --git a/source/dnode/vnode/src/vnd/vnodeSnapshot.c b/source/dnode/vnode/src/vnd/vnodeSnapshot.c
new file mode 100644
index 0000000000000000000000000000000000000000..baa8422307dd7785201bcc4b8b632bb3c05a37cb
--- /dev/null
+++ b/source/dnode/vnode/src/vnd/vnodeSnapshot.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include "vnodeInt.h"
+
+struct SVSnapshotReader {
+ SVnode *pVnode;
+ int64_t sver;
+ int64_t ever;
+ int8_t isMetaEnd;
+ int8_t isTsdbEnd;
+ SMetaSnapshotReader *pMetaReader;
+ STsdbSnapshotReader *pTsdbReader;
+ void *pData;
+ int32_t nData;
+};
+
+int32_t vnodeSnapshotReaderOpen(SVnode *pVnode, SVSnapshotReader **ppReader, int64_t sver, int64_t ever) {
+ SVSnapshotReader *pReader = NULL;
+
+ pReader = (SVSnapshotReader *)taosMemoryCalloc(1, sizeof(*pReader));
+ if (pReader == NULL) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ pReader->pVnode = pVnode;
+ pReader->sver = sver;
+ pReader->ever = ever;
+ pReader->isMetaEnd = 0;
+ pReader->isTsdbEnd = 0;
+
+ if (metaSnapshotReaderOpen(pVnode->pMeta, &pReader->pMetaReader, sver, ever) < 0) {
+ taosMemoryFree(pReader);
+ goto _err;
+ }
+
+ if (tsdbSnapshotReaderOpen(pVnode->pTsdb, &pReader->pTsdbReader, sver, ever) < 0) {
+ metaSnapshotReaderClose(pReader->pMetaReader);
+ taosMemoryFree(pReader);
+ goto _err;
+ }
+
+_exit:
+ *ppReader = pReader;
+ return 0;
+
+_err:
+ *ppReader = NULL;
+ return -1;
+}
+
+int32_t vnodeSnapshotReaderClose(SVSnapshotReader *pReader) {
+ if (pReader) {
+ vnodeFree(pReader->pData);
+ tsdbSnapshotReaderClose(pReader->pTsdbReader);
+ metaSnapshotReaderClose(pReader->pMetaReader);
+ taosMemoryFree(pReader);
+ }
+ return 0;
+}
+
+int32_t vnodeSnapshotRead(SVSnapshotReader *pReader, const void **ppData, uint32_t *nData) {
+ int32_t code = 0;
+
+ if (!pReader->isMetaEnd) {
+ code = metaSnapshotRead(pReader->pMetaReader, &pReader->pData, &pReader->nData);
+ if (code) {
+ if (code == TSDB_CODE_VND_READ_END) {
+ pReader->isMetaEnd = 1;
+ } else {
+ return code;
+ }
+ } else {
+ *ppData = pReader->pData;
+ *nData = pReader->nData;
+ return code;
+ }
+ }
+
+ if (!pReader->isTsdbEnd) {
+ code = tsdbSnapshotRead(pReader->pTsdbReader, &pReader->pData, &pReader->nData);
+ if (code) {
+ if (code == TSDB_CODE_VND_READ_END) {
+ pReader->isTsdbEnd = 1;
+ } else {
+ return code;
+ }
+ } else {
+ *ppData = pReader->pData;
+ *nData = pReader->nData;
+ return code;
+ }
+ }
+
+ code = TSDB_CODE_VND_READ_END;
+ return code;
+}
\ No newline at end of file
diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c
index 5e50a1b79627d02c51da5f040d9559455aaaca79..47eaf2d5a5e270cabbb33290682bd9c7f096b5c3 100644
--- a/source/dnode/vnode/src/vnd/vnodeSvr.c
+++ b/source/dnode/vnode/src/vnd/vnodeSvr.c
@@ -22,9 +22,10 @@ static int vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pReq,
static int vnodeProcessAlterTbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
static int vnodeProcessDropTbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
-static int vnodeProcessCreateTSmaReq(SVnode *pVnode, int64_t version, void *pReq, int len, SRpcMsg *pRsp);
+static int vnodeProcessCreateTSmaReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
int32_t vnodePreprocessReq(SVnode *pVnode, SRpcMsg *pMsg) {
+ int32_t code = 0;
SDecoder dc = {0};
switch (pMsg->msgType) {
@@ -38,9 +39,11 @@ int32_t vnodePreprocessReq(SVnode *pVnode, SRpcMsg *pMsg) {
tDecodeI32v(&dc, &nReqs);
for (int32_t iReq = 0; iReq < nReqs; iReq++) {
tb_uid_t uid = tGenIdPI64();
+ char *name = NULL;
tStartDecode(&dc);
tDecodeI32v(&dc, NULL);
+ tDecodeCStr(&dc, &name);
*(int64_t *)(dc.data + dc.pos) = uid;
*(int64_t *)(dc.data + dc.pos + 8) = ctime;
@@ -64,12 +67,18 @@ int32_t vnodePreprocessReq(SVnode *pVnode, SRpcMsg *pMsg) {
if (pBlock == NULL) break;
if (msgIter.schemaLen > 0) {
- uid = tGenIdPI64();
+ char *name = NULL;
tDecoderInit(&dc, pBlock->data, msgIter.schemaLen);
tStartDecode(&dc);
tDecodeI32v(&dc, NULL);
+ tDecodeCStr(&dc, &name);
+
+ uid = metaGetTableEntryUidByName(pVnode->pMeta, name);
+ if (uid == 0) {
+ uid = tGenIdPI64();
+ }
*(int64_t *)(dc.data + dc.pos) = uid;
*(int64_t *)(dc.data + dc.pos + 8) = ctime;
pBlock->uid = htobe64(uid);
@@ -80,11 +89,14 @@ int32_t vnodePreprocessReq(SVnode *pVnode, SRpcMsg *pMsg) {
}
} break;
+ case TDMT_VND_ALTER_REPLICA: {
+ code = vnodeSyncAlter(pVnode, pMsg);
+ } break;
default:
break;
}
- return 0;
+ return code;
}
int vnodeProcessWriteReq(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRpcMsg *pRsp) {
@@ -93,7 +105,7 @@ int vnodeProcessWriteReq(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRpcMsg
int len;
int ret;
- vTrace("vgId:%d start to process write request %s, version %" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType),
+ vTrace("vgId:%d, start to process write request %s, version %" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType),
version);
pVnode->state.applied = version;
@@ -146,23 +158,23 @@ int vnodeProcessWriteReq(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRpcMsg
pMsg->contLen - sizeof(SMsgHead)) < 0) {
}
} break;
- case TDMT_VND_ALTER_VNODE:
+ case TDMT_VND_ALTER_CONFIG:
break;
default:
ASSERT(0);
break;
}
- vDebug("vgId:%d process %s request success, version: %" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType), version);
+ vTrace("vgId:%d, process %s request success, version: %" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType), version);
if (tqPushMsg(pVnode->pTq, pMsg->pCont, pMsg->contLen, pMsg->msgType, version) < 0) {
- vError("vgId:%d failed to push msg to TQ since %s", TD_VID(pVnode), tstrerror(terrno));
+ vError("vgId:%d, failed to push msg to TQ since %s", TD_VID(pVnode), tstrerror(terrno));
return -1;
}
// commit if need
if (vnodeShouldCommit(pVnode)) {
- vInfo("vgId:%d commit at version %" PRId64, TD_VID(pVnode), version);
+ vInfo("vgId:%d, commit at version %" PRId64, TD_VID(pVnode), version);
// commit current change
vnodeCommit(pVnode);
@@ -173,7 +185,7 @@ int vnodeProcessWriteReq(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRpcMsg
return 0;
_err:
- vDebug("vgId:%d process %s request failed since %s, version: %" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType),
+ vError("vgId:%d, process %s request failed since %s, version: %" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType),
tstrerror(terrno), version);
return -1;
}
@@ -183,9 +195,9 @@ int vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) {
SReadHandle handle = {.meta = pVnode->pMeta, .config = &pVnode->config, .vnode = pVnode, .pMsgCb = &pVnode->msgCb};
switch (pMsg->msgType) {
case TDMT_VND_QUERY:
- return qWorkerProcessQueryMsg(&handle, pVnode->pQuery, pMsg);
+ return qWorkerProcessQueryMsg(&handle, pVnode->pQuery, pMsg, 0);
case TDMT_VND_QUERY_CONTINUE:
- return qWorkerProcessCQueryMsg(&handle, pVnode->pQuery, pMsg);
+ return qWorkerProcessCQueryMsg(&handle, pVnode->pQuery, pMsg, 0);
default:
vError("unknown msg type:%d in query queue", pMsg->msgType);
return TSDB_CODE_VND_APP_ERROR;
@@ -198,17 +210,16 @@ int vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
int32_t msgLen = pMsg->contLen - sizeof(SMsgHead);
switch (pMsg->msgType) {
case TDMT_VND_FETCH:
- return qWorkerProcessFetchMsg(pVnode, pVnode->pQuery, pMsg);
+ return qWorkerProcessFetchMsg(pVnode, pVnode->pQuery, pMsg, 0);
case TDMT_VND_FETCH_RSP:
- return qWorkerProcessFetchRsp(pVnode, pVnode->pQuery, pMsg);
- case TDMT_VND_RES_READY:
- return qWorkerProcessReadyMsg(pVnode, pVnode->pQuery, pMsg);
- case TDMT_VND_TASKS_STATUS:
- return qWorkerProcessStatusMsg(pVnode, pVnode->pQuery, pMsg);
+ return qWorkerProcessFetchRsp(pVnode, pVnode->pQuery, pMsg, 0);
case TDMT_VND_CANCEL_TASK:
- return qWorkerProcessCancelMsg(pVnode, pVnode->pQuery, pMsg);
+ return qWorkerProcessCancelMsg(pVnode, pVnode->pQuery, pMsg, 0);
case TDMT_VND_DROP_TASK:
- return qWorkerProcessDropMsg(pVnode, pVnode->pQuery, pMsg);
+ return qWorkerProcessDropMsg(pVnode, pVnode->pQuery, pMsg, 0);
+ case TDMT_VND_QUERY_HEARTBEAT:
+ return qWorkerProcessHbMsg(pVnode, pVnode->pQuery, pMsg, 0);
+
case TDMT_VND_TABLE_META:
return vnodeGetTableMeta(pVnode, pMsg);
case TDMT_VND_CONSUME:
@@ -227,9 +238,6 @@ int vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
return tqProcessTaskDispatchRsp(pVnode->pTq, pMsg);
case TDMT_VND_TASK_RECOVER_RSP:
return tqProcessTaskRecoverRsp(pVnode->pTq, pMsg);
-
- case TDMT_VND_QUERY_HEARTBEAT:
- return qWorkerProcessHbMsg(pVnode, pVnode->pQuery, pMsg);
default:
vError("unknown msg type:%d in fetch queue", pMsg->msgType);
return TSDB_CODE_VND_APP_ERROR;
@@ -244,6 +252,13 @@ void smaHandleRes(void *pVnode, int64_t smaId, const SArray *data) {
tdProcessTSmaInsert(((SVnode *)pVnode)->pSma, smaId, (const char *)data);
}
+void vnodeUpdateMetaRsp(SVnode *pVnode, STableMetaRsp *pMetaRsp) {
+ strcpy(pMetaRsp->dbFName, pVnode->config.dbname);
+ pMetaRsp->dbId = pVnode->config.dbId;
+ pMetaRsp->vgId = TD_VID(pVnode);
+ pMetaRsp->precision = pVnode->config.tsdbCfg.precision;
+}
+
int vnodeProcessSyncReq(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {
int32_t ret = TAOS_SYNC_PROPOSE_OTHER_ERROR;
@@ -256,7 +271,7 @@ int vnodeProcessSyncReq(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {
SMsgHead *pHead = pMsg->pCont;
- char logBuf[512];
+ char logBuf[512] = {0};
char *syncNodeStr = sync2SimpleStr(pVnode->sync);
snprintf(logBuf, sizeof(logBuf), "==vnodeProcessSyncReq== msgType:%d, syncNode: %s", pMsg->msgType, syncNodeStr);
syncRpcMsgLog2(logBuf, pMsg);
@@ -356,7 +371,7 @@ static int vnodeProcessCreateStbReq(SVnode *pVnode, int64_t version, void *pReq,
goto _err;
}
- tdProcessRSmaCreate(pVnode->pSma, pVnode->pMeta, &req, &pVnode->msgCb);
+ tdProcessRSmaCreate(pVnode, &req);
tDecoderClear(&coder);
return 0;
@@ -513,12 +528,13 @@ _exit:
}
static int vnodeProcessAlterTbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
- SVAlterTbReq vAlterTbReq = {0};
- SVAlterTbRsp vAlterTbRsp = {0};
- SDecoder dc = {0};
- int rcode = 0;
- int ret;
- SEncoder ec = {0};
+ SVAlterTbReq vAlterTbReq = {0};
+ SVAlterTbRsp vAlterTbRsp = {0};
+ SDecoder dc = {0};
+ int rcode = 0;
+ int ret;
+ SEncoder ec = {0};
+ STableMetaRsp vMetaRsp = {0};
pRsp->msgType = TDMT_VND_ALTER_TABLE_RSP;
pRsp->pCont = NULL;
@@ -536,7 +552,7 @@ static int vnodeProcessAlterTbReq(SVnode *pVnode, int64_t version, void *pReq, i
}
// process
- if (metaAlterTable(pVnode->pMeta, version, &vAlterTbReq) < 0) {
+ if (metaAlterTable(pVnode->pMeta, version, &vAlterTbReq, &vMetaRsp) < 0) {
vAlterTbRsp.code = TSDB_CODE_INVALID_MSG;
tDecoderClear(&dc);
rcode = -1;
@@ -544,6 +560,11 @@ static int vnodeProcessAlterTbReq(SVnode *pVnode, int64_t version, void *pReq, i
}
tDecoderClear(&dc);
+ if (NULL != vMetaRsp.pSchemas) {
+ vnodeUpdateMetaRsp(pVnode, &vMetaRsp);
+ vAlterTbRsp.pMeta = &vMetaRsp;
+ }
+
_exit:
tEncodeSize(tEncodeSVAlterTbRsp, &vAlterTbRsp, pRsp->contLen, ret);
pRsp->pCont = rpcMallocCont(pRsp->contLen);
@@ -617,16 +638,18 @@ static int vnodeDebugPrintSingleSubmitMsg(SMeta *pMeta, SSubmitBlk *pBlock, SSub
STSchema *pSchema = NULL;
tb_uid_t suid = 0;
STSRow *row = NULL;
+ int32_t rv = -1;
tInitSubmitBlkIter(msgIter, pBlock, &blkIter);
if (blkIter.row == NULL) return 0;
- if (!pSchema || (suid != msgIter->suid)) {
+ if (!pSchema || (suid != msgIter->suid) || rv != TD_ROW_SVER(blkIter.row)) {
if (pSchema) {
taosMemoryFreeClear(pSchema);
}
- pSchema = metaGetTbTSchema(pMeta, msgIter->suid, 1); // TODO: use the real schema
+ pSchema = metaGetTbTSchema(pMeta, msgIter->suid, TD_ROW_SVER(blkIter.row)); // TODO: use the real schema
if (pSchema) {
suid = msgIter->suid;
+ rv = TD_ROW_SVER(blkIter.row);
}
}
if (!pSchema) {
@@ -672,6 +695,7 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in
int32_t nRows;
int32_t tsize, ret;
SEncoder encoder = {0};
+ SArray *newTbUids = NULL;
terrno = TSDB_CODE_SUCCESS;
pRsp->code = 0;
@@ -692,6 +716,7 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in
}
submitRsp.pArray = taosArrayInit(pSubmitReq->numOfBlocks, sizeof(SSubmitBlkRsp));
+ newTbUids = taosArrayInit(pSubmitReq->numOfBlocks, sizeof(int64_t));
if (!submitRsp.pArray) {
pRsp->code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
@@ -721,6 +746,7 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in
goto _exit;
}
}
+ taosArrayPush(newTbUids, &createTbReq.uid);
submitBlkRsp.uid = createTbReq.uid;
submitBlkRsp.tblFName = taosMemoryMalloc(strlen(pVnode->config.dbname) + strlen(createTbReq.name) + 2);
@@ -748,8 +774,10 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in
submitRsp.affectedRows += submitBlkRsp.affectedRows;
taosArrayPush(submitRsp.pArray, &submitBlkRsp);
}
+ tqUpdateTbUidList(pVnode->pTq, newTbUids, true);
_exit:
+ taosArrayDestroy(newTbUids);
tEncodeSize(tEncodeSSubmitRsp, &submitRsp, tsize, ret);
pRsp->pCont = rpcMallocCont(tsize);
pRsp->contLen = tsize;
@@ -765,28 +793,30 @@ _exit:
// TODO: the partial success scenario and the error case
// TODO: refactor
- if ((terrno == TSDB_CODE_SUCCESS || terrno == TSDB_CODE_TDB_TABLE_ALREADY_EXIST) &&
- (pRsp->code == TSDB_CODE_SUCCESS)) {
+ if ((terrno == TSDB_CODE_SUCCESS) && (pRsp->code == TSDB_CODE_SUCCESS)) {
tdProcessRSmaSubmit(pVnode->pSma, pReq, STREAM_DATA_TYPE_SUBMIT_BLOCK);
}
return 0;
}
-static int vnodeProcessCreateTSmaReq(SVnode *pVnode, int64_t version, void *pReq, int len, SRpcMsg *pRsp) {
+static int vnodeProcessCreateTSmaReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
SVCreateTSmaReq req = {0};
SDecoder coder;
- pRsp->msgType = TDMT_VND_CREATE_SMA_RSP;
- pRsp->code = TSDB_CODE_SUCCESS;
- pRsp->pCont = NULL;
- pRsp->contLen = 0;
+ if (pRsp) {
+ pRsp->msgType = TDMT_VND_CREATE_SMA_RSP;
+ pRsp->code = TSDB_CODE_SUCCESS;
+ pRsp->pCont = NULL;
+ pRsp->contLen = 0;
+ }
// decode and process req
tDecoderInit(&coder, pReq, len);
if (tDecodeSVCreateTSmaReq(&coder, &req) < 0) {
- pRsp->code = terrno;
+ terrno = TSDB_CODE_MSG_DECODE_ERROR;
+ if (pRsp) pRsp->code = terrno;
goto _err;
}
@@ -794,18 +824,30 @@ static int vnodeProcessCreateTSmaReq(SVnode *pVnode, int64_t version, void *pReq
req.timezoneInt = tsTimezone;
if (tdProcessTSmaCreate(pVnode->pSma, version, (const char *)&req) < 0) {
- pRsp->code = terrno;
+ if (pRsp) pRsp->code = terrno;
goto _err;
}
tDecoderClear(&coder);
- vDebug("vgId:%d success to create tsma %s:%" PRIi64 " for table %" PRIi64, TD_VID(pVnode), req.indexName,
- req.indexUid, req.tableUid);
+ vDebug("vgId:%d, success to create tsma %s:%" PRIi64 " version %" PRIi64 " for table %" PRIi64, TD_VID(pVnode),
+ req.indexName, req.indexUid, version, req.tableUid);
return 0;
_err:
tDecoderClear(&coder);
- vError("vgId:%d failed to create tsma %s:%" PRIi64 " for table %" PRIi64 " since %s", TD_VID(pVnode), req.indexName,
- req.indexUid, req.tableUid, terrstr(terrno));
+ vError("vgId:%d, failed to create tsma %s:%" PRIi64 " version %" PRIi64 "for table %" PRIi64 " since %s",
+ TD_VID(pVnode), req.indexName, req.indexUid, version, req.tableUid, terrstr(terrno));
return -1;
}
+
+/**
+ * @brief specific for smaDstVnode
+ *
+ * @param pVnode
+ * @param pCont
+ * @param contLen
+ * @return int32_t
+ */
+int32_t vnodeProcessCreateTSma(SVnode *pVnode, void *pCont, uint32_t contLen) {
+ return vnodeProcessCreateTSmaReq(pVnode, 1, pCont, contLen, NULL);
+}
diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c
index 882ee912cde37414bc219efe75c113d0868c1810..37f765d786bf14476f64643bb3803a9e21690b51 100644
--- a/source/dnode/vnode/src/vnd/vnodeSync.c
+++ b/source/dnode/vnode/src/vnd/vnodeSync.c
@@ -27,6 +27,7 @@ static int32_t vnodeSyncGetSnapshot(SSyncFSM *pFsm, SSnapshot *pSnapshot);
int32_t vnodeSyncOpen(SVnode *pVnode, char *path) {
SSyncInfo syncInfo = {
.vgId = pVnode->config.vgId,
+ .isStandBy = pVnode->config.standby,
.syncCfg = pVnode->config.syncCfg,
.pWal = pVnode->pWal,
.msgcb = NULL,
@@ -49,28 +50,74 @@ int32_t vnodeSyncOpen(SVnode *pVnode, char *path) {
return 0;
}
+int32_t vnodeSyncAlter(SVnode *pVnode, SRpcMsg *pMsg) {
+ SAlterVnodeReq req = {0};
+ if (tDeserializeSAlterVnodeReq((char *)pMsg->pCont + sizeof(SMsgHead), pMsg->contLen - sizeof(SMsgHead), &req) != 0) {
+ terrno = TSDB_CODE_INVALID_MSG;
+ return TSDB_CODE_INVALID_MSG;
+ }
+
+ vInfo("vgId:%d, start to alter vnode replica to %d", TD_VID(pVnode), req.replica);
+ SSyncCfg cfg = {.replicaNum = req.replica, .myIndex = req.selfIndex};
+ for (int32_t r = 0; r < req.replica; ++r) {
+ SNodeInfo *pNode = &cfg.nodeInfo[r];
+ tstrncpy(pNode->nodeFqdn, req.replicas[r].fqdn, sizeof(pNode->nodeFqdn));
+ pNode->nodePort = req.replicas[r].port;
+ vInfo("vgId:%d, replica:%d %s:%u", TD_VID(pVnode), r, pNode->nodeFqdn, pNode->nodePort);
+ }
+
+ int32_t code = syncReconfig(pVnode->sync, &cfg);
+ if (code == TAOS_SYNC_PROPOSE_SUCCESS) {
+ // todo refactor
+ SRpcMsg rsp = {.info = pMsg->info, .code = terrno};
+ tmsgSendRsp(&rsp);
+ return TSDB_CODE_ACTION_IN_PROGRESS;
+ }
+
+ return code;
+}
+
void vnodeSyncStart(SVnode *pVnode) {
syncSetMsgCb(pVnode->sync, &pVnode->msgCb);
- syncStart(pVnode->sync);
+ if (pVnode->config.standby) {
+ syncStartStandBy(pVnode->sync);
+ } else {
+ syncStart(pVnode->sync);
+ }
}
void vnodeSyncClose(SVnode *pVnode) { syncStop(pVnode->sync); }
-int32_t vnodeSyncEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) {
+int32_t vnodeSyncEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) {
int32_t code = tmsgPutToQueue(msgcb, SYNC_QUEUE, pMsg);
if (code != 0) {
rpcFreeCont(pMsg->pCont);
+ pMsg->pCont = NULL;
}
return code;
}
-int32_t vnodeSyncSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) { return tmsgSendReq(pEpSet, pMsg); }
+int32_t vnodeSyncSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) {
+ int32_t code = tmsgSendReq(pEpSet, pMsg);
+ if (code != 0) {
+ rpcFreeCont(pMsg->pCont);
+ pMsg->pCont = NULL;
+ }
+ return code;
+}
int32_t vnodeSyncGetSnapshot(SSyncFSM *pFsm, SSnapshot *pSnapshot) {
vnodeGetSnapshot(pFsm->data, pSnapshot);
return 0;
}
+void vnodeSyncReconfig(struct SSyncFSM *pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta) {
+ SVnode *pVnode = pFsm->data;
+ vInfo("vgId:%d, sync reconfig is confirmed", TD_VID(pVnode));
+
+ // todo rpc response here
+}
+
void vnodeSyncCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
SyncIndex beginIndex = SYNC_INDEX_INVALID;
if (pFsm->FpGetSnapshot != NULL) {
@@ -80,27 +127,19 @@ void vnodeSyncCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta)
}
if (cbMeta.index > beginIndex) {
- char logBuf[256];
+ char logBuf[256] = {0};
snprintf(
logBuf, sizeof(logBuf),
"==callback== ==CommitCb== execute, pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s, beginIndex :%ld\n",
pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state), beginIndex);
syncRpcMsgLog2(logBuf, (SRpcMsg *)pMsg);
- SVnode *pVnode = (SVnode *)(pFsm->data);
+ SVnode *pVnode = pFsm->data;
SyncApplyMsg *pSyncApplyMsg = syncApplyMsgBuild2(pMsg, pVnode->config.vgId, &cbMeta);
SRpcMsg applyMsg;
syncApplyMsg2RpcMsg(pSyncApplyMsg, &applyMsg);
syncApplyMsgDestroy(pSyncApplyMsg);
- /*
- SRpcMsg applyMsg;
- applyMsg = *pMsg;
- applyMsg.pCont = rpcMallocCont(applyMsg.contLen);
- assert(applyMsg.contLen == pMsg->contLen);
- memcpy(applyMsg.pCont, pMsg->pCont, applyMsg.contLen);
- */
-
// recover handle for response
SRpcMsg saveRpcMsg;
int32_t ret = syncGetAndDelRespRpc(pVnode->sync, cbMeta.seqNum, &saveRpcMsg);
@@ -115,7 +154,7 @@ void vnodeSyncCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta)
tmsgPutToQueue(&(pVnode->msgCb), APPLY_QUEUE, &applyMsg);
} else {
- char logBuf[256];
+ char logBuf[256] = {0};
snprintf(logBuf, sizeof(logBuf),
"==callback== ==CommitCb== do not execute, pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s, "
"beginIndex :%ld\n",
@@ -126,7 +165,7 @@ void vnodeSyncCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta)
}
void vnodeSyncPreCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
- char logBuf[256];
+ char logBuf[256] = {0};
snprintf(logBuf, sizeof(logBuf),
"==callback== ==PreCommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s \n", pFsm, cbMeta.index,
cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state));
@@ -134,7 +173,7 @@ void vnodeSyncPreCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMet
}
void vnodeSyncRollBackMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
- char logBuf[256];
+ char logBuf[256] = {0};
snprintf(logBuf, sizeof(logBuf), "==callback== ==RollBackCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s \n",
pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state));
syncRpcMsgLog2(logBuf, (SRpcMsg *)pMsg);
@@ -147,6 +186,8 @@ SSyncFSM *vnodeSyncMakeFsm(SVnode *pVnode) {
pFsm->FpPreCommitCb = vnodeSyncPreCommitMsg;
pFsm->FpRollBackCb = vnodeSyncRollBackMsg;
pFsm->FpGetSnapshot = vnodeSyncGetSnapshot;
- pFsm->FpRestoreFinish = NULL;
+ pFsm->FpRestoreFinishCb = NULL;
+ pFsm->FpReConfigCb = vnodeSyncReconfig;
+
return pFsm;
}
\ No newline at end of file
diff --git a/source/dnode/vnode/src/vnd/vnodeUtil.c b/source/dnode/vnode/src/vnd/vnodeUtil.c
new file mode 100644
index 0000000000000000000000000000000000000000..cd942099bc8924fde06ea912b0eecdfbe72603cb
--- /dev/null
+++ b/source/dnode/vnode/src/vnd/vnodeUtil.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include "vnd.h"
+
+int32_t vnodeRealloc(void** pp, int32_t size) {
+ uint8_t* p = NULL;
+ int32_t csize = 0;
+
+ if (*pp) {
+ p = (uint8_t*)(*pp) - sizeof(int32_t);
+ csize = *(int32_t*)p;
+ }
+
+ if (csize >= size) {
+ return 0;
+ }
+
+ p = (uint8_t*)taosMemoryRealloc(p, size);
+ if (p == NULL) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ *(int32_t*)p = size;
+ *pp = p + sizeof(int32_t);
+
+ return 0;
+}
+
+void vnodeFree(void* p) {
+ if (p) {
+ taosMemoryFree(((uint8_t*)p) - sizeof(int32_t));
+ }
+}
\ No newline at end of file
diff --git a/source/dnode/vnode/test/tsdbSmaTest.cpp b/source/dnode/vnode/test/tsdbSmaTest.cpp
index ab617cb18660bc6663b500d7ef9da60a5c2d9fa5..3b8c94e413ee866441f5d7514e13986d31fc4137 100644
--- a/source/dnode/vnode/test/tsdbSmaTest.cpp
+++ b/source/dnode/vnode/test/tsdbSmaTest.cpp
@@ -147,8 +147,8 @@ TEST(testCase, tSma_Meta_Encode_Decode_Test) {
// resource release
taosMemoryFreeClear(pSW);
- tdDestroyTSma(&tSma);
- tdDestroyTSmaWrapper(&dstTSmaWrapper);
+ tDestroyTSma(&tSma);
+ tDestroyTSmaWrapper(&dstTSmaWrapper);
}
#endif
@@ -218,7 +218,7 @@ TEST(testCase, tSma_metaDB_Put_Get_Del_Test) {
printf("tagsFilter1 = %s\n", qSmaCfg->tagsFilter != NULL ? qSmaCfg->tagsFilter : "");
EXPECT_STRCASEEQ(qSmaCfg->indexName, smaIndexName1);
EXPECT_EQ(qSmaCfg->tableUid, tSma.tableUid);
- tdDestroyTSma(qSmaCfg);
+ tDestroyTSma(qSmaCfg);
taosMemoryFreeClear(qSmaCfg);
qSmaCfg = metaGetSmaInfoByIndex(pMeta, indexUid2, true);
@@ -229,7 +229,7 @@ TEST(testCase, tSma_metaDB_Put_Get_Del_Test) {
printf("tagsFilter2 = %s\n", qSmaCfg->tagsFilter != NULL ? qSmaCfg->tagsFilter : "");
EXPECT_STRCASEEQ(qSmaCfg->indexName, smaIndexName2);
EXPECT_EQ(qSmaCfg->interval, tSma.interval);
- tdDestroyTSma(qSmaCfg);
+ tDestroyTSma(qSmaCfg);
taosMemoryFreeClear(qSmaCfg);
// get index name by table uid
@@ -265,7 +265,7 @@ TEST(testCase, tSma_metaDB_Put_Get_Del_Test) {
EXPECT_EQ((pSW->tSma + 1)->indexUid, indexUid2);
EXPECT_EQ((pSW->tSma + 1)->tableUid, tbUid);
- tdDestroyTSmaWrapper(pSW);
+ tDestroyTSmaWrapper(pSW);
taosMemoryFreeClear(pSW);
// get all sma table uids
@@ -282,7 +282,7 @@ TEST(testCase, tSma_metaDB_Put_Get_Del_Test) {
metaRemoveSmaFromDb(pMeta, indexUid1);
metaRemoveSmaFromDb(pMeta, indexUid2);
- tdDestroyTSma(&tSma);
+ tDestroyTSma(&tSma);
metaClose(pMeta);
}
#endif
@@ -368,7 +368,7 @@ TEST(testCase, tSma_Data_Insert_Query_Test) {
SDiskCfg pDisks = {0};
pDisks.level = 0;
pDisks.primary = 1;
- strncpy(pDisks.dir, "/var/lib/taos", TSDB_FILENAME_LEN);
+ strncpy(pDisks.dir, TD_DATA_DIR_PATH, TSDB_FILENAME_LEN);
int32_t numOfDisks = 1;
pTsdb->pTfs = tfsOpen(&pDisks, numOfDisks);
EXPECT_NE(pTsdb->pTfs, nullptr);
@@ -576,7 +576,7 @@ TEST(testCase, tSma_Data_Insert_Query_Test) {
taosArrayDestroy(pDataBlocks);
// release meta
- tdDestroyTSma(&tSma);
+ tDestroyTSma(&tSma);
tfsClose(pTsdb->pTfs);
tsdbClose(pTsdb);
metaClose(pMeta);
diff --git a/source/libs/catalog/inc/catalogInt.h b/source/libs/catalog/inc/catalogInt.h
index 9f66b6c598de2dcb3c1c32190760c74719ccb414..239d719fa80eb3b4fbf9b075e165dc50051b3e52 100644
--- a/source/libs/catalog/inc/catalogInt.h
+++ b/source/libs/catalog/inc/catalogInt.h
@@ -49,19 +49,21 @@ enum {
};
enum {
- CTG_ACT_UPDATE_VG = 0,
- CTG_ACT_UPDATE_TBL,
- CTG_ACT_REMOVE_DB,
- CTG_ACT_REMOVE_STB,
- CTG_ACT_REMOVE_TBL,
- CTG_ACT_UPDATE_USER,
- CTG_ACT_MAX
+ CTG_OP_UPDATE_VGROUP = 0,
+ CTG_OP_UPDATE_TB_META,
+ CTG_OP_DROP_DB_CACHE,
+ CTG_OP_DROP_STB_META,
+ CTG_OP_DROP_TB_META,
+ CTG_OP_UPDATE_USER,
+ CTG_OP_UPDATE_VG_EPSET,
+ CTG_OP_MAX
};
typedef enum {
CTG_TASK_GET_QNODE = 0,
CTG_TASK_GET_DB_VGROUP,
CTG_TASK_GET_DB_CFG,
+ CTG_TASK_GET_DB_INFO,
CTG_TASK_GET_TB_META,
CTG_TASK_GET_TB_HASH,
CTG_TASK_GET_INDEX,
@@ -98,6 +100,10 @@ typedef struct SCtgDbCfgCtx {
char dbFName[TSDB_DB_FNAME_LEN];
} SCtgDbCfgCtx;
+typedef struct SCtgDbInfoCtx {
+ char dbFName[TSDB_DB_FNAME_LEN];
+} SCtgDbInfoCtx;
+
typedef struct SCtgTbHashCtx {
char dbFName[TSDB_DB_FNAME_LEN];
SName* pName;
@@ -171,7 +177,7 @@ typedef struct SCtgJob {
uint64_t queryId;
SCatalog* pCtg;
void* pTrans;
- const SEpSet* pMgmtEps;
+ SEpSet pMgmtEps;
void* userParam;
catalogCallback userFp;
int32_t tbMetaNum;
@@ -182,6 +188,7 @@ typedef struct SCtgJob {
int32_t dbCfgNum;
int32_t indexNum;
int32_t userNum;
+ int32_t dbInfoNum;
} SCtgJob;
typedef struct SCtgMsgCtx {
@@ -285,16 +292,22 @@ typedef struct SCtgUpdateUserMsg {
SGetUserAuthRsp userAuth;
} SCtgUpdateUserMsg;
+typedef struct SCtgUpdateEpsetMsg {
+ SCatalog* pCtg;
+ char dbFName[TSDB_DB_FNAME_LEN];
+ int32_t vgId;
+ SEpSet epSet;
+} SCtgUpdateEpsetMsg;
-typedef struct SCtgMetaAction {
- int32_t act;
+typedef struct SCtgCacheOperation {
+ int32_t opId;
void *data;
- bool syncReq;
+ bool syncOp;
uint64_t seqId;
-} SCtgMetaAction;
+} SCtgCacheOperation;
typedef struct SCtgQNode {
- SCtgMetaAction action;
+ SCtgCacheOperation op;
struct SCtgQNode *next;
} SCtgQNode;
@@ -321,24 +334,24 @@ typedef struct SCatalogMgmt {
} SCatalogMgmt;
typedef uint32_t (*tableNameHashFp)(const char *, uint32_t);
-typedef int32_t (*ctgActFunc)(SCtgMetaAction *);
+typedef int32_t (*ctgOpFunc)(SCtgCacheOperation *);
-typedef struct SCtgAction {
- int32_t actId;
+typedef struct SCtgOperation {
+ int32_t opId;
char name[32];
- ctgActFunc func;
-} SCtgAction;
+ ctgOpFunc func;
+} SCtgOperation;
-#define CTG_QUEUE_ADD() atomic_add_fetch_64(&gCtgMgmt.queue.qRemainNum, 1)
-#define CTG_QUEUE_SUB() atomic_sub_fetch_64(&gCtgMgmt.queue.qRemainNum, 1)
+#define CTG_QUEUE_INC() atomic_add_fetch_64(&gCtgMgmt.queue.qRemainNum, 1)
+#define CTG_QUEUE_DEC() atomic_sub_fetch_64(&gCtgMgmt.queue.qRemainNum, 1)
-#define CTG_STAT_ADD(_item, _n) atomic_add_fetch_64(&(_item), _n)
-#define CTG_STAT_SUB(_item, _n) atomic_sub_fetch_64(&(_item), _n)
+#define CTG_STAT_INC(_item, _n) atomic_add_fetch_64(&(_item), _n)
+#define CTG_STAT_DEC(_item, _n) atomic_sub_fetch_64(&(_item), _n)
#define CTG_STAT_GET(_item) atomic_load_64(&(_item))
-#define CTG_RUNTIME_STAT_ADD(item, n) (CTG_STAT_ADD(gCtgMgmt.stat.runtime.item, n))
-#define CTG_CACHE_STAT_ADD(item, n) (CTG_STAT_ADD(gCtgMgmt.stat.cache.item, n))
-#define CTG_CACHE_STAT_SUB(item, n) (CTG_STAT_SUB(gCtgMgmt.stat.cache.item, n))
+#define CTG_RT_STAT_INC(item, n) (CTG_STAT_INC(gCtgMgmt.stat.runtime.item, n))
+#define CTG_CACHE_STAT_INC(item, n) (CTG_STAT_INC(gCtgMgmt.stat.cache.item, n))
+#define CTG_CACHE_STAT_DEC(item, n) (CTG_STAT_DEC(gCtgMgmt.stat.cache.item, n))
#define CTG_IS_META_NULL(type) ((type) == META_TYPE_NULL_TABLE)
#define CTG_IS_META_CTABLE(type) ((type) == META_TYPE_CTABLE)
@@ -435,26 +448,28 @@ int32_t ctgdShowCacheInfo(void);
int32_t ctgRemoveTbMetaFromCache(SCatalog* pCtg, SName* pTableName, bool syncReq);
int32_t ctgGetTbMetaFromCache(CTG_PARAMS, SCtgTbMetaCtx* ctx, STableMeta** pTableMeta);
-int32_t ctgActUpdateVg(SCtgMetaAction *action);
-int32_t ctgActUpdateTb(SCtgMetaAction *action);
-int32_t ctgActRemoveDB(SCtgMetaAction *action);
-int32_t ctgActRemoveStb(SCtgMetaAction *action);
-int32_t ctgActRemoveTb(SCtgMetaAction *action);
-int32_t ctgActUpdateUser(SCtgMetaAction *action);
+int32_t ctgOpUpdateVgroup(SCtgCacheOperation *action);
+int32_t ctgOpUpdateTbMeta(SCtgCacheOperation *action);
+int32_t ctgOpDropDbCache(SCtgCacheOperation *action);
+int32_t ctgOpDropStbMeta(SCtgCacheOperation *action);
+int32_t ctgOpDropTbMeta(SCtgCacheOperation *action);
+int32_t ctgOpUpdateUser(SCtgCacheOperation *action);
+int32_t ctgOpUpdateEpset(SCtgCacheOperation *operation);
int32_t ctgAcquireVgInfoFromCache(SCatalog* pCtg, const char *dbFName, SCtgDBCache **pCache);
void ctgReleaseDBCache(SCatalog *pCtg, SCtgDBCache *dbCache);
void ctgReleaseVgInfo(SCtgDBCache *dbCache);
int32_t ctgAcquireVgInfoFromCache(SCatalog* pCtg, const char *dbFName, SCtgDBCache **pCache);
int32_t ctgTbMetaExistInCache(SCatalog* pCtg, char *dbFName, char* tbName, int32_t *exist);
int32_t ctgReadTbMetaFromCache(SCatalog* pCtg, SCtgTbMetaCtx* ctx, STableMeta** pTableMeta);
-int32_t ctgReadTbSverFromCache(SCatalog *pCtg, const SName *pTableName, int32_t *sver, int32_t *tbType, uint64_t *suid, char *stbName);
+int32_t ctgReadTbVerFromCache(SCatalog *pCtg, const SName *pTableName, int32_t *sver, int32_t *tver, int32_t *tbType, uint64_t *suid, char *stbName);
int32_t ctgChkAuthFromCache(SCatalog* pCtg, const char* user, const char* dbFName, AUTH_TYPE type, bool *inCache, bool *pass);
-int32_t ctgPutRmDBToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId);
-int32_t ctgPutRmStbToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *stbName, uint64_t suid, bool syncReq);
-int32_t ctgPutRmTbToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *tbName, bool syncReq);
-int32_t ctgPutUpdateVgToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, SDBVgInfo* dbInfo, bool syncReq);
-int32_t ctgPutUpdateTbToQueue(SCatalog* pCtg, STableMetaOutput *output, bool syncReq);
-int32_t ctgPutUpdateUserToQueue(SCatalog* pCtg, SGetUserAuthRsp *pAuth, bool syncReq);
+int32_t ctgDropDbCacheEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId);
+int32_t ctgDropStbMetaEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *stbName, uint64_t suid, bool syncReq);
+int32_t ctgDropTbMetaEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *tbName, bool syncReq);
+int32_t ctgUpdateVgroupEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, SDBVgInfo* dbInfo, bool syncReq);
+int32_t ctgUpdateTbMetaEnqueue(SCatalog* pCtg, STableMetaOutput *output, bool syncReq);
+int32_t ctgUpdateUserEnqueue(SCatalog* pCtg, SGetUserAuthRsp *pAuth, bool syncReq);
+int32_t ctgUpdateVgEpsetEnqueue(SCatalog* pCtg, char *dbFName, int32_t vgId, SEpSet* pEpSet);
int32_t ctgMetaRentInit(SCtgRentMgmt *mgmt, uint32_t rentSec, int8_t type);
int32_t ctgMetaRentAdd(SCtgRentMgmt *mgmt, void *meta, int64_t id, int32_t size);
int32_t ctgMetaRentGet(SCtgRentMgmt *mgmt, void **res, uint32_t *num, int32_t size);
diff --git a/source/libs/catalog/src/catalog.c b/source/libs/catalog/src/catalog.c
index 4afebf9951db2a895ef4b8c728e7b99fe979ce35..7e0efe22dbfa2dd61a5c662c1103a1b98f579da8 100644
--- a/source/libs/catalog/src/catalog.c
+++ b/source/libs/catalog/src/catalog.c
@@ -41,9 +41,9 @@ int32_t ctgRemoveTbMetaFromCache(SCatalog* pCtg, SName* pTableName, bool syncReq
tNameGetFullDbName(pTableName, dbFName);
if (TSDB_SUPER_TABLE == tblMeta->tableType) {
- CTG_ERR_JRET(ctgPutRmStbToQueue(pCtg, dbFName, tbCtx.tbInfo.dbId, pTableName->tname, tblMeta->suid, syncReq));
+ CTG_ERR_JRET(ctgDropStbMetaEnqueue(pCtg, dbFName, tbCtx.tbInfo.dbId, pTableName->tname, tblMeta->suid, syncReq));
} else {
- CTG_ERR_JRET(ctgPutRmTbToQueue(pCtg, dbFName, tbCtx.tbInfo.dbId, pTableName->tname, syncReq));
+ CTG_ERR_JRET(ctgDropTbMetaEnqueue(pCtg, dbFName, tbCtx.tbInfo.dbId, pTableName->tname, syncReq));
}
_return:
@@ -72,7 +72,7 @@ int32_t ctgGetDBVgInfo(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, con
CTG_ERR_JRET(ctgCloneVgInfo(DbOut.dbVgroup, pInfo));
- CTG_ERR_RET(ctgPutUpdateVgToQueue(pCtg, dbFName, DbOut.dbId, DbOut.dbVgroup, false));
+ CTG_ERR_RET(ctgUpdateVgroupEnqueue(pCtg, dbFName, DbOut.dbId, DbOut.dbVgroup, false));
return TSDB_CODE_SUCCESS;
@@ -108,13 +108,13 @@ int32_t ctgRefreshDBVgInfo(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps,
if (code) {
if (CTG_DB_NOT_EXIST(code) && (NULL != dbCache)) {
ctgDebug("db no longer exist, dbFName:%s, dbId:%" PRIx64, input.db, input.dbId);
- ctgPutRmDBToQueue(pCtg, input.db, input.dbId);
+ ctgDropDbCacheEnqueue(pCtg, input.db, input.dbId);
}
CTG_ERR_RET(code);
}
- CTG_ERR_RET(ctgPutUpdateVgToQueue(pCtg, dbFName, DbOut.dbId, DbOut.dbVgroup, true));
+ CTG_ERR_RET(ctgUpdateVgroupEnqueue(pCtg, dbFName, DbOut.dbId, DbOut.dbVgroup, true));
return TSDB_CODE_SUCCESS;
}
@@ -201,7 +201,7 @@ int32_t ctgRefreshTbMeta(CTG_PARAMS, SCtgTbMetaCtx* ctx, STableMetaOutput **pOut
CTG_ERR_JRET(ctgCloneMetaOutput(output, pOutput));
}
- CTG_ERR_JRET(ctgPutUpdateTbToQueue(pCtg, output, syncReq));
+ CTG_ERR_JRET(ctgUpdateTbMetaEnqueue(pCtg, output, syncReq));
return TSDB_CODE_SUCCESS;
@@ -298,9 +298,9 @@ _return:
}
if (TSDB_SUPER_TABLE == ctx->tbInfo.tbType) {
- ctgPutRmStbToQueue(pCtg, dbFName, ctx->tbInfo.dbId, ctx->pName->tname, ctx->tbInfo.suid, false);
+ ctgDropStbMetaEnqueue(pCtg, dbFName, ctx->tbInfo.dbId, ctx->pName->tname, ctx->tbInfo.suid, false);
} else {
- ctgPutRmTbToQueue(pCtg, dbFName, ctx->tbInfo.dbId, ctx->pName->tname, false);
+ ctgDropTbMetaEnqueue(pCtg, dbFName, ctx->tbInfo.dbId, ctx->pName->tname, false);
}
}
@@ -314,6 +314,36 @@ _return:
CTG_RET(code);
}
+int32_t ctgUpdateTbMeta(SCatalog* pCtg, STableMetaRsp *rspMsg, bool syncOp) {
+ STableMetaOutput *output = taosMemoryCalloc(1, sizeof(STableMetaOutput));
+ if (NULL == output) {
+ ctgError("malloc %d failed", (int32_t)sizeof(STableMetaOutput));
+ CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR);
+ }
+
+ int32_t code = 0;
+
+ strcpy(output->dbFName, rspMsg->dbFName);
+ strcpy(output->tbName, rspMsg->tbName);
+
+ output->dbId = rspMsg->dbId;
+
+ SET_META_TYPE_TABLE(output->metaType);
+
+ CTG_ERR_JRET(queryCreateTableMetaFromMsg(rspMsg, rspMsg->tableType == TSDB_SUPER_TABLE, &output->tbMeta));
+
+ CTG_ERR_JRET(ctgUpdateTbMetaEnqueue(pCtg, output, syncOp));
+
+ return TSDB_CODE_SUCCESS;
+
+_return:
+
+ taosMemoryFreeClear(output->tbMeta);
+ taosMemoryFreeClear(output);
+
+ CTG_RET(code);
+}
+
int32_t ctgChkAuth(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, const char* user, const char* dbFName, AUTH_TYPE type, bool *pass) {
bool inCache = false;
@@ -348,7 +378,7 @@ int32_t ctgChkAuth(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, const c
_return:
- ctgPutUpdateUserToQueue(pCtg, &authRsp, false);
+ ctgUpdateUserEnqueue(pCtg, &authRsp, false);
return TSDB_CODE_SUCCESS;
}
@@ -540,12 +570,6 @@ int32_t catalogGetHandle(uint64_t clusterId, SCatalog** catalogHandle) {
CTG_ERR_JRET(TSDB_CODE_CTG_MEM_ERROR);
}
- SHashObj *metaCache = taosHashInit(gCtgMgmt.cfg.maxTblCacheNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);
- if (NULL == metaCache) {
- qError("taosHashInit failed, num:%d", gCtgMgmt.cfg.maxTblCacheNum);
- CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR);
- }
-
code = taosHashPut(gCtgMgmt.pCluster, &clusterId, sizeof(clusterId), &clusterCtg, POINTER_BYTES);
if (code) {
if (HASH_NODE_EXIST(code)) {
@@ -564,7 +588,7 @@ int32_t catalogGetHandle(uint64_t clusterId, SCatalog** catalogHandle) {
*catalogHandle = clusterCtg;
- CTG_CACHE_STAT_ADD(clusterNum, 1);
+ CTG_CACHE_STAT_INC(clusterNum, 1);
return TSDB_CODE_SUCCESS;
@@ -585,7 +609,7 @@ void catalogFreeHandle(SCatalog* pCtg) {
return;
}
- CTG_CACHE_STAT_SUB(clusterNum, 1);
+ CTG_CACHE_STAT_DEC(clusterNum, 1);
uint64_t clusterId = pCtg->clusterId;
@@ -676,7 +700,7 @@ int32_t catalogUpdateDBVgInfo(SCatalog* pCtg, const char* dbFName, uint64_t dbId
CTG_ERR_JRET(TSDB_CODE_CTG_INVALID_INPUT);
}
- code = ctgPutUpdateVgToQueue(pCtg, dbFName, dbId, dbInfo, false);
+ code = ctgUpdateVgroupEnqueue(pCtg, dbFName, dbId, dbInfo, false);
_return:
@@ -697,7 +721,7 @@ int32_t catalogRemoveDB(SCatalog* pCtg, const char* dbFName, uint64_t dbId) {
CTG_API_LEAVE(TSDB_CODE_SUCCESS);
}
- CTG_ERR_JRET(ctgPutRmDBToQueue(pCtg, dbFName, dbId));
+ CTG_ERR_JRET(ctgDropDbCacheEnqueue(pCtg, dbFName, dbId));
CTG_API_LEAVE(TSDB_CODE_SUCCESS);
@@ -707,7 +731,19 @@ _return:
}
int32_t catalogUpdateVgEpSet(SCatalog* pCtg, const char* dbFName, int32_t vgId, SEpSet *epSet) {
- return 0;
+ CTG_API_ENTER();
+
+ int32_t code = 0;
+
+ if (NULL == pCtg || NULL == dbFName || NULL == epSet) {
+ CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
+ }
+
+ CTG_ERR_JRET(ctgUpdateVgEpsetEnqueue(pCtg, (char*)dbFName, vgId, epSet));
+
+_return:
+
+ CTG_API_LEAVE(code);
}
int32_t catalogRemoveTableMeta(SCatalog* pCtg, SName* pTableName) {
@@ -744,7 +780,7 @@ int32_t catalogRemoveStbMeta(SCatalog* pCtg, const char* dbFName, uint64_t dbId,
CTG_API_LEAVE(TSDB_CODE_SUCCESS);
}
- CTG_ERR_JRET(ctgPutRmStbToQueue(pCtg, dbFName, dbId, stbName, suid, true));
+ CTG_ERR_JRET(ctgDropStbMetaEnqueue(pCtg, dbFName, dbId, stbName, suid, true));
CTG_API_LEAVE(TSDB_CODE_SUCCESS);
@@ -773,38 +809,17 @@ int32_t catalogGetSTableMeta(SCatalog* pCtg, void * pTrans, const SEpSet* pMgmtE
CTG_API_LEAVE(ctgGetTbMeta(CTG_PARAMS_LIST(), &ctx, pTableMeta));
}
-int32_t catalogUpdateSTableMeta(SCatalog* pCtg, STableMetaRsp *rspMsg) {
+int32_t catalogUpdateTableMeta(SCatalog* pCtg, STableMetaRsp *pMsg) {
CTG_API_ENTER();
- if (NULL == pCtg || NULL == rspMsg) {
+ if (NULL == pCtg || NULL == pMsg) {
CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
}
- STableMetaOutput *output = taosMemoryCalloc(1, sizeof(STableMetaOutput));
- if (NULL == output) {
- ctgError("malloc %d failed", (int32_t)sizeof(STableMetaOutput));
- CTG_API_LEAVE(TSDB_CODE_CTG_MEM_ERROR);
- }
-
int32_t code = 0;
-
- strcpy(output->dbFName, rspMsg->dbFName);
- strcpy(output->tbName, rspMsg->tbName);
-
- output->dbId = rspMsg->dbId;
-
- SET_META_TYPE_TABLE(output->metaType);
-
- CTG_ERR_JRET(queryCreateTableMetaFromMsg(rspMsg, true, &output->tbMeta));
-
- CTG_ERR_JRET(ctgPutUpdateTbToQueue(pCtg, output, false));
-
- CTG_API_LEAVE(code);
+ CTG_ERR_JRET(ctgUpdateTbMeta(pCtg, pMsg, true));
_return:
-
- taosMemoryFreeClear(output->tbMeta);
- taosMemoryFreeClear(output);
CTG_API_LEAVE(code);
}
@@ -818,6 +833,7 @@ int32_t catalogChkTbMetaVersion(SCatalog* pCtg, void *pTrans, const SEpSet* pMgm
SName name;
int32_t sver = 0;
+ int32_t tver = 0;
int32_t tbNum = taosArrayGetSize(pTables);
for (int32_t i = 0; i < tbNum; ++i) {
STbSVersion* pTb = (STbSVersion*)taosArrayGet(pTables, i);
@@ -834,8 +850,8 @@ int32_t catalogChkTbMetaVersion(SCatalog* pCtg, void *pTrans, const SEpSet* pMgm
int32_t tbType = 0;
uint64_t suid = 0;
char stbName[TSDB_TABLE_FNAME_LEN];
- ctgReadTbSverFromCache(pCtg, &name, &sver, &tbType, &suid, stbName);
- if (sver >= 0 && sver < pTb->sver) {
+ ctgReadTbVerFromCache(pCtg, &name, &sver, &tver, &tbType, &suid, stbName);
+ if ((sver >= 0 && sver < pTb->sver) || (tver >= 0 && tver < pTb->tver)) {
switch (tbType) {
case TSDB_CHILD_TABLE: {
SName stb = name;
@@ -983,7 +999,7 @@ int32_t catalogGetAllMeta(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps,
}
if (pReq->qNodeRequired) {
- pRsp->pQnodeList = taosArrayInit(10, sizeof(SQueryNodeAddr));
+ pRsp->pQnodeList = taosArrayInit(10, sizeof(SQueryNodeLoad));
CTG_ERR_JRET(ctgGetQnodeListFromMnode(CTG_PARAMS_LIST(), pRsp->pQnodeList, NULL));
}
@@ -1157,7 +1173,7 @@ int32_t catalogUpdateUserAuthInfo(SCatalog* pCtg, SGetUserAuthRsp* pAuth) {
CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
}
- CTG_API_LEAVE(ctgPutUpdateUserToQueue(pCtg, pAuth, false));
+ CTG_API_LEAVE(ctgUpdateUserEnqueue(pCtg, pAuth, false));
}
@@ -1199,7 +1215,7 @@ void catalogDestroy(void) {
taosHashCleanup(gCtgMgmt.pCluster);
gCtgMgmt.pCluster = NULL;
- CTG_UNLOCK(CTG_WRITE, &gCtgMgmt.lock);
+ if (CTG_IS_LOCKED(&gCtgMgmt.lock) == TD_RWLATCH_WRITE_FLAG_COPY) CTG_UNLOCK(CTG_WRITE, &gCtgMgmt.lock);
qInfo("catalog destroyed");
}
diff --git a/source/libs/catalog/src/ctgAsync.c b/source/libs/catalog/src/ctgAsync.c
index 4908dc510116a2347796239d6dd2708df29f2cf8..eb84bf00a444fb6bc57652ee32abdf44035a0426 100644
--- a/source/libs/catalog/src/ctgAsync.c
+++ b/source/libs/catalog/src/ctgAsync.c
@@ -21,173 +21,213 @@
#include "tref.h"
int32_t ctgInitGetTbMetaTask(SCtgJob *pJob, int32_t taskIdx, SName *name) {
- SCtgTask *pTask = taosArrayGet(pJob->pTasks, taskIdx);
+ SCtgTask task = {0};
- pTask->type = CTG_TASK_GET_TB_META;
- pTask->taskId = taskIdx;
- pTask->pJob = pJob;
+ task.type = CTG_TASK_GET_TB_META;
+ task.taskId = taskIdx;
+ task.pJob = pJob;
- pTask->taskCtx = taosMemoryCalloc(1, sizeof(SCtgTbMetaCtx));
- if (NULL == pTask->taskCtx) {
+ task.taskCtx = taosMemoryCalloc(1, sizeof(SCtgTbMetaCtx));
+ if (NULL == task.taskCtx) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
- SCtgTbMetaCtx* ctx = pTask->taskCtx;
+ SCtgTbMetaCtx* ctx = task.taskCtx;
ctx->pName = taosMemoryMalloc(sizeof(*name));
if (NULL == ctx->pName) {
- taosMemoryFree(pTask->taskCtx);
+ taosMemoryFree(task.taskCtx);
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
memcpy(ctx->pName, name, sizeof(*name));
ctx->flag = CTG_FLAG_UNKNOWN_STB;
- qDebug("QID:%" PRIx64 " task %d type %d initialized, tableName:%s", pJob->queryId, taskIdx, pTask->type, name->tname);
+ taosArrayPush(pJob->pTasks, &task);
+
+ qDebug("QID:%" PRIx64 " task %d type %d initialized, tableName:%s", pJob->queryId, taskIdx, task.type, name->tname);
return TSDB_CODE_SUCCESS;
}
int32_t ctgInitGetDbVgTask(SCtgJob *pJob, int32_t taskIdx, char *dbFName) {
- SCtgTask *pTask = taosArrayGet(pJob->pTasks, taskIdx);
+ SCtgTask task = {0};
- pTask->type = CTG_TASK_GET_DB_VGROUP;
- pTask->taskId = taskIdx;
- pTask->pJob = pJob;
+ task.type = CTG_TASK_GET_DB_VGROUP;
+ task.taskId = taskIdx;
+ task.pJob = pJob;
- pTask->taskCtx = taosMemoryCalloc(1, sizeof(SCtgDbVgCtx));
- if (NULL == pTask->taskCtx) {
+ task.taskCtx = taosMemoryCalloc(1, sizeof(SCtgDbVgCtx));
+ if (NULL == task.taskCtx) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
- SCtgDbVgCtx* ctx = pTask->taskCtx;
+ SCtgDbVgCtx* ctx = task.taskCtx;
memcpy(ctx->dbFName, dbFName, sizeof(ctx->dbFName));
- qDebug("QID:%" PRIx64 " task %d type %d initialized, dbFName:%s", pJob->queryId, taskIdx, pTask->type, dbFName);
+ taosArrayPush(pJob->pTasks, &task);
+
+ qDebug("QID:%" PRIx64 " task %d type %d initialized, dbFName:%s", pJob->queryId, taskIdx, task.type, dbFName);
return TSDB_CODE_SUCCESS;
}
int32_t ctgInitGetDbCfgTask(SCtgJob *pJob, int32_t taskIdx, char *dbFName) {
- SCtgTask *pTask = taosArrayGet(pJob->pTasks, taskIdx);
+ SCtgTask task = {0};
+
+ task.type = CTG_TASK_GET_DB_CFG;
+ task.taskId = taskIdx;
+ task.pJob = pJob;
+
+ task.taskCtx = taosMemoryCalloc(1, sizeof(SCtgDbCfgCtx));
+ if (NULL == task.taskCtx) {
+ CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+ }
+
+ SCtgDbCfgCtx* ctx = task.taskCtx;
+
+ memcpy(ctx->dbFName, dbFName, sizeof(ctx->dbFName));
+
+ taosArrayPush(pJob->pTasks, &task);
+
+ qDebug("QID:%" PRIx64 " task %d type %d initialized, dbFName:%s", pJob->queryId, taskIdx, task.type, dbFName);
+
+ return TSDB_CODE_SUCCESS;
+}
- pTask->type = CTG_TASK_GET_DB_CFG;
- pTask->taskId = taskIdx;
- pTask->pJob = pJob;
+int32_t ctgInitGetDbInfoTask(SCtgJob *pJob, int32_t taskIdx, char *dbFName) {
+ SCtgTask task = {0};
- pTask->taskCtx = taosMemoryCalloc(1, sizeof(SCtgDbCfgCtx));
- if (NULL == pTask->taskCtx) {
+ task.type = CTG_TASK_GET_DB_INFO;
+ task.taskId = taskIdx;
+ task.pJob = pJob;
+
+ task.taskCtx = taosMemoryCalloc(1, sizeof(SCtgDbInfoCtx));
+ if (NULL == task.taskCtx) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
- SCtgDbCfgCtx* ctx = pTask->taskCtx;
+ SCtgDbInfoCtx* ctx = task.taskCtx;
memcpy(ctx->dbFName, dbFName, sizeof(ctx->dbFName));
- qDebug("QID:%" PRIx64 " task %d type %d initialized, dbFName:%s", pJob->queryId, taskIdx, pTask->type, dbFName);
+ taosArrayPush(pJob->pTasks, &task);
+
+ qDebug("QID:%" PRIx64 " task %d type %d initialized, dbFName:%s", pJob->queryId, taskIdx, task.type, dbFName);
return TSDB_CODE_SUCCESS;
}
+
int32_t ctgInitGetTbHashTask(SCtgJob *pJob, int32_t taskIdx, SName *name) {
- SCtgTask *pTask = taosArrayGet(pJob->pTasks, taskIdx);
+ SCtgTask task = {0};
- pTask->type = CTG_TASK_GET_TB_HASH;
- pTask->taskId = taskIdx;
- pTask->pJob = pJob;
+ task.type = CTG_TASK_GET_TB_HASH;
+ task.taskId = taskIdx;
+ task.pJob = pJob;
- pTask->taskCtx = taosMemoryCalloc(1, sizeof(SCtgTbHashCtx));
- if (NULL == pTask->taskCtx) {
+ task.taskCtx = taosMemoryCalloc(1, sizeof(SCtgTbHashCtx));
+ if (NULL == task.taskCtx) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
- SCtgTbHashCtx* ctx = pTask->taskCtx;
+ SCtgTbHashCtx* ctx = task.taskCtx;
ctx->pName = taosMemoryMalloc(sizeof(*name));
if (NULL == ctx->pName) {
- taosMemoryFree(pTask->taskCtx);
+ taosMemoryFree(task.taskCtx);
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
memcpy(ctx->pName, name, sizeof(*name));
tNameGetFullDbName(ctx->pName, ctx->dbFName);
- qDebug("QID:%" PRIx64 " task %d type %d initialized, tableName:%s", pJob->queryId, taskIdx, pTask->type, name->tname);
+ taosArrayPush(pJob->pTasks, &task);
+
+ qDebug("QID:%" PRIx64 " task %d type %d initialized, tableName:%s", pJob->queryId, taskIdx, task.type, name->tname);
return TSDB_CODE_SUCCESS;
}
int32_t ctgInitGetQnodeTask(SCtgJob *pJob, int32_t taskIdx) {
- SCtgTask *pTask = taosArrayGet(pJob->pTasks, taskIdx);
+ SCtgTask task = {0};
- pTask->type = CTG_TASK_GET_QNODE;
- pTask->taskId = taskIdx;
- pTask->pJob = pJob;
- pTask->taskCtx = NULL;
+ task.type = CTG_TASK_GET_QNODE;
+ task.taskId = taskIdx;
+ task.pJob = pJob;
+ task.taskCtx = NULL;
- qDebug("QID:%" PRIx64 " task %d type %d initialized", pJob->queryId, taskIdx, pTask->type);
+ taosArrayPush(pJob->pTasks, &task);
+
+ qDebug("QID:%" PRIx64 " task %d type %d initialized", pJob->queryId, taskIdx, task.type);
return TSDB_CODE_SUCCESS;
}
int32_t ctgInitGetIndexTask(SCtgJob *pJob, int32_t taskIdx, char *name) {
- SCtgTask *pTask = taosArrayGet(pJob->pTasks, taskIdx);
+ SCtgTask task = {0};
- pTask->type = CTG_TASK_GET_INDEX;
- pTask->taskId = taskIdx;
- pTask->pJob = pJob;
+ task.type = CTG_TASK_GET_INDEX;
+ task.taskId = taskIdx;
+ task.pJob = pJob;
- pTask->taskCtx = taosMemoryCalloc(1, sizeof(SCtgIndexCtx));
- if (NULL == pTask->taskCtx) {
+ task.taskCtx = taosMemoryCalloc(1, sizeof(SCtgIndexCtx));
+ if (NULL == task.taskCtx) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
- SCtgIndexCtx* ctx = pTask->taskCtx;
+ SCtgIndexCtx* ctx = task.taskCtx;
strcpy(ctx->indexFName, name);
- qDebug("QID:%" PRIx64 " task %d type %d initialized, indexFName:%s", pJob->queryId, taskIdx, pTask->type, name);
+ taosArrayPush(pJob->pTasks, &task);
+
+ qDebug("QID:%" PRIx64 " task %d type %d initialized, indexFName:%s", pJob->queryId, taskIdx, task.type, name);
return TSDB_CODE_SUCCESS;
}
int32_t ctgInitGetUdfTask(SCtgJob *pJob, int32_t taskIdx, char *name) {
- SCtgTask *pTask = taosArrayGet(pJob->pTasks, taskIdx);
+ SCtgTask task = {0};
- pTask->type = CTG_TASK_GET_UDF;
- pTask->taskId = taskIdx;
- pTask->pJob = pJob;
+ task.type = CTG_TASK_GET_UDF;
+ task.taskId = taskIdx;
+ task.pJob = pJob;
- pTask->taskCtx = taosMemoryCalloc(1, sizeof(SCtgUdfCtx));
- if (NULL == pTask->taskCtx) {
+ task.taskCtx = taosMemoryCalloc(1, sizeof(SCtgUdfCtx));
+ if (NULL == task.taskCtx) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
- SCtgUdfCtx* ctx = pTask->taskCtx;
+ SCtgUdfCtx* ctx = task.taskCtx;
strcpy(ctx->udfName, name);
- qDebug("QID:%" PRIx64 " task %d type %d initialized, udfName:%s", pJob->queryId, taskIdx, pTask->type, name);
+ taosArrayPush(pJob->pTasks, &task);
+
+ qDebug("QID:%" PRIx64 " task %d type %d initialized, udfName:%s", pJob->queryId, taskIdx, task.type, name);
return TSDB_CODE_SUCCESS;
}
int32_t ctgInitGetUserTask(SCtgJob *pJob, int32_t taskIdx, SUserAuthInfo *user) {
- SCtgTask *pTask = taosArrayGet(pJob->pTasks, taskIdx);
+ SCtgTask task = {0};
- pTask->type = CTG_TASK_GET_USER;
- pTask->taskId = taskIdx;
- pTask->pJob = pJob;
+ task.type = CTG_TASK_GET_USER;
+ task.taskId = taskIdx;
+ task.pJob = pJob;
- pTask->taskCtx = taosMemoryCalloc(1, sizeof(SCtgUserCtx));
- if (NULL == pTask->taskCtx) {
+ task.taskCtx = taosMemoryCalloc(1, sizeof(SCtgUserCtx));
+ if (NULL == task.taskCtx) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
- SCtgUserCtx* ctx = pTask->taskCtx;
+ SCtgUserCtx* ctx = task.taskCtx;
memcpy(&ctx->user, user, sizeof(*user));
- qDebug("QID:%" PRIx64 " task %d type %d initialized, user:%s", pJob->queryId, taskIdx, pTask->type, user->user);
+ taosArrayPush(pJob->pTasks, &task);
+
+ qDebug("QID:%" PRIx64 " task %d type %d initialized, user:%s", pJob->queryId, taskIdx, task.type, user->user);
return TSDB_CODE_SUCCESS;
}
@@ -203,8 +243,9 @@ int32_t ctgInitJob(CTG_PARAMS, SCtgJob** job, uint64_t reqId, const SCatalogReq*
int32_t dbCfgNum = (int32_t)taosArrayGetSize(pReq->pDbCfg);
int32_t indexNum = (int32_t)taosArrayGetSize(pReq->pIndex);
int32_t userNum = (int32_t)taosArrayGetSize(pReq->pUser);
+ int32_t dbInfoNum = (int32_t)taosArrayGetSize(pReq->pDbInfo);
- int32_t taskNum = tbMetaNum + dbVgNum + udfNum + tbHashNum + qnodeNum + dbCfgNum + indexNum + userNum;
+ int32_t taskNum = tbMetaNum + dbVgNum + udfNum + tbHashNum + qnodeNum + dbCfgNum + indexNum + userNum + dbInfoNum;
if (taskNum <= 0) {
ctgError("empty input for job, taskNum:%d", taskNum);
CTG_ERR_RET(TSDB_CODE_CTG_INVALID_INPUT);
@@ -222,7 +263,7 @@ int32_t ctgInitJob(CTG_PARAMS, SCtgJob** job, uint64_t reqId, const SCatalogReq*
pJob->userFp = fp;
pJob->pCtg = pCtg;
pJob->pTrans = pTrans;
- pJob->pMgmtEps = pMgmtEps;
+ pJob->pMgmtEps = *pMgmtEps;
pJob->userParam = param;
pJob->tbMetaNum = tbMetaNum;
@@ -233,6 +274,7 @@ int32_t ctgInitJob(CTG_PARAMS, SCtgJob** job, uint64_t reqId, const SCatalogReq*
pJob->dbCfgNum = dbCfgNum;
pJob->indexNum = indexNum;
pJob->userNum = userNum;
+ pJob->dbInfoNum = dbInfoNum;
pJob->pTasks = taosArrayInit(taskNum, sizeof(SCtgTask));
@@ -252,6 +294,11 @@ int32_t ctgInitJob(CTG_PARAMS, SCtgJob** job, uint64_t reqId, const SCatalogReq*
CTG_ERR_JRET(ctgInitGetDbCfgTask(pJob, taskIdx++, dbFName));
}
+ for (int32_t i = 0; i < dbInfoNum; ++i) {
+ char *dbFName = taosArrayGet(pReq->pDbInfo, i);
+ CTG_ERR_JRET(ctgInitGetDbInfoTask(pJob, taskIdx++, dbFName));
+ }
+
for (int32_t i = 0; i < tbMetaNum; ++i) {
SName *name = taosArrayGet(pReq->pTableMeta, i);
CTG_ERR_JRET(ctgInitGetTbMetaTask(pJob, taskIdx++, name));
@@ -303,15 +350,13 @@ _return:
int32_t ctgDumpTbMetaRes(SCtgTask* pTask) {
SCtgJob* pJob = pTask->pJob;
if (NULL == pJob->jobRes.pTableMeta) {
- pJob->jobRes.pTableMeta = taosArrayInit(pJob->tbMetaNum, sizeof(STableMeta));
+ pJob->jobRes.pTableMeta = taosArrayInit(pJob->tbMetaNum, POINTER_BYTES);
if (NULL == pJob->jobRes.pTableMeta) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
}
- taosArrayPush(pJob->jobRes.pTableMeta, pTask->res);
-
- taosMemoryFreeClear(pTask->res);
+ taosArrayPush(pJob->jobRes.pTableMeta, &pTask->res);
return TSDB_CODE_SUCCESS;
}
@@ -340,7 +385,7 @@ int32_t ctgDumpTbHashRes(SCtgTask* pTask) {
}
}
- taosArrayPush(pJob->jobRes.pTableHash, &pTask->res);
+ taosArrayPush(pJob->jobRes.pTableHash, pTask->res);
return TSDB_CODE_SUCCESS;
}
@@ -376,7 +421,21 @@ int32_t ctgDumpDbCfgRes(SCtgTask* pTask) {
}
}
- taosArrayPush(pJob->jobRes.pDbCfg, &pTask->res);
+ taosArrayPush(pJob->jobRes.pDbCfg, pTask->res);
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t ctgDumpDbInfoRes(SCtgTask* pTask) {
+ SCtgJob* pJob = pTask->pJob;
+ if (NULL == pJob->jobRes.pDbInfo) {
+ pJob->jobRes.pDbInfo = taosArrayInit(pJob->dbInfoNum, sizeof(SDbInfo));
+ if (NULL == pJob->jobRes.pDbInfo) {
+ CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+ }
+ }
+
+ taosArrayPush(pJob->jobRes.pDbInfo, pTask->res);
return TSDB_CODE_SUCCESS;
}
@@ -451,7 +510,7 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *
SCtgTbMetaCtx* ctx = (SCtgTbMetaCtx*)pTask->taskCtx;
SCatalog* pCtg = pTask->pJob->pCtg;
void *pTrans = pTask->pJob->pTrans;
- const SEpSet* pMgmtEps = pTask->pJob->pMgmtEps;
+ const SEpSet* pMgmtEps = &pTask->pJob->pMgmtEps;
switch (reqType) {
case TDMT_MND_USE_DB: {
@@ -529,7 +588,7 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *
taosMemoryFreeClear(pOut->tbMeta);
- CTG_ERR_JRET(ctgGetTbMetaFromMnode(CTG_PARAMS_LIST(), ctx->pName, NULL, pTask));
+ CTG_RET(ctgGetTbMetaFromMnode(CTG_PARAMS_LIST(), ctx->pName, NULL, pTask));
} else if (CTG_IS_META_BOTH(pOut->metaType)) {
int32_t exist = 0;
if (!CTG_FLAG_IS_FORCE_UPDATE(ctx->flag)) {
@@ -538,7 +597,7 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *
if (0 == exist) {
TSWAP(pTask->msgCtx.lastOut, pTask->msgCtx.out);
- CTG_ERR_JRET(ctgGetTbMetaFromMnodeImpl(CTG_PARAMS_LIST(), pOut->dbFName, pOut->tbName, NULL, pTask));
+ CTG_RET(ctgGetTbMetaFromMnodeImpl(CTG_PARAMS_LIST(), pOut->dbFName, pOut->tbName, NULL, pTask));
} else {
taosMemoryFreeClear(pOut->tbMeta);
@@ -598,7 +657,7 @@ int32_t ctgHandleGetDbVgRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *pM
SCtgDbVgCtx* ctx = (SCtgDbVgCtx*)pTask->taskCtx;
SCatalog* pCtg = pTask->pJob->pCtg;
void *pTrans = pTask->pJob->pTrans;
- const SEpSet* pMgmtEps = pTask->pJob->pMgmtEps;
+ const SEpSet* pMgmtEps = &pTask->pJob->pMgmtEps;
switch (reqType) {
case TDMT_MND_USE_DB: {
@@ -606,7 +665,7 @@ int32_t ctgHandleGetDbVgRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *pM
CTG_ERR_JRET(ctgGenerateVgList(pCtg, pOut->dbVgroup->vgHash, (SArray**)&pTask->res));
- CTG_ERR_JRET(ctgPutUpdateVgToQueue(pCtg, ctx->dbFName, pOut->dbId, pOut->dbVgroup, false));
+ CTG_ERR_JRET(ctgUpdateVgroupEnqueue(pCtg, ctx->dbFName, pOut->dbId, pOut->dbVgroup, false));
pOut->dbVgroup = NULL;
break;
@@ -632,7 +691,7 @@ int32_t ctgHandleGetTbHashRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *
SCtgTbHashCtx* ctx = (SCtgTbHashCtx*)pTask->taskCtx;
SCatalog* pCtg = pTask->pJob->pCtg;
void *pTrans = pTask->pJob->pTrans;
- const SEpSet* pMgmtEps = pTask->pJob->pMgmtEps;
+ const SEpSet* pMgmtEps = &pTask->pJob->pMgmtEps;
switch (reqType) {
case TDMT_MND_USE_DB: {
@@ -645,7 +704,7 @@ int32_t ctgHandleGetTbHashRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, pOut->dbVgroup, ctx->pName, (SVgroupInfo*)pTask->res));
- CTG_ERR_JRET(ctgPutUpdateVgToQueue(pCtg, ctx->dbFName, pOut->dbId, pOut->dbVgroup, false));
+ CTG_ERR_JRET(ctgUpdateVgroupEnqueue(pCtg, ctx->dbFName, pOut->dbId, pOut->dbVgroup, false));
pOut->dbVgroup = NULL;
break;
@@ -677,6 +736,11 @@ _return:
CTG_RET(code);
}
+int32_t ctgHandleGetDbInfoRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) {
+ CTG_RET(TSDB_CODE_APP_ERROR);
+}
+
+
int32_t ctgHandleGetQnodeRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) {
int32_t code = 0;
CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
@@ -724,7 +788,7 @@ int32_t ctgHandleGetUserRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *pM
SCtgUserCtx* ctx = (SCtgUserCtx*)pTask->taskCtx;
SCatalog* pCtg = pTask->pJob->pCtg;
void *pTrans = pTask->pJob->pTrans;
- const SEpSet* pMgmtEps = pTask->pJob->pMgmtEps;
+ const SEpSet* pMgmtEps = &pTask->pJob->pMgmtEps;
bool pass = false;
SGetUserAuthRsp* pOut = (SGetUserAuthRsp*)pTask->msgCtx.out;
@@ -755,8 +819,8 @@ _return:
}
}
- ctgPutUpdateUserToQueue(pCtg, pOut, false);
- pTask->msgCtx.out = NULL;
+ ctgUpdateUserEnqueue(pCtg, pOut, false);
+ taosMemoryFreeClear(pTask->msgCtx.out);
ctgHandleTaskEnd(pTask, code);
@@ -766,7 +830,7 @@ _return:
int32_t ctgAsyncRefreshTbMeta(SCtgTask *pTask) {
SCatalog* pCtg = pTask->pJob->pCtg;
void *pTrans = pTask->pJob->pTrans;
- const SEpSet* pMgmtEps = pTask->pJob->pMgmtEps;
+ const SEpSet* pMgmtEps = &pTask->pJob->pMgmtEps;
int32_t code = 0;
SCtgTbMetaCtx* ctx = (SCtgTbMetaCtx*)pTask->taskCtx;
@@ -788,7 +852,7 @@ int32_t ctgAsyncRefreshTbMeta(SCtgTask *pTask) {
tNameGetFullDbName(ctx->pName, dbFName);
CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, dbFName, &dbCache));
- if (NULL == dbCache) {
+ if (dbCache) {
SVgroupInfo vgInfo = {0};
CTG_ERR_RET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgInfo, ctx->pName, &vgInfo));
@@ -817,7 +881,7 @@ _return:
int32_t ctgLaunchGetTbMetaTask(SCtgTask *pTask) {
SCatalog* pCtg = pTask->pJob->pCtg;
void *pTrans = pTask->pJob->pTrans;
- const SEpSet* pMgmtEps = pTask->pJob->pMgmtEps;
+ const SEpSet* pMgmtEps = &pTask->pJob->pMgmtEps;
CTG_ERR_RET(ctgGetTbMetaFromCache(CTG_PARAMS_LIST(), (SCtgTbMetaCtx*)pTask->taskCtx, (STableMeta**)&pTask->res));
if (pTask->res) {
@@ -834,7 +898,7 @@ int32_t ctgLaunchGetDbVgTask(SCtgTask *pTask) {
int32_t code = 0;
SCatalog* pCtg = pTask->pJob->pCtg;
void *pTrans = pTask->pJob->pTrans;
- const SEpSet* pMgmtEps = pTask->pJob->pMgmtEps;
+ const SEpSet* pMgmtEps = &pTask->pJob->pMgmtEps;
SCtgDBCache *dbCache = NULL;
SCtgDbVgCtx* pCtx = (SCtgDbVgCtx*)pTask->taskCtx;
@@ -866,7 +930,7 @@ int32_t ctgLaunchGetTbHashTask(SCtgTask *pTask) {
int32_t code = 0;
SCatalog* pCtg = pTask->pJob->pCtg;
void *pTrans = pTask->pJob->pTrans;
- const SEpSet* pMgmtEps = pTask->pJob->pMgmtEps;
+ const SEpSet* pMgmtEps = &pTask->pJob->pMgmtEps;
SCtgDBCache *dbCache = NULL;
SCtgTbHashCtx* pCtx = (SCtgTbHashCtx*)pTask->taskCtx;
@@ -901,7 +965,7 @@ _return:
int32_t ctgLaunchGetQnodeTask(SCtgTask *pTask) {
SCatalog* pCtg = pTask->pJob->pCtg;
void *pTrans = pTask->pJob->pTrans;
- const SEpSet* pMgmtEps = pTask->pJob->pMgmtEps;
+ const SEpSet* pMgmtEps = &pTask->pJob->pMgmtEps;
CTG_ERR_RET(ctgGetQnodeListFromMnode(CTG_PARAMS_LIST(), NULL, pTask));
@@ -911,7 +975,7 @@ int32_t ctgLaunchGetQnodeTask(SCtgTask *pTask) {
int32_t ctgLaunchGetDbCfgTask(SCtgTask *pTask) {
SCatalog* pCtg = pTask->pJob->pCtg;
void *pTrans = pTask->pJob->pTrans;
- const SEpSet* pMgmtEps = pTask->pJob->pMgmtEps;
+ const SEpSet* pMgmtEps = &pTask->pJob->pMgmtEps;
SCtgDbCfgCtx* pCtx = (SCtgDbCfgCtx*)pTask->taskCtx;
CTG_ERR_RET(ctgGetDBCfgFromMnode(CTG_PARAMS_LIST(), pCtx->dbFName, NULL, pTask));
@@ -919,10 +983,45 @@ int32_t ctgLaunchGetDbCfgTask(SCtgTask *pTask) {
return TSDB_CODE_SUCCESS;
}
+int32_t ctgLaunchGetDbInfoTask(SCtgTask *pTask) {
+ int32_t code = 0;
+ SCatalog* pCtg = pTask->pJob->pCtg;
+ void *pTrans = pTask->pJob->pTrans;
+ const SEpSet* pMgmtEps = &pTask->pJob->pMgmtEps;
+ SCtgDBCache *dbCache = NULL;
+ SCtgDbInfoCtx* pCtx = (SCtgDbInfoCtx*)pTask->taskCtx;
+
+ pTask->res = taosMemoryCalloc(1, sizeof(SDbInfo));
+ if (NULL == pTask->res) {
+ CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+ }
+
+ SDbInfo* pInfo = (SDbInfo*)pTask->res;
+ CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, pCtx->dbFName, &dbCache));
+ if (NULL != dbCache) {
+ pInfo->vgVer = dbCache->vgInfo->vgVersion;
+ pInfo->dbId = dbCache->dbId;
+ pInfo->tbNum = dbCache->vgInfo->numOfTable;
+ } else {
+ pInfo->vgVer = CTG_DEFAULT_INVALID_VERSION;
+ }
+
+ CTG_ERR_JRET(ctgHandleTaskEnd(pTask, 0));
+
+_return:
+
+ if (dbCache) {
+ ctgReleaseVgInfo(dbCache);
+ ctgReleaseDBCache(pCtg, dbCache);
+ }
+
+ CTG_RET(code);
+}
+
int32_t ctgLaunchGetIndexTask(SCtgTask *pTask) {
SCatalog* pCtg = pTask->pJob->pCtg;
void *pTrans = pTask->pJob->pTrans;
- const SEpSet* pMgmtEps = pTask->pJob->pMgmtEps;
+ const SEpSet* pMgmtEps = &pTask->pJob->pMgmtEps;
SCtgIndexCtx* pCtx = (SCtgIndexCtx*)pTask->taskCtx;
CTG_ERR_RET(ctgGetIndexInfoFromMnode(CTG_PARAMS_LIST(), pCtx->indexFName, NULL, pTask));
@@ -933,7 +1032,7 @@ int32_t ctgLaunchGetIndexTask(SCtgTask *pTask) {
int32_t ctgLaunchGetUdfTask(SCtgTask *pTask) {
SCatalog* pCtg = pTask->pJob->pCtg;
void *pTrans = pTask->pJob->pTrans;
- const SEpSet* pMgmtEps = pTask->pJob->pMgmtEps;
+ const SEpSet* pMgmtEps = &pTask->pJob->pMgmtEps;
SCtgUdfCtx* pCtx = (SCtgUdfCtx*)pTask->taskCtx;
CTG_ERR_RET(ctgGetUdfInfoFromMnode(CTG_PARAMS_LIST(), pCtx->udfName, NULL, pTask));
@@ -944,7 +1043,7 @@ int32_t ctgLaunchGetUdfTask(SCtgTask *pTask) {
int32_t ctgLaunchGetUserTask(SCtgTask *pTask) {
SCatalog* pCtg = pTask->pJob->pCtg;
void *pTrans = pTask->pJob->pTrans;
- const SEpSet* pMgmtEps = pTask->pJob->pMgmtEps;
+ const SEpSet* pMgmtEps = &pTask->pJob->pMgmtEps;
SCtgUserCtx* pCtx = (SCtgUserCtx*)pTask->taskCtx;
bool inCache = false;
bool pass = false;
@@ -978,6 +1077,7 @@ SCtgAsyncFps gCtgAsyncFps[] = {
{ctgLaunchGetQnodeTask, ctgHandleGetQnodeRsp, ctgDumpQnodeRes},
{ctgLaunchGetDbVgTask, ctgHandleGetDbVgRsp, ctgDumpDbVgRes},
{ctgLaunchGetDbCfgTask, ctgHandleGetDbCfgRsp, ctgDumpDbCfgRes},
+ {ctgLaunchGetDbInfoTask, ctgHandleGetDbInfoRsp, ctgDumpDbInfoRes},
{ctgLaunchGetTbMetaTask, ctgHandleGetTbMetaRsp, ctgDumpTbMetaRes},
{ctgLaunchGetTbHashTask, ctgHandleGetTbHashRsp, ctgDumpTbHashRes},
{ctgLaunchGetIndexTask, ctgHandleGetIndexRsp, ctgDumpIndexRes},
diff --git a/source/libs/catalog/src/ctgCache.c b/source/libs/catalog/src/ctgCache.c
index 6335a056b9461877745fb80b893c9235ef7622a5..0f1344c3432b2540c2daa33de33c2f8c570658f0 100644
--- a/source/libs/catalog/src/ctgCache.c
+++ b/source/libs/catalog/src/ctgCache.c
@@ -19,37 +19,43 @@
#include "catalogInt.h"
#include "systable.h"
-SCtgAction gCtgAction[CTG_ACT_MAX] = {
+SCtgOperation gCtgCacheOperation[CTG_OP_MAX] = {
{
- CTG_ACT_UPDATE_VG,
+ CTG_OP_UPDATE_VGROUP,
"update vgInfo",
- ctgActUpdateVg
+ ctgOpUpdateVgroup
},
{
- CTG_ACT_UPDATE_TBL,
+ CTG_OP_UPDATE_TB_META,
"update tbMeta",
- ctgActUpdateTb
+ ctgOpUpdateTbMeta
},
{
- CTG_ACT_REMOVE_DB,
- "remove DB",
- ctgActRemoveDB
+ CTG_OP_DROP_DB_CACHE,
+ "drop DB",
+ ctgOpDropDbCache
},
{
- CTG_ACT_REMOVE_STB,
- "remove stbMeta",
- ctgActRemoveStb
+ CTG_OP_DROP_STB_META,
+ "drop stbMeta",
+ ctgOpDropStbMeta
},
{
- CTG_ACT_REMOVE_TBL,
- "remove tbMeta",
- ctgActRemoveTb
+ CTG_OP_DROP_TB_META,
+ "drop tbMeta",
+ ctgOpDropTbMeta
},
{
- CTG_ACT_UPDATE_USER,
+ CTG_OP_UPDATE_USER,
"update user",
- ctgActUpdateUser
+ ctgOpUpdateUser
+ },
+ {
+ CTG_OP_UPDATE_VG_EPSET,
+ "update epset",
+ ctgOpUpdateEpset
}
+
};
@@ -172,7 +178,7 @@ int32_t ctgAcquireVgInfoFromCache(SCatalog* pCtg, const char *dbFName, SCtgDBCac
*pCache = dbCache;
- CTG_CACHE_STAT_ADD(vgHitNum, 1);
+ CTG_CACHE_STAT_INC(vgHitNum, 1);
ctgDebug("Got db vgInfo from cache, dbFName:%s", dbFName);
@@ -186,7 +192,7 @@ _return:
*pCache = NULL;
- CTG_CACHE_STAT_ADD(vgMissNum, 1);
+ CTG_CACHE_STAT_INC(vgMissNum, 1);
return TSDB_CODE_SUCCESS;
}
@@ -248,7 +254,7 @@ int32_t ctgReadTbMetaFromCache(SCatalog* pCtg, SCtgTbMetaCtx* ctx, STableMeta**
ctgAcquireDBCache(pCtg, dbFName, &dbCache);
if (NULL == dbCache) {
- ctgDebug("db %s not in cache", ctx->pName->tname);
+ ctgDebug("db %d.%s not in cache", ctx->pName->acctId, ctx->pName->dbname);
return TSDB_CODE_SUCCESS;
}
@@ -273,7 +279,7 @@ int32_t ctgReadTbMetaFromCache(SCatalog* pCtg, SCtgTbMetaCtx* ctx, STableMeta**
ctgReleaseDBCache(pCtg, dbCache);
ctgDebug("Got meta from cache, type:%d, dbFName:%s, tbName:%s", tbMeta->tableType, dbFName, ctx->pName->tname);
- CTG_CACHE_STAT_ADD(tblHitNum, 1);
+ CTG_CACHE_STAT_INC(tblHitNum, 1);
return TSDB_CODE_SUCCESS;
}
@@ -306,7 +312,7 @@ int32_t ctgReadTbMetaFromCache(SCatalog* pCtg, SCtgTbMetaCtx* ctx, STableMeta**
ctgReleaseDBCache(pCtg, dbCache);
- CTG_CACHE_STAT_ADD(tblHitNum, 1);
+ CTG_CACHE_STAT_INC(tblHitNum, 1);
ctgDebug("Got tbmeta from cache, dbFName:%s, tbName:%s", dbFName, ctx->pName->tname);
@@ -317,14 +323,15 @@ _return:
ctgReleaseDBCache(pCtg, dbCache);
taosMemoryFreeClear(*pTableMeta);
- CTG_CACHE_STAT_ADD(tblMissNum, 1);
+ CTG_CACHE_STAT_INC(tblMissNum, 1);
CTG_RET(code);
}
-int32_t ctgReadTbSverFromCache(SCatalog *pCtg, const SName *pTableName, int32_t *sver, int32_t *tbType, uint64_t *suid,
+int32_t ctgReadTbVerFromCache(SCatalog *pCtg, const SName *pTableName, int32_t *sver, int32_t *tver, int32_t *tbType, uint64_t *suid,
char *stbName) {
*sver = -1;
+ *tver = -1;
if (NULL == pCtg->dbCache) {
ctgDebug("empty tbmeta cache, tbName:%s", pTableName->tname);
@@ -348,6 +355,7 @@ int32_t ctgReadTbSverFromCache(SCatalog *pCtg, const SName *pTableName, int32_t
*suid = tbMeta->suid;
if (*tbType != TSDB_CHILD_TABLE) {
*sver = tbMeta->sversion;
+ *tver = tbMeta->tversion;
}
}
CTG_UNLOCK(CTG_READ, &dbCache->tbCache.metaLock);
@@ -359,7 +367,7 @@ int32_t ctgReadTbSverFromCache(SCatalog *pCtg, const SName *pTableName, int32_t
if (*tbType != TSDB_CHILD_TABLE) {
ctgReleaseDBCache(pCtg, dbCache);
- ctgDebug("Got sver %d from cache, type:%d, dbFName:%s, tbName:%s", *sver, *tbType, dbFName, pTableName->tname);
+ ctgDebug("Got sver %d tver %d from cache, type:%d, dbFName:%s, tbName:%s", *sver, *tver, *tbType, dbFName, pTableName->tname);
return TSDB_CODE_SUCCESS;
}
@@ -391,18 +399,19 @@ int32_t ctgReadTbSverFromCache(SCatalog *pCtg, const SName *pTableName, int32_t
stbName[nameLen] = 0;
*sver = (*stbMeta)->sversion;
+ *tver = (*stbMeta)->tversion;
CTG_UNLOCK(CTG_READ, &dbCache->tbCache.stbLock);
ctgReleaseDBCache(pCtg, dbCache);
- ctgDebug("Got sver %d from cache, type:%d, dbFName:%s, tbName:%s", *sver, *tbType, dbFName, pTableName->tname);
+ ctgDebug("Got sver %d tver %d from cache, type:%d, dbFName:%s, tbName:%s", *sver, *tver, *tbType, dbFName, pTableName->tname);
return TSDB_CODE_SUCCESS;
}
-int32_t ctgGetTbTypeFromCache(SCatalog* pCtg, const char* dbFName, const char *tableName, int32_t *tbType) {
+int32_t ctgReadTbTypeFromCache(SCatalog* pCtg, const char* dbFName, const char *tableName, int32_t *tbType) {
if (NULL == pCtg->dbCache) {
ctgWarn("empty db cache, dbFName:%s, tbName:%s", dbFName, tableName);
return TSDB_CODE_SUCCESS;
@@ -453,7 +462,7 @@ int32_t ctgChkAuthFromCache(SCatalog* pCtg, const char* user, const char* dbFNam
*inCache = true;
ctgDebug("Got user from cache, user:%s", user);
- CTG_CACHE_STAT_ADD(userHitNum, 1);
+ CTG_CACHE_STAT_INC(userHitNum, 1);
if (pUser->superUser) {
*pass = true;
@@ -482,13 +491,13 @@ int32_t ctgChkAuthFromCache(SCatalog* pCtg, const char* user, const char* dbFNam
_return:
*inCache = false;
- CTG_CACHE_STAT_ADD(userMissNum, 1);
+ CTG_CACHE_STAT_INC(userMissNum, 1);
return TSDB_CODE_SUCCESS;
}
-void ctgWaitAction(SCtgMetaAction *action) {
+void ctgWaitOpDone(SCtgCacheOperation *action) {
while (true) {
tsem_wait(&gCtgMgmt.queue.rspSem);
@@ -506,54 +515,54 @@ void ctgWaitAction(SCtgMetaAction *action) {
}
}
-void ctgPopAction(SCtgMetaAction **action) {
+void ctgDequeue(SCtgCacheOperation **op) {
SCtgQNode *orig = gCtgMgmt.queue.head;
SCtgQNode *node = gCtgMgmt.queue.head->next;
gCtgMgmt.queue.head = gCtgMgmt.queue.head->next;
- CTG_QUEUE_SUB();
+ CTG_QUEUE_DEC();
taosMemoryFreeClear(orig);
- *action = &node->action;
+ *op = &node->op;
}
-int32_t ctgPushAction(SCatalog* pCtg, SCtgMetaAction *action) {
+int32_t ctgEnqueue(SCatalog* pCtg, SCtgCacheOperation *operation) {
SCtgQNode *node = taosMemoryCalloc(1, sizeof(SCtgQNode));
if (NULL == node) {
qError("calloc %d failed", (int32_t)sizeof(SCtgQNode));
CTG_RET(TSDB_CODE_CTG_MEM_ERROR);
}
- action->seqId = atomic_add_fetch_64(&gCtgMgmt.queue.seqId, 1);
+ operation->seqId = atomic_add_fetch_64(&gCtgMgmt.queue.seqId, 1);
- node->action = *action;
+ node->op = *operation;
CTG_LOCK(CTG_WRITE, &gCtgMgmt.queue.qlock);
gCtgMgmt.queue.tail->next = node;
gCtgMgmt.queue.tail = node;
CTG_UNLOCK(CTG_WRITE, &gCtgMgmt.queue.qlock);
- CTG_QUEUE_ADD();
- CTG_RUNTIME_STAT_ADD(qNum, 1);
+ CTG_QUEUE_INC();
+ CTG_RT_STAT_INC(qNum, 1);
tsem_post(&gCtgMgmt.queue.reqSem);
- ctgDebug("action [%s] added into queue", gCtgAction[action->act].name);
+ ctgDebug("action [%s] added into queue", gCtgCacheOperation[operation->opId].name);
- if (action->syncReq) {
- ctgWaitAction(action);
+ if (operation->syncOp) {
+ ctgWaitOpDone(operation);
}
return TSDB_CODE_SUCCESS;
}
-int32_t ctgPutRmDBToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId) {
+int32_t ctgDropDbCacheEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId) {
int32_t code = 0;
- SCtgMetaAction action= {.act = CTG_ACT_REMOVE_DB};
+ SCtgCacheOperation action= {.opId = CTG_OP_DROP_DB_CACHE};
SCtgRemoveDBMsg *msg = taosMemoryMalloc(sizeof(SCtgRemoveDBMsg));
if (NULL == msg) {
ctgError("malloc %d failed", (int32_t)sizeof(SCtgRemoveDBMsg));
@@ -571,7 +580,7 @@ int32_t ctgPutRmDBToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId) {
action.data = msg;
- CTG_ERR_JRET(ctgPushAction(pCtg, &action));
+ CTG_ERR_JRET(ctgEnqueue(pCtg, &action));
return TSDB_CODE_SUCCESS;
@@ -582,9 +591,9 @@ _return:
}
-int32_t ctgPutRmStbToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *stbName, uint64_t suid, bool syncReq) {
+int32_t ctgDropStbMetaEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *stbName, uint64_t suid, bool syncOp) {
int32_t code = 0;
- SCtgMetaAction action= {.act = CTG_ACT_REMOVE_STB, .syncReq = syncReq};
+ SCtgCacheOperation action= {.opId = CTG_OP_DROP_STB_META, .syncOp = syncOp};
SCtgRemoveStbMsg *msg = taosMemoryMalloc(sizeof(SCtgRemoveStbMsg));
if (NULL == msg) {
ctgError("malloc %d failed", (int32_t)sizeof(SCtgRemoveStbMsg));
@@ -599,7 +608,7 @@ int32_t ctgPutRmStbToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, co
action.data = msg;
- CTG_ERR_JRET(ctgPushAction(pCtg, &action));
+ CTG_ERR_JRET(ctgEnqueue(pCtg, &action));
return TSDB_CODE_SUCCESS;
@@ -611,9 +620,9 @@ _return:
-int32_t ctgPutRmTbToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *tbName, bool syncReq) {
+int32_t ctgDropTbMetaEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *tbName, bool syncOp) {
int32_t code = 0;
- SCtgMetaAction action= {.act = CTG_ACT_REMOVE_TBL, .syncReq = syncReq};
+ SCtgCacheOperation action= {.opId = CTG_OP_DROP_TB_META, .syncOp = syncOp};
SCtgRemoveTblMsg *msg = taosMemoryMalloc(sizeof(SCtgRemoveTblMsg));
if (NULL == msg) {
ctgError("malloc %d failed", (int32_t)sizeof(SCtgRemoveTblMsg));
@@ -627,7 +636,7 @@ int32_t ctgPutRmTbToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, con
action.data = msg;
- CTG_ERR_JRET(ctgPushAction(pCtg, &action));
+ CTG_ERR_JRET(ctgEnqueue(pCtg, &action));
return TSDB_CODE_SUCCESS;
@@ -637,9 +646,9 @@ _return:
CTG_RET(code);
}
-int32_t ctgPutUpdateVgToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, SDBVgInfo* dbInfo, bool syncReq) {
+int32_t ctgUpdateVgroupEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, SDBVgInfo* dbInfo, bool syncOp) {
int32_t code = 0;
- SCtgMetaAction action= {.act = CTG_ACT_UPDATE_VG, .syncReq = syncReq};
+ SCtgCacheOperation action= {.opId = CTG_OP_UPDATE_VGROUP, .syncOp = syncOp};
SCtgUpdateVgMsg *msg = taosMemoryMalloc(sizeof(SCtgUpdateVgMsg));
if (NULL == msg) {
ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateVgMsg));
@@ -659,7 +668,7 @@ int32_t ctgPutUpdateVgToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId,
action.data = msg;
- CTG_ERR_JRET(ctgPushAction(pCtg, &action));
+ CTG_ERR_JRET(ctgEnqueue(pCtg, &action));
return TSDB_CODE_SUCCESS;
@@ -670,9 +679,9 @@ _return:
CTG_RET(code);
}
-int32_t ctgPutUpdateTbToQueue(SCatalog* pCtg, STableMetaOutput *output, bool syncReq) {
+int32_t ctgUpdateTbMetaEnqueue(SCatalog* pCtg, STableMetaOutput *output, bool syncOp) {
int32_t code = 0;
- SCtgMetaAction action= {.act = CTG_ACT_UPDATE_TBL, .syncReq = syncReq};
+ SCtgCacheOperation action= {.opId = CTG_OP_UPDATE_TB_META, .syncOp = syncOp};
SCtgUpdateTblMsg *msg = taosMemoryMalloc(sizeof(SCtgUpdateTblMsg));
if (NULL == msg) {
ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateTblMsg));
@@ -689,7 +698,7 @@ int32_t ctgPutUpdateTbToQueue(SCatalog* pCtg, STableMetaOutput *output, bool syn
action.data = msg;
- CTG_ERR_JRET(ctgPushAction(pCtg, &action));
+ CTG_ERR_JRET(ctgEnqueue(pCtg, &action));
return TSDB_CODE_SUCCESS;
@@ -700,9 +709,38 @@ _return:
CTG_RET(code);
}
-int32_t ctgPutUpdateUserToQueue(SCatalog* pCtg, SGetUserAuthRsp *pAuth, bool syncReq) {
+int32_t ctgUpdateVgEpsetEnqueue(SCatalog* pCtg, char *dbFName, int32_t vgId, SEpSet* pEpSet) {
int32_t code = 0;
- SCtgMetaAction action= {.act = CTG_ACT_UPDATE_USER, .syncReq = syncReq};
+ SCtgCacheOperation operation= {.opId = CTG_OP_UPDATE_VG_EPSET};
+ SCtgUpdateEpsetMsg *msg = taosMemoryMalloc(sizeof(SCtgUpdateEpsetMsg));
+ if (NULL == msg) {
+ ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateEpsetMsg));
+ CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR);
+ }
+
+ msg->pCtg = pCtg;
+ strcpy(msg->dbFName, dbFName);
+ msg->vgId = vgId;
+ msg->epSet = *pEpSet;
+
+ operation.data = msg;
+
+ CTG_ERR_JRET(ctgEnqueue(pCtg, &operation));
+
+ return TSDB_CODE_SUCCESS;
+
+_return:
+
+ taosMemoryFreeClear(msg);
+
+ CTG_RET(code);
+}
+
+
+
+int32_t ctgUpdateUserEnqueue(SCatalog* pCtg, SGetUserAuthRsp *pAuth, bool syncOp) {
+ int32_t code = 0;
+ SCtgCacheOperation action= {.opId = CTG_OP_UPDATE_USER, .syncOp = syncOp};
SCtgUpdateUserMsg *msg = taosMemoryMalloc(sizeof(SCtgUpdateUserMsg));
if (NULL == msg) {
ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateUserMsg));
@@ -714,8 +752,8 @@ int32_t ctgPutUpdateUserToQueue(SCatalog* pCtg, SGetUserAuthRsp *pAuth, bool syn
action.data = msg;
- CTG_ERR_JRET(ctgPushAction(pCtg, &action));
-
+ CTG_ERR_JRET(ctgEnqueue(pCtg, &action));
+
return TSDB_CODE_SUCCESS;
_return:
@@ -950,7 +988,7 @@ int32_t ctgAddNewDBCache(SCatalog *pCtg, const char *dbFName, uint64_t dbId) {
CTG_ERR_JRET(TSDB_CODE_CTG_MEM_ERROR);
}
- CTG_CACHE_STAT_ADD(dbNum, 1);
+ CTG_CACHE_STAT_INC(dbNum, 1);
SDbVgVersion vgVersion = {.dbId = newDBCache.dbId, .vgVersion = -1};
strncpy(vgVersion.dbFName, dbFName, sizeof(vgVersion.dbFName));
@@ -1010,7 +1048,7 @@ int32_t ctgRemoveDBFromCache(SCatalog* pCtg, SCtgDBCache *dbCache, const char* d
CTG_ERR_RET(TSDB_CODE_CTG_DB_DROPPED);
}
- CTG_CACHE_STAT_SUB(dbNum, 1);
+ CTG_CACHE_STAT_DEC(dbNum, 1);
ctgInfo("db removed from cache, dbFName:%s, dbId:%"PRIx64, dbFName, dbId);
@@ -1149,7 +1187,7 @@ int32_t ctgWriteTbMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char *dbFNam
if (taosHashRemove(tbCache->stbCache, &orig->suid, sizeof(orig->suid))) {
ctgError("stb not exist in stbCache, dbFName:%s, stb:%s, suid:%"PRIx64, dbFName, tbName, orig->suid);
} else {
- CTG_CACHE_STAT_SUB(stblNum, 1);
+ CTG_CACHE_STAT_DEC(stblNum, 1);
}
CTG_UNLOCK(CTG_WRITE, &tbCache->stbLock);
@@ -1176,7 +1214,7 @@ int32_t ctgWriteTbMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char *dbFNam
}
if (NULL == orig) {
- CTG_CACHE_STAT_ADD(tblNum, 1);
+ CTG_CACHE_STAT_INC(tblNum, 1);
}
ctgDebug("tbmeta updated to cache, dbFName:%s, tbName:%s, tbType:%d", dbFName, tbName, meta->tableType);
@@ -1195,7 +1233,7 @@ int32_t ctgWriteTbMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char *dbFNam
CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR);
}
- CTG_CACHE_STAT_ADD(stblNum, 1);
+ CTG_CACHE_STAT_INC(stblNum, 1);
CTG_UNLOCK(CTG_WRITE, &tbCache->stbLock);
@@ -1216,7 +1254,7 @@ int32_t ctgUpdateTbMetaToCache(SCatalog* pCtg, STableMetaOutput* pOut, bool sync
int32_t code = 0;
CTG_ERR_RET(ctgCloneMetaOutput(pOut, &pOutput));
- CTG_ERR_JRET(ctgPutUpdateTbToQueue(pCtg, pOutput, syncReq));
+ CTG_ERR_JRET(ctgUpdateTbMetaEnqueue(pCtg, pOutput, syncReq));
return TSDB_CODE_SUCCESS;
@@ -1227,9 +1265,9 @@ _return:
}
-int32_t ctgActUpdateVg(SCtgMetaAction *action) {
+int32_t ctgOpUpdateVgroup(SCtgCacheOperation *operation) {
int32_t code = 0;
- SCtgUpdateVgMsg *msg = action->data;
+ SCtgUpdateVgMsg *msg = operation->data;
CTG_ERR_JRET(ctgWriteDBVgInfoToCache(msg->pCtg, msg->dbFName, msg->dbId, &msg->dbInfo));
@@ -1241,9 +1279,9 @@ _return:
CTG_RET(code);
}
-int32_t ctgActRemoveDB(SCtgMetaAction *action) {
+int32_t ctgOpDropDbCache(SCtgCacheOperation *operation) {
int32_t code = 0;
- SCtgRemoveDBMsg *msg = action->data;
+ SCtgRemoveDBMsg *msg = operation->data;
SCatalog* pCtg = msg->pCtg;
SCtgDBCache *dbCache = NULL;
@@ -1267,9 +1305,9 @@ _return:
}
-int32_t ctgActUpdateTb(SCtgMetaAction *action) {
+int32_t ctgOpUpdateTbMeta(SCtgCacheOperation *operation) {
int32_t code = 0;
- SCtgUpdateTblMsg *msg = action->data;
+ SCtgUpdateTblMsg *msg = operation->data;
SCatalog* pCtg = msg->pCtg;
STableMetaOutput* output = msg->output;
SCtgDBCache *dbCache = NULL;
@@ -1313,9 +1351,9 @@ _return:
}
-int32_t ctgActRemoveStb(SCtgMetaAction *action) {
+int32_t ctgOpDropStbMeta(SCtgCacheOperation *operation) {
int32_t code = 0;
- SCtgRemoveStbMsg *msg = action->data;
+ SCtgRemoveStbMsg *msg = operation->data;
SCatalog* pCtg = msg->pCtg;
SCtgDBCache *dbCache = NULL;
@@ -1333,14 +1371,14 @@ int32_t ctgActRemoveStb(SCtgMetaAction *action) {
if (taosHashRemove(dbCache->tbCache.stbCache, &msg->suid, sizeof(msg->suid))) {
ctgDebug("stb not exist in stbCache, may be removed, dbFName:%s, stb:%s, suid:%"PRIx64, msg->dbFName, msg->stbName, msg->suid);
} else {
- CTG_CACHE_STAT_SUB(stblNum, 1);
+ CTG_CACHE_STAT_DEC(stblNum, 1);
}
CTG_LOCK(CTG_READ, &dbCache->tbCache.metaLock);
if (taosHashRemove(dbCache->tbCache.metaCache, msg->stbName, strlen(msg->stbName))) {
ctgError("stb not exist in cache, dbFName:%s, stb:%s, suid:%"PRIx64, msg->dbFName, msg->stbName, msg->suid);
} else {
- CTG_CACHE_STAT_SUB(tblNum, 1);
+ CTG_CACHE_STAT_DEC(tblNum, 1);
}
CTG_UNLOCK(CTG_READ, &dbCache->tbCache.metaLock);
@@ -1359,9 +1397,9 @@ _return:
CTG_RET(code);
}
-int32_t ctgActRemoveTb(SCtgMetaAction *action) {
+int32_t ctgOpDropTbMeta(SCtgCacheOperation *operation) {
int32_t code = 0;
- SCtgRemoveTblMsg *msg = action->data;
+ SCtgRemoveTblMsg *msg = operation->data;
SCatalog* pCtg = msg->pCtg;
SCtgDBCache *dbCache = NULL;
@@ -1381,7 +1419,7 @@ int32_t ctgActRemoveTb(SCtgMetaAction *action) {
ctgError("stb not exist in cache, dbFName:%s, tbName:%s", msg->dbFName, msg->tbName);
CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR);
} else {
- CTG_CACHE_STAT_SUB(tblNum, 1);
+ CTG_CACHE_STAT_DEC(tblNum, 1);
}
CTG_UNLOCK(CTG_READ, &dbCache->tbCache.metaLock);
@@ -1394,9 +1432,9 @@ _return:
CTG_RET(code);
}
-int32_t ctgActUpdateUser(SCtgMetaAction *action) {
+int32_t ctgOpUpdateUser(SCtgCacheOperation *operation) {
int32_t code = 0;
- SCtgUpdateUserMsg *msg = action->data;
+ SCtgUpdateUserMsg *msg = operation->data;
SCatalog* pCtg = msg->pCtg;
if (NULL == pCtg->userCache) {
@@ -1457,10 +1495,61 @@ _return:
CTG_RET(code);
}
+int32_t ctgOpUpdateEpset(SCtgCacheOperation *operation) {
+ int32_t code = 0;
+ SCtgUpdateEpsetMsg *msg = operation->data;
+ SCatalog* pCtg = msg->pCtg;
+
+ SCtgDBCache *dbCache = NULL;
+ CTG_ERR_RET(ctgAcquireDBCache(pCtg, msg->dbFName, &dbCache));
+ if (NULL == dbCache) {
+ ctgDebug("db %s not exist, ignore epset update", msg->dbFName);
+ goto _return;
+ }
+
+ SDBVgInfo *vgInfo = NULL;
+ CTG_ERR_RET(ctgWAcquireVgInfo(pCtg, dbCache));
+
+ if (NULL == dbCache->vgInfo) {
+ ctgWReleaseVgInfo(dbCache);
+ ctgDebug("vgroup in db %s not cached, ignore epset update", msg->dbFName);
+ goto _return;
+ }
+
+ SVgroupInfo* pInfo = taosHashGet(dbCache->vgInfo->vgHash, &msg->vgId, sizeof(msg->vgId));
+ if (NULL == pInfo) {
+ ctgWReleaseVgInfo(dbCache);
+ ctgDebug("no vgroup %d in db %s, ignore epset update", msg->vgId, msg->dbFName);
+ goto _return;
+ }
+
+ pInfo->epSet = msg->epSet;
+
+ ctgDebug("epset in vgroup %d updated, dbFName:%s", pInfo->vgId, msg->dbFName);
+
+ ctgWReleaseVgInfo(dbCache);
+
+_return:
+
+ if (dbCache) {
+ ctgReleaseDBCache(msg->pCtg, dbCache);
+ }
+
+ taosMemoryFreeClear(msg);
+
+ CTG_RET(code);
+}
+
+
+void ctgUpdateThreadUnexpectedStopped(void) {
+ if (CTG_IS_LOCKED(&gCtgMgmt.lock) > 0) CTG_UNLOCK(CTG_READ, &gCtgMgmt.lock);
+}
void* ctgUpdateThreadFunc(void* param) {
setThreadName("catalog");
-
+#ifdef WINDOWS
+ atexit(ctgUpdateThreadUnexpectedStopped);
+#endif
qInfo("catalog update thread started");
CTG_LOCK(CTG_READ, &gCtgMgmt.lock);
@@ -1475,26 +1564,26 @@ void* ctgUpdateThreadFunc(void* param) {
break;
}
- SCtgMetaAction *action = NULL;
- ctgPopAction(&action);
- SCatalog *pCtg = ((SCtgUpdateMsgHeader *)action->data)->pCtg;
+ SCtgCacheOperation *operation = NULL;
+ ctgDequeue(&operation);
+ SCatalog *pCtg = ((SCtgUpdateMsgHeader *)operation->data)->pCtg;
- ctgDebug("process [%s] action", gCtgAction[action->act].name);
+ ctgDebug("process [%s] operation", gCtgCacheOperation[operation->opId].name);
- (*gCtgAction[action->act].func)(action);
+ (*gCtgCacheOperation[operation->opId].func)(operation);
- gCtgMgmt.queue.seqDone = action->seqId;
+ gCtgMgmt.queue.seqDone = operation->seqId;
- if (action->syncReq) {
+ if (operation->syncOp) {
tsem_post(&gCtgMgmt.queue.rspSem);
}
- CTG_RUNTIME_STAT_ADD(qDoneNum, 1);
+ CTG_RT_STAT_INC(qDoneNum, 1);
ctgdShowClusterCache(pCtg);
}
- CTG_UNLOCK(CTG_READ, &gCtgMgmt.lock);
+ if (CTG_IS_LOCKED(&gCtgMgmt.lock)) CTG_UNLOCK(CTG_READ, &gCtgMgmt.lock);
qInfo("catalog update thread stopped");
diff --git a/source/libs/catalog/src/ctgDbg.c b/source/libs/catalog/src/ctgDbg.c
index 1d4ad0082c7e0736dc2ccad54609319e29e426f7..fdab50db0f65fd67d16d6f5b134f847dc0f882bc 100644
--- a/source/libs/catalog/src/ctgDbg.c
+++ b/source/libs/catalog/src/ctgDbg.c
@@ -21,6 +21,192 @@
extern SCatalogMgmt gCtgMgmt;
SCtgDebug gCTGDebug = {0};
+void ctgdUserCallback(SMetaData* pResult, void* param, int32_t code) {
+ ASSERT(*(int32_t*)param == 1);
+ taosMemoryFree(param);
+
+ qDebug("async call result: %s", tstrerror(code));
+ if (NULL == pResult) {
+ qDebug("empty meta result");
+ return;
+ }
+
+ int32_t num = 0;
+
+ if (pResult->pTableMeta && taosArrayGetSize(pResult->pTableMeta) > 0) {
+ num = taosArrayGetSize(pResult->pTableMeta);
+ for (int32_t i = 0; i < num; ++i) {
+ STableMeta *p = *(STableMeta **)taosArrayGet(pResult->pTableMeta, i);
+ STableComInfo *c = &p->tableInfo;
+
+ if (TSDB_CHILD_TABLE == p->tableType) {
+ qDebug("table meta: type:%d, vgId:%d, uid:%" PRIx64 ",suid:%" PRIx64, p->tableType, p->vgId, p->uid, p->suid);
+ } else {
+ qDebug("table meta: type:%d, vgId:%d, uid:%" PRIx64 ",suid:%" PRIx64 ",sv:%d, tv:%d, tagNum:%d, precision:%d, colNum:%d, rowSize:%d",
+ p->tableType, p->vgId, p->uid, p->suid, p->sversion, p->tversion, c->numOfTags, c->precision, c->numOfColumns, c->rowSize);
+ }
+
+ int32_t colNum = c->numOfColumns + c->numOfTags;
+ for (int32_t j = 0; j < colNum; ++j) {
+ SSchema *s = &p->schema[j];
+ qDebug("[%d] name:%s, type:%d, colId:%d, bytes:%d", j, s->name, s->type, s->colId, s->bytes);
+ }
+ }
+ } else {
+ qDebug("empty table meta");
+ }
+
+ if (pResult->pDbVgroup && taosArrayGetSize(pResult->pDbVgroup) > 0) {
+ num = taosArrayGetSize(pResult->pDbVgroup);
+ for (int32_t i = 0; i < num; ++i) {
+ SArray *pDb = *(SArray**)taosArrayGet(pResult->pDbVgroup, i);
+ int32_t vgNum = taosArrayGetSize(pDb);
+ qDebug("db %d vgInfo:", i);
+ for (int32_t j = 0; j < vgNum; ++j) {
+ SVgroupInfo* pInfo = taosArrayGet(pDb, j);
+ qDebug("vg %d info: vgId:%d", j, pInfo->vgId);
+ }
+ }
+ } else {
+ qDebug("empty db vgroup");
+ }
+
+ if (pResult->pDbInfo && taosArrayGetSize(pResult->pDbInfo) > 0) {
+ num = taosArrayGetSize(pResult->pDbInfo);
+ for (int32_t i = 0; i < num; ++i) {
+ SDbInfo *pDb = taosArrayGet(pResult->pDbInfo, i);
+ qDebug("db %d dbInfo: vgVer:%d, tbNum:%d, dbId:%" PRIx64, i, pDb->vgVer, pDb->tbNum, pDb->dbId);
+ }
+ } else {
+ qDebug("empty db info");
+ }
+
+ if (pResult->pTableHash && taosArrayGetSize(pResult->pTableHash) > 0) {
+ num = taosArrayGetSize(pResult->pTableHash);
+ for (int32_t i = 0; i < num; ++i) {
+ SVgroupInfo* pInfo = taosArrayGet(pResult->pTableHash, i);
+ qDebug("table %d vg info: vgId:%d", i, pInfo->vgId);
+ }
+ } else {
+ qDebug("empty table hash vgroup");
+ }
+
+ if (pResult->pUdfList && taosArrayGetSize(pResult->pUdfList) > 0) {
+ num = taosArrayGetSize(pResult->pUdfList);
+ for (int32_t i = 0; i < num; ++i) {
+ SFuncInfo* pInfo = taosArrayGet(pResult->pUdfList, i);
+ qDebug("udf %d info: name:%s, funcType:%d", i, pInfo->name, pInfo->funcType);
+ }
+ } else {
+ qDebug("empty udf info");
+ }
+
+ if (pResult->pDbCfg && taosArrayGetSize(pResult->pDbCfg) > 0) {
+ num = taosArrayGetSize(pResult->pDbCfg);
+ for (int32_t i = 0; i < num; ++i) {
+ SDbCfgInfo* pInfo = taosArrayGet(pResult->pDbCfg, i);
+ qDebug("db %d info: numOFVgroups:%d, numOfStables:%d", i, pInfo->numOfVgroups, pInfo->numOfStables);
+ }
+ } else {
+ qDebug("empty db cfg info");
+ }
+
+ if (pResult->pUser && taosArrayGetSize(pResult->pUser) > 0) {
+ num = taosArrayGetSize(pResult->pUser);
+ for (int32_t i = 0; i < num; ++i) {
+ bool* auth = taosArrayGet(pResult->pUser, i);
+ qDebug("user auth %d info: %d", i, *auth);
+ }
+ } else {
+ qDebug("empty user auth info");
+ }
+
+ if (pResult->pQnodeList && taosArrayGetSize(pResult->pQnodeList) > 0) {
+ num = taosArrayGetSize(pResult->pQnodeList);
+ for (int32_t i = 0; i < num; ++i) {
+ SQueryNodeAddr* qaddr = taosArrayGet(pResult->pQnodeList, i);
+ qDebug("qnode %d info: id:%d", i, qaddr->nodeId);
+ }
+ } else {
+ qDebug("empty qnode info");
+ }
+}
+
+int32_t ctgdLaunchAsyncCall(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, uint64_t reqId) {
+ int32_t code = 0;
+ SCatalogReq req = {0};
+ req.pTableMeta = taosArrayInit(2, sizeof(SName));
+ req.pDbVgroup = taosArrayInit(2, TSDB_DB_FNAME_LEN);
+ req.pDbInfo = taosArrayInit(2, TSDB_DB_FNAME_LEN);
+ req.pTableHash = taosArrayInit(2, sizeof(SName));
+ req.pUdf = taosArrayInit(2, TSDB_FUNC_NAME_LEN);
+ req.pDbCfg = taosArrayInit(2, TSDB_DB_FNAME_LEN);
+ req.pIndex = NULL;//taosArrayInit(2, TSDB_INDEX_FNAME_LEN);
+ req.pUser = taosArrayInit(2, sizeof(SUserAuthInfo));
+ req.qNodeRequired = true;
+
+ SName name = {0};
+ char dbFName[TSDB_DB_FNAME_LEN] = {0};
+ char funcName[TSDB_FUNC_NAME_LEN] = {0};
+ SUserAuthInfo user = {0};
+
+ tNameFromString(&name, "1.db1.tb1", T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
+ taosArrayPush(req.pTableMeta, &name);
+ taosArrayPush(req.pTableHash, &name);
+ tNameFromString(&name, "1.db1.st1", T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
+ taosArrayPush(req.pTableMeta, &name);
+ taosArrayPush(req.pTableHash, &name);
+
+ strcpy(dbFName, "1.db1");
+ taosArrayPush(req.pDbVgroup, dbFName);
+ taosArrayPush(req.pDbCfg, dbFName);
+ taosArrayPush(req.pDbInfo, dbFName);
+ strcpy(dbFName, "1.db2");
+ taosArrayPush(req.pDbVgroup, dbFName);
+ taosArrayPush(req.pDbCfg, dbFName);
+ taosArrayPush(req.pDbInfo, dbFName);
+
+ strcpy(funcName, "udf1");
+ taosArrayPush(req.pUdf, funcName);
+ strcpy(funcName, "udf2");
+ taosArrayPush(req.pUdf, funcName);
+
+ strcpy(user.user, "root");
+ strcpy(user.dbFName, "1.db1");
+ user.type = AUTH_TYPE_READ;
+ taosArrayPush(req.pUser, &user);
+ user.type = AUTH_TYPE_WRITE;
+ taosArrayPush(req.pUser, &user);
+ user.type = AUTH_TYPE_OTHER;
+ taosArrayPush(req.pUser, &user);
+
+ strcpy(user.user, "user1");
+ strcpy(user.dbFName, "1.db2");
+ user.type = AUTH_TYPE_READ;
+ taosArrayPush(req.pUser, &user);
+ user.type = AUTH_TYPE_WRITE;
+ taosArrayPush(req.pUser, &user);
+ user.type = AUTH_TYPE_OTHER;
+ taosArrayPush(req.pUser, &user);
+
+ int32_t *param = taosMemoryCalloc(1, sizeof(int32_t));
+ *param = 1;
+
+ int64_t jobId = 0;
+ CTG_ERR_JRET(catalogAsyncGetAllMeta(pCtg, pTrans, pMgmtEps, reqId, &req, ctgdUserCallback, param, &jobId));
+
+_return:
+
+ taosArrayDestroy(req.pTableMeta);
+ taosArrayDestroy(req.pDbVgroup);
+ taosArrayDestroy(req.pTableHash);
+ taosArrayDestroy(req.pUdf);
+ taosArrayDestroy(req.pDbCfg);
+ taosArrayDestroy(req.pUser);
+
+ CTG_RET(code);
+}
+
int32_t ctgdEnableDebug(char *option) {
if (0 == strcasecmp(option, "lock")) {
gCTGDebug.lockEnable = true;
diff --git a/source/libs/catalog/src/ctgRemote.c b/source/libs/catalog/src/ctgRemote.c
index 9e86b863f425fafa4403e0ea03841ae696753a03..b16a082f75ff54946bdb20ef8c25989e8f597ec0 100644
--- a/source/libs/catalog/src/ctgRemote.c
+++ b/source/libs/catalog/src/ctgRemote.c
@@ -264,17 +264,18 @@ int32_t ctgGetQnodeListFromMnode(CTG_PARAMS, SArray *out, SCtgTask* pTask) {
char *msg = NULL;
int32_t msgLen = 0;
int32_t reqType = TDMT_MND_QNODE_LIST;
+ void*(*mallocFp)(int32_t) = pTask ? taosMemoryMalloc : rpcMallocCont;
ctgDebug("try to get qnode list from mnode, mgmtEpInUse:%d", pMgmtEps->inUse);
- int32_t code = queryBuildMsg[TMSG_INDEX(reqType)](NULL, &msg, 0, &msgLen);
+ int32_t code = queryBuildMsg[TMSG_INDEX(reqType)](NULL, &msg, 0, &msgLen, mallocFp);
if (code) {
ctgError("Build qnode list msg failed, error:%s", tstrerror(code));
CTG_ERR_RET(code);
}
if (pTask) {
- void* pOut = taosArrayInit(4, sizeof(struct SQueryNodeAddr));
+ void* pOut = taosArrayInit(4, sizeof(SQueryNodeLoad));
if (NULL == pOut) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
@@ -301,10 +302,11 @@ int32_t ctgGetDBVgInfoFromMnode(CTG_PARAMS, SBuildUseDBInput *input, SUseDbOutpu
char *msg = NULL;
int32_t msgLen = 0;
int32_t reqType = TDMT_MND_USE_DB;
+ void*(*mallocFp)(int32_t) = pTask ? taosMemoryMalloc : rpcMallocCont;
ctgDebug("try to get db vgInfo from mnode, dbFName:%s", input->db);
- int32_t code = queryBuildMsg[TMSG_INDEX(reqType)](input, &msg, 0, &msgLen);
+ int32_t code = queryBuildMsg[TMSG_INDEX(reqType)](input, &msg, 0, &msgLen, mallocFp);
if (code) {
ctgError("Build use db msg failed, code:%x, db:%s", code, input->db);
CTG_ERR_RET(code);
@@ -338,10 +340,11 @@ int32_t ctgGetDBCfgFromMnode(CTG_PARAMS, const char *dbFName, SDbCfgInfo *out, S
char *msg = NULL;
int32_t msgLen = 0;
int32_t reqType = TDMT_MND_GET_DB_CFG;
+ void*(*mallocFp)(int32_t) = pTask ? taosMemoryMalloc : rpcMallocCont;
ctgDebug("try to get db cfg from mnode, dbFName:%s", dbFName);
- int32_t code = queryBuildMsg[TMSG_INDEX(reqType)]((void *)dbFName, &msg, 0, &msgLen);
+ int32_t code = queryBuildMsg[TMSG_INDEX(reqType)]((void *)dbFName, &msg, 0, &msgLen, mallocFp);
if (code) {
ctgError("Build get db cfg msg failed, code:%x, db:%s", code, dbFName);
CTG_ERR_RET(code);
@@ -375,10 +378,11 @@ int32_t ctgGetIndexInfoFromMnode(CTG_PARAMS, const char *indexName, SIndexInfo *
char *msg = NULL;
int32_t msgLen = 0;
int32_t reqType = TDMT_MND_GET_INDEX;
+ void*(*mallocFp)(int32_t) = pTask ? taosMemoryMalloc : rpcMallocCont;
ctgDebug("try to get index from mnode, indexName:%s", indexName);
- int32_t code = queryBuildMsg[TMSG_INDEX(reqType)]((void *)indexName, &msg, 0, &msgLen);
+ int32_t code = queryBuildMsg[TMSG_INDEX(reqType)]((void *)indexName, &msg, 0, &msgLen, mallocFp);
if (code) {
ctgError("Build get index msg failed, code:%x, db:%s", code, indexName);
CTG_ERR_RET(code);
@@ -412,10 +416,11 @@ int32_t ctgGetUdfInfoFromMnode(CTG_PARAMS, const char *funcName, SFuncInfo *out,
char *msg = NULL;
int32_t msgLen = 0;
int32_t reqType = TDMT_MND_RETRIEVE_FUNC;
+ void*(*mallocFp)(int32_t) = pTask ? taosMemoryMalloc : rpcMallocCont;
ctgDebug("try to get udf info from mnode, funcName:%s", funcName);
- int32_t code = queryBuildMsg[TMSG_INDEX(reqType)]((void *)funcName, &msg, 0, &msgLen);
+ int32_t code = queryBuildMsg[TMSG_INDEX(reqType)]((void *)funcName, &msg, 0, &msgLen, mallocFp);
if (code) {
ctgError("Build get udf msg failed, code:%x, db:%s", code, funcName);
CTG_ERR_RET(code);
@@ -449,10 +454,11 @@ int32_t ctgGetUserDbAuthFromMnode(CTG_PARAMS, const char *user, SGetUserAuthRsp
char *msg = NULL;
int32_t msgLen = 0;
int32_t reqType = TDMT_MND_GET_USER_AUTH;
+ void*(*mallocFp)(int32_t) = pTask ? taosMemoryMalloc : rpcMallocCont;
ctgDebug("try to get user auth from mnode, user:%s", user);
- int32_t code = queryBuildMsg[TMSG_INDEX(reqType)]((void *)user, &msg, 0, &msgLen);
+ int32_t code = queryBuildMsg[TMSG_INDEX(reqType)]((void *)user, &msg, 0, &msgLen, mallocFp);
if (code) {
ctgError("Build get user auth msg failed, code:%x, db:%s", code, user);
CTG_ERR_RET(code);
@@ -491,10 +497,11 @@ int32_t ctgGetTbMetaFromMnodeImpl(CTG_PARAMS, char *dbFName, char* tbName, STabl
int32_t reqType = TDMT_MND_TABLE_META;
char tbFName[TSDB_TABLE_FNAME_LEN];
sprintf(tbFName, "%s.%s", dbFName, tbName);
+ void*(*mallocFp)(int32_t) = pTask ? taosMemoryMalloc : rpcMallocCont;
ctgDebug("try to get table meta from mnode, tbFName:%s", tbFName);
- int32_t code = queryBuildMsg[TMSG_INDEX(reqType)](&bInput, &msg, 0, &msgLen);
+ int32_t code = queryBuildMsg[TMSG_INDEX(reqType)](&bInput, &msg, 0, &msgLen, mallocFp);
if (code) {
ctgError("Build mnode stablemeta msg failed, code:%x", code);
CTG_ERR_RET(code);
@@ -537,6 +544,7 @@ int32_t ctgGetTbMetaFromVnode(CTG_PARAMS, const SName* pTableName, SVgroupInfo *
int32_t reqType = TDMT_VND_TABLE_META;
char tbFName[TSDB_TABLE_FNAME_LEN];
sprintf(tbFName, "%s.%s", dbFName, pTableName->tname);
+ void*(*mallocFp)(int32_t) = pTask ? taosMemoryMalloc : rpcMallocCont;
ctgDebug("try to get table meta from vnode, vgId:%d, tbFName:%s", vgroupInfo->vgId, tbFName);
@@ -544,7 +552,7 @@ int32_t ctgGetTbMetaFromVnode(CTG_PARAMS, const SName* pTableName, SVgroupInfo *
char *msg = NULL;
int32_t msgLen = 0;
- int32_t code = queryBuildMsg[TMSG_INDEX(reqType)](&bInput, &msg, 0, &msgLen);
+ int32_t code = queryBuildMsg[TMSG_INDEX(reqType)](&bInput, &msg, 0, &msgLen, mallocFp);
if (code) {
ctgError("Build vnode tablemeta msg failed, code:%x, tbFName:%s", code, tbFName);
CTG_ERR_RET(code);
diff --git a/source/libs/catalog/src/ctgUtil.c b/source/libs/catalog/src/ctgUtil.c
index 2d7fb8aa97af13b8eaa589f0316882e3223e810e..4625203dd8d20a6a96af8ea8b748533d4b0a1534 100644
--- a/source/libs/catalog/src/ctgUtil.c
+++ b/source/libs/catalog/src/ctgUtil.c
@@ -35,13 +35,16 @@ void ctgFreeSMetaData(SMetaData* pData) {
taosArrayDestroy(pData->pUdfList);
pData->pUdfList = NULL;
-
+
for (int32_t i = 0; i < taosArrayGetSize(pData->pDbCfg); ++i) {
SDbCfgInfo* pInfo = taosArrayGet(pData->pDbCfg, i);
taosArrayDestroy(pInfo->pRetensions);
}
taosArrayDestroy(pData->pDbCfg);
pData->pDbCfg = NULL;
+
+ taosArrayDestroy(pData->pDbInfo);
+ pData->pDbInfo = NULL;
taosArrayDestroy(pData->pIndex);
pData->pIndex = NULL;
@@ -82,7 +85,7 @@ void ctgFreeTbMetaCache(SCtgTbMetaCache *cache) {
int32_t stblNum = taosHashGetSize(cache->stbCache);
taosHashCleanup(cache->stbCache);
cache->stbCache = NULL;
- CTG_CACHE_STAT_SUB(stblNum, stblNum);
+ CTG_CACHE_STAT_DEC(stblNum, stblNum);
}
CTG_UNLOCK(CTG_WRITE, &cache->stbLock);
@@ -91,7 +94,7 @@ void ctgFreeTbMetaCache(SCtgTbMetaCache *cache) {
int32_t tblNum = taosHashGetSize(cache->metaCache);
taosHashCleanup(cache->metaCache);
cache->metaCache = NULL;
- CTG_CACHE_STAT_SUB(tblNum, tblNum);
+ CTG_CACHE_STAT_DEC(tblNum, tblNum);
}
CTG_UNLOCK(CTG_WRITE, &cache->metaLock);
}
@@ -142,7 +145,7 @@ void ctgFreeHandle(SCatalog* pCtg) {
taosHashCleanup(pCtg->dbCache);
- CTG_CACHE_STAT_SUB(dbNum, dbNum);
+ CTG_CACHE_STAT_DEC(dbNum, dbNum);
}
if (pCtg->userCache) {
@@ -159,7 +162,7 @@ void ctgFreeHandle(SCatalog* pCtg) {
taosHashCleanup(pCtg->userCache);
- CTG_CACHE_STAT_SUB(userNum, userNum);
+ CTG_CACHE_STAT_DEC(userNum, userNum);
}
taosMemoryFree(pCtg);
@@ -167,12 +170,15 @@ void ctgFreeHandle(SCatalog* pCtg) {
void ctgFreeSUseDbOutput(SUseDbOutput* pOutput) {
- if (NULL == pOutput || NULL == pOutput->dbVgroup) {
+ if (NULL == pOutput) {
return;
}
- taosHashCleanup(pOutput->dbVgroup->vgHash);
- taosMemoryFreeClear(pOutput->dbVgroup);
+ if (pOutput->dbVgroup) {
+ taosHashCleanup(pOutput->dbVgroup->vgHash);
+ taosMemoryFreeClear(pOutput->dbVgroup);
+ }
+
taosMemoryFree(pOutput);
}
@@ -267,6 +273,7 @@ void ctgFreeTask(SCtgTask* pTask) {
switch (pTask->type) {
case CTG_TASK_GET_QNODE: {
taosArrayDestroy((SArray*)pTask->res);
+ taosMemoryFreeClear(pTask->taskCtx);
pTask->res = NULL;
break;
}
@@ -277,24 +284,30 @@ void ctgFreeTask(SCtgTask* pTask) {
ctgFreeSTableMetaOutput((STableMetaOutput*)pTask->msgCtx.lastOut);
pTask->msgCtx.lastOut = NULL;
}
+ taosMemoryFreeClear(pTask->taskCtx);
taosMemoryFreeClear(pTask->res);
break;
}
case CTG_TASK_GET_DB_VGROUP: {
taosArrayDestroy((SArray*)pTask->res);
+ taosMemoryFreeClear(pTask->taskCtx);
pTask->res = NULL;
break;
}
case CTG_TASK_GET_DB_CFG: {
- if (pTask->res) {
- taosArrayDestroy(((SDbCfgInfo*)pTask->res)->pRetensions);
- taosMemoryFreeClear(pTask->res);
- }
+ taosMemoryFreeClear(pTask->taskCtx);
+ taosMemoryFreeClear(pTask->res);
+ break;
+ }
+ case CTG_TASK_GET_DB_INFO: {
+ taosMemoryFreeClear(pTask->taskCtx);
+ taosMemoryFreeClear(pTask->res);
break;
}
case CTG_TASK_GET_TB_HASH: {
SCtgTbHashCtx* taskCtx = (SCtgTbHashCtx*)pTask->taskCtx;
taosMemoryFreeClear(taskCtx->pName);
+ taosMemoryFreeClear(pTask->taskCtx);
taosMemoryFreeClear(pTask->res);
break;
}
diff --git a/source/libs/catalog/test/catalogTests.cpp b/source/libs/catalog/test/catalogTests.cpp
index 6c7d1ac4ca554e69b92bcde3e4c64f20a46d0dcb..e4ae2c004f412e356feba406fca07f1c83863abe 100644
--- a/source/libs/catalog/test/catalogTests.cpp
+++ b/source/libs/catalog/test/catalogTests.cpp
@@ -41,7 +41,6 @@
namespace {
extern "C" int32_t ctgdGetClusterCacheNum(struct SCatalog* pCatalog, int32_t type);
-extern "C" int32_t ctgActUpdateTb(SCtgMetaAction *action);
extern "C" int32_t ctgdEnableDebug(char *option);
extern "C" int32_t ctgdGetStatNum(char *option, void *res);
@@ -138,7 +137,7 @@ void ctgTestInitLogFile() {
tsAsyncLog = 0;
qDebugFlag = 159;
- strcpy(tsLogDir, "/var/log/taos");
+ strcpy(tsLogDir, TD_LOG_DIR_PATH);
ctgdEnableDebug("api");
ctgdEnableDebug("meta");
@@ -888,9 +887,9 @@ void *ctgTestSetCtableMetaThread(void *param) {
int32_t n = 0;
STableMetaOutput *output = NULL;
- SCtgMetaAction action = {0};
+ SCtgCacheOperation operation = {0};
- action.act = CTG_ACT_UPDATE_TBL;
+ operation.opId = CTG_OP_UPDATE_TB_META;
while (!ctgTestStop) {
output = (STableMetaOutput *)taosMemoryMalloc(sizeof(STableMetaOutput));
@@ -899,9 +898,9 @@ void *ctgTestSetCtableMetaThread(void *param) {
SCtgUpdateTblMsg *msg = (SCtgUpdateTblMsg *)taosMemoryMalloc(sizeof(SCtgUpdateTblMsg));
msg->pCtg = pCtg;
msg->output = output;
- action.data = msg;
+ operation.data = msg;
- code = ctgActUpdateTb(&action);
+ code = ctgOpUpdateTbMeta(&operation);
if (code) {
assert(0);
}
@@ -1381,7 +1380,7 @@ TEST(tableMeta, updateStbMeta) {
STableMetaRsp rsp = {0};
ctgTestBuildSTableMetaRsp(&rsp);
- code = catalogUpdateSTableMeta(pCtg, &rsp);
+ code = catalogUpdateTableMeta(pCtg, &rsp);
ASSERT_EQ(code, 0);
taosMemoryFreeClear(rsp.pSchemas);
diff --git a/source/libs/command/inc/commandInt.h b/source/libs/command/inc/commandInt.h
index 775dee28a4e2528cbf8509cb5955d567b658a5dd..100e35bc3c61015c1c109adef95851de73d1e3a0 100644
--- a/source/libs/command/inc/commandInt.h
+++ b/source/libs/command/inc/commandInt.h
@@ -36,6 +36,8 @@ extern "C" {
#define EXPLAIN_SORT_FORMAT "Sort"
#define EXPLAIN_INTERVAL_FORMAT "Interval on Column %s"
#define EXPLAIN_SESSION_FORMAT "Session"
+#define EXPLAIN_STATE_WINDOW_FORMAT "StateWindow on Column %s"
+#define EXPLAIN_PARITION_FORMAT "Partition on Column %s"
#define EXPLAIN_ORDER_FORMAT "Order: %s"
#define EXPLAIN_FILTER_FORMAT "Filter: "
#define EXPLAIN_FILL_FORMAT "Fill: %s"
@@ -60,7 +62,7 @@ extern "C" {
#define EXPLAIN_GROUPS_FORMAT "groups=%d"
#define EXPLAIN_WIDTH_FORMAT "width=%d"
#define EXPLAIN_FUNCTIONS_FORMAT "functions=%d"
-#define EXPLAIN_EXECINFO_FORMAT "cost=%" PRIu64 "..%" PRIu64 " rows=%" PRIu64
+#define EXPLAIN_EXECINFO_FORMAT "cost=%.3f..%.3f rows=%" PRIu64
typedef struct SExplainGroup {
int32_t nodeNum;
diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c
index 2e94ec8d0c3a79f7068580e95f30373aeff6ac5f..831b7017b2632a3e52e3050c08b2c29ffa463eeb 100644
--- a/source/libs/command/src/explain.c
+++ b/source/libs/command/src/explain.c
@@ -16,6 +16,7 @@
#include "commandInt.h"
#include "plannodes.h"
#include "query.h"
+#include "tcommon.h"
int32_t qExplainGenerateResNode(SPhysiNode *pNode, SExplainGroup *group, SExplainResNode **pRes);
int32_t qExplainAppendGroupResRows(void *pCtx, int32_t groupId, int32_t level);
@@ -162,6 +163,16 @@ int32_t qExplainGenerateResChildren(SPhysiNode *pNode, SExplainGroup *group, SNo
pPhysiChildren = pSessNode->window.node.pChildren;
break;
}
+ case QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW: {
+ SStateWinodwPhysiNode* pStateNode = (SStateWinodwPhysiNode*) pNode;
+ pPhysiChildren = pStateNode->window.node.pChildren;
+ break;
+ }
+ case QUERY_NODE_PHYSICAL_PLAN_PARTITION: {
+ SPartitionPhysiNode* partitionPhysiNode = (SPartitionPhysiNode*) pNode;
+ pPhysiChildren = partitionPhysiNode->node.pChildren;
+ break;
+ }
default:
qError("not supported physical node type %d", pNode->type);
QRY_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
@@ -339,7 +350,6 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, pTagScanNode->pScanCols->length);
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTagScanNode->node.pOutputDataBlockDesc->totalRowSize);
- EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT);
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
@@ -381,6 +391,35 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
+ // basic analyze output
+ if (EXPLAIN_MODE_ANALYZE == ctx->mode) {
+ EXPLAIN_ROW_NEW(level + 1, "I/O: ");
+
+ int32_t nodeNum = taosArrayGetSize(pResNode->pExecInfo);
+ for (int32_t i = 0; i < nodeNum; ++i) {
+ SExplainExecInfo * execInfo = taosArrayGet(pResNode->pExecInfo, i);
+ STableScanAnalyzeInfo *pScanInfo = (STableScanAnalyzeInfo *)execInfo->verboseInfo;
+
+ EXPLAIN_ROW_APPEND("total_blocks=%d", pScanInfo->totalBlocks);
+ EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
+
+ EXPLAIN_ROW_APPEND("load_blocks=%d", pScanInfo->loadBlocks);
+ EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
+
+ EXPLAIN_ROW_APPEND("load_block_SMAs=%d", pScanInfo->loadBlockStatis);
+ EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
+
+ EXPLAIN_ROW_APPEND("total_rows=%" PRIu64, pScanInfo->totalRows);
+ EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
+
+ EXPLAIN_ROW_APPEND("check_rows=%" PRIu64, pScanInfo->totalCheckedRows);
+ EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
+ }
+
+ EXPLAIN_ROW_END();
+ QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
+ }
+
if (verbose) {
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_OUTPUT_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT,
@@ -390,8 +429,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
- EXPLAIN_ROW_NEW(level + 1, EXPLAIN_TIMERANGE_FORMAT, pTblScanNode->scanRange.skey,
- pTblScanNode->scanRange.ekey);
+ EXPLAIN_ROW_NEW(level + 1, EXPLAIN_TIMERANGE_FORMAT, pTblScanNode->scanRange.skey, pTblScanNode->scanRange.ekey);
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
@@ -522,8 +560,10 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
}
- EXPLAIN_ROW_APPEND(EXPLAIN_FUNCTIONS_FORMAT, pAggNode->pAggFuncs->length);
- EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
+ if (pAggNode->pAggFuncs) {
+ EXPLAIN_ROW_APPEND(EXPLAIN_FUNCTIONS_FORMAT, pAggNode->pAggFuncs->length);
+ EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
+ }
EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pAggNode->node.pOutputDataBlockDesc->totalRowSize);
if (pAggNode->pGroupKeys) {
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
@@ -600,13 +640,48 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
}
- EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, pSortNode->pSortKeys->length);
+
+ SDataBlockDescNode* pDescNode = pSortNode->node.pOutputDataBlockDesc;
+ EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, nodesGetOutputNumFromSlotList(pDescNode->pSlots));
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
- EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pSortNode->node.pOutputDataBlockDesc->totalRowSize);
+ EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pDescNode->totalRowSize);
EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT);
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
+ if (EXPLAIN_MODE_ANALYZE == ctx->mode) {
+ // sort key
+ EXPLAIN_ROW_NEW(level, "Sort Key: ");
+ if (pResNode->pExecInfo) {
+ for (int32_t i = 0; i < LIST_LENGTH(pSortNode->pSortKeys); ++i) {
+ SOrderByExprNode *ptn = nodesListGetNode(pSortNode->pSortKeys, i);
+ EXPLAIN_ROW_APPEND("%s ", nodesGetNameFromColumnNode(ptn->pExpr));
+ }
+ }
+
+ EXPLAIN_ROW_END();
+ QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
+
+ // sort method
+ EXPLAIN_ROW_NEW(level, "Sort Method: ");
+
+ int32_t nodeNum = taosArrayGetSize(pResNode->pExecInfo);
+ SExplainExecInfo *execInfo = taosArrayGet(pResNode->pExecInfo, 0);
+ SSortExecInfo * pExecInfo = (SSortExecInfo *)execInfo->verboseInfo;
+ EXPLAIN_ROW_APPEND("%s", pExecInfo->sortMethod == SORT_QSORT_T ? "quicksort" : "merge sort");
+ if (pExecInfo->sortBuffer > 1024 * 1024) {
+ EXPLAIN_ROW_APPEND(" Buffers:%.2f Mb", pExecInfo->sortBuffer / (1024 * 1024.0));
+ } else if (pExecInfo->sortBuffer > 1024) {
+ EXPLAIN_ROW_APPEND(" Buffers:%.2f Kb", pExecInfo->sortBuffer / (1024.0));
+ } else {
+ EXPLAIN_ROW_APPEND(" Buffers:%d b", pExecInfo->sortBuffer);
+ }
+
+ EXPLAIN_ROW_APPEND(" loops:%d", pExecInfo->loops);
+ EXPLAIN_ROW_END();
+ QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
+ }
+
if (verbose) {
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_OUTPUT_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT,
@@ -637,6 +712,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
EXPLAIN_ROW_APPEND(EXPLAIN_FUNCTIONS_FORMAT, pIntNode->window.pFuncs->length);
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pIntNode->window.node.pOutputDataBlockDesc->totalRowSize);
+ EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT);
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
@@ -705,6 +781,80 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
}
break;
}
+ case QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW: {
+ SStateWinodwPhysiNode *pStateNode = (SStateWinodwPhysiNode *)pNode;
+
+ EXPLAIN_ROW_NEW(level, EXPLAIN_STATE_WINDOW_FORMAT, nodesGetNameFromColumnNode(((STargetNode*)pStateNode->pStateKey)->pExpr));
+ EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT);
+ if (pResNode->pExecInfo) {
+ QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
+ EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
+ }
+
+ EXPLAIN_ROW_APPEND(EXPLAIN_FUNCTIONS_FORMAT, pStateNode->window.pFuncs->length);
+ EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
+ EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pStateNode->window.node.pOutputDataBlockDesc->totalRowSize);
+ EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT);
+ EXPLAIN_ROW_END();
+ QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
+
+ if (verbose) {
+ EXPLAIN_ROW_NEW(level + 1, EXPLAIN_OUTPUT_FORMAT);
+ EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT,
+ nodesGetOutputNumFromSlotList(pStateNode->window.node.pOutputDataBlockDesc->pSlots));
+ EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
+ EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pStateNode->window.node.pOutputDataBlockDesc->outputRowSize);
+ EXPLAIN_ROW_END();
+ QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
+
+ EXPLAIN_ROW_END();
+ QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
+
+ if (pStateNode->window.node.pConditions) {
+ EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT);
+ QRY_ERR_RET(nodesNodeToSQL(pStateNode->window.node.pConditions, tbuf + VARSTR_HEADER_SIZE,
+ TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen));
+ EXPLAIN_ROW_END();
+ QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
+ }
+ }
+ break;
+ }
+ case QUERY_NODE_PHYSICAL_PLAN_PARTITION: {
+ SPartitionPhysiNode *pPartNode = (SPartitionPhysiNode *)pNode;
+
+ SNode* p = nodesListGetNode(pPartNode->pPartitionKeys, 0);
+ EXPLAIN_ROW_NEW(level, EXPLAIN_PARITION_FORMAT, nodesGetNameFromColumnNode(p));
+ EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT);
+ if (pResNode->pExecInfo) {
+ QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
+ EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
+ }
+ EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pPartNode->node.pOutputDataBlockDesc->totalRowSize);
+
+ EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT);
+ EXPLAIN_ROW_END();
+ QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
+
+ if (verbose) {
+ EXPLAIN_ROW_NEW(level + 1, EXPLAIN_OUTPUT_FORMAT);
+ EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT,
+ nodesGetOutputNumFromSlotList(pPartNode->node.pOutputDataBlockDesc->pSlots));
+ EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
+ EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pPartNode->node.pOutputDataBlockDesc->outputRowSize);
+ EXPLAIN_ROW_END();
+ QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
+
+ if (pPartNode->node.pConditions) {
+ EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT);
+ QRY_ERR_RET(nodesNodeToSQL(pPartNode->node.pConditions, tbuf + VARSTR_HEADER_SIZE,
+ TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen));
+ EXPLAIN_ROW_END();
+ QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
+ }
+ }
+ break;
+ }
default:
qError("not supported physical node type %d", pNode->type);
return TSDB_CODE_QRY_APP_ERROR;
diff --git a/source/libs/executor/inc/dataSinkInt.h b/source/libs/executor/inc/dataSinkInt.h
index 85356a862ce282ac53aaad4ee72f0a77b19f115c..8f49440105c813b512835717e861d3da1b2065df 100644
--- a/source/libs/executor/inc/dataSinkInt.h
+++ b/source/libs/executor/inc/dataSinkInt.h
@@ -37,6 +37,7 @@ typedef void (*FEndPut)(struct SDataSinkHandle* pHandle, uint64_t useconds);
typedef void (*FGetDataLength)(struct SDataSinkHandle* pHandle, int32_t* pLen, bool* pQueryEnd);
typedef int32_t (*FGetDataBlock)(struct SDataSinkHandle* pHandle, SOutputData* pOutput);
typedef int32_t (*FDestroyDataSinker)(struct SDataSinkHandle* pHandle);
+typedef int32_t (*FGetCacheSize)(struct SDataSinkHandle* pHandle, uint64_t* size);
typedef struct SDataSinkHandle {
FPutDataBlock fPut;
@@ -44,6 +45,7 @@ typedef struct SDataSinkHandle {
FGetDataLength fGetLen;
FGetDataBlock fGetData;
FDestroyDataSinker fDestroy;
+ FGetCacheSize fGetCacheSize;
} SDataSinkHandle;
int32_t createDataDispatcher(SDataSinkManager* pManager, const SDataSinkNode* pDataSink, DataSinkHandle* pHandle);
diff --git a/source/libs/executor/inc/executil.h b/source/libs/executor/inc/executil.h
index 63c398618f38446260124978a803b2a63c6f0688..b8975854c9446eab43cd4a7d8c3ccb6e38b93016 100644
--- a/source/libs/executor/inc/executil.h
+++ b/source/libs/executor/inc/executil.h
@@ -75,15 +75,15 @@ typedef struct SResultRowInfo {
int32_t size; // number of result set
int32_t capacity; // max capacity
SResultRowPosition cur;
+ SList* openWindow;
} SResultRowInfo;
struct SqlFunctionCtx;
-size_t getResultRowSize(struct SqlFunctionCtx* pCtx, int32_t numOfOutput);
+size_t getResultRowSize(struct SqlFunctionCtx* pCtx, int32_t numOfOutput);
int32_t initResultRowInfo(SResultRowInfo* pResultRowInfo, int32_t size);
void cleanupResultRowInfo(SResultRowInfo* pResultRowInfo);
-int32_t numOfClosedResultRows(SResultRowInfo* pResultRowInfo);
void closeAllResultRows(SResultRowInfo* pResultRowInfo);
void initResultRow(SResultRow *pResultRow);
@@ -92,15 +92,6 @@ bool isResultRowClosed(SResultRow* pResultRow);
struct SResultRowEntryInfo* getResultCell(const SResultRow* pRow, int32_t index, const int32_t* offset);
-static FORCE_INLINE SResultRow *getResultRow(SDiskbasedBuf* pBuf, SResultRowInfo *pResultRowInfo, int32_t slot) {
- ASSERT(pResultRowInfo != NULL && slot >= 0 && slot < pResultRowInfo->size);
- SResultRowPosition* pos = &pResultRowInfo->pPosition[slot];
-
- SFilePage* bufPage = (SFilePage*) getBufPage(pBuf, pos->pageId);
- SResultRow* pRow = (SResultRow*)((char*)bufPage + pos->offset);
- return pRow;
-}
-
static FORCE_INLINE SResultRow *getResultRowByPos(SDiskbasedBuf* pBuf, SResultRowPosition* pos) {
SFilePage* bufPage = (SFilePage*) getBufPage(pBuf, pos->pageId);
SResultRow* pRow = (SResultRow*)((char*)bufPage + pos->offset);
diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h
index 0dacbba8e590b010405571727775cdcda983629c..e7a3390cf3114b07be4439b23f346e11ded0f78f 100644
--- a/source/libs/executor/inc/executorimpl.h
+++ b/source/libs/executor/inc/executorimpl.h
@@ -49,7 +49,7 @@ typedef int32_t (*__block_search_fn_t)(char* data, int32_t num, int64_t key, int
#define Q_STATUS_EQUAL(p, s) (((p) & (s)) != 0u)
#define QUERY_IS_ASC_QUERY(q) (GET_FORWARD_DIRECTION_FACTOR((q)->order.order) == QUERY_ASC_FORWARD_STEP)
-#define GET_TABLEGROUP(q, _index) ((SArray*)taosArrayGetP((q)->tableqinfoGroupInfo.pGroupList, (_index)))
+//#define GET_TABLEGROUP(q, _index) ((SArray*)taosArrayGetP((q)->tableqinfoGroupInfo.pGroupList, (_index)))
#define NEEDTO_COMPRESS_QUERY(size) ((size) > tsCompressColData ? 1 : 0)
@@ -86,49 +86,16 @@ typedef struct STableQueryInfo {
// SVariant tag;
} STableQueryInfo;
-typedef enum {
- QUERY_PROF_BEFORE_OPERATOR_EXEC = 0,
- QUERY_PROF_AFTER_OPERATOR_EXEC,
- QUERY_PROF_QUERY_ABORT
-} EQueryProfEventType;
-
-typedef struct {
- EQueryProfEventType eventType;
- int64_t eventTime;
-
- union {
- uint8_t operatorType; // for operator event
- int32_t abortCode; // for query abort event
- };
-} SQueryProfEvent;
-
-typedef struct {
- uint8_t operatorType;
- int64_t sumSelfTime;
- int64_t sumRunTimes;
-} SOperatorProfResult;
-
typedef struct SLimit {
int64_t limit;
int64_t offset;
} SLimit;
-typedef struct SFileBlockLoadRecorder {
- uint64_t totalRows;
- uint64_t totalCheckedRows;
- uint32_t totalBlocks;
- uint32_t loadBlocks;
- uint32_t loadBlockStatis;
- uint32_t skipBlocks;
- uint32_t filterOutBlocks;
- uint64_t elapsedTime;
-} SFileBlockLoadRecorder;
+typedef struct STableScanAnalyzeInfo SFileBlockLoadRecorder;
typedef struct STaskCostInfo {
- int64_t created;
- int64_t start;
- int64_t end;
-
+ int64_t created;
+ int64_t start;
uint64_t loadStatisTime;
uint64_t loadFileBlockTime;
uint64_t loadDataInCacheTime;
@@ -152,8 +119,8 @@ typedef struct STaskCostInfo {
} STaskCostInfo;
typedef struct SOperatorCostInfo {
- uint64_t openCost;
- uint64_t totalCost;
+ double openCost;
+ double totalCost;
} SOperatorCostInfo;
// The basic query information extracted from the SQueryInfo tree to support the
@@ -184,23 +151,21 @@ typedef struct STaskAttr {
int32_t numOfFilterCols;
int64_t* fillVal;
void* tsdb;
- STableGroupInfo tableGroupInfo; // table list SArray
+// STableListInfo tableGroupInfo; // table list
int32_t vgId;
} STaskAttr;
struct SOperatorInfo;
-struct SAggSupporter;
-struct SOptrBasicInfo;
+//struct SAggSupporter;
+//struct SOptrBasicInfo;
-typedef void (*__optr_encode_fn_t)(struct SOperatorInfo* pOperator, struct SAggSupporter* pSup,
- struct SOptrBasicInfo* pInfo, char** result, int32_t* length);
-typedef bool (*__optr_decode_fn_t)(struct SOperatorInfo* pOperator, struct SAggSupporter* pSup,
- struct SOptrBasicInfo* pInfo, char* result, int32_t length);
+typedef int32_t (*__optr_encode_fn_t)(struct SOperatorInfo* pOperator, char** result, int32_t* length);
+typedef int32_t (*__optr_decode_fn_t)(struct SOperatorInfo* pOperator, char* result);
typedef int32_t (*__optr_open_fn_t)(struct SOperatorInfo* pOptr);
typedef SSDataBlock* (*__optr_fn_t)(struct SOperatorInfo* pOptr);
typedef void (*__optr_close_fn_t)(void* param, int32_t num);
-typedef int32_t (*__optr_get_explain_fn_t)(struct SOperatorInfo* pOptr, void** pOptrExplain);
+typedef int32_t (*__optr_explain_fn_t)(struct SOperatorInfo* pOptr, void** pOptrExplain, uint32_t* len);
typedef struct STaskIdInfo {
uint64_t queryId; // this is also a request id
@@ -216,7 +181,7 @@ typedef struct SExecTaskInfo {
STaskCostInfo cost;
int64_t owner; // if it is in execution
int32_t code;
- uint64_t totalRows; // total number of rows
+// uint64_t totalRows; // total number of rows
struct {
char *tablename;
char *dbname;
@@ -224,7 +189,7 @@ typedef struct SExecTaskInfo {
int32_t tversion;
} schemaVer;
- STableGroupInfo tableqinfoGroupInfo; // this is a group array list, including SArray structure
+ STableListInfo tableqinfoList; // this is a table list
char* sql; // query sql string
jmp_buf env; // jump to this position when error happens.
EOPTR_EXEC_MODEL execModel; // operator execution model [batch model|stream model]
@@ -246,7 +211,7 @@ typedef struct STaskRuntimeEnv {
STSCursor cur;
char* tagVal; // tag value of current data block
- STableGroupInfo tableqinfoGroupInfo; // this is a group array list, including SArray structure
+// STableGroupInfo tableqinfoGroupInfo; // this is a table list
struct SOperatorInfo* proot;
SGroupResInfo groupResInfo;
int64_t currentOffset; // dynamic offset value
@@ -264,14 +229,14 @@ enum {
};
typedef struct SOperatorFpSet {
- __optr_open_fn_t _openFn; // DO NOT invoke this function directly
- __optr_fn_t getNextFn;
- __optr_fn_t getStreamResFn; // execute the aggregate in the stream model, todo remove it
- __optr_fn_t cleanupFn; // call this function to release the allocated resources ASAP
- __optr_close_fn_t closeFn;
- __optr_encode_fn_t encodeResultRow;
- __optr_decode_fn_t decodeResultRow;
- __optr_get_explain_fn_t getExplainFn;
+ __optr_open_fn_t _openFn; // DO NOT invoke this function directly
+ __optr_fn_t getNextFn;
+ __optr_fn_t getStreamResFn; // execute the aggregate in the stream model, todo remove it
+ __optr_fn_t cleanupFn; // call this function to release the allocated resources ASAP
+ __optr_close_fn_t closeFn;
+ __optr_encode_fn_t encodeResultRow;
+ __optr_decode_fn_t decodeResultRow;
+ __optr_explain_fn_t getExplainFn;
} SOperatorFpSet;
typedef struct SOperatorInfo {
@@ -367,6 +332,8 @@ typedef struct STableScanInfo {
int32_t dataBlockLoadFlag;
double sampleRatio; // data block sample ratio, 1 by default
SInterval interval; // if the upstream is an interval operator, the interval info is also kept here to get the time window to check if current data block needs to be loaded.
+
+ int32_t curTWinIdx;
} STableScanInfo;
typedef struct STagScanInfo {
@@ -375,7 +342,7 @@ typedef struct STagScanInfo {
SArray *pColMatchInfo;
int32_t curPos;
SReadHandle readHandle;
- STableGroupInfo *pTableGroups;
+ STableListInfo *pTableList;
} STagScanInfo;
typedef enum EStreamScanMode {
@@ -392,40 +359,56 @@ typedef struct SCatchSupporter {
int64_t* pKeyBuf;
} SCatchSupporter;
+typedef struct SStreamAggSupporter {
+ SArray* pResultRows; // SResultWindowInfo
+ int32_t keySize;
+ char* pKeyBuf; // window key buffer
+ SDiskbasedBuf* pResultBuf; // query result buffer based on blocked-wised disk file
+ int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row
+} SStreamAggSupporter;
+
+typedef struct SessionWindowSupporter {
+ SStreamAggSupporter* pStreamAggSup;
+ int64_t gap;
+} SessionWindowSupporter;
+
typedef struct SStreamBlockScanInfo {
- SArray* pBlockLists; // multiple SSDatablock.
- SSDataBlock* pRes; // result SSDataBlock
- SSDataBlock* pUpdateRes; // update SSDataBlock
- int32_t updateResIndex;
- int32_t blockType; // current block type
- int32_t validBlockIndex; // Is current data has returned?
- SColumnInfo* pCols; // the output column info
- uint64_t numOfRows; // total scanned rows
- uint64_t numOfExec; // execution times
- void* streamBlockReader;// stream block reader handle
- SArray* pColMatchInfo; //
- SNode* pCondition;
- SArray* tsArray;
- SUpdateInfo* pUpdateInfo;
- int32_t primaryTsIndex; // primary time stamp slot id
- void* pDataReader;
- SReadHandle readHandle;
- uint64_t tableUid; // queried super table uid
+ SArray* pBlockLists; // multiple SSDatablock.
+ SSDataBlock* pRes; // result SSDataBlock
+ SSDataBlock* pUpdateRes; // update SSDataBlock
+ int32_t updateResIndex;
+ int32_t blockType; // current block type
+ int32_t validBlockIndex; // Is current data has returned?
+ SColumnInfo* pCols; // the output column info
+ uint64_t numOfExec; // execution times
+ void* streamBlockReader;// stream block reader handle
+ SArray* pColMatchInfo; //
+ SNode* pCondition;
+ SArray* tsArray;
+ SUpdateInfo* pUpdateInfo;
+
+ SExprInfo* pPseudoExpr;
+ int32_t numOfPseudoExpr;
+
+ int32_t primaryTsIndex; // primary time stamp slot id
+ void* pDataReader;
+ SReadHandle readHandle;
+ uint64_t tableUid; // queried super table uid
EStreamScanMode scanMode;
SOperatorInfo* pOperatorDumy;
SInterval interval; // if the upstream is an interval operator, the interval info is also kept here.
- SCatchSupporter childAggSup;
- SArray* childIds;
+ SArray* childIds;
+ SessionWindowSupporter sessionSup;
+ bool assignBlockUid; // assign block uid to groupId, temporarily used for generating rollup SMA.
} SStreamBlockScanInfo;
typedef struct SSysTableScanInfo {
- SReadHandle readHandle;
-
SRetrieveMetaTableRsp* pRsp;
SRetrieveTableReq req;
SEpSet epSet;
tsem_t ready;
+ SReadHandle readHandle;
int32_t accountId;
bool showRewrite;
SNode* pCondition; // db_name filter condition, to discard data that are not in current database
@@ -455,40 +438,50 @@ typedef struct SAggSupporter {
typedef struct STimeWindowSupp {
int8_t calTrigger;
int64_t waterMark;
+ TSKEY maxTs;
SColumnInfoData timeWindowData; // query time window info for scalar function execution.
+ SHashObj *winMap;
} STimeWindowAggSupp;
typedef struct SIntervalAggOperatorInfo {
+ // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
SOptrBasicInfo binfo; // basic info
+ SAggSupporter aggSup; // aggregate supporter
+
SGroupResInfo groupResInfo; // multiple results build supporter
SInterval interval; // interval info
int32_t primaryTsIndex; // primary time stamp slot id from result of downstream operator.
STimeWindow win; // query time range
bool timeWindowInterpo; // interpolation needed or not
char** pRow; // previous row/tuple of already processed datablock
- SAggSupporter aggSup; // aggregate supporter
+ SArray* pInterpCols; // interpolation columns
STableQueryInfo* pCurrent; // current tableQueryInfo struct
int32_t order; // current SSDataBlock scan order
EOPTR_EXEC_MODEL execModel; // operator execution model [batch model|stream model]
SArray* pUpdatedWindow; // updated time window due to the input data block from the downstream operator.
STimeWindowAggSupp twAggSup;
- struct SFillInfo* pFillInfo; // fill info
bool invertible;
+ SArray* pPrevValues; // SArray used to keep the previous not null value for interpolation.
} SIntervalAggOperatorInfo;
typedef struct SStreamFinalIntervalOperatorInfo {
+ // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
SOptrBasicInfo binfo; // basic info
+ SAggSupporter aggSup; // aggregate supporter
+
SGroupResInfo groupResInfo; // multiple results build supporter
SInterval interval; // interval info
int32_t primaryTsIndex; // primary time stamp slot id from result of downstream operator.
- SAggSupporter aggSup; // aggregate supporter
int32_t order; // current SSDataBlock scan order
STimeWindowAggSupp twAggSup;
+ SArray* pChildren;
} SStreamFinalIntervalOperatorInfo;
typedef struct SAggOperatorInfo {
+ // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
SOptrBasicInfo binfo;
SAggSupporter aggSup;
+
STableQueryInfo *current;
uint64_t groupId;
SGroupResInfo groupResInfo;
@@ -501,8 +494,10 @@ typedef struct SAggOperatorInfo {
} SAggOperatorInfo;
typedef struct SProjectOperatorInfo {
+ // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
SOptrBasicInfo binfo;
SAggSupporter aggSup;
+
SSDataBlock* existDataBlock;
SArray* pPseudoColInfo;
SLimit limit;
@@ -526,7 +521,10 @@ typedef struct SFillOperatorInfo {
} SFillOperatorInfo;
typedef struct SGroupbyOperatorInfo {
+ // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
SOptrBasicInfo binfo;
+ SAggSupporter aggSup;
+
SArray* pGroupCols; // group by columns, SArray
SArray* pGroupColVals; // current group column values, SArray
SNode* pCondition;
@@ -534,7 +532,6 @@ typedef struct SGroupbyOperatorInfo {
char* keyBuf; // group by keys for hash
int32_t groupKeyLen; // total group by column width
SGroupResInfo groupResInfo;
- SAggSupporter aggSup;
SExprInfo* pScalarExprInfo;
int32_t numOfScalarExpr; // the number of scalar expression in group operator
SqlFunctionCtx* pScalarFuncCtx;
@@ -571,8 +568,10 @@ typedef struct SWindowRowsSup {
} SWindowRowsSup;
typedef struct SSessionAggOperatorInfo {
+ // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
SOptrBasicInfo binfo;
SAggSupporter aggSup;
+
SGroupResInfo groupResInfo;
SWindowRowsSup winSup;
bool reptScan; // next round scan
@@ -581,6 +580,29 @@ typedef struct SSessionAggOperatorInfo {
STimeWindowAggSupp twAggSup;
} SSessionAggOperatorInfo;
+typedef struct SResultWindowInfo {
+ SResultRowPosition pos;
+ STimeWindow win;
+ bool isOutput;
+ bool isClosed;
+} SResultWindowInfo;
+
+typedef struct SStreamSessionAggOperatorInfo {
+ SOptrBasicInfo binfo;
+ SStreamAggSupporter streamAggSup;
+ SGroupResInfo groupResInfo;
+ int64_t gap; // session window gap
+ int32_t primaryTsIndex; // primary timestamp slot id
+ int32_t order; // current SSDataBlock scan order
+ STimeWindowAggSupp twAggSup;
+ SSDataBlock* pWinBlock; // window result
+ SqlFunctionCtx* pDummyCtx; // for combine
+ SSDataBlock* pDelRes;
+ SHashObj* pStDeleted;
+ void* pDelIterator;
+ SArray* pChildren; // cache for children's result;
+} SStreamSessionAggOperatorInfo;
+
typedef struct STimeSliceOperatorInfo {
SOptrBasicInfo binfo;
SInterval interval;
@@ -588,8 +610,10 @@ typedef struct STimeSliceOperatorInfo {
} STimeSliceOperatorInfo;
typedef struct SStateWindowOperatorInfo {
+ // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
SOptrBasicInfo binfo;
SAggSupporter aggSup;
+
SGroupResInfo groupResInfo;
SWindowRowsSup winSup;
SColumn stateCol; // start row index
@@ -601,8 +625,10 @@ typedef struct SStateWindowOperatorInfo {
} SStateWindowOperatorInfo;
typedef struct SSortedMergeOperatorInfo {
-
+ // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
SOptrBasicInfo binfo;
+ SAggSupporter aggSup;
+
SArray* pSortInfo;
int32_t numOfSources;
SSortHandle *pSortHandle;
@@ -614,23 +640,18 @@ typedef struct SSortedMergeOperatorInfo {
int32_t numOfResPerPage;
char** groupVal;
SArray *groupInfo;
- SAggSupporter aggSup;
} SSortedMergeOperatorInfo;
typedef struct SSortOperatorInfo {
SOptrBasicInfo binfo;
- uint32_t sortBufSize; // max buffer size for in-memory sort
+ uint32_t sortBufSize; // max buffer size for in-memory sort
SArray* pSortInfo;
SSortHandle* pSortHandle;
SArray* pColMatchInfo; // for index map from table scan output
int32_t bufPageSize;
- // TODO extact struct
- int64_t startTs; // sort start time
- uint64_t sortElapsed; // sort elapsed time, time to flush to disk not included.
- uint64_t totalSize; // total load bytes from remote
- uint64_t totalRows; // total number of rows
- uint64_t totalElapsed; // total elapsed time
+ int64_t startTs; // sort start time
+ uint64_t sortElapsed; // sort elapsed time, time to flush to disk not included.
} SSortOperatorInfo;
typedef struct STagFilterOperatorInfo {
@@ -656,7 +677,7 @@ typedef struct SJoinOperatorInfo {
SOperatorFpSet createOperatorFpSet(__optr_open_fn_t openFn, __optr_fn_t nextFn, __optr_fn_t streamFn,
__optr_fn_t cleanup, __optr_close_fn_t closeFn, __optr_encode_fn_t encode,
- __optr_decode_fn_t decode, __optr_get_explain_fn_t explain);
+ __optr_decode_fn_t decode, __optr_explain_fn_t explain);
int32_t operatorDummyOpenFn(SOperatorInfo* pOperator);
void operatorDummyCloseFn(void* param, int32_t numOfCols);
@@ -676,6 +697,7 @@ int32_t setSDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLoadI
SArray* pColList);
void getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int64_t key, STimeWindow* win);
int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t *order, int32_t* scanFlag);
+int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaultBufsz);
void doSetOperatorCompleted(SOperatorInfo* pOperator);
void doFilter(const SNode* pFilterNode, SSDataBlock* pBlock, SArray* pColMatchInfo);
@@ -708,7 +730,7 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR
SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, tsdbReaderT pDataReader, SReadHandle* pHandle, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResultBlock, SExprInfo* pScalarExprInfo,
- int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo, const STableGroupInfo* pTableGroupInfo);
+ int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t num, SSDataBlock* pResBlock, SLimit* pLimit, SLimit* pSlimit, SExecTaskInfo* pTaskInfo);
SOperatorInfo *createSortOperatorInfo(SOperatorInfo* downstream, SSDataBlock* pResBlock, SArray* pSortInfo, SExprInfo* pExprInfo, int32_t numOfCols,
@@ -721,26 +743,25 @@ SOperatorInfo* createSysTableScanOperatorInfo(void* pSysTableReadHandle, SSDataB
SExecTaskInfo* pTaskInfo, bool showRewrite, int32_t accountId);
SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
- STimeWindowAggSupp *pTwAggSupp, const STableGroupInfo* pTableGroupInfo, SExecTaskInfo* pTaskInfo);
+ STimeWindowAggSupp *pTwAggSupp, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
- STimeWindowAggSupp *pTwAggSupp, const STableGroupInfo* pTableGroupInfo, SExecTaskInfo* pTaskInfo);
+ STimeWindowAggSupp *pTwAggSupp, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
-
SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
- STimeWindowAggSupp *pTwAggSupp, const STableGroupInfo* pTableGroupInfo, SExecTaskInfo* pTaskInfo);
+ STimeWindowAggSupp *pTwAggSupp, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResBlock, int64_t gap, int32_t tsSlotId, STimeWindowAggSupp* pTwAggSupp,
SExecTaskInfo* pTaskInfo);
SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResultBlock, SArray* pGroupColList, SNode* pCondition,
- SExprInfo* pScalarExprInfo, int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo,
- const STableGroupInfo* pTableGroupInfo);
+ SExprInfo* pScalarExprInfo, int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createDataBlockInfoScanOperator(void* dataReader, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataReader, SReadHandle* pHandle,
- uint64_t uid, SSDataBlock* pResBlock, SArray* pColList,
- SArray* pTableIdList, SExecTaskInfo* pTaskInfo, SNode* pCondition,
- SOperatorInfo* pOperatorDumy);
+
+SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHandle,
+ SArray* pTableIdList, STableScanPhysiNode* pTableScanNode, SExecTaskInfo* pTaskInfo,
+ STimeWindowAggSupp* pTwSup);
+
SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExpr, int32_t numOfCols,
SInterval* pInterval, STimeWindow* pWindow, SSDataBlock* pResBlock, int32_t fillType, SNodeListNode* fillVal,
@@ -749,15 +770,17 @@ SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInf
SSDataBlock* pResBlock, STimeWindowAggSupp *pTwAggSupp, int32_t tsSlotId, SColumn* pStateKeyCol, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
- SSDataBlock* pResultBlock, SArray* pGroupColList, SExecTaskInfo* pTaskInfo,
- const STableGroupInfo* pTableGroupInfo);
+ SSDataBlock* pResultBlock, SArray* pGroupColList, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResultBlock, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, SNode* pOnCondition, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, SExprInfo* pExpr, int32_t numOfOutput, SSDataBlock* pResBlock, SArray* pColMatchInfo, STableGroupInfo* pTableGroupInfo, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, SExprInfo* pExpr, int32_t numOfOutput, SSDataBlock* pResBlock, SArray* pColMatchInfo, STableListInfo* pTableGroupInfo, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream,
+ SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, int64_t gap,
+ int32_t tsSlotId, STimeWindowAggSupp* pTwAggSupp, SExecTaskInfo* pTaskInfo);
#if 0
SOperatorInfo* createTableSeqScanOperatorInfo(void* pTsdbReadHandle, STaskRuntimeEnv* pRuntimeEnv);
#endif
@@ -769,40 +792,57 @@ void setInputDataBlock(SOperatorInfo* pOperator, SqlFunctionCtx* pCtx, SSDataBlo
void copyTsColoum(SSDataBlock* pRes, SqlFunctionCtx* pCtx, int32_t numOfOutput);
-STableQueryInfo* createTableQueryInfo(void* buf, STimeWindow win);
-
bool isTaskKilled(SExecTaskInfo* pTaskInfo);
int32_t checkForQueryBuf(size_t numOfTables);
void setTaskKilled(SExecTaskInfo* pTaskInfo);
-
-void publishOperatorProfEvent(SOperatorInfo* operatorInfo, EQueryProfEventType eventType);
-void publishQueryAbortEvent(SExecTaskInfo* pTaskInfo, int32_t code);
-
void queryCostStatis(SExecTaskInfo* pTaskInfo);
void doDestroyTask(SExecTaskInfo* pTaskInfo);
int32_t getMaximumIdleDurationSec();
+/*
+ * ops: root operator
+ * data: *data save the result of encode, need to be freed by caller
+ * length: *length save the length of *data
+ * return: result code, 0 means success
+ */
+int32_t encodeOperator(SOperatorInfo* ops, char** data, int32_t *length);
+
+/*
+ * ops: root operator, created by caller
+ * data: save the result of decode
+ * length: the length of data
+ * return: result code, 0 means success
+ */
+int32_t decodeOperator(SOperatorInfo* ops, char* data, int32_t length);
+
void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status);
int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId,
EOPTR_EXEC_MODEL model);
int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SExplainExecInfo** pRes, int32_t* capacity,
int32_t* resNum);
-bool aggDecodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasicInfo* pInfo, char* result,
- int32_t length);
-void aggEncodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasicInfo* pInfo, char** result,
- int32_t* length);
+int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result);
+int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* length);
+
STimeWindow getActiveTimeWindow(SDiskbasedBuf* pBuf, SResultRowInfo* pResultRowInfo, int64_t ts,
SInterval* pInterval, int32_t precision, STimeWindow* win);
-int32_t getNumOfRowsInTimeWindow(SDataBlockInfo* pDataBlockInfo, TSKEY* pPrimaryColumn, int32_t startPos,
- TSKEY ekey, __block_search_fn_t searchFn, STableQueryInfo* item,
- int32_t order);
+int32_t getNumOfRowsInTimeWindow(SDataBlockInfo* pDataBlockInfo, TSKEY* pPrimaryColumn,
+ int32_t startPos, TSKEY ekey, __block_search_fn_t searchFn, STableQueryInfo* item,
+ int32_t order);
int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order);
-int32_t initCatchSupporter(SCatchSupporter* pCatchSup, size_t rowSize, size_t keyBufSize,
- const char* pKey, const char* pDir);
-
+int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey);
+SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int32_t interBufSize);
+SResultWindowInfo* getSessionTimeWindow(SArray* pWinInfos, TSKEY ts, int64_t gap,
+ int32_t* pIndex);
+int32_t updateSessionWindowInfo(SResultWindowInfo* pWinInfo, TSKEY* pTs, int32_t rows,
+ int32_t start, int64_t gap, SHashObj* pStDeleted);
+bool functionNeedToExecute(SqlFunctionCtx* pCtx);
+int64_t getSmaWaterMark(int64_t interval, double filesFactor);
+bool isSmaStream(int8_t triggerType);
+
+int32_t compareTimeWindow(const void* p1, const void* p2, const void* param);
#ifdef __cplusplus
}
#endif
diff --git a/source/libs/executor/inc/tsort.h b/source/libs/executor/inc/tsort.h
index d74628a72fb4723d1837a0547574da414253bef6..c8b1b3ee513bc508de5187c8d39ace4ae5e4b7f8 100644
--- a/source/libs/executor/inc/tsort.h
+++ b/source/libs/executor/inc/tsort.h
@@ -137,6 +137,14 @@ void* tsortGetValue(STupleHandle* pVHandle, int32_t colId);
*/
SSDataBlock* tsortGetSortedDataBlock(const SSortHandle* pSortHandle);
+/**
+ * return the sort execution information.
+ *
+ * @param pHandle
+ * @return
+ */
+SSortExecInfo tsortGetSortExecInfo(SSortHandle* pHandle);
+
#ifdef __cplusplus
}
#endif
diff --git a/source/libs/executor/src/dataDispatcher.c b/source/libs/executor/src/dataDispatcher.c
index fa9e27a5f810268f057a53d10b4d946dbd6825ea..080cf5c2ad44f31f11f0fce0e2350fe121c2c1fb 100644
--- a/source/libs/executor/src/dataDispatcher.c
+++ b/source/libs/executor/src/dataDispatcher.c
@@ -22,6 +22,8 @@
#include "tglobal.h"
#include "tqueue.h"
+extern SDataSinkStat gDataSinkStat;
+
typedef struct SDataDispatchBuf {
int32_t useSize;
int32_t allocSize;
@@ -45,6 +47,7 @@ typedef struct SDataDispatchHandle {
int32_t status;
bool queryEnd;
uint64_t useconds;
+ uint64_t cachedSize;
TdThreadMutex mutex;
} SDataDispatchHandle;
@@ -71,7 +74,7 @@ static bool needCompress(const SSDataBlock* pData, int32_t numOfCols) {
// +----------------+--------------+----------+--------------------------------------+-------------+-----------+-------------+-----------+
// The length of bitmap is decided by number of rows of this data block, and the length of each column data is
// recorded in the first segment, next to the struct header
-static void toDataCacheEntry(const SDataDispatchHandle* pHandle, const SInputData* pInput, SDataDispatchBuf* pBuf) {
+static void toDataCacheEntry(SDataDispatchHandle* pHandle, const SInputData* pInput, SDataDispatchBuf* pBuf) {
int32_t numOfCols = LIST_LENGTH(pHandle->pSchema->pSlots);
SDataCacheEntry* pEntry = (SDataCacheEntry*)pBuf->pData;
@@ -84,6 +87,9 @@ static void toDataCacheEntry(const SDataDispatchHandle* pHandle, const SInputDat
blockCompressEncode(pInput->pData, pEntry->data, &pEntry->dataLen, numOfCols, pEntry->compressed);
pBuf->useSize += pEntry->dataLen;
+
+ atomic_add_fetch_64(&pHandle->cachedSize, pEntry->dataLen);
+ atomic_add_fetch_64(&gDataSinkStat.cachedSize, pEntry->dataLen);
}
static bool allocBuf(SDataDispatchHandle* pDispatcher, const SInputData* pInput, SDataDispatchBuf* pBuf) {
@@ -156,6 +162,7 @@ static void getDataLength(SDataSinkHandle* pHandle, int32_t* pLen, bool* pQueryE
taosFreeQitem(pBuf);
*pLen = ((SDataCacheEntry*)(pDispatcher->nextOutput.pData))->dataLen;
*pQueryEnd = pDispatcher->queryEnd;
+ qDebug("got data len %d, row num %d in sink", *pLen, ((SDataCacheEntry*)(pDispatcher->nextOutput.pData))->numOfRows);
}
static int32_t getDataBlock(SDataSinkHandle* pHandle, SOutputData* pOutput) {
@@ -173,6 +180,10 @@ static int32_t getDataBlock(SDataSinkHandle* pHandle, SOutputData* pOutput) {
pOutput->numOfRows = pEntry->numOfRows;
pOutput->numOfCols = pEntry->numOfCols;
pOutput->compressed = pEntry->compressed;
+
+ atomic_sub_fetch_64(&pDispatcher->cachedSize, pEntry->dataLen);
+ atomic_sub_fetch_64(&gDataSinkStat.cachedSize, pEntry->dataLen);
+
taosMemoryFreeClear(pDispatcher->nextOutput.pData); // todo persistent
pOutput->bufStatus = updateStatus(pDispatcher);
taosThreadMutexLock(&pDispatcher->mutex);
@@ -180,11 +191,14 @@ static int32_t getDataBlock(SDataSinkHandle* pHandle, SOutputData* pOutput) {
pOutput->useconds = pDispatcher->useconds;
pOutput->precision = pDispatcher->pSchema->precision;
taosThreadMutexUnlock(&pDispatcher->mutex);
+
+
return TSDB_CODE_SUCCESS;
}
static int32_t destroyDataSinker(SDataSinkHandle* pHandle) {
SDataDispatchHandle* pDispatcher = (SDataDispatchHandle*)pHandle;
+ atomic_sub_fetch_64(&gDataSinkStat.cachedSize, pDispatcher->cachedSize);
taosMemoryFreeClear(pDispatcher->nextOutput.pData);
while (!taosQueueEmpty(pDispatcher->pDataBlocks)) {
SDataDispatchBuf* pBuf = NULL;
@@ -197,6 +211,13 @@ static int32_t destroyDataSinker(SDataSinkHandle* pHandle) {
return TSDB_CODE_SUCCESS;
}
+int32_t getCacheSize(struct SDataSinkHandle* pHandle, uint64_t* size) {
+ SDataDispatchHandle* pDispatcher = (SDataDispatchHandle*)pHandle;
+
+ *size = atomic_load_64(&pDispatcher->cachedSize);
+ return TSDB_CODE_SUCCESS;
+}
+
int32_t createDataDispatcher(SDataSinkManager* pManager, const SDataSinkNode* pDataSink, DataSinkHandle* pHandle) {
SDataDispatchHandle* dispatcher = taosMemoryCalloc(1, sizeof(SDataDispatchHandle));
if (NULL == dispatcher) {
@@ -208,6 +229,7 @@ int32_t createDataDispatcher(SDataSinkManager* pManager, const SDataSinkNode* pD
dispatcher->sink.fGetLen = getDataLength;
dispatcher->sink.fGetData = getDataBlock;
dispatcher->sink.fDestroy = destroyDataSinker;
+ dispatcher->sink.fGetCacheSize = getCacheSize;
dispatcher->pManager = pManager;
dispatcher->pSchema = pDataSink->pInputDataBlockDesc;
dispatcher->status = DS_BUF_EMPTY;
diff --git a/source/libs/executor/src/dataSinkMgt.c b/source/libs/executor/src/dataSinkMgt.c
index 64206fc10aac0ab9835d65333322657a0ccaecbf..9016ca274a3567d8cbc45d522d5e1cb93b176e68 100644
--- a/source/libs/executor/src/dataSinkMgt.c
+++ b/source/libs/executor/src/dataSinkMgt.c
@@ -19,6 +19,7 @@
#include "planner.h"
static SDataSinkManager gDataSinkManager = {0};
+SDataSinkStat gDataSinkStat = {0};
int32_t dsDataSinkMgtInit(SDataSinkMgtCfg *cfg) {
gDataSinkManager.cfg = *cfg;
@@ -26,6 +27,13 @@ int32_t dsDataSinkMgtInit(SDataSinkMgtCfg *cfg) {
return 0; // to avoid compiler eror
}
+int32_t dsDataSinkGetCacheSize(SDataSinkStat *pStat) {
+ pStat->cachedSize = atomic_load_64(&gDataSinkStat.cachedSize);
+
+ return 0;
+}
+
+
int32_t dsCreateDataSinker(const SDataSinkNode *pDataSink, DataSinkHandle* pHandle) {
if (QUERY_NODE_PHYSICAL_PLAN_DISPATCH == nodeType(pDataSink)) {
return createDataDispatcher(&gDataSinkManager, pDataSink, pHandle);
@@ -53,6 +61,12 @@ int32_t dsGetDataBlock(DataSinkHandle handle, SOutputData* pOutput) {
return pHandleImpl->fGetData(pHandleImpl, pOutput);
}
+int32_t dsGetCacheSize(DataSinkHandle handle, uint64_t *pSize) {
+ SDataSinkHandle* pHandleImpl = (SDataSinkHandle*)handle;
+ return pHandleImpl->fGetCacheSize(pHandleImpl, pSize);
+}
+
+
void dsScheduleProcess(void* ahandle, void* pItem) {
// todo
}
diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c
index 5a02547f58aa4cf73c5297dda771ba0900bce141..1c45e38b632d29340472c1955d2b097377478ce0 100644
--- a/source/libs/executor/src/executil.c
+++ b/source/libs/executor/src/executil.c
@@ -101,20 +101,8 @@ void resetResultRowInfo(STaskRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRow
pResultRowInfo->size = 0;
}
-int32_t numOfClosedResultRows(SResultRowInfo *pResultRowInfo) {
- int32_t i = 0;
-// while (i < pResultRowInfo->size && pResultRowInfo->pResult[i]->closed) {
-// ++i;
-// }
-
- return i;
-}
-
void closeAllResultRows(SResultRowInfo *pResultRowInfo) {
- assert(pResultRowInfo->size >= 0 && pResultRowInfo->capacity >= pResultRowInfo->size);
-
- for (int32_t i = 0; i < pResultRowInfo->size; ++i) {
- }
+// do nothing
}
bool isResultRowClosed(SResultRow* pRow) {
@@ -233,7 +221,7 @@ void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SHashObj* pHashmap, int
void initMultiResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayList) {
if (pGroupResInfo->pRows != NULL) {
- taosArrayDestroy(pGroupResInfo->pRows);
+ taosArrayDestroyP(pGroupResInfo->pRows, taosMemoryFree);
}
pGroupResInfo->pRows = pArrayList;
@@ -258,32 +246,6 @@ int32_t getNumOfTotalRes(SGroupResInfo* pGroupResInfo) {
return (int32_t) taosArrayGetSize(pGroupResInfo->pRows);
}
-static int64_t getNumOfResultWindowRes(STaskRuntimeEnv* pRuntimeEnv, SResultRowPosition *pos, int32_t* rowCellInfoOffset) {
- STaskAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
- ASSERT(0);
-
- for (int32_t j = 0; j < pQueryAttr->numOfOutput; ++j) {
- int32_t functionId = 0;//pQueryAttr->pExpr1[j].base.functionId;
-
- /*
- * ts, tag, tagprj function can not decide the output number of current query
- * the number of output result is decided by main output
- */
- if (functionId == FUNCTION_TS || functionId == FUNCTION_TAG || functionId == FUNCTION_TAGPRJ) {
- continue;
- }
-
-// SResultRowEntryInfo *pResultInfo = getResultCell(pResultRow, j, rowCellInfoOffset);
-// assert(pResultInfo != NULL);
-//
-// if (pResultInfo->numOfRes > 0) {
-// return pResultInfo->numOfRes;
-// }
- }
-
- return 0;
-}
-
static int32_t tableResultComparFn(const void *pLeft, const void *pRight, void *param) {
int32_t left = *(int32_t *)pLeft;
int32_t right = *(int32_t *)pRight;
@@ -381,7 +343,7 @@ static int32_t mergeIntoGroupResultImplRv(STaskRuntimeEnv *pRuntimeEnv, SGroupRe
}
- int64_t num = getNumOfResultWindowRes(pRuntimeEnv, &pResultRowCell->pos, rowCellInfoOffset);
+ int64_t num = 0;//getNumOfResultWindowRes(pRuntimeEnv, &pResultRowCell->pos, rowCellInfoOffset);
if (num <= 0) {
continue;
}
diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c
index 2811c8dce84918bc61339597150b15f56690b99d..fd62849e56805c22472a5ea438140ec655e20df0 100644
--- a/source/libs/executor/src/executor.c
+++ b/source/libs/executor/src/executor.c
@@ -19,7 +19,7 @@
#include "tdatablock.h"
#include "vnode.h"
-static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t numOfBlocks, int32_t type, char* id) {
+static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t numOfBlocks, int32_t type, bool assignUid, char* id) {
ASSERT(pOperator != NULL);
if (pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
if (pOperator->numOfDownstream == 0) {
@@ -32,11 +32,12 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
return TSDB_CODE_QRY_APP_ERROR;
}
pOperator->status = OP_NOT_OPENED;
- return doSetStreamBlock(pOperator->pDownstream[0], input, numOfBlocks, type, id);
+ return doSetStreamBlock(pOperator->pDownstream[0], input, numOfBlocks, type, assignUid, id);
} else {
pOperator->status = OP_NOT_OPENED;
SStreamBlockScanInfo* pInfo = pOperator->info;
+ pInfo->assignBlockUid = assignUid;
// the block type can not be changed in the streamscan operators
if (pInfo->blockType == 0) {
@@ -67,11 +68,11 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
}
}
-int32_t qSetStreamInput(qTaskInfo_t tinfo, const void* input, int32_t type) {
- return qSetMultiStreamInput(tinfo, input, 1, type);
+int32_t qSetStreamInput(qTaskInfo_t tinfo, const void* input, int32_t type, bool assignUid) {
+ return qSetMultiStreamInput(tinfo, input, 1, type, assignUid);
}
-int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type) {
+int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type, bool assignUid) {
if (tinfo == NULL) {
return TSDB_CODE_QRY_APP_ERROR;
}
@@ -82,7 +83,7 @@ int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numO
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
- int32_t code = doSetStreamBlock(pTaskInfo->pRoot, (void**)pBlocks, numOfBlocks, type, GET_TASKID(pTaskInfo));
+ int32_t code = doSetStreamBlock(pTaskInfo->pRoot, (void**)pBlocks, numOfBlocks, type, assignUid, GET_TASKID(pTaskInfo));
if (code != TSDB_CODE_SUCCESS) {
qError("%s failed to set the stream block data", GET_TASKID(pTaskInfo));
} else {
diff --git a/source/libs/executor/src/executorMain.c b/source/libs/executor/src/executorMain.c
index d4d8696abaa1906969077ed8829dff9113680b05..7757825733153741d6e83404051578f7f4e2aef8 100644
--- a/source/libs/executor/src/executorMain.c
+++ b/source/libs/executor/src/executorMain.c
@@ -30,13 +30,6 @@
#include "tlosertree.h"
#include "ttypes.h"
-typedef struct STaskMgmt {
- TdThreadMutex lock;
- SCacheObj *qinfoPool; // query handle pool
- int32_t vgId;
- bool closed;
-} STaskMgmt;
-
int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId, SSubplan* pSubplan,
qTaskInfo_t* pTaskInfo, DataSinkHandle* handle, EOPTR_EXEC_MODEL model) {
assert(readHandle != NULL && pSubplan != NULL);
@@ -131,36 +124,30 @@ int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pRes, uint64_t *useconds) {
// error occurs, record the error code and return to client
int32_t ret = setjmp(pTaskInfo->env);
if (ret != TSDB_CODE_SUCCESS) {
- publishQueryAbortEvent(pTaskInfo, ret);
pTaskInfo->code = ret;
cleanUpUdfs();
- qDebug("%s task abort due to error/cancel occurs, code:%s", GET_TASKID(pTaskInfo),
- tstrerror(pTaskInfo->code));
+ qDebug("%s task abort due to error/cancel occurs, code:%s", GET_TASKID(pTaskInfo), tstrerror(pTaskInfo->code));
return pTaskInfo->code;
}
qDebug("%s execTask is launched", GET_TASKID(pTaskInfo));
- publishOperatorProfEvent(pTaskInfo->pRoot, QUERY_PROF_BEFORE_OPERATOR_EXEC);
-
int64_t st = taosGetTimestampUs();
*pRes = pTaskInfo->pRoot->fpSet.getNextFn(pTaskInfo->pRoot);
uint64_t el = (taosGetTimestampUs() - st);
pTaskInfo->cost.elapsedTime += el;
-
- publishOperatorProfEvent(pTaskInfo->pRoot, QUERY_PROF_AFTER_OPERATOR_EXEC);
-
if (NULL == *pRes) {
*useconds = pTaskInfo->cost.elapsedTime;
}
+ cleanUpUdfs();
+
int32_t current = (*pRes != NULL)? (*pRes)->info.rows:0;
- pTaskInfo->totalRows += current;
+ uint64_t total = pTaskInfo->pRoot->resultInfo.totalRows;
- cleanUpUdfs();
qDebug("%s task suspended, %d rows returned, total:%" PRId64 " rows, in sinkNode:%d, elapsed:%.2f ms",
- GET_TASKID(pTaskInfo), current, pTaskInfo->totalRows, 0, el/1000.0);
+ GET_TASKID(pTaskInfo), current, total, 0, el/1000.0);
atomic_store_64(&pTaskInfo->owner, 0);
return pTaskInfo->code;
@@ -210,7 +197,7 @@ int32_t qIsTaskCompleted(qTaskInfo_t qinfo) {
void qDestroyTask(qTaskInfo_t qTaskHandle) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*) qTaskHandle;
- qDebug("%s execTask completed, numOfRows:%"PRId64, GET_TASKID(pTaskInfo), pTaskInfo->totalRows);
+ qDebug("%s execTask completed, numOfRows:%"PRId64, GET_TASKID(pTaskInfo), pTaskInfo->pRoot->resultInfo.totalRows);
queryCostStatis(pTaskInfo); // print the query cost summary
doDestroyTask(pTaskInfo);
diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c
index 1da5770c6d7aa1b49a4250b147afd21795d76468..3c46f46e198bd1fae7ebd6173df64b8dec0e6737 100644
--- a/source/libs/executor/src/executorimpl.c
+++ b/source/libs/executor/src/executorimpl.c
@@ -28,6 +28,7 @@
#include "ttime.h"
#include "executorimpl.h"
+#include "index.h"
#include "query.h"
#include "tcompare.h"
#include "tcompression.h"
@@ -86,8 +87,8 @@ static UNUSED_FUNC void* u_realloc(void* p, size_t __size) {
#define realloc u_realloc
#endif
-#define CLEAR_QUERY_STATUS(q, st) ((q)->status &= (~(st)))
-#define GET_NUM_OF_TABLEGROUP(q) taosArrayGetSize((q)->tableqinfoGroupInfo.pGroupList)
+#define CLEAR_QUERY_STATUS(q, st) ((q)->status &= (~(st)))
+//#define GET_NUM_OF_TABLEGROUP(q) taosArrayGetSize((q)->tableqinfoGroupInfo.pGroupList)
#define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->interval.interval > 0)
int32_t getMaximumIdleDurationSec() { return tsShellActivityTimer * 2; }
@@ -98,7 +99,6 @@ static int32_t getExprFunctionId(SExprInfo* pExprInfo) {
}
static void doSetTagValueToResultBuf(char* output, const char* val, int16_t type, int16_t bytes);
-static bool functionNeedToExecute(SqlFunctionCtx* pCtx);
static void setBlockStatisInfo(SqlFunctionCtx* pCtx, SExprInfo* pExpr, SSDataBlock* pSDataBlock);
@@ -124,6 +124,8 @@ static void destroySysTableScannerOperatorInfo(void* param, int32_t numOfOutput)
void doSetOperatorCompleted(SOperatorInfo* pOperator) {
pOperator->status = OP_EXEC_DONE;
+
+ pOperator->cost.totalCost = (taosGetTimestampUs() - pOperator->pTaskInfo->cost.start * 1000) / 1000.0;
if (pOperator->pTaskInfo != NULL) {
setTaskStatus(pOperator->pTaskInfo, TASK_COMPLETED);
}
@@ -137,7 +139,7 @@ int32_t operatorDummyOpenFn(SOperatorInfo* pOperator) {
SOperatorFpSet createOperatorFpSet(__optr_open_fn_t openFn, __optr_fn_t nextFn, __optr_fn_t streamFn,
__optr_fn_t cleanup, __optr_close_fn_t closeFn, __optr_encode_fn_t encode,
- __optr_decode_fn_t decode, __optr_get_explain_fn_t explain) {
+ __optr_decode_fn_t decode, __optr_explain_fn_t explain) {
SOperatorFpSet fpSet = {
._openFn = openFn,
.getNextFn = nextFn,
@@ -237,36 +239,6 @@ static bool hasNull(SColumn* pColumn, SColumnDataAgg* pStatis) {
return true;
}
-static void prepareResultListBuffer(SResultRowInfo* pResultRowInfo, jmp_buf env) {
- int64_t newCapacity = 0;
-
- // more than the capacity, reallocate the resources
- if (pResultRowInfo->size < pResultRowInfo->capacity) {
- return;
- }
-
- if (pResultRowInfo->capacity > 10000) {
- newCapacity = (int64_t)(pResultRowInfo->capacity * 1.25);
- } else {
- newCapacity = (int64_t)(pResultRowInfo->capacity * 1.5);
- }
-
- if (newCapacity <= pResultRowInfo->capacity) {
- newCapacity += 4;
- }
-
- char* p = taosMemoryRealloc(pResultRowInfo->pPosition, newCapacity * sizeof(SResultRowPosition));
- if (p == NULL) {
- longjmp(env, TSDB_CODE_OUT_OF_MEMORY);
- }
-
- pResultRowInfo->pPosition = (SResultRowPosition*)p;
-
- int32_t inc = (int32_t)newCapacity - pResultRowInfo->capacity;
- memset(&pResultRowInfo->pPosition[pResultRowInfo->capacity], 0, sizeof(SResultRowPosition) * inc);
- pResultRowInfo->capacity = (int32_t)newCapacity;
-}
-
static bool chkResultRowFromKey(STaskRuntimeEnv* pRuntimeEnv, SResultRowInfo* pResultRowInfo, char* pData,
int16_t bytes, bool masterscan, uint64_t uid) {
bool existed = false;
@@ -304,7 +276,7 @@ static bool chkResultRowFromKey(STaskRuntimeEnv* pRuntimeEnv, SResultRowInfo* pR
return p1 != NULL;
}
-SResultRow* getNewResultRow_rv(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int32_t interBufSize) {
+SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int32_t interBufSize) {
SFilePage* pData = NULL;
// in the first scan, new space needed for results
@@ -373,6 +345,8 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR
// In case of group by column query, the required SResultRow object must be existInCurrentResusltRowInfo in the
// pResultRowInfo object.
if (p1 != NULL) {
+
+ // todo
pResult = getResultRowByPos(pResultBuf, p1);
ASSERT(pResult->pageId == p1->pageId && pResult->offset == p1->offset);
}
@@ -381,34 +355,28 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR
// 1. close current opened time window
if (pResultRowInfo->cur.pageId != -1 && ((pResult == NULL) || (pResult->pageId != pResultRowInfo->cur.pageId &&
pResult->offset != pResultRowInfo->cur.offset))) {
- // todo extract function
SResultRowPosition pos = pResultRowInfo->cur;
- SFilePage* pPage = getBufPage(pResultBuf, pos.pageId);
- SResultRow* pRow = (SResultRow*)((char*)pPage + pos.offset);
- closeResultRow(pRow);
+ SFilePage* pPage = getBufPage(pResultBuf, pos.pageId);
releaseBufPage(pResultBuf, pPage);
}
// allocate a new buffer page
- prepareResultListBuffer(pResultRowInfo, pTaskInfo->env);
if (pResult == NULL) {
ASSERT(pSup->resultRowSize > 0);
- pResult = getNewResultRow_rv(pResultBuf, groupId, pSup->resultRowSize);
+ pResult = getNewResultRow(pResultBuf, groupId, pSup->resultRowSize);
+
initResultRow(pResult);
// add a new result set for a new group
SResultRowPosition pos = {.pageId = pResult->pageId, .offset = pResult->offset};
- taosHashPut(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes), &pos,
- sizeof(SResultRowPosition));
+ taosHashPut(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes), &pos, sizeof(SResultRowPosition));
}
// 2. set the new time window to be the new active time window
- pResultRowInfo->pPosition[pResultRowInfo->size++] =
- (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset};
pResultRowInfo->cur = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset};
// too many time window in query
- if (pResultRowInfo->size > MAX_INTERVAL_TIME_WINDOW) {
+ if (taosHashGetSize(pSup->pResultRowHashTable) > MAX_INTERVAL_TIME_WINDOW) {
longjmp(pTaskInfo->env, TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW);
}
@@ -583,11 +551,13 @@ void initExecTimeWindowInfo(SColumnInfoData* pColData, STimeWindow* pQueryWindow
colDataAppendInt64(pColData, 4, &pQueryWindow->ekey);
}
+
void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, STimeWindow* pWin,
SColumnInfoData* pTimeWindowData, int32_t offset, int32_t forwardStep, TSKEY* tsCol,
int32_t numOfTotal, int32_t numOfOutput, int32_t order) {
for (int32_t k = 0; k < numOfOutput; ++k) {
// keep it temporarily
+ // todo no need this??
bool hasAgg = pCtx[k].input.colDataAggIsSet;
int32_t numOfRows = pCtx[k].input.numOfRows;
int32_t startOffset = pCtx[k].input.startRowIndex;
@@ -607,7 +577,8 @@ void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, STimeWindow
if (fmIsWindowPseudoColumnFunc(pCtx[k].functionId)) {
SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(&pCtx[k]);
- char* p = GET_ROWCELL_INTERBUF(pEntryInfo);
+
+ char* p = GET_ROWCELL_INTERBUF(pEntryInfo);
SColumnInfoData idata = {0};
idata.info.type = TSDB_DATA_TYPE_BIGINT;
@@ -618,22 +589,23 @@ void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, STimeWindow
SScalarParam tw = {.numOfRows = 5, .columnData = pTimeWindowData};
pCtx[k].sfp.process(&tw, 1, &out);
pEntryInfo->numOfRes = 1;
- continue;
- }
- int32_t code = TSDB_CODE_SUCCESS;
- if (functionNeedToExecute(&pCtx[k]) && pCtx[k].fpSet.process != NULL) {
- code = pCtx[k].fpSet.process(&pCtx[k]);
- if (code != TSDB_CODE_SUCCESS) {
- qError("%s apply functions error, code: %s", GET_TASKID(taskInfo), tstrerror(code));
- taskInfo->code = code;
- longjmp(taskInfo->env, code);
+ } else {
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (functionNeedToExecute(&pCtx[k]) && pCtx[k].fpSet.process != NULL) {
+ code = pCtx[k].fpSet.process(&pCtx[k]);
+
+ if (code != TSDB_CODE_SUCCESS) {
+ qError("%s apply functions error, code: %s", GET_TASKID(taskInfo), tstrerror(code));
+ taskInfo->code = code;
+ longjmp(taskInfo->env, code);
+ }
}
- }
- // restore it
- pCtx[k].input.colDataAggIsSet = hasAgg;
- pCtx[k].input.startRowIndex = startOffset;
- pCtx[k].input.numOfRows = numOfRows;
+ // restore it
+ pCtx[k].input.colDataAggIsSet = hasAgg;
+ pCtx[k].input.startRowIndex = startOffset;
+ pCtx[k].input.numOfRows = numOfRows;
+ }
}
}
@@ -772,12 +744,14 @@ static int32_t doAggregateImpl(SOperatorInfo* pOperator, TSKEY startTs, SqlFunct
for (int32_t k = 0; k < pOperator->numOfExprs; ++k) {
if (functionNeedToExecute(&pCtx[k])) {
// todo add a dummy funtion to avoid process check
- if (pCtx[k].fpSet.process != NULL) {
- int32_t code = pCtx[k].fpSet.process(&pCtx[k]);
- if (code != TSDB_CODE_SUCCESS) {
- qError("%s aggregate function error happens, code: %s", GET_TASKID(pOperator->pTaskInfo), tstrerror(code));
- return code;
- }
+ if (pCtx[k].fpSet.process == NULL) {
+ continue;
+ }
+
+ int32_t code = pCtx[k].fpSet.process(&pCtx[k]);
+ if (code != TSDB_CODE_SUCCESS) {
+ qError("%s aggregate function error happens, code: %s", GET_TASKID(pOperator->pTaskInfo), tstrerror(code));
+ return code;
}
}
}
@@ -935,7 +909,7 @@ int32_t setGroupResultOutputBuf(SOptrBasicInfo* binfo, int32_t numOfCols, char*
return TSDB_CODE_SUCCESS;
}
-static bool functionNeedToExecute(SqlFunctionCtx* pCtx) {
+bool functionNeedToExecute(SqlFunctionCtx* pCtx) {
struct SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
// in case of timestamp column, always generated results.
@@ -1216,7 +1190,6 @@ static void* destroySqlFunctionCtx(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
taosVariantDestroy(&pCtx[i].param[j].param);
}
- taosVariantDestroy(&pCtx[i].tag);
taosMemoryFreeClear(pCtx[i].subsidiaries.pCtx);
taosMemoryFree(pCtx[i].input.pData);
taosMemoryFree(pCtx[i].input.pColumnDataAgg);
@@ -1246,9 +1219,9 @@ void setTaskKilled(SExecTaskInfo* pTaskInfo) { pTaskInfo->code = TSDB_CODE_TSC_Q
static bool isCachedLastQuery(STaskAttr* pQueryAttr) {
for (int32_t i = 0; i < pQueryAttr->numOfOutput; ++i) {
int32_t functionId = getExprFunctionId(&pQueryAttr->pExpr1[i]);
- if (functionId == FUNCTION_LAST || functionId == FUNCTION_LAST_DST) {
- continue;
- }
+// if (functionId == FUNCTION_LAST || functionId == FUNCTION_LAST_DST) {
+// continue;
+// }
return false;
}
@@ -1298,7 +1271,7 @@ static int32_t updateBlockLoadStatus(STaskAttr* pQuery, int32_t status) {
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
int32_t functionId = getExprFunctionId(&pQuery->pExpr1[i]);
-
+#if 0
if (functionId == FUNCTION_TS || functionId == FUNCTION_TS_DUMMY || functionId == FUNCTION_TAG ||
functionId == FUNCTION_TAG_DUMMY) {
continue;
@@ -1309,6 +1282,8 @@ static int32_t updateBlockLoadStatus(STaskAttr* pQuery, int32_t status) {
} else {
hasOtherFunc = true;
}
+#endif
+
}
if (hasFirstLastFunc && status == BLK_DATA_NOT_LOAD) {
@@ -1746,8 +1721,7 @@ void setFunctionResultOutput(SOptrBasicInfo* pInfo, SAggSupporter* pSup, int32_t
SResultRow* pRow = doSetResultOutBufByKey(pSup->pResultBuf, pResultRowInfo, (char*)&tid, sizeof(tid), true, groupId,
pTaskInfo, false, pSup);
- ASSERT(pDataBlock->info.numOfCols == numOfExprs);
- for (int32_t i = 0; i < pDataBlock->info.numOfCols; ++i) {
+ for (int32_t i = 0; i < numOfExprs; ++i) {
struct SResultRowEntryInfo* pEntry = getResultCell(pRow, i, rowCellInfoOffset);
cleanupResultRowEntry(pEntry);
@@ -1755,7 +1729,7 @@ void setFunctionResultOutput(SOptrBasicInfo* pInfo, SAggSupporter* pSup, int32_t
pCtx[i].scanFlag = stage;
}
- initCtxOutputBuffer(pCtx, pDataBlock->info.numOfCols);
+ initCtxOutputBuffer(pCtx, numOfExprs);
}
void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t* bufCapacity, int32_t numOfInputRows) {
@@ -1785,41 +1759,13 @@ void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t* bufCapacity, int32_t numOf
// set the correct pointer after the memory buffer reallocated.
int32_t functionId = pBInfo->pCtx[i].functionId;
-
+#if 0
if (functionId == FUNCTION_TOP || functionId == FUNCTION_BOTTOM || functionId == FUNCTION_DIFF ||
functionId == FUNCTION_DERIVATIVE) {
// if (i > 0) pBInfo->pCtx[i].pTsOutput = pBInfo->pCtx[i - 1].pOutput;
}
- }
-}
+#endif
-void copyTsColoum(SSDataBlock* pRes, SqlFunctionCtx* pCtx, int32_t numOfOutput) {
- bool needCopyTs = false;
- int32_t tsNum = 0;
- char* src = NULL;
- for (int32_t i = 0; i < numOfOutput; i++) {
- int32_t functionId = pCtx[i].functionId;
- if (functionId == FUNCTION_DIFF || functionId == FUNCTION_DERIVATIVE) {
- needCopyTs = true;
- if (i > 0 && pCtx[i - 1].functionId == FUNCTION_TS_DUMMY) {
- SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, i - 1); // find ts data
- src = pColRes->pData;
- }
- } else if (functionId == FUNCTION_TS_DUMMY) {
- tsNum++;
- }
- }
-
- if (!needCopyTs) return;
- if (tsNum < 2) return;
- if (src == NULL) return;
-
- for (int32_t i = 0; i < numOfOutput; i++) {
- int32_t functionId = pCtx[i].functionId;
- if (functionId == FUNCTION_TS_DUMMY) {
- SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, i);
- memcpy(pColRes->pData, src, pColRes->info.bytes * pRes->info.rows);
- }
}
}
@@ -1845,12 +1791,6 @@ void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status) {
}
}
-STableQueryInfo* createTableQueryInfo(void* buf, STimeWindow win) {
- STableQueryInfo* pTableQueryInfo = buf;
- pTableQueryInfo->lastKey = win.skey;
- return pTableQueryInfo;
-}
-
void destroyTableQueryInfoImpl(STableQueryInfo* pTableQueryInfo) {
if (pTableQueryInfo == NULL) {
return;
@@ -2140,102 +2080,6 @@ int32_t doFillTimeIntervalGapsInResults(struct SFillInfo* pFillInfo, SSDataBlock
return pBlock->info.rows;
}
-void publishOperatorProfEvent(SOperatorInfo* pOperator, EQueryProfEventType eventType) {
- SQueryProfEvent event = {0};
-
- event.eventType = eventType;
- event.eventTime = taosGetTimestampUs();
- event.operatorType = pOperator->operatorType;
- // if (pQInfo->summary.queryProfEvents) {
- // taosArrayPush(pQInfo->summary.queryProfEvents, &event);
- // }
-}
-
-void publishQueryAbortEvent(SExecTaskInfo* pTaskInfo, int32_t code) {
- SQueryProfEvent event;
- event.eventType = QUERY_PROF_QUERY_ABORT;
- event.eventTime = taosGetTimestampUs();
- event.abortCode = code;
-
- if (pTaskInfo->cost.queryProfEvents) {
- taosArrayPush(pTaskInfo->cost.queryProfEvents, &event);
- }
-}
-
-typedef struct {
- uint8_t operatorType;
- int64_t beginTime;
- int64_t endTime;
- int64_t selfTime;
- int64_t descendantsTime;
-} SOperatorStackItem;
-
-static void doOperatorExecProfOnce(SOperatorStackItem* item, SQueryProfEvent* event, SArray* opStack,
- SHashObj* profResults) {
- item->endTime = event->eventTime;
- item->selfTime = (item->endTime - item->beginTime) - (item->descendantsTime);
-
- for (int32_t j = 0; j < taosArrayGetSize(opStack); ++j) {
- SOperatorStackItem* ancestor = taosArrayGet(opStack, j);
- ancestor->descendantsTime += item->selfTime;
- }
-
- uint8_t operatorType = item->operatorType;
- SOperatorProfResult* result = taosHashGet(profResults, &operatorType, sizeof(operatorType));
- if (result != NULL) {
- result->sumRunTimes++;
- result->sumSelfTime += item->selfTime;
- } else {
- SOperatorProfResult opResult;
- opResult.operatorType = operatorType;
- opResult.sumSelfTime = item->selfTime;
- opResult.sumRunTimes = 1;
- taosHashPut(profResults, &(operatorType), sizeof(operatorType), &opResult, sizeof(opResult));
- }
-}
-
-void calculateOperatorProfResults(void) {
- // if (pQInfo->summary.queryProfEvents == NULL) {
- // // qDebug("QInfo:0x%"PRIx64" query prof events array is null", pQInfo->qId);
- // return;
- // }
- //
- // if (pQInfo->summary.operatorProfResults == NULL) {
- // // qDebug("QInfo:0x%"PRIx64" operator prof results hash is null", pQInfo->qId);
- // return;
- // }
-
- SArray* opStack = taosArrayInit(32, sizeof(SOperatorStackItem));
- if (opStack == NULL) {
- return;
- }
-#if 0
- size_t size = taosArrayGetSize(pQInfo->summary.queryProfEvents);
- SHashObj* profResults = pQInfo->summary.operatorProfResults;
-
- for (int i = 0; i < size; ++i) {
- SQueryProfEvent* event = taosArrayGet(pQInfo->summary.queryProfEvents, i);
- if (event->eventType == QUERY_PROF_BEFORE_OPERATOR_EXEC) {
- SOperatorStackItem opItem;
- opItem.operatorType = event->operatorType;
- opItem.beginTime = event->eventTime;
- opItem.descendantsTime = 0;
- taosArrayPush(opStack, &opItem);
- } else if (event->eventType == QUERY_PROF_AFTER_OPERATOR_EXEC) {
- SOperatorStackItem* item = taosArrayPop(opStack);
- assert(item->operatorType == event->operatorType);
- doOperatorExecProfOnce(item, event, opStack, profResults);
- } else if (event->eventType == QUERY_PROF_QUERY_ABORT) {
- SOperatorStackItem* item;
- while ((item = taosArrayPop(opStack)) != NULL) {
- doOperatorExecProfOnce(item, event, opStack, profResults);
- }
- }
- }
-#endif
- taosArrayDestroy(opStack);
-}
-
void queryCostStatis(SExecTaskInfo* pTaskInfo) {
STaskCostInfo* pSummary = &pTaskInfo->cost;
@@ -2268,15 +2112,6 @@ void queryCostStatis(SExecTaskInfo* pTaskInfo) {
// qDebug("QInfo:0x%"PRIx64" :cost summary: winResPool size:%.2f Kb, numOfWin:%"PRId64", tableInfoSize:%.2f Kb,
// hashTable:%.2f Kb", pQInfo->qId, pSummary->winInfoSize/1024.0,
// pSummary->numOfTimeWindows, pSummary->tableInfoSize/1024.0, pSummary->hashSize/1024.0);
-
- if (pSummary->operatorProfResults) {
- SOperatorProfResult* opRes = taosHashIterate(pSummary->operatorProfResults, NULL);
- while (opRes != NULL) {
- // qDebug("QInfo:0x%" PRIx64 " :cost summary: operator : %d, exec times: %" PRId64 ", self time: %" PRId64,
- // pQInfo->qId, opRes->operatorType, opRes->sumRunTimes, opRes->sumSelfTime);
- opRes = taosHashIterate(pSummary->operatorProfResults, opRes);
- }
- }
}
// static void updateOffsetVal(STaskRuntimeEnv *pRuntimeEnv, SDataBlockInfo *pBlockInfo) {
@@ -2535,7 +2370,7 @@ int32_t appendDownstream(SOperatorInfo* p, SOperatorInfo** pDownstream, int32_t
return TSDB_CODE_SUCCESS;
}
-static void doDestroyTableQueryInfo(STableGroupInfo* pTableqinfoGroupInfo);
+static void doDestroyTableList(STableListInfo* pTableqinfoList);
static void doTableQueryInfoTimeWindowCheck(SExecTaskInfo* pTaskInfo, STableQueryInfo* pTableQueryInfo, int32_t order) {
#if 0
@@ -2687,46 +2522,7 @@ int32_t setSDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLoadI
int32_t compLen, int32_t numOfOutput, int64_t startTs, uint64_t* total,
SArray* pColList) {
if (pColList == NULL) { // data from other sources
- blockDataEnsureCapacity(pRes, numOfRows);
-
- int32_t dataLen = *(int32_t*)pData;
- pData += sizeof(int32_t);
-
- pRes->info.groupId = *(uint64_t*)pData;
- pData += sizeof(uint64_t);
-
- int32_t* colLen = (int32_t*)pData;
-
- char* pStart = pData + sizeof(int32_t) * numOfOutput;
- for (int32_t i = 0; i < numOfOutput; ++i) {
- colLen[i] = htonl(colLen[i]);
- ASSERT(colLen[i] >= 0);
-
- SColumnInfoData* pColInfoData = taosArrayGet(pRes->pDataBlock, i);
- if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) {
- pColInfoData->varmeta.length = colLen[i];
- pColInfoData->varmeta.allocLen = colLen[i];
-
- memcpy(pColInfoData->varmeta.offset, pStart, sizeof(int32_t) * numOfRows);
- pStart += sizeof(int32_t) * numOfRows;
-
- if (colLen[i] > 0) {
- pColInfoData->pData = taosMemoryMalloc(colLen[i]);
- }
- } else {
- memcpy(pColInfoData->nullbitmap, pStart, BitmapLen(numOfRows));
- pStart += BitmapLen(numOfRows);
- }
-
- if (colLen[i] > 0) {
- memcpy(pColInfoData->pData, pStart, colLen[i]);
- }
-
- // TODO setting this flag to true temporarily so aggregate function on stable will
- // examine NULL value for non-primary key column
- pColInfoData->hasNull = true;
- pStart += colLen[i];
- }
+ blockCompressDecode(pRes, numOfOutput, numOfRows, pData);
} else { // extract data according to pColList
ASSERT(numOfOutput == taosArrayGetSize(pColList));
char* pStart = pData;
@@ -2824,6 +2620,7 @@ static void* setAllSourcesCompleted(SOperatorInfo* pOperator, int64_t startTs) {
int64_t el = taosGetTimestampUs() - startTs;
SLoadRemoteDataInfo* pLoadInfo = &pExchangeInfo->loadInfo;
+
pLoadInfo->totalElapsed += el;
size_t totalSources = taosArrayGetSize(pExchangeInfo->pSources);
@@ -2867,6 +2664,7 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx
pExchangeInfo->loadInfo.totalRows);
pDataInfo->status = EX_SOURCE_DATA_EXHAUSTED;
completed += 1;
+ taosMemoryFreeClear(pDataInfo->pRsp);
continue;
}
@@ -2874,6 +2672,7 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx
code = setSDataBlockFromFetchRsp(pExchangeInfo->pResult, pLoadInfo, pTableRsp->numOfRows, pTableRsp->data,
pTableRsp->compLen, pTableRsp->numOfCols, startTs, &pDataInfo->totalRows, NULL);
if (code != 0) {
+ taosMemoryFreeClear(pDataInfo->pRsp);
goto _error;
}
@@ -2894,10 +2693,12 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx
pDataInfo->status = EX_SOURCE_DATA_NOT_READY;
code = doSendFetchDataRequest(pExchangeInfo, pTaskInfo, i);
if (code != TSDB_CODE_SUCCESS) {
+ taosMemoryFreeClear(pDataInfo->pRsp);
goto _error;
}
}
+ taosMemoryFreeClear(pDataInfo->pRsp);
return pExchangeInfo->pResult;
}
@@ -3000,6 +2801,7 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) {
pDataInfo->status = EX_SOURCE_DATA_EXHAUSTED;
pExchangeInfo->current += 1;
+ taosMemoryFreeClear(pDataInfo->pRsp);
continue;
}
@@ -3024,6 +2826,8 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) {
pLoadInfo->totalSize);
}
+ pOperator->resultInfo.totalRows += pRes->info.rows;
+ taosMemoryFreeClear(pDataInfo->pRsp);
return pExchangeInfo->pResult;
}
}
@@ -3033,10 +2837,10 @@ static int32_t prepareLoadRemoteData(SOperatorInfo* pOperator) {
return TSDB_CODE_SUCCESS;
}
+ int64_t st = taosGetTimestampUs();
+
SExchangeInfo* pExchangeInfo = pOperator->info;
- if (pExchangeInfo->seqLoadData) {
- // do nothing for sequentially load data
- } else {
+ if (!pExchangeInfo->seqLoadData) {
int32_t code = prepareConcurrentlyLoad(pOperator);
if (code != TSDB_CODE_SUCCESS) {
return code;
@@ -3044,6 +2848,7 @@ static int32_t prepareLoadRemoteData(SOperatorInfo* pOperator) {
}
OPTR_SET_OPENED(pOperator);
+ pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0;
return TSDB_CODE_SUCCESS;
}
@@ -3071,15 +2876,6 @@ static SSDataBlock* doLoadRemoteData(SOperatorInfo* pOperator) {
} else {
return concurrentlyLoadRemoteData(pOperator);
}
-
-#if 0
- _error:
- taosMemoryFreeClear(pMsg);
- taosMemoryFreeClear(pMsgSendInfo);
-
- terrno = pTaskInfo->code;
- return NULL;
-#endif
}
static int32_t initDataSource(int32_t numOfSources, SExchangeInfo* pInfo) {
@@ -3108,12 +2904,8 @@ SOperatorInfo* createExchangeOperatorInfo(void* pTransporter, const SNodeList* p
SExecTaskInfo* pTaskInfo) {
SExchangeInfo* pInfo = taosMemoryCalloc(1, sizeof(SExchangeInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
-
if (pInfo == NULL || pOperator == NULL) {
- taosMemoryFreeClear(pInfo);
- taosMemoryFreeClear(pOperator);
- terrno = TSDB_CODE_QRY_OUT_OF_MEMORY;
- return NULL;
+ goto _error;
}
size_t numOfSources = LIST_LENGTH(pSources);
@@ -3149,7 +2941,6 @@ SOperatorInfo* createExchangeOperatorInfo(void* pTransporter, const SNodeList* p
pOperator->fpSet = createOperatorFpSet(prepareLoadRemoteData, doLoadRemoteData, NULL, NULL,
destroyExchangeOperatorInfo, NULL, NULL, NULL);
pInfo->pTransporter = pTransporter;
-
return pOperator;
_error:
@@ -3498,7 +3289,7 @@ int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scan
// todo add more information about exchange operation
int32_t type = pOperator->operatorType;
if (type == QUERY_NODE_PHYSICAL_PLAN_EXCHANGE || type == QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN ||
- type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
+ type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN || type == QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN) {
*order = TSDB_ORDER_ASC;
*scanFlag = MAIN_SCAN;
return TSDB_CODE_SUCCESS;
@@ -3528,14 +3319,13 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) {
SOptrBasicInfo* pInfo = &pAggInfo->binfo;
SOperatorInfo* downstream = pOperator->pDownstream[0];
+ int64_t st = taosGetTimestampUs();
+
int32_t order = TSDB_ORDER_ASC;
int32_t scanFlag = MAIN_SCAN;
while (1) {
- publishOperatorProfEvent(downstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
- publishOperatorProfEvent(downstream, QUERY_PROF_AFTER_OPERATOR_EXEC);
-
if (pBlock == NULL) {
break;
}
@@ -3563,14 +3353,14 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) {
}
#if 0 // test for encode/decode result info
- if(pOperator->encodeResultRow){
+ if(pOperator->fpSet.encodeResultRow){
char *result = NULL;
int32_t length = 0;
- SAggSupporter *pSup = &pAggInfo->aggSup;
- pOperator->encodeResultRow(pOperator, pSup, pInfo, &result, &length);
+ pOperator->fpSet.encodeResultRow(pOperator, &result, &length);
+ SAggSupporter* pSup = &pAggInfo->aggSup;
taosHashClear(pSup->pResultRowHashTable);
pInfo->resultRowInfo.size = 0;
- pOperator->decodeResultRow(pOperator, pSup, pInfo, result, length);
+ pOperator->fpSet.decodeResultRow(pOperator, result);
if(result){
taosMemoryFree(result);
}
@@ -3581,6 +3371,8 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) {
closeAllResultRows(&pAggInfo->binfo.resultRowInfo);
initGroupedResultInfo(&pAggInfo->groupResInfo, pAggInfo->aggSup.pResultRowHashTable, 0);
OPTR_SET_OPENED(pOperator);
+
+ pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0;
return TSDB_CODE_SUCCESS;
}
@@ -3595,6 +3387,7 @@ static SSDataBlock* getAggregateResult(SOperatorInfo* pOperator) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
pTaskInfo->code = pOperator->fpSet._openFn(pOperator);
if (pTaskInfo->code != TSDB_CODE_SUCCESS) {
+ doSetOperatorCompleted(pOperator);
return NULL;
}
@@ -3604,20 +3397,31 @@ static SSDataBlock* getAggregateResult(SOperatorInfo* pOperator) {
doSetOperatorCompleted(pOperator);
}
- return (blockDataGetNumOfRows(pInfo->pRes) != 0) ? pInfo->pRes : NULL;
+ size_t rows = blockDataGetNumOfRows(pInfo->pRes); // pInfo->pRes : NULL;
+ pOperator->resultInfo.totalRows += rows;
+
+ return (rows == 0) ? NULL : pInfo->pRes;
}
-void aggEncodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasicInfo* pInfo, char** result,
- int32_t* length) {
- int32_t size = taosHashGetSize(pSup->pResultRowHashTable);
- size_t keyLen = sizeof(uint64_t) * 2; // estimate the key length
- int32_t totalSize = sizeof(int32_t) + size * (sizeof(int32_t) + keyLen + sizeof(int32_t) + pSup->resultRowSize);
- *result = taosMemoryCalloc(1, totalSize);
+int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* length) {
+ if (result == NULL || length == NULL) {
+ return TSDB_CODE_TSC_INVALID_INPUT;
+ }
+ SOptrBasicInfo* pInfo = (SOptrBasicInfo*)(pOperator->info);
+ SAggSupporter* pSup = (SAggSupporter*)POINTER_SHIFT(pOperator->info, sizeof(SOptrBasicInfo));
+ int32_t size = taosHashGetSize(pSup->pResultRowHashTable);
+ size_t keyLen = sizeof(uint64_t) * 2; // estimate the key length
+ int32_t totalSize =
+ sizeof(int32_t) + sizeof(int32_t) + size * (sizeof(int32_t) + keyLen + sizeof(int32_t) + pSup->resultRowSize);
+
+ *result = (char*)taosMemoryCalloc(1, totalSize);
if (*result == NULL) {
- longjmp(pOperator->pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY);
+ return TSDB_CODE_OUT_OF_MEMORY;
}
- *(int32_t*)(*result) = size;
+
int32_t offset = sizeof(int32_t);
+ *(int32_t*)(*result + offset) = size;
+ offset += sizeof(int32_t);
// prepare memory
SResultRowPosition* pos = &pInfo->resultRowInfo.cur;
@@ -3639,12 +3443,11 @@ void aggEncodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasi
// recalculate the result size
int32_t realTotalSize = offset + sizeof(int32_t) + keyLen + sizeof(int32_t) + pSup->resultRowSize;
if (realTotalSize > totalSize) {
- char* tmp = taosMemoryRealloc(*result, realTotalSize);
+ char* tmp = (char*)taosMemoryRealloc(*result, realTotalSize);
if (tmp == NULL) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
taosMemoryFree(*result);
*result = NULL;
- longjmp(pOperator->pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY);
+ return TSDB_CODE_OUT_OF_MEMORY;
} else {
*result = tmp;
}
@@ -3664,30 +3467,34 @@ void aggEncodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasi
pIter = taosHashIterate(pSup->pResultRowHashTable, pIter);
}
- if (length) {
- *length = offset;
- }
- return;
+ *(int32_t*)(*result) = offset;
+ *length = offset;
+
+ return TDB_CODE_SUCCESS;
}
-bool aggDecodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasicInfo* pInfo, char* result,
- int32_t length) {
- if (!result || length <= 0) {
- return false;
+int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result) {
+ if (result == NULL) {
+ return TSDB_CODE_TSC_INVALID_INPUT;
}
+ SOptrBasicInfo* pInfo = (SOptrBasicInfo*)(pOperator->info);
+ SAggSupporter* pSup = (SAggSupporter*)POINTER_SHIFT(pOperator->info, sizeof(SOptrBasicInfo));
// int32_t size = taosHashGetSize(pSup->pResultRowHashTable);
- int32_t count = *(int32_t*)(result);
-
+ int32_t length = *(int32_t*)(result);
int32_t offset = sizeof(int32_t);
+
+ int32_t count = *(int32_t*)(result + offset);
+ offset += sizeof(int32_t);
+
while (count-- > 0 && length > offset) {
int32_t keyLen = *(int32_t*)(result + offset);
offset += sizeof(int32_t);
uint64_t tableGroupId = *(uint64_t*)(result + offset);
- SResultRow* resultRow = getNewResultRow_rv(pSup->pResultBuf, tableGroupId, pSup->resultRowSize);
+ SResultRow* resultRow = getNewResultRow(pSup->pResultBuf, tableGroupId, pSup->resultRowSize);
if (!resultRow) {
- longjmp(pOperator->pTaskInfo->env, TSDB_CODE_TSC_INVALID_INPUT);
+ return TSDB_CODE_TSC_INVALID_INPUT;
}
// add a new result set for a new group
@@ -3697,7 +3504,7 @@ bool aggDecodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasi
offset += keyLen;
int32_t valueLen = *(int32_t*)(result + offset);
if (valueLen != pSup->resultRowSize) {
- longjmp(pOperator->pTaskInfo->env, TSDB_CODE_TSC_INVALID_INPUT);
+ return TSDB_CODE_TSC_INVALID_INPUT;
}
offset += sizeof(int32_t);
int32_t pageId = resultRow->pageId;
@@ -3708,17 +3515,13 @@ bool aggDecodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasi
offset += valueLen;
initResultRow(resultRow);
- prepareResultListBuffer(&pInfo->resultRowInfo, pOperator->pTaskInfo->env);
- // pInfo->resultRowInfo.cur = pInfo->resultRowInfo.size;
- // pInfo->resultRowInfo.pPosition[pInfo->resultRowInfo.size++] =
- // (SResultRowPosition){.pageId = resultRow->pageId, .offset = resultRow->offset};
pInfo->resultRowInfo.cur = (SResultRowPosition){.pageId = resultRow->pageId, .offset = resultRow->offset};
}
if (offset != length) {
- longjmp(pOperator->pTaskInfo->env, TSDB_CODE_TSC_INVALID_INPUT);
+ return TSDB_CODE_TSC_INVALID_INPUT;
}
- return true;
+ return TDB_CODE_SUCCESS;
}
enum {
@@ -3830,22 +3633,25 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
}
#endif
+ int64_t st = 0;
int32_t order = 0;
int32_t scanFlag = 0;
+ if (pOperator->cost.openCost == 0) {
+ st = taosGetTimestampUs();
+ }
+
SOperatorInfo* downstream = pOperator->pDownstream[0];
while (1) {
// The downstream exec may change the value of the newgroup, so use a local variable instead.
- publishOperatorProfEvent(downstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
- publishOperatorProfEvent(downstream, QUERY_PROF_AFTER_OPERATOR_EXEC);
-
if (pBlock == NULL) {
- setTaskStatus(pOperator->pTaskInfo, TASK_COMPLETED);
+ doSetOperatorCompleted(pOperator);
break;
}
+#if 0
// Return result of the previous group in the firstly.
if (false) {
if (pRes->info.rows > 0) {
@@ -3855,6 +3661,7 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
initCtxOutputBuffer(pInfo->pCtx, pOperator->numOfExprs);
}
}
+#endif
// the pDataBlock are always the same one, no need to call this again
int32_t code = getTableScanInfo(pOperator->pDownstream[0], &order, &scanFlag);
@@ -3881,8 +3688,14 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
pProjectInfo->curOutput += pInfo->pRes->info.rows;
- // copyTsColoum(pRes, pInfo->pCtx, pOperator->numOfExprs);
- return (pInfo->pRes->info.rows > 0) ? pInfo->pRes : NULL;
+ size_t rows = pInfo->pRes->info.rows;
+ pOperator->resultInfo.totalRows += rows;
+
+ if (pOperator->cost.openCost == 0) {
+ pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0;
+ }
+
+ return (rows > 0) ? pInfo->pRes : NULL;
}
static void doHandleRemainBlockForNewGroupImpl(SFillOperatorInfo* pInfo, SResultInfo* pResultInfo, bool* newgroup,
@@ -3939,10 +3752,7 @@ static SSDataBlock* doFill(SOperatorInfo* pOperator) {
SOperatorInfo* pDownstream = pOperator->pDownstream[0];
while (1) {
- publishOperatorProfEvent(pDownstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock* pBlock = pDownstream->fpSet.getNextFn(pDownstream);
- publishOperatorProfEvent(pDownstream, QUERY_PROF_AFTER_OPERATOR_EXEC);
-
if (*newgroup) {
assert(pBlock != NULL);
}
@@ -3996,18 +3806,6 @@ static SSDataBlock* doFill(SOperatorInfo* pOperator) {
}
}
-// todo set the attribute of query scan count
-static int32_t getNumOfScanTimes(STaskAttr* pQueryAttr) {
- for (int32_t i = 0; i < pQueryAttr->numOfOutput; ++i) {
- int32_t functionId = getExprFunctionId(&pQueryAttr->pExpr1[i]);
- if (functionId == FUNCTION_STDDEV || functionId == FUNCTION_PERCT) {
- return 2;
- }
- }
-
- return 1;
-}
-
static void destroyOperatorInfo(SOperatorInfo* pOperator) {
if (pOperator == NULL) {
return;
@@ -4042,6 +3840,21 @@ static void destroyOperatorInfo(SOperatorInfo* pOperator) {
taosMemoryFreeClear(pOperator);
}
+int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaultBufsz) {
+ *defaultPgsz = 4096;
+ while (*defaultPgsz < rowSize * 4) {
+ *defaultPgsz <<= 1u;
+ }
+
+ // at least four pages need to be in buffer
+ *defaultBufsz = 4096 * 256;
+ if ((*defaultBufsz) <= (*defaultPgsz)) {
+ (*defaultBufsz) = (*defaultPgsz) * 4;
+ }
+
+ return 0;
+}
+
int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t numOfOutput, size_t keyBufSize,
const char* pKey) {
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
@@ -4054,16 +3867,9 @@ int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t n
return TSDB_CODE_OUT_OF_MEMORY;
}
- uint32_t defaultPgsz = 4096;
- while (defaultPgsz < pAggSup->resultRowSize * 4) {
- defaultPgsz <<= 1u;
- }
-
- // at least four pages need to be in buffer
- int32_t defaultBufsz = 4096 * 256;
- if (defaultBufsz <= defaultPgsz) {
- defaultBufsz = defaultPgsz * 4;
- }
+ uint32_t defaultPgsz = 0;
+ uint32_t defaultBufsz = 0;
+ getBufferPgSize(pAggSup->resultRowSize, &defaultPgsz, &defaultBufsz);
int32_t code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, TD_TMP_DIR_PATH);
if (code != TSDB_CODE_SUCCESS) {
@@ -4102,35 +3908,30 @@ void initResultSizeInfo(SOperatorInfo* pOperator, int32_t numOfRows) {
}
}
-static STableQueryInfo* initTableQueryInfo(const STableGroupInfo* pTableGroupInfo) {
- if (pTableGroupInfo->numOfTables == 0) {
- return NULL;
- }
-
- STableQueryInfo* pTableQueryInfo = taosMemoryCalloc(pTableGroupInfo->numOfTables, sizeof(STableQueryInfo));
- if (pTableQueryInfo == NULL) {
- return NULL;
- }
-
- int32_t index = 0;
- for (int32_t i = 0; i < taosArrayGetSize(pTableGroupInfo->pGroupList); ++i) {
- SArray* pa = taosArrayGetP(pTableGroupInfo->pGroupList, i);
- for (int32_t j = 0; j < taosArrayGetSize(pa); ++j) {
- STableKeyInfo* pk = taosArrayGet(pa, j);
- STableQueryInfo* pTQueryInfo = &pTableQueryInfo[index++];
- pTQueryInfo->lastKey = pk->lastKey;
- }
- }
-
- STimeWindow win = {0, INT64_MAX};
- createTableQueryInfo(pTableQueryInfo, win);
- return pTableQueryInfo;
-}
+// static STableQueryInfo* initTableQueryInfo(const STableListInfo* pTableListInfo) {
+// int32_t size = taosArrayGetSize(pTableListInfo->pTableList);
+// if (size == 0) {
+// return NULL;
+// }
+//
+// STableQueryInfo* pTableQueryInfo = taosMemoryCalloc(size, sizeof(STableQueryInfo));
+// if (pTableQueryInfo == NULL) {
+// return NULL;
+// }
+//
+// for (int32_t j = 0; j < size; ++j) {
+// STableKeyInfo* pk = taosArrayGet(pTableListInfo->pTableList, j);
+// STableQueryInfo* pTQueryInfo = &pTableQueryInfo[j];
+// pTQueryInfo->lastKey = pk->lastKey;
+// }
+//
+// pTableQueryInfo->lastKey = 0;
+// return pTableQueryInfo;
+//}
SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResultBlock, SExprInfo* pScalarExprInfo,
- int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo,
- const STableGroupInfo* pTableGroupInfo) {
+ int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo) {
SAggOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SAggOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
@@ -4143,7 +3944,6 @@ SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo*
initResultSizeInfo(pOperator, numOfRows);
int32_t code =
initAggInfo(&pInfo->binfo, &pInfo->aggSup, pExprInfo, numOfCols, pResultBlock, keyBufSize, pTaskInfo->id.str);
- pInfo->pTableQueryInfo = initTableQueryInfo(pTableGroupInfo);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -4541,11 +4341,12 @@ static SExecTaskInfo* createExecTaskInfo(uint64_t queryId, uint64_t taskId, EOPT
}
static tsdbReaderT doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle,
- STableGroupInfo* pTableGroupInfo, uint64_t queryId, uint64_t taskId);
+ STableListInfo* pTableGroupInfo, uint64_t queryId, uint64_t taskId,
+ SNode* pTagCond);
-static int32_t doCreateTableGroup(void* metaHandle, int32_t tableType, uint64_t tableUid, STableGroupInfo* pGroupInfo,
- uint64_t queryId, uint64_t taskId);
-static SArray* extractTableIdList(const STableGroupInfo* pTableGroupInfo);
+static int32_t getTableList(void* metaHandle, int32_t tableType, uint64_t tableUid, STableListInfo* pListInfo,
+ SNode* pTagCond);
+static SArray* extractTableIdList(const STableListInfo* pTableGroupInfo);
static SArray* extractColumnInfo(SNodeList* pNodeList);
static SArray* createSortInfo(SNodeList* pNodeList);
@@ -4559,29 +4360,30 @@ void extractTableSchemaVersion(SReadHandle* pHandle, uint64_t uid, SExecTaskInfo
pTaskInfo->schemaVer.tablename = strdup(mr.me.name);
if (mr.me.type == TSDB_SUPER_TABLE) {
- pTaskInfo->schemaVer.sversion = mr.me.stbEntry.schema.sver;
- pTaskInfo->schemaVer.tversion = mr.me.stbEntry.schemaTag.sver;
+ pTaskInfo->schemaVer.sversion = mr.me.stbEntry.schemaRow.version;
+ pTaskInfo->schemaVer.tversion = mr.me.stbEntry.schemaTag.version;
} else if (mr.me.type == TSDB_CHILD_TABLE) {
tb_uid_t suid = mr.me.ctbEntry.suid;
metaGetTableEntryByUid(&mr, suid);
- pTaskInfo->schemaVer.sversion = mr.me.stbEntry.schema.sver;
- pTaskInfo->schemaVer.tversion = mr.me.stbEntry.schemaTag.sver;
+ pTaskInfo->schemaVer.sversion = mr.me.stbEntry.schemaRow.version;
+ pTaskInfo->schemaVer.tversion = mr.me.stbEntry.schemaTag.version;
} else {
- pTaskInfo->schemaVer.sversion = mr.me.ntbEntry.schema.sver;
+ pTaskInfo->schemaVer.sversion = mr.me.ntbEntry.schemaRow.version;
}
metaReaderClear(&mr);
}
SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SReadHandle* pHandle,
- uint64_t queryId, uint64_t taskId, STableGroupInfo* pTableGroupInfo) {
+ uint64_t queryId, uint64_t taskId, STableListInfo* pTableListInfo, SNode* pTagCond) {
int32_t type = nodeType(pPhyNode);
if (pPhyNode->pChildren == NULL || LIST_LENGTH(pPhyNode->pChildren) == 0) {
if (QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN == type) {
STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pPhyNode;
- tsdbReaderT pDataReader = doCreateDataReader(pTableScanNode, pHandle, pTableGroupInfo, (uint64_t)queryId, taskId);
+ tsdbReaderT pDataReader =
+ doCreateDataReader(pTableScanNode, pHandle, pTableListInfo, (uint64_t)queryId, taskId, pTagCond);
if (pDataReader == NULL && terrno != 0) {
return NULL;
}
@@ -4600,35 +4402,26 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
} else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN == type) {
SScanPhysiNode* pScanPhyNode = (SScanPhysiNode*)pPhyNode; // simple child table.
STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pPhyNode;
-
- int32_t numOfCols = 0;
-
+ STimeWindowAggSupp twSup = {
+ .waterMark = pTableScanNode->watermark, .calTrigger = pTableScanNode->triggerType, .maxTs = INT64_MIN};
tsdbReaderT pDataReader = NULL;
if (pHandle->vnode) {
- pDataReader = doCreateDataReader(pTableScanNode, pHandle, pTableGroupInfo, (uint64_t)queryId, taskId);
+ pDataReader = doCreateDataReader(pTableScanNode, pHandle, pTableListInfo, (uint64_t)queryId, taskId, pTagCond);
} else {
- doCreateTableGroup(pHandle->meta, pScanPhyNode->tableType, pScanPhyNode->uid, pTableGroupInfo, queryId, taskId);
+ getTableList(pHandle->meta, pScanPhyNode->tableType, pScanPhyNode->uid, pTableListInfo, pTagCond);
}
if (pDataReader == NULL && terrno != 0) {
- qDebug("pDataReader is NULL");
+ qDebug("%s pDataReader is NULL", GET_TASKID(pTaskInfo));
// return NULL;
} else {
- qDebug("pDataReader is not NULL");
+ qDebug("%s pDataReader is not NULL", GET_TASKID(pTaskInfo));
}
+ SArray* tableIdList = extractTableIdList(pTableListInfo);
- SDataBlockDescNode* pDescNode = pScanPhyNode->node.pOutputDataBlockDesc;
- SOperatorInfo* pOperatorDumy = createTableScanOperatorInfo(pTableScanNode, pDataReader, pHandle, pTaskInfo);
-
- SArray* tableIdList = extractTableIdList(pTableGroupInfo);
-
- SSDataBlock* pResBlock = createResDataBlock(pDescNode);
- SArray* pCols =
- extractColMatchInfo(pScanPhyNode->pScanCols, pDescNode, &numOfCols, pTaskInfo, COL_MATCH_FROM_COL_ID);
+ SOperatorInfo* pOperator = createStreamScanOperatorInfo(pDataReader, pHandle,
+ tableIdList, pTableScanNode, pTaskInfo, &twSup);
- SOperatorInfo* pOperator =
- createStreamScanOperatorInfo(pHandle->reader, pDataReader, pHandle, pScanPhyNode->uid, pResBlock, pCols,
- tableIdList, pTaskInfo, pScanPhyNode->node.pConditions, pOperatorDumy);
taosArrayDestroy(tableIdList);
return pOperator;
} else if (QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN == type) {
@@ -4653,8 +4446,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
SSDataBlock* pResBlock = createResDataBlock(pDescNode);
- int32_t code = doCreateTableGroup(pHandle->meta, pScanPhyNode->tableType, pScanPhyNode->uid, pTableGroupInfo,
- queryId, taskId);
+ int32_t code = getTableList(pHandle->meta, pScanPhyNode->tableType, pScanPhyNode->uid, pTableListInfo, pTagCond);
if (code != TSDB_CODE_SUCCESS) {
return NULL;
}
@@ -4667,7 +4459,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
COL_MATCH_FROM_COL_ID);
SOperatorInfo* pOperator =
- createTagScanOperatorInfo(pHandle, pExprInfo, num, pResBlock, colList, pTableGroupInfo, pTaskInfo);
+ createTagScanOperatorInfo(pHandle, pExprInfo, num, pResBlock, colList, pTableListInfo, pTaskInfo);
return pOperator;
} else {
ASSERT(0);
@@ -4680,7 +4472,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
SOperatorInfo** ops = taosMemoryCalloc(size, POINTER_BYTES);
for (int32_t i = 0; i < size; ++i) {
SPhysiNode* pChildNode = (SPhysiNode*)nodesListGetNode(pPhyNode->pChildren, i);
- ops[i] = createOperatorTree(pChildNode, pTaskInfo, pHandle, queryId, taskId, pTableGroupInfo);
+ ops[i] = createOperatorTree(pChildNode, pTaskInfo, pHandle, queryId, taskId, pTableListInfo, pTagCond);
if (ops[i] == NULL) {
return NULL;
}
@@ -4709,10 +4501,10 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
if (pAggNode->pGroupKeys != NULL) {
SArray* pColList = extractColumnInfo(pAggNode->pGroupKeys);
pOptr = createGroupOperatorInfo(ops[0], pExprInfo, num, pResBlock, pColList, pAggNode->node.pConditions,
- pScalarExprInfo, numOfScalarExpr, pTaskInfo, NULL);
+ pScalarExprInfo, numOfScalarExpr, pTaskInfo);
} else {
- pOptr = createAggregateOperatorInfo(ops[0], pExprInfo, num, pResBlock, pScalarExprInfo, numOfScalarExpr,
- pTaskInfo, pTableGroupInfo);
+ pOptr =
+ createAggregateOperatorInfo(ops[0], pExprInfo, num, pResBlock, pScalarExprInfo, numOfScalarExpr, pTaskInfo);
}
} else if (QUERY_NODE_PHYSICAL_PLAN_INTERVAL == type || QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL == type) {
SIntervalPhysiNode* pIntervalPhyNode = (SIntervalPhysiNode*)pPhyNode;
@@ -4728,11 +4520,23 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
.precision = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->node.resType.precision};
STimeWindowAggSupp as = {.waterMark = pIntervalPhyNode->window.watermark,
- .calTrigger = pIntervalPhyNode->window.triggerType};
+ .calTrigger = pIntervalPhyNode->window.triggerType,
+ .maxTs = INT64_MIN,
+ .winMap = NULL,};
+ if (isSmaStream(pIntervalPhyNode->window.triggerType)) {
+ if (FLT_LESS(pIntervalPhyNode->window.filesFactor, 1.000000)) {
+ as.calTrigger = STREAM_TRIGGER_AT_ONCE_SMA;
+ } else {
+ _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_TIMESTAMP);
+ as.winMap = taosHashInit(64, hashFn, true, HASH_NO_LOCK);
+ as.waterMark = getSmaWaterMark(interval.interval,
+ pIntervalPhyNode->window.filesFactor);
+ as.calTrigger = STREAM_TRIGGER_WINDOW_CLOSE_SMA;
+ }
+ }
int32_t tsSlotId = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId;
- pOptr = createIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId, &as, pTableGroupInfo,
- pTaskInfo);
+ pOptr = createIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId, &as, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_SORT == type) {
SSortPhysiNode* pSortPhyNode = (SSortPhysiNode*)pPhyNode;
@@ -4762,13 +4566,26 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
pOptr =
createSessionAggOperatorInfo(ops[0], pExprInfo, num, pResBlock, pSessionNode->gap, tsSlotId, &as, pTaskInfo);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW == type) {
+ SSessionWinodwPhysiNode* pSessionNode = (SSessionWinodwPhysiNode*)pPhyNode;
+
+ STimeWindowAggSupp as = {.waterMark = pSessionNode->window.watermark,
+ .calTrigger = pSessionNode->window.triggerType};
+
+ SExprInfo* pExprInfo = createExprInfo(pSessionNode->window.pFuncs, NULL, &num);
+ SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
+ int32_t tsSlotId = ((SColumnNode*)pSessionNode->window.pTspk)->slotId;
+
+ pOptr = createStreamSessionAggOperatorInfo(ops[0], pExprInfo, num, pResBlock, pSessionNode->gap, tsSlotId, &as,
+ pTaskInfo);
+
} else if (QUERY_NODE_PHYSICAL_PLAN_PARTITION == type) {
SPartitionPhysiNode* pPartNode = (SPartitionPhysiNode*)pPhyNode;
SArray* pColList = extractPartitionColInfo(pPartNode->pPartitionKeys);
SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
SExprInfo* pExprInfo = createExprInfo(pPartNode->pTargets, NULL, &num);
- pOptr = createPartitionOperatorInfo(ops[0], pExprInfo, num, pResBlock, pColList, pTaskInfo, NULL);
+ pOptr = createPartitionOperatorInfo(ops[0], pExprInfo, num, pResBlock, pColList, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW == type) {
SStateWinodwPhysiNode* pStateNode = (SStateWinodwPhysiNode*)pPhyNode;
@@ -4803,6 +4620,18 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
return pOptr;
}
+int32_t compareTimeWindow(const void* p1, const void* p2, const void* param) {
+ const SQueryTableDataCond* pCond = param;
+ const STimeWindow* pWin1 = p1;
+ const STimeWindow* pWin2 = p2;
+ if (pCond->order == TSDB_ORDER_ASC) {
+ return pWin1->skey - pWin2->skey;
+ } else if (pCond->order == TSDB_ORDER_DESC) {
+ return pWin2->skey - pWin1->skey;
+ }
+ return 0;
+}
+
int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysiNode* pTableScanNode) {
pCond->loadExternalRows = false;
@@ -4814,16 +4643,30 @@ int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysi
return terrno;
}
- pCond->twindow = pTableScanNode->scanRange;
+ // pCond->twindow = pTableScanNode->scanRange;
+ // TODO: get it from stable scan node
+ pCond->numOfTWindows = 1;
+ pCond->twindows = taosMemoryCalloc(pCond->numOfTWindows, sizeof(STimeWindow));
+ pCond->twindows[0] = pTableScanNode->scanRange;
#if 1
// todo work around a problem, remove it later
- if ((pCond->order == TSDB_ORDER_ASC && pCond->twindow.skey > pCond->twindow.ekey) ||
- (pCond->order == TSDB_ORDER_DESC && pCond->twindow.skey < pCond->twindow.ekey)) {
- TSWAP(pCond->twindow.skey, pCond->twindow.ekey);
+ for (int32_t i = 0; i < pCond->numOfTWindows; ++i) {
+ if ((pCond->order == TSDB_ORDER_ASC && pCond->twindows[i].skey > pCond->twindows[i].ekey) ||
+ (pCond->order == TSDB_ORDER_DESC && pCond->twindows[i].skey < pCond->twindows[i].ekey)) {
+ TSWAP(pCond->twindows[i].skey, pCond->twindows[i].ekey);
+ }
}
#endif
+ for (int32_t i = 0; i < pCond->numOfTWindows; ++i) {
+ if ((pCond->order == TSDB_ORDER_ASC && pCond->twindows[i].skey > pCond->twindows[i].ekey) ||
+ (pCond->order == TSDB_ORDER_DESC && pCond->twindows[i].skey < pCond->twindows[i].ekey)) {
+ TSWAP(pCond->twindows[i].skey, pCond->twindows[i].ekey);
+ }
+ }
+ taosqsort(pCond->twindows, pCond->numOfTWindows, sizeof(STimeWindow), pCond, compareTimeWindow);
+
pCond->type = BLOCK_LOAD_OFFSET_SEQ_ORDER;
// pCond->type = pTableScanNode->scanFlag;
@@ -4982,46 +4825,62 @@ SArray* extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNod
return pList;
}
-int32_t doCreateTableGroup(void* metaHandle, int32_t tableType, uint64_t tableUid, STableGroupInfo* pGroupInfo,
- uint64_t queryId, uint64_t taskId) {
- int32_t code = 0;
+int32_t getTableList(void* metaHandle, int32_t tableType, uint64_t tableUid, STableListInfo* pListInfo,
+ SNode* pTagCond) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ pListInfo->pTableList = taosArrayInit(8, sizeof(STableKeyInfo));
+
if (tableType == TSDB_SUPER_TABLE) {
- code = tsdbQuerySTableByTagCond(metaHandle, tableUid, 0, NULL, 0, 0, NULL, pGroupInfo, NULL, 0, queryId, taskId);
+ if (pTagCond) {
+ SIndexMetaArg metaArg = {.metaEx = metaHandle, .metaHandle = tsdbGetIdx(metaHandle), .suid = tableUid};
+
+ SArray* res = taosArrayInit(8, sizeof(uint64_t));
+ code = doFilterTag(pTagCond, &metaArg, res);
+ if (code != TSDB_CODE_SUCCESS) {
+ qError("failed to get tableIds, reason: %s, suid: %" PRIu64 "", tstrerror(code), tableUid);
+ taosArrayDestroy(res);
+ terrno = code;
+ return code;
+ } else {
+ qDebug("sucess to get tableIds, size: %d, suid: %" PRIu64 "", (int)taosArrayGetSize(res), tableUid);
+ }
+ for (int i = 0; i < taosArrayGetSize(res); i++) {
+ STableKeyInfo info = {.lastKey = TSKEY_INITIAL_VAL, .uid = *(uint64_t*)taosArrayGet(res, i)};
+ taosArrayPush(pListInfo->pTableList, &info);
+ }
+ taosArrayDestroy(res);
+ } else {
+ code = tsdbGetAllTableList(metaHandle, tableUid, pListInfo->pTableList);
+ }
} else { // Create one table group.
- code = tsdbGetOneTableGroup(metaHandle, tableUid, 0, pGroupInfo);
+ STableKeyInfo info = {.lastKey = 0, .uid = tableUid};
+ taosArrayPush(pListInfo->pTableList, &info);
}
return code;
}
-SArray* extractTableIdList(const STableGroupInfo* pTableGroupInfo) {
+SArray* extractTableIdList(const STableListInfo* pTableGroupInfo) {
SArray* tableIdList = taosArrayInit(4, sizeof(uint64_t));
- if (pTableGroupInfo->numOfTables > 0) {
- SArray* pa = taosArrayGetP(pTableGroupInfo->pGroupList, 0);
- ASSERT(taosArrayGetSize(pTableGroupInfo->pGroupList) == 1);
-
- // Transfer the Array of STableKeyInfo into uid list.
- size_t numOfTables = taosArrayGetSize(pa);
- for (int32_t i = 0; i < numOfTables; ++i) {
- STableKeyInfo* pkeyInfo = taosArrayGet(pa, i);
- taosArrayPush(tableIdList, &pkeyInfo->uid);
- }
+ // Transfer the Array of STableKeyInfo into uid list.
+ for (int32_t i = 0; i < taosArrayGetSize(pTableGroupInfo->pTableList); ++i) {
+ STableKeyInfo* pkeyInfo = taosArrayGet(pTableGroupInfo->pTableList, i);
+ taosArrayPush(tableIdList, &pkeyInfo->uid);
}
return tableIdList;
}
tsdbReaderT doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle,
- STableGroupInfo* pTableGroupInfo, uint64_t queryId, uint64_t taskId) {
- uint64_t uid = pTableScanNode->scan.uid;
- int32_t code =
- doCreateTableGroup(pHandle->meta, pTableScanNode->scan.tableType, uid, pTableGroupInfo, queryId, taskId);
+ STableListInfo* pTableListInfo, uint64_t queryId, uint64_t taskId, SNode* pTagCond) {
+ int32_t code =
+ getTableList(pHandle->meta, pTableScanNode->scan.tableType, pTableScanNode->scan.uid, pTableListInfo, pTagCond);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
- if (pTableGroupInfo->numOfTables == 0) {
+ if (taosArrayGetSize(pTableListInfo->pTableList) == 0) {
code = 0;
qDebug("no table qualified for query, TID:0x%" PRIx64 ", QID:0x%" PRIx64, taskId, queryId);
goto _error;
@@ -5033,13 +4892,100 @@ tsdbReaderT doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle*
goto _error;
}
- return tsdbQueryTables(pHandle->vnode, &cond, pTableGroupInfo, queryId, taskId);
+ return tsdbQueryTables(pHandle->vnode, &cond, pTableListInfo, queryId, taskId);
_error:
terrno = code;
return NULL;
}
+int32_t encodeOperator(SOperatorInfo* ops, char** result, int32_t* length) {
+ int32_t code = TDB_CODE_SUCCESS;
+ char* pCurrent = NULL;
+ int32_t currLength = 0;
+ if (ops->fpSet.encodeResultRow) {
+ if (result == NULL || length == NULL) {
+ return TSDB_CODE_TSC_INVALID_INPUT;
+ }
+ code = ops->fpSet.encodeResultRow(ops, &pCurrent, &currLength);
+
+ if (code != TDB_CODE_SUCCESS) {
+ if (*result != NULL) {
+ taosMemoryFree(*result);
+ *result = NULL;
+ }
+ return code;
+ }
+
+ if (*result == NULL) {
+ *result = (char*)taosMemoryCalloc(1, currLength + sizeof(int32_t));
+ if (*result == NULL) {
+ taosMemoryFree(pCurrent);
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ memcpy(*result + sizeof(int32_t), pCurrent, currLength);
+ *(int32_t*)(*result) = currLength + sizeof(int32_t);
+ } else {
+ int32_t sizePre = *(int32_t*)(*result);
+ char* tmp = (char*)taosMemoryRealloc(*result, sizePre + currLength);
+ if (tmp == NULL) {
+ taosMemoryFree(pCurrent);
+ taosMemoryFree(*result);
+ *result = NULL;
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ *result = tmp;
+ memcpy(*result + sizePre, pCurrent, currLength);
+ *(int32_t*)(*result) += currLength;
+ }
+ taosMemoryFree(pCurrent);
+ *length = *(int32_t*)(*result);
+ }
+
+ for (int32_t i = 0; i < ops->numOfDownstream; ++i) {
+ code = encodeOperator(ops->pDownstream[i], result, length);
+ if (code != TDB_CODE_SUCCESS) {
+ return code;
+ }
+ }
+ return TDB_CODE_SUCCESS;
+}
+
+int32_t decodeOperator(SOperatorInfo* ops, char* result, int32_t length) {
+ int32_t code = TDB_CODE_SUCCESS;
+ if (ops->fpSet.decodeResultRow) {
+ if (result == NULL) {
+ return TSDB_CODE_TSC_INVALID_INPUT;
+ }
+ ASSERT(length == *(int32_t*)result);
+ char* data = result + sizeof(int32_t);
+ code = ops->fpSet.decodeResultRow(ops, data);
+ if (code != TDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ int32_t totalLength = *(int32_t*)result;
+ int32_t dataLength = *(int32_t*)data;
+
+ if (totalLength == dataLength + sizeof(int32_t)) { // the last data
+ result = NULL;
+ length = 0;
+ } else {
+ result += dataLength;
+ *(int32_t*)(result) = totalLength - dataLength;
+ length = totalLength - dataLength;
+ }
+ }
+
+ for (int32_t i = 0; i < ops->numOfDownstream; ++i) {
+ code = decodeOperator(ops->pDownstream[i], result, length);
+ if (code != TDB_CODE_SUCCESS) {
+ return code;
+ }
+ }
+ return TDB_CODE_SUCCESS;
+}
+
int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId,
EOPTR_EXEC_MODEL model) {
uint64_t queryId = pPlan->id.queryId;
@@ -5051,8 +4997,8 @@ int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SRead
goto _complete;
}
- (*pTaskInfo)->pRoot =
- createOperatorTree(pPlan->pNode, *pTaskInfo, pHandle, queryId, taskId, &(*pTaskInfo)->tableqinfoGroupInfo);
+ (*pTaskInfo)->pRoot = createOperatorTree(pPlan->pNode, *pTaskInfo, pHandle, queryId, taskId,
+ &(*pTaskInfo)->tableqinfoList, pPlan->pTagCond);
if (NULL == (*pTaskInfo)->pRoot) {
code = terrno;
goto _complete;
@@ -5111,34 +5057,18 @@ void freeColumnFilterInfo(SColumnFilterInfo* pFilter, int32_t numOfFilters) {
taosMemoryFree(pFilter);
}
-static void doDestroyTableQueryInfo(STableGroupInfo* pTableqinfoGroupInfo) {
- if (pTableqinfoGroupInfo->pGroupList != NULL) {
- int32_t numOfGroups = (int32_t)taosArrayGetSize(pTableqinfoGroupInfo->pGroupList);
- for (int32_t i = 0; i < numOfGroups; ++i) {
- SArray* p = taosArrayGetP(pTableqinfoGroupInfo->pGroupList, i);
-
- size_t num = taosArrayGetSize(p);
- for (int32_t j = 0; j < num; ++j) {
- STableQueryInfo* item = taosArrayGetP(p, j);
- destroyTableQueryInfoImpl(item);
- }
-
- taosArrayDestroy(p);
- }
- }
-
- taosArrayDestroy(pTableqinfoGroupInfo->pGroupList);
- taosHashCleanup(pTableqinfoGroupInfo->map);
+static void doDestroyTableList(STableListInfo* pTableqinfoList) {
+ taosArrayDestroy(pTableqinfoList->pTableList);
+ taosHashCleanup(pTableqinfoList->map);
- pTableqinfoGroupInfo->pGroupList = NULL;
- pTableqinfoGroupInfo->map = NULL;
- pTableqinfoGroupInfo->numOfTables = 0;
+ pTableqinfoList->pTableList = NULL;
+ pTableqinfoList->map = NULL;
}
void doDestroyTask(SExecTaskInfo* pTaskInfo) {
qDebug("%s execTask is freed", GET_TASKID(pTaskInfo));
- doDestroyTableQueryInfo(&pTaskInfo->tableqinfoGroupInfo);
+ doDestroyTableList(&pTaskInfo->tableqinfoList);
destroyOperatorInfo(pTaskInfo->pRoot);
// taosArrayDestroy(pTaskInfo->summary.queryProfEvents);
// taosHashCleanup(pTaskInfo->summary.operatorProfResults);
@@ -5222,16 +5152,21 @@ int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SExplainExecInfo
}
}
- (*pRes)[*resNum].numOfRows = operatorInfo->resultInfo.totalRows;
- (*pRes)[*resNum].startupCost = operatorInfo->cost.openCost;
- (*pRes)[*resNum].totalCost = operatorInfo->cost.totalCost;
+ SExplainExecInfo* pInfo = &(*pRes)[*resNum];
+
+ pInfo->numOfRows = operatorInfo->resultInfo.totalRows;
+ pInfo->startupCost = operatorInfo->cost.openCost;
+ pInfo->totalCost = operatorInfo->cost.totalCost;
if (operatorInfo->fpSet.getExplainFn) {
- int32_t code = (*operatorInfo->fpSet.getExplainFn)(operatorInfo, &(*pRes)->verboseInfo);
+ int32_t code = operatorInfo->fpSet.getExplainFn(operatorInfo, &pInfo->verboseInfo, &pInfo->verboseLen);
if (code) {
- qError("operator getExplainFn failed, error:%s", tstrerror(code));
+ qError("%s operator getExplainFn failed, code:%s", GET_TASKID(operatorInfo->pTaskInfo), tstrerror(code));
return code;
}
+ } else {
+ pInfo->verboseLen = 0;
+ pInfo->verboseInfo = NULL;
}
++(*resNum);
@@ -5248,15 +5183,37 @@ int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SExplainExecInfo
return TSDB_CODE_SUCCESS;
}
-int32_t initCatchSupporter(SCatchSupporter* pCatchSup, size_t rowSize, size_t keyBufSize, const char* pKey,
- const char* pDir) {
- pCatchSup->keySize = sizeof(int64_t) + sizeof(int64_t) + sizeof(TSKEY);
- pCatchSup->pKeyBuf = taosMemoryCalloc(1, pCatchSup->keySize);
- int32_t pageSize = rowSize * 32;
- int32_t bufSize = pageSize * 4096;
- createDiskbasedBuf(&pCatchSup->pDataBuf, pageSize, bufSize, pKey, pDir);
- _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
- pCatchSup->pWindowHashTable = taosHashInit(10000, hashFn, true, HASH_NO_LOCK);
- ;
- return TSDB_CODE_SUCCESS;
+int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey) {
+ pSup->keySize = sizeof(int64_t) + sizeof(TSKEY);
+ pSup->pKeyBuf = taosMemoryCalloc(1, pSup->keySize);
+ pSup->pResultRows = taosArrayInit(1024, sizeof(SResultWindowInfo));
+ if (pSup->pKeyBuf == NULL || pSup->pResultRows == NULL) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
+ int32_t pageSize = 4096;
+ while (pageSize < pSup->resultRowSize * 4) {
+ pageSize <<= 1u;
+ }
+ // at least four pages need to be in buffer
+ int32_t bufSize = 4096 * 256;
+ if (bufSize <= pageSize) {
+ bufSize = pageSize * 4;
+ }
+ return createDiskbasedBuf(&pSup->pResultBuf, pageSize, bufSize, pKey, TD_TMP_DIR_PATH);
+}
+
+int64_t getSmaWaterMark(int64_t interval, double filesFactor) {
+ int64_t waterMark = 0;
+ ASSERT(FLT_GREATEREQUAL(filesFactor,0.000000));
+ waterMark = -1 * filesFactor;
+ return waterMark;
+}
+
+bool isSmaStream(int8_t triggerType) {
+ if (triggerType == STREAM_TRIGGER_AT_ONCE ||
+ triggerType == STREAM_TRIGGER_WINDOW_CLOSE) {
+ return false;
+ }
+ return true;
}
diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c
index 484d439dea47e83e99edcbe02dd543399f17f9de..8c3a0c0e6e712ad07a381b3baa709b095ba955fb 100644
--- a/source/libs/executor/src/groupoperator.c
+++ b/source/libs/executor/src/groupoperator.c
@@ -110,9 +110,11 @@ static bool groupKeyCompare(SArray* pGroupCols, SArray* pGroupColVals, SSDataBlo
return true;
}
-static void recordNewGroupKeys(SArray* pGroupCols, SArray* pGroupColVals, SSDataBlock* pBlock, int32_t rowIndex, int32_t numOfGroupCols) {
+static void recordNewGroupKeys(SArray* pGroupCols, SArray* pGroupColVals, SSDataBlock* pBlock, int32_t rowIndex) {
SColumnDataAgg* pColAgg = NULL;
+ size_t numOfGroupCols = taosArrayGetSize(pGroupCols);
+
for (int32_t i = 0; i < numOfGroupCols; ++i) {
SColumn* pCol = taosArrayGet(pGroupCols, i);
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, pCol->slotId);
@@ -208,7 +210,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
for (int32_t j = 0; j < pBlock->info.rows; ++j) {
// Compare with the previous row of this column, and do not set the output buffer again if they are identical.
if (!pInfo->isInit) {
- recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j, numOfGroupCols);
+ recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j);
pInfo->isInit = true;
num++;
continue;
@@ -223,7 +225,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
// The first row of a new block does not belongs to the previous existed group
if (j == 0) {
num++;
- recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j, numOfGroupCols);
+ recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j);
continue;
}
@@ -238,7 +240,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
// assign the group keys or user input constant values if required
doAssignGroupKeys(pCtx, pOperator->numOfExprs, pBlock->info.rows, rowIndex);
- recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j, numOfGroupCols);
+ recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j);
num = 1;
}
@@ -269,25 +271,35 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) {
if (pOperator->status == OP_RES_TO_RETURN) {
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
- if (pRes->info.rows == 0 || !hashRemainDataInGroupInfo(&pInfo->groupResInfo)) {
- pOperator->status = OP_EXEC_DONE;
+
+ size_t rows = pRes->info.rows;
+ if (rows == 0 || !hashRemainDataInGroupInfo(&pInfo->groupResInfo)) {
+ doSetOperatorCompleted(pOperator);
}
+
+ pOperator->resultInfo.totalRows += rows;
return (pRes->info.rows == 0)? NULL:pRes;
}
- int32_t order = TSDB_ORDER_ASC;
+ int32_t order = TSDB_ORDER_ASC;
+ int32_t scanFlag = MAIN_SCAN;
+
+ int64_t st = taosGetTimestampUs();
SOperatorInfo* downstream = pOperator->pDownstream[0];
while (1) {
- publishOperatorProfEvent(downstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
- publishOperatorProfEvent(downstream, QUERY_PROF_AFTER_OPERATOR_EXEC);
if (pBlock == NULL) {
break;
}
+ int32_t code = getTableScanInfo(pOperator, &order, &scanFlag);
+ if (code != TSDB_CODE_SUCCESS) {
+ longjmp(pTaskInfo->env, code);
+ }
+
// the pDataBlock are always the same one, no need to call this again
- setInputDataBlock(pOperator, pInfo->binfo.pCtx, pBlock, order, MAIN_SCAN, true);
+ setInputDataBlock(pOperator, pInfo->binfo.pCtx, pBlock, order, scanFlag, true);
// there is an scalar expression that needs to be calculated right before apply the group aggregation.
if (pInfo->pScalarExprInfo != NULL) {
@@ -297,7 +309,6 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) {
}
}
- // setTagValue(pOperator, pRuntimeEnv->current->pTable, pInfo->binfo.pCtx, pOperator->numOfExprs);
doHashGroupbyAgg(pOperator, pBlock);
}
@@ -309,17 +320,32 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) {
// updateNumOfRowsInResultRows(pInfo->binfo.pCtx, pOperator->numOfExprs, &pInfo->binfo.resultRowInfo,
// pInfo->binfo.rowCellInfoOffset);
// }
-
+#if 0
+ if(pOperator->fpSet.encodeResultRow){
+ char *result = NULL;
+ int32_t length = 0;
+ pOperator->fpSet.encodeResultRow(pOperator, &result, &length);
+ SAggSupporter* pSup = &pInfo->aggSup;
+ taosHashClear(pSup->pResultRowHashTable);
+ pInfo->binfo.resultRowInfo.size = 0;
+ pOperator->fpSet.decodeResultRow(pOperator, result);
+ if(result){
+ taosMemoryFree(result);
+ }
+ }
+#endif
blockDataEnsureCapacity(pRes, pOperator->resultInfo.capacity);
initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, 0);
+ pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0;
+
while(1) {
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
doFilter(pInfo->pCondition, pRes, NULL);
bool hasRemain = hashRemainDataInGroupInfo(&pInfo->groupResInfo);
if (!hasRemain) {
- pOperator->status = OP_EXEC_DONE;
+ doSetOperatorCompleted(pOperator);
break;
}
@@ -328,11 +354,14 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) {
}
}
- return (pRes->info.rows == 0)? NULL:pRes;
+ size_t rows = pRes->info.rows;
+ pOperator->resultInfo.totalRows += rows;
+
+ return (rows == 0)? NULL:pRes;
}
SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResultBlock, SArray* pGroupColList,
- SNode* pCondition, SExprInfo* pScalarExprInfo, int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo, const STableGroupInfo* pTableGroupInfo) {
+ SNode* pCondition, SExprInfo* pScalarExprInfo, int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo) {
SGroupbyOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SGroupbyOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
@@ -382,7 +411,7 @@ static void doHashPartition(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
int32_t numOfGroupCols = taosArrayGetSize(pInfo->pGroupCols);
for (int32_t j = 0; j < pBlock->info.rows; ++j) {
- recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j, numOfGroupCols);
+ recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j);
int32_t len = buildGroupKeys(pInfo->keyBuf, pInfo->pGroupColVals);
SDataGroupInfo* pGInfo = NULL;
@@ -425,7 +454,6 @@ static void doHashPartition(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
memcpy(data + (*columnLen), src, varDataTLen(src));
int32_t v = (data + (*columnLen) + varDataTLen(src) - (char*)pPage);
ASSERT(v > 0);
- printf("len:%d\n", v);
contentLen = varDataTLen(src);
}
@@ -476,16 +504,13 @@ void* getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInf
int32_t *rows = (int32_t*) pPage;
if (*rows >= pInfo->rowCapacity) {
+ // release buffer
+ releaseBufPage(pInfo->pBuf, pPage);
+
// add a new page for current group
int32_t pageId = 0;
pPage = getNewBufPage(pInfo->pBuf, 0, &pageId);
taosArrayPush(p->pPageList, &pageId);
-
-// // number of rows
-// *(int32_t*) pPage = 0;
-//
-// uint64_t* groupId = (pPage + sizeof(int32_t));
-// *groupId = 0;
memset(pPage, 0, getBufPageSize(pInfo->pBuf));
}
}
@@ -538,7 +563,7 @@ static SSDataBlock* buildPartitionResult(SOperatorInfo* pOperator) {
// try next group data
pInfo->pGroupIter = taosHashIterate(pInfo->pGroupSet, pInfo->pGroupIter);
if (pInfo->pGroupIter == NULL) {
- pOperator->status = OP_EXEC_DONE;
+ doSetOperatorCompleted(pOperator);
return NULL;
}
@@ -552,9 +577,12 @@ static SSDataBlock* buildPartitionResult(SOperatorInfo* pOperator) {
blockDataFromBuf1(pInfo->binfo.pRes, page, pInfo->rowCapacity);
pInfo->pageIndex += 1;
+ releaseBufPage(pInfo->pBuf, page);
blockDataUpdateTsWindow(pInfo->binfo.pRes, 0);
pInfo->binfo.pRes->info.groupId = pGroupInfo->groupId;
+
+ pOperator->resultInfo.totalRows += pInfo->binfo.pRes->info.rows;
return pInfo->binfo.pRes;
}
@@ -571,12 +599,11 @@ static SSDataBlock* hashPartition(SOperatorInfo* pOperator) {
return buildPartitionResult(pOperator);
}
+ int64_t st = taosGetTimestampUs();
SOperatorInfo* downstream = pOperator->pDownstream[0];
while (1) {
- publishOperatorProfEvent(downstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
- publishOperatorProfEvent(downstream, QUERY_PROF_AFTER_OPERATOR_EXEC);
if (pBlock == NULL) {
break;
}
@@ -584,6 +611,8 @@ static SSDataBlock* hashPartition(SOperatorInfo* pOperator) {
doHashPartition(pOperator, pBlock);
}
+ pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0;
+
pOperator->status = OP_RES_TO_RETURN;
blockDataEnsureCapacity(pRes, 4096);
return buildPartitionResult(pOperator);
@@ -599,7 +628,7 @@ static void destroyPartitionOperatorInfo(void* param, int32_t numOfOutput) {
}
SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResultBlock, SArray* pGroupColList,
- SExecTaskInfo* pTaskInfo, const STableGroupInfo* pTableGroupInfo) {
+ SExecTaskInfo* pTaskInfo) {
SPartitionOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SPartitionOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
@@ -614,7 +643,11 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SExprInfo*
goto _error;
}
- int32_t code = createDiskbasedBuf(&pInfo->pBuf, 4096, 4096 * 256, pTaskInfo->id.str, TD_TMP_DIR_PATH);
+ uint32_t defaultPgsz = 0;
+ uint32_t defaultBufsz = 0;
+ getBufferPgSize(pResultBlock->info.rowSize, &defaultPgsz, &defaultBufsz);
+
+ int32_t code = createDiskbasedBuf(&pInfo->pBuf, defaultPgsz, defaultBufsz, pTaskInfo->id.str, TD_TMP_DIR_PATH);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -627,13 +660,14 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SExprInfo*
}
pOperator->name = "PartitionOperator";
- pOperator->blocking = true;
+ pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_PARTITION;
pInfo->binfo.pRes = pResultBlock;
- pOperator->numOfExprs = numOfCols;
+ pOperator->numOfExprs = numOfCols;
pOperator->pExpr = pExprInfo;
pOperator->info = pInfo;
+ pOperator->pTaskInfo = pTaskInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, hashPartition, NULL, NULL, destroyPartitionOperatorInfo,
NULL, NULL, NULL);
diff --git a/source/libs/executor/src/joinoperator.c b/source/libs/executor/src/joinoperator.c
index d7d6d963463bb400f940119d2192b63ddb7de16a..ad9e4d63f0a7475e990fe9f161d419458a5a9cf8 100644
--- a/source/libs/executor/src/joinoperator.c
+++ b/source/libs/executor/src/joinoperator.c
@@ -98,9 +98,7 @@ SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator) {
// todo extract method
if (pJoinInfo->pLeft == NULL || pJoinInfo->leftPos >= pJoinInfo->pLeft->info.rows) {
SOperatorInfo* ds1 = pOperator->pDownstream[0];
- publishOperatorProfEvent(ds1, QUERY_PROF_BEFORE_OPERATOR_EXEC);
pJoinInfo->pLeft = ds1->fpSet.getNextFn(ds1);
- publishOperatorProfEvent(ds1, QUERY_PROF_AFTER_OPERATOR_EXEC);
pJoinInfo->leftPos = 0;
if (pJoinInfo->pLeft == NULL) {
@@ -111,9 +109,7 @@ SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator) {
if (pJoinInfo->pRight == NULL || pJoinInfo->rightPos >= pJoinInfo->pRight->info.rows) {
SOperatorInfo* ds2 = pOperator->pDownstream[1];
- publishOperatorProfEvent(ds2, QUERY_PROF_BEFORE_OPERATOR_EXEC);
pJoinInfo->pRight = ds2->fpSet.getNextFn(ds2);
- publishOperatorProfEvent(ds2, QUERY_PROF_AFTER_OPERATOR_EXEC);
pJoinInfo->rightPos = 0;
if (pJoinInfo->pRight == NULL) {
diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c
index d0eb0ae8dad4a182413af4818bd0ece474e9d238..348d85943e9ce306347a5530617131b6e00cf89a 100644
--- a/source/libs/executor/src/scanoperator.c
+++ b/source/libs/executor/src/scanoperator.c
@@ -13,8 +13,8 @@
* along with this program. If not, see .
*/
-#include "function.h"
#include "filter.h"
+#include "function.h"
#include "functionMgt.h"
#include "os.h"
#include "querynodes.h"
@@ -142,7 +142,7 @@ static bool overlapWithTimeWindow(SInterval* pInterval, SDataBlockInfo* pBlockIn
return true;
}
- while(1) {
+ while (1) {
getNextTimeWindow(pInterval, &w, order);
if (w.ekey < pBlockInfo->window.skey) {
break;
@@ -158,7 +158,7 @@ static bool overlapWithTimeWindow(SInterval* pInterval, SDataBlockInfo* pBlockIn
return false;
}
-static void addTagPseudoColumnData(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock);
+static void addTagPseudoColumnData(SReadHandle *pHandle, SExprInfo* pPseudoExpr, int32_t numOfPseudoExpr, SSDataBlock* pBlock);
static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableScanInfo, SSDataBlock* pBlock,
uint32_t* status) {
@@ -190,7 +190,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca
pCost->skipBlocks += 1;
// clear all data in pBlock that are set when handing the previous block
- for(int32_t i = 0; i < pBlockInfo->numOfCols; ++i) {
+ for (int32_t i = 0; i < pBlockInfo->numOfCols; ++i) {
SColumnInfoData* pcol = taosArrayGet(pBlock->pDataBlock, i);
pcol->pData = NULL;
}
@@ -250,12 +250,15 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca
// currently only the tbname pseudo column
if (pTableScanInfo->numOfPseudoExpr > 0) {
- addTagPseudoColumnData(pTableScanInfo, pBlock);
+ addTagPseudoColumnData(&pTableScanInfo->readHandle, pTableScanInfo->pPseudoExpr, pTableScanInfo->numOfPseudoExpr, pBlock);
}
- // todo record the filter time cost
+ int64_t st = taosGetTimestampMs();
doFilter(pTableScanInfo->pFilterNode, pBlock, pTableScanInfo->pColMatchInfo);
+ int64_t et = taosGetTimestampMs();
+ pTableScanInfo->readRecorder.filterTime += (et - st);
+
if (pBlock->info.rows == 0) {
pCost->filterOutBlocks += 1;
qDebug("%s data block filter out, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo),
@@ -271,54 +274,62 @@ static void prepareForDescendingScan(STableScanInfo* pTableScanInfo, SqlFunction
switchCtxOrder(pCtx, numOfOutput);
// setupQueryRangeForReverseScan(pTableScanInfo);
- STimeWindow* pTWindow = &pTableScanInfo->cond.twindow;
- TSWAP(pTWindow->skey, pTWindow->ekey);
pTableScanInfo->cond.order = TSDB_ORDER_DESC;
+ for (int32_t i = 0; i < pTableScanInfo->cond.numOfTWindows; ++i) {
+ STimeWindow* pTWindow = &pTableScanInfo->cond.twindows[i];
+ TSWAP(pTWindow->skey, pTWindow->ekey);
+ }
+ SQueryTableDataCond *pCond = &pTableScanInfo->cond;
+ taosqsort(pCond->twindows,
+ pCond->numOfTWindows,
+ sizeof(STimeWindow),
+ pCond,
+ compareTimeWindow);
}
-void addTagPseudoColumnData(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock) {
+void addTagPseudoColumnData(SReadHandle *pHandle, SExprInfo* pPseudoExpr, int32_t numOfPseudoExpr, SSDataBlock* pBlock) {
// currently only the tbname pseudo column
- if (pTableScanInfo->numOfPseudoExpr == 0) {
+ if (numOfPseudoExpr == 0) {
return;
}
SMetaReader mr = {0};
- metaReaderInit(&mr, pTableScanInfo->readHandle.meta, 0);
+ metaReaderInit(&mr, pHandle->meta, 0);
metaGetTableEntryByUid(&mr, pBlock->info.uid);
- for (int32_t j = 0; j < pTableScanInfo->numOfPseudoExpr; ++j) {
- SExprInfo* pExpr = &pTableScanInfo->pPseudoExpr[j];
+ for (int32_t j = 0; j < numOfPseudoExpr; ++j) {
+ SExprInfo* pExpr = &pPseudoExpr[j];
int32_t dstSlotId = pExpr->base.resSchema.slotId;
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, dstSlotId);
+
colInfoDataEnsureCapacity(pColInfoData, 0, pBlock->info.rows);
+ colInfoDataCleanup(pColInfoData, pBlock->info.rows);
int32_t functionId = pExpr->pExpr->_function.functionId;
// this is to handle the tbname
if (fmIsScanPseudoColumnFunc(functionId)) {
- setTbNameColData(pTableScanInfo->readHandle.meta, pBlock, pColInfoData, functionId);
+ setTbNameColData(pHandle->meta, pBlock, pColInfoData, functionId);
} else { // these are tags
- const char* p = NULL;
- if(pColInfoData->info.type == TSDB_DATA_TYPE_JSON){
- const uint8_t *tmp = mr.me.ctbEntry.pTags;
- char *data = taosMemoryCalloc(kvRowLen(tmp) + 1, 1);
- if(data == NULL){
- qError("doTagScan calloc error:%d", kvRowLen(tmp) + 1);
- return;
- }
- *data = TSDB_DATA_TYPE_JSON;
- memcpy(data+1, tmp, kvRowLen(tmp));
- p = data;
- }else{
- p = metaGetTableTagVal(&mr.me, pExpr->base.pParam[0].pCol->colId);
+ STagVal tagVal = {0};
+ tagVal.cid = pExpr->base.pParam[0].pCol->colId;
+ const char *p = metaGetTableTagVal(&mr.me, pColInfoData->info.type, &tagVal);
+
+ char *data = NULL;
+ if(pColInfoData->info.type != TSDB_DATA_TYPE_JSON && p != NULL){
+ data = tTagValToData((const STagVal *)p, false);
+ }else {
+ data = (char*)p;
}
+
for (int32_t i = 0; i < pBlock->info.rows; ++i) {
- colDataAppend(pColInfoData, i, p, (p == NULL));
+ colDataAppend(pColInfoData, i, data, (data == NULL));
}
- if(pColInfoData->info.type == TSDB_DATA_TYPE_JSON){
- taosMemoryFree((void*)p);
+ if (data && (pColInfoData->info.type != TSDB_DATA_TYPE_JSON) && p != NULL &&
+ IS_VAR_DATA_TYPE(((const STagVal*)p)->type)) {
+ taosMemoryFree(data);
}
}
}
@@ -335,9 +346,8 @@ void setTbNameColData(void* pMeta, const SSDataBlock* pBlock, SColumnInfoData* p
infoData.info.bytes = sizeof(uint64_t);
colInfoDataEnsureCapacity(&infoData, 0, 1);
- colDataAppendInt64(&infoData, 0, (int64_t*) &pBlock->info.uid);
- SScalarParam srcParam = {
- .numOfRows = pBlock->info.rows, .param = pMeta, .columnData = &infoData};
+ colDataAppendInt64(&infoData, 0, (int64_t*)&pBlock->info.uid);
+ SScalarParam srcParam = {.numOfRows = pBlock->info.rows, .param = pMeta, .columnData = &infoData};
SScalarParam param = {.columnData = pColInfoData};
fpSet.process(&srcParam, 1, ¶m);
@@ -347,6 +357,8 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
STableScanInfo* pTableScanInfo = pOperator->info;
SSDataBlock* pBlock = pTableScanInfo->pResBlock;
+ int64_t st = taosGetTimestampUs();
+
while (tsdbNextDataBlock(pTableScanInfo->dataReader)) {
if (isTaskKilled(pOperator->pTaskInfo)) {
longjmp(pOperator->pTaskInfo->env, TSDB_CODE_TSC_QUERY_CANCELLED);
@@ -366,9 +378,12 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
continue;
}
+ pOperator->resultInfo.totalRows = pTableScanInfo->readRecorder.totalRows;
+ pTableScanInfo->readRecorder.elapsedTime += (taosGetTimestampUs() - st) / 1000.0;
+
+ pOperator->cost.totalCost = pTableScanInfo->readRecorder.elapsedTime;
return pBlock;
}
-
return NULL;
}
@@ -383,9 +398,15 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
// do the ascending order traverse in the first place.
while (pTableScanInfo->scanTimes < pTableScanInfo->scanInfo.numOfAsc) {
- SSDataBlock* p = doTableScanImpl(pOperator);
- if (p != NULL) {
- return p;
+ while (pTableScanInfo->curTWinIdx < pTableScanInfo->cond.numOfTWindows) {
+ SSDataBlock* p = doTableScanImpl(pOperator);
+ if (p != NULL) {
+ return p;
+ }
+ pTableScanInfo->curTWinIdx += 1;
+ if (pTableScanInfo->curTWinIdx < pTableScanInfo->cond.numOfTWindows) {
+ tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, pTableScanInfo->curTWinIdx);
+ }
}
pTableScanInfo->scanTimes += 1;
@@ -393,13 +414,14 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
if (pTableScanInfo->scanTimes < pTableScanInfo->scanInfo.numOfAsc) {
setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED);
pTableScanInfo->scanFlag = REPEAT_SCAN;
-
- STimeWindow* pWin = &pTableScanInfo->cond.twindow;
- qDebug("%s start to repeat ascending order scan data blocks due to query func required, qrange:%" PRId64
- "-%" PRId64, GET_TASKID(pTaskInfo), pWin->skey, pWin->ekey);
-
+ qDebug("%s start to repeat ascending order scan data blocks due to query func required", GET_TASKID(pTaskInfo));
+ for (int32_t i = 0; i < pTableScanInfo->cond.numOfTWindows; ++i) {
+ STimeWindow* pWin = &pTableScanInfo->cond.twindows[i];
+ qDebug("%s\t qrange:%" PRId64 "-%" PRId64, GET_TASKID(pTaskInfo), pWin->skey, pWin->ekey);
+ }
// do prepare for the next round table scan operation
- tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond);
+ tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, 0);
+ pTableScanInfo->curTWinIdx = 0;
}
}
@@ -407,31 +429,40 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
if (pTableScanInfo->scanTimes < total) {
if (pTableScanInfo->cond.order == TSDB_ORDER_ASC) {
prepareForDescendingScan(pTableScanInfo, pTableScanInfo->pCtx, pTableScanInfo->numOfOutput);
- tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond);
+ tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, 0);
+ pTableScanInfo->curTWinIdx = 0;
}
- STimeWindow* pWin = &pTableScanInfo->cond.twindow;
- qDebug("%s start to descending order scan data blocks due to query func required, qrange:%" PRId64 "-%" PRId64,
- GET_TASKID(pTaskInfo), pWin->skey, pWin->ekey);
-
+ qDebug("%s start to descending order scan data blocks due to query func required", GET_TASKID(pTaskInfo));
+ for (int32_t i = 0; i < pTableScanInfo->cond.numOfTWindows; ++i) {
+ STimeWindow* pWin = &pTableScanInfo->cond.twindows[i];
+ qDebug("%s\t qrange:%" PRId64 "-%" PRId64, GET_TASKID(pTaskInfo), pWin->skey, pWin->ekey);
+ }
while (pTableScanInfo->scanTimes < total) {
- SSDataBlock* p = doTableScanImpl(pOperator);
- if (p != NULL) {
- return p;
+ while (pTableScanInfo->curTWinIdx < pTableScanInfo->cond.numOfTWindows) {
+ SSDataBlock* p = doTableScanImpl(pOperator);
+ if (p != NULL) {
+ return p;
+ }
+ pTableScanInfo->curTWinIdx += 1;
+ if (pTableScanInfo->curTWinIdx < pTableScanInfo->cond.numOfTWindows) {
+ tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, pTableScanInfo->curTWinIdx);
+ }
}
pTableScanInfo->scanTimes += 1;
- if (pTableScanInfo->scanTimes < pTableScanInfo->scanInfo.numOfAsc) {
+ if (pTableScanInfo->scanTimes < total) {
setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED);
pTableScanInfo->scanFlag = REPEAT_SCAN;
- qDebug("%s start to repeat descending order scan data blocks due to query func required, qrange:%" PRId64
- "-%" PRId64,
- GET_TASKID(pTaskInfo), pTaskInfo->window.skey, pTaskInfo->window.ekey);
-
- // do prepare for the next round table scan operation
- tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond);
+ qDebug("%s start to repeat descending order scan data blocks due to query func required", GET_TASKID(pTaskInfo));
+ for (int32_t i = 0; i < pTableScanInfo->cond.numOfTWindows; ++i) {
+ STimeWindow* pWin = &pTableScanInfo->cond.twindows[i];
+ qDebug("%s\t qrange:%" PRId64 "-%" PRId64, GET_TASKID(pTaskInfo), pWin->skey, pWin->ekey);
+ }
+ tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, 0);
+ pTableScanInfo->curTWinIdx = 0;
}
}
}
@@ -452,6 +483,15 @@ SInterval extractIntervalInfo(const STableScanPhysiNode* pTableScanNode) {
return interval;
}
+static int32_t getTableScannerExecInfo(struct SOperatorInfo* pOptr, void** pOptrExplain, uint32_t* len) {
+ SFileBlockLoadRecorder* pRecorder = taosMemoryCalloc(1, sizeof(SFileBlockLoadRecorder));
+ STableScanInfo* pTableScanInfo = pOptr->info;
+ *pRecorder = pTableScanInfo->readRecorder;
+ *pOptrExplain = pRecorder;
+ *len = sizeof(SFileBlockLoadRecorder);
+ return 0;
+}
+
static void destroyTableScanOperatorInfo(void* param, int32_t numOfOutput) {
STableScanInfo* pTableScanInfo = (STableScanInfo*)param;
taosMemoryFree(pTableScanInfo->pResBlock);
@@ -462,7 +502,8 @@ static void destroyTableScanOperatorInfo(void* param, int32_t numOfOutput) {
}
}
-SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, tsdbReaderT pDataReader, SReadHandle* readHandle, SExecTaskInfo* pTaskInfo) {
+SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, tsdbReaderT pDataReader,
+ SReadHandle* readHandle, SExecTaskInfo* pTaskInfo) {
STableScanInfo* pInfo = taosMemoryCalloc(1, sizeof(STableScanInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
@@ -476,7 +517,8 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
SDataBlockDescNode* pDescNode = pTableScanNode->scan.node.pOutputDataBlockDesc;
int32_t numOfCols = 0;
- SArray* pColList = extractColMatchInfo(pTableScanNode->scan.pScanCols, pDescNode, &numOfCols, pTaskInfo, COL_MATCH_FROM_COL_ID);
+ SArray* pColList =
+ extractColMatchInfo(pTableScanNode->scan.pScanCols, pDescNode, &numOfCols, pTaskInfo, COL_MATCH_FROM_COL_ID);
int32_t code = initQueryTableDataCond(&pInfo->cond, pTableScanNode);
if (code != TSDB_CODE_SUCCESS) {
@@ -485,38 +527,36 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
if (pTableScanNode->scan.pScanPseudoCols != NULL) {
pInfo->pPseudoExpr = createExprInfo(pTableScanNode->scan.pScanPseudoCols, NULL, &pInfo->numOfPseudoExpr);
- pInfo->pPseudoCtx = createSqlFunctionCtx(pInfo->pPseudoExpr, pInfo->numOfPseudoExpr, &pInfo->rowCellInfoOffset);
+ pInfo->pPseudoCtx = createSqlFunctionCtx(pInfo->pPseudoExpr, pInfo->numOfPseudoExpr, &pInfo->rowCellInfoOffset);
}
pInfo->scanInfo = (SScanInfo){.numOfAsc = pTableScanNode->scanSeq[0], .numOfDesc = pTableScanNode->scanSeq[1]};
-// pInfo->scanInfo = (SScanInfo){.numOfAsc = 0, .numOfDesc = 1}; // for debug purpose
+// pInfo->scanInfo = (SScanInfo){.numOfAsc = 0, .numOfDesc = 1}; // for debug purpose
- pInfo->readHandle = *readHandle;
- pInfo->interval = extractIntervalInfo(pTableScanNode);
- pInfo->sampleRatio = pTableScanNode->ratio;
+ pInfo->readHandle = *readHandle;
+ pInfo->interval = extractIntervalInfo(pTableScanNode);
+ pInfo->sampleRatio = pTableScanNode->ratio;
pInfo->dataBlockLoadFlag = pTableScanNode->dataRequired;
- pInfo->pResBlock = createResDataBlock(pDescNode);
- pInfo->pFilterNode = pTableScanNode->scan.node.pConditions;
- pInfo->dataReader = pDataReader;
- pInfo->scanFlag = MAIN_SCAN;
- pInfo->pColMatchInfo = pColList;
+ pInfo->pResBlock = createResDataBlock(pDescNode);
+ pInfo->pFilterNode = pTableScanNode->scan.node.pConditions;
+ pInfo->dataReader = pDataReader;
+ pInfo->scanFlag = MAIN_SCAN;
+ pInfo->pColMatchInfo = pColList;
+ pInfo->curTWinIdx = 0;
- pOperator->name = "TableScanOperator"; // for debug purpose
+ pOperator->name = "TableScanOperator"; // for debug purpose
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN;
- pOperator->blocking = false;
- pOperator->status = OP_NOT_OPENED;
- pOperator->info = pInfo;
- pOperator->numOfExprs = numOfCols;
- pOperator->pTaskInfo = pTaskInfo;
-
- pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doTableScan, NULL, NULL, destroyTableScanOperatorInfo, NULL, NULL, NULL);
+ pOperator->blocking = false;
+ pOperator->status = OP_NOT_OPENED;
+ pOperator->info = pInfo;
+ pOperator->numOfExprs = numOfCols;
+ pOperator->pTaskInfo = pTaskInfo;
- static int32_t cost = 0;
+ pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doTableScan, NULL, NULL, destroyTableScanOperatorInfo,
+ NULL, NULL, getTableScannerExecInfo);
// for non-blocking operator, the open cost is always 0
pOperator->cost.openCost = 0;
- pOperator->cost.totalCost = ++cost;
- pOperator->resultInfo.totalRows = ++cost;
return pOperator;
}
@@ -631,20 +671,35 @@ static void doClearBufferedBlocks(SStreamBlockScanInfo* pInfo) {
taosArrayClear(pInfo->pBlockLists);
}
+static bool isSessionWindow(SStreamBlockScanInfo* pInfo) { return pInfo->sessionSup.pStreamAggSup != NULL; }
+
static bool prepareDataScan(SStreamBlockScanInfo* pInfo) {
SSDataBlock* pSDB = pInfo->pUpdateRes;
if (pInfo->updateResIndex < pSDB->info.rows) {
SColumnInfoData* pColDataInfo = taosArrayGet(pSDB->pDataBlock, 0);
- TSKEY *tsCols = (TSKEY*)pColDataInfo->pData;
- SResultRowInfo dumyInfo;
+ TSKEY* tsCols = (TSKEY*)pColDataInfo->pData;
+ SResultRowInfo dumyInfo;
dumyInfo.cur.pageId = -1;
- STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[pInfo->updateResIndex], &pInfo->interval,
- pInfo->interval.precision, NULL);
+ STimeWindow win;
+ if (isSessionWindow(pInfo)) {
+ SStreamAggSupporter* pAggSup = pInfo->sessionSup.pStreamAggSup;
+ int64_t gap = pInfo->sessionSup.gap;
+ int32_t winIndex = 0;
+ SResultWindowInfo* pCurWin =
+ getSessionTimeWindow(pAggSup->pResultRows, tsCols[pInfo->updateResIndex], gap, &winIndex);
+ win = pCurWin->win;
+ pInfo->updateResIndex +=
+ updateSessionWindowInfo(pCurWin, tsCols, pSDB->info.rows, pInfo->updateResIndex, gap, NULL);
+ } else {
+ win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[pInfo->updateResIndex], &pInfo->interval,
+ pInfo->interval.precision, NULL);
+ pInfo->updateResIndex += getNumOfRowsInTimeWindow(&pSDB->info, tsCols, pInfo->updateResIndex, win.ekey,
+ binarySearchForKey, NULL, TSDB_ORDER_ASC);
+ }
STableScanInfo* pTableScanInfo = pInfo->pOperatorDumy->info;
- pTableScanInfo->cond.twindow = win;
- tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond);
- pInfo->updateResIndex += getNumOfRowsInTimeWindow(&pSDB->info, tsCols, pInfo->updateResIndex,
- win.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC);
+ pTableScanInfo->cond.twindows[0] = win;
+ pTableScanInfo->curTWinIdx = 0;
+ tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, 0);
pTableScanInfo->scanTimes = 0;
return true;
} else {
@@ -679,8 +734,8 @@ static SSDataBlock* getUpdateDataBlock(SStreamBlockScanInfo* pInfo, bool inverti
// p->info.type = STREAM_INVERT;
// taosArrayClear(pInfo->tsArray);
// return p;
- SSDataBlock* pDataBlock = createOneDataBlock(pInfo->pRes, false);
- SColumnInfoData* pCol = (SColumnInfoData*) taosArrayGet(pDataBlock->pDataBlock, 0);
+ SSDataBlock* pDataBlock = createOneDataBlock(pInfo->pRes, false);
+ SColumnInfoData* pCol = (SColumnInfoData*)taosArrayGet(pDataBlock->pDataBlock, 0);
ASSERT(pCol->info.type == TSDB_DATA_TYPE_TIMESTAMP);
colInfoDataEnsureCapacity(pCol, 0, size);
for (int32_t i = 0; i < size; i++) {
@@ -696,96 +751,6 @@ static SSDataBlock* getUpdateDataBlock(SStreamBlockScanInfo* pInfo, bool inverti
return NULL;
}
-void static setSupKeyBuf(SCatchSupporter* pSup, int64_t groupId, int64_t childId, TSKEY ts) {
- int64_t* pKey = (int64_t*)pSup->pKeyBuf;
- pKey[0] = groupId;
- pKey[1] = childId;
- pKey[2] = ts;
-}
-
-static int32_t catchWidonwInfo(SSDataBlock* pDataBlock, SCatchSupporter* pSup,
- int32_t pageId, int32_t tsIndex, int64_t childId) {
- SColumnInfoData* pColDataInfo = taosArrayGet(pDataBlock->pDataBlock, tsIndex);
- TSKEY* tsCols = (int64_t*)pColDataInfo->pData;
- for (int32_t i = 0; i < pDataBlock->info.rows; i++) {
- setSupKeyBuf(pSup, pDataBlock->info.groupId, childId, tsCols[i]);
- SWindowPosition* p1 = (SWindowPosition*)taosHashGet(pSup->pWindowHashTable,
- pSup->pKeyBuf, pSup->keySize);
- if (p1 == NULL) {
- SWindowPosition pos = {.pageId = pageId, .rowId = i};
- int32_t code = taosHashPut(pSup->pWindowHashTable, pSup->pKeyBuf, pSup->keySize, &pos,
- sizeof(SWindowPosition));
- if (code != TSDB_CODE_SUCCESS ) {
- return code;
- }
- } else {
- p1->pageId = pageId;
- p1->rowId = i;
- }
- }
- return TSDB_CODE_SUCCESS;
-}
-
-static int32_t catchDatablock(SSDataBlock* pDataBlock, SCatchSupporter* pSup,
- int32_t tsIndex, int64_t childId) {
- int32_t start = 0;
- int32_t stop = 0;
- int32_t pageSize = getBufPageSize(pSup->pDataBuf);
- while(start < pDataBlock->info.rows) {
- blockDataSplitRows(pDataBlock, pDataBlock->info.hasVarCol, start, &stop, pageSize);
- SSDataBlock* pDB = blockDataExtractBlock(pDataBlock, start, stop - start + 1);
- if (pDB == NULL) {
- return terrno;
- }
- int32_t pageId = -1;
- void* pPage = getNewBufPage(pSup->pDataBuf, pDataBlock->info.groupId, &pageId);
- if (pPage == NULL) {
- blockDataDestroy(pDB);
- return terrno;
- }
- int32_t size = blockDataGetSize(pDB) + sizeof(int32_t) + pDB->info.numOfCols * sizeof(int32_t);
- assert(size <= pageSize);
- blockDataToBuf(pPage, pDB);
- setBufPageDirty(pPage, true);
- releaseBufPage(pSup->pDataBuf, pPage);
- blockDataDestroy(pDB);
- start = stop + 1;
- int32_t code = catchWidonwInfo(pDB, pSup, pageId, tsIndex, childId);
- if (code != TSDB_CODE_SUCCESS ) {
- return code;
- }
- }
- return TSDB_CODE_SUCCESS;
-}
-
-static SSDataBlock* getDataFromCatch(SStreamBlockScanInfo* pInfo) {
- SSDataBlock* pBlock = pInfo->pUpdateRes;
- if (pInfo->updateResIndex < pBlock->info.rows) {
- blockDataCleanup(pInfo->pRes);
- SCatchSupporter* pCSup = &pInfo->childAggSup;
- SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, 0);
- TSKEY *tsCols = (TSKEY*)pColDataInfo->pData;
- int32_t size = taosArrayGetSize(pInfo->childIds);
- for (int32_t i = 0; i < size; i++) {
- int64_t id = *(int64_t *)taosArrayGet(pInfo->childIds, i);
- setSupKeyBuf(pCSup, pBlock->info.groupId, id,
- tsCols[pInfo->updateResIndex]);
- SWindowPosition* pos = (SWindowPosition*)taosHashGet(pCSup->pWindowHashTable,
- pCSup->pKeyBuf, pCSup->keySize);
- void* buf = getBufPage(pCSup->pDataBuf, pos->pageId);
- SSDataBlock* pDB = createOneDataBlock(pInfo->pRes, false);
- blockDataFromBuf(pDB, buf);
- SSDataBlock* pSub = blockDataExtractBlock(pDB, pos->rowId, 1);
- blockDataMerge(pInfo->pRes, pSub, NULL);
- blockDataDestroy(pDB);
- blockDataDestroy(pSub);
- }
- pInfo->updateResIndex++;
- return pInfo->pRes;
- }
- return NULL;
-}
-
static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
// NOTE: this operator does never check if current status is done or not
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
@@ -799,15 +764,6 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
size_t total = taosArrayGetSize(pInfo->pBlockLists);
if (pInfo->blockType == STREAM_DATA_TYPE_SSDATA_BLOCK) {
- if (pInfo->scanMode == STREAM_SCAN_FROM_UPDATERES) {
- SSDataBlock* pDB = getDataFromCatch(pInfo);
- if (pDB != NULL) {
- return pDB;
- } else {
- pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
- }
- }
-
if (pInfo->validBlockIndex >= total) {
doClearBufferedBlocks(pInfo);
pOperator->status = OP_EXEC_DONE;
@@ -815,17 +771,7 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
}
int32_t current = pInfo->validBlockIndex++;
- SSDataBlock* pBlock = taosArrayGetP(pInfo->pBlockLists, current);
- if (pBlock->info.type == STREAM_REPROCESS) {
- pInfo->scanMode = STREAM_SCAN_FROM_UPDATERES;
- } else {
- int32_t code = catchDatablock(pBlock, &pInfo->childAggSup, pInfo->primaryTsIndex, 0);
- if (code != TDB_CODE_SUCCESS) {
- pTaskInfo->code = code;
- longjmp(pTaskInfo->env, code);
- }
- }
- return pBlock;
+ return taosArrayGetP(pInfo->pBlockLists, current);
} else {
if (pInfo->scanMode == STREAM_SCAN_FROM_RES) {
blockDataDestroy(pInfo->pUpdateRes);
@@ -834,6 +780,7 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
} else if (pInfo->scanMode == STREAM_SCAN_FROM_UPDATERES) {
blockDataCleanup(pInfo->pRes);
pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER;
+ prepareDataScan(pInfo);
return pInfo->pUpdateRes;
} else if (pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER) {
SSDataBlock* pSDB = doDataScan(pInfo);
@@ -866,8 +813,15 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
pInfo->pRes->info.uid = uid;
pInfo->pRes->info.type = STREAM_NORMAL;
- int32_t numOfCols = pInfo->pRes->info.numOfCols;
- for (int32_t i = 0; i < numOfCols; ++i) {
+ // for generating rollup SMA result, each time is an independent time serie.
+ // TODO temporarily used, when the statement of "partition by tbname" is ready, remove this
+ if (pInfo->assignBlockUid) {
+ pInfo->pRes->info.groupId = uid;
+ } else {
+ pInfo->pRes->info.groupId = groupId;
+ }
+
+ for (int32_t i = 0; i < taosArrayGetSize(pInfo->pColMatchInfo); ++i) {
SColMatchInfo* pColMatchInfo = taosArrayGet(pInfo->pColMatchInfo, i);
if (!pColMatchInfo->output) {
continue;
@@ -897,26 +851,31 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
pTaskInfo->code = terrno;
return NULL;
}
+
rows = pBlockInfo->rows;
+
+ // currently only the tbname pseudo column
+ if (pInfo->numOfPseudoExpr > 0) {
+ addTagPseudoColumnData(&pInfo->readHandle, pInfo->pPseudoExpr, pInfo->numOfPseudoExpr, pInfo->pRes);
+ }
+
doFilter(pInfo->pCondition, pInfo->pRes, NULL);
blockDataUpdateTsWindow(pInfo->pRes, 0);
-
break;
}
// record the scan action.
pInfo->numOfExec++;
- pInfo->numOfRows += pBlockInfo->rows;
+ pOperator->resultInfo.totalRows += pBlockInfo->rows;
if (rows == 0) {
pOperator->status = OP_EXEC_DONE;
- } else if (pInfo->interval.interval > 0) {
- SSDataBlock* upRes = getUpdateDataBlock(pInfo, true); //TODO(liuyao) get invertible from plan
+ } else if (pInfo->pUpdateInfo) {
+ SSDataBlock* upRes = getUpdateDataBlock(pInfo, true);
if (upRes) {
pInfo->pUpdateRes = upRes;
if (upRes->info.type == STREAM_REPROCESS) {
pInfo->updateResIndex = 0;
- prepareDataScan(pInfo);
pInfo->scanMode = STREAM_SCAN_FROM_UPDATERES;
} else if (upRes->info.type == STREAM_INVERT) {
pInfo->scanMode = STREAM_SCAN_FROM_RES;
@@ -929,10 +888,9 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
}
}
-SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataReader, SReadHandle* pHandle,
- uint64_t uid, SSDataBlock* pResBlock, SArray* pColList,
- SArray* pTableIdList, SExecTaskInfo* pTaskInfo, SNode* pCondition,
- SOperatorInfo* pOperatorDumy) {
+SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHandle,
+ SArray* pTableIdList, STableScanPhysiNode* pTableScanNode, SExecTaskInfo* pTaskInfo,
+ STimeWindowAggSupp* pTwSup) {
SStreamBlockScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamBlockScanInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
@@ -940,22 +898,28 @@ SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataR
goto _error;
}
- STableScanInfo* pSTInfo = (STableScanInfo*)pOperatorDumy->info;
+ SScanPhysiNode* pScanPhyNode = &pTableScanNode->scan;
+
+ SDataBlockDescNode* pDescNode = pScanPhyNode->node.pOutputDataBlockDesc;
+ SOperatorInfo* pTableScanDummy = createTableScanOperatorInfo(pTableScanNode, pDataReader, pHandle, pTaskInfo);
- int32_t numOfOutput = taosArrayGetSize(pColList);
+ STableScanInfo* pSTInfo = (STableScanInfo*)pTableScanDummy->info;
+
+ int32_t numOfCols = 0;
+ pInfo->pColMatchInfo = extractColMatchInfo(pScanPhyNode->pScanCols, pDescNode, &numOfCols, pTaskInfo, COL_MATCH_FROM_COL_ID);
- SArray* pColIds = taosArrayInit(4, sizeof(int16_t));
+ int32_t numOfOutput = taosArrayGetSize(pInfo->pColMatchInfo);
+ SArray* pColIds = taosArrayInit(numOfOutput, sizeof(int16_t));
for (int32_t i = 0; i < numOfOutput; ++i) {
- SColMatchInfo* id = taosArrayGet(pColList, i);
+ SColMatchInfo* id = taosArrayGet(pInfo->pColMatchInfo, i);
+
int16_t colId = id->colId;
taosArrayPush(pColIds, &colId);
}
- pInfo->pColMatchInfo = pColList;
-
// set the extract column id to streamHandle
- tqReadHandleSetColIdList((STqReadHandle*)streamReadHandle, pColIds);
- int32_t code = tqReadHandleSetTbUidList(streamReadHandle, pTableIdList);
+ tqReadHandleSetColIdList((STqReadHandle*)pHandle->reader, pColIds);
+ int32_t code = tqReadHandleSetTbUidList(pHandle->reader, pTableIdList);
if (code != 0) {
goto _error;
}
@@ -971,37 +935,43 @@ SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataR
goto _error;
}
- pInfo->primaryTsIndex = 0; // TODO(liuyao) get it from physical plan
- if (pSTInfo->interval.interval > 0) {
- pInfo->pUpdateInfo = updateInfoInitP(&pSTInfo->interval, 10000); // TODO(liuyao) get watermark from physical plan
+ if (isSmaStream(pTableScanNode->triggerType)) {
+ pTwSup->waterMark = getSmaWaterMark(pSTInfo->interval.interval,
+ pTableScanNode->filesFactor);
+ }
+ pInfo->primaryTsIndex = 0; // pTableScanNode->tsColId;
+ if (pSTInfo->interval.interval > 0 && pDataReader) {
+ pInfo->pUpdateInfo = updateInfoInitP(&pSTInfo->interval, pTwSup->waterMark);
} else {
pInfo->pUpdateInfo = NULL;
}
- pInfo->readHandle = *pHandle;
- pInfo->tableUid = uid;
- pInfo->streamBlockReader = streamReadHandle;
- pInfo->pRes = pResBlock;
- pInfo->pCondition = pCondition;
- pInfo->pDataReader = pDataReader;
- pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
- pInfo->pOperatorDumy = pOperatorDumy;
- pInfo->interval = pSTInfo->interval;
-
- size_t childKeyBufSize = sizeof(int64_t) + sizeof(int64_t) + sizeof(TSKEY);
- initCatchSupporter(&pInfo->childAggSup, 1024, childKeyBufSize,
- "StreamFinalInterval", TD_TMP_DIR_PATH); // TODO(liuyao) get row size from phy plan
-
- pOperator->name = "StreamBlockScanOperator";
+ // create the pseduo columns info
+ if (pTableScanNode->scan.pScanPseudoCols != NULL) {
+ pInfo->pPseudoExpr = createExprInfo(pTableScanNode->scan.pScanPseudoCols, NULL, &pInfo->numOfPseudoExpr);
+ }
+
+ pInfo->readHandle = *pHandle;
+ pInfo->tableUid = pScanPhyNode->uid;
+ pInfo->streamBlockReader = pHandle->reader;
+ pInfo->pRes = createResDataBlock(pDescNode);
+ pInfo->pCondition = pScanPhyNode->node.pConditions;
+ pInfo->pDataReader = pDataReader;
+ pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
+ pInfo->pOperatorDumy = pTableScanDummy;
+ pInfo->interval = pSTInfo->interval;
+ pInfo->sessionSup = (SessionWindowSupporter){.pStreamAggSup = NULL, .gap = -1};
+
+ pOperator->name = "StreamBlockScanOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN;
- pOperator->blocking = false;
- pOperator->status = OP_NOT_OPENED;
- pOperator->info = pInfo;
- pOperator->numOfExprs = pResBlock->info.numOfCols;
- pOperator->pTaskInfo = pTaskInfo;
+ pOperator->blocking = false;
+ pOperator->status = OP_NOT_OPENED;
+ pOperator->info = pInfo;
+ pOperator->numOfExprs = pInfo->pRes->info.numOfCols;
+ pOperator->pTaskInfo = pTaskInfo;
- pOperator->fpSet =
- createOperatorFpSet(operatorDummyOpenFn, doStreamBlockScan, NULL, NULL, operatorDummyCloseFn, NULL, NULL, NULL);
+ pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamBlockScan, NULL,
+ NULL, operatorDummyCloseFn, NULL, NULL, NULL);
return pOperator;
@@ -1017,8 +987,9 @@ static void destroySysScanOperator(void* param, int32_t numOfOutput) {
blockDataDestroy(pInfo->pRes);
const char* name = tNameGetTableName(&pInfo->name);
- if (strncasecmp(name, TSDB_INS_TABLE_USER_TABLES, TSDB_TABLE_FNAME_LEN) == 0) {
+ if (strncasecmp(name, TSDB_INS_TABLE_USER_TABLES, TSDB_TABLE_FNAME_LEN) == 0 || pInfo->pCur != NULL) {
metaCloseTbCursor(pInfo->pCur);
+ pInfo->pCur = NULL;
}
taosArrayDestroy(pInfo->scanCols);
@@ -1189,18 +1160,18 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) {
// retrieve local table list info from vnode
const char* name = tNameGetTableName(&pInfo->name);
if (strncasecmp(name, TSDB_INS_TABLE_USER_TABLES, TSDB_TABLE_FNAME_LEN) == 0) {
+ if (pOperator->status == OP_EXEC_DONE) {
+ return NULL;
+ }
+
// the retrieve is executed on the mnode, so return tables that belongs to the information schema database.
if (pInfo->readHandle.mnd != NULL) {
- if (pOperator->status == OP_EXEC_DONE) {
- return NULL;
- }
-
buildSysDbTableInfo(pInfo, pOperator->resultInfo.capacity);
doFilterResult(pInfo);
pInfo->loadInfo.totalRows += pInfo->pRes->info.rows;
- pOperator->status = OP_EXEC_DONE;
+ doSetOperatorCompleted(pOperator);
return (pInfo->pRes->info.rows == 0) ? NULL : pInfo->pRes;
} else {
if (pInfo->pCur == NULL) {
@@ -1226,7 +1197,9 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) {
blockDataEnsureCapacity(p, pOperator->resultInfo.capacity);
char n[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
- while (metaTbCursorNext(pInfo->pCur) == 0) {
+
+ int32_t ret = 0;
+ while ((ret = metaTbCursorNext(pInfo->pCur)) == 0) {
STR_TO_VARSTR(n, pInfo->pCur->mr.me.name);
// table name
@@ -1260,7 +1233,7 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) {
// number of columns
pColInfoData = taosArrayGet(p->pDataBlock, 3);
- colDataAppend(pColInfoData, numOfRows, (char*)&mr.me.stbEntry.schema.nCols, false);
+ colDataAppend(pColInfoData, numOfRows, (char*)&mr.me.stbEntry.schemaRow.nCols, false);
// super table name
STR_TO_VARSTR(str, mr.me.name);
@@ -1284,7 +1257,7 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) {
// number of columns
pColInfoData = taosArrayGet(p->pDataBlock, 3);
- colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.ntbEntry.schema.nCols, false);
+ colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.ntbEntry.schemaRow.nCols, false);
// super table name
pColInfoData = taosArrayGet(p->pDataBlock, 4);
@@ -1309,6 +1282,13 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) {
}
}
+ // todo temporarily free the cursor here, the true reason why the free is not valid needs to be found
+ if (ret != 0) {
+ metaCloseTbCursor(pInfo->pCur);
+ pInfo->pCur = NULL;
+ doSetOperatorCompleted(pOperator);
+ }
+
p->info.rows = numOfRows;
pInfo->pRes->info.rows = numOfRows;
@@ -1580,20 +1560,19 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) {
SExprInfo* pExprInfo = &pOperator->pExpr[0];
SSDataBlock* pRes = pInfo->pRes;
- if (taosArrayGetSize(pInfo->pTableGroups->pGroupList) == 0) {
+ int32_t size = taosArrayGetSize(pInfo->pTableList->pTableList);
+ if (size == 0) {
setTaskStatus(pTaskInfo, TASK_COMPLETED);
return NULL;
}
- SArray* pa = taosArrayGetP(pInfo->pTableGroups->pGroupList, 0);
-
char str[512] = {0};
int32_t count = 0;
SMetaReader mr = {0};
metaReaderInit(&mr, pInfo->readHandle.meta, 0);
- while (pInfo->curPos < pInfo->pTableGroups->numOfTables && count < pOperator->resultInfo.capacity) {
- STableKeyInfo* item = taosArrayGet(pa, pInfo->curPos);
+ while (pInfo->curPos < size && count < pOperator->resultInfo.capacity) {
+ STableKeyInfo* item = taosArrayGet(pInfo->pTableList->pTableList, pInfo->curPos);
metaGetTableEntryByUid(&mr, item->uid);
for (int32_t j = 0; j < pOperator->numOfExprs; ++j) {
@@ -1603,28 +1582,29 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) {
if (fmIsScanPseudoColumnFunc(pExprInfo[j].pExpr->_function.functionId)) {
STR_TO_VARSTR(str, mr.me.name);
colDataAppend(pDst, count, str, false);
- } else { // it is a tag value
- if(pDst->info.type == TSDB_DATA_TYPE_JSON){
- const uint8_t *tmp = mr.me.ctbEntry.pTags;
- char *data = taosMemoryCalloc(kvRowLen(tmp) + 1, 1);
- if(data == NULL){
- qError("doTagScan calloc error:%d", kvRowLen(tmp) + 1);
- return NULL;
- }
- *data = TSDB_DATA_TYPE_JSON;
- memcpy(data+1, tmp, kvRowLen(tmp));
- colDataAppend(pDst, count, data, false);
+ } else { // it is a tag value
+ STagVal val = {0};
+ val.cid = pExprInfo[j].base.pParam[0].pCol->colId;
+ const char* p = metaGetTableTagVal(&mr.me, pDst->info.type, &val);
+
+ char *data = NULL;
+ if(pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL){
+ data = tTagValToData((const STagVal *)p, false);
+ }else {
+ data = (char*)p;
+ }
+ colDataAppend(pDst, count, data, (data == NULL));
+
+ if(pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL
+ && IS_VAR_DATA_TYPE(((const STagVal *)p)->type) && data != NULL){
taosMemoryFree(data);
- }else{
- const char* p = metaGetTableTagVal(&mr.me, pExprInfo[j].base.pParam[0].pCol->colId);
- colDataAppend(pDst, count, p, (p == NULL));
}
}
}
count += 1;
- if (++pInfo->curPos >= pInfo->pTableGroups->numOfTables) {
- pOperator->status = OP_EXEC_DONE;
+ if (++pInfo->curPos >= size) {
+ doSetOperatorCompleted(pOperator);
}
}
@@ -1636,6 +1616,8 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) {
}
pRes->info.rows = count;
+ pOperator->resultInfo.totalRows += count;
+
return (pRes->info.rows == 0) ? NULL : pInfo->pRes;
}
@@ -1645,15 +1627,15 @@ static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput) {
}
SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, SExprInfo* pExpr, int32_t numOfOutput,
- SSDataBlock* pResBlock, SArray* pColMatchInfo,
- STableGroupInfo* pTableGroupInfo, SExecTaskInfo* pTaskInfo) {
+ SSDataBlock* pResBlock, SArray* pColMatchInfo, STableListInfo* pTableListInfo,
+ SExecTaskInfo* pTaskInfo) {
STagScanInfo* pInfo = taosMemoryCalloc(1, sizeof(STagScanInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
goto _error;
}
- pInfo->pTableGroups = pTableGroupInfo;
+ pInfo->pTableList = pTableListInfo;
pInfo->pColMatchInfo = pColMatchInfo;
pInfo->pRes = pResBlock;
pInfo->readHandle = *pReadHandle;
diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c
index 990dc0f20002134eebc0cbe15a0fc4d0e34e6dc8..8f5fa88070fde1625385fd6e691ccccf424c2094 100644
--- a/source/libs/executor/src/sortoperator.c
+++ b/source/libs/executor/src/sortoperator.c
@@ -2,6 +2,9 @@
#include "executorimpl.h"
static SSDataBlock* doSort(SOperatorInfo* pOperator);
+static int32_t doOpenSortOperator(SOperatorInfo* pOperator);
+static int32_t getExplainExecInfo(SOperatorInfo* pOptr, void** pOptrExplain, uint32_t* len);
+
static void destroyOrderOperatorInfo(void* param, int32_t numOfOutput);
SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSDataBlock* pResBlock, SArray* pSortInfo, SExprInfo* pExprInfo, int32_t numOfCols,
@@ -35,7 +38,7 @@ SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSDataBlock* pR
pOperator->pTaskInfo = pTaskInfo;
pOperator->fpSet =
- createOperatorFpSet(operatorDummyOpenFn, doSort, NULL, NULL, destroyOrderOperatorInfo, NULL, NULL, NULL);
+ createOperatorFpSet(doOpenSortOperator, doSort, NULL, NULL, destroyOrderOperatorInfo, NULL, NULL, getExplainExecInfo);
int32_t code = appendDownstream(pOperator, &downstream, 1);
return pOperator;
@@ -121,20 +124,17 @@ void applyScalarFunction(SSDataBlock* pBlock, void* param) {
}
}
-SSDataBlock* doSort(SOperatorInfo* pOperator) {
- if (pOperator->status == OP_EXEC_DONE) {
- return NULL;
- }
-
- SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+int32_t doOpenSortOperator(SOperatorInfo* pOperator) {
SSortOperatorInfo* pInfo = pOperator->info;
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
- if (pOperator->status == OP_RES_TO_RETURN) {
- return getSortedBlockData(pInfo->pSortHandle, pInfo->binfo.pRes, pOperator->resultInfo.capacity, pInfo->pColMatchInfo);
+ if (OPTR_IS_OPENED(pOperator)) {
+ return TSDB_CODE_SUCCESS;
}
-// pInfo->binfo.pRes is not equalled to the input datablock.
-// int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize;
+ pInfo->startTs = taosGetTimestampUs();
+
+ // pInfo->binfo.pRes is not equalled to the input datablock.
pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, pInfo->pColMatchInfo, SORT_SINGLESOURCE_SORT,
-1, -1, NULL, pTaskInfo->id.str);
@@ -146,12 +146,39 @@ SSDataBlock* doSort(SOperatorInfo* pOperator) {
int32_t code = tsortOpen(pInfo->pSortHandle);
taosMemoryFreeClear(ps);
+
if (code != TSDB_CODE_SUCCESS) {
longjmp(pTaskInfo->env, terrno);
}
+ pOperator->cost.openCost = (taosGetTimestampUs() - pInfo->startTs)/1000.0;
pOperator->status = OP_RES_TO_RETURN;
- return getSortedBlockData(pInfo->pSortHandle, pInfo->binfo.pRes, pOperator->resultInfo.capacity, pInfo->pColMatchInfo);
+
+ OPTR_SET_OPENED(pOperator);
+ return TSDB_CODE_SUCCESS;
+}
+
+SSDataBlock* doSort(SOperatorInfo* pOperator) {
+ if (pOperator->status == OP_EXEC_DONE) {
+ return NULL;
+ }
+
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SSortOperatorInfo* pInfo = pOperator->info;
+
+ int32_t code = pOperator->fpSet._openFn(pOperator);
+ if (code != TSDB_CODE_SUCCESS) {
+ longjmp(pTaskInfo->env, code);
+ }
+
+ SSDataBlock* pBlock = getSortedBlockData(pInfo->pSortHandle, pInfo->binfo.pRes, pOperator->resultInfo.capacity, pInfo->pColMatchInfo);
+
+ if (pBlock != NULL) {
+ pOperator->resultInfo.totalRows += pBlock->info.rows;
+ } else {
+ doSetOperatorCompleted(pOperator);
+ }
+ return pBlock;
}
void destroyOrderOperatorInfo(void* param, int32_t numOfOutput) {
@@ -161,3 +188,15 @@ void destroyOrderOperatorInfo(void* param, int32_t numOfOutput) {
taosArrayDestroy(pInfo->pSortInfo);
taosArrayDestroy(pInfo->pColMatchInfo);
}
+
+int32_t getExplainExecInfo(SOperatorInfo* pOptr, void** pOptrExplain, uint32_t* len) {
+ ASSERT(pOptr != NULL);
+ SSortExecInfo* pInfo = taosMemoryCalloc(1, sizeof(SSortExecInfo));
+
+ SSortOperatorInfo *pOperatorInfo = (SSortOperatorInfo*)pOptr->info;
+
+ *pInfo = tsortGetSortExecInfo(pOperatorInfo->pSortHandle);
+ *pOptrExplain = pInfo;
+ *len = sizeof(SSortExecInfo);
+ return TSDB_CODE_SUCCESS;
+}
diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c
index 9e073ec05bdc31c392b91bfa13d857e3a65465f4..41037e9f163ff476e6ba583d316357ade84e773a 100644
--- a/source/libs/executor/src/timewindowoperator.c
+++ b/source/libs/executor/src/timewindowoperator.c
@@ -1,3 +1,4 @@
+#include "function.h"
#include "executorimpl.h"
#include "functionMgt.h"
#include "tdatablock.h"
@@ -9,6 +10,12 @@ typedef enum SResultTsInterpType {
} SResultTsInterpType;
static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator);
+static SSDataBlock* doStreamSessionWindowAgg(SOperatorInfo* pOperator);
+
+static int64_t* extractTsCol(SSDataBlock* pBlock, const SIntervalAggOperatorInfo* pInfo);
+
+static SResultRowPosition addToOpenWindowList(SResultRowInfo* pResultRowInfo, const SResultRow* pResult);
+static void doCloseWindow(SResultRowInfo* pResultRowInfo, const SIntervalAggOperatorInfo* pInfo, SResultRow* pResult);
/*
* There are two cases to handle:
@@ -20,47 +27,11 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator);
* is a previous result generated or not.
*/
static void setIntervalQueryRange(STableQueryInfo* pTableQueryInfo, TSKEY key, STimeWindow* pQRange) {
- // SResultRowInfo* pResultRowInfo = &pTableQueryInfo->resInfo;
- // if (pResultRowInfo->curPos != -1) {
- // return;
- // }
-
- // pTableQueryInfo->win.skey = key;
- // STimeWindow win = {.skey = key, .ekey = pQRange->ekey};
-
- /**
- * In handling the both ascending and descending order super table query, we need to find the first qualified
- * timestamp of this table, and then set the first qualified start timestamp.
- * In ascending query, the key is the first qualified timestamp. However, in the descending order query, additional
- * operations involve.
- */
- // STimeWindow w = TSWINDOW_INITIALIZER;
- //
- // TSKEY sk = TMIN(win.skey, win.ekey);
- // TSKEY ek = TMAX(win.skey, win.ekey);
- // getAlignQueryTimeWindow(pQueryAttr, win.skey, sk, ek, &w);
-
- // if (pResultRowInfo->prevSKey == TSKEY_INITIAL_VAL) {
- // if (!QUERY_IS_ASC_QUERY(pQueryAttr)) {
- // assert(win.ekey == pQueryAttr->window.ekey);
- // }
- //
- // pResultRowInfo->prevSKey = w.skey;
- // }
-
- // pTableQueryInfo->lastKey = pTableQueryInfo->win.skey;
+ // do nothing
}
-static TSKEY getStartTsKey(STimeWindow* win, const TSKEY* tsCols, int32_t rows, bool ascQuery) {
- TSKEY ts = TSKEY_INITIAL_VAL;
- if (tsCols == NULL) {
- ts = ascQuery ? win->skey : win->ekey;
- } else {
-// int32_t offset = ascQuery ? 0 : rows - 1;
- ts = tsCols[0];
- }
-
- return ts;
+static TSKEY getStartTsKey(STimeWindow* win, const TSKEY* tsCols) {
+ return tsCols == NULL? win->skey:tsCols[0];
}
static void getInitialStartTimeWindow(SInterval* pInterval, int32_t precision, TSKEY ts, STimeWindow* w,
@@ -133,8 +104,10 @@ static int32_t setTimeWindowOutputBuf(SResultRowInfo* pResultRowInfo, STimeWindo
// set time window for current result
pResultRow->win = (*win);
+
*pResult = pResultRow;
setResultRowInitCtx(pResultRow, pCtx, numOfOutput, rowCellInfoOffset);
+
return TSDB_CODE_SUCCESS;
}
@@ -162,38 +135,38 @@ static void doKeepNewWindowStartInfo(SWindowRowsSup* pRowSup, const int64_t* tsL
static FORCE_INLINE int32_t getForwardStepsInBlock(int32_t numOfRows, __block_search_fn_t searchFn, TSKEY ekey,
int16_t pos, int16_t order, int64_t* pData) {
- int32_t forwardStep = 0;
+ int32_t forwardRows = 0;
if (order == TSDB_ORDER_ASC) {
int32_t end = searchFn((char*)&pData[pos], numOfRows - pos, ekey, order);
if (end >= 0) {
- forwardStep = end;
+ forwardRows = end;
if (pData[end + pos] == ekey) {
- forwardStep += 1;
+ forwardRows += 1;
}
}
} else {
int32_t end = searchFn((char*)&pData[pos], numOfRows - pos, ekey, order);
if (end >= 0) {
- forwardStep = end;
+ forwardRows = end;
if (pData[end + pos] == ekey) {
- forwardStep += 1;
+ forwardRows += 1;
}
}
// int32_t end = searchFn((char*)pData, pos + 1, ekey, order);
// if (end >= 0) {
-// forwardStep = pos - end;
+// forwardRows = pos - end;
//
// if (pData[end] == ekey) {
-// forwardStep += 1;
+// forwardRows += 1;
// }
// }
}
- assert(forwardStep >= 0);
- return forwardStep;
+ assert(forwardRows >= 0);
+ return forwardRows;
}
int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order) {
@@ -338,34 +311,40 @@ static void getNextTimeWindow(SInterval* pInterval, int32_t precision, int32_t o
tw->ekey -= 1;
}
-void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo, SArray* pDataBlock, TSKEY prevTs,
+void doTimeWindowInterpolation(SIntervalAggOperatorInfo *pInfo, int32_t numOfExprs, SArray* pDataBlock, TSKEY prevTs,
int32_t prevRowIndex, TSKEY curTs, int32_t curRowIndex, TSKEY windowKey, int32_t type) {
- SExprInfo* pExpr = pOperator->pExpr;
+ SqlFunctionCtx* pCtx = pInfo->binfo.pCtx;
- SqlFunctionCtx* pCtx = pInfo->pCtx;
+ int32_t index = 1;
+ for (int32_t k = 0; k < numOfExprs; ++k) {
- for (int32_t k = 0; k < pOperator->numOfExprs; ++k) {
- int32_t functionId = pCtx[k].functionId;
- if (functionId != FUNCTION_TWA && functionId != FUNCTION_INTERP) {
+ // todo use flag instead of function name
+ if (strcmp(pCtx[k].pExpr->pExpr->_function.functionName, "twa") != 0) {
pCtx[k].start.key = INT64_MIN;
continue;
}
- SColIndex* pColIndex = NULL /*&pExpr[k].base.colInfo*/;
- int16_t index = pColIndex->colIndex;
- SColumnInfoData* pColInfo = taosArrayGet(pDataBlock, index);
+// if (functionId != FUNCTION_TWA && functionId != FUNCTION_INTERP) {
+// pCtx[k].start.key = INT64_MIN;
+// continue;
+// }
- // assert(pColInfo->info.colId == pColIndex->info.colId && curTs != windowKey);
- double v1 = 0, v2 = 0, v = 0;
+ SFunctParam* pParam = &pCtx[k].param[0];
+ SColumnInfoData* pColInfo = taosArrayGet(pDataBlock, pParam->pCol->slotId);
+ ASSERT(pColInfo->info.colId == pParam->pCol->colId && curTs != windowKey);
+
+ double v1 = 0, v2 = 0, v = 0;
if (prevRowIndex == -1) {
- // GET_TYPED_DATA(v1, double, pColInfo->info.type, (char*)pRuntimeEnv->prevRow[index]);
+ SGroupKeys* p = taosArrayGet(pInfo->pPrevValues, index);
+ GET_TYPED_DATA(v1, double, pColInfo->info.type, p->pData);
} else {
- GET_TYPED_DATA(v1, double, pColInfo->info.type, (char*)pColInfo->pData + prevRowIndex * pColInfo->info.bytes);
+ GET_TYPED_DATA(v1, double, pColInfo->info.type, colDataGetData(pColInfo, prevRowIndex));
}
- GET_TYPED_DATA(v2, double, pColInfo->info.type, (char*)pColInfo->pData + curRowIndex * pColInfo->info.bytes);
+ GET_TYPED_DATA(v2, double, pColInfo->info.type, colDataGetData(pColInfo, curRowIndex));
+#if 0
if (functionId == FUNCTION_INTERP) {
if (type == RESULT_ROW_START_INTERP) {
pCtx[k].start.key = prevTs;
@@ -385,6 +364,8 @@ void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo,
}
}
} else if (functionId == FUNCTION_TWA) {
+#endif
+
SPoint point1 = (SPoint){.key = prevTs, .val = &v1};
SPoint point2 = (SPoint){.key = curTs, .val = &v2};
SPoint point = (SPoint){.key = windowKey, .val = &v};
@@ -398,8 +379,13 @@ void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo,
pCtx[k].end.key = point.key;
pCtx[k].end.val = v;
}
+
+ index += 1;
}
+#if 0
}
+#endif
+
}
static void setNotInterpoWindowKey(SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t type) {
@@ -414,62 +400,59 @@ static void setNotInterpoWindowKey(SqlFunctionCtx* pCtx, int32_t numOfOutput, in
}
}
-static bool setTimeWindowInterpolationStartTs(SOperatorInfo* pOperatorInfo, SqlFunctionCtx* pCtx, int32_t pos,
- int32_t numOfRows, SArray* pDataBlock, const TSKEY* tsCols,
- STimeWindow* win) {
- bool ascQuery = true;
+static bool setTimeWindowInterpolationStartTs(SIntervalAggOperatorInfo *pInfo, SqlFunctionCtx* pCtx, int32_t numOfExprs, int32_t pos,
+ SSDataBlock* pBlock, const TSKEY* tsCols, STimeWindow* win) {
+ bool ascQuery = (pInfo->order == TSDB_ORDER_ASC);
+
TSKEY curTs = tsCols[pos];
- TSKEY lastTs = 0; //*(TSKEY*)pRuntimeEnv->prevRow[0];
+
+ SGroupKeys* pTsKey = taosArrayGet(pInfo->pPrevValues, 0);
+ TSKEY lastTs = *(int64_t*) pTsKey->pData;
// lastTs == INT64_MIN and pos == 0 means this is the first time window, interpolation is not needed.
// start exactly from this point, no need to do interpolation
TSKEY key = ascQuery ? win->skey : win->ekey;
if (key == curTs) {
- setNotInterpoWindowKey(pCtx, pOperatorInfo->numOfExprs, RESULT_ROW_START_INTERP);
+ setNotInterpoWindowKey(pCtx, numOfExprs, RESULT_ROW_START_INTERP);
return true;
}
- if (lastTs == INT64_MIN && ((pos == 0 && ascQuery) || (pos == (numOfRows - 1) && !ascQuery))) {
- setNotInterpoWindowKey(pCtx, pOperatorInfo->numOfExprs, RESULT_ROW_START_INTERP);
- return true;
+ // it is the first time window, no need to do interpolation
+ if (pTsKey->isNull && pos == 0) {
+ setNotInterpoWindowKey(pCtx, numOfExprs, RESULT_ROW_START_INTERP);
+ } else {
+ TSKEY prevTs = ((pos == 0) ? lastTs : tsCols[pos - 1]);
+ doTimeWindowInterpolation(pInfo, numOfExprs, pBlock->pDataBlock, prevTs, pos - 1, curTs, pos, key,
+ RESULT_ROW_START_INTERP);
}
- int32_t step = 1; // GET_FORWARD_DIRECTION_FACTOR(pQueryAttr->order.order);
- TSKEY prevTs = ((pos == 0 && ascQuery) || (pos == (numOfRows - 1) && !ascQuery)) ? lastTs : tsCols[pos - step];
-
- doTimeWindowInterpolation(pOperatorInfo, pOperatorInfo->info, pDataBlock, prevTs, pos - step, curTs, pos, key,
- RESULT_ROW_START_INTERP);
return true;
}
-static bool setTimeWindowInterpolationEndTs(SOperatorInfo* pOperatorInfo, SqlFunctionCtx* pCtx, int32_t endRowIndex,
- SArray* pDataBlock, const TSKEY* tsCols, TSKEY blockEkey,
- STimeWindow* win) {
- int32_t order = TSDB_ORDER_ASC;
- int32_t numOfOutput = pOperatorInfo->numOfExprs;
+static bool setTimeWindowInterpolationEndTs(SIntervalAggOperatorInfo *pInfo, SqlFunctionCtx* pCtx, int32_t numOfExprs, int32_t endRowIndex,
+ SArray* pDataBlock, const TSKEY* tsCols, TSKEY blockEkey, STimeWindow* win) {
+ int32_t order = pInfo->order;
TSKEY actualEndKey = tsCols[endRowIndex];
- TSKEY key = order ? win->ekey : win->skey;
+ TSKEY key = (order == TSDB_ORDER_ASC) ? win->ekey : win->skey;
// not ended in current data block, do not invoke interpolation
- if ((key > blockEkey /*&& QUERY_IS_ASC_QUERY(pQueryAttr)*/) ||
- (key < blockEkey /*&& !QUERY_IS_ASC_QUERY(pQueryAttr)*/)) {
- setNotInterpoWindowKey(pCtx, numOfOutput, RESULT_ROW_END_INTERP);
+ if ((key > blockEkey && (order == TSDB_ORDER_ASC)) || (key < blockEkey && (order == TSDB_ORDER_DESC))) {
+ setNotInterpoWindowKey(pCtx, numOfExprs, RESULT_ROW_END_INTERP);
return false;
}
- // there is actual end point of current time window, no interpolation need
+ // there is actual end point of current time window, no interpolation needs
if (key == actualEndKey) {
- setNotInterpoWindowKey(pCtx, numOfOutput, RESULT_ROW_END_INTERP);
+ setNotInterpoWindowKey(pCtx, numOfExprs, RESULT_ROW_END_INTERP);
return true;
}
- int32_t step = GET_FORWARD_DIRECTION_FACTOR(order);
- int32_t nextRowIndex = endRowIndex + step;
+ int32_t nextRowIndex = endRowIndex + 1;
assert(nextRowIndex >= 0);
TSKEY nextKey = tsCols[nextRowIndex];
- doTimeWindowInterpolation(pOperatorInfo, pOperatorInfo->info, pDataBlock, actualEndKey, endRowIndex, nextKey,
+ doTimeWindowInterpolation(pInfo, numOfExprs, pDataBlock, actualEndKey, endRowIndex, nextKey,
nextRowIndex, key, RESULT_ROW_END_INTERP);
return true;
}
@@ -541,8 +524,8 @@ static int32_t getNextQualifiedWindow(SInterval* pInterval, STimeWindow* pNext,
return startPos;
}
-static bool resultRowInterpolated(SResultRow* pResult, SResultTsInterpType type) {
- assert(pResult != NULL && (type == RESULT_ROW_START_INTERP || type == RESULT_ROW_END_INTERP));
+static bool isResultRowInterpolated(SResultRow* pResult, SResultTsInterpType type) {
+ ASSERT(pResult != NULL && (type == RESULT_ROW_START_INTERP || type == RESULT_ROW_END_INTERP));
if (type == RESULT_ROW_START_INTERP) {
return pResult->startInterp == true;
} else {
@@ -559,34 +542,29 @@ static void setResultRowInterpo(SResultRow* pResult, SResultTsInterpType type) {
}
}
-static void doWindowBorderInterpolation(SOperatorInfo* pOperatorInfo, SSDataBlock* pBlock, SqlFunctionCtx* pCtx,
- SResultRow* pResult, STimeWindow* win, int32_t startPos, int32_t forwardStep,
- int32_t order, bool timeWindowInterpo) {
- if (!timeWindowInterpo) {
+static void doWindowBorderInterpolation(SIntervalAggOperatorInfo *pInfo, SSDataBlock* pBlock, int32_t numOfExprs, SqlFunctionCtx* pCtx,
+ SResultRow* pResult, STimeWindow* win, int32_t startPos, int32_t forwardRows) {
+ if (!pInfo->timeWindowInterpo) {
return;
}
- assert(pBlock != NULL);
- int32_t step = GET_FORWARD_DIRECTION_FACTOR(order);
-
+ ASSERT(pBlock != NULL);
if (pBlock->pDataBlock == NULL) {
// tscError("pBlock->pDataBlock == NULL");
return;
}
- SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, 0);
+ SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, pInfo->primaryTsIndex);
TSKEY* tsCols = (TSKEY*)(pColInfo->pData);
- bool done = resultRowInterpolated(pResult, RESULT_ROW_START_INTERP);
+ bool done = isResultRowInterpolated(pResult, RESULT_ROW_START_INTERP);
if (!done) { // it is not interpolated, now start to generated the interpolated value
- int32_t startRowIndex = startPos;
- bool interp = setTimeWindowInterpolationStartTs(pOperatorInfo, pCtx, startRowIndex, pBlock->info.rows,
- pBlock->pDataBlock, tsCols, win);
+ bool interp = setTimeWindowInterpolationStartTs(pInfo, pCtx, numOfExprs, startPos, pBlock, tsCols, win);
if (interp) {
setResultRowInterpo(pResult, RESULT_ROW_START_INTERP);
}
} else {
- setNotInterpoWindowKey(pCtx, pOperatorInfo->numOfExprs, RESULT_ROW_START_INTERP);
+ setNotInterpoWindowKey(pCtx, numOfExprs, RESULT_ROW_START_INTERP);
}
// point interpolation does not require the end key time window interpolation.
@@ -595,139 +573,261 @@ static void doWindowBorderInterpolation(SOperatorInfo* pOperatorInfo, SSDataBloc
// }
// interpolation query does not generate the time window end interpolation
- done = resultRowInterpolated(pResult, RESULT_ROW_END_INTERP);
+ done = isResultRowInterpolated(pResult, RESULT_ROW_END_INTERP);
if (!done) {
- int32_t endRowIndex = startPos + (forwardStep - 1) * step;
+ int32_t endRowIndex = startPos + forwardRows - 1;
- TSKEY endKey = (order == TSDB_ORDER_ASC) ? pBlock->info.window.ekey : pBlock->info.window.skey;
+ TSKEY endKey = (pInfo->order == TSDB_ORDER_ASC) ? pBlock->info.window.ekey : pBlock->info.window.skey;
bool interp =
- setTimeWindowInterpolationEndTs(pOperatorInfo, pCtx, endRowIndex, pBlock->pDataBlock, tsCols, endKey, win);
+ setTimeWindowInterpolationEndTs(pInfo, pCtx, numOfExprs, endRowIndex, pBlock->pDataBlock, tsCols, endKey, win);
if (interp) {
setResultRowInterpo(pResult, RESULT_ROW_END_INTERP);
}
} else {
- setNotInterpoWindowKey(pCtx, pOperatorInfo->numOfExprs, RESULT_ROW_END_INTERP);
+ setNotInterpoWindowKey(pCtx, numOfExprs, RESULT_ROW_END_INTERP);
}
}
-static void saveDataBlockLastRow(char** pRow, SArray* pDataBlock, int32_t rowIndex, int32_t numOfCols) {
- if (pDataBlock == NULL) {
+static void saveDataBlockLastRow(SArray* pPrevKeys, const SSDataBlock* pBlock, SArray* pCols) {
+ if (pBlock->pDataBlock == NULL) {
return;
}
- for (int32_t k = 0; k < numOfCols; ++k) {
- SColumnInfoData* pColInfo = taosArrayGet(pDataBlock, k);
- memcpy(pRow[k], ((char*)pColInfo->pData) + (pColInfo->info.bytes * rowIndex), pColInfo->info.bytes);
+ size_t num = taosArrayGetSize(pPrevKeys);
+ for (int32_t k = 0; k < num; ++k) {
+ SColumn* pc = taosArrayGet(pCols, k);
+
+ SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, pc->slotId);
+
+ SGroupKeys* pkey = taosArrayGet(pPrevKeys, k);
+ for(int32_t i = pBlock->info.rows - 1; i >= 0; --i) {
+ if (colDataIsNull_s(pColInfo, i)) {
+ continue;
+ }
+
+ char* val = colDataGetData(pColInfo, i);
+ if (IS_VAR_DATA_TYPE(pkey->type)) {
+ memcpy(pkey->pData, val, varDataTLen(val));
+ ASSERT(varDataTLen(val) <= pkey->bytes);
+ } else {
+ memcpy(pkey->pData, val, pkey->bytes);
+ }
+
+ break;
+ }
}
}
-static SArray* hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pBlock,
- uint64_t tableGroupId) {
+static void doInterpUnclosedTimeWindow(SOperatorInfo* pOperatorInfo, int32_t numOfExprs, SResultRowInfo* pResultRowInfo,
+ SSDataBlock* pBlock, int32_t scanFlag, int64_t* tsCols, SResultRowPosition* p) {
+ SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo;
+
SIntervalAggOperatorInfo* pInfo = (SIntervalAggOperatorInfo*)pOperatorInfo->info;
- SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo;
- int32_t numOfOutput = pOperatorInfo->numOfExprs;
+ int32_t startPos = 0;
+ int32_t numOfOutput = pOperatorInfo->numOfExprs;
+ uint64_t groupId = pBlock->info.groupId;
- SArray* pUpdated = NULL;
- if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM) {
- pUpdated = taosArrayInit(4, POINTER_BYTES);
+ SResultRow* pResult = NULL;
+
+ while (1) {
+ SListNode* pn = tdListGetHead(pResultRowInfo->openWindow);
+
+ SResultRowPosition* p1 = (SResultRowPosition*)pn->data;
+ if (p->pageId == p1->pageId && p->offset == p1->offset) {
+ break;
+ }
+
+ SResultRow* pr = getResultRowByPos(pInfo->aggSup.pResultBuf, p1);
+ ASSERT(pr->offset == p1->offset && pr->pageId == p1->pageId);
+
+ if (pr->closed) {
+ ASSERT(isResultRowInterpolated(pr, RESULT_ROW_START_INTERP) && isResultRowInterpolated(pr, RESULT_ROW_END_INTERP));
+ tdListPopHead(pResultRowInfo->openWindow);
+ continue;
+ }
+
+ STimeWindow w = pr->win;
+ int32_t ret = setTimeWindowOutputBuf(pResultRowInfo, &w, (scanFlag == MAIN_SCAN), &pResult, groupId, pInfo->binfo.pCtx,
+ numOfOutput, pInfo->binfo.rowCellInfoOffset, &pInfo->aggSup, pTaskInfo);
+ if (ret != TSDB_CODE_SUCCESS) {
+ longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ }
+
+ ASSERT(!isResultRowInterpolated(pResult, RESULT_ROW_END_INTERP));
+
+ SGroupKeys *pTsKey = taosArrayGet(pInfo->pPrevValues, 0);
+ int64_t prevTs = *(int64_t*) pTsKey->pData;
+ doTimeWindowInterpolation(pInfo, numOfOutput, pBlock->pDataBlock, prevTs, -1, tsCols[startPos], startPos,
+ w.ekey, RESULT_ROW_END_INTERP);
+
+ setResultRowInterpo(pResult, RESULT_ROW_END_INTERP);
+ setNotInterpoWindowKey(pInfo->binfo.pCtx, numOfExprs, RESULT_ROW_START_INTERP);
+
+ doApplyFunctions(pTaskInfo, pInfo->binfo.pCtx, &w, &pInfo->twAggSup.timeWindowData, startPos, 0, tsCols,
+ pBlock->info.rows, numOfExprs, pInfo->order);
+
+ if (isResultRowInterpolated(pResult, RESULT_ROW_END_INTERP)) {
+ closeResultRow(pr);
+ tdListPopHead(pResultRowInfo->openWindow);
+ } else { // the remains are can not be closed yet.
+ break;
+ }
}
+}
- int32_t step = 1;
- bool ascScan = (pInfo->order == TSDB_ORDER_ASC);
+typedef int64_t (*__get_value_fn_t)(void* data, int32_t index);
- // int32_t prevIndex = pResultRowInfo->curPos;
+int32_t binarySearch(void* keyList, int num, TSKEY key, int order,
+ __get_value_fn_t getValuefn) {
+ int firstPos = 0, lastPos = num - 1, midPos = -1;
+ int numOfRows = 0;
- TSKEY* tsCols = NULL;
- if (pBlock->pDataBlock != NULL) {
- SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, pInfo->primaryTsIndex);
- tsCols = (int64_t*)pColDataInfo->pData;
+ if (num <= 0) return -1;
+ if (order == TSDB_ORDER_DESC) {
+ // find the first position which is smaller or equal than the key
+ while (1) {
+ if (key >= getValuefn(keyList, lastPos)) return lastPos;
+ if (key == getValuefn(keyList, firstPos)) return firstPos;
+ if (key < getValuefn(keyList, firstPos)) return firstPos - 1;
- if (tsCols != NULL) {
- blockDataUpdateTsWindow(pBlock, pInfo->primaryTsIndex);
+ numOfRows = lastPos - firstPos + 1;
+ midPos = (numOfRows >> 1) + firstPos;
+
+ if (key < getValuefn(keyList, midPos)) {
+ lastPos = midPos - 1;
+ } else if (key > getValuefn(keyList, midPos)) {
+ firstPos = midPos + 1;
+ } else {
+ break;
+ }
+ }
+
+ } else {
+ // find the first position which is bigger or equal than the key
+ while (1) {
+ if (key <= getValuefn(keyList, firstPos)) return firstPos;
+ if (key == getValuefn(keyList, lastPos)) return lastPos;
+
+ if (key > getValuefn(keyList, lastPos)) {
+ lastPos = lastPos + 1;
+ if (lastPos >= num)
+ return -1;
+ else
+ return lastPos;
+ }
+
+ numOfRows = lastPos - firstPos + 1;
+ midPos = (numOfRows >> 1) + firstPos;
+
+ if (key < getValuefn(keyList, midPos)) {
+ lastPos = midPos - 1;
+ } else if (key > getValuefn(keyList, midPos)) {
+ firstPos = midPos + 1;
+ } else {
+ break;
+ }
}
}
- int32_t startPos = 0;
- TSKEY ts = getStartTsKey(&pBlock->info.window, tsCols, pBlock->info.rows, ascScan);
+ return midPos;
+}
+
+int64_t getReskey(void* data, int32_t index) {
+ SArray* res = (SArray*) data;
+ SResKeyPos* pos = taosArrayGetP(res, index);
+ return *(int64_t*)pos->key;
+}
+
+static int32_t saveResult(SResultRow* result, uint64_t groupId, SArray* pUpdated) {
+ int32_t size = taosArrayGetSize(pUpdated);
+ int32_t index = binarySearch(pUpdated, size, result->win.skey, TSDB_ORDER_DESC, getReskey);
+ if (index == -1) {
+ index = 0;
+ } else {
+ TSKEY resTs = getReskey(pUpdated, index);
+ if (resTs < result->win.skey) {
+ index++;
+ } else {
+ return TSDB_CODE_SUCCESS;
+ }
+ }
+
+ SResKeyPos* newPos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t));
+ if (newPos == NULL) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ newPos->groupId = groupId;
+ newPos->pos = (SResultRowPosition){.pageId = result->pageId, .offset = result->offset};
+ *(int64_t*)newPos->key = result->win.skey;
+ if (taosArrayInsert(pUpdated, index, &newPos) == NULL ){
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pBlock,
+ int32_t scanFlag, SArray* pUpdated) {
+ SIntervalAggOperatorInfo* pInfo = (SIntervalAggOperatorInfo*)pOperatorInfo->info;
+
+ SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo;
+
+ int32_t startPos = 0;
+ int32_t numOfOutput = pOperatorInfo->numOfExprs;
+ int64_t *tsCols = extractTsCol(pBlock, pInfo);
+ uint64_t tableGroupId = pBlock->info.groupId;
+ bool ascScan = (pInfo->order == TSDB_ORDER_ASC);
+ TSKEY ts = getStartTsKey(&pBlock->info.window, tsCols);
+ SResultRow* pResult = NULL;
STimeWindow win = getActiveTimeWindow(pInfo->aggSup.pResultBuf, pResultRowInfo, ts, &pInfo->interval,
pInfo->interval.precision, &pInfo->win);
- bool masterScan = true;
- SResultRow* pResult = NULL;
- int32_t ret = setTimeWindowOutputBuf(pResultRowInfo, &win, masterScan, &pResult, tableGroupId, pInfo->binfo.pCtx,
+ int32_t ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pInfo->binfo.pCtx,
numOfOutput, pInfo->binfo.rowCellInfoOffset, &pInfo->aggSup, pTaskInfo);
if (ret != TSDB_CODE_SUCCESS || pResult == NULL) {
longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM) {
- SResKeyPos* pos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t));
- pos->groupId = tableGroupId;
- pos->pos = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset};
- *(int64_t*)pos->key = pResult->win.skey;
-
- taosArrayPush(pUpdated, &pos);
+ if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE ||
+ pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE_SMA) {
+ saveResult(pResult, tableGroupId, pUpdated);
+ }
+ if (pInfo->twAggSup.winMap) {
+ taosHashRemove(pInfo->twAggSup.winMap, &win.skey, sizeof(TSKEY));
+ }
}
- int32_t forwardStep = 0;
TSKEY ekey = ascScan? win.ekey:win.skey;
- forwardStep =
- getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, pInfo->order);
- ASSERT(forwardStep > 0);
+ int32_t forwardRows = getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, pInfo->order);
+ ASSERT(forwardRows > 0);
// prev time window not interpolation yet.
- // int32_t curIndex = pResultRowInfo->curPos;
-
-#if 0
- if (prevIndex != -1 && prevIndex < curIndex && pInfo->timeWindowInterpo) {
- for (int32_t j = prevIndex; j < curIndex; ++j) { // previous time window may be all closed already.
- SResultRow* pRes = getResultRow(pResultRowInfo, j);
- if (pRes->closed) {
- assert(resultRowInterpolated(pRes, RESULT_ROW_START_INTERP) && resultRowInterpolated(pRes, RESULT_ROW_END_INTERP));
- continue;
- }
-
- STimeWindow w = pRes->win;
- ret = setTimeWindowOutputBuf(pResultRowInfo, pBlock->info.uid, &w, masterScan, &pResult, tableGroupId,
- pInfo->binfo.pCtx, numOfOutput, pInfo->binfo.rowCellInfoOffset, &pInfo->aggSup,
- pTaskInfo);
- if (ret != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
- }
-
- assert(!resultRowInterpolated(pResult, RESULT_ROW_END_INTERP));
- doTimeWindowInterpolation(pOperatorInfo, &pInfo->binfo, pBlock->pDataBlock, *(TSKEY*)pInfo->pRow[0], -1,
- tsCols[startPos], startPos, w.ekey, RESULT_ROW_END_INTERP);
-
- setResultRowInterpo(pResult, RESULT_ROW_END_INTERP);
- setNotInterpoWindowKey(pInfo->binfo.pCtx, pOperatorInfo->numOfExprs, RESULT_ROW_START_INTERP);
-
- doApplyFunctions(pInfo->binfo.pCtx, &w, &pInfo->timeWindowData, startPos, 0, tsCols, pBlock->info.rows, numOfOutput, TSDB_ORDER_ASC);
- }
+ if (pInfo->timeWindowInterpo) {
+ SResultRowPosition pos = addToOpenWindowList(pResultRowInfo, pResult);
+ doInterpUnclosedTimeWindow(pOperatorInfo, numOfOutput, pResultRowInfo, pBlock, scanFlag, tsCols, &pos);
// restore current time window
- ret = setTimeWindowOutputBuf(pResultRowInfo, pBlock->info.uid, &win, masterScan, &pResult, tableGroupId,
- pInfo->binfo.pCtx, numOfOutput, pInfo->binfo.rowCellInfoOffset, &pInfo->aggSup,
- pTaskInfo);
+ ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pInfo->binfo.pCtx,
+ numOfOutput, pInfo->binfo.rowCellInfoOffset, &pInfo->aggSup, pTaskInfo);
if (ret != TSDB_CODE_SUCCESS) {
longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
- }
-#endif
- // window start key interpolation
- doWindowBorderInterpolation(pOperatorInfo, pBlock, pInfo->binfo.pCtx, pResult, &win, startPos, forwardStep,
- pInfo->order, false);
+ // window start key interpolation
+ doWindowBorderInterpolation(pInfo, pBlock, numOfOutput, pInfo->binfo.pCtx, pResult, &win, startPos, forwardRows);
+ }
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &win, true);
- doApplyFunctions(pTaskInfo, pInfo->binfo.pCtx, &win, &pInfo->twAggSup.timeWindowData, startPos, forwardStep, tsCols,
- pBlock->info.rows, numOfOutput, TSDB_ORDER_ASC);
+ doApplyFunctions(pTaskInfo, pInfo->binfo.pCtx, &win, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, tsCols,
+ pBlock->info.rows, numOfOutput, pInfo->order);
+
+ doCloseWindow(pResultRowInfo, pInfo, pResult);
STimeWindow nextWin = win;
while (1) {
- int32_t prevEndPos = (forwardStep - 1) * step + startPos;
+ int32_t prevEndPos = forwardRows - 1 + startPos;
startPos = getNextQualifiedWindow(&pInfo->interval, &nextWin, &pBlock->info, tsCols, prevEndPos, pInfo->order);
if (startPos < 0) {
break;
@@ -735,41 +835,77 @@ static SArray* hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe
// null data, failed to allocate more memory buffer
int32_t code =
- setTimeWindowOutputBuf(pResultRowInfo, &nextWin, masterScan, &pResult, tableGroupId, pInfo->binfo.pCtx,
+ setTimeWindowOutputBuf(pResultRowInfo, &nextWin, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pInfo->binfo.pCtx,
numOfOutput, pInfo->binfo.rowCellInfoOffset, &pInfo->aggSup, pTaskInfo);
if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
- if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM) {
- SResKeyPos* pos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t));
- pos->groupId = tableGroupId;
- pos->pos = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset};
- *(int64_t*)pos->key = pResult->win.skey;
- taosArrayPush(pUpdated, &pos);
+ if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM) {
+ if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE ||
+ pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE_SMA) {
+ saveResult(pResult, tableGroupId, pUpdated);
+ }
+ if (pInfo->twAggSup.winMap) {
+ taosHashRemove(pInfo->twAggSup.winMap, &win.skey, sizeof(TSKEY));
+ }
}
ekey = ascScan? nextWin.ekey:nextWin.skey;
- forwardStep =
+ forwardRows =
getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, pInfo->order);
// window start(end) key interpolation
- doWindowBorderInterpolation(pOperatorInfo, pBlock, pInfo->binfo.pCtx, pResult, &nextWin, startPos, forwardStep,
- pInfo->order, false);
+ doWindowBorderInterpolation(pInfo, pBlock, numOfOutput, pInfo->binfo.pCtx, pResult, &nextWin, startPos, forwardRows);
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, true);
- doApplyFunctions(pTaskInfo, pInfo->binfo.pCtx, &nextWin, &pInfo->twAggSup.timeWindowData, startPos, forwardStep, tsCols,
- pBlock->info.rows, numOfOutput, TSDB_ORDER_ASC);
+ doApplyFunctions(pTaskInfo, pInfo->binfo.pCtx, &nextWin, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, tsCols,
+ pBlock->info.rows, numOfOutput, pInfo->order);
+ doCloseWindow(pResultRowInfo, pInfo, pResult);
}
if (pInfo->timeWindowInterpo) {
- int32_t rowIndex = ascScan ? (pBlock->info.rows - 1) : 0;
- saveDataBlockLastRow(pInfo->pRow, pBlock->pDataBlock, rowIndex, pBlock->info.numOfCols);
+ saveDataBlockLastRow(pInfo->pPrevValues, pBlock, pInfo->pInterpCols);
}
+}
- return pUpdated;
- // updateResultRowInfoActiveIndex(pResultRowInfo, &pInfo->win, pRuntimeEnv->current->lastKey, true, false);
+void doCloseWindow(SResultRowInfo* pResultRowInfo, const SIntervalAggOperatorInfo* pInfo, SResultRow* pResult) {
+ // current result is done in computing final results.
+ if (pInfo->timeWindowInterpo && isResultRowInterpolated(pResult, RESULT_ROW_END_INTERP)) {
+ closeResultRow(pResult);
+ tdListPopHead(pResultRowInfo->openWindow);
+ }
+}
+
+SResultRowPosition addToOpenWindowList(SResultRowInfo* pResultRowInfo, const SResultRow* pResult) {
+ SResultRowPosition pos = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset};
+ SListNode* pn = tdListGetTail(pResultRowInfo->openWindow);
+ if (pn == NULL) {
+ tdListAppend(pResultRowInfo->openWindow, &pos);
+ return pos;
+ }
+
+ SResultRowPosition* px = (SResultRowPosition*)pn->data;
+ if (px->pageId != pos.pageId || px->offset != pos.offset) {
+ tdListAppend(pResultRowInfo->openWindow, &pos);
+ }
+
+ return pos;
+}
+
+int64_t* extractTsCol(SSDataBlock* pBlock, const SIntervalAggOperatorInfo* pInfo) {
+ TSKEY* tsCols = NULL;
+ if (pBlock->pDataBlock != NULL) {
+ SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, pInfo->primaryTsIndex);
+ tsCols = (int64_t*)pColDataInfo->pData;
+
+ if (tsCols != NULL) {
+ blockDataUpdateTsWindow(pBlock, pInfo->primaryTsIndex);
+ }
+ }
+
+ return tsCols;
}
static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) {
@@ -782,13 +918,11 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) {
int32_t scanFlag = MAIN_SCAN;
+ int64_t st = taosGetTimestampUs();
SOperatorInfo* downstream = pOperator->pDownstream[0];
while (1) {
- publishOperatorProfEvent(downstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
- publishOperatorProfEvent(downstream, QUERY_PROF_AFTER_OPERATOR_EXEC);
-
if (pBlock == NULL) {
break;
}
@@ -800,17 +934,17 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) {
STableQueryInfo* pTableQueryInfo = pInfo->pCurrent;
setIntervalQueryRange(pTableQueryInfo, pBlock->info.window.skey, &pTaskInfo->window);
- hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, pBlock->info.groupId);
+ hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, scanFlag, NULL);
#if 0 // test for encode/decode result info
- if(pOperator->encodeResultRow){
+ if(pOperator->fpSet.encodeResultRow){
char *result = NULL;
int32_t length = 0;
SAggSupporter *pSup = &pInfo->aggSup;
- pOperator->encodeResultRow(pOperator, pSup, &pInfo->binfo, &result, &length);
+ pOperator->fpSet.encodeResultRow(pOperator, &result, &length);
taosHashClear(pSup->pResultRowHashTable);
pInfo->binfo.resultRowInfo.size = 0;
- pOperator->decodeResultRow(pOperator, pSup, &pInfo->binfo, result, length);
+ pOperator->fpSet.decodeResultRow(pOperator, result);
if(result){
taosMemoryFree(result);
}
@@ -821,6 +955,8 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) {
closeAllResultRows(&pInfo->binfo.resultRowInfo);
initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, pInfo->order);
OPTR_SET_OPENED(pOperator);
+
+ pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0;
return TSDB_CODE_SUCCESS;
}
@@ -929,8 +1065,9 @@ static SSDataBlock* doStateWindowAgg(SOperatorInfo* pOperator) {
}
SStateWindowOperatorInfo* pInfo = pOperator->info;
- SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
- SOptrBasicInfo* pBInfo = &pInfo->binfo;
+
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SOptrBasicInfo* pBInfo = &pInfo->binfo;
if (pOperator->status == OP_RES_TO_RETURN) {
doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
@@ -943,13 +1080,11 @@ static SSDataBlock* doStateWindowAgg(SOperatorInfo* pOperator) {
}
int32_t order = TSDB_ORDER_ASC;
+ int64_t st = taosGetTimestampUs();
SOperatorInfo* downstream = pOperator->pDownstream[0];
while (1) {
- publishOperatorProfEvent(downstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
- publishOperatorProfEvent(downstream, QUERY_PROF_AFTER_OPERATOR_EXEC);
-
if (pBlock == NULL) {
break;
}
@@ -960,6 +1095,8 @@ static SSDataBlock* doStateWindowAgg(SOperatorInfo* pOperator) {
doStateWindowAggImpl(pOperator, pInfo, pBlock);
}
+ pOperator->cost.openCost = (taosGetTimestampUs() - st)/1000.0;
+
pOperator->status = OP_RES_TO_RETURN;
closeAllResultRows(&pBInfo->resultRowInfo);
@@ -970,7 +1107,10 @@ static SSDataBlock* doStateWindowAgg(SOperatorInfo* pOperator) {
doSetOperatorCompleted(pOperator);
}
- return pBInfo->pRes->info.rows == 0 ? NULL : pBInfo->pRes;
+ size_t rows = pBInfo->pRes->info.rows;
+ pOperator->resultInfo.totalRows += rows;
+
+ return (rows == 0)? NULL : pBInfo->pRes;
}
static SSDataBlock* doBuildIntervalResult(SOperatorInfo* pOperator) {
@@ -998,7 +1138,10 @@ static SSDataBlock* doBuildIntervalResult(SOperatorInfo* pOperator) {
doSetOperatorCompleted(pOperator);
}
- return pBlock->info.rows == 0 ? NULL : pBlock;
+ size_t rows = pBlock->info.rows;
+ pOperator->resultInfo.totalRows += rows;
+
+ return (rows == 0)? NULL:pBlock;
}
}
@@ -1033,13 +1176,8 @@ static void setInverFunction(SqlFunctionCtx* pCtx, int32_t num, EStreamType type
}
}
-void doClearWindow(SAggSupporter* pSup, SOptrBasicInfo* pBinfo, char* pData,
- int16_t bytes, uint64_t groupId, int32_t numOfOutput) {
- SET_RES_WINDOW_KEY(pSup->keyBuf, pData, bytes, groupId);
- SResultRowPosition* p1 =
- (SResultRowPosition*)taosHashGet(pSup->pResultRowHashTable, pSup->keyBuf,
- GET_RES_WINDOW_KEY_LEN(bytes));
- SResultRow* pResult = getResultRowByPos(pSup->pResultBuf, p1);
+void doClearWindowImpl(SResultRowPosition* p1, SDiskbasedBuf* pResultBuf, SOptrBasicInfo* pBinfo, int32_t numOfOutput) {
+ SResultRow* pResult = getResultRowByPos(pResultBuf, p1);
SqlFunctionCtx* pCtx = pBinfo->pCtx;
for (int32_t i = 0; i < numOfOutput; ++i) {
pCtx[i].resultInfo = getResultCell(pResult, i, pBinfo->rowCellInfoOffset);
@@ -1054,20 +1192,75 @@ void doClearWindow(SAggSupporter* pSup, SOptrBasicInfo* pBinfo, char* pData,
}
}
+void doClearWindow(SAggSupporter* pSup, SOptrBasicInfo* pBinfo, char* pData,
+ int16_t bytes, uint64_t groupId, int32_t numOfOutput) {
+ SET_RES_WINDOW_KEY(pSup->keyBuf, pData, bytes, groupId);
+ SResultRowPosition* p1 =
+ (SResultRowPosition*)taosHashGet(pSup->pResultRowHashTable, pSup->keyBuf,
+ GET_RES_WINDOW_KEY_LEN(bytes));
+ doClearWindowImpl(p1, pSup->pResultBuf, pBinfo, numOfOutput);
+}
+
static void doClearWindows(SAggSupporter* pSup, SOptrBasicInfo* pBinfo,
- SInterval* pIntrerval, int32_t tsIndex, int32_t numOfOutput, SSDataBlock* pBlock) {
+ SInterval* pInterval, int32_t tsIndex, int32_t numOfOutput, SSDataBlock* pBlock,
+ SArray* pUpWins) {
SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, tsIndex);
TSKEY *tsCols = (TSKEY*)pColDataInfo->pData;
int32_t step = 0;
for (int32_t i = 0; i < pBlock->info.rows; i += step) {
SResultRowInfo dumyInfo;
dumyInfo.cur.pageId = -1;
- STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[i], pIntrerval,
- pIntrerval->precision, NULL);
+ STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[i], pInterval,
+ pInterval->precision, NULL);
step = getNumOfRowsInTimeWindow(&pBlock->info, tsCols, i,
win.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC);
doClearWindow(pSup, pBinfo, (char*)&win.skey, sizeof(TKEY), pBlock->info.groupId, numOfOutput);
+ if (pUpWins) {
+ taosArrayPush(pUpWins, &win);
+ }
+ }
+}
+
+static int32_t closeIntervalWindow(SHashObj *pHashMap, STimeWindowAggSupp *pSup,
+ SInterval* pInterval, SArray* closeWins) {
+ void *pIte = NULL;
+ size_t keyLen = 0;
+ while((pIte = taosHashIterate(pHashMap, pIte)) != NULL) {
+ void* key = taosHashGetKey(pIte, &keyLen);
+ uint64_t groupId = *(uint64_t*) key;
+ ASSERT(keyLen == GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY)));
+ TSKEY ts = *(int64_t*) ((char*)key + sizeof(uint64_t));
+ SResultRowInfo dumyInfo;
+ dumyInfo.cur.pageId = -1;
+ STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, ts, pInterval,
+ pInterval->precision, NULL);
+ if (win.ekey < pSup->maxTs - pSup->waterMark) {
+ if (pSup->calTrigger == STREAM_TRIGGER_WINDOW_CLOSE_SMA) {
+ if (taosHashGet(pSup->winMap, &win.skey, sizeof(TSKEY))) {
+ continue;
+ }
+ }
+ char keyBuf[GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY))];
+ SET_RES_WINDOW_KEY(keyBuf, &ts, sizeof(TSKEY), groupId);
+ if (pSup->calTrigger != STREAM_TRIGGER_AT_ONCE_SMA &&
+ pSup->calTrigger != STREAM_TRIGGER_WINDOW_CLOSE_SMA) {
+ taosHashRemove(pHashMap, keyBuf, keyLen);
+ }
+ SResKeyPos* pos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t));
+ if (pos == NULL) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ pos->groupId = groupId;
+ pos->pos = *(SResultRowPosition*) pIte;
+ *(int64_t*)pos->key = ts;
+ if (!taosArrayPush(closeWins, &pos)) {
+ taosMemoryFree(pos);
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ taosHashPut(pSup->winMap, &win.skey, sizeof(TSKEY), NULL, 0);
+ }
}
+ return TSDB_CODE_SUCCESS;
}
static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
@@ -1090,17 +1283,16 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
SOperatorInfo* downstream = pOperator->pDownstream[0];
- SArray* pUpdated = NULL;
+ SArray* pUpdated = taosArrayInit(4, POINTER_BYTES);
+ SArray* pClosed = taosArrayInit(4, POINTER_BYTES);
+
while (1) {
- publishOperatorProfEvent(downstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
- publishOperatorProfEvent(downstream, QUERY_PROF_AFTER_OPERATOR_EXEC);
-
if (pBlock == NULL) {
break;
}
- // The timewindows that overlaps the timestamps of the input pBlock need to be recalculated and return to the
+ // The timewindow that overlaps the timestamps of the input pBlock need to be recalculated and return to the
// caller. Note that all the time window are not close till now.
// the pDataBlock are always the same one, no need to call this again
setInputDataBlock(pOperator, pInfo->binfo.pCtx, pBlock, pInfo->order, MAIN_SCAN, true);
@@ -1109,15 +1301,26 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
}
if (pBlock->info.type == STREAM_REPROCESS) {
- doClearWindows(&pInfo->aggSup, &pInfo->binfo, &pInfo->interval,
- pInfo->primaryTsIndex, pOperator->numOfExprs, pBlock);
+ doClearWindows(&pInfo->aggSup, &pInfo->binfo, &pInfo->interval, 0,
+ pOperator->numOfExprs, pBlock, NULL);
qDebug("%s clear existed time window results for updates checked", GET_TASKID(pTaskInfo));
continue;
}
- pUpdated = hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, 0);
+ pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey);
+ hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, MAIN_SCAN, pUpdated);
+ }
+
+ closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup,
+ &pInfo->interval, pClosed);
+ finalizeUpdatedResult(pOperator->numOfExprs, pInfo->aggSup.pResultBuf, pClosed,
+ pInfo->binfo.rowCellInfoOffset);
+ if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_WINDOW_CLOSE ||
+ pInfo->twAggSup.calTrigger == STREAM_TRIGGER_WINDOW_CLOSE_SMA) {
+ taosArrayAddAll(pUpdated, pClosed);
}
+ taosArrayDestroy(pClosed);
finalizeUpdatedResult(pOperator->numOfExprs, pInfo->aggSup.pResultBuf, pUpdated, pInfo->binfo.rowCellInfoOffset);
initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated);
@@ -1145,9 +1348,18 @@ void destroyStreamFinalIntervalOperatorInfo(void* param, int32_t numOfOutput) {
SStreamFinalIntervalOperatorInfo* pInfo = (SStreamFinalIntervalOperatorInfo *)param;
doDestroyBasicInfo(&pInfo->binfo, numOfOutput);
cleanupAggSup(&pInfo->aggSup);
+ if (pInfo->pChildren) {
+ int32_t size = taosArrayGetSize(pInfo->pChildren);
+ for (int32_t i = 0; i < size; i++) {
+ SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, i);
+ destroyIntervalOperatorInfo(pChildOp->info, numOfOutput);
+ taosMemoryFreeClear(pChildOp->info);
+ taosMemoryFreeClear(pChildOp);
+ }
+ }
}
-bool allInvertible(SqlFunctionCtx* pFCtx, int32_t numOfCols) {
+static bool allInvertible(SqlFunctionCtx* pFCtx, int32_t numOfCols) {
for (int32_t i = 0; i < numOfCols; i++) {
if (!fmIsInvertible(pFCtx[i].functionId)) {
return false;
@@ -1156,21 +1368,65 @@ bool allInvertible(SqlFunctionCtx* pFCtx, int32_t numOfCols) {
return true;
}
+static bool timeWindowinterpNeeded(SqlFunctionCtx* pCtx, int32_t numOfCols, SIntervalAggOperatorInfo* pInfo) {
+ // the primary timestamp column
+ bool needed = false;
+ pInfo->pInterpCols = taosArrayInit(4, sizeof(SColumn));
+ pInfo->pPrevValues = taosArrayInit(4, sizeof(SGroupKeys));
+
+ { // ts column
+ SColumn c = {0};
+ c.colId = 1;
+ c.slotId = pInfo->primaryTsIndex;
+ c.type = TSDB_DATA_TYPE_TIMESTAMP;
+ c.bytes = sizeof(int64_t);
+ taosArrayPush(pInfo->pInterpCols, &c);
+
+ SGroupKeys key = {0};
+ key.bytes = c.bytes;
+ key.type = c.type;
+ key.isNull = true; // to denote no value is assigned yet
+ key.pData = taosMemoryCalloc(1, c.bytes);
+ taosArrayPush(pInfo->pPrevValues, &key);
+ }
+
+ for(int32_t i = 0; i < numOfCols; ++i) {
+ SExprInfo* pExpr = pCtx[i].pExpr;
+
+ if (strcmp(pExpr->pExpr->_function.functionName, "twa") == 0) {
+ SFunctParam* pParam = &pExpr->base.pParam[0];
+
+ SColumn c = *pParam->pCol;
+ taosArrayPush(pInfo->pInterpCols, &c);
+ needed = true;
+
+ SGroupKeys key = {0};
+ key.bytes = c.bytes;
+ key.type = c.type;
+ key.isNull = false;
+ key.pData = taosMemoryCalloc(1, c.bytes);
+ taosArrayPush(pInfo->pPrevValues, &key);
+ }
+ }
+
+ return needed;
+}
+
SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
- STimeWindowAggSupp* pTwAggSupp, const STableGroupInfo* pTableGroupInfo,
- SExecTaskInfo* pTaskInfo) {
+ STimeWindowAggSupp* pTwAggSupp, SExecTaskInfo* pTaskInfo) {
SIntervalAggOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SIntervalAggOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
goto _error;
}
- pInfo->order = TSDB_ORDER_ASC;
- pInfo->interval = *pInterval;
+ pInfo->win = pTaskInfo->window;
+ pInfo->order = TSDB_ORDER_ASC;
+ pInfo->interval = *pInterval;
pInfo->execModel = pTaskInfo->execModel;
- pInfo->win = pTaskInfo->window;
- pInfo->twAggSup = *pTwAggSupp;
+ pInfo->twAggSup = *pTwAggSupp;
+
pInfo->primaryTsIndex = primaryTsSlotId;
size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
@@ -1180,9 +1436,15 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo*
initAggInfo(&pInfo->binfo, &pInfo->aggSup, pExprInfo, numOfCols, pResBlock, keyBufSize, pTaskInfo->id.str);
initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pInfo->win);
+
pInfo->invertible = allInvertible(pInfo->binfo.pCtx, numOfCols);
pInfo->invertible = false; // Todo(liuyao): Dependent TSDB API
+ pInfo->timeWindowInterpo = timeWindowinterpNeeded(pInfo->binfo.pCtx, numOfCols, pInfo);
+ if (pInfo->timeWindowInterpo) {
+ pInfo->binfo.resultRowInfo.openWindow = tdListNew(sizeof(SResultRowPosition));
+ }
+
// pInfo->pTableQueryInfo = initTableQueryInfo(pTableGroupInfo);
if (code != TSDB_CODE_SUCCESS /* || pInfo->pTableQueryInfo == NULL*/) {
goto _error;
@@ -1190,14 +1452,14 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo*
initResultRowInfo(&pInfo->binfo.resultRowInfo, (int32_t)1);
- pOperator->name = "TimeIntervalAggOperator";
+ pOperator->name = "TimeIntervalAggOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_INTERVAL;
- pOperator->blocking = true;
- pOperator->status = OP_NOT_OPENED;
- pOperator->pExpr = pExprInfo;
- pOperator->pTaskInfo = pTaskInfo;
- pOperator->numOfExprs = numOfCols;
- pOperator->info = pInfo;
+ pOperator->blocking = true;
+ pOperator->status = OP_NOT_OPENED;
+ pOperator->pExpr = pExprInfo;
+ pOperator->pTaskInfo = pTaskInfo;
+ pOperator->numOfExprs = numOfCols;
+ pOperator->info = pInfo;
pOperator->fpSet = createOperatorFpSet(doOpenIntervalAgg, doBuildIntervalResult, doStreamIntervalAgg, NULL,
destroyIntervalOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL);
@@ -1219,32 +1481,38 @@ _error:
SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
- STimeWindowAggSupp* pTwAggSupp, const STableGroupInfo* pTableGroupInfo,
- SExecTaskInfo* pTaskInfo) {
+ STimeWindowAggSupp* pTwAggSupp, SExecTaskInfo* pTaskInfo) {
SStreamFinalIntervalOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamFinalIntervalOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
goto _error;
}
-
pInfo->order = TSDB_ORDER_ASC;
pInfo->interval = *pInterval;
pInfo->twAggSup = *pTwAggSupp;
pInfo->primaryTsIndex = primaryTsSlotId;
-
size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
initResultSizeInfo(pOperator, 4096);
-
int32_t code =
initAggInfo(&pInfo->binfo, &pInfo->aggSup, pExprInfo, numOfCols, pResBlock,
keyBufSize, pTaskInfo->id.str);
-
initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
-
initResultRowInfo(&pInfo->binfo.resultRowInfo, (int32_t)1);
+ int32_t numOfChild = 8;// Todo(liuyao) get it from phy plan
+ pInfo->pChildren = taosArrayInit(numOfChild, sizeof(SOperatorInfo));
+ for (int32_t i = 0; i < numOfChild; i++) {
+ SSDataBlock* chRes = createOneDataBlock(pResBlock, false);
+ SOperatorInfo* pChildOp = createIntervalOperatorInfo(NULL, pExprInfo, numOfCols,
+ chRes, pInterval, primaryTsSlotId, pTwAggSupp, pTaskInfo);
+ if (pChildOp && chRes) {
+ taosArrayPush(pInfo->pChildren, &pChildOp);
+ continue;
+ }
+ goto _error;
+ }
pOperator->name = "StreamFinalIntervalOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL;
@@ -1276,8 +1544,7 @@ _error:
SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
- STimeWindowAggSupp* pTwAggSupp, const STableGroupInfo* pTableGroupInfo,
- SExecTaskInfo* pTaskInfo) {
+ STimeWindowAggSupp* pTwAggSupp, SExecTaskInfo* pTaskInfo) {
SIntervalAggOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SIntervalAggOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
@@ -1299,8 +1566,7 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SExpr
initAggInfo(&pInfo->binfo, &pInfo->aggSup, pExprInfo, numOfCols, pResBlock, keyBufSize, pTaskInfo->id.str);
initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pInfo->win);
- // pInfo->pTableQueryInfo = initTableQueryInfo(pTableGroupInfo);
- if (code != TSDB_CODE_SUCCESS /* || pInfo->pTableQueryInfo == NULL*/) {
+ if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -1422,13 +1688,13 @@ static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) {
return pBInfo->pRes;
}
- int32_t order = TSDB_ORDER_ASC;
+ int64_t st = taosGetTimestampUs();
+ int32_t order = TSDB_ORDER_ASC;
+
SOperatorInfo* downstream = pOperator->pDownstream[0];
while (1) {
- publishOperatorProfEvent(downstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
- publishOperatorProfEvent(downstream, QUERY_PROF_AFTER_OPERATOR_EXEC);
if (pBlock == NULL) {
break;
}
@@ -1440,6 +1706,8 @@ static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) {
doSessionWindowAggImpl(pOperator, pInfo, pBlock);
}
+ pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0;
+
// restore the value
pOperator->status = OP_RES_TO_RETURN;
closeAllResultRows(&pBInfo->resultRowInfo);
@@ -1451,7 +1719,10 @@ static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) {
doSetOperatorCompleted(pOperator);
}
- return pBInfo->pRes->info.rows == 0 ? NULL : pBInfo->pRes;
+ size_t rows = pBInfo->pRes->info.rows;
+ pOperator->resultInfo.totalRows += rows;
+
+ return (rows == 0)? NULL : pBInfo->pRes;
}
static SSDataBlock* doAllIntervalAgg(SOperatorInfo* pOperator) {
@@ -1473,9 +1744,7 @@ static SSDataBlock* doAllIntervalAgg(SOperatorInfo* pOperator) {
SOperatorInfo* downstream = pOperator->pDownstream[0];
while (1) {
- publishOperatorProfEvent(downstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
- publishOperatorProfEvent(downstream, QUERY_PROF_AFTER_OPERATOR_EXEC);
if (pBlock == NULL) {
break;
}
@@ -1638,9 +1907,10 @@ _error:
return NULL;
}
-static SArray* doHashInterval(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pSDataBlock,
+static SArray* doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBlock,
int32_t tableGroupId) {
SStreamFinalIntervalOperatorInfo* pInfo = (SStreamFinalIntervalOperatorInfo*)pOperatorInfo->info;
+ SResultRowInfo* pResultRowInfo = &(pInfo->binfo.resultRowInfo);
SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo;
int32_t numOfOutput = pOperatorInfo->numOfExprs;
SArray* pUpdated = taosArrayInit(4, POINTER_BYTES);
@@ -1648,14 +1918,17 @@ static SArray* doHashInterval(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRes
bool ascScan = true;
TSKEY* tsCols = NULL;
SResultRow* pResult = NULL;
- int32_t forwardStep = 0;
+ int32_t forwardRows = 0;
if (pSDataBlock->pDataBlock != NULL) {
SColumnInfoData* pColDataInfo = taosArrayGet(pSDataBlock->pDataBlock, pInfo->primaryTsIndex);
tsCols = (int64_t*)pColDataInfo->pData;
+ } else {
+ return pUpdated;
}
+
int32_t startPos = ascScan ? 0 : (pSDataBlock->info.rows - 1);
- TSKEY ts = getStartTsKey(&pSDataBlock->info.window, tsCols, pSDataBlock->info.rows, ascScan);
+ TSKEY ts = getStartTsKey(&pSDataBlock->info.window, tsCols);
STimeWindow nextWin = getActiveTimeWindow(pInfo->aggSup.pResultBuf, pResultRowInfo, ts,
&pInfo->interval, pInfo->interval.precision, NULL);
while (1) {
@@ -1670,15 +1943,15 @@ static SArray* doHashInterval(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRes
pos->pos = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset};
*(int64_t*)pos->key = pResult->win.skey;
taosArrayPush(pUpdated, &pos);
- forwardStep =
+ forwardRows =
getNumOfRowsInTimeWindow(&pSDataBlock->info, tsCols, startPos, nextWin.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC);
// window start(end) key interpolation
- doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->binfo.pCtx, pResult, &nextWin, startPos, forwardStep,
- pInfo->order, false);
+ // disable it temporarily
+// doWindowBorderInterpolation(pInfo, pSDataBlock, numOfOutput, pInfo->binfo.pCtx, pResult, &nextWin, startPos, forwardRows);
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, true);
- doApplyFunctions(pTaskInfo, pInfo->binfo.pCtx, &nextWin, &pInfo->twAggSup.timeWindowData, startPos, forwardStep, tsCols,
+ doApplyFunctions(pTaskInfo, pInfo->binfo.pCtx, &nextWin, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, tsCols,
pSDataBlock->info.rows, numOfOutput, TSDB_ORDER_ASC);
- int32_t prevEndPos = (forwardStep - 1) * step + startPos;
+ int32_t prevEndPos = (forwardRows - 1) * step + startPos;
startPos = getNextQualifiedWindow(&pInfo->interval, &nextWin, &pSDataBlock->info, tsCols, prevEndPos, pInfo->order);
if (startPos < 0) {
break;
@@ -1687,6 +1960,51 @@ static SArray* doHashInterval(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRes
return pUpdated;
}
+bool isFinalInterval(SStreamFinalIntervalOperatorInfo* pInfo) {
+ return pInfo->pChildren != NULL;
+}
+
+void compactFunctions(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx,
+ int32_t numOfOutput, SExecTaskInfo* pTaskInfo) {
+ for (int32_t k = 0; k < numOfOutput; ++k) {
+ if (fmIsWindowPseudoColumnFunc(pDestCtx[k].functionId)) {
+ continue;
+ }
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (functionNeedToExecute(&pDestCtx[k]) && pDestCtx[k].fpSet.combine != NULL) {
+ code = pDestCtx[k].fpSet.combine(&pDestCtx[k], &pSourceCtx[k]);
+ if (code != TSDB_CODE_SUCCESS) {
+ qError("%s apply functions error, code: %s", GET_TASKID(pTaskInfo), tstrerror(code));
+ pTaskInfo->code = code;
+ longjmp(pTaskInfo->env, code);
+ }
+ }
+ }
+}
+
+static void rebuildIntervalWindow(SStreamFinalIntervalOperatorInfo* pInfo, SArray *pWinArray,
+ int32_t groupId, int32_t numOfOutput, SExecTaskInfo* pTaskInfo) {
+ int32_t size = taosArrayGetSize(pWinArray);
+ ASSERT(pInfo->pChildren);
+ for (int32_t i = 0; i < size; i++) {
+ STimeWindow* pParentWin = taosArrayGet(pWinArray, i);
+ SResultRow* pCurResult = NULL;
+ setTimeWindowOutputBuf(&pInfo->binfo.resultRowInfo, pParentWin, true, &pCurResult, 0,
+ pInfo->binfo.pCtx, numOfOutput, pInfo->binfo.rowCellInfoOffset, &pInfo->aggSup,
+ pTaskInfo);
+ int32_t numOfChildren = taosArrayGetSize(pInfo->pChildren);
+ for (int32_t j = 0; j < numOfChildren; j++) {
+ SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, j);
+ SIntervalAggOperatorInfo* pChInfo = pChildOp->info;
+ SResultRow* pChResult = NULL;
+ setTimeWindowOutputBuf(&pChInfo->binfo.resultRowInfo, pParentWin, true, &pChResult,
+ 0, pChInfo->binfo.pCtx, pChildOp->numOfExprs, pChInfo->binfo.rowCellInfoOffset,
+ &pChInfo->aggSup, pTaskInfo);
+ compactFunctions(pInfo->binfo.pCtx, pChInfo->binfo.pCtx, numOfOutput, pTaskInfo);
+ }
+ }
+}
+
static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
SStreamFinalIntervalOperatorInfo* pInfo = pOperator->info;
SOperatorInfo* downstream = pOperator->pDownstream[0];
@@ -1703,19 +2021,34 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
}
while (1) {
- publishOperatorProfEvent(downstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
- publishOperatorProfEvent(downstream, QUERY_PROF_AFTER_OPERATOR_EXEC);
if (pBlock == NULL) {
break;
}
+
setInputDataBlock(pOperator, pInfo->binfo.pCtx, pBlock, pInfo->order, MAIN_SCAN, true);
if (pBlock->info.type == STREAM_REPROCESS) {
+ SArray *pUpWins = taosArrayInit(8, sizeof(STimeWindow));
doClearWindows(&pInfo->aggSup, &pInfo->binfo, &pInfo->interval,
- pInfo->primaryTsIndex, pOperator->numOfExprs, pBlock);
+ pInfo->primaryTsIndex, pOperator->numOfExprs, pBlock, pUpWins);
+ if (isFinalInterval(pInfo)) {
+ int32_t childIndex = 0; //Todo(liuyao) get child id from SSDataBlock
+ SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, childIndex);
+ SIntervalAggOperatorInfo* pChildInfo = pChildOp->info;
+ doClearWindows(&pChildInfo->aggSup, &pChildInfo->binfo, &pChildInfo->interval,
+ pChildInfo->primaryTsIndex, pChildOp->numOfExprs, pBlock, NULL);
+ rebuildIntervalWindow(pInfo, pUpWins, pInfo->binfo.pRes->info.groupId,
+ pOperator->numOfExprs, pOperator->pTaskInfo);
+ }
+ taosArrayDestroy(pUpWins);
continue;
}
- pUpdated = doHashInterval(pOperator, &pInfo->binfo.resultRowInfo, pBlock, 0);
+ if (isFinalInterval(pInfo)) {
+ int32_t chIndex = 1; //Todo(liuyao) get it from SSDataBlock
+ SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, chIndex);
+ doStreamIntervalAgg(pChildOp);
+ }
+ pUpdated = doHashInterval(pOperator, pBlock, 0);
}
finalizeUpdatedResult(pOperator->numOfExprs, pInfo->aggSup.pResultBuf, pUpdated, pInfo->binfo.rowCellInfoOffset);
@@ -1725,3 +2058,603 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
pOperator->status = OP_RES_TO_RETURN;
return pInfo->binfo.pRes->info.rows == 0 ? NULL : pInfo->binfo.pRes;
}
+
+void destroyStreamAggSupporter(SStreamAggSupporter* pSup) {
+ taosArrayDestroy(pSup->pResultRows);
+ taosMemoryFreeClear(pSup->pKeyBuf);
+ destroyDiskbasedBuf(pSup->pResultBuf);
+}
+
+void destroyStreamSessionAggOperatorInfo(void* param, int32_t numOfOutput) {
+ SStreamSessionAggOperatorInfo* pInfo = (SStreamSessionAggOperatorInfo*)param;
+ doDestroyBasicInfo(&pInfo->binfo, numOfOutput);
+ destroyStreamAggSupporter(&pInfo->streamAggSup);
+ cleanupGroupResInfo(&pInfo->groupResInfo);
+ if (pInfo->pChildren != NULL) {
+ int32_t size = taosArrayGetSize(pInfo->pChildren);
+ for (int32_t i = 0; i < size; i++) {
+ SOperatorInfo *pChild = taosArrayGetP(pInfo->pChildren, i);
+ SStreamSessionAggOperatorInfo* pChInfo = pChild->info;
+ destroyStreamSessionAggOperatorInfo(pChInfo, numOfOutput);
+ taosMemoryFreeClear(pChild);
+ taosMemoryFreeClear(pChInfo);
+ }
+ }
+}
+
+int32_t initBiasicInfo(SOptrBasicInfo* pBasicInfo, SExprInfo* pExprInfo,
+ int32_t numOfCols, SSDataBlock* pResultBlock, SDiskbasedBuf* pResultBuf) {
+ pBasicInfo->pCtx = createSqlFunctionCtx(pExprInfo, numOfCols, &pBasicInfo->rowCellInfoOffset);
+ pBasicInfo->pRes = pResultBlock;
+ for (int32_t i = 0; i < numOfCols; ++i) {
+ pBasicInfo->pCtx[i].pBuf = pResultBuf;
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+void initDummyFunction(SqlFunctionCtx* pDummy, SqlFunctionCtx* pCtx, int32_t nums) {
+ for (int i = 0; i < nums; i++) {
+ pDummy[i].functionId = pCtx[i].functionId;
+ }
+}
+void initDownStream(SOperatorInfo* downstream, SStreamSessionAggOperatorInfo* pInfo) {
+ ASSERT(downstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN);
+ SStreamBlockScanInfo* pScanInfo = downstream->info;
+ pScanInfo->sessionSup =
+ (SessionWindowSupporter){.pStreamAggSup = &pInfo->streamAggSup, .gap = pInfo->gap};
+ pScanInfo->pUpdateInfo = updateInfoInit(60000, TSDB_TIME_PRECISION_MILLI, 60000 * 60 * 6);
+}
+
+SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream,
+ SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, int64_t gap,
+ int32_t tsSlotId, STimeWindowAggSupp* pTwAggSupp, SExecTaskInfo* pTaskInfo) {
+ int32_t code = TSDB_CODE_OUT_OF_MEMORY;
+ SStreamSessionAggOperatorInfo* pInfo =
+ taosMemoryCalloc(1, sizeof(SStreamSessionAggOperatorInfo));
+ SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
+ if (pInfo == NULL || pOperator == NULL) {
+ goto _error;
+ }
+
+ initResultSizeInfo(pOperator, 4096);
+
+ code = initStreamAggSupporter(&pInfo->streamAggSup, "StreamSessionAggOperatorInfo");
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+
+ code = initBiasicInfo(&pInfo->binfo, pExprInfo, numOfCols, pResBlock,
+ pInfo->streamAggSup.pResultBuf);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+ pInfo->streamAggSup.resultRowSize = getResultRowSize(pInfo->binfo.pCtx, numOfCols);
+
+ pInfo->pDummyCtx = (SqlFunctionCtx*)taosMemoryCalloc(numOfCols, sizeof(SqlFunctionCtx));
+ if (pInfo->pDummyCtx == NULL) {
+ goto _error;
+ }
+ initDummyFunction(pInfo->pDummyCtx, pInfo->binfo.pCtx, numOfCols);
+
+ pInfo->twAggSup = *pTwAggSupp;
+ initResultRowInfo(&pInfo->binfo.resultRowInfo, 8);
+ initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window);
+
+ pInfo->primaryTsIndex = tsSlotId;
+ pInfo->gap = gap;
+ pInfo->binfo.pRes = pResBlock;
+ pInfo->order = TSDB_ORDER_ASC;
+ _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
+ pInfo->pStDeleted = taosHashInit(64, hashFn, true, HASH_NO_LOCK);
+ pInfo->pDelIterator = NULL;
+ pInfo->pDelRes = createOneDataBlock(pResBlock, false);
+ blockDataEnsureCapacity(pInfo->pDelRes, 64);
+ pInfo->pChildren = NULL;
+
+ pOperator->name = "StreamSessionWindowAggOperator";
+ pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW;
+ pOperator->blocking = true;
+ pOperator->status = OP_NOT_OPENED;
+ pOperator->pExpr = pExprInfo;
+ pOperator->numOfExprs = numOfCols;
+ pOperator->info = pInfo;
+ pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamSessionWindowAgg,
+ NULL, NULL, destroyStreamSessionAggOperatorInfo, aggEncodeResultRow,
+ aggDecodeResultRow, NULL);
+ pOperator->pTaskInfo = pTaskInfo;
+ initDownStream(downstream, pInfo);
+ code = appendDownstream(pOperator, &downstream, 1);
+ return pOperator;
+
+_error:
+ if (pInfo != NULL) {
+ destroyStreamSessionAggOperatorInfo(pInfo, numOfCols);
+ }
+
+ taosMemoryFreeClear(pInfo);
+ taosMemoryFreeClear(pOperator);
+ pTaskInfo->code = code;
+ return NULL;
+}
+
+int64_t getSessionWindowEndkey(void* data, int32_t index) {
+ SArray* pWinInfos = (SArray*) data;
+ SResultWindowInfo* pWin = taosArrayGet(pWinInfos, index);
+ return pWin->win.ekey;
+}
+static bool isInWindow(SResultWindowInfo* pWin, TSKEY ts, int64_t gap) {
+ int64_t sGap = ts - pWin->win.skey;
+ int64_t eGap = pWin->win.ekey - ts;
+ if ( (sGap < 0 && sGap >= -gap) || (eGap < 0 && eGap >= -gap) || (sGap >= 0 && eGap >= 0) ) {
+ return true;
+ }
+ return false;
+}
+
+static SResultWindowInfo* insertNewSessionWindow(SArray* pWinInfos, TSKEY ts,
+ int32_t index) {
+ SResultWindowInfo win =
+ {.pos.offset = -1, .pos.pageId = -1, .win.skey = ts, .win.ekey = ts, .isOutput = false};
+ return taosArrayInsert(pWinInfos, index, &win);
+}
+
+static SResultWindowInfo* addNewSessionWindow(SArray* pWinInfos, TSKEY ts) {
+ SResultWindowInfo win =
+ {.pos.offset = -1, .pos.pageId = -1, .win.skey = ts, .win.ekey = ts, .isOutput = false};
+ return taosArrayPush(pWinInfos, &win);
+}
+
+SResultWindowInfo* getSessionTimeWindow(SArray* pWinInfos, TSKEY ts, int64_t gap,
+ int32_t* pIndex) {
+ int32_t size = taosArrayGetSize(pWinInfos);
+ if (size == 0) {
+ return addNewSessionWindow(pWinInfos, ts);
+ }
+ // find the first position which is smaller than the key
+ int32_t index = binarySearch(pWinInfos, size, ts, TSDB_ORDER_DESC,
+ getSessionWindowEndkey);
+ SResultWindowInfo* pWin = NULL;
+ if (index >= 0) {
+ pWin = taosArrayGet(pWinInfos, index);
+ if (isInWindow(pWin, ts, gap)) {
+ *pIndex = index;
+ return pWin;
+ }
+ }
+
+ if (index + 1 < size) {
+ pWin = taosArrayGet(pWinInfos, index + 1);
+ if (isInWindow(pWin, ts, gap)) {
+ *pIndex = index + 1;
+ return pWin;
+ }
+ }
+
+ if (index == size - 1) {
+ *pIndex = taosArrayGetSize(pWinInfos);
+ return addNewSessionWindow(pWinInfos, ts);
+ }
+ *pIndex = index;
+ return insertNewSessionWindow(pWinInfos, ts, index);
+}
+
+int32_t updateSessionWindowInfo(SResultWindowInfo* pWinInfo, TSKEY* pTs, int32_t rows,
+ int32_t start, int64_t gap, SHashObj* pStDeleted) {
+ for (int32_t i = start; i < rows; ++i) {
+ if (!isInWindow(pWinInfo, pTs[i], gap)) {
+ return i - start;
+ }
+ if (pWinInfo->win.skey > pTs[i]) {
+ if (pStDeleted && pWinInfo->isOutput) {
+ taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &pWinInfo->win.skey, sizeof(TSKEY));
+ pWinInfo->isOutput = false;
+ }
+ pWinInfo->win.skey = pTs[i];
+ }
+ pWinInfo->win.ekey = TMAX(pWinInfo->win.ekey, pTs[i]);
+ }
+ return rows - start;
+}
+
+static int32_t setWindowOutputBuf(SResultWindowInfo* pWinInfo, SResultRow** pResult,
+ SqlFunctionCtx* pCtx, int32_t groupId, int32_t numOfOutput,
+ int32_t* rowCellInfoOffset, SStreamAggSupporter* pAggSup, SExecTaskInfo* pTaskInfo) {
+ assert(pWinInfo->win.skey <= pWinInfo->win.ekey);
+ // too many time window in query
+ int32_t size = taosArrayGetSize(pAggSup->pResultRows);
+ if (size > MAX_INTERVAL_TIME_WINDOW) {
+ longjmp(pTaskInfo->env, TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW);
+ }
+
+ if (pWinInfo->pos.pageId == -1) {
+ *pResult = getNewResultRow(pAggSup->pResultBuf, groupId, pAggSup->resultRowSize);
+ if (*pResult == NULL) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ initResultRow(*pResult);
+
+ // add a new result set for a new group
+ pWinInfo->pos.pageId = (*pResult)->pageId;
+ pWinInfo->pos.offset = (*pResult)->offset;
+ } else {
+ *pResult = getResultRowByPos(pAggSup->pResultBuf, &pWinInfo->pos);
+ if (!(*pResult)) {
+ qError("getResultRowByPos return NULL, TID:%s", GET_TASKID(pTaskInfo));
+ return TSDB_CODE_FAILED;
+ }
+ }
+
+ // set time window for current result
+ (*pResult)->win = pWinInfo->win;
+ setResultRowInitCtx(*pResult, pCtx, numOfOutput, rowCellInfoOffset);
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t doOneWindowAgg(SStreamSessionAggOperatorInfo* pInfo,
+ SSDataBlock* pSDataBlock, SResultWindowInfo* pCurWin, SResultRow** pResult,
+ int32_t startIndex, int32_t winRows, int32_t numOutput, SExecTaskInfo* pTaskInfo ) {
+ SColumnInfoData* pColDataInfo =
+ taosArrayGet(pSDataBlock->pDataBlock, pInfo->primaryTsIndex);
+ TSKEY* tsCols = (int64_t*)pColDataInfo->pData;
+ int32_t code = setWindowOutputBuf(pCurWin, pResult, pInfo->binfo.pCtx, pSDataBlock->info.groupId,
+ numOutput, pInfo->binfo.rowCellInfoOffset, &pInfo->streamAggSup, pTaskInfo);
+ if (code != TSDB_CODE_SUCCESS || (*pResult) == NULL) {
+ return TSDB_CODE_QRY_OUT_OF_MEMORY;
+ }
+ updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pCurWin->win, true);
+ doApplyFunctions(pTaskInfo, pInfo->binfo.pCtx, &pCurWin->win,
+ &pInfo->twAggSup.timeWindowData, startIndex, winRows, tsCols, pSDataBlock->info.rows,
+ numOutput, TSDB_ORDER_ASC);
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t copyWinInfoToDataBlock(SSDataBlock* pBlock, SStreamAggSupporter* pAggSup,
+ int32_t start, int32_t num, int32_t numOfExprs, SOptrBasicInfo* pBinfo) {
+ for (int32_t i = start; i < num; i += 1) {
+ SResultWindowInfo* pWinInfo = taosArrayGet(pAggSup->pResultRows, start);
+ SFilePage* bufPage = getBufPage(pAggSup->pResultBuf, pWinInfo->pos.pageId);
+ SResultRow* pRow = (SResultRow*)((char*)bufPage + pWinInfo->pos.offset);
+ for (int32_t j = 0; j < numOfExprs; ++j) {
+ SResultRowEntryInfo* pResultInfo = getResultCell(pRow, j, pBinfo->rowCellInfoOffset);
+ SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, j);
+ char* in = GET_ROWCELL_INTERBUF(pBinfo->pCtx[j].resultInfo);
+ colDataAppend(pColInfoData, pBlock->info.rows, in, pResultInfo->isNullRes);
+ }
+ pBlock->info.rows += pRow->numOfRows;
+ releaseBufPage(pAggSup->pResultBuf, bufPage);
+ }
+ blockDataUpdateTsWindow(pBlock, -1);
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t getNumCompactWindow(SArray* pWinInfos, int32_t startIndex, int64_t gap) {
+ SResultWindowInfo* pCurWin = taosArrayGet(pWinInfos, startIndex);
+ int32_t size = taosArrayGetSize(pWinInfos);
+ // Just look for the window behind StartIndex
+ for (int32_t i = startIndex + 1; i < size; i++) {
+ SResultWindowInfo* pWinInfo = taosArrayGet(pWinInfos, i);
+ if (!isInWindow(pCurWin, pWinInfo->win.skey, gap)) {
+ return i - startIndex - 1;
+ }
+ }
+
+ return size - startIndex - 1;
+}
+
+void compactTimeWindow(SStreamSessionAggOperatorInfo* pInfo, int32_t startIndex, int32_t num,
+ int32_t groupId, int32_t numOfOutput, SExecTaskInfo* pTaskInfo, SHashObj* pStUpdated, SHashObj* pStDeleted) {
+ SResultWindowInfo* pCurWin = taosArrayGet(pInfo->streamAggSup.pResultRows, startIndex);
+ SResultRow* pCurResult = NULL;
+ setWindowOutputBuf(pCurWin, &pCurResult, pInfo->binfo.pCtx, groupId,
+ numOfOutput, pInfo->binfo.rowCellInfoOffset, &pInfo->streamAggSup, pTaskInfo);
+ num += startIndex + 1;
+ ASSERT(num <= taosArrayGetSize(pInfo->streamAggSup.pResultRows));
+ // Just look for the window behind StartIndex
+ for (int32_t i = startIndex + 1; i < num; i++) {
+ SResultWindowInfo* pWinInfo = taosArrayGet(pInfo->streamAggSup.pResultRows, i);
+ SResultRow* pWinResult = NULL;
+ setWindowOutputBuf(pWinInfo, &pWinResult, pInfo->pDummyCtx, groupId,
+ numOfOutput, pInfo->binfo.rowCellInfoOffset, &pInfo->streamAggSup, pTaskInfo);
+ pCurWin->win.ekey = TMAX(pCurWin->win.ekey, pWinInfo->win.ekey);
+ compactFunctions(pInfo->binfo.pCtx, pInfo->pDummyCtx, numOfOutput, pTaskInfo);
+ taosHashRemove(pStUpdated, &pWinInfo->pos, sizeof(SResultRowPosition));
+ if (pWinInfo->isOutput) {
+ taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &pWinInfo->win.skey, sizeof(TSKEY));
+ pWinInfo->isOutput = false;
+ }
+ taosArrayRemove(pInfo->streamAggSup.pResultRows, i);
+ }
+}
+
+static void doStreamSessionWindowAggImpl(SOperatorInfo* pOperator,
+ SSDataBlock* pSDataBlock, SHashObj* pStUpdated, SHashObj* pStDeleted) {
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStreamSessionAggOperatorInfo* pInfo = pOperator->info;
+ bool masterScan = true;
+ int32_t numOfOutput = pOperator->numOfExprs;
+ int64_t groupId = pSDataBlock->info.groupId;
+ int64_t gap = pInfo->gap;
+ int64_t code = TSDB_CODE_SUCCESS;
+
+ int32_t step = 1;
+ bool ascScan = true;
+ TSKEY* tsCols = NULL;
+ SResultRow* pResult = NULL;
+ int32_t winRows = 0;
+
+ if (pSDataBlock->pDataBlock != NULL) {
+ SColumnInfoData* pColDataInfo =
+ taosArrayGet(pSDataBlock->pDataBlock, pInfo->primaryTsIndex);
+ tsCols = (int64_t*)pColDataInfo->pData;
+ } else {
+ return ;
+ }
+
+ SStreamAggSupporter* pAggSup = &pInfo->streamAggSup;
+ for(int32_t i = 0; i < pSDataBlock->info.rows; ) {
+ int32_t winIndex = 0;
+ SResultWindowInfo* pCurWin =
+ getSessionTimeWindow(pAggSup->pResultRows, tsCols[i], gap, &winIndex);
+ winRows =
+ updateSessionWindowInfo(pCurWin, tsCols, pSDataBlock->info.rows, i, pInfo->gap, pStDeleted);
+ code = doOneWindowAgg(pInfo, pSDataBlock, pCurWin, &pResult, i, winRows, numOfOutput, pTaskInfo);
+ if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
+ longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ }
+ // window start(end) key interpolation
+ // doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->binfo.pCtx, pResult, &nextWin, startPos, forwardRows,
+ // pInfo->order, false);
+ int32_t winNum = getNumCompactWindow(pAggSup->pResultRows, winIndex, gap);
+ if (winNum > 0) {
+ compactTimeWindow(pInfo, winIndex, winNum, groupId, numOfOutput, pTaskInfo, pStUpdated, pStDeleted);
+ }
+ pCurWin->isClosed = false;
+ if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) {
+ code = taosHashPut(pStUpdated, &pCurWin->pos, sizeof(SResultRowPosition), &(pCurWin->win.skey), sizeof(TSKEY));
+ if (code != TSDB_CODE_SUCCESS) {
+ longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ }
+ pCurWin->isOutput = true;
+ }
+ i += winRows;
+ }
+}
+
+static void doClearSessionWindows(SStreamAggSupporter* pAggSup, SOptrBasicInfo* pBinfo,
+ SSDataBlock* pBlock, int32_t tsIndex, int32_t numOfOutput, int64_t gap, SArray* result) {
+ SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, tsIndex);
+ TSKEY *tsCols = (TSKEY*)pColDataInfo->pData;
+ int32_t step = 0;
+ for (int32_t i = 0; i < pBlock->info.rows; i += step) {
+ int32_t winIndex = 0;
+ SResultWindowInfo* pCurWin =
+ getSessionTimeWindow(pAggSup->pResultRows, tsCols[i], gap, &winIndex);
+ step = updateSessionWindowInfo(pCurWin, tsCols, pBlock->info.rows, i, gap, NULL);
+ ASSERT(isInWindow(pCurWin, tsCols[i], gap));
+ doClearWindowImpl(&pCurWin->pos, pAggSup->pResultBuf, pBinfo, numOfOutput);
+ if (result) {
+ taosArrayPush(result, pCurWin);
+ }
+ }
+}
+
+static int32_t copyUpdateResult(SHashObj* pStUpdated, SArray* pUpdated, int32_t groupId) {
+ void* pData = NULL;
+ size_t keyLen = 0;
+ while((pData = taosHashIterate(pStUpdated, pData)) != NULL) {
+ void* key = taosHashGetKey(pData, &keyLen);
+ ASSERT(keyLen == sizeof(SResultRowPosition));
+ SResKeyPos* pos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t));
+ if (pos == NULL) {
+ return TSDB_CODE_QRY_OUT_OF_MEMORY;
+ }
+ pos->groupId = groupId;
+ pos->pos = *(SResultRowPosition*)key;
+ *(int64_t*)pos->key = *(uint64_t*)pData;
+ taosArrayPush(pUpdated, &pos);
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+void doBuildDeleteDataBlock(SHashObj* pStDeleted, SSDataBlock* pBlock, void** Ite) {
+ blockDataCleanup(pBlock);
+ size_t keyLen = 0;
+ while(( (*Ite) = taosHashIterate(pStDeleted, *Ite)) != NULL) {
+ SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, 0);
+ colDataAppend(pColInfoData, pBlock->info.rows, *Ite, false);
+ for (int32_t i = 1; i < pBlock->info.numOfCols; i++) {
+ pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
+ colDataAppendNULL(pColInfoData, pBlock->info.rows);
+ }
+ pBlock->info.rows += 1;
+ if (pBlock->info.rows + 1 >= pBlock->info.capacity) {
+ break;
+ }
+ }
+ if ((*Ite) == NULL) {
+ taosHashClear(pStDeleted);
+ }
+}
+
+static void rebuildTimeWindow(SStreamSessionAggOperatorInfo* pInfo, SArray *pWinArray,
+ int32_t groupId, int32_t numOfOutput, SExecTaskInfo* pTaskInfo) {
+ int32_t size = taosArrayGetSize(pWinArray);
+ ASSERT(pInfo->pChildren);
+ for (int32_t i = 0; i < size; i++) {
+ SResultWindowInfo* pParentWin = taosArrayGet(pWinArray, i);
+ SResultRow* pCurResult = NULL;
+ setWindowOutputBuf(pParentWin, &pCurResult, pInfo->binfo.pCtx, groupId,
+ numOfOutput, pInfo->binfo.rowCellInfoOffset, &pInfo->streamAggSup, pTaskInfo);
+ int32_t numOfChildren = taosArrayGetSize(pInfo->pChildren);
+ for (int32_t j = 0; j < numOfChildren; j++) {
+ SOperatorInfo* pChild = taosArrayGetP(pInfo->pChildren, j);
+ SStreamSessionAggOperatorInfo* pChInfo = pChild->info;
+ SArray* pChWins = pChInfo->streamAggSup.pResultRows;
+ int32_t chWinSize = taosArrayGetSize(pChWins);
+ int32_t index = binarySearch(pChWins, chWinSize, pParentWin->win.skey,
+ TSDB_ORDER_DESC, getSessionWindowEndkey);
+ for (int32_t k = index; k > 0 && k < chWinSize; k++) {
+ SResultWindowInfo* pcw = taosArrayGet(pChWins, k);
+ if (pParentWin->win.skey <= pcw->win.skey && pcw->win.ekey <= pParentWin->win.ekey) {
+ SResultRow* pChResult = NULL;
+ setWindowOutputBuf(pcw, &pChResult, pChInfo->binfo.pCtx, groupId,
+ numOfOutput, pChInfo->binfo.rowCellInfoOffset, &pChInfo->streamAggSup, pTaskInfo);
+ compactFunctions(pInfo->binfo.pCtx, pChInfo->binfo.pCtx, numOfOutput, pTaskInfo);
+ continue;
+ }
+ break;
+ }
+ }
+ }
+}
+
+bool isFinalSession(SStreamSessionAggOperatorInfo* pInfo) {
+ return pInfo->pChildren != NULL;
+}
+
+int32_t closeSessionWindow(SArray *pWins, STimeWindowAggSupp *pTwSup, SArray *pClosed,
+ int8_t calTrigger) {
+ // Todo(liuyao) save window to tdb
+ int32_t size = taosArrayGetSize(pWins);
+ for (int32_t i = 0; i < size; i++) {
+ SResultWindowInfo *pSeWin = taosArrayGet(pWins, i);
+ if (pSeWin->win.ekey < pTwSup->maxTs - pTwSup->waterMark) {
+ if (!pSeWin->isClosed) {
+ SResKeyPos* pos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t));
+ if (pos == NULL) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ pos->groupId = 0;
+ pos->pos = pSeWin->pos;
+ *(int64_t*)pos->key = pSeWin->win.ekey;
+ if (!taosArrayPush(pClosed, &pos)) {
+ taosMemoryFree(pos);
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ pSeWin->isClosed = true;
+ if (calTrigger == STREAM_TRIGGER_WINDOW_CLOSE) {
+ pSeWin->isOutput = true;
+ }
+ }
+ continue;
+ }
+ break;
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static SSDataBlock* doStreamSessionWindowAgg(SOperatorInfo* pOperator) {
+ if (pOperator->status == OP_EXEC_DONE) {
+ return NULL;
+ }
+
+ SStreamSessionAggOperatorInfo* pInfo = pOperator->info;
+ SOptrBasicInfo* pBInfo = &pInfo->binfo;
+ if (pOperator->status == OP_RES_TO_RETURN) {
+ doBuildDeleteDataBlock(pInfo->pStDeleted, pInfo->pDelRes, &pInfo->pDelIterator);
+ if (pInfo->pDelRes->info.rows > 0) {
+ return pInfo->pDelRes;
+ }
+ doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo,
+ pInfo->streamAggSup.pResultBuf);
+ if (pBInfo->pRes->info.rows == 0 ||
+ !hashRemainDataInGroupInfo(&pInfo->groupResInfo)) {
+ doSetOperatorCompleted(pOperator);
+ }
+ return pBInfo->pRes->info.rows == 0 ? NULL : pBInfo->pRes;
+ }
+
+ _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
+ SHashObj* pStUpdated = taosHashInit(64, hashFn, true, HASH_NO_LOCK);
+ SOperatorInfo* downstream = pOperator->pDownstream[0];
+ while (1) {
+ SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
+ if (pBlock == NULL) {
+ break;
+ }
+ // the pDataBlock are always the same one, no need to call this again
+ setInputDataBlock(pOperator, pBInfo->pCtx, pBlock, TSDB_ORDER_ASC, MAIN_SCAN, true);
+ if (pBlock->info.type == STREAM_REPROCESS) {
+ SArray *pWins = taosArrayInit(16, sizeof(SResultWindowInfo));
+ doClearSessionWindows(&pInfo->streamAggSup, &pInfo->binfo, pBlock, 0,
+ pOperator->numOfExprs, pInfo->gap, pWins);
+ if (isFinalSession(pInfo)) {
+ int32_t childIndex = 0; //Todo(liuyao) get child id from SSDataBlock
+ SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, childIndex);
+ SStreamSessionAggOperatorInfo* pChildInfo = pChildOp->info;
+ doClearSessionWindows(&pChildInfo->streamAggSup, &pChildInfo->binfo, pBlock, 0,
+ pChildOp->numOfExprs, pChildInfo->gap, NULL);
+ rebuildTimeWindow(pInfo, pWins, pInfo->binfo.pRes->info.groupId, pOperator->numOfExprs, pOperator->pTaskInfo);
+ }
+ taosArrayDestroy(pWins);
+ continue;
+ }
+ if (isFinalSession(pInfo)) {
+ int32_t childIndex = 0; //Todo(liuyao) get child id from SSDataBlock
+ SOptrBasicInfo* pChildOp = taosArrayGetP(pInfo->pChildren, childIndex);
+ doStreamSessionWindowAggImpl(pOperator, pBlock, NULL, NULL);
+ }
+ doStreamSessionWindowAggImpl(pOperator, pBlock, pStUpdated, pInfo->pStDeleted);
+ pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey);
+ }
+ // restore the value
+ pOperator->status = OP_RES_TO_RETURN;
+
+ SArray* pClosed = taosArrayInit(16, POINTER_BYTES);
+ closeSessionWindow(pInfo->streamAggSup.pResultRows, &pInfo->twAggSup, pClosed,
+ pInfo->twAggSup.calTrigger);
+ SArray* pUpdated = taosArrayInit(16, POINTER_BYTES);
+ copyUpdateResult(pStUpdated, pUpdated, pBInfo->pRes->info.groupId);
+ taosHashCleanup(pStUpdated);
+ if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_WINDOW_CLOSE) {
+ taosArrayAddAll(pUpdated, pClosed);
+ }
+
+ finalizeUpdatedResult(pOperator->numOfExprs, pInfo->streamAggSup.pResultBuf, pUpdated,
+ pInfo->binfo.rowCellInfoOffset);
+ initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated);
+ blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity);
+ doBuildDeleteDataBlock(pInfo->pStDeleted, pInfo->pDelRes, &pInfo->pDelIterator);
+ if (pInfo->pDelRes->info.rows > 0) {
+ return pInfo->pDelRes;
+ }
+ doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo,
+ pInfo->streamAggSup.pResultBuf);
+ return pBInfo->pRes->info.rows == 0 ? NULL : pBInfo->pRes;
+}
+
+SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream,
+ SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, int64_t gap,
+ int32_t tsSlotId, STimeWindowAggSupp* pTwAggSupp, SExecTaskInfo* pTaskInfo) {
+ int32_t code = TSDB_CODE_OUT_OF_MEMORY;
+ SStreamSessionAggOperatorInfo* pInfo = NULL;
+ SOperatorInfo* pOperator = createStreamSessionAggOperatorInfo(downstream, pExprInfo,
+ numOfCols, pResBlock, gap, tsSlotId, pTwAggSupp, pTaskInfo);
+ if (pOperator == NULL) {
+ goto _error;
+ }
+ pOperator->name = "StreamFinalSessionWindowAggOperator";
+ pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION_WINDOW;
+ int32_t numOfChild = 1; //Todo(liuyao) get it from phy plan
+ pInfo = pOperator->info;
+ pInfo->pChildren = taosArrayInit(8, sizeof(void *));
+ for (int32_t i = 0; i < numOfChild; i++) {
+ SOperatorInfo* pChild = createStreamSessionAggOperatorInfo(NULL, pExprInfo,
+ numOfCols, NULL, gap, tsSlotId, pTwAggSupp, pTaskInfo);
+ if (pChild == NULL) {
+ goto _error;
+ }
+ taosArrayPush(pInfo->pChildren, &pChild);
+ }
+ return pOperator;
+
+_error:
+ if (pInfo != NULL) {
+ destroyStreamSessionAggOperatorInfo(pInfo, numOfCols);
+ }
+
+ taosMemoryFreeClear(pInfo);
+ taosMemoryFreeClear(pOperator);
+ pTaskInfo->code = code;
+ return NULL;
+}
diff --git a/source/libs/executor/src/tsort.c b/source/libs/executor/src/tsort.c
index c826cb68bfaa5053d6c6cf09aa409f4518df9dfc..7581836d595b2a01e119ddbbdea24b7cd9cb6a74 100644
--- a/source/libs/executor/src/tsort.c
+++ b/source/libs/executor/src/tsort.c
@@ -31,20 +31,16 @@ struct STupleHandle {
struct SSortHandle {
int32_t type;
-
int32_t pageSize;
int32_t numOfPages;
SDiskbasedBuf *pBuf;
SArray *pSortInfo;
- SArray *pIndexMap;
SArray *pOrderedSource;
- _sort_fetch_block_fn_t fetchfp;
- _sort_merge_compar_fn_t comparFn;
- SMultiwayMergeTreeInfo *pMergeTree;
- int64_t startTs;
+ int32_t loops;
uint64_t sortElapsed;
+ int64_t startTs;
uint64_t totalElapsed;
int32_t sourceId;
@@ -53,13 +49,15 @@ struct SSortHandle {
int32_t numOfCompletedSources;
bool opened;
const char *idStr;
-
bool inMemSort;
bool needAdjust;
STupleHandle tupleHandle;
-
void *param;
void (*beforeFp)(SSDataBlock* pBlock, void* param);
+
+ _sort_fetch_block_fn_t fetchfp;
+ _sort_merge_compar_fn_t comparFn;
+ SMultiwayMergeTreeInfo *pMergeTree;
};
static int32_t msortComparFn(const void *pLeft, const void *pRight, void *param);
@@ -80,7 +78,7 @@ SSortHandle* tsortCreateSortHandle(SArray* pSortInfo, SArray* pIndexMap, int32_t
pSortHandle->pageSize = pageSize;
pSortHandle->numOfPages = numOfPages;
pSortHandle->pSortInfo = pSortInfo;
- pSortHandle->pIndexMap = pIndexMap;
+ pSortHandle->loops = 0;
if (pBlock != NULL) {
pSortHandle->pDataBlock = createOneDataBlock(pBlock, false);
@@ -415,6 +413,9 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) {
int32_t numOfRows = blockDataGetCapacityInRow(pHandle->pDataBlock, pHandle->pageSize);
blockDataEnsureCapacity(pHandle->pDataBlock, numOfRows);
+ // the initial pass + sortPass + final mergePass
+ pHandle->loops = sortPass + 2;
+
size_t numOfSorted = taosArrayGetSize(pHandle->pOrderedSource);
for(int32_t t = 0; t < sortPass; ++t) {
int64_t st = taosGetTimestampUs();
@@ -502,12 +503,13 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) {
return 0;
}
-static int32_t createInitialSortedMultiSources(SSortHandle* pHandle) {
+static int32_t createInitialSources(SSortHandle* pHandle) {
size_t sortBufSize = pHandle->numOfPages * pHandle->pageSize;
if (pHandle->type == SORT_SINGLESOURCE_SORT) {
SSortSource* source = taosArrayGetP(pHandle->pOrderedSource, 0);
taosArrayClear(pHandle->pOrderedSource);
+
while (1) {
SSDataBlock* pBlock = pHandle->fetchfp(source->param);
if (pBlock == NULL) {
@@ -524,6 +526,7 @@ static int32_t createInitialSortedMultiSources(SSortHandle* pHandle) {
} else {
pHandle->pageSize = 4096;
}
+
// todo!!
pHandle->numOfPages = 1024;
sortBufSize = pHandle->numOfPages * pHandle->pageSize;
@@ -535,7 +538,7 @@ static int32_t createInitialSortedMultiSources(SSortHandle* pHandle) {
}
// todo relocate the columns
- int32_t code = blockDataMerge(pHandle->pDataBlock, pBlock, pHandle->pIndexMap);
+ int32_t code = blockDataMerge(pHandle->pDataBlock, pBlock);
if (code != 0) {
return code;
}
@@ -569,6 +572,7 @@ static int32_t createInitialSortedMultiSources(SSortHandle* pHandle) {
pHandle->cmpParam.numOfSources = 1;
pHandle->inMemSort = true;
+ pHandle->loops = 1;
pHandle->tupleHandle.rowIndex = -1;
pHandle->tupleHandle.pBlock = pHandle->pDataBlock;
return 0;
@@ -592,7 +596,7 @@ int32_t tsortOpen(SSortHandle* pHandle) {
pHandle->opened = true;
- int32_t code = createInitialSortedMultiSources(pHandle);
+ int32_t code = createInitialSources(pHandle);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@@ -692,3 +696,20 @@ void* tsortGetValue(STupleHandle* pVHandle, int32_t colIndex) {
SColumnInfoData* pColInfo = TARRAY_GET_ELEM(pVHandle->pBlock->pDataBlock, colIndex);
return colDataGetData(pColInfo, pVHandle->rowIndex);
}
+
+SSortExecInfo tsortGetSortExecInfo(SSortHandle* pHandle) {
+ SSortExecInfo info = {0};
+
+ info.sortBuffer = pHandle->pageSize * pHandle->numOfPages;
+ info.sortMethod = pHandle->inMemSort? SORT_QSORT_T:SORT_SPILLED_MERGE_SORT_T;
+ info.loops = pHandle->loops;
+
+ if (pHandle->pBuf != NULL) {
+ SDiskbasedBufStatis st = getDBufStatis(pHandle->pBuf);
+ info.writeBytes = st.flushBytes;
+ info.readBytes = st.loadBytes;
+ }
+
+ return info;
+}
+
diff --git a/source/libs/function/CMakeLists.txt b/source/libs/function/CMakeLists.txt
index 7a4cd8092205786065015252432dcb4de0a1db41..ea401e56e5c6585b93344af99280bb450137f98f 100644
--- a/source/libs/function/CMakeLists.txt
+++ b/source/libs/function/CMakeLists.txt
@@ -14,7 +14,7 @@ target_include_directories(
target_link_libraries(
function
- PRIVATE os util common nodes scalar catalog qcom transport
+ PRIVATE os util common nodes scalar qcom transport
PUBLIC uv_a
)
diff --git a/source/libs/function/inc/builtins.h b/source/libs/function/inc/builtins.h
index 3a753325bdffc3886af44a1f06a8a6d1a1dcd31b..bc91875006b0c45162f52505084c9971b17e5429 100644
--- a/source/libs/function/inc/builtins.h
+++ b/source/libs/function/inc/builtins.h
@@ -26,21 +26,24 @@ typedef int32_t (*FTranslateFunc)(SFunctionNode* pFunc, char* pErrBuf, int32_t l
typedef EFuncDataRequired (*FFuncDataRequired)(SFunctionNode* pFunc, STimeWindow* pTimeWindow);
typedef struct SBuiltinFuncDefinition {
- char name[FUNCTION_NAME_MAX_LENGTH];
- EFunctionType type;
- uint64_t classification;
- FTranslateFunc translateFunc;
- FFuncDataRequired dataRequiredFunc;
- FExecGetEnv getEnvFunc;
- FExecInit initFunc;
- FExecProcess processFunc;
+ const char* name;
+ EFunctionType type;
+ uint64_t classification;
+ FTranslateFunc translateFunc;
+ FFuncDataRequired dataRequiredFunc;
+ FExecGetEnv getEnvFunc;
+ FExecInit initFunc;
+ FExecProcess processFunc;
FScalarExecProcess sprocessFunc;
- FExecFinalize finalizeFunc;
- FExecProcess invertFunc;
+ FExecFinalize finalizeFunc;
+ FExecProcess invertFunc;
+ FExecCombine combineFunc;
+ const char* pPartialFunc;
+ const char* pMergeFunc;
} SBuiltinFuncDefinition;
extern const SBuiltinFuncDefinition funcMgtBuiltins[];
-extern const int funcMgtBuiltinsNum;
+extern const int funcMgtBuiltinsNum;
#ifdef __cplusplus
}
diff --git a/source/libs/function/inc/builtinsimpl.h b/source/libs/function/inc/builtinsimpl.h
index 3e2ccbc6b8fd86926f576eee274efa233a6ed95c..68b83f4a1955c72e119dcadd5d409ce10639e5e1 100644
--- a/source/libs/function/inc/builtinsimpl.h
+++ b/source/libs/function/inc/builtinsimpl.h
@@ -27,6 +27,7 @@ bool functionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo);
int32_t functionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
int32_t dummyProcess(SqlFunctionCtx* UNUSED_PARAM(pCtx));
int32_t functionFinalizeWithResultBuf(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, char* finalResult);
+int32_t combineFunction(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx);
EFuncDataRequired countDataRequired(SFunctionNode* pFunc, STimeWindow* pTimeWindow);
bool getCountFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
@@ -37,24 +38,29 @@ EFuncDataRequired statisDataRequired(SFunctionNode* pFunc, STimeWindow* pTimeWin
bool getSumFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
int32_t sumFunction(SqlFunctionCtx *pCtx);
int32_t sumInvertFunction(SqlFunctionCtx *pCtx);
+int32_t sumCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx);
bool minmaxFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo);
bool getMinmaxFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
int32_t minFunction(SqlFunctionCtx* pCtx);
int32_t maxFunction(SqlFunctionCtx *pCtx);
int32_t minmaxFunctionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
+int32_t minCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx);
+int32_t maxCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx);
bool getAvgFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
bool avgFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo);
int32_t avgFunction(SqlFunctionCtx* pCtx);
int32_t avgFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
int32_t avgInvertFunction(SqlFunctionCtx* pCtx);
+int32_t avgCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx);
bool getStddevFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
bool stddevFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo);
int32_t stddevFunction(SqlFunctionCtx* pCtx);
int32_t stddevFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
int32_t stddevInvertFunction(SqlFunctionCtx* pCtx);
+int32_t stddevCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx);
bool getLeastSQRFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
bool leastSQRFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo);
@@ -67,6 +73,11 @@ bool percentileFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultI
int32_t percentileFunction(SqlFunctionCtx *pCtx);
int32_t percentileFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
+bool getApercentileFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
+bool apercentileFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo);
+int32_t apercentileFunction(SqlFunctionCtx *pCtx);
+int32_t apercentileFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
+
bool getDiffFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
bool diffFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResInfo);
int32_t diffFunction(SqlFunctionCtx *pCtx);
@@ -74,7 +85,9 @@ int32_t diffFunction(SqlFunctionCtx *pCtx);
bool getFirstLastFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
int32_t firstFunction(SqlFunctionCtx *pCtx);
int32_t lastFunction(SqlFunctionCtx *pCtx);
-int32_t lastFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
+int32_t firstLastFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
+int32_t firstCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx);
+int32_t lastCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx);
bool getTopBotFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv);
int32_t topFunction(SqlFunctionCtx *pCtx);
@@ -127,6 +140,10 @@ bool uniqueFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo)
int32_t uniqueFunction(SqlFunctionCtx *pCtx);
//int32_t uniqueFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
+bool getTwaFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
+bool twaFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo);
+int32_t twaFunction(SqlFunctionCtx *pCtx);
+int32_t twaFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock* pBlock);
bool getSelectivityFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv);
diff --git a/source/libs/function/inc/functionMgtInt.h b/source/libs/function/inc/functionMgtInt.h
index 21d277665872fc520ecea0fe6157b8338789499b..29dd0bcd90d6297ca539bad8a5c5cd78ff151d1d 100644
--- a/source/libs/function/inc/functionMgtInt.h
+++ b/source/libs/function/inc/functionMgtInt.h
@@ -44,9 +44,7 @@ extern "C" {
#define FUNC_MGT_TEST_MASK(val, mask) (((val) & (mask)) != 0)
-#define FUNC_UDF_ID_START 5000
-#define FUNC_AGGREGATE_UDF_ID 5001
-#define FUNC_SCALAR_UDF_ID 5002
+#define FUNC_UDF_ID_START 5000
extern const int funcMgtUdfNum;
diff --git a/source/libs/function/inc/taggfunction.h b/source/libs/function/inc/taggfunction.h
index d779cf50f4ce019ddcea41b71720347d54a34e96..c3d61d426d889cecda0723b48c6c26eae16316ff 100644
--- a/source/libs/function/inc/taggfunction.h
+++ b/source/libs/function/inc/taggfunction.h
@@ -52,13 +52,6 @@ typedef struct SInterpInfoDetail {
int8_t primaryCol;
} SInterpInfoDetail;
-typedef struct STwaInfo {
- int8_t hasResult; // flag to denote has value
- double dOutput;
- SPoint1 p;
- STimeWindow win;
-} STwaInfo;
-
bool topbot_datablock_filter(SqlFunctionCtx *pCtx, const char *minval, const char *maxval);
/**
diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c
index 2cec75c8d3f03270613241ed44973502ff1e72fb..5d69cc6189381b6ec147656834589bfe1fb15d98 100644
--- a/source/libs/function/src/builtins.c
+++ b/source/libs/function/src/builtins.c
@@ -18,7 +18,6 @@
#include "querynodes.h"
#include "scalar.h"
#include "taoserror.h"
-#include "tdatablock.h"
static int32_t buildFuncErrMsg(char* pErrBuf, int32_t len, int32_t errCode, const char* pFormat, ...) {
va_list vArgList;
@@ -103,6 +102,28 @@ static int32_t translateInOutStr(SFunctionNode* pFunc, char* pErrBuf, int32_t le
return TSDB_CODE_SUCCESS;
}
+static int32_t translateLogarithm(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
+ int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
+ if (1 != numOfParams && 2 != numOfParams) {
+ return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
+ }
+
+ uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
+ if (!IS_NUMERIC_TYPE(para1Type)) {
+ return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+ }
+
+ if (2 == numOfParams) {
+ uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type;
+ if (!IS_NUMERIC_TYPE(para2Type)) {
+ return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+ }
+ }
+
+ pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE};
+ return TSDB_CODE_SUCCESS;
+}
+
static int32_t translateCount(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
if (1 != LIST_LENGTH(pFunc->pParameterList)) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
@@ -156,17 +177,34 @@ static int32_t translatePercentile(SFunctionNode* pFunc, char* pErrBuf, int32_t
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
+ // param0
+ SNode* pParamNode0 = nodesListGetNode(pFunc->pParameterList, 0);
+ if (nodeType(pParamNode0) != QUERY_NODE_COLUMN) {
+ return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
+ "The first parameter of PERCENTILE function can only be column");
+ }
+
+ // param1
+ SValueNode* pValue = (SValueNode*)nodesListGetNode(pFunc->pParameterList, 1);
+
+ if (pValue->datum.i < 0 || pValue->datum.i > 100) {
+ return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName);
+ }
+
+ pValue->notReserved = true;
+
uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type;
if (!IS_NUMERIC_TYPE(para1Type) || (!IS_SIGNED_NUMERIC_TYPE(para2Type) && !IS_UNSIGNED_NUMERIC_TYPE(para2Type))) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
+ // set result type
pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE};
return TSDB_CODE_SUCCESS;
}
-static bool validAperventileAlgo(const SValueNode* pVal) {
+static bool validateApercentileAlgo(const SValueNode* pVal) {
if (TSDB_DATA_TYPE_BINARY != pVal->node.resType.type) {
return false;
}
@@ -175,35 +213,52 @@ static bool validAperventileAlgo(const SValueNode* pVal) {
}
static int32_t translateApercentile(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
- int32_t paraNum = LIST_LENGTH(pFunc->pParameterList);
- if (2 != paraNum && 3 != paraNum) {
+ int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
+ if (2 != numOfParams && 3 != numOfParams) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
- uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
- uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type;
- if (!IS_NUMERIC_TYPE(para1Type) || !IS_INTEGER_TYPE(para2Type)) {
- return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+ // param0
+ SNode* pParamNode0 = nodesListGetNode(pFunc->pParameterList, 0);
+ if (nodeType(pParamNode0) != QUERY_NODE_COLUMN) {
+ return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
+ "The first parameter of APERCENTILE function can only be column");
}
- SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, 1);
- if (nodeType(pParamNode) != QUERY_NODE_VALUE) {
+ // param1
+ SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1);
+ if (nodeType(pParamNode1) != QUERY_NODE_VALUE) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
-
- SValueNode* pValue = (SValueNode*)pParamNode;
+
+ SValueNode* pValue = (SValueNode*)pParamNode1;
if (pValue->datum.i < 0 || pValue->datum.i > 100) {
return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName);
}
pValue->notReserved = true;
-
- if (3 == paraNum) {
- SNode* pPara3 = nodesListGetNode(pFunc->pParameterList, 2);
- if (QUERY_NODE_VALUE != nodeType(pPara3) || !validAperventileAlgo((SValueNode*)pPara3)) {
+
+ uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
+ uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type;
+ if (!IS_NUMERIC_TYPE(para1Type) || !IS_INTEGER_TYPE(para2Type)) {
+ return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+ }
+
+ // param2
+ if (3 == numOfParams) {
+ uint8_t para3Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type;
+ if (!IS_VAR_DATA_TYPE(para3Type)) {
+ return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+ }
+
+ SNode* pParamNode2 = nodesListGetNode(pFunc->pParameterList, 2);
+ if (QUERY_NODE_VALUE != nodeType(pParamNode2) || !validateApercentileAlgo((SValueNode*)pParamNode2)) {
return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
"Third parameter algorithm of apercentile must be 'default' or 't-digest'");
}
+
+ pValue = (SValueNode*)pParamNode2;
+ pValue->notReserved = true;
}
pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE};
@@ -218,17 +273,31 @@ static int32_t translateTbnameColumn(SFunctionNode* pFunc, char* pErrBuf, int32_
}
static int32_t translateTop(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
- int32_t paraNum = LIST_LENGTH(pFunc->pParameterList);
- if (2 != paraNum) {
+ int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
+ if (2 != numOfParams) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
- SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, 1);
- if (nodeType(pParamNode) != QUERY_NODE_VALUE) {
+ uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
+ uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type;
+ if (!IS_NUMERIC_TYPE(para1Type) || !IS_INTEGER_TYPE(para2Type)) {
+ return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+ }
+
+ // param0
+ SNode* pParamNode0 = nodesListGetNode(pFunc->pParameterList, 0);
+ if (nodeType(pParamNode0) != QUERY_NODE_COLUMN) {
+ return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
+ "The first parameter of TOP/BOTTOM function can only be column");
+ }
+
+ // param1
+ SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1);
+ if (nodeType(pParamNode1) != QUERY_NODE_VALUE) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
- SValueNode* pValue = (SValueNode*)pParamNode;
+ SValueNode* pValue = (SValueNode*)pParamNode1;
if (pValue->node.resType.type != TSDB_DATA_TYPE_BIGINT) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -239,6 +308,7 @@ static int32_t translateTop(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
pValue->notReserved = true;
+ // set result type
SDataType* pType = &((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType;
pFunc->node.resType = (SDataType){.bytes = pType->bytes, .type = pType->type};
return TSDB_CODE_SUCCESS;
@@ -263,15 +333,16 @@ static int32_t translateSpread(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
}
static int32_t translateElapsed(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
- int32_t paraNum = LIST_LENGTH(pFunc->pParameterList);
- if (1 != paraNum && 2 != paraNum) {
+ int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
+ if (1 != numOfParams && 2 != numOfParams) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
- SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0);
- if (QUERY_NODE_COLUMN != nodeType(pPara)) {
+ // param0
+ SNode* pParaNode0 = nodesListGetNode(pFunc->pParameterList, 0);
+ if (QUERY_NODE_COLUMN != nodeType(pParaNode0)) {
return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
- "The input parameter of ELAPSED function can only be column");
+ "The first parameter of ELAPSED function can only be column");
}
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
@@ -279,6 +350,28 @@ static int32_t translateElapsed(SFunctionNode* pFunc, char* pErrBuf, int32_t len
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
+ // param1
+ if (2 == numOfParams) {
+ SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1);
+ if (QUERY_NODE_VALUE != nodeType(pParamNode1)) {
+ return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+ }
+
+ SValueNode* pValue = (SValueNode*)pParamNode1;
+
+ pValue->notReserved = true;
+
+ paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type;
+ if (!IS_INTEGER_TYPE(paraType)) {
+ return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+ }
+
+ if (pValue->datum.i == 0) {
+ return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
+ "ELAPSED function time unit parameter should be greater than db precision");
+ }
+ }
+
pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE};
return TSDB_CODE_SUCCESS;
}
@@ -290,6 +383,17 @@ static int32_t translateLeastSQR(SFunctionNode* pFunc, char* pErrBuf, int32_t le
}
for (int32_t i = 0; i < numOfParams; ++i) {
+ SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i);
+ if (i > 0) { // param1 & param2
+ if (QUERY_NODE_VALUE != nodeType(pParamNode)) {
+ return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+ }
+
+ SValueNode* pValue = (SValueNode*)pParamNode;
+
+ pValue->notReserved = true;
+ }
+
uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, i))->resType.type;
if (!IS_NUMERIC_TYPE(colType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
@@ -301,15 +405,35 @@ static int32_t translateLeastSQR(SFunctionNode* pFunc, char* pErrBuf, int32_t le
}
static int32_t translateHistogram(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
- if (4 != LIST_LENGTH(pFunc->pParameterList)) {
+ int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
+ if (4 != numOfParams) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
+ // param0
+ SNode* pParaNode0 = nodesListGetNode(pFunc->pParameterList, 0);
+ if (QUERY_NODE_COLUMN != nodeType(pParaNode0)) {
+ return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
+ "The first parameter of HISTOGRAM function can only be column");
+ }
+
uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
if (!IS_NUMERIC_TYPE(colType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
+ // param1 ~ param3
+ for (int32_t i = 1; i < numOfParams; ++i) {
+ SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i);
+ if (QUERY_NODE_VALUE != nodeType(pParamNode)) {
+ return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+ }
+
+ SValueNode* pValue = (SValueNode*)pParamNode;
+
+ pValue->notReserved = true;
+ }
+
if (((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type != TSDB_DATA_TYPE_BINARY ||
((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type != TSDB_DATA_TYPE_BINARY ||
((SExprNode*)nodesListGetNode(pFunc->pParameterList, 3))->resType.type != TSDB_DATA_TYPE_BIGINT) {
@@ -331,58 +455,122 @@ static int32_t translateHLL(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
"The input parameter of HYPERLOGLOG function can only be column");
}
- pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_UBIGINT].bytes, .type = TSDB_DATA_TYPE_UBIGINT};
+ pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT};
return TSDB_CODE_SUCCESS;
}
+static bool validateStateOper(const SValueNode* pVal) {
+ if (TSDB_DATA_TYPE_BINARY != pVal->node.resType.type) {
+ return false;
+ }
+ return (0 == strcasecmp(varDataVal(pVal->datum.p), "GT") || 0 == strcasecmp(varDataVal(pVal->datum.p), "GE") ||
+ 0 == strcasecmp(varDataVal(pVal->datum.p), "LT") || 0 == strcasecmp(varDataVal(pVal->datum.p), "LE") ||
+ 0 == strcasecmp(varDataVal(pVal->datum.p), "EQ") || 0 == strcasecmp(varDataVal(pVal->datum.p), "NE"));
+}
+
static int32_t translateStateCount(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
- if (3 != LIST_LENGTH(pFunc->pParameterList)) {
+ int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
+ if (3 != numOfParams) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
+ // param0
+ SNode* pParaNode0 = nodesListGetNode(pFunc->pParameterList, 0);
+ if (QUERY_NODE_COLUMN != nodeType(pParaNode0)) {
+ return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
+ "The input parameter of STATECOUNT function can only be column");
+ }
uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
if (!IS_NUMERIC_TYPE(colType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
+ // param1 & param2
+ for (int32_t i = 1; i < numOfParams; ++i) {
+ SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i);
+ if (QUERY_NODE_VALUE != nodeType(pParamNode)) {
+ return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+ }
+
+ SValueNode* pValue = (SValueNode*)pParamNode;
+
+ if (i == 1 && !validateStateOper(pValue)) {
+ return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
+ "Second parameter of STATECOUNT function"
+ "must be one of the following: 'GE', 'GT', 'LE', 'LT', 'EQ', 'NE'");
+ }
+
+ pValue->notReserved = true;
+ }
+
if (((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type != TSDB_DATA_TYPE_BINARY ||
(((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type != TSDB_DATA_TYPE_BIGINT &&
((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type != TSDB_DATA_TYPE_DOUBLE)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
+ // set result type
pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT};
return TSDB_CODE_SUCCESS;
}
static int32_t translateStateDuration(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
- int32_t paraNum = LIST_LENGTH(pFunc->pParameterList);
- if (3 != paraNum && 4 != paraNum) {
+ int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
+ if (3 != numOfParams && 4 != numOfParams) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
+ // param0
+ SNode* pParaNode0 = nodesListGetNode(pFunc->pParameterList, 0);
+ if (QUERY_NODE_COLUMN != nodeType(pParaNode0)) {
+ return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
+ "The input parameter of STATEDURATION function can only be column");
+ }
uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
if (!IS_NUMERIC_TYPE(colType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
+ // param1, param2 & param3
+ for (int32_t i = 1; i < numOfParams; ++i) {
+ SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i);
+ if (QUERY_NODE_VALUE != nodeType(pParamNode)) {
+ return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+ }
+
+ SValueNode* pValue = (SValueNode*)pParamNode;
+
+ if (i == 1 && !validateStateOper(pValue)) {
+ return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
+ "Second parameter of STATEDURATION function"
+ "must be one of the following: 'GE', 'GT', 'LE', 'LT', 'EQ', 'NE'");
+ } else if (i == 3 && pValue->datum.i == 0) {
+ return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
+ "STATEDURATION function time unit parameter should be greater than db precision");
+ }
+
+ pValue->notReserved = true;
+ }
+
if (((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type != TSDB_DATA_TYPE_BINARY ||
(((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type != TSDB_DATA_TYPE_BIGINT &&
((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type != TSDB_DATA_TYPE_DOUBLE)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
- if (paraNum == 4 && ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 3))->resType.type != TSDB_DATA_TYPE_BIGINT) {
+ if (numOfParams == 4 &&
+ ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 3))->resType.type != TSDB_DATA_TYPE_BIGINT) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
+ // set result type
pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT};
return TSDB_CODE_SUCCESS;
}
static int32_t translateCsum(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
if (1 != LIST_LENGTH(pFunc->pParameterList)) {
- return TSDB_CODE_SUCCESS;
+ return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0);
@@ -416,13 +604,28 @@ static int32_t translateMavg(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
- SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0);
- if (QUERY_NODE_COLUMN != nodeType(pPara)) {
+ // param0
+ SNode* pParaNode0 = nodesListGetNode(pFunc->pParameterList, 0);
+ if (QUERY_NODE_COLUMN != nodeType(pParaNode0)) {
return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
- "The input parameter of MAVG function can only be column");
+ "The first parameter of MAVG function can only be column");
}
uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
+
+ // param1
+ SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1);
+ if (QUERY_NODE_VALUE != nodeType(pParamNode1)) {
+ return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+ }
+
+ SValueNode* pValue = (SValueNode*)pParamNode1;
+ if (pValue->datum.i < 1 || pValue->datum.i > 1000) {
+ return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName);
+ }
+
+ pValue->notReserved = true;
+
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type;
if (!IS_NUMERIC_TYPE(colType) || !IS_INTEGER_TYPE(paraType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
@@ -437,24 +640,41 @@ static int32_t translateSample(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
- SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0);
- if (QUERY_NODE_COLUMN != nodeType(pPara)) {
+ // param0
+ SNode* pParamNode0 = nodesListGetNode(pFunc->pParameterList, 0);
+ if (QUERY_NODE_COLUMN != nodeType(pParamNode0)) {
return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
- "The input parameter of SAMPLE function can only be column");
+ "The first parameter of SAMPLE function can only be column");
}
+ SExprNode* pCol = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0);
+ uint8_t colType = pCol->resType.type;
+
+ // param1
+ SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1);
+ if (QUERY_NODE_VALUE != nodeType(pParamNode1)) {
+ return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+ }
+
+ SValueNode* pValue = (SValueNode*)pParamNode1;
+ if (pValue->datum.i < 1 || pValue->datum.i > 1000) {
+ return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName);
+ }
+
+ pValue->notReserved = true;
+
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type;
if (!IS_INTEGER_TYPE(paraType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
- SExprNode* pCol = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0);
- uint8_t colType = pCol->resType.type;
+ // set result type
if (IS_VAR_DATA_TYPE(colType)) {
pFunc->node.resType = (SDataType){.bytes = pCol->resType.bytes, .type = colType};
} else {
pFunc->node.resType = (SDataType){.bytes = tDataTypes[colType].bytes, .type = colType};
}
+
return TSDB_CODE_SUCCESS;
}
@@ -464,21 +684,39 @@ static int32_t translateTail(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
+ // param0
SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0);
if (QUERY_NODE_COLUMN != nodeType(pPara)) {
return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
- "The input parameter of TAIL function can only be column");
+ "The first parameter of TAIL function can only be column");
}
+ SExprNode* pCol = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0);
+ uint8_t colType = pCol->resType.type;
+ // param1 & param2
for (int32_t i = 1; i < numOfParams; ++i) {
+ SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i);
+ if (QUERY_NODE_VALUE != nodeType(pParamNode)) {
+ return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+ }
+
+ SValueNode* pValue = (SValueNode*)pParamNode;
+
+ if (pValue->datum.i < ((i > 1) ? 0 : 1) || pValue->datum.i > 100) {
+ return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
+ "TAIL function second parameter should be in range [1, 100], "
+ "third parameter should be in range [0, 100]");
+ }
+
+ pValue->notReserved = true;
+
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, i))->resType.type;
if (!IS_INTEGER_TYPE(paraType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
}
- SExprNode* pCol = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0);
- uint8_t colType = pCol->resType.type;
+ // set result type
if (IS_VAR_DATA_TYPE(colType)) {
pFunc->node.resType = (SDataType){.bytes = pCol->resType.bytes, .type = colType};
} else {
@@ -510,7 +748,7 @@ static int32_t translateFirstLast(SFunctionNode* pFunc, char* pErrBuf, int32_t l
static int32_t translateUnique(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
if (1 != LIST_LENGTH(pFunc->pParameterList)) {
- return TSDB_CODE_SUCCESS;
+ return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0);
@@ -523,17 +761,51 @@ static int32_t translateUnique(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
}
static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
- int32_t paraLen = LIST_LENGTH(pFunc->pParameterList);
- if (paraLen == 0 || paraLen > 2) {
+ int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
+ if (numOfParams == 0 || numOfParams > 2) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
- SExprNode* p1 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0);
- if (!IS_SIGNED_NUMERIC_TYPE(p1->resType.type) && !IS_FLOAT_TYPE(p1->resType.type) &&
- TSDB_DATA_TYPE_BOOL != p1->resType.type) {
+ // param0
+ SNode* pParamNode0 = nodesListGetNode(pFunc->pParameterList, 0);
+ if (nodeType(pParamNode0) != QUERY_NODE_COLUMN) {
+ return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
+ "The first parameter of DIFF function can only be column");
+ }
+
+ uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
+ if (!IS_SIGNED_NUMERIC_TYPE(colType) && !IS_FLOAT_TYPE(colType) && TSDB_DATA_TYPE_BOOL != colType) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
- pFunc->node.resType = p1->resType;
+
+ // param1
+ if (numOfParams == 2) {
+ uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type;
+ if (!IS_INTEGER_TYPE(paraType)) {
+ return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+ }
+
+ SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1);
+ if (QUERY_NODE_VALUE != nodeType(pParamNode1)) {
+ return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+ }
+
+ SValueNode* pValue = (SValueNode*)pParamNode1;
+ if (pValue->datum.i != 0 && pValue->datum.i != 1) {
+ return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
+ "Second parameter of DIFF function should be only 0 or 1");
+ }
+
+ pValue->notReserved = true;
+ }
+
+ uint8_t resType;
+ if (IS_SIGNED_NUMERIC_TYPE(colType) || TSDB_DATA_TYPE_BOOL == colType) {
+ resType = TSDB_DATA_TYPE_BIGINT;
+ } else {
+ resType = TSDB_DATA_TYPE_DOUBLE;
+ }
+ pFunc->node.resType = (SDataType){.bytes = tDataTypes[resType].bytes, .type = resType};
return TSDB_CODE_SUCCESS;
}
@@ -552,8 +824,8 @@ static int32_t translateLength(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
static int32_t translateConcatImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t len, int32_t minParaNum,
int32_t maxParaNum, bool hasSep) {
- int32_t paraNum = LIST_LENGTH(pFunc->pParameterList);
- if (paraNum < minParaNum || paraNum > maxParaNum) {
+ int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
+ if (numOfParams < minParaNum || numOfParams > maxParaNum) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -561,11 +833,20 @@ static int32_t translateConcatImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t
int32_t resultBytes = 0;
int32_t sepBytes = 0;
+ // concat_ws separator should be constant string
+ if (hasSep) {
+ SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0);
+ if (nodeType(pPara) != QUERY_NODE_VALUE) {
+ return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
+ "The first parameter of CONCAT_WS function can only be constant string");
+ }
+ }
+
/* For concat/concat_ws function, if params have NCHAR type, promote the final result to NCHAR */
- for (int32_t i = 0; i < paraNum; ++i) {
+ for (int32_t i = 0; i < numOfParams; ++i) {
SNode* pPara = nodesListGetNode(pFunc->pParameterList, i);
uint8_t paraType = ((SExprNode*)pPara)->resType.type;
- if (!IS_VAR_DATA_TYPE(paraType)) {
+ if (!IS_VAR_DATA_TYPE(paraType) && TSDB_DATA_TYPE_NULL != paraType) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
if (TSDB_DATA_TYPE_NCHAR == paraType) {
@@ -573,7 +854,7 @@ static int32_t translateConcatImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t
}
}
- for (int32_t i = 0; i < paraNum; ++i) {
+ for (int32_t i = 0; i < numOfParams; ++i) {
SNode* pPara = nodesListGetNode(pFunc->pParameterList, i);
uint8_t paraType = ((SExprNode*)pPara)->resType.type;
int32_t paraBytes = ((SExprNode*)pPara)->resType.bytes;
@@ -589,7 +870,7 @@ static int32_t translateConcatImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t
}
if (hasSep) {
- resultBytes += sepBytes * (paraNum - 3);
+ resultBytes += sepBytes * (numOfParams - 3);
}
pFunc->node.resType = (SDataType){.bytes = resultBytes, .type = resultType};
@@ -605,24 +886,37 @@ static int32_t translateConcatWs(SFunctionNode* pFunc, char* pErrBuf, int32_t le
}
static int32_t translateSubstr(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
- int32_t paraNum = LIST_LENGTH(pFunc->pParameterList);
- if (2 != paraNum && 3 != paraNum) {
+ int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
+ if (2 != numOfParams && 3 != numOfParams) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
- SExprNode* pPara1 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0);
- uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type;
- if (!IS_VAR_DATA_TYPE(pPara1->resType.type) || !IS_INTEGER_TYPE(para2Type)) {
+ SExprNode* pPara0 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0);
+ SExprNode* p1 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 1);
+
+ uint8_t para1Type = p1->resType.type;
+ if (!IS_VAR_DATA_TYPE(pPara0->resType.type) || !IS_INTEGER_TYPE(para1Type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
- if (3 == paraNum) {
- uint8_t para3Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type;
- if (!IS_INTEGER_TYPE(para3Type)) {
+
+ if (((SValueNode*)p1)->datum.i < 1) {
+ return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName);
+ }
+
+ if (3 == numOfParams) {
+ SExprNode* p2 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 2);
+ uint8_t para2Type = p2->resType.type;
+ if (!IS_INTEGER_TYPE(para2Type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
+
+ int64_t v = ((SValueNode*)p1)->datum.i;
+ if (v < 0 || v > INT16_MAX) {
+ return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName);
+ }
}
- pFunc->node.resType = (SDataType){.bytes = pPara1->resType.bytes, .type = pPara1->resType.type};
+ pFunc->node.resType = (SDataType){.bytes = pPara0->resType.bytes, .type = pPara0->resType.type};
return TSDB_CODE_SUCCESS;
}
@@ -640,23 +934,119 @@ static int32_t translateCast(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
(para2Type == TSDB_DATA_TYPE_BINARY && para1Type == TSDB_DATA_TYPE_NCHAR)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
+
int32_t para2Bytes = pFunc->node.resType.bytes;
+ if (IS_VAR_DATA_TYPE(para2Type)) {
+ para2Bytes -= VARSTR_HEADER_SIZE;
+ }
if (para2Bytes <= 0 || para2Bytes > 1000) { // cast dst var type length limits to 1000
- return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName);
+ return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
+ "CAST function converted length should be in range [0, 1000]");
}
return TSDB_CODE_SUCCESS;
}
+/* Following are valid ISO-8601 timezone format:
+ * 1 z/Z
+ * 2 ±hh:mm
+ * 3 ±hhmm
+ * 4 ±hh
+ *
+ */
+
+static bool validateTimezoneFormat(const SValueNode* pVal) {
+ if (TSDB_DATA_TYPE_BINARY != pVal->node.resType.type) {
+ return false;
+ }
+
+ char* tz = varDataVal(pVal->datum.p);
+ int32_t len = varDataLen(pVal->datum.p);
+
+ if (len == 0) {
+ return false;
+ } else if (len == 1 && (tz[0] == 'z' || tz[0] == 'Z')) {
+ return true;
+ } else if ((tz[0] == '+' || tz[0] == '-')) {
+ switch (len) {
+ case 3:
+ case 5: {
+ for (int32_t i = 1; i < len; ++i) {
+ if (!isdigit(tz[i])) {
+ return false;
+ }
+ }
+ break;
+ }
+ case 6: {
+ for (int32_t i = 1; i < len; ++i) {
+ if (i == 3) {
+ if (tz[i] != ':') {
+ return false;
+ }
+ continue;
+ }
+ if (!isdigit(tz[i])) {
+ return false;
+ }
+ }
+ break;
+ }
+ default: {
+ return false;
+ }
+ }
+ } else {
+ return false;
+ }
+
+ return true;
+}
+
+void static addTimezoneParam(SNodeList* pList) {
+ char buf[6] = {0};
+ time_t t = taosTime(NULL);
+ struct tm* tmInfo = taosLocalTime(&t, NULL);
+ strftime(buf, sizeof(buf), "%z", tmInfo);
+ int32_t len = (int32_t)strlen(buf);
+
+ SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE);
+ pVal->literal = strndup(buf, len);
+ pVal->isDuration = false;
+ pVal->translate = true;
+ pVal->node.resType.type = TSDB_DATA_TYPE_BINARY;
+ pVal->node.resType.bytes = len + VARSTR_HEADER_SIZE;
+ pVal->node.resType.precision = TSDB_TIME_PRECISION_MILLI;
+ pVal->datum.p = taosMemoryCalloc(1, len + VARSTR_HEADER_SIZE + 1);
+ varDataSetLen(pVal->datum.p, len);
+ strncpy(varDataVal(pVal->datum.p), pVal->literal, len);
+
+ nodesListAppend(pList, pVal);
+}
+
static int32_t translateToIso8601(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
- if (1 != LIST_LENGTH(pFunc->pParameterList)) {
+ int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
+ if (1 != numOfParams && 2 != numOfParams) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
+ // param0
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
if (!IS_INTEGER_TYPE(paraType) && TSDB_DATA_TYPE_TIMESTAMP != paraType) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
+ // param1
+ if (numOfParams == 2) {
+ SValueNode* pValue = (SValueNode*)nodesListGetNode(pFunc->pParameterList, 1);
+
+ if (!validateTimezoneFormat(pValue)) {
+ return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, "Invalid timzone format");
+ }
+ } else { // add default client timezone
+ addTimezoneParam(pFunc->pParameterList);
+ }
+
+ // set result type
pFunc->node.resType = (SDataType){.bytes = 64, .type = TSDB_DATA_TYPE_BINARY};
return TSDB_CODE_SUCCESS;
}
@@ -692,8 +1082,8 @@ static int32_t translateTimeTruncate(SFunctionNode* pFunc, char* pErrBuf, int32_
}
static int32_t translateTimeDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
- int32_t paraNum = LIST_LENGTH(pFunc->pParameterList);
- if (2 != paraNum && 3 != paraNum) {
+ int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
+ if (2 != numOfParams && 3 != numOfParams) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -704,7 +1094,7 @@ static int32_t translateTimeDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t le
}
}
- if (3 == paraNum) {
+ if (3 == numOfParams) {
if (!IS_INTEGER_TYPE(((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -745,7 +1135,10 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.initFunc = functionSetup,
.processFunc = countFunction,
.finalizeFunc = functionFinalize,
- .invertFunc = countInvertFunction
+ .invertFunc = countInvertFunction,
+ .combineFunc = combineFunction,
+ // .pPartialFunc = "count",
+ // .pMergeFunc = "sum"
},
{
.name = "sum",
@@ -757,7 +1150,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.initFunc = functionSetup,
.processFunc = sumFunction,
.finalizeFunc = functionFinalize,
- .invertFunc = sumInvertFunction
+ .invertFunc = sumInvertFunction,
+ .combineFunc = sumCombine,
},
{
.name = "min",
@@ -768,7 +1162,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.getEnvFunc = getMinmaxFuncEnv,
.initFunc = minmaxFunctionSetup,
.processFunc = minFunction,
- .finalizeFunc = minmaxFunctionFinalize
+ .finalizeFunc = minmaxFunctionFinalize,
+ .combineFunc = minCombine
},
{
.name = "max",
@@ -779,7 +1174,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.getEnvFunc = getMinmaxFuncEnv,
.initFunc = minmaxFunctionSetup,
.processFunc = maxFunction,
- .finalizeFunc = minmaxFunctionFinalize
+ .finalizeFunc = minmaxFunctionFinalize,
+ .combineFunc = maxCombine
},
{
.name = "stddev",
@@ -790,7 +1186,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.initFunc = stddevFunctionSetup,
.processFunc = stddevFunction,
.finalizeFunc = stddevFinalize,
- .invertFunc = stddevInvertFunction
+ .invertFunc = stddevInvertFunction,
+ .combineFunc = stddevCombine,
},
{
.name = "leastsquares",
@@ -801,7 +1198,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.initFunc = leastSQRFunctionSetup,
.processFunc = leastSQRFunction,
.finalizeFunc = leastSQRFinalize,
- .invertFunc = leastSQRInvertFunction
+ .invertFunc = leastSQRInvertFunction,
},
{
.name = "avg",
@@ -812,7 +1209,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.initFunc = avgFunctionSetup,
.processFunc = avgFunction,
.finalizeFunc = avgFinalize,
- .invertFunc = avgInvertFunction
+ .invertFunc = avgInvertFunction,
+ .combineFunc = avgCombine,
},
{
.name = "percentile",
@@ -829,15 +1227,15 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.type = FUNCTION_TYPE_APERCENTILE,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateApercentile,
- .getEnvFunc = getMinmaxFuncEnv,
- .initFunc = minmaxFunctionSetup,
- .processFunc = maxFunction,
- .finalizeFunc = functionFinalize
+ .getEnvFunc = getApercentileFuncEnv,
+ .initFunc = apercentileFunctionSetup,
+ .processFunc = apercentileFunction,
+ .finalizeFunc = apercentileFinalize
},
{
.name = "top",
.type = FUNCTION_TYPE_TOP,
- .classification = FUNC_MGT_SELECT_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC,
+ .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC,
.translateFunc = translateTop,
.getEnvFunc = getTopBotFuncEnv,
.initFunc = functionSetup,
@@ -847,7 +1245,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "bottom",
.type = FUNCTION_TYPE_BOTTOM,
- .classification = FUNC_MGT_SELECT_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC,
+ .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC,
.translateFunc = translateBottom,
.getEnvFunc = getTopBotFuncEnv,
.initFunc = functionSetup,
@@ -894,7 +1292,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.getEnvFunc = getFirstLastFuncEnv,
.initFunc = functionSetup,
.processFunc = firstFunction,
- .finalizeFunc = functionFinalize
+ .finalizeFunc = firstLastFinalize,
+ .combineFunc = firstCombine,
},
{
.name = "last",
@@ -904,7 +1303,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.getEnvFunc = getFirstLastFuncEnv,
.initFunc = functionSetup,
.processFunc = lastFunction,
- .finalizeFunc = lastFinalize
+ .finalizeFunc = firstLastFinalize,
+ .combineFunc = lastCombine,
+ },
+ {
+ .name = "twa",
+ .type = FUNCTION_TYPE_TWA,
+ .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC,
+ .translateFunc = translateInNumOutDou,
+ .getEnvFunc = getTwaFuncEnv,
+ .initFunc = twaFunctionSetup,
+ .processFunc = twaFunction,
+ .finalizeFunc = twaFinalize
},
{
.name = "histogram",
@@ -937,7 +1347,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.finalizeFunc = functionFinalize
},
{
- .name = "state_count",
+ .name = "statecount",
.type = FUNCTION_TYPE_STATE_COUNT,
.classification = FUNC_MGT_INDEFINITE_ROWS_FUNC,
.translateFunc = translateStateCount,
@@ -947,7 +1357,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.finalizeFunc = NULL
},
{
- .name = "state_duration",
+ .name = "stateduration",
.type = FUNCTION_TYPE_STATE_DURATION,
.classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC,
.translateFunc = translateStateDuration,
@@ -1020,7 +1430,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.name = "log",
.type = FUNCTION_TYPE_LOG,
.classification = FUNC_MGT_SCALAR_FUNC,
- .translateFunc = translateIn2NumOutDou,
+ .translateFunc = translateLogarithm,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = logFunction,
diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c
index ad92d095d5a292d366f127642e835b3dadda10dd..be18150234c5b1d7dc4064dfb561900290e3722b 100644
--- a/source/libs/function/src/builtinsimpl.c
+++ b/source/libs/function/src/builtinsimpl.c
@@ -14,12 +14,15 @@
*/
#include "builtinsimpl.h"
+#include "tglobal.h"
#include "cJSON.h"
#include "function.h"
#include "querynodes.h"
#include "taggfunction.h"
#include "tcompare.h"
#include "tdatablock.h"
+#include "tdigest.h"
+#include "thistogram.h"
#include "tpercentile.h"
#define HISTOGRAM_MAX_BINS_NUM 1000
@@ -95,6 +98,19 @@ typedef struct SPercentileInfo {
int64_t numOfElems;
} SPercentileInfo;
+typedef struct SAPercentileInfo {
+ double result;
+ int8_t algo;
+ SHistogramInfo *pHisto;
+ TDigest *pTDigest;
+} SAPercentileInfo;
+
+typedef enum {
+ APERCT_ALGO_UNKNOWN = 0,
+ APERCT_ALGO_DEFAULT,
+ APERCT_ALGO_TDIGEST,
+} EAPerctAlgoType;
+
typedef struct SDiffInfo {
bool hasPrev;
bool includeNull;
@@ -210,6 +226,7 @@ typedef struct SUniqueInfo {
int32_t numOfPoints;
uint8_t colType;
int16_t colBytes;
+ bool hasNull; //null is not hashable, handle separately
SHashObj *pHash;
char pItems[];
} SUniqueInfo;
@@ -284,7 +301,7 @@ int32_t functionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId);
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
- //pResInfo->isNullRes = (pResInfo->numOfRes == 0) ? 1 : 0;
+ pResInfo->isNullRes = (pResInfo->numOfRes == 0) ? 1 : 0;
char* in = GET_ROWCELL_INTERBUF(pResInfo);
colDataAppend(pCol, pBlock->info.rows, in, pResInfo->isNullRes);
@@ -292,6 +309,24 @@ int32_t functionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
return pResInfo->numOfRes;
}
+int32_t firstCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
+ SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx);
+ char* pDBuf = GET_ROWCELL_INTERBUF(pDResInfo);
+ int32_t type = pDestCtx->input.pData[0]->info.type;
+ int32_t bytes = pDestCtx->input.pData[0]->info.bytes;
+
+ SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx);
+ char* pSBuf = GET_ROWCELL_INTERBUF(pSResInfo);
+
+ if (pSResInfo->numOfRes != 0 &&
+ (pDResInfo->numOfRes == 0 || *(TSKEY*)(pDBuf + bytes) > *(TSKEY*)(pSBuf + bytes)) ) {
+ memcpy(pDBuf, pSBuf, bytes);
+ *(TSKEY*)(pDBuf + bytes) = *(TSKEY*)(pSBuf + bytes);
+ pDResInfo->numOfRes = 1;
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
int32_t dummyProcess(SqlFunctionCtx* UNUSED_PARAM(pCtx)) {
return 0;
}
@@ -323,7 +358,7 @@ bool getCountFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) {
return true;
}
-static FORCE_INLINE int32_t getNumofElem(SqlFunctionCtx* pCtx) {
+static FORCE_INLINE int32_t getNumOfElems(SqlFunctionCtx* pCtx) {
int32_t numOfElem = 0;
/*
@@ -358,11 +393,12 @@ static FORCE_INLINE int32_t getNumofElem(SqlFunctionCtx* pCtx) {
* count function does not use the pCtx->interResBuf to keep the intermediate buffer
*/
int32_t countFunction(SqlFunctionCtx* pCtx) {
- int32_t numOfElem = getNumofElem(pCtx);
- SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
+ int32_t numOfElem = getNumOfElems(pCtx);
+ SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
SInputColumnInfoData* pInput = &pCtx->input;
- int32_t type = pInput->pData[0]->info.type;
+
+ int32_t type = pInput->pData[0]->info.type;
char* buf = GET_ROWCELL_INTERBUF(pResInfo);
if (IS_NULL_TYPE(type)) {
@@ -373,12 +409,17 @@ int32_t countFunction(SqlFunctionCtx* pCtx) {
*((int64_t*)buf) += numOfElem;
}
- SET_VAL(pResInfo, numOfElem, 1);
+ if (tsCountAlwaysReturnValue) {
+ pResInfo->numOfRes = 1;
+ } else {
+ SET_VAL(pResInfo, 1, 1);
+ }
+
return TSDB_CODE_SUCCESS;
}
int32_t countInvertFunction(SqlFunctionCtx* pCtx) {
- int32_t numOfElem = getNumofElem(pCtx);
+ int32_t numOfElem = getNumOfElems(pCtx);
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
char* buf = GET_ROWCELL_INTERBUF(pResInfo);
@@ -388,6 +429,18 @@ int32_t countInvertFunction(SqlFunctionCtx* pCtx) {
return TSDB_CODE_SUCCESS;
}
+int32_t combineFunction(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
+ SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx);
+ char* pDBuf = GET_ROWCELL_INTERBUF(pDResInfo);
+
+ SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx);
+ char* pSBuf = GET_ROWCELL_INTERBUF(pSResInfo);
+ *((int64_t*)pDBuf) += *((int64_t*)pSBuf);
+
+ SET_VAL(pDResInfo, *((int64_t*)pDBuf), 1);
+ return TSDB_CODE_SUCCESS;
+}
+
#define LIST_ADD_N(_res, _col, _start, _rows, _t, numOfElem) \
do { \
_t* d = (_t*)(_col->pData); \
@@ -472,6 +525,11 @@ int32_t sumFunction(SqlFunctionCtx* pCtx) {
}
}
+ //check for overflow
+ if (IS_FLOAT_TYPE(type) && (isinf(pSumRes->dsum) || isnan(pSumRes->dsum))) {
+ GET_RES_INFO(pCtx)->isNullRes = 1;
+ }
+
_sum_over:
// data in the check operation are all null, not output
SET_VAL(GET_RES_INFO(pCtx), numOfElem, 1);
@@ -537,6 +595,26 @@ int32_t sumInvertFunction(SqlFunctionCtx* pCtx) {
return TSDB_CODE_SUCCESS;
}
+int32_t sumCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
+ SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx);
+ SSumRes* pDBuf = GET_ROWCELL_INTERBUF(pDResInfo);
+ int32_t type = pDestCtx->input.pData[0]->info.type;
+
+ SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx);
+ SSumRes* pSBuf = GET_ROWCELL_INTERBUF(pSResInfo);
+
+ if (IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_BOOL) {
+ pDBuf->isum += pSBuf->isum;
+ } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
+ pDBuf->usum += pSBuf->usum;
+ } else if (type == TSDB_DATA_TYPE_DOUBLE || type == TSDB_DATA_TYPE_FLOAT) {
+ pDBuf->dsum += pSBuf->dsum;
+ }
+
+ SET_VAL(pDResInfo, *((int64_t*)pDBuf), 1);
+ return TSDB_CODE_SUCCESS;
+}
+
bool getSumFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) {
pEnv->calcMemSize = sizeof(SSumRes);
return true;
@@ -738,16 +816,41 @@ int32_t avgInvertFunction(SqlFunctionCtx* pCtx) {
return TSDB_CODE_SUCCESS;
}
+int32_t avgCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
+ SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx);
+ SAvgRes* pDBuf = GET_ROWCELL_INTERBUF(pDResInfo);
+ int32_t type = pDestCtx->input.pData[0]->info.type;
+
+ SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx);
+ SAvgRes* pSBuf = GET_ROWCELL_INTERBUF(pSResInfo);
+
+ if (IS_INTEGER_TYPE(type)) {
+ pDBuf->sum.isum += pSBuf->sum.isum;
+ } else {
+ pDBuf->sum.dsum += pSBuf->sum.dsum;
+ }
+ pDBuf->count += pSBuf->count;
+
+ return TSDB_CODE_SUCCESS;
+}
+
int32_t avgFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SInputColumnInfoData* pInput = &pCtx->input;
- int32_t type = pInput->pData[0]->info.type;
- SAvgRes* pAvgRes = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
+
+ int32_t type = pInput->pData[0]->info.type;
+ SAvgRes* pAvgRes = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
+
if (IS_INTEGER_TYPE(type)) {
pAvgRes->result = pAvgRes->sum.isum / ((double)pAvgRes->count);
} else {
pAvgRes->result = pAvgRes->sum.dsum / ((double)pAvgRes->count);
}
+ //check for overflow
+ if (isinf(pAvgRes->result) || isnan(pAvgRes->result)) {
+ GET_RES_INFO(pCtx)->isNullRes = 1;
+ }
+
return functionFinalize(pCtx, pBlock);
}
@@ -1273,6 +1376,34 @@ void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuple
}
}
+int32_t minMaxCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx, int32_t isMinFunc) {
+ SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx);
+ SMinmaxResInfo* pDBuf = GET_ROWCELL_INTERBUF(pDResInfo);
+ int32_t type = pDestCtx->input.pData[0]->info.type;
+
+ SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx);
+ SMinmaxResInfo* pSBuf = GET_ROWCELL_INTERBUF(pSResInfo);
+ if (IS_FLOAT_TYPE(type)) {
+ if (pSBuf->assign &&
+ ( (((*(double*)&pDBuf->v) < (*(double*)&pSBuf->v)) ^ isMinFunc) || !pDBuf->assign ) ) {
+ *(double*) &pDBuf->v = *(double*) &pSBuf->v;
+ }
+ } else {
+ if ( pSBuf->assign && ( ((pDBuf->v < pSBuf->v) ^ isMinFunc) || !pDBuf->assign ) ) {
+ pDBuf->v = pSBuf->v;
+ }
+ }
+ SET_VAL(pDResInfo, *((int64_t*)pDBuf), 1);
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t minCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
+ return minMaxCombine(pDestCtx, pSourceCtx, 1);
+}
+int32_t maxCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
+ return minMaxCombine(pDestCtx, pSourceCtx, 0);
+}
+
bool getStddevFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) {
pEnv->calcMemSize = sizeof(SStddevRes);
return true;
@@ -1491,6 +1622,25 @@ int32_t stddevFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
return functionFinalize(pCtx, pBlock);
}
+int32_t stddevCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
+ SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx);
+ SStddevRes* pDBuf = GET_ROWCELL_INTERBUF(pDResInfo);
+ int32_t type = pDestCtx->input.pData[0]->info.type;
+
+ SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx);
+ SStddevRes* pSBuf = GET_ROWCELL_INTERBUF(pSResInfo);
+
+ if (IS_INTEGER_TYPE(type)) {
+ pDBuf->isum += pSBuf->isum;
+ pDBuf->quadraticISum += pSBuf->quadraticISum;
+ } else {
+ pDBuf->dsum += pSBuf->dsum;
+ pDBuf->quadraticDSum += pSBuf->quadraticDSum;
+ }
+ pDBuf->count += pSBuf->count;
+ return TSDB_CODE_SUCCESS;
+}
+
bool getLeastSQRFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) {
pEnv->calcMemSize = sizeof(SLeastSQRInfo);
return true;
@@ -1505,8 +1655,8 @@ bool leastSQRFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInf
pInfo->startVal = IS_FLOAT_TYPE(pCtx->param[1].param.nType) ? pCtx->param[1].param.d :
(double)pCtx->param[1].param.i;
- pInfo->stepVal = IS_FLOAT_TYPE(pCtx->param[1].param.nType) ? pCtx->param[2].param.d :
- (double)pCtx->param[1].param.i;
+ pInfo->stepVal = IS_FLOAT_TYPE(pCtx->param[2].param.nType) ? pCtx->param[2].param.d :
+ (double)pCtx->param[2].param.i;
return true;
}
@@ -1613,6 +1763,11 @@ int32_t leastSQRFunction(SqlFunctionCtx* pCtx) {
}
break;
}
+ case TSDB_DATA_TYPE_NULL: {
+ GET_RES_INFO(pCtx)->isNullRes = 1;
+ numOfElem = 1;
+ break;
+ }
default:
break;
@@ -1656,7 +1811,7 @@ int32_t leastSQRFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
size_t len = snprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{slop:%.6lf, intercept:%.6lf}", param[0][2], param[1][2]);
varDataSetLen(buf, len);
- colDataAppend(pCol, currentRow, buf, false);
+ colDataAppend(pCol, currentRow, buf, pResInfo->isNullRes);
return pResInfo->numOfRes;
}
@@ -1686,7 +1841,7 @@ bool percentileFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultI
}
int32_t percentileFunction(SqlFunctionCtx* pCtx) {
- int32_t notNullElems = 0;
+ int32_t numOfElems = 0;
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
SInputColumnInfoData* pInput = &pCtx->input;
@@ -1764,11 +1919,11 @@ int32_t percentileFunction(SqlFunctionCtx* pCtx) {
}
char* data = colDataGetData(pCol, i);
- notNullElems += 1;
+ numOfElems += 1;
tMemBucketPut(pInfo->pMemBucket, data, 1);
}
- SET_VAL(pResInfo, notNullElems, 1);
+ SET_VAL(pResInfo, numOfElems, 1);
}
return TSDB_CODE_SUCCESS;
@@ -1790,6 +1945,131 @@ int32_t percentileFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
return functionFinalize(pCtx, pBlock);
}
+bool getApercentileFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) {
+ int32_t bytesHist = (int32_t)(sizeof(SAPercentileInfo) + sizeof(SHistogramInfo) + sizeof(SHistBin) * (MAX_HISTOGRAM_BIN + 1));
+ int32_t bytesDigest = (int32_t)(sizeof(SAPercentileInfo) + TDIGEST_SIZE(COMPRESSION));
+ pEnv->calcMemSize = TMAX(bytesHist, bytesDigest);
+ return true;
+}
+
+static int8_t getApercentileAlgo(char *algoStr) {
+ int8_t algoType;
+ if (strcasecmp(algoStr, "default") == 0) {
+ algoType = APERCT_ALGO_DEFAULT;
+ } else if (strcasecmp(algoStr, "t-digest") == 0) {
+ algoType = APERCT_ALGO_TDIGEST;
+ } else {
+ algoType = APERCT_ALGO_UNKNOWN;
+ }
+
+ return algoType;
+}
+
+static void buildHistogramInfo(SAPercentileInfo* pInfo) {
+ pInfo->pHisto = (SHistogramInfo*) ((char*) pInfo + sizeof(SAPercentileInfo));
+ pInfo->pHisto->elems = (SHistBin*) ((char*)pInfo->pHisto + sizeof(SHistogramInfo));
+}
+
+bool apercentileFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInfo) {
+ if (!functionSetup(pCtx, pResultInfo)) {
+ return false;
+ }
+
+ SAPercentileInfo* pInfo = GET_ROWCELL_INTERBUF(pResultInfo);
+ if (pCtx->numOfParams == 2) {
+ pInfo->algo = APERCT_ALGO_DEFAULT;
+ } else if (pCtx->numOfParams == 3) {
+ pInfo->algo = getApercentileAlgo(varDataVal(pCtx->param[2].param.pz));
+ if (pInfo->algo == APERCT_ALGO_UNKNOWN) {
+ return false;
+ }
+ }
+
+ char *tmp = (char *)pInfo + sizeof(SAPercentileInfo);
+ if (pInfo->algo == APERCT_ALGO_TDIGEST) {
+ pInfo->pTDigest = tdigestNewFrom(tmp, COMPRESSION);
+ } else {
+ buildHistogramInfo(pInfo);
+ pInfo->pHisto = tHistogramCreateFrom(tmp, MAX_HISTOGRAM_BIN);
+ }
+
+ return true;
+}
+
+int32_t apercentileFunction(SqlFunctionCtx* pCtx) {
+ int32_t numOfElems = 0;
+ SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
+
+ SInputColumnInfoData* pInput = &pCtx->input;
+ //SColumnDataAgg* pAgg = pInput->pColumnDataAgg[0];
+
+ SColumnInfoData* pCol = pInput->pData[0];
+ int32_t type = pCol->info.type;
+
+ SAPercentileInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo);
+
+ int32_t start = pInput->startRowIndex;
+ if (pInfo->algo == APERCT_ALGO_TDIGEST) {
+ for (int32_t i = start; i < pInput->numOfRows + start; ++i) {
+ if (colDataIsNull_f(pCol->nullbitmap, i)) {
+ continue;
+ }
+ numOfElems += 1;
+ char* data = colDataGetData(pCol, i);
+
+ double v = 0; // value
+ int64_t w = 1; // weigth
+ GET_TYPED_DATA(v, double, type, data);
+ tdigestAdd(pInfo->pTDigest, v, w);
+ }
+ } else {
+ for (int32_t i = start; i < pInput->numOfRows + start; ++i) {
+ if (colDataIsNull_f(pCol->nullbitmap, i)) {
+ continue;
+ }
+ numOfElems += 1;
+ char* data = colDataGetData(pCol, i);
+
+ double v = 0;
+ GET_TYPED_DATA(v, double, type, data);
+ tHistogramAdd(&pInfo->pHisto, v);
+ }
+ }
+
+ SET_VAL(pResInfo, numOfElems, 1);
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t apercentileFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
+ SVariant* pVal = &pCtx->param[1].param;
+ double percent = (pVal->nType == TSDB_DATA_TYPE_BIGINT) ? pVal->i : pVal->d;
+
+ SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
+ SAPercentileInfo* pInfo = (SAPercentileInfo*)GET_ROWCELL_INTERBUF(pResInfo);
+
+ if (pInfo->algo == APERCT_ALGO_TDIGEST) {
+ if (pInfo->pTDigest->size > 0) {
+ pInfo->result = tdigestQuantile(pInfo->pTDigest, percent/100);
+ } else { // no need to free
+ //setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes);
+ return TSDB_CODE_SUCCESS;
+ }
+ } else {
+ if (pInfo->pHisto->numOfElems > 0) {
+ double ratio[] = {percent};
+ double *res = tHistogramUniform(pInfo->pHisto, ratio, 1);
+ pInfo->result = *res;
+ //memcpy(pCtx->pOutput, res, sizeof(double));
+ taosMemoryFree(res);
+ } else { // no need to free
+ //setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes);
+ return TSDB_CODE_SUCCESS;
+ }
+ }
+
+ return functionFinalize(pCtx, pBlock);
+}
+
bool getFirstLastFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) {
SColumnNode* pNode = nodesListGetNode(pFunc->pParameterList, 0);
pEnv->calcMemSize = pNode->node.resType.bytes + sizeof(int64_t);
@@ -1802,8 +2082,6 @@ bool getSelectivityFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) {
return true;
}
-
-
static FORCE_INLINE TSKEY getRowPTs(SColumnInfoData* pTsColInfo, int32_t rowIndex) {
if (pTsColInfo == NULL) {
return 0;
@@ -1966,7 +2244,7 @@ int32_t lastFunction(SqlFunctionCtx* pCtx) {
return TSDB_CODE_SUCCESS;
}
-int32_t lastFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
+int32_t firstLastFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
int32_t slotId = pCtx->pExpr->base.resSchema.slotId;
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId);
@@ -1979,6 +2257,24 @@ int32_t lastFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
return pResInfo->numOfRes;
}
+int32_t lastCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
+ SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx);
+ char* pDBuf = GET_ROWCELL_INTERBUF(pDResInfo);
+ int32_t type = pDestCtx->input.pData[0]->info.type;
+ int32_t bytes = pDestCtx->input.pData[0]->info.bytes;
+
+ SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx);
+ char* pSBuf = GET_ROWCELL_INTERBUF(pSResInfo);
+
+ if (pSResInfo->numOfRes != 0 &&
+ (pDResInfo->numOfRes == 0 || *(TSKEY*)(pDBuf + bytes) < *(TSKEY*)(pSBuf + bytes)) ) {
+ memcpy(pDBuf, pSBuf, bytes);
+ *(TSKEY*)(pDBuf + bytes) = *(TSKEY*)(pSBuf + bytes);
+ pDResInfo->numOfRes = 1;
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
bool getDiffFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) {
pEnv->calcMemSize = sizeof(SDiffInfo);
return true;
@@ -2019,15 +2315,15 @@ static void doSetPrevVal(SDiffInfo* pDiffInfo, int32_t type, const char* pv) {
}
static void doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SColumnInfoData* pOutput, int32_t pos, int32_t order) {
- int32_t factor = (order == TSDB_ORDER_ASC)? 1:-1;
+ int32_t factor = (order == TSDB_ORDER_ASC)? 1:-1;
switch (type) {
case TSDB_DATA_TYPE_INT: {
int32_t v = *(int32_t*)pv;
- int32_t delta = factor*(v - pDiffInfo->prev.i64); // direct previous may be null
+ int64_t delta = factor*(v - pDiffInfo->prev.i64); // direct previous may be null
if (delta < 0 && pDiffInfo->ignoreNegative) {
colDataSetNull_f(pOutput->nullbitmap, pos);
} else {
- colDataAppendInt32(pOutput, pos, &delta);
+ colDataAppendInt64(pOutput, pos, &delta);
}
pDiffInfo->prev.i64 = v;
break;
@@ -2035,22 +2331,22 @@ static void doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SCo
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT: {
int8_t v = *(int8_t*)pv;
- int8_t delta = factor*(v - pDiffInfo->prev.i64); // direct previous may be null
+ int64_t delta = factor*(v - pDiffInfo->prev.i64); // direct previous may be null
if (delta < 0 && pDiffInfo->ignoreNegative) {
colDataSetNull_f(pOutput->nullbitmap, pos);
} else {
- colDataAppendInt8(pOutput, pos, &delta);
+ colDataAppendInt64(pOutput, pos, &delta);
}
pDiffInfo->prev.i64 = v;
break;
}
case TSDB_DATA_TYPE_SMALLINT: {
int16_t v = *(int16_t*)pv;
- int16_t delta = factor*(v - pDiffInfo->prev.i64); // direct previous may be null
+ int64_t delta = factor*(v - pDiffInfo->prev.i64); // direct previous may be null
if (delta < 0 && pDiffInfo->ignoreNegative) {
colDataSetNull_f(pOutput->nullbitmap, pos);
} else {
- colDataAppendInt16(pOutput, pos, &delta);
+ colDataAppendInt64(pOutput, pos, &delta);
}
pDiffInfo->prev.i64 = v;
break;
@@ -2068,11 +2364,11 @@ static void doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SCo
}
case TSDB_DATA_TYPE_FLOAT: {
float v = *(float*)pv;
- float delta = factor*(v - pDiffInfo->prev.d64); // direct previous may be null
- if (delta < 0 && pDiffInfo->ignoreNegative) {
+ double delta = factor*(v - pDiffInfo->prev.d64); // direct previous may be null
+ if ((delta < 0 && pDiffInfo->ignoreNegative) || isinf(delta) || isnan(delta)) { //check for overflow
colDataSetNull_f(pOutput->nullbitmap, pos);
} else {
- colDataAppendFloat(pOutput, pos, &delta);
+ colDataAppendDouble(pOutput, pos, &delta);
}
pDiffInfo->prev.d64 = v;
break;
@@ -2080,7 +2376,7 @@ static void doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SCo
case TSDB_DATA_TYPE_DOUBLE: {
double v = *(double*)pv;
double delta = factor*(v - pDiffInfo->prev.d64); // direct previous may be null
- if (delta < 0 && pDiffInfo->ignoreNegative) {
+ if ((delta < 0 && pDiffInfo->ignoreNegative) || isinf(delta) || isnan(delta)) { //check for overflow
colDataSetNull_f(pOutput->nullbitmap, pos);
} else {
colDataAppendDouble(pOutput, pos, &delta);
@@ -2953,13 +3249,13 @@ static uint64_t hllCountCnt(uint8_t *buckets) {
z += buckethisto[j];
z *= 0.5;
}
+
z += m * hllSigma(buckethisto[0]/(double)m);
double E = (double)llroundl(HLL_ALPHA_INF*m*m/z);
return (uint64_t) E;
}
-
int32_t hllFunction(SqlFunctionCtx *pCtx) {
SHLLInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
@@ -2992,7 +3288,6 @@ int32_t hllFunction(SqlFunctionCtx *pCtx) {
if (count > oldcount) {
pInfo->buckets[index] = count;
}
-
}
SET_VAL(GET_RES_INFO(pCtx), numOfElems, 1);
@@ -3000,9 +3295,13 @@ int32_t hllFunction(SqlFunctionCtx *pCtx) {
}
int32_t hllFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
- SHLLInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
+ SResultRowEntryInfo *pInfo = GET_RES_INFO(pCtx);
- pInfo->result = hllCountCnt(pInfo->buckets);
+ SHLLInfo* pHllInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
+ pHllInfo->result = hllCountCnt(pHllInfo->buckets);
+ if (tsCountAlwaysReturnValue && pHllInfo->result == 0) {
+ pInfo->numOfRes = 1;
+ }
return functionFinalize(pCtx, pBlock);
}
@@ -3250,7 +3549,12 @@ int32_t csumFunction(SqlFunctionCtx* pCtx) {
double v;
GET_TYPED_DATA(v, double, type, data);
pSumRes->dsum += v;
- colDataAppend(pOutput, pos, (char *)&pSumRes->dsum, false);
+ //check for overflow
+ if (isinf(pSumRes->dsum) || isnan(pSumRes->dsum)) {
+ colDataAppendNULL(pOutput, pos);
+ } else {
+ colDataAppend(pOutput, pos, (char *)&pSumRes->dsum, false);
+ }
}
//TODO: remove this after pTsOutput is handled
@@ -3324,7 +3628,12 @@ int32_t mavgFunction(SqlFunctionCtx* pCtx) {
pInfo->points[pInfo->pos] = v;
double result = pInfo->sum / pInfo->numOfPoints;
- colDataAppend(pOutput, pos, (char *)&result, false);
+ //check for overflow
+ if (isinf(result) || isnan(result)) {
+ colDataAppendNULL(pOutput, pos);
+ } else {
+ colDataAppend(pOutput, pos, (char *)&result, false);
+ }
//TODO: remove this after pTsOutput is handled
if (pTsOutput != NULL) {
@@ -3398,7 +3707,6 @@ int32_t sampleFunction(SqlFunctionCtx* pCtx) {
TSKEY* tsList = (int64_t*)pInput->pPTS->pData;
SColumnInfoData* pInputCol = pInput->pData[0];
- SColumnInfoData* pTsOutput = pCtx->pTsOutput;
SColumnInfoData* pOutput = (SColumnInfoData*)pCtx->pOutput;
int32_t startOffset = pCtx->offset;
@@ -3421,24 +3729,6 @@ int32_t sampleFunction(SqlFunctionCtx* pCtx) {
return pInfo->numSampled;
}
-//int32_t sampleFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
-// SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
-// SSampleInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
-// int32_t slotId = pCtx->pExpr->base.resSchema.slotId;
-// SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId);
-//
-// //int32_t currentRow = pBlock->info.rows;
-// pResInfo->numOfRes = pInfo->numSampled;
-//
-// for (int32_t i = 0; i < pInfo->numSampled; ++i) {
-// colDataAppend(pCol, i, pInfo->data + i * pInfo->colBytes, false);
-// //TODO: handle ts output
-// }
-//
-// return pResInfo->numOfRes;
-//}
-
-
bool getTailFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) {
SColumnNode* pCol = (SColumnNode*)nodesListGetNode(pFunc->pParameterList, 0);
SValueNode* pVal = (SValueNode*)nodesListGetNode(pFunc->pParameterList, 1);
@@ -3484,6 +3774,7 @@ static void tailAssignResult(STailItem* pItem, char *data, int32_t colBytes, TSK
if (isNull) {
pItem->isNull = true;
} else {
+ pItem->isNull = false;
memcpy(pItem->data, data, colBytes);
}
}
@@ -3514,7 +3805,6 @@ int32_t tailFunction(SqlFunctionCtx* pCtx) {
TSKEY* tsList = (int64_t*)pInput->pPTS->pData;
SColumnInfoData* pInputCol = pInput->pData[0];
- SColumnInfoData* pTsOutput = pCtx->pTsOutput;
SColumnInfoData* pOutput = (SColumnInfoData*)pCtx->pOutput;
int32_t startOffset = pCtx->offset;
@@ -3590,8 +3880,22 @@ bool uniqueFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResInfo) {
}
static void doUniqueAdd(SUniqueInfo* pInfo, char *data, TSKEY ts, bool isNull) {
- int32_t hashKeyBytes = IS_VAR_DATA_TYPE(pInfo->colType) ? varDataTLen(data) : pInfo->colBytes;
+ //handle null elements
+ if (isNull == true) {
+ int32_t size = sizeof(SUniqueItem) + pInfo->colBytes;
+ SUniqueItem *pItem = (SUniqueItem *)(pInfo->pItems + pInfo->numOfPoints * size);
+ if (pInfo->hasNull == false && pItem->isNull == false) {
+ pItem->timestamp = ts;
+ pItem->isNull = true;
+ pInfo->numOfPoints++;
+ pInfo->hasNull = true;
+ } else if (pItem->timestamp > ts && pItem->isNull == true) {
+ pItem->timestamp = ts;
+ }
+ return;
+ }
+ int32_t hashKeyBytes = IS_VAR_DATA_TYPE(pInfo->colType) ? varDataTLen(data) : pInfo->colBytes;
SUniqueItem *pHashItem = taosHashGet(pInfo->pHash, data, hashKeyBytes);
if (pHashItem == NULL) {
int32_t size = sizeof(SUniqueItem) + pInfo->colBytes;
@@ -3604,7 +3908,6 @@ static void doUniqueAdd(SUniqueInfo* pInfo, char *data, TSKEY ts, bool isNull) {
} else if (pHashItem->timestamp > ts) {
pHashItem->timestamp = ts;
}
-
}
int32_t uniqueFunction(SqlFunctionCtx* pCtx) {
@@ -3631,7 +3934,11 @@ int32_t uniqueFunction(SqlFunctionCtx* pCtx) {
for (int32_t i = 0; i < pInfo->numOfPoints; ++i) {
SUniqueItem *pItem = (SUniqueItem *)(pInfo->pItems + i * (sizeof(SUniqueItem) + pInfo->colBytes));
- colDataAppend(pOutput, i, pItem->data, false);
+ if (pItem->isNull == true) {
+ colDataAppendNULL(pOutput, i);
+ } else {
+ colDataAppend(pOutput, i, pItem->data, false);
+ }
if (pTsOutput != NULL) {
colDataAppendInt64(pTsOutput, i, &pItem->timestamp);
}
@@ -3642,7 +3949,7 @@ int32_t uniqueFunction(SqlFunctionCtx* pCtx) {
int32_t uniqueFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
- SUniqueInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
+ SUniqueInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo);
int32_t slotId = pCtx->pExpr->base.resSchema.slotId;
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId);
@@ -3655,3 +3962,260 @@ int32_t uniqueFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
return pResInfo->numOfRes;
}
+typedef struct STwaInfo {
+ double dOutput;
+ SPoint1 p;
+ STimeWindow win;
+} STwaInfo;
+
+bool getTwaFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv) {
+ pEnv->calcMemSize = sizeof(STwaInfo);
+ return true;
+}
+
+bool twaFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo) {
+ if (!functionSetup(pCtx, pResultInfo)) {
+ return false;
+ }
+
+ STwaInfo *pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
+ pInfo->p.key = INT64_MIN;
+ pInfo->win = TSWINDOW_INITIALIZER;
+ return true;
+}
+
+static double twa_get_area(SPoint1 s, SPoint1 e) {
+ if ((s.val >= 0 && e.val >= 0)|| (s.val <=0 && e.val <= 0)) {
+ return (s.val + e.val) * (e.key - s.key) / 2;
+ }
+
+ double x = (s.key * e.val - e.key * s.val)/(e.val - s.val);
+ double val = (s.val * (x - s.key) + e.val * (e.key - x)) / 2;
+ return val;
+}
+
+#define INIT_INTP_POINT(_p, _k, _v) \
+ do { \
+ (_p).key = (_k); \
+ (_p).val = (_v); \
+ } while (0)
+
+int32_t twaFunction(SqlFunctionCtx* pCtx) {
+ SInputColumnInfoData* pInput = &pCtx->input;
+ SColumnInfoData* pInputCol = pInput->pData[0];
+
+ TSKEY* tsList = (int64_t*)pInput->pPTS->pData;
+
+ SResultRowEntryInfo *pResInfo = GET_RES_INFO(pCtx);
+
+ STwaInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo);
+ SPoint1 *last = &pInfo->p;
+ int32_t numOfElems = 0;
+
+ int32_t i = pInput->startRowIndex;
+ if (pCtx->start.key != INT64_MIN) {
+ ASSERT((pCtx->start.key < tsList[i] && pCtx->order == TSDB_ORDER_ASC) ||
+ (pCtx->start.key > tsList[i] && pCtx->order == TSDB_ORDER_DESC));
+
+ ASSERT(last->key == INT64_MIN);
+ last->key = tsList[i];
+
+ GET_TYPED_DATA(last->val, double, pInputCol->info.type, colDataGetData(pInputCol, i));
+
+ pInfo->dOutput += twa_get_area(pCtx->start, *last);
+ pInfo->win.skey = pCtx->start.key;
+ numOfElems++;
+ i += 1;
+ } else if (pInfo->p.key == INT64_MIN) {
+ last->key = tsList[i];
+ GET_TYPED_DATA(last->val, double, pInputCol->info.type, colDataGetData(pInputCol, i));
+
+ pInfo->win.skey = last->key;
+ numOfElems++;
+ i += 1;
+ }
+
+ SPoint1 st = {0};
+
+ // calculate the value of
+ switch(pInputCol->info.type) {
+ case TSDB_DATA_TYPE_TINYINT: {
+ int8_t *val = (int8_t*) colDataGetData(pInputCol, 0);
+ for (; i < pInput->numOfRows + pInput->startRowIndex; i += 1) {
+ if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
+ continue;
+ }
+
+ INIT_INTP_POINT(st, tsList[i], val[i]);
+ pInfo->dOutput += twa_get_area(pInfo->p, st);
+ pInfo->p = st;
+ }
+ break;
+ }
+
+ case TSDB_DATA_TYPE_SMALLINT: {
+ int16_t *val = (int16_t*) colDataGetData(pInputCol, 0);
+ for (; i < pInput->numOfRows + pInput->startRowIndex; i += 1) {
+ if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
+ continue;
+ }
+
+ INIT_INTP_POINT(st, tsList[i], val[i]);
+ pInfo->dOutput += twa_get_area(pInfo->p, st);
+ pInfo->p = st;
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_INT: {
+ int32_t *val = (int32_t*) colDataGetData(pInputCol, 0);
+ for (; i < pInput->numOfRows + pInput->startRowIndex; i += 1) {
+ if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
+ continue;
+ }
+
+ INIT_INTP_POINT(st, tsList[i], val[i]);
+ pInfo->dOutput += twa_get_area(pInfo->p, st);
+ pInfo->p = st;
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_BIGINT: {
+ int64_t *val = (int64_t*) colDataGetData(pInputCol, 0);
+ for (; i < pInput->numOfRows + pInput->startRowIndex; i += 1) {
+ if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
+ continue;
+ }
+
+ INIT_INTP_POINT(st, tsList[i], val[i]);
+ pInfo->dOutput += twa_get_area(pInfo->p, st);
+ pInfo->p = st;
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_FLOAT: {
+ float *val = (float*) colDataGetData(pInputCol, 0);
+ for (; i < pInput->numOfRows + pInput->startRowIndex; i += 1) {
+ if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
+ continue;
+ }
+
+ INIT_INTP_POINT(st, tsList[i], val[i]);
+ pInfo->dOutput += twa_get_area(pInfo->p, st);
+ pInfo->p = st;
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_DOUBLE: {
+ double *val = (double*) colDataGetData(pInputCol, 0);
+ for (; i < pInput->numOfRows + pInput->startRowIndex; i += 1) {
+ if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
+ continue;
+ }
+
+ INIT_INTP_POINT(st, tsList[i], val[i]);
+ pInfo->dOutput += twa_get_area(pInfo->p, st);
+ pInfo->p = st;
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_UTINYINT: {
+ uint8_t *val = (uint8_t*) colDataGetData(pInputCol, 0);
+ for (; i < pInput->numOfRows + pInput->startRowIndex; i += 1) {
+ if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
+ continue;
+ }
+
+ INIT_INTP_POINT(st, tsList[i], val[i]);
+ pInfo->dOutput += twa_get_area(pInfo->p, st);
+ pInfo->p = st;
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_USMALLINT: {
+ uint16_t *val = (uint16_t*) colDataGetData(pInputCol, 0);
+ for (; i < pInput->numOfRows + pInput->startRowIndex; i += 1) {
+ if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
+ continue;
+ }
+
+ INIT_INTP_POINT(st, tsList[i], val[i]);
+ pInfo->dOutput += twa_get_area(pInfo->p, st);
+ pInfo->p = st;
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_UINT: {
+ uint32_t *val = (uint32_t*) colDataGetData(pInputCol, 0);
+ for (; i < pInput->numOfRows + pInput->startRowIndex; i += 1) {
+ if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
+ continue;
+ }
+
+ INIT_INTP_POINT(st, tsList[i], val[i]);
+ pInfo->dOutput += twa_get_area(pInfo->p, st);
+ pInfo->p = st;
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_UBIGINT: {
+ uint64_t *val = (uint64_t*) colDataGetData(pInputCol, 0);
+ for (; i < pInput->numOfRows + pInput->startRowIndex; i += 1) {
+ if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
+ continue;
+ }
+
+ INIT_INTP_POINT(st, tsList[i], val[i]);
+ pInfo->dOutput += twa_get_area(pInfo->p, st);
+ pInfo->p = st;
+ }
+ break;
+ }
+
+ default: ASSERT(0);
+ }
+
+ // the last interpolated time window value
+ if (pCtx->end.key != INT64_MIN) {
+ pInfo->dOutput += twa_get_area(pInfo->p, pCtx->end);
+ pInfo->p = pCtx->end;
+ }
+
+ pInfo->win.ekey = pInfo->p.key;
+
+ SET_VAL(pResInfo, numOfElems, 1);
+ return TSDB_CODE_SUCCESS;
+}
+
+/*
+ * To copy the input to interResBuf to avoid the input buffer space be over writen
+ * by next input data. The TWA function only applies to each table, so no merge procedure
+ * is required, we simply copy to the resut ot interResBuffer.
+ */
+//void twa_function_copy(SQLFunctionCtx *pCtx) {
+// assert(pCtx->inputType == TSDB_DATA_TYPE_BINARY);
+// SResultRowEntryInfo *pResInfo = GET_RES_INFO(pCtx);
+//
+// memcpy(GET_ROWCELL_INTERBUF(pResInfo), pCtx->pInput, (size_t)pCtx->inputBytes);
+// pResInfo->hasResult = ((STwaInfo *)pCtx->pInput)->hasResult;
+//}
+
+int32_t twaFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock* pBlock) {
+ SResultRowEntryInfo *pResInfo = GET_RES_INFO(pCtx);
+
+ STwaInfo *pInfo = (STwaInfo *)GET_ROWCELL_INTERBUF(pResInfo);
+ if (pResInfo->numOfRes == 0) {
+ pResInfo->isNullRes = 1;
+ } else {
+ // assert(pInfo->win.ekey == pInfo->p.key && pInfo->hasResult == pResInfo->hasResult);
+ if (pInfo->win.ekey == pInfo->win.skey) {
+ pInfo->dOutput = pInfo->p.val;
+ } else {
+ pInfo->dOutput = pInfo->dOutput / (pInfo->win.ekey - pInfo->win.skey);
+ }
+
+ pResInfo->numOfRes = 1;
+ }
+
+ return functionFinalize(pCtx, pBlock);
+}
+
diff --git a/source/libs/function/src/functionMgt.c b/source/libs/function/src/functionMgt.c
index 49b20ebc853e1f97f191b2f5d9d0c5396d241b60..611ae8d81fdc681c28936456b5b46c0a7e09d4c0 100644
--- a/source/libs/function/src/functionMgt.c
+++ b/source/libs/function/src/functionMgt.c
@@ -16,7 +16,6 @@
#include "functionMgt.h"
#include "builtins.h"
-#include "catalog.h"
#include "functionMgtInt.h"
#include "taos.h"
#include "taoserror.h"
@@ -65,35 +64,19 @@ static bool isSpecificClassifyFunc(int32_t funcId, uint64_t classification) {
return FUNC_MGT_TEST_MASK(funcMgtBuiltins[funcId].classification, classification);
}
-static int32_t getUdfInfo(SFmGetFuncInfoParam* pParam, SFunctionNode* pFunc) {
- SFuncInfo funcInfo = {0};
- int32_t code = catalogGetUdfInfo(pParam->pCtg, pParam->pRpc, pParam->pMgmtEps, pFunc->functionName, &funcInfo);
- if (TSDB_CODE_SUCCESS != code) {
- return code;
- }
-
- pFunc->funcType = FUNCTION_TYPE_UDF;
- pFunc->funcId = TSDB_FUNC_TYPE_AGGREGATE == funcInfo.funcType ? FUNC_AGGREGATE_UDF_ID : FUNC_SCALAR_UDF_ID;
- pFunc->node.resType.type = funcInfo.outputType;
- pFunc->node.resType.bytes = funcInfo.outputLen;
- pFunc->udfBufSize = funcInfo.bufSize;
- tFreeSFuncInfo(&funcInfo);
- return TSDB_CODE_SUCCESS;
-}
-
int32_t fmFuncMgtInit() {
taosThreadOnce(&functionHashTableInit, doInitFunctionTable);
return initFunctionCode;
}
-int32_t fmGetFuncInfo(SFmGetFuncInfoParam* pParam, SFunctionNode* pFunc) {
+int32_t fmGetFuncInfo(SFunctionNode* pFunc, char* pMsg, int32_t msgLen) {
void* pVal = taosHashGet(gFunMgtService.pFuncNameHashTable, pFunc->functionName, strlen(pFunc->functionName));
if (NULL != pVal) {
pFunc->funcId = *(int32_t*)pVal;
pFunc->funcType = funcMgtBuiltins[pFunc->funcId].type;
- return funcMgtBuiltins[pFunc->funcId].translateFunc(pFunc, pParam->pErrBuf, pParam->errBufLen);
+ return funcMgtBuiltins[pFunc->funcId].translateFunc(pFunc, pMsg, msgLen);
}
- return getUdfInfo(pParam, pFunc);
+ return TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION;
}
bool fmIsBuiltinFunc(const char* pFunc) {
@@ -118,6 +101,7 @@ int32_t fmGetFuncExecFuncs(int32_t funcId, SFuncExecFuncs* pFpSet) {
pFpSet->init = funcMgtBuiltins[funcId].initFunc;
pFpSet->process = funcMgtBuiltins[funcId].processFunc;
pFpSet->finalize = funcMgtBuiltins[funcId].finalizeFunc;
+ pFpSet->combine = funcMgtBuiltins[funcId].combineFunc;
return TSDB_CODE_SUCCESS;
}
@@ -215,3 +199,81 @@ bool fmIsInvertible(int32_t funcId) {
}
return res;
}
+
+static SFunctionNode* createFunction(const char* pName, SNodeList* pParameterList) {
+ SFunctionNode* pFunc = nodesMakeNode(QUERY_NODE_FUNCTION);
+ if (NULL == pFunc) {
+ return NULL;
+ }
+ strcpy(pFunc->functionName, pName);
+ pFunc->pParameterList = pParameterList;
+ char msg[64] = {0};
+ if (TSDB_CODE_SUCCESS != fmGetFuncInfo(pFunc, msg, sizeof(msg))) {
+ nodesDestroyNode(pFunc);
+ return NULL;
+ }
+ return pFunc;
+}
+
+static SColumnNode* createColumnByFunc(const SFunctionNode* pFunc) {
+ SColumnNode* pCol = nodesMakeNode(QUERY_NODE_COLUMN);
+ if (NULL == pCol) {
+ return NULL;
+ }
+ strcpy(pCol->colName, pFunc->node.aliasName);
+ pCol->node.resType = pFunc->node.resType;
+ return pCol;
+}
+
+bool fmIsDistExecFunc(int32_t funcId) {
+ if (!fmIsVectorFunc(funcId)) {
+ return true;
+ }
+ return (NULL != funcMgtBuiltins[funcId].pPartialFunc && NULL != funcMgtBuiltins[funcId].pMergeFunc);
+}
+
+static int32_t createPartialFunction(const SFunctionNode* pSrcFunc, SFunctionNode** pPartialFunc) {
+ SNodeList* pParameterList = nodesCloneList(pSrcFunc->pParameterList);
+ if (NULL == pParameterList) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ *pPartialFunc = createFunction(funcMgtBuiltins[pSrcFunc->funcId].pPartialFunc, pParameterList);
+ if (NULL == *pPartialFunc) {
+ nodesDestroyList(pParameterList);
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ snprintf((*pPartialFunc)->node.aliasName, sizeof((*pPartialFunc)->node.aliasName), "%s.%p",
+ (*pPartialFunc)->functionName, pSrcFunc);
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t createMergeFunction(const SFunctionNode* pSrcFunc, const SFunctionNode* pPartialFunc,
+ SFunctionNode** pMergeFunc) {
+ SNodeList* pParameterList = NULL;
+ nodesListMakeStrictAppend(&pParameterList, createColumnByFunc(pPartialFunc));
+ *pMergeFunc = createFunction(funcMgtBuiltins[pSrcFunc->funcId].pMergeFunc, pParameterList);
+ if (NULL == *pMergeFunc) {
+ nodesDestroyList(pParameterList);
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ strcpy((*pMergeFunc)->node.aliasName, pSrcFunc->node.aliasName);
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t fmGetDistMethod(const SFunctionNode* pFunc, SFunctionNode** pPartialFunc, SFunctionNode** pMergeFunc) {
+ if (!fmIsDistExecFunc(pFunc->funcId)) {
+ return TSDB_CODE_FAILED;
+ }
+
+ int32_t code = createPartialFunction(pFunc, pPartialFunc);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = createMergeFunction(pFunc, *pPartialFunc, pMergeFunc);
+ }
+
+ if (TSDB_CODE_SUCCESS != code) {
+ nodesDestroyNode(*pPartialFunc);
+ nodesDestroyNode(*pMergeFunc);
+ }
+
+ return code;
+}
diff --git a/source/libs/function/src/taggfunction.c b/source/libs/function/src/taggfunction.c
index 950655e480b2b3413f26bc56d4771461b0dc4277..e683a38cbd1fd97ac7ba081a65f2af8ac18b8fee 100644
--- a/source/libs/function/src/taggfunction.c
+++ b/source/libs/function/src/taggfunction.c
@@ -236,7 +236,7 @@ bool isRowEntryCompleted(struct SResultRowEntryInfo* pEntry) {
bool isRowEntryInitialized(struct SResultRowEntryInfo* pEntry) {
return pEntry->initialized;
}
-
+#if 0
int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, SResultDataInfo* pInfo, int16_t extLength,
bool isSuperTable/*, SUdfInfo* pUdfInfo*/) {
if (!isValidDataType(dataType)) {
@@ -470,6 +470,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
return TSDB_CODE_SUCCESS;
}
+#endif
static bool function_setup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo) {
if (pResultInfo->initialized) {
diff --git a/source/libs/function/src/texpr.c b/source/libs/function/src/texpr.c
index b91af2d1577fc994ccaa6b11b8e9044ffb88b594..703b19ced7e1abeee312a414aafe6b34b936c271 100644
--- a/source/libs/function/src/texpr.c
+++ b/source/libs/function/src/texpr.c
@@ -36,12 +36,7 @@ void tExprTreeDestroy(tExprNode *pNode, void (*fp)(void *)) {
if (pNode->nodeType == TEXPR_BINARYEXPR_NODE || pNode->nodeType == TEXPR_UNARYEXPR_NODE) {
doExprTreeDestroy(&pNode, fp);
- } else if (pNode->nodeType == TEXPR_VALUE_NODE) {
- taosVariantDestroy(pNode->pVal);
- } else if (pNode->nodeType == TEXPR_COL_NODE) {
- taosMemoryFreeClear(pNode->pSchema);
}
-
taosMemoryFree(pNode);
}
@@ -49,15 +44,6 @@ static void doExprTreeDestroy(tExprNode **pExpr, void (*fp)(void *)) {
if (*pExpr == NULL) {
return;
}
-
- int32_t type = (*pExpr)->nodeType;
- if (type == TEXPR_VALUE_NODE) {
- taosVariantDestroy((*pExpr)->pVal);
- taosMemoryFree((*pExpr)->pVal);
- } else if (type == TEXPR_COL_NODE) {
- taosMemoryFree((*pExpr)->pSchema);
- }
-
taosMemoryFree(*pExpr);
*pExpr = NULL;
}
diff --git a/source/libs/function/src/tudf.c b/source/libs/function/src/tudf.c
index 441648e52b2ef78326d73d1944bcfbfd0009abc6..472d67260730ca10522ee0d07fc1d608b132688e 100644
--- a/source/libs/function/src/tudf.c
+++ b/source/libs/function/src/tudf.c
@@ -66,7 +66,7 @@ void udfUdfdExit(uv_process_t *process, int64_t exitStatus, int termSignal) {
}
static int32_t udfSpawnUdfd(SUdfdData* pData) {
- fnInfo("dnode start spawning udfd");
+ fnInfo("start to init udfd");
uv_process_options_t options = {0};
char path[PATH_MAX] = {0};
@@ -140,6 +140,8 @@ static int32_t udfSpawnUdfd(SUdfdData* pData) {
if (err != 0) {
fnError("can not spawn udfd. path: %s, error: %s", path, uv_strerror(err));
+ } else {
+ fnInfo("udfd is initialized");
}
return err;
}
diff --git a/source/libs/index/CMakeLists.txt b/source/libs/index/CMakeLists.txt
index e55b004972d841a2049dc0474dbf3343b1cc300a..75eac2430f70c7a4cfc215eee5515a392d1bcd40 100644
--- a/source/libs/index/CMakeLists.txt
+++ b/source/libs/index/CMakeLists.txt
@@ -12,6 +12,7 @@ target_link_libraries(
PUBLIC os
PUBLIC util
PUBLIC common
+ PUBLIC vnode
PUBLIC nodes
PUBLIC scalar
PUBLIC function
diff --git a/source/libs/index/inc/indexCache.h b/source/libs/index/inc/indexCache.h
index aff2e0e836c0f2aae9a1fe63dd984cd4f5eb7850..6e68163d74677ad0b7c9df944b73d2ebe602d93a 100644
--- a/source/libs/index/inc/indexCache.h
+++ b/source/libs/index/inc/indexCache.h
@@ -36,9 +36,10 @@ typedef struct MemTable {
typedef struct IndexCache {
T_REF_DECLARE()
MemTable *mem, *imm;
+ int32_t merging;
SIndex* index;
char* colName;
- int32_t version;
+ int64_t version;
int64_t occupiedMem;
int8_t type;
uint64_t suid;
@@ -47,12 +48,12 @@ typedef struct IndexCache {
TdThreadCond finished;
} IndexCache;
-#define CACHE_VERSION(cache) atomic_load_32(&cache->version)
+#define CACHE_VERSION(cache) atomic_load_64(&cache->version)
typedef struct CacheTerm {
// key
char* colVal;
- int32_t version;
+ int64_t version;
// value
uint64_t uid;
int8_t colType;
@@ -74,7 +75,7 @@ void indexCacheIteratorDestroy(Iterate* iiter);
int indexCachePut(void* cache, SIndexTerm* term, uint64_t uid);
// int indexCacheGet(void *cache, uint64_t *rst);
-int indexCacheSearch(void* cache, SIndexTermQuery* query, SIdxTempResult* tr, STermValueType* s);
+int indexCacheSearch(void* cache, SIndexTermQuery* query, SIdxTRslt* tr, STermValueType* s);
void indexCacheRef(IndexCache* cache);
void indexCacheUnRef(IndexCache* cache);
diff --git a/source/libs/index/inc/indexComm.h b/source/libs/index/inc/indexComm.h
index 3066fd1c2c57481cc80a6b19a7dc2de1a9b4d6cc..c338300b57d1c5d2d570130f596303503ee30187 100644
--- a/source/libs/index/inc/indexComm.h
+++ b/source/libs/index/inc/indexComm.h
@@ -33,8 +33,9 @@ typedef enum { MATCH, CONTINUE, BREAK } TExeCond;
typedef TExeCond (*_cache_range_compare)(void* a, void* b, int8_t type);
-TExeCond tCompare(__compar_fn_t func, int8_t cmpType, void* a, void* b, int8_t dType);
-TExeCond tDoCompare(__compar_fn_t func, int8_t cmpType, void* a, void* b);
+__compar_fn_t indexGetCompar(int8_t type);
+TExeCond tCompare(__compar_fn_t func, int8_t cmpType, void* a, void* b, int8_t dType);
+TExeCond tDoCompare(__compar_fn_t func, int8_t cmpType, void* a, void* b);
_cache_range_compare indexGetCompare(RangeType ty);
diff --git a/source/libs/index/inc/indexInt.h b/source/libs/index/inc/indexInt.h
index 0bdcb131b69befd518b233e38a2653a17e67bde8..24a4e99970692b202ab36fd1d1a83a45a09bcaa4 100644
--- a/source/libs/index/inc/indexInt.h
+++ b/source/libs/index/inc/indexInt.h
@@ -34,6 +34,15 @@
extern "C" {
#endif
+// clang-format off
+#define indexFatal(...) do { if (idxDebugFlag & DEBUG_FATAL) { taosPrintLog("INDEX FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} while (0)
+#define indexError(...) do { if (idxDebugFlag & DEBUG_ERROR) { taosPrintLog("INDEX ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} while (0)
+#define indexWarn(...) do { if (idxDebugFlag & DEBUG_WARN) { taosPrintLog("INDEX WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} while (0)
+#define indexInfo(...) do { if (idxDebugFlag & DEBUG_INFO) { taosPrintLog("INDEX ", DEBUG_INFO, 255, __VA_ARGS__); } } while (0)
+#define indexDebug(...) do { if (idxDebugFlag & DEBUG_DEBUG) { taosPrintLog("INDEX ", DEBUG_DEBUG, sDebugFlag, __VA_ARGS__);} } while (0)
+#define indexTrace(...) do { if (idxDebugFlag & DEBUG_TRACE) { taosPrintLog("INDEX ", DEBUG_TRACE, sDebugFlag, __VA_ARGS__);} } while (0)
+// clang-format on
+
typedef enum { LT, LE, GT, GE } RangeType;
typedef enum { kTypeValue, kTypeDeletion } STermValueType;
@@ -122,8 +131,7 @@ typedef struct TFileCacheKey {
char* colName;
int32_t nColName;
} ICacheKey;
-
-int indexFlushCacheToTFile(SIndex* sIdx, void*);
+int indexFlushCacheToTFile(SIndex* sIdx, void*, bool quit);
int64_t indexAddRef(void* p);
int32_t indexRemoveRef(int64_t ref);
@@ -134,15 +142,6 @@ int32_t indexSerialCacheKey(ICacheKey* key, char* buf);
// int32_t indexSerialKey(ICacheKey* key, char* buf);
// int32_t indexSerialTermKey(SIndexTerm* itm, char* buf);
-// clang-format off
-#define indexFatal(...) do { if (sDebugFlag & DEBUG_FATAL) { taosPrintLog("INDEX FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} while (0)
-#define indexError(...) do { if (sDebugFlag & DEBUG_ERROR) { taosPrintLog("INDEX ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} while (0)
-#define indexWarn(...) do { if (sDebugFlag & DEBUG_WARN) { taosPrintLog("INDEX WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} while (0)
-#define indexInfo(...) do { if (sDebugFlag & DEBUG_INFO) { taosPrintLog("INDEX ", DEBUG_INFO, 255, __VA_ARGS__); } } while (0)
-#define indexDebug(...) do { if (sDebugFlag & DEBUG_DEBUG) { taosPrintLog("INDEX ", DEBUG_DEBUG, sDebugFlag, __VA_ARGS__);} } while (0)
-#define indexTrace(...) do { if (sDebugFlag & DEBUG_TRACE) { taosPrintLog("INDEX ", DEBUG_TRACE, sDebugFlag, __VA_ARGS__);} } while (0)
-// clang-format on
-
#define INDEX_TYPE_CONTAIN_EXTERN_TYPE(ty, exTy) (((ty >> 4) & (exTy)) != 0)
#define INDEX_TYPE_GET_TYPE(ty) (ty & 0x0F)
diff --git a/source/libs/index/inc/indexTfile.h b/source/libs/index/inc/indexTfile.h
index 85ed397b0ac5d14984a4020b265fbdcf6951c68e..ca55aa93da5a47bcefa26bf880d115abeb46b8c8 100644
--- a/source/libs/index/inc/indexTfile.h
+++ b/source/libs/index/inc/indexTfile.h
@@ -28,12 +28,12 @@ extern "C" {
// tfile header content
// |<---suid--->|<---version--->|<-------colName------>|<---type-->|<--fstOffset->|
-// |<-uint64_t->|<---int32_t--->|<--TSDB_COL_NAME_LEN-->|<-uint8_t->|<---int32_t-->|
+// |<-uint64_t->|<---int64_t--->|<--TSDB_COL_NAME_LEN-->|<-uint8_t->|<---int32_t-->|
#pragma pack(push, 1)
typedef struct TFileHeader {
uint64_t suid;
- int32_t version;
+ int64_t version;
char colName[TSDB_COL_NAME_LEN]; //
uint8_t colType;
int32_t fstOffset;
@@ -74,9 +74,10 @@ typedef struct TFileReader {
} TFileReader;
typedef struct IndexTFile {
- char* path;
- TFileCache* cache;
- TFileWriter* tw;
+ char* path;
+ TFileCache* cache;
+ TFileWriter* tw;
+ TdThreadMutex mtx;
} IndexTFile;
typedef struct TFileWriterOpt {
@@ -101,14 +102,14 @@ void tfileCachePut(TFileCache* tcache, ICacheKey* key, TFileReader* read
TFileReader* tfileGetReaderByCol(IndexTFile* tf, uint64_t suid, char* colName);
-TFileReader* tfileReaderOpen(char* path, uint64_t suid, int32_t version, const char* colName);
+TFileReader* tfileReaderOpen(char* path, uint64_t suid, int64_t version, const char* colName);
TFileReader* tfileReaderCreate(WriterCtx* ctx);
void tfileReaderDestroy(TFileReader* reader);
-int tfileReaderSearch(TFileReader* reader, SIndexTermQuery* query, SIdxTempResult* tr);
+int tfileReaderSearch(TFileReader* reader, SIndexTermQuery* query, SIdxTRslt* tr);
void tfileReaderRef(TFileReader* reader);
void tfileReaderUnRef(TFileReader* reader);
-TFileWriter* tfileWriterOpen(char* path, uint64_t suid, int32_t version, const char* colName, uint8_t type);
+TFileWriter* tfileWriterOpen(char* path, uint64_t suid, int64_t version, const char* colName, uint8_t type);
void tfileWriterClose(TFileWriter* tw);
TFileWriter* tfileWriterCreate(WriterCtx* ctx, TFileHeader* header);
void tfileWriterDestroy(TFileWriter* tw);
@@ -119,7 +120,7 @@ int tfileWriterFinish(TFileWriter* tw);
IndexTFile* indexTFileCreate(const char* path);
void indexTFileDestroy(IndexTFile* tfile);
int indexTFilePut(void* tfile, SIndexTerm* term, uint64_t uid);
-int indexTFileSearch(void* tfile, SIndexTermQuery* query, SIdxTempResult* tr);
+int indexTFileSearch(void* tfile, SIndexTermQuery* query, SIdxTRslt* tr);
Iterate* tfileIteratorCreate(TFileReader* reader);
void tfileIteratorDestroy(Iterate* iterator);
diff --git a/source/libs/index/inc/indexUtil.h b/source/libs/index/inc/indexUtil.h
index f1676ed411a5e2074667816d1746dc607dc0f44d..dbaecaa9630b04b8b50f108c1a59e499f04899dc 100644
--- a/source/libs/index/inc/indexUtil.h
+++ b/source/libs/index/inc/indexUtil.h
@@ -66,7 +66,7 @@ extern "C" {
* [1, 4, 5]
* output:[4, 5]
*/
-void iIntersection(SArray *interResults, SArray *finalResult);
+void iIntersection(SArray *in, SArray *out);
/* multi sorted result union
* input: [1, 2, 4, 5]
@@ -74,7 +74,7 @@ void iIntersection(SArray *interResults, SArray *finalResult);
* [1, 4, 5]
* output:[1, 2, 3, 4, 5]
*/
-void iUnion(SArray *interResults, SArray *finalResult);
+void iUnion(SArray *in, SArray *out);
/* see example
* total: [1, 2, 4, 5, 7, 8]
@@ -92,19 +92,24 @@ typedef struct {
uint64_t data;
} SIdxVerdata;
+/*
+ * index temp result
+ *
+ */
typedef struct {
SArray *total;
- SArray *added;
- SArray *deled;
-} SIdxTempResult;
+ SArray *add;
+ SArray *del;
+} SIdxTRslt;
+
+SIdxTRslt *idxTRsltCreate();
-SIdxTempResult *sIdxTempResultCreate();
+void idxTRsltClear(SIdxTRslt *tr);
-void sIdxTempResultClear(SIdxTempResult *tr);
+void idxTRsltDestroy(SIdxTRslt *tr);
-void sIdxTempResultDestroy(SIdxTempResult *tr);
+void idxTRsltMergeTo(SIdxTRslt *tr, SArray *out);
-void sIdxTempResultMergeTo(SArray *result, SIdxTempResult *tr);
#ifdef __cplusplus
}
#endif
diff --git a/source/libs/index/src/index.c b/source/libs/index/src/index.c
index 6add788a896f8149f49d9f224538d5b3ab4e5b57..ba3aea969f6c8a5214a3999a7d4ca2c68ec503ac 100644
--- a/source/libs/index/src/index.c
+++ b/source/libs/index/src/index.c
@@ -29,7 +29,7 @@
#include "lucene++/Lucene_c.h"
#endif
-#define INDEX_NUM_OF_THREADS 1
+#define INDEX_NUM_OF_THREADS 5
#define INDEX_QUEUE_SIZE 200
#define INDEX_DATA_BOOL_NULL 0x02
@@ -80,12 +80,12 @@ static TdThreadOnce isInit = PTHREAD_ONCE_INIT;
static int indexTermSearch(SIndex* sIdx, SIndexTermQuery* term, SArray** result);
static void indexInterResultsDestroy(SArray* results);
-static int indexMergeFinalResults(SArray* interResults, EIndexOperatorType oType, SArray* finalResult);
+static int indexMergeFinalResults(SArray* in, EIndexOperatorType oType, SArray* out);
static int indexGenTFile(SIndex* index, IndexCache* cache, SArray* batch);
// merge cache and tfile by opera type
-static void indexMergeCacheAndTFile(SArray* result, IterateValue* icache, IterateValue* iTfv, SIdxTempResult* helper);
+static void indexMergeCacheAndTFile(SArray* result, IterateValue* icache, IterateValue* iTfv, SIdxTRslt* helper);
// static int32_t indexSerialTermKey(SIndexTerm* itm, char* buf);
// int32_t indexSerialKey(ICacheKey* key, char* buf);
@@ -150,6 +150,7 @@ void indexClose(SIndex* sIdx) {
indexCacheForceToMerge((void*)(*pCache));
indexInfo("%s wait to merge", (*pCache)->colName);
indexWait((void*)(sIdx));
+ indexInfo("%s finish to wait", (*pCache)->colName);
iter = taosHashIterate(sIdx->colObj, iter);
indexCacheUnRef(*pCache);
}
@@ -201,6 +202,7 @@ int indexPut(SIndex* index, SIndexMultiTerm* fVals, uint64_t uid) {
char buf[128] = {0};
ICacheKey key = {.suid = p->suid, .colName = p->colName, .nColName = strlen(p->colName), .colType = p->colType};
int32_t sz = indexSerialCacheKey(&key, buf);
+ indexDebug("suid: %" PRIu64 ", colName: %s, colType: %d", key.suid, key.colName, key.colType);
IndexCache** cache = taosHashGet(index->colObj, buf, sz);
assert(*cache != NULL);
@@ -328,6 +330,7 @@ static int indexTermSearch(SIndex* sIdx, SIndexTermQuery* query, SArray** result
char buf[128] = {0};
ICacheKey key = {
.suid = term->suid, .colName = term->colName, .nColName = strlen(term->colName), .colType = term->colType};
+ indexDebug("suid: %" PRIu64 ", colName: %s, colType: %d", key.suid, key.colName, key.colType);
int32_t sz = indexSerialCacheKey(&key, buf);
taosThreadMutexLock(&sIdx->mtx);
@@ -341,7 +344,7 @@ static int indexTermSearch(SIndex* sIdx, SIndexTermQuery* query, SArray** result
int64_t st = taosGetTimestampUs();
- SIdxTempResult* tr = sIdxTempResultCreate();
+ SIdxTRslt* tr = idxTRsltCreate();
if (0 == indexCacheSearch(cache, query, tr, &s)) {
if (s == kTypeDeletion) {
indexInfo("col: %s already drop by", term->colName);
@@ -363,12 +366,12 @@ static int indexTermSearch(SIndex* sIdx, SIndexTermQuery* query, SArray** result
int64_t cost = taosGetTimestampUs() - st;
indexInfo("search cost: %" PRIu64 "us", cost);
- sIdxTempResultMergeTo(*result, tr);
+ idxTRsltMergeTo(tr, *result);
- sIdxTempResultDestroy(tr);
+ idxTRsltDestroy(tr);
return 0;
END:
- sIdxTempResultDestroy(tr);
+ idxTRsltDestroy(tr);
return -1;
}
static void indexInterResultsDestroy(SArray* results) {
@@ -384,38 +387,38 @@ static void indexInterResultsDestroy(SArray* results) {
taosArrayDestroy(results);
}
-static int indexMergeFinalResults(SArray* interResults, EIndexOperatorType oType, SArray* fResults) {
+static int indexMergeFinalResults(SArray* in, EIndexOperatorType oType, SArray* out) {
// refactor, merge interResults into fResults by oType
- for (int i = 0; i < taosArrayGetSize(interResults); i--) {
- SArray* t = taosArrayGetP(interResults, i);
+ for (int i = 0; i < taosArrayGetSize(in); i--) {
+ SArray* t = taosArrayGetP(in, i);
taosArraySort(t, uidCompare);
taosArrayRemoveDuplicate(t, uidCompare, NULL);
}
if (oType == MUST) {
- iIntersection(interResults, fResults);
+ iIntersection(in, out);
} else if (oType == SHOULD) {
- iUnion(interResults, fResults);
+ iUnion(in, out);
} else if (oType == NOT) {
// just one column index, enhance later
- taosArrayAddAll(fResults, interResults);
+ // taosArrayAddAll(fResults, interResults);
// not use currently
}
return 0;
}
-static void indexMayMergeTempToFinalResult(SArray* result, TFileValue* tfv, SIdxTempResult* tr) {
+static void indexMayMergeTempToFinalResult(SArray* result, TFileValue* tfv, SIdxTRslt* tr) {
int32_t sz = taosArrayGetSize(result);
if (sz > 0) {
TFileValue* lv = taosArrayGetP(result, sz - 1);
if (tfv != NULL && strcmp(lv->colVal, tfv->colVal) != 0) {
- sIdxTempResultMergeTo(lv->tableId, tr);
- sIdxTempResultClear(tr);
+ idxTRsltMergeTo(tr, lv->tableId);
+ idxTRsltClear(tr);
taosArrayPush(result, &tfv);
} else if (tfv == NULL) {
// handle last iterator
- sIdxTempResultMergeTo(lv->tableId, tr);
+ idxTRsltMergeTo(tr, lv->tableId);
} else {
// temp result saved in help
tfileValueDestroy(tfv);
@@ -424,7 +427,7 @@ static void indexMayMergeTempToFinalResult(SArray* result, TFileValue* tfv, SIdx
taosArrayPush(result, &tfv);
}
}
-static void indexMergeCacheAndTFile(SArray* result, IterateValue* cv, IterateValue* tv, SIdxTempResult* tr) {
+static void indexMergeCacheAndTFile(SArray* result, IterateValue* cv, IterateValue* tv, SIdxTRslt* tr) {
char* colVal = (cv != NULL) ? cv->colVal : tv->colVal;
TFileValue* tfv = tfileValueCreate(colVal);
@@ -434,9 +437,9 @@ static void indexMergeCacheAndTFile(SArray* result, IterateValue* cv, IterateVal
uint64_t id = *(uint64_t*)taosArrayGet(cv->val, 0);
uint32_t ver = cv->ver;
if (cv->type == ADD_VALUE) {
- INDEX_MERGE_ADD_DEL(tr->deled, tr->added, id)
+ INDEX_MERGE_ADD_DEL(tr->del, tr->add, id)
} else if (cv->type == DEL_VALUE) {
- INDEX_MERGE_ADD_DEL(tr->added, tr->deled, id)
+ INDEX_MERGE_ADD_DEL(tr->add, tr->del, id)
}
}
if (tv != NULL) {
@@ -452,7 +455,7 @@ static void indexDestroyFinalResult(SArray* result) {
taosArrayDestroy(result);
}
-int indexFlushCacheToTFile(SIndex* sIdx, void* cache) {
+int indexFlushCacheToTFile(SIndex* sIdx, void* cache, bool quit) {
if (sIdx == NULL) {
return -1;
}
@@ -460,7 +463,10 @@ int indexFlushCacheToTFile(SIndex* sIdx, void* cache) {
int64_t st = taosGetTimestampUs();
- IndexCache* pCache = (IndexCache*)cache;
+ IndexCache* pCache = (IndexCache*)cache;
+
+ while (quit && atomic_load_32(&pCache->merging) == 1) {
+ }
TFileReader* pReader = tfileGetReaderByCol(sIdx->tindex, pCache->suid, pCache->colName);
if (pReader == NULL) {
indexWarn("empty tfile reader found");
@@ -471,9 +477,9 @@ int indexFlushCacheToTFile(SIndex* sIdx, void* cache) {
indexError("%p immtable is empty, ignore merge opera", pCache);
indexCacheDestroyImm(pCache);
tfileReaderUnRef(pReader);
- if (sIdx->quit) {
+ atomic_store_32(&pCache->merging, 0);
+ if (quit) {
indexPost(sIdx);
- // indexCacheBroadcast(pCache);
}
indexReleaseRef(sIdx->refId);
return 0;
@@ -489,7 +495,7 @@ int indexFlushCacheToTFile(SIndex* sIdx, void* cache) {
bool cn = cacheIter ? cacheIter->next(cacheIter) : false;
bool tn = tfileIter ? tfileIter->next(tfileIter) : false;
- SIdxTempResult* tr = sIdxTempResultCreate();
+ SIdxTRslt* tr = idxTRsltCreate();
while (cn == true || tn == true) {
IterateValue* cv = (cn == true) ? cacheIter->getValue(cacheIter) : NULL;
IterateValue* tv = (tn == true) ? tfileIter->getValue(tfileIter) : NULL;
@@ -515,7 +521,7 @@ int indexFlushCacheToTFile(SIndex* sIdx, void* cache) {
}
}
indexMayMergeTempToFinalResult(result, NULL, tr);
- sIdxTempResultDestroy(tr);
+ idxTRsltDestroy(tr);
int ret = indexGenTFile(sIdx, pCache, result);
indexDestroyFinalResult(result);
@@ -534,7 +540,8 @@ int indexFlushCacheToTFile(SIndex* sIdx, void* cache) {
} else {
indexInfo("success to merge , time cost: %" PRId64 "ms", cost / 1000);
}
- if (sIdx->quit) {
+ atomic_store_32(&pCache->merging, 0);
+ if (quit) {
indexPost(sIdx);
}
indexReleaseRef(sIdx->refId);
@@ -557,20 +564,18 @@ void iterateValueDestroy(IterateValue* value, bool destroy) {
static int64_t indexGetAvaialbleVer(SIndex* sIdx, IndexCache* cache) {
ICacheKey key = {.suid = cache->suid, .colName = cache->colName, .nColName = strlen(cache->colName)};
int64_t ver = CACHE_VERSION(cache);
- taosThreadMutexLock(&sIdx->mtx);
- TFileReader* trd = tfileCacheGet(((IndexTFile*)sIdx->tindex)->cache, &key);
- if (trd != NULL) {
- if (ver < trd->header.version) {
- ver = trd->header.version + 1;
- } else {
- ver += 1;
- }
- indexInfo("header: %d, ver: %" PRId64 "", trd->header.version, ver);
- tfileReaderUnRef(trd);
- } else {
- indexInfo("not found reader base %p", trd);
+
+ IndexTFile* tf = (IndexTFile*)(sIdx->tindex);
+
+ taosThreadMutexLock(&tf->mtx);
+ TFileReader* rd = tfileCacheGet(tf->cache, &key);
+ taosThreadMutexUnlock(&tf->mtx);
+
+ if (rd != NULL) {
+ ver = (ver > rd->header.version ? ver : rd->header.version) + 1;
+ indexInfo("header: %" PRId64 ", ver: %" PRId64 "", rd->header.version, ver);
}
- taosThreadMutexUnlock(&sIdx->mtx);
+ tfileReaderUnRef(rd);
return ver;
}
static int indexGenTFile(SIndex* sIdx, IndexCache* cache, SArray* batch) {
@@ -597,13 +602,15 @@ static int indexGenTFile(SIndex* sIdx, IndexCache* cache, SArray* batch) {
}
indexInfo("success to create tfile, reopen it, %s", reader->ctx->file.buf);
+ IndexTFile* tf = (IndexTFile*)sIdx->tindex;
+
TFileHeader* header = &reader->header;
ICacheKey key = {.suid = cache->suid, .colName = header->colName, .nColName = strlen(header->colName)};
- taosThreadMutexLock(&sIdx->mtx);
- IndexTFile* ifile = (IndexTFile*)sIdx->tindex;
- tfileCachePut(ifile->cache, &key, reader);
- taosThreadMutexUnlock(&sIdx->mtx);
+ taosThreadMutexLock(&tf->mtx);
+ tfileCachePut(tf->cache, &key, reader);
+ taosThreadMutexUnlock(&tf->mtx);
+
return ret;
END:
if (tw != NULL) {
diff --git a/source/libs/index/src/indexCache.c b/source/libs/index/src/indexCache.c
index d704e3876e4979cdf8c1354e9b3d2ef23bf91132..4e7be245ef7fb0a4c383a0abf0b242ebbb46522c 100644
--- a/source/libs/index/src/indexCache.c
+++ b/source/libs/index/src/indexCache.c
@@ -36,32 +36,31 @@ static char* indexCacheTermGet(const void* pData);
static MemTable* indexInternalCacheCreate(int8_t type);
-static int32_t cacheSearchTerm(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchPrefix(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchSuffix(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchRegex(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchLessThan(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchLessEqual(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchGreaterThan(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchGreaterEqual(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchRange(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
+static int32_t cacheSearchTerm(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchPrefix(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchSuffix(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchRegex(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchLessThan(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchLessEqual(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchGreaterThan(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchGreaterEqual(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchRange(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
/*comm func of compare, used in (LE/LT/GE/GT compare)*/
-static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s,
- RangeType type);
-static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchPrefix_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchSuffix_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchRegex_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchLessThan_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchLessEqual_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchGreaterThan_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchGreaterEqual_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchRange_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-
-static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s,
+static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s, RangeType type);
+static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchPrefix_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchSuffix_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchRegex_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchLessThan_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchLessEqual_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchGreaterThan_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchGreaterEqual_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchRange_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+
+static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s,
RangeType type);
-static int32_t (*cacheSearch[][QUERY_MAX])(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s) = {
+static int32_t (*cacheSearch[][QUERY_MAX])(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s) = {
{cacheSearchTerm, cacheSearchPrefix, cacheSearchSuffix, cacheSearchRegex, cacheSearchLessThan, cacheSearchLessEqual,
cacheSearchGreaterThan, cacheSearchGreaterEqual, cacheSearchRange},
{cacheSearchTerm_JSON, cacheSearchPrefix_JSON, cacheSearchSuffix_JSON, cacheSearchRegex_JSON,
@@ -71,7 +70,7 @@ static int32_t (*cacheSearch[][QUERY_MAX])(void* cache, SIndexTerm* ct, SIdxTemp
static void doMergeWork(SSchedMsg* msg);
static bool indexCacheIteratorNext(Iterate* itera);
-static int32_t cacheSearchTerm(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchTerm(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
if (cache == NULL) {
return 0;
}
@@ -80,7 +79,7 @@ static int32_t cacheSearchTerm(void* cache, SIndexTerm* term, SIdxTempResult* tr
CacheTerm* pCt = taosMemoryCalloc(1, sizeof(CacheTerm));
pCt->colVal = term->colVal;
- pCt->version = atomic_load_32(&pCache->version);
+ pCt->version = atomic_load_64(&pCache->version);
char* key = indexCacheTermGet(pCt);
@@ -93,11 +92,11 @@ static int32_t cacheSearchTerm(void* cache, SIndexTerm* term, SIdxTempResult* tr
CacheTerm* c = (CacheTerm*)SL_GET_NODE_DATA(node);
if (0 == strcmp(c->colVal, pCt->colVal)) {
if (c->operaType == ADD_VALUE) {
- INDEX_MERGE_ADD_DEL(tr->deled, tr->added, c->uid)
+ INDEX_MERGE_ADD_DEL(tr->del, tr->add, c->uid)
// taosArrayPush(result, &c->uid);
*s = kTypeValue;
} else if (c->operaType == DEL_VALUE) {
- INDEX_MERGE_ADD_DEL(tr->added, tr->deled, c->uid)
+ INDEX_MERGE_ADD_DEL(tr->add, tr->del, c->uid)
}
} else {
break;
@@ -108,20 +107,19 @@ static int32_t cacheSearchTerm(void* cache, SIndexTerm* term, SIdxTempResult* tr
tSkipListDestroyIter(iter);
return 0;
}
-static int32_t cacheSearchPrefix(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchPrefix(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
// impl later
return 0;
}
-static int32_t cacheSearchSuffix(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchSuffix(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
// impl later
return 0;
}
-static int32_t cacheSearchRegex(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchRegex(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
// impl later
return 0;
}
-static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s,
- RangeType type) {
+static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s, RangeType type) {
if (cache == NULL) {
return 0;
}
@@ -133,7 +131,8 @@ static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* term, SIdxTempRes
CacheTerm* pCt = taosMemoryCalloc(1, sizeof(CacheTerm));
pCt->colVal = term->colVal;
- pCt->version = atomic_load_32(&pCache->version);
+ pCt->colType = term->colType;
+ pCt->version = atomic_load_64(&pCache->version);
char* key = indexCacheTermGet(pCt);
@@ -147,11 +146,11 @@ static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* term, SIdxTempRes
TExeCond cond = cmpFn(c->colVal, pCt->colVal, pCt->colType);
if (cond == MATCH) {
if (c->operaType == ADD_VALUE) {
- INDEX_MERGE_ADD_DEL(tr->deled, tr->added, c->uid)
+ INDEX_MERGE_ADD_DEL(tr->del, tr->add, c->uid)
// taosArrayPush(result, &c->uid);
*s = kTypeValue;
} else if (c->operaType == DEL_VALUE) {
- INDEX_MERGE_ADD_DEL(tr->added, tr->deled, c->uid)
+ INDEX_MERGE_ADD_DEL(tr->add, tr->del, c->uid)
}
} else if (cond == CONTINUE) {
continue;
@@ -163,20 +162,20 @@ static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* term, SIdxTempRes
tSkipListDestroyIter(iter);
return TSDB_CODE_SUCCESS;
}
-static int32_t cacheSearchLessThan(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchLessThan(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
return cacheSearchCompareFunc(cache, term, tr, s, LT);
}
-static int32_t cacheSearchLessEqual(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchLessEqual(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
return cacheSearchCompareFunc(cache, term, tr, s, LE);
}
-static int32_t cacheSearchGreaterThan(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchGreaterThan(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
return cacheSearchCompareFunc(cache, term, tr, s, GT);
}
-static int32_t cacheSearchGreaterEqual(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchGreaterEqual(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
return cacheSearchCompareFunc(cache, term, tr, s, GE);
}
-static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
if (cache == NULL) {
return 0;
}
@@ -185,7 +184,7 @@ static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* term, SIdxTempResul
CacheTerm* pCt = taosMemoryCalloc(1, sizeof(CacheTerm));
pCt->colVal = term->colVal;
- pCt->version = atomic_load_32(&pCache->version);
+ pCt->version = atomic_load_64(&pCache->version);
char* exBuf = NULL;
if (INDEX_TYPE_CONTAIN_EXTERN_TYPE(term->colType, TSDB_DATA_TYPE_JSON)) {
@@ -204,11 +203,11 @@ static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* term, SIdxTempResul
if (0 == strcmp(c->colVal, pCt->colVal)) {
if (c->operaType == ADD_VALUE) {
- INDEX_MERGE_ADD_DEL(tr->deled, tr->added, c->uid)
+ INDEX_MERGE_ADD_DEL(tr->del, tr->add, c->uid)
// taosArrayPush(result, &c->uid);
*s = kTypeValue;
} else if (c->operaType == DEL_VALUE) {
- INDEX_MERGE_ADD_DEL(tr->added, tr->deled, c->uid)
+ INDEX_MERGE_ADD_DEL(tr->add, tr->del, c->uid)
}
} else {
break;
@@ -222,32 +221,32 @@ static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* term, SIdxTempResul
return TSDB_CODE_SUCCESS;
}
-static int32_t cacheSearchPrefix_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchPrefix_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
return TSDB_CODE_SUCCESS;
}
-static int32_t cacheSearchSuffix_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchSuffix_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
return TSDB_CODE_SUCCESS;
}
-static int32_t cacheSearchRegex_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchRegex_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
return TSDB_CODE_SUCCESS;
}
-static int32_t cacheSearchLessThan_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchLessThan_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
return cacheSearchCompareFunc_JSON(cache, term, tr, s, LT);
}
-static int32_t cacheSearchLessEqual_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchLessEqual_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
return cacheSearchCompareFunc_JSON(cache, term, tr, s, LE);
}
-static int32_t cacheSearchGreaterThan_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchGreaterThan_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
return cacheSearchCompareFunc_JSON(cache, term, tr, s, GT);
}
-static int32_t cacheSearchGreaterEqual_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchGreaterEqual_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
return cacheSearchCompareFunc_JSON(cache, term, tr, s, GE);
}
-static int32_t cacheSearchRange_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchRange_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
return TSDB_CODE_SUCCESS;
}
-static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s,
+static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s,
RangeType type) {
if (cache == NULL) {
return 0;
@@ -259,7 +258,7 @@ static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTe
CacheTerm* pCt = taosMemoryCalloc(1, sizeof(CacheTerm));
pCt->colVal = term->colVal;
- pCt->version = atomic_load_32(&pCache->version);
+ pCt->version = atomic_load_64(&pCache->version);
int8_t dType = INDEX_TYPE_GET_TYPE(term->colType);
int skip = 0;
@@ -289,11 +288,11 @@ static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTe
TExeCond cond = cmpFn(p + skip, term->colVal, dType);
if (cond == MATCH) {
if (c->operaType == ADD_VALUE) {
- INDEX_MERGE_ADD_DEL(tr->deled, tr->added, c->uid)
+ INDEX_MERGE_ADD_DEL(tr->del, tr->add, c->uid)
// taosArrayPush(result, &c->uid);
*s = kTypeValue;
} else if (c->operaType == DEL_VALUE) {
- INDEX_MERGE_ADD_DEL(tr->added, tr->deled, c->uid)
+ INDEX_MERGE_ADD_DEL(tr->add, tr->del, c->uid)
}
} else if (cond == CONTINUE) {
continue;
@@ -309,7 +308,7 @@ static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTe
return TSDB_CODE_SUCCESS;
}
-static int32_t cacheSearchRange(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchRange(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
// impl later
return 0;
}
@@ -356,7 +355,7 @@ void indexCacheDebug(IndexCache* cache) {
CacheTerm* ct = (CacheTerm*)SL_GET_NODE_DATA(node);
if (ct != NULL) {
// TODO, add more debug info
- indexInfo("{colVal: %s, version: %d} \t", ct->colVal, ct->version);
+ indexInfo("{colVal: %s, version: %" PRId64 "} \t", ct->colVal, ct->version);
}
}
tSkipListDestroyIter(iter);
@@ -377,7 +376,7 @@ void indexCacheDebug(IndexCache* cache) {
CacheTerm* ct = (CacheTerm*)SL_GET_NODE_DATA(node);
if (ct != NULL) {
// TODO, add more debug info
- indexInfo("{colVal: %s, version: %d} \t", ct->colVal, ct->version);
+ indexInfo("{colVal: %s, version: %" PRId64 "} \t", ct->colVal, ct->version);
}
}
tSkipListDestroyIter(iter);
@@ -495,16 +494,19 @@ static void indexCacheMakeRoomForWrite(IndexCache* cache) {
// TODO: wake up by condition variable
indexCacheWait(cache);
} else {
- bool notifyQuit = cache->occupiedMem >= MEM_SIGNAL_QUIT ? true : false;
+ bool quit = cache->occupiedMem >= MEM_SIGNAL_QUIT ? true : false;
indexCacheRef(cache);
cache->imm = cache->mem;
cache->mem = indexInternalCacheCreate(cache->type);
cache->mem->pCache = cache;
cache->occupiedMem = 0;
+ if (quit == false) {
+ atomic_store_32(&cache->merging, 1);
+ }
// sched to merge
// unref cache in bgwork
- indexCacheSchedToMerge(cache, notifyQuit);
+ indexCacheSchedToMerge(cache, quit);
}
}
}
@@ -529,7 +531,7 @@ int indexCachePut(void* cache, SIndexTerm* term, uint64_t uid) {
ct->colVal = (char*)taosMemoryCalloc(1, sizeof(char) * (term->nColVal + 1));
memcpy(ct->colVal, term->colVal, term->nColVal);
}
- ct->version = atomic_add_fetch_32(&pCache->version, 1);
+ ct->version = atomic_add_fetch_64(&pCache->version, 1);
// set value
ct->uid = uid;
ct->operaType = term->operType;
@@ -568,7 +570,7 @@ int indexCacheDel(void* cache, const char* fieldValue, int32_t fvlen, uint64_t u
return 0;
}
-static int32_t indexQueryMem(MemTable* mem, SIndexTermQuery* query, SIdxTempResult* tr, STermValueType* s) {
+static int32_t indexQueryMem(MemTable* mem, SIndexTermQuery* query, SIdxTRslt* tr, STermValueType* s) {
if (mem == NULL) {
return 0;
}
@@ -582,7 +584,7 @@ static int32_t indexQueryMem(MemTable* mem, SIndexTermQuery* query, SIdxTempResu
return cacheSearch[0][qtype](mem, term, tr, s);
}
}
-int indexCacheSearch(void* cache, SIndexTermQuery* query, SIdxTempResult* result, STermValueType* s) {
+int indexCacheSearch(void* cache, SIndexTermQuery* query, SIdxTRslt* result, STermValueType* s) {
int64_t st = taosGetTimestampUs();
if (cache == NULL) {
return 0;
@@ -597,10 +599,10 @@ int indexCacheSearch(void* cache, SIndexTermQuery* query, SIdxTempResult* result
indexMemRef(imm);
taosThreadMutexUnlock(&pCache->mtx);
- int ret = indexQueryMem(mem, query, result, s);
+ int ret = (mem && mem->mem) ? indexQueryMem(mem, query, result, s) : 0;
if (ret == 0 && *s != kTypeDeletion) {
// continue search in imm
- ret = indexQueryMem(imm, query, result, s);
+ ret = (imm && imm->mem) ? indexQueryMem(imm, query, result, s) : 0;
}
indexMemUnRef(mem);
@@ -663,7 +665,11 @@ static int32_t indexCacheTermCompare(const void* l, const void* r) {
// compare colVal
int32_t cmp = strcmp(lt->colVal, rt->colVal);
if (cmp == 0) {
- return rt->version - lt->version;
+ if (rt->version == lt->version) {
+ cmp = 0;
+ } else {
+ cmp = rt->version < lt->version ? -1 : 1;
+ }
}
return cmp;
}
@@ -705,7 +711,7 @@ static int32_t indexCacheJsonTermCompare(const void* l, const void* r) {
return cmp;
}
static MemTable* indexInternalCacheCreate(int8_t type) {
- int ttype = INDEX_TYPE_CONTAIN_EXTERN_TYPE(type, TSDB_DATA_TYPE_JSON) ? TSDB_DATA_TYPE_BINARY : type;
+ int ttype = INDEX_TYPE_CONTAIN_EXTERN_TYPE(type, TSDB_DATA_TYPE_JSON) ? TSDB_DATA_TYPE_BINARY : TSDB_DATA_TYPE_BINARY;
int32_t (*cmpFn)(const void* l, const void* r) =
INDEX_TYPE_CONTAIN_EXTERN_TYPE(type, TSDB_DATA_TYPE_JSON) ? indexCacheJsonTermCompare : indexCacheTermCompare;
@@ -722,9 +728,9 @@ static void doMergeWork(SSchedMsg* msg) {
IndexCache* pCache = msg->ahandle;
SIndex* sidx = (SIndex*)pCache->index;
- sidx->quit = msg->thandle ? true : false;
+ int quit = msg->thandle ? true : false;
taosMemoryFree(msg->thandle);
- indexFlushCacheToTFile(sidx, pCache);
+ indexFlushCacheToTFile(sidx, pCache, quit);
}
static bool indexCacheIteratorNext(Iterate* itera) {
SSkipListIterator* iter = itera->iter;
diff --git a/source/libs/index/src/indexComm.c b/source/libs/index/src/indexComm.c
index 78c7babb681e44629281f0ffd6ea6ba835495b5b..5310e1c3451dee18bd3a31922b2ce14f752ebc1d 100644
--- a/source/libs/index/src/indexComm.c
+++ b/source/libs/index/src/indexComm.c
@@ -75,7 +75,7 @@ char* indexInt2str(int64_t val, char* dst, int radix) {
;
return dst - 1;
}
-static __compar_fn_t indexGetCompar(int8_t type) {
+__compar_fn_t indexGetCompar(int8_t type) {
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
return (__compar_fn_t)strcmp;
}
@@ -182,6 +182,9 @@ TExeCond tDoCompare(__compar_fn_t func, int8_t comparType, void* a, void* b) {
case QUERY_GREATER_EQUAL: {
if (ret >= 0) return MATCH;
}
+ case QUERY_TERM: {
+ if (ret == 0) return MATCH;
+ }
}
return CONTINUE;
}
diff --git a/source/libs/index/src/indexFilter.c b/source/libs/index/src/indexFilter.c
index 0273867ccf040f3d3344066270ef3b8aa6a3bae2..766746dd2a695076d3ab524076bfa143ceba1c54 100644
--- a/source/libs/index/src/indexFilter.c
+++ b/source/libs/index/src/indexFilter.c
@@ -14,11 +14,13 @@
*/
#include "index.h"
+#include "indexComm.h"
#include "indexInt.h"
#include "nodes.h"
#include "querynodes.h"
#include "scalar.h"
#include "tdatablock.h"
+#include "vnode.h"
// clang-format off
#define SIF_ERR_RET(c) do { int32_t _code = c; if (_code != TSDB_CODE_SUCCESS) { terrno = _code; return _code; } } while (0)
@@ -37,12 +39,15 @@ typedef struct SIFParam {
int64_t suid; // add later
char dbName[TSDB_DB_NAME_LEN];
char colName[TSDB_COL_NAME_LEN];
+
+ SIndexMetaArg arg;
} SIFParam;
typedef struct SIFCtx {
- int32_t code;
- SHashObj *pRes; /* element is SIFParam */
- bool noExec; // true: just iterate condition tree, and add hint to executor plan
+ int32_t code;
+ SHashObj * pRes; /* element is SIFParam */
+ bool noExec; // true: just iterate condition tree, and add hint to executor plan
+ SIndexMetaArg arg;
// SIdxFltStatus st;
} SIFCtx;
@@ -256,8 +261,52 @@ static int32_t sifExecFunction(SFunctionNode *node, SIFCtx *ctx, SIFParam *outpu
indexError("index-filter not support buildin function");
return TSDB_CODE_QRY_INVALID_INPUT;
}
+
+typedef int (*Filter)(void *a, void *b, int16_t dtype);
+
+int sifGreaterThan(void *a, void *b, int16_t dtype) {
+ __compar_fn_t func = indexGetCompar(dtype);
+ return tDoCompare(func, QUERY_GREATER_THAN, a, b);
+}
+int sifGreaterEqual(void *a, void *b, int16_t dtype) {
+ __compar_fn_t func = indexGetCompar(dtype);
+ return tDoCompare(func, QUERY_GREATER_EQUAL, a, b);
+}
+int sifLessEqual(void *a, void *b, int16_t dtype) {
+ __compar_fn_t func = indexGetCompar(dtype);
+ return tDoCompare(func, QUERY_LESS_EQUAL, a, b);
+}
+int sifLessThan(void *a, void *b, int16_t dtype) {
+ __compar_fn_t func = indexGetCompar(dtype);
+ return (int)tDoCompare(func, QUERY_LESS_THAN, a, b);
+}
+int sifEqual(void *a, void *b, int16_t dtype) {
+ __compar_fn_t func = indexGetCompar(dtype);
+ return (int)tDoCompare(func, QUERY_TERM, a, b);
+}
+static Filter sifGetFilterFunc(EIndexQueryType type, bool *reverse) {
+ if (type == QUERY_LESS_EQUAL || type == QUERY_LESS_THAN) {
+ *reverse = true;
+ } else {
+ *reverse = false;
+ }
+ if (type == QUERY_LESS_EQUAL)
+ return sifLessEqual;
+ else if (type == QUERY_LESS_THAN)
+ return sifLessThan;
+ else if (type == QUERY_GREATER_EQUAL)
+ return sifGreaterEqual;
+ else if (type == QUERY_GREATER_THAN)
+ return sifGreaterThan;
+ else if (type == QUERY_TERM) {
+ return sifEqual;
+ }
+ return NULL;
+}
static int32_t sifDoIndex(SIFParam *left, SIFParam *right, int8_t operType, SIFParam *output) {
- SIndexTerm *tm = indexTermCreate(left->suid, DEFAULT, left->colValType, left->colName, strlen(left->colName),
+ SIndexMetaArg *arg = &output->arg;
+#ifdef USE_INVERTED_INDEX
+ SIndexTerm *tm = indexTermCreate(arg->suid, DEFAULT, left->colValType, left->colName, strlen(left->colName),
right->condValue, strlen(right->condValue));
if (tm == NULL) {
return TSDB_CODE_QRY_OUT_OF_MEMORY;
@@ -268,9 +317,27 @@ static int32_t sifDoIndex(SIFParam *left, SIFParam *right, int8_t operType, SIFP
SIndexMultiTermQuery *mtm = indexMultiTermQueryCreate(MUST);
indexMultiTermQueryAdd(mtm, tm, qtype);
- int ret = indexSearch(NULL, mtm, output->result);
+ int ret = indexSearch(arg->metaHandle, mtm, output->result);
+ indexDebug("index filter data size: %d", (int)taosArrayGetSize(output->result));
indexMultiTermQueryDestroy(mtm);
return ret;
+#else
+ EIndexQueryType qtype = 0;
+ SIF_ERR_RET(sifGetFuncFromSql(operType, &qtype));
+ bool reverse;
+ Filter filterFunc = sifGetFilterFunc(qtype, &reverse);
+
+ SMetaFltParam param = {.suid = arg->suid,
+ .cid = left->colId,
+ .type = left->colValType,
+ .val = right->condValue,
+ .reverse = reverse,
+ .filterFunc = filterFunc};
+
+ int ret = metaFilteTableIds(arg->metaEx, ¶m, output->result);
+ return ret;
+#endif
+ return 0;
}
static int32_t sifLessThanFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
@@ -372,6 +439,8 @@ static int32_t sifExecOper(SOperatorNode *node, SIFCtx *ctx, SIFParam *output) {
SIFParam *params = NULL;
SIF_ERR_RET(sifInitOperParams(¶ms, node, ctx));
+ // ugly code, refactor later
+ output->arg = ctx->arg;
sif_func_t operFn = sifGetOperFn(node->opType);
if (ctx->noExec && operFn == NULL) {
output->status = SFLT_NOT_INDEX;
@@ -423,7 +492,7 @@ _return:
static EDealRes sifWalkFunction(SNode *pNode, void *context) {
SFunctionNode *node = (SFunctionNode *)pNode;
- SIFParam output = {0};
+ SIFParam output = {.result = taosArrayInit(8, sizeof(uint64_t))};
SIFCtx *ctx = context;
ctx->code = sifExecFunction(node, ctx, &output);
@@ -439,7 +508,8 @@ static EDealRes sifWalkFunction(SNode *pNode, void *context) {
}
static EDealRes sifWalkLogic(SNode *pNode, void *context) {
SLogicConditionNode *node = (SLogicConditionNode *)pNode;
- SIFParam output = {0};
+
+ SIFParam output = {.result = taosArrayInit(8, sizeof(uint64_t))};
SIFCtx *ctx = context;
ctx->code = sifExecLogic(node, ctx, &output);
@@ -455,7 +525,7 @@ static EDealRes sifWalkLogic(SNode *pNode, void *context) {
}
static EDealRes sifWalkOper(SNode *pNode, void *context) {
SOperatorNode *node = (SOperatorNode *)pNode;
- SIFParam output = {0};
+ SIFParam output = {.result = taosArrayInit(8, sizeof(uint64_t))};
SIFCtx *ctx = context;
ctx->code = sifExecOper(node, ctx, &output);
@@ -507,8 +577,9 @@ static int32_t sifCalculate(SNode *pNode, SIFParam *pDst) {
return TSDB_CODE_QRY_INVALID_INPUT;
}
int32_t code = 0;
- SIFCtx ctx = {.code = 0, .noExec = false};
+ SIFCtx ctx = {.code = 0, .noExec = false, .arg = pDst->arg};
ctx.pRes = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
+
if (NULL == ctx.pRes) {
indexError("index-filter failed to taosHashInit");
return TSDB_CODE_QRY_OUT_OF_MEMORY;
@@ -523,7 +594,9 @@ static int32_t sifCalculate(SNode *pNode, SIFParam *pDst) {
indexError("no valid res in hash, node:(%p), type(%d)", (void *)&pNode, nodeType(pNode));
SIF_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
}
- taosArrayAddAll(pDst->result, res->result);
+ if (res->result != NULL) {
+ taosArrayAddAll(pDst->result, res->result);
+ }
sifFreeParam(res);
taosHashRemove(ctx.pRes, (void *)&pNode, POINTER_BYTES);
@@ -561,7 +634,7 @@ static int32_t sifGetFltHint(SNode *pNode, SIdxFltStatus *status) {
SIF_RET(code);
}
-int32_t doFilterTag(const SNode *pFilterNode, SArray *result) {
+int32_t doFilterTag(const SNode *pFilterNode, SIndexMetaArg *metaArg, SArray *result) {
if (pFilterNode == NULL) {
return TSDB_CODE_SUCCESS;
}
@@ -570,10 +643,12 @@ int32_t doFilterTag(const SNode *pFilterNode, SArray *result) {
// todo move to the initialization function
// SIF_ERR_RET(filterInitFromNode((SNode *)pFilterNode, &filter, 0));
- SIFParam param = {0};
+ SArray * output = taosArrayInit(8, sizeof(uint64_t));
+ SIFParam param = {.arg = *metaArg, .result = output};
SIF_ERR_RET(sifCalculate((SNode *)pFilterNode, ¶m));
taosArrayAddAll(result, param.result);
+ // taosArrayAddAll(result, param.result);
sifFreeParam(¶m);
SIF_RET(TSDB_CODE_SUCCESS);
}
diff --git a/source/libs/index/src/indexFst.c b/source/libs/index/src/indexFst.c
index 335b0865269604432259847de072a53854286c2c..892716f38708fed46bc755548436f2477d1e91e5 100644
--- a/source/libs/index/src/indexFst.c
+++ b/source/libs/index/src/indexFst.c
@@ -1324,7 +1324,7 @@ StreamWithStateResult* streamWithStateNextWith(StreamWithState* sws, StreamCallb
if (FST_NODE_ADDR(p->node) != fstGetRootAddr(sws->fst)) {
taosArrayPop(sws->inp);
}
- // streamStateDestroy(p);
+ streamStateDestroy(p);
continue;
}
FstTransition trn;
diff --git a/source/libs/index/src/indexFstUtil.c b/source/libs/index/src/indexFstUtil.c
index a980c6b740ab4f5b0e128479de342ce84c159c3c..5760b24900ef47e6a52419ade3d91cee9870709a 100644
--- a/source/libs/index/src/indexFstUtil.c
+++ b/source/libs/index/src/indexFstUtil.c
@@ -93,14 +93,15 @@ FstSlice fstSliceCreate(uint8_t* data, uint64_t len) {
// just shallow copy
FstSlice fstSliceCopy(FstSlice* s, int32_t start, int32_t end) {
FstString* str = s->str;
- str->ref++;
+ atomic_add_fetch_32(&str->ref, 1);
FstSlice t = {.str = str, .start = start + s->start, .end = end + s->start};
return t;
}
FstSlice fstSliceDeepCopy(FstSlice* s, int32_t start, int32_t end) {
- int32_t tlen = end - start + 1;
- int32_t slen;
+ int32_t tlen = end - start + 1;
+ int32_t slen;
+
uint8_t* data = fstSliceData(s, &slen);
assert(tlen <= slen);
@@ -129,8 +130,9 @@ uint8_t* fstSliceData(FstSlice* s, int32_t* size) {
}
void fstSliceDestroy(FstSlice* s) {
FstString* str = s->str;
- str->ref--;
- if (str->ref == 0) {
+
+ int32_t ref = atomic_sub_fetch_32(&str->ref, 1);
+ if (ref == 0) {
taosMemoryFree(str->data);
taosMemoryFree(str);
s->str = NULL;
diff --git a/source/libs/index/src/indexJson.c b/source/libs/index/src/indexJson.c
index de88ff3c8ae287eda194fd4c9d7bff7080edd15c..a2f0563d470f30cf989f71bf068c16e38b236ce4 100644
--- a/source/libs/index/src/indexJson.c
+++ b/source/libs/index/src/indexJson.c
@@ -24,8 +24,8 @@ int tIndexJsonPut(SIndexJson *index, SIndexJsonMultiTerm *terms, uint64_t uid) {
SIndexJsonTerm *p = taosArrayGetP(terms, i);
INDEX_TYPE_ADD_EXTERN_TYPE(p->colType, TSDB_DATA_TYPE_JSON);
}
- return indexPut(index, terms, uid);
// handle put
+ return indexPut(index, terms, uid);
}
int tIndexJsonSearch(SIndexJson *index, SIndexJsonMultiTermQuery *tq, SArray *result) {
@@ -34,11 +34,11 @@ int tIndexJsonSearch(SIndexJson *index, SIndexJsonMultiTermQuery *tq, SArray *re
SIndexJsonTerm *p = taosArrayGetP(terms, i);
INDEX_TYPE_ADD_EXTERN_TYPE(p->colType, TSDB_DATA_TYPE_JSON);
}
- return indexSearch(index, tq, result);
// handle search
+ return indexSearch(index, tq, result);
}
void tIndexJsonClose(SIndexJson *index) {
- return indexClose(index);
// handle close
+ return indexClose(index);
}
diff --git a/source/libs/index/src/indexTfile.c b/source/libs/index/src/indexTfile.c
index 3d85646bd25596e7d3a666b99287d6b5e3d5e902..53dd2923ac8c1f07b62098a3663c030016b46a72 100644
--- a/source/libs/index/src/indexTfile.c
+++ b/source/libs/index/src/indexTfile.c
@@ -54,37 +54,37 @@ static SArray* tfileGetFileList(const char* path);
static int tfileRmExpireFile(SArray* result);
static void tfileDestroyFileName(void* elem);
static int tfileCompare(const void* a, const void* b);
-static int tfileParseFileName(const char* filename, uint64_t* suid, char* col, int* version);
-static void tfileGenFileName(char* filename, uint64_t suid, const char* col, int version);
-static void tfileGenFileFullName(char* fullname, const char* path, uint64_t suid, const char* col, int32_t version);
+static int tfileParseFileName(const char* filename, uint64_t* suid, char* col, int64_t* version);
+static void tfileGenFileName(char* filename, uint64_t suid, const char* col, int64_t version);
+static void tfileGenFileFullName(char* fullname, const char* path, uint64_t suid, const char* col, int64_t version);
/*
* search from tfile
*/
-static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchPrefix(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchSuffix(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchRegex(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchLessThan(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchLessEqual(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchGreaterThan(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchGreaterEqual(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchRange(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-
-static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTempResult* tr, RangeType ctype);
-
-static int32_t tfSearchTerm_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchPrefix_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchSuffix_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchRegex_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchLessThan_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchLessEqual_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchGreaterThan_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchGreaterEqual_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchRange_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-
-static int32_t tfSearchCompareFunc_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr, RangeType ctype);
-
-static int32_t (*tfSearch[][QUERY_MAX])(void* reader, SIndexTerm* tem, SIdxTempResult* tr) = {
+static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchPrefix(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchSuffix(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchRegex(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchLessThan(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchLessEqual(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchGreaterThan(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchGreaterEqual(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchRange(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+
+static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTRslt* tr, RangeType ctype);
+
+static int32_t tfSearchTerm_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchPrefix_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchSuffix_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchRegex_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchLessThan_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchLessEqual_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchGreaterThan_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchGreaterEqual_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchRange_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+
+static int32_t tfSearchCompareFunc_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr, RangeType ctype);
+
+static int32_t (*tfSearch[][QUERY_MAX])(void* reader, SIndexTerm* tem, SIdxTRslt* tr) = {
{tfSearchTerm, tfSearchPrefix, tfSearchSuffix, tfSearchRegex, tfSearchLessThan, tfSearchLessEqual,
tfSearchGreaterThan, tfSearchGreaterEqual, tfSearchRange},
{tfSearchTerm_JSON, tfSearchPrefix_JSON, tfSearchSuffix_JSON, tfSearchRegex_JSON, tfSearchLessThan_JSON,
@@ -151,13 +151,10 @@ TFileReader* tfileCacheGet(TFileCache* tcache, ICacheKey* key) {
char buf[128] = {0};
int32_t sz = indexSerialCacheKey(key, buf);
assert(sz < sizeof(buf));
- indexInfo("Try to get key: %s", buf);
TFileReader** reader = taosHashGet(tcache->tableCache, buf, sz);
if (reader == NULL || *reader == NULL) {
- indexInfo("failed to get key: %s", buf);
return NULL;
}
- indexInfo("Get key: %s file: %s", buf, (*reader)->ctx->file.buf);
tfileReaderRef(*reader);
return *reader;
@@ -168,11 +165,11 @@ void tfileCachePut(TFileCache* tcache, ICacheKey* key, TFileReader* reader) {
// remove last version index reader
TFileReader** p = taosHashGet(tcache->tableCache, buf, sz);
if (p != NULL && *p != NULL) {
- TFileReader* oldReader = *p;
+ TFileReader* oldRdr = *p;
taosHashRemove(tcache->tableCache, buf, sz);
- indexInfo("found %s, remove file %s", buf, oldReader->ctx->file.buf);
- oldReader->remove = true;
- tfileReaderUnRef(oldReader);
+ indexInfo("found %s, should remove file %s", buf, oldRdr->ctx->file.buf);
+ oldRdr->remove = true;
+ tfileReaderUnRef(oldRdr);
}
taosHashPut(tcache->tableCache, buf, sz, &reader, sizeof(void*));
tfileReaderRef(reader);
@@ -214,10 +211,16 @@ void tfileReaderDestroy(TFileReader* reader) {
}
// T_REF_INC(reader);
fstDestroy(reader->fst);
+ if (reader->remove) {
+ indexInfo("%s is removed", reader->ctx->file.buf);
+ } else {
+ indexInfo("%s is not removed", reader->ctx->file.buf);
+ }
writerCtxDestroy(reader->ctx, reader->remove);
+
taosMemoryFree(reader);
}
-static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
int ret = 0;
char* p = tem->colVal;
uint64_t sz = tem->nColVal;
@@ -240,7 +243,7 @@ static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
return 0;
}
-static int32_t tfSearchPrefix(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchPrefix(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
bool hasJson = INDEX_TYPE_CONTAIN_EXTERN_TYPE(tem->colType, TSDB_DATA_TYPE_JSON);
char* p = tem->colVal;
uint64_t sz = tem->nColVal;
@@ -276,7 +279,7 @@ static int32_t tfSearchPrefix(void* reader, SIndexTerm* tem, SIdxTempResult* tr)
}
return 0;
}
-static int32_t tfSearchSuffix(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchSuffix(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
bool hasJson = INDEX_TYPE_CONTAIN_EXTERN_TYPE(tem->colType, TSDB_DATA_TYPE_JSON);
int ret = 0;
@@ -295,7 +298,7 @@ static int32_t tfSearchSuffix(void* reader, SIndexTerm* tem, SIdxTempResult* tr)
fstSliceDestroy(&key);
return 0;
}
-static int32_t tfSearchRegex(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchRegex(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
bool hasJson = INDEX_TYPE_CONTAIN_EXTERN_TYPE(tem->colType, TSDB_DATA_TYPE_JSON);
int ret = 0;
@@ -316,7 +319,7 @@ static int32_t tfSearchRegex(void* reader, SIndexTerm* tem, SIdxTempResult* tr)
return 0;
}
-static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTempResult* tr, RangeType type) {
+static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTRslt* tr, RangeType type) {
int ret = 0;
char* p = tem->colVal;
int skip = 0;
@@ -355,19 +358,19 @@ static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTempResult
fstStreamBuilderDestroy(sb);
return TSDB_CODE_SUCCESS;
}
-static int32_t tfSearchLessThan(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchLessThan(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
return tfSearchCompareFunc(reader, tem, tr, LT);
}
-static int32_t tfSearchLessEqual(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchLessEqual(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
return tfSearchCompareFunc(reader, tem, tr, LE);
}
-static int32_t tfSearchGreaterThan(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchGreaterThan(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
return tfSearchCompareFunc(reader, tem, tr, GT);
}
-static int32_t tfSearchGreaterEqual(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchGreaterEqual(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
return tfSearchCompareFunc(reader, tem, tr, GE);
}
-static int32_t tfSearchRange(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchRange(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
bool hasJson = INDEX_TYPE_CONTAIN_EXTERN_TYPE(tem->colType, TSDB_DATA_TYPE_JSON);
int ret = 0;
char* p = tem->colVal;
@@ -396,7 +399,7 @@ static int32_t tfSearchRange(void* reader, SIndexTerm* tem, SIdxTempResult* tr)
fstSliceDestroy(&key);
return 0;
}
-static int32_t tfSearchTerm_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchTerm_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
int ret = 0;
char* p = indexPackJsonData(tem);
int sz = strlen(p);
@@ -421,36 +424,36 @@ static int32_t tfSearchTerm_JSON(void* reader, SIndexTerm* tem, SIdxTempResult*
// deprecate api
return TSDB_CODE_SUCCESS;
}
-static int32_t tfSearchPrefix_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchPrefix_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
// impl later
return TSDB_CODE_SUCCESS;
}
-static int32_t tfSearchSuffix_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchSuffix_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
// impl later
return TSDB_CODE_SUCCESS;
}
-static int32_t tfSearchRegex_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchRegex_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
// impl later
return TSDB_CODE_SUCCESS;
}
-static int32_t tfSearchLessThan_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchLessThan_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
return tfSearchCompareFunc_JSON(reader, tem, tr, LT);
}
-static int32_t tfSearchLessEqual_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchLessEqual_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
return tfSearchCompareFunc_JSON(reader, tem, tr, LE);
}
-static int32_t tfSearchGreaterThan_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchGreaterThan_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
return tfSearchCompareFunc_JSON(reader, tem, tr, GT);
}
-static int32_t tfSearchGreaterEqual_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchGreaterEqual_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
return tfSearchCompareFunc_JSON(reader, tem, tr, GE);
}
-static int32_t tfSearchRange_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchRange_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
// impl later
return TSDB_CODE_SUCCESS;
}
-static int32_t tfSearchCompareFunc_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr, RangeType ctype) {
+static int32_t tfSearchCompareFunc_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr, RangeType ctype) {
int ret = 0;
int skip = 0;
@@ -498,7 +501,7 @@ static int32_t tfSearchCompareFunc_JSON(void* reader, SIndexTerm* tem, SIdxTempR
fstStreamBuilderDestroy(sb);
return TSDB_CODE_SUCCESS;
}
-int tfileReaderSearch(TFileReader* reader, SIndexTermQuery* query, SIdxTempResult* tr) {
+int tfileReaderSearch(TFileReader* reader, SIndexTermQuery* query, SIdxTRslt* tr) {
SIndexTerm* term = query->term;
EIndexQueryType qtype = query->qType;
int ret = 0;
@@ -512,7 +515,7 @@ int tfileReaderSearch(TFileReader* reader, SIndexTermQuery* query, SIdxTempResul
return ret;
}
-TFileWriter* tfileWriterOpen(char* path, uint64_t suid, int32_t version, const char* colName, uint8_t colType) {
+TFileWriter* tfileWriterOpen(char* path, uint64_t suid, int64_t version, const char* colName, uint8_t colType) {
char fullname[256] = {0};
tfileGenFileFullName(fullname, path, suid, colName, version);
// indexInfo("open write file name %s", fullname);
@@ -529,7 +532,7 @@ TFileWriter* tfileWriterOpen(char* path, uint64_t suid, int32_t version, const c
return tfileWriterCreate(wcx, &tfh);
}
-TFileReader* tfileReaderOpen(char* path, uint64_t suid, int32_t version, const char* colName) {
+TFileReader* tfileReaderOpen(char* path, uint64_t suid, int64_t version, const char* colName) {
char fullname[256] = {0};
tfileGenFileFullName(fullname, path, suid, colName, version);
@@ -657,7 +660,7 @@ IndexTFile* indexTFileCreate(const char* path) {
tfileCacheDestroy(cache);
return NULL;
}
-
+ taosThreadMutexInit(&tfile->mtx, NULL);
tfile->cache = cache;
return tfile;
}
@@ -665,11 +668,12 @@ void indexTFileDestroy(IndexTFile* tfile) {
if (tfile == NULL) {
return;
}
+ taosThreadMutexDestroy(&tfile->mtx);
tfileCacheDestroy(tfile->cache);
taosMemoryFree(tfile);
}
-int indexTFileSearch(void* tfile, SIndexTermQuery* query, SIdxTempResult* result) {
+int indexTFileSearch(void* tfile, SIndexTermQuery* query, SIdxTRslt* result) {
int ret = -1;
if (tfile == NULL) {
return ret;
@@ -680,7 +684,10 @@ int indexTFileSearch(void* tfile, SIndexTermQuery* query, SIdxTempResult* result
SIndexTerm* term = query->term;
ICacheKey key = {.suid = term->suid, .colType = term->colType, .colName = term->colName, .nColName = term->nColName};
+
+ taosThreadMutexLock(&pTfile->mtx);
TFileReader* reader = tfileCacheGet(pTfile->cache, &key);
+ taosThreadMutexUnlock(&pTfile->mtx);
if (reader == NULL) {
return 0;
}
@@ -780,8 +787,13 @@ TFileReader* tfileGetReaderByCol(IndexTFile* tf, uint64_t suid, char* colName) {
if (tf == NULL) {
return NULL;
}
- ICacheKey key = {.suid = suid, .colType = TSDB_DATA_TYPE_BINARY, .colName = colName, .nColName = strlen(colName)};
- return tfileCacheGet(tf->cache, &key);
+ TFileReader* rd = NULL;
+ ICacheKey key = {.suid = suid, .colType = TSDB_DATA_TYPE_BINARY, .colName = colName, .nColName = strlen(colName)};
+
+ taosThreadMutexLock(&tf->mtx);
+ rd = tfileCacheGet(tf->cache, &key);
+ taosThreadMutexUnlock(&tf->mtx);
+ return rd;
}
static int tfileUidCompare(const void* a, const void* b) {
@@ -1013,7 +1025,7 @@ void tfileReaderUnRef(TFileReader* reader) {
static SArray* tfileGetFileList(const char* path) {
char buf[128] = {0};
uint64_t suid;
- uint32_t version;
+ int64_t version;
SArray* files = taosArrayInit(4, sizeof(void*));
TdDirPtr pDir = taosOpenDir(path);
@@ -1053,19 +1065,19 @@ static int tfileCompare(const void* a, const void* b) {
return strcmp(as, bs);
}
-static int tfileParseFileName(const char* filename, uint64_t* suid, char* col, int* version) {
- if (3 == sscanf(filename, "%" PRIu64 "-%[^-]-%d.tindex", suid, col, version)) {
+static int tfileParseFileName(const char* filename, uint64_t* suid, char* col, int64_t* version) {
+ if (3 == sscanf(filename, "%" PRIu64 "-%[^-]-%" PRId64 ".tindex", suid, col, version)) {
// read suid & colid & version success
return 0;
}
return -1;
}
// tfile name suid-colId-version.tindex
-static void tfileGenFileName(char* filename, uint64_t suid, const char* col, int version) {
- sprintf(filename, "%" PRIu64 "-%s-%d.tindex", suid, col, version);
+static void tfileGenFileName(char* filename, uint64_t suid, const char* col, int64_t version) {
+ sprintf(filename, "%" PRIu64 "-%s-%" PRId64 ".tindex", suid, col, version);
return;
}
-static void tfileGenFileFullName(char* fullname, const char* path, uint64_t suid, const char* col, int32_t version) {
+static void tfileGenFileFullName(char* fullname, const char* path, uint64_t suid, const char* col, int64_t version) {
char filename[128] = {0};
tfileGenFileName(filename, suid, col, version);
sprintf(fullname, "%s/%s", path, filename);
diff --git a/source/libs/index/src/indexUtil.c b/source/libs/index/src/indexUtil.c
index a618787fd49c96b729e782b4a01a5374c76639be..1d2027889572fcd809e378dcae13560b0bae51c1 100644
--- a/source/libs/index/src/indexUtil.c
+++ b/source/libs/index/src/indexUtil.c
@@ -36,24 +36,24 @@ static int iBinarySearch(SArray *arr, int s, int e, uint64_t k) {
return s;
}
-void iIntersection(SArray *inters, SArray *final) {
- int32_t sz = (int32_t)taosArrayGetSize(inters);
+void iIntersection(SArray *in, SArray *out) {
+ int32_t sz = (int32_t)taosArrayGetSize(in);
if (sz <= 0) {
return;
}
MergeIndex *mi = taosMemoryCalloc(sz, sizeof(MergeIndex));
for (int i = 0; i < sz; i++) {
- SArray *t = taosArrayGetP(inters, i);
+ SArray *t = taosArrayGetP(in, i);
mi[i].len = (int32_t)taosArrayGetSize(t);
mi[i].idx = 0;
}
- SArray *base = taosArrayGetP(inters, 0);
+ SArray *base = taosArrayGetP(in, 0);
for (int i = 0; i < taosArrayGetSize(base); i++) {
uint64_t tgt = *(uint64_t *)taosArrayGet(base, i);
bool has = true;
- for (int j = 1; j < taosArrayGetSize(inters); j++) {
- SArray *oth = taosArrayGetP(inters, j);
+ for (int j = 1; j < taosArrayGetSize(in); j++) {
+ SArray *oth = taosArrayGetP(in, j);
int mid = iBinarySearch(oth, mi[j].idx, mi[j].len - 1, tgt);
if (mid >= 0 && mid < mi[j].len) {
uint64_t val = *(uint64_t *)taosArrayGet(oth, mid);
@@ -64,33 +64,33 @@ void iIntersection(SArray *inters, SArray *final) {
}
}
if (has == true) {
- taosArrayPush(final, &tgt);
+ taosArrayPush(out, &tgt);
}
}
taosMemoryFreeClear(mi);
}
-void iUnion(SArray *inters, SArray *final) {
- int32_t sz = (int32_t)taosArrayGetSize(inters);
+void iUnion(SArray *in, SArray *out) {
+ int32_t sz = (int32_t)taosArrayGetSize(in);
if (sz <= 0) {
return;
}
if (sz == 1) {
- taosArrayAddAll(final, taosArrayGetP(inters, 0));
+ taosArrayAddAll(out, taosArrayGetP(in, 0));
return;
}
MergeIndex *mi = taosMemoryCalloc(sz, sizeof(MergeIndex));
for (int i = 0; i < sz; i++) {
- SArray *t = taosArrayGetP(inters, i);
+ SArray *t = taosArrayGetP(in, i);
mi[i].len = (int32_t)taosArrayGetSize(t);
mi[i].idx = 0;
}
while (1) {
- uint64_t mVal = UINT_MAX;
+ uint64_t mVal = UINT64_MAX;
int mIdx = -1;
for (int j = 0; j < sz; j++) {
- SArray *t = taosArrayGetP(inters, j);
+ SArray *t = taosArrayGetP(in, j);
if (mi[j].idx >= mi[j].len) {
continue;
}
@@ -102,13 +102,13 @@ void iUnion(SArray *inters, SArray *final) {
}
if (mIdx != -1) {
mi[mIdx].idx++;
- if (taosArrayGetSize(final) > 0) {
- uint64_t lVal = *(uint64_t *)taosArrayGetLast(final);
+ if (taosArrayGetSize(out) > 0) {
+ uint64_t lVal = *(uint64_t *)taosArrayGetLast(out);
if (lVal == mVal) {
continue;
}
}
- taosArrayPush(final, &mVal);
+ taosArrayPush(out, &mVal);
} else {
break;
}
@@ -158,41 +158,44 @@ int verdataCompare(const void *a, const void *b) {
return cmp;
}
-SIdxTempResult *sIdxTempResultCreate() {
- SIdxTempResult *tr = taosMemoryCalloc(1, sizeof(SIdxTempResult));
+SIdxTRslt *idxTRsltCreate() {
+ SIdxTRslt *tr = taosMemoryCalloc(1, sizeof(SIdxTRslt));
tr->total = taosArrayInit(4, sizeof(uint64_t));
- tr->added = taosArrayInit(4, sizeof(uint64_t));
- tr->deled = taosArrayInit(4, sizeof(uint64_t));
+ tr->add = taosArrayInit(4, sizeof(uint64_t));
+ tr->del = taosArrayInit(4, sizeof(uint64_t));
return tr;
}
-void sIdxTempResultClear(SIdxTempResult *tr) {
+void idxTRsltClear(SIdxTRslt *tr) {
if (tr == NULL) {
return;
}
taosArrayClear(tr->total);
- taosArrayClear(tr->added);
- taosArrayClear(tr->deled);
+ taosArrayClear(tr->add);
+ taosArrayClear(tr->del);
}
-void sIdxTempResultDestroy(SIdxTempResult *tr) {
+void idxTRsltDestroy(SIdxTRslt *tr) {
if (tr == NULL) {
return;
}
taosArrayDestroy(tr->total);
- taosArrayDestroy(tr->added);
- taosArrayDestroy(tr->deled);
+ taosArrayDestroy(tr->add);
+ taosArrayDestroy(tr->del);
}
-void sIdxTempResultMergeTo(SArray *result, SIdxTempResult *tr) {
+void idxTRsltMergeTo(SIdxTRslt *tr, SArray *result) {
taosArraySort(tr->total, uidCompare);
- taosArraySort(tr->added, uidCompare);
- taosArraySort(tr->deled, uidCompare);
-
- SArray *arrs = taosArrayInit(2, sizeof(void *));
- taosArrayPush(arrs, &tr->total);
- taosArrayPush(arrs, &tr->added);
-
- iUnion(arrs, result);
- taosArrayDestroy(arrs);
-
- iExcept(result, tr->deled);
+ taosArraySort(tr->add, uidCompare);
+ taosArraySort(tr->del, uidCompare);
+
+ if (taosArrayGetSize(tr->total) == 0 || taosArrayGetSize(tr->add) == 0) {
+ SArray *t = taosArrayGetSize(tr->total) == 0 ? tr->add : tr->total;
+ taosArrayAddAll(result, t);
+ } else {
+ SArray *arrs = taosArrayInit(2, sizeof(void *));
+ taosArrayPush(arrs, &tr->total);
+ taosArrayPush(arrs, &tr->add);
+ iUnion(arrs, result);
+ taosArrayDestroy(arrs);
+ }
+ iExcept(result, tr->del);
}
diff --git a/source/libs/index/test/CMakeLists.txt b/source/libs/index/test/CMakeLists.txt
index c0b47e74c6b0561141806dae8ce14ab4d632ec8e..2835084a81b87e358916c20ce0e6c70cf6884021 100644
--- a/source/libs/index/test/CMakeLists.txt
+++ b/source/libs/index/test/CMakeLists.txt
@@ -1,74 +1,74 @@
-add_executable(indexTest "")
-add_executable(fstTest "")
-add_executable(fstUT "")
-add_executable(UtilUT "")
-add_executable(jsonUT "")
+add_executable(idxTest "")
+add_executable(idxFstTest "")
+add_executable(idxFstUT "")
+add_executable(idxUtilUT "")
+add_executable(idxJsonUT "")
-target_sources(indexTest
+target_sources(idxTest
PRIVATE
"indexTests.cc"
)
-target_sources(fstTest
+target_sources(idxFstTest
PRIVATE
"fstTest.cc"
)
-target_sources(fstUT
+target_sources(idxFstUT
PRIVATE
"fstUT.cc"
)
-target_sources(UtilUT
+target_sources(idxUtilUT
PRIVATE
"utilUT.cc"
)
-target_sources(jsonUT
+target_sources(idxJsonUT
PRIVATE
"jsonUT.cc"
)
-target_include_directories ( indexTest
+target_include_directories (idxTest
PUBLIC
"${TD_SOURCE_DIR}/include/libs/index"
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
)
-target_include_directories ( fstTest
+target_include_directories (idxFstTest
PUBLIC
"${TD_SOURCE_DIR}/include/libs/index"
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
)
-target_include_directories ( fstUT
+target_include_directories (idxFstUT
PUBLIC
"${TD_SOURCE_DIR}/include/libs/index"
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
)
-target_include_directories ( UtilUT
+target_include_directories (idxUtilUT
PUBLIC
"${TD_SOURCE_DIR}/include/libs/index"
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
)
-target_include_directories (jsonUT
+target_include_directories (idxJsonUT
PUBLIC
"${TD_SOURCE_DIR}/include/libs/index"
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
)
-target_link_libraries (indexTest
+target_link_libraries (idxTest
os
util
common
gtest_main
index
)
-target_link_libraries (fstTest
+target_link_libraries (idxFstTest
os
util
common
gtest_main
index
)
-target_link_libraries (fstUT
+target_link_libraries (idxFstUT
os
util
common
@@ -76,7 +76,7 @@ target_link_libraries (fstUT
index
)
-target_link_libraries (UtilUT
+target_link_libraries (idxUtilUT
os
util
common
@@ -84,7 +84,7 @@ target_link_libraries (UtilUT
index
)
-target_link_libraries (jsonUT
+target_link_libraries (idxJsonUT
os
util
common
@@ -92,19 +92,21 @@ target_link_libraries (jsonUT
index
)
-add_test(
- NAME idxtest
- COMMAND indexTest
-)
-add_test(
- NAME idxJsonUT
- COMMAND jsonUT
-)
+if(NOT TD_WINDOWS)
+ add_test(
+ NAME idxtest
+ COMMAND idxTest
+ )
+ add_test(
+ NAME idxJsonUT
+ COMMAND idxJsonUT
+ )
+endif(NOT TD_WINDOWS)
add_test(
NAME idxUtilUT
- COMMAND UtilUT
+ COMMAND idxUtilUT
)
add_test(
NAME idxFstUT
- COMMAND fstUT
+ COMMAND idxFstUT
)
diff --git a/source/libs/index/test/indexTests.cc b/source/libs/index/test/indexTests.cc
index f848cee86b4af0376af61640eb01a07eb1c22371..74a30c3387ea3c3133e4e4f82ffd3dd8dc38f540 100644
--- a/source/libs/index/test/indexTests.cc
+++ b/source/libs/index/test/indexTests.cc
@@ -272,14 +272,14 @@ void validateFst() {
}
delete m;
}
-static std::string logDir = "/tmp/log";
+static std::string logDir = TD_TMP_DIR_PATH "log";
static void initLog() {
const char* defaultLogFileNamePrefix = "taoslog";
const int32_t maxLogFileNum = 10;
tsAsyncLog = 0;
- sDebugFlag = 143;
+ idxDebugFlag = 143;
strcpy(tsLogDir, logDir.c_str());
taosRemoveDir(tsLogDir);
taosMkDir(tsLogDir);
@@ -387,7 +387,7 @@ class TFileObj {
std::string path(path_);
int colId = 2;
char buf[64] = {0};
- sprintf(buf, "%" PRIu64 "-%d-%d.tindex", header.suid, colId_, header.version);
+ sprintf(buf, "%" PRIu64 "-%d-%" PRId64 ".tindex", header.suid, colId_, header.version);
path.append("/").append(buf);
fileName_ = path;
@@ -411,12 +411,12 @@ class TFileObj {
//
//
}
- SIdxTempResult* tr = sIdxTempResultCreate();
+ SIdxTRslt* tr = idxTRsltCreate();
int ret = tfileReaderSearch(reader_, query, tr);
- sIdxTempResultMergeTo(result, tr);
- sIdxTempResultDestroy(tr);
+ idxTRsltMergeTo(tr, result);
+ idxTRsltDestroy(tr);
return ret;
}
~TFileObj() {
@@ -531,11 +531,11 @@ class CacheObj {
indexCacheDebug(cache);
}
int Get(SIndexTermQuery* query, int16_t colId, int32_t version, SArray* result, STermValueType* s) {
- SIdxTempResult* tr = sIdxTempResultCreate();
+ SIdxTRslt* tr = idxTRsltCreate();
int ret = indexCacheSearch(cache, query, tr, s);
- sIdxTempResultMergeTo(result, tr);
- sIdxTempResultDestroy(tr);
+ idxTRsltMergeTo(tr, result);
+ idxTRsltDestroy(tr);
if (ret != 0) {
std::cout << "failed to get from cache:" << ret << std::endl;
@@ -794,10 +794,10 @@ class IndexObj {
}
int sz = taosArrayGetSize(result);
indexMultiTermQueryDestroy(mq);
- taosArrayDestroy(result);
assert(sz == 1);
uint64_t* ret = (uint64_t*)taosArrayGet(result, 0);
assert(val = *ret);
+ taosArrayDestroy(result);
return sz;
}
@@ -916,7 +916,7 @@ TEST_F(IndexEnv2, testIndexOpen) {
}
}
TEST_F(IndexEnv2, testEmptyIndexOpen) {
- std::string path = "/tmp/test";
+ std::string path = TD_TMP_DIR_PATH "test";
if (index->Init(path) != 0) {
std::cout << "failed to init index" << std::endl;
exit(1);
@@ -953,8 +953,8 @@ TEST_F(IndexEnv2, testIndex_TrigeFlush) {
}
static void single_write_and_search(IndexObj* idx) {
- int target = idx->SearchOne("tag1", "Hello");
- target = idx->SearchOne("tag2", "Test");
+ // int target = idx->SearchOne("tag1", "Hello");
+ // target = idx->SearchOne("tag2", "Test");
}
static void multi_write_and_search(IndexObj* idx) {
idx->PutOne("tag1", "Hello");
diff --git a/source/libs/index/test/index_executor_tests.cpp b/source/libs/index/test/index_executor_tests.cpp
index b0c2a983d1b5f60b50e4f5734a8c99fb3729d80e..b88ffe5b8bdb2058a66d1e56020206643c246e42 100644
--- a/source/libs/index/test/index_executor_tests.cpp
+++ b/source/libs/index/test/index_executor_tests.cpp
@@ -24,11 +24,7 @@
#pragma GCC diagnostic ignored "-Wunused-variable"
#pragma GCC diagnostic ignored "-Wsign-compare"
-#include "executor.h"
-#include "executorimpl.h"
-#include "indexoperator.h"
-#include "os.h"
-
+#include "index.h"
#include "stub.h"
#include "taos.h"
#include "tcompare.h"
diff --git a/source/libs/index/test/jsonUT.cc b/source/libs/index/test/jsonUT.cc
index 8a837c5700da2b8c70d083d5f282933844091673..48ce8839c459bb2c523d710f1804346f2bede33a 100644
--- a/source/libs/index/test/jsonUT.cc
+++ b/source/libs/index/test/jsonUT.cc
@@ -24,7 +24,7 @@ static void initLog() {
const int32_t maxLogFileNum = 10;
tsAsyncLog = 0;
- sDebugFlag = 143;
+ idxDebugFlag = 143;
strcpy(tsLogDir, logDir.c_str());
taosRemoveDir(tsLogDir);
taosMkDir(tsLogDir);
@@ -51,6 +51,7 @@ class JsonEnv : public ::testing::Test {
tIndexJsonClose(index);
indexOptsDestroy(opts);
printf("destory\n");
+ taosMsleep(1000);
}
SIndexJsonOpts* opts;
SIndexJson* index;
diff --git a/source/libs/index/test/utilUT.cc b/source/libs/index/test/utilUT.cc
index 18a2b457c41c2cd66f20a01f3690d0af4fe69d3d..4a30160244d82b8c00b3e7b031d6fd492057ec21 100644
--- a/source/libs/index/test/utilUT.cc
+++ b/source/libs/index/test/utilUT.cc
@@ -226,6 +226,22 @@ TEST_F(UtilEnv, 04union) {
iUnion(src, rslt);
assert(taosArrayGetSize(rslt) == 12);
}
+TEST_F(UtilEnv, 05unionExcept) {
+ clearSourceArray(src);
+ clearFinalArray(rslt);
+
+ uint64_t arr2[] = {7};
+ SArray * f = (SArray *)taosArrayGetP(src, 1);
+ for (int i = 0; i < sizeof(arr2) / sizeof(arr2[0]); i++) {
+ taosArrayPush(f, &arr2[i]);
+ }
+
+ iUnion(src, rslt);
+
+ SArray *ept = taosArrayInit(0, sizeof(uint64_t));
+ iExcept(rslt, ept);
+ EXPECT_EQ(taosArrayGetSize(rslt), 1);
+}
TEST_F(UtilEnv, 01Except) {
SArray *total = taosArrayInit(4, sizeof(uint64_t));
{
@@ -308,16 +324,36 @@ TEST_F(UtilEnv, 01Except) {
ASSERT_EQ(*(uint64_t *)taosArrayGet(total, 1), 100);
}
TEST_F(UtilEnv, testFill) {
- for (int i = 0; i < 10000000; i++) {
+ for (int i = 0; i < 1000000; i++) {
int64_t val = i;
char buf[65] = {0};
indexInt2str(val, buf, 1);
EXPECT_EQ(val, taosStr2int64(buf));
}
- for (int i = 0; i < 10000000; i++) {
+ for (int i = 0; i < 1000000; i++) {
int64_t val = 0 - i;
char buf[65] = {0};
indexInt2str(val, buf, -1);
EXPECT_EQ(val, taosStr2int64(buf));
}
}
+TEST_F(UtilEnv, TempResult) {
+ SIdxTRslt *relt = idxTRsltCreate();
+
+ SArray *f = taosArrayInit(0, sizeof(uint64_t));
+
+ uint64_t val = UINT64_MAX - 1;
+ taosArrayPush(relt->add, &val);
+ idxTRsltMergeTo(relt, f);
+ EXPECT_EQ(taosArrayGetSize(f), 1);
+}
+TEST_F(UtilEnv, TempResultExcept) {
+ SIdxTRslt *relt = idxTRsltCreate();
+
+ SArray *f = taosArrayInit(0, sizeof(uint64_t));
+
+ uint64_t val = UINT64_MAX;
+ taosArrayPush(relt->add, &val);
+ idxTRsltMergeTo(relt, f);
+ EXPECT_EQ(taosArrayGetSize(f), 1);
+}
diff --git a/source/libs/monitor/src/monMain.c b/source/libs/monitor/src/monMain.c
index 3ece089a2821a4e9db0a5e66853c01a224a2e78c..bf857ad718d27f1057529824cfd9cc53106a73bb 100644
--- a/source/libs/monitor/src/monMain.c
+++ b/source/libs/monitor/src/monMain.c
@@ -530,7 +530,8 @@ void monSendReport() {
monGenLogJson(pMonitor);
char *pCont = tjsonToString(pMonitor->pJson);
- if (pCont != NULL) {
+ // uDebugL("report cont:%s\n", pCont);
+ if (pCont != NULL) {
EHttpCompFlag flag = tsMonitor.cfg.comp ? HTTP_GZIP : HTTP_FLAT;
if (taosSendHttpReport(tsMonitor.cfg.server, tsMonitor.cfg.port, pCont, strlen(pCont), flag) != 0) {
uError("failed to send monitor msg");
diff --git a/source/libs/monitor/src/monMsg.c b/source/libs/monitor/src/monMsg.c
index e106cbd428b48f7751785b019e21f8c5e547969c..944a7b54750c9e8850d0fe124f36561c54a6630e 100644
--- a/source/libs/monitor/src/monMsg.c
+++ b/source/libs/monitor/src/monMsg.c
@@ -556,4 +556,50 @@ int32_t tDeserializeSMonMloadInfo(void *buf, int32_t bufLen, SMonMloadInfo *pInf
tDecoderClear(&decoder);
return 0;
-}
\ No newline at end of file
+}
+
+
+int32_t tSerializeSQnodeLoad(void *buf, int32_t bufLen, SQnodeLoad *pInfo) {
+ SEncoder encoder = {0};
+ tEncoderInit(&encoder, buf, bufLen);
+
+ if (tStartEncode(&encoder) < 0) return -1;
+ if (tEncodeI64(&encoder, pInfo->numOfProcessedQuery) < 0) return -1;
+ if (tEncodeI64(&encoder, pInfo->numOfProcessedCQuery) < 0) return -1;
+ if (tEncodeI64(&encoder, pInfo->numOfProcessedFetch) < 0) return -1;
+ if (tEncodeI64(&encoder, pInfo->numOfProcessedDrop) < 0) return -1;
+ if (tEncodeI64(&encoder, pInfo->numOfProcessedHb) < 0) return -1;
+ if (tEncodeI64(&encoder, pInfo->cacheDataSize) < 0) return -1;
+ if (tEncodeI64(&encoder, pInfo->numOfQueryInQueue) < 0) return -1;
+ if (tEncodeI64(&encoder, pInfo->numOfFetchInQueue) < 0) return -1;
+ if (tEncodeI64(&encoder, pInfo->timeInQueryQueue) < 0) return -1;
+ if (tEncodeI64(&encoder, pInfo->timeInFetchQueue) < 0) return -1;
+ tEndEncode(&encoder);
+
+ int32_t tlen = encoder.pos;
+ tEncoderClear(&encoder);
+ return tlen;
+}
+
+int32_t tDeserializeSQnodeLoad(void *buf, int32_t bufLen, SQnodeLoad *pInfo) {
+ SDecoder decoder = {0};
+ tDecoderInit(&decoder, buf, bufLen);
+
+ if (tStartDecode(&decoder) < 0) return -1;
+ if (tDecodeI64(&decoder, &pInfo->numOfProcessedQuery) < 0) return -1;
+ if (tDecodeI64(&decoder, &pInfo->numOfProcessedCQuery) < 0) return -1;
+ if (tDecodeI64(&decoder, &pInfo->numOfProcessedFetch) < 0) return -1;
+ if (tDecodeI64(&decoder, &pInfo->numOfProcessedDrop) < 0) return -1;
+ if (tDecodeI64(&decoder, &pInfo->numOfProcessedHb) < 0) return -1;
+ if (tDecodeI64(&decoder, &pInfo->cacheDataSize) < 0) return -1;
+ if (tDecodeI64(&decoder, &pInfo->numOfQueryInQueue) < 0) return -1;
+ if (tDecodeI64(&decoder, &pInfo->numOfFetchInQueue) < 0) return -1;
+ if (tDecodeI64(&decoder, &pInfo->timeInQueryQueue) < 0) return -1;
+ if (tDecodeI64(&decoder, &pInfo->timeInFetchQueue) < 0) return -1;
+ tEndDecode(&decoder);
+
+ tDecoderClear(&decoder);
+ return 0;
+}
+
+
diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c
index 5774dcaa1d2c7d5006f440e04867ed66f67d90f1..35b4da7013f2c9cce51c1382368cc38cd9aafa93 100644
--- a/source/libs/nodes/src/nodesCloneFuncs.c
+++ b/source/libs/nodes/src/nodesCloneFuncs.c
@@ -142,14 +142,16 @@ static SNode* valueNodeCopy(const SValueNode* pSrc, SValueNode* pDst) {
break;
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_VARCHAR:
- case TSDB_DATA_TYPE_VARBINARY:
- pDst->datum.p = taosMemoryMalloc(pSrc->node.resType.bytes + VARSTR_HEADER_SIZE + 1);
+ case TSDB_DATA_TYPE_VARBINARY: {
+ int32_t len = varDataTLen(pSrc->datum.p) + 1;
+ pDst->datum.p = taosMemoryCalloc(1, len);
if (NULL == pDst->datum.p) {
nodesDestroyNode(pDst);
return NULL;
}
- memcpy(pDst->datum.p, pSrc->datum.p, pSrc->node.resType.bytes + VARSTR_HEADER_SIZE + 1);
+ memcpy(pDst->datum.p, pSrc->datum.p, len);
break;
+ }
case TSDB_DATA_TYPE_JSON:
case TSDB_DATA_TYPE_DECIMAL:
case TSDB_DATA_TYPE_BLOB:
@@ -191,6 +193,7 @@ static SNode* tableNodeCopy(const STableNode* pSrc, STableNode* pDst) {
COPY_CHAR_ARRAY_FIELD(tableName);
COPY_CHAR_ARRAY_FIELD(tableAlias);
COPY_SCALAR_FIELD(precision);
+ COPY_SCALAR_FIELD(singleTable);
return (SNode*)pDst;
}
@@ -304,6 +307,7 @@ static SNode* logicNodeCopy(const SLogicNode* pSrc, SLogicNode* pDst) {
CLONE_NODE_FIELD(pConditions);
CLONE_NODE_LIST_FIELD(pChildren);
COPY_SCALAR_FIELD(optimizedFlag);
+ COPY_SCALAR_FIELD(precision);
return (SNode*)pDst;
}
@@ -326,6 +330,11 @@ static SNode* logicScanCopy(const SScanLogicNode* pSrc, SScanLogicNode* pDst) {
COPY_SCALAR_FIELD(sliding);
COPY_SCALAR_FIELD(intervalUnit);
COPY_SCALAR_FIELD(slidingUnit);
+ CLONE_NODE_FIELD(pTagCond);
+ COPY_SCALAR_FIELD(triggerType);
+ COPY_SCALAR_FIELD(watermark);
+ COPY_SCALAR_FIELD(tsColId);
+ COPY_SCALAR_FIELD(filesFactor);
return (SNode*)pDst;
}
@@ -333,6 +342,7 @@ static SNode* logicJoinCopy(const SJoinLogicNode* pSrc, SJoinLogicNode* pDst) {
COPY_BASE_OBJECT_FIELD(node, logicNodeCopy);
COPY_SCALAR_FIELD(joinType);
CLONE_NODE_FIELD(pOnConditions);
+ COPY_SCALAR_FIELD(isSingleTableJoin);
return (SNode*)pDst;
}
@@ -363,7 +373,14 @@ static SNode* logicVnodeModifCopy(const SVnodeModifLogicNode* pSrc, SVnodeModifL
static SNode* logicExchangeCopy(const SExchangeLogicNode* pSrc, SExchangeLogicNode* pDst) {
COPY_BASE_OBJECT_FIELD(node, logicNodeCopy);
COPY_SCALAR_FIELD(srcGroupId);
- COPY_SCALAR_FIELD(precision);
+ return (SNode*)pDst;
+}
+
+static SNode* logicMergeCopy(const SMergeLogicNode* pSrc, SMergeLogicNode* pDst) {
+ COPY_BASE_OBJECT_FIELD(node, logicNodeCopy);
+ CLONE_NODE_LIST_FIELD(pMergeKeys);
+ COPY_SCALAR_FIELD(numOfChannels);
+ COPY_SCALAR_FIELD(srcGroupId);
return (SNode*)pDst;
}
@@ -381,6 +398,8 @@ static SNode* logicWindowCopy(const SWindowLogicNode* pSrc, SWindowLogicNode* pD
CLONE_NODE_FIELD(pStateExpr);
COPY_SCALAR_FIELD(triggerType);
COPY_SCALAR_FIELD(watermark);
+ COPY_SCALAR_FIELD(filesFactor);
+ COPY_SCALAR_FIELD(stmInterAlgo);
return (SNode*)pDst;
}
@@ -526,6 +545,8 @@ SNodeptr nodesCloneNode(const SNodeptr pNode) {
return logicVnodeModifCopy((const SVnodeModifLogicNode*)pNode, (SVnodeModifLogicNode*)pDst);
case QUERY_NODE_LOGIC_PLAN_EXCHANGE:
return logicExchangeCopy((const SExchangeLogicNode*)pNode, (SExchangeLogicNode*)pDst);
+ case QUERY_NODE_LOGIC_PLAN_MERGE:
+ return logicMergeCopy((const SMergeLogicNode*)pNode, (SMergeLogicNode*)pDst);
case QUERY_NODE_LOGIC_PLAN_WINDOW:
return logicWindowCopy((const SWindowLogicNode*)pNode, (SWindowLogicNode*)pDst);
case QUERY_NODE_LOGIC_PLAN_FILL:
diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c
index f28885aad560d09e3aee28f524d2835d5b66f2de..54754ace51558ed23baf56dd9155c5ca1a41dbe3 100644
--- a/source/libs/nodes/src/nodesCodeFuncs.c
+++ b/source/libs/nodes/src/nodesCodeFuncs.c
@@ -190,6 +190,8 @@ const char* nodesNodeName(ENodeType type) {
return "LogicVnodeModif";
case QUERY_NODE_LOGIC_PLAN_EXCHANGE:
return "LogicExchange";
+ case QUERY_NODE_LOGIC_PLAN_MERGE:
+ return "LogicMerge";
case QUERY_NODE_LOGIC_PLAN_WINDOW:
return "LogicWindow";
case QUERY_NODE_LOGIC_PLAN_FILL:
@@ -220,16 +222,24 @@ const char* nodesNodeName(ENodeType type) {
return "PhysiAgg";
case QUERY_NODE_PHYSICAL_PLAN_EXCHANGE:
return "PhysiExchange";
+ case QUERY_NODE_PHYSICAL_PLAN_MERGE:
+ return "PhysiMerge";
case QUERY_NODE_PHYSICAL_PLAN_SORT:
return "PhysiSort";
case QUERY_NODE_PHYSICAL_PLAN_INTERVAL:
return "PhysiInterval";
case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL:
return "PhysiStreamInterval";
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL:
+ return "PhysiStreamFinalInterval";
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL:
+ return "PhysiStreamSemiInterval";
case QUERY_NODE_PHYSICAL_PLAN_FILL:
return "PhysiFill";
case QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW:
return "PhysiSessionWindow";
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW:
+ return "PhysiStreamSessionWindow";
case QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW:
return "PhysiStateWindow";
case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
@@ -594,7 +604,6 @@ static int32_t jsonToLogicProjectNode(const SJson* pJson, void* pObj) {
}
static const char* jkExchangeLogicPlanSrcGroupId = "SrcGroupId";
-static const char* jkExchangeLogicPlanSrcPrecision = "Precision";
static int32_t logicExchangeNodeToJson(const void* pObj, SJson* pJson) {
const SExchangeLogicNode* pNode = (const SExchangeLogicNode*)pObj;
@@ -603,9 +612,6 @@ static int32_t logicExchangeNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkExchangeLogicPlanSrcGroupId, pNode->srcGroupId);
}
- if (TSDB_CODE_SUCCESS == code) {
- code = tjsonAddIntegerToObject(pJson, jkExchangeLogicPlanSrcPrecision, pNode->precision);
- }
return code;
}
@@ -617,8 +623,144 @@ static int32_t jsonToLogicExchangeNode(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetIntValue(pJson, jkExchangeLogicPlanSrcGroupId, &pNode->srcGroupId);
}
+
+ return code;
+}
+
+static const char* jkMergeLogicPlanMergeKeys = "MergeKeys";
+static const char* jkMergeLogicPlanNumOfChannels = "NumOfChannels";
+static const char* jkMergeLogicPlanSrcGroupId = "SrcGroupId";
+
+static int32_t logicMergeNodeToJson(const void* pObj, SJson* pJson) {
+ const SMergeLogicNode* pNode = (const SMergeLogicNode*)pObj;
+
+ int32_t code = logicPlanNodeToJson(pObj, pJson);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = nodeListToJson(pJson, jkMergeLogicPlanMergeKeys, pNode->pMergeKeys);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkMergeLogicPlanNumOfChannels, pNode->numOfChannels);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkMergeLogicPlanSrcGroupId, pNode->srcGroupId);
+ }
+
+ return code;
+}
+
+static int32_t jsonToLogicMergeNode(const SJson* pJson, void* pObj) {
+ SMergeLogicNode* pNode = (SMergeLogicNode*)pObj;
+
+ int32_t code = jsonToLogicPlanNode(pJson, pObj);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeList(pJson, jkMergeLogicPlanMergeKeys, &pNode->pMergeKeys);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetIntValue(pJson, jkMergeLogicPlanNumOfChannels, &pNode->numOfChannels);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetIntValue(pJson, jkMergeLogicPlanSrcGroupId, &pNode->srcGroupId);
+ }
+
+ return code;
+}
+
+static const char* jkWindowLogicPlanWinType = "WinType";
+static const char* jkWindowLogicPlanFuncs = "Funcs";
+static const char* jkWindowLogicPlanInterval = "Interval";
+static const char* jkWindowLogicPlanOffset = "Offset";
+static const char* jkWindowLogicPlanSliding = "Sliding";
+static const char* jkWindowLogicPlanIntervalUnit = "IntervalUnit";
+static const char* jkWindowLogicPlanSlidingUnit = "SlidingUnit";
+static const char* jkWindowLogicPlanSessionGap = "SessionGap";
+static const char* jkWindowLogicPlanTspk = "Tspk";
+static const char* jkWindowLogicPlanStateExpr = "StateExpr";
+static const char* jkWindowLogicPlanTriggerType = "TriggerType";
+static const char* jkWindowLogicPlanWatermark = "Watermark";
+
+static int32_t logicWindowNodeToJson(const void* pObj, SJson* pJson) {
+ const SWindowLogicNode* pNode = (const SWindowLogicNode*)pObj;
+
+ int32_t code = logicPlanNodeToJson(pObj, pJson);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkWindowLogicPlanWinType, pNode->winType);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = nodeListToJson(pJson, jkWindowLogicPlanFuncs, pNode->pFuncs);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkWindowLogicPlanInterval, pNode->interval);
+ }
if (TSDB_CODE_SUCCESS == code) {
- code = tjsonGetUTinyIntValue(pJson, jkExchangeLogicPlanSrcPrecision, &pNode->precision);
+ code = tjsonAddIntegerToObject(pJson, jkWindowLogicPlanOffset, pNode->offset);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkWindowLogicPlanSliding, pNode->sliding);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkWindowLogicPlanIntervalUnit, pNode->intervalUnit);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkWindowLogicPlanSlidingUnit, pNode->slidingUnit);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkWindowLogicPlanSessionGap, pNode->sessionGap);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddObject(pJson, jkWindowLogicPlanTspk, nodeToJson, pNode->pTspk);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddObject(pJson, jkWindowLogicPlanStateExpr, nodeToJson, pNode->pStateExpr);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkWindowLogicPlanTriggerType, pNode->triggerType);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkWindowLogicPlanWatermark, pNode->watermark);
+ }
+
+ return code;
+}
+
+static int32_t jsonToLogicWindowNode(const SJson* pJson, void* pObj) {
+ SWindowLogicNode* pNode = (SWindowLogicNode*)pObj;
+
+ int32_t code = jsonToLogicPlanNode(pJson, pObj);
+ if (TSDB_CODE_SUCCESS == code) {
+ tjsonGetNumberValue(pJson, jkWindowLogicPlanWinType, pNode->winType, code);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeList(pJson, jkWindowLogicPlanFuncs, &pNode->pFuncs);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetBigIntValue(pJson, jkWindowLogicPlanInterval, &pNode->interval);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetBigIntValue(pJson, jkWindowLogicPlanOffset, &pNode->offset);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetBigIntValue(pJson, jkWindowLogicPlanSliding, &pNode->sliding);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetTinyIntValue(pJson, jkWindowLogicPlanIntervalUnit, &pNode->intervalUnit);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetTinyIntValue(pJson, jkWindowLogicPlanSlidingUnit, &pNode->slidingUnit);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetBigIntValue(pJson, jkWindowLogicPlanSessionGap, &pNode->sessionGap);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeObject(pJson, jkWindowLogicPlanTspk, &pNode->pTspk);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeObject(pJson, jkWindowLogicPlanStateExpr, &pNode->pStateExpr);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetTinyIntValue(pJson, jkWindowLogicPlanTriggerType, &pNode->triggerType);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetBigIntValue(pJson, jkWindowLogicPlanWatermark, &pNode->watermark);
}
return code;
@@ -1128,6 +1270,10 @@ static const char* jkTableScanPhysiPlanOffset = "Offset";
static const char* jkTableScanPhysiPlanSliding = "Sliding";
static const char* jkTableScanPhysiPlanIntervalUnit = "intervalUnit";
static const char* jkTableScanPhysiPlanSlidingUnit = "slidingUnit";
+static const char* jkTableScanPhysiPlanTriggerType = "triggerType";
+static const char* jkTableScanPhysiPlanWatermark = "watermark";
+static const char* jkTableScanPhysiPlanTsColId = "tsColId";
+static const char* jkTableScanPhysiPlanFilesFactor = "FilesFactor";
static int32_t physiTableScanNodeToJson(const void* pObj, SJson* pJson) {
const STableScanPhysiNode* pNode = (const STableScanPhysiNode*)pObj;
@@ -1169,6 +1315,18 @@ static int32_t physiTableScanNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkTableScanPhysiPlanSlidingUnit, pNode->slidingUnit);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkTableScanPhysiPlanTriggerType, pNode->triggerType);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkTableScanPhysiPlanWatermark, pNode->watermark);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkTableScanPhysiPlanTsColId, pNode->tsColId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddDoubleToObject(pJson, jkTableScanPhysiPlanFilesFactor, pNode->filesFactor);
+ }
return code;
}
@@ -1219,7 +1377,18 @@ static int32_t jsonToPhysiTableScanNode(const SJson* pJson, void* pObj) {
tjsonGetNumberValue(pJson, jkTableScanPhysiPlanSlidingUnit, pNode->slidingUnit, code);
;
}
-
+ if (TSDB_CODE_SUCCESS == code) {
+ tjsonGetNumberValue(pJson, jkTableScanPhysiPlanTriggerType, pNode->triggerType, code);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ tjsonGetNumberValue(pJson, jkTableScanPhysiPlanWatermark, pNode->watermark, code);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ tjsonGetNumberValue(pJson, jkTableScanPhysiPlanTsColId, pNode->tsColId, code);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetDoubleValue(pJson, jkTableScanPhysiPlanFilesFactor, &pNode->filesFactor);
+ }
return code;
}
@@ -1430,6 +1599,44 @@ static int32_t jsonToPhysiExchangeNode(const SJson* pJson, void* pObj) {
return code;
}
+static const char* jkMergePhysiPlanMergeKeys = "MergeKeys";
+static const char* jkMergePhysiPlanNumOfChannels = "NumOfChannels";
+static const char* jkMergePhysiPlanSrcGroupId = "SrcGroupId";
+
+static int32_t physiMergeNodeToJson(const void* pObj, SJson* pJson) {
+ const SMergePhysiNode* pNode = (const SMergePhysiNode*)pObj;
+
+ int32_t code = physicPlanNodeToJson(pObj, pJson);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = nodeListToJson(pJson, jkMergePhysiPlanMergeKeys, pNode->pMergeKeys);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkMergePhysiPlanNumOfChannels, pNode->numOfChannels);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkMergePhysiPlanSrcGroupId, pNode->srcGroupId);
+ }
+
+ return code;
+}
+
+static int32_t jsonToPhysiMergeNode(const SJson* pJson, void* pObj) {
+ SMergePhysiNode* pNode = (SMergePhysiNode*)pObj;
+
+ int32_t code = jsonToPhysicPlanNode(pJson, pObj);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeList(pJson, jkMergePhysiPlanMergeKeys, &pNode->pMergeKeys);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetIntValue(pJson, jkMergePhysiPlanNumOfChannels, &pNode->numOfChannels);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetIntValue(pJson, jkMergePhysiPlanSrcGroupId, &pNode->srcGroupId);
+ }
+
+ return code;
+}
+
static const char* jkSortPhysiPlanExprs = "Exprs";
static const char* jkSortPhysiPlanSortKeys = "SortKeys";
static const char* jkSortPhysiPlanTargets = "Targets";
@@ -1473,6 +1680,7 @@ static const char* jkWindowPhysiPlanFuncs = "Funcs";
static const char* jkWindowPhysiPlanTsPk = "TsPk";
static const char* jkWindowPhysiPlanTriggerType = "TriggerType";
static const char* jkWindowPhysiPlanWatermark = "Watermark";
+static const char* jkWindowPhysiPlanFilesFactor = "FilesFactor";
static int32_t physiWindowNodeToJson(const void* pObj, SJson* pJson) {
const SWinodwPhysiNode* pNode = (const SWinodwPhysiNode*)pObj;
@@ -1493,6 +1701,9 @@ static int32_t physiWindowNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkWindowPhysiPlanWatermark, pNode->watermark);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddDoubleToObject(pJson, jkWindowPhysiPlanFilesFactor, pNode->filesFactor);
+ }
return code;
}
@@ -1518,6 +1729,9 @@ static int32_t jsonToPhysiWindowNode(const SJson* pJson, void* pObj) {
tjsonGetNumberValue(pJson, jkWindowPhysiPlanWatermark, pNode->watermark, code);
;
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetDoubleValue(pJson, jkWindowPhysiPlanFilesFactor, &pNode->filesFactor);
+ }
return code;
}
@@ -2528,6 +2742,29 @@ static int32_t jsonToOrderByExprNode(const SJson* pJson, void* pObj) {
return code;
}
+static const char* jkSessionWindowTsPrimaryKey = "TsPrimaryKey";
+static const char* jkSessionWindowGap = "Gap";
+
+static int32_t sessionWindowNodeToJson(const void* pObj, SJson* pJson) {
+ const SSessionWindowNode* pNode = (const SSessionWindowNode*)pObj;
+
+ int32_t code = tjsonAddObject(pJson, jkSessionWindowTsPrimaryKey, nodeToJson, pNode->pCol);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddObject(pJson, jkSessionWindowGap, nodeToJson, pNode->pGap);
+ }
+ return code;
+}
+
+static int32_t jsonToSessionWindowNode(const SJson* pJson, void* pObj) {
+ SSessionWindowNode* pNode = (SSessionWindowNode*)pObj;
+
+ int32_t code = jsonToNodeObject(pJson, jkSessionWindowTsPrimaryKey, (SNode**)&pNode->pCol);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeObject(pJson, jkSessionWindowGap, (SNode**)&pNode->pGap);
+ }
+ return code;
+}
+
static const char* jkIntervalWindowInterval = "Interval";
static const char* jkIntervalWindowOffset = "Offset";
static const char* jkIntervalWindowSliding = "Sliding";
@@ -2750,6 +2987,150 @@ static int32_t jsonToDownstreamSourceNode(const SJson* pJson, void* pObj) {
return code;
}
+static const char* jkDatabaseOptionsBuffer = "Buffer";
+static const char* jkDatabaseOptionsCachelast = "Cachelast";
+static const char* jkDatabaseOptionsCompressionLevel = "CompressionLevel";
+static const char* jkDatabaseOptionsDaysPerFileNode = "DaysPerFileNode";
+static const char* jkDatabaseOptionsDaysPerFile = "DaysPerFile";
+static const char* jkDatabaseOptionsFsyncPeriod = "FsyncPeriod";
+static const char* jkDatabaseOptionsMaxRowsPerBlock = "MaxRowsPerBlock";
+static const char* jkDatabaseOptionsMinRowsPerBlock = "MinRowsPerBlock";
+static const char* jkDatabaseOptionsKeep = "Keep";
+static const char* jkDatabaseOptionsPages = "Pages";
+static const char* jkDatabaseOptionsPagesize = "Pagesize";
+static const char* jkDatabaseOptionsPrecision = "Precision";
+static const char* jkDatabaseOptionsReplica = "Replica";
+static const char* jkDatabaseOptionsStrict = "Strict";
+static const char* jkDatabaseOptionsWalLevel = "WalLevel";
+static const char* jkDatabaseOptionsNumOfVgroups = "NumOfVgroups";
+static const char* jkDatabaseOptionsSingleStable = "SingleStable";
+static const char* jkDatabaseOptionsRetentions = "Retentions";
+static const char* jkDatabaseOptionsSchemaless = "Schemaless";
+
+static int32_t databaseOptionsToJson(const void* pObj, SJson* pJson) {
+ const SDatabaseOptions* pNode = (const SDatabaseOptions*)pObj;
+
+ int32_t code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsBuffer, pNode->buffer);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsCachelast, pNode->cachelast);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsCompressionLevel, pNode->compressionLevel);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddObject(pJson, jkDatabaseOptionsDaysPerFileNode, nodeToJson, pNode->pDaysPerFile);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsDaysPerFile, pNode->daysPerFile);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsFsyncPeriod, pNode->fsyncPeriod);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsMaxRowsPerBlock, pNode->maxRowsPerBlock);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsMinRowsPerBlock, pNode->minRowsPerBlock);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = nodeListToJson(pJson, jkDatabaseOptionsKeep, pNode->pKeep);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsPages, pNode->pages);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsPagesize, pNode->pagesize);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddStringToObject(pJson, jkDatabaseOptionsPrecision, pNode->precisionStr);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsReplica, pNode->replica);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsStrict, pNode->strict);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsWalLevel, pNode->walLevel);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsNumOfVgroups, pNode->numOfVgroups);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsSingleStable, pNode->singleStable);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = nodeListToJson(pJson, jkDatabaseOptionsRetentions, pNode->pRetentions);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsSchemaless, pNode->schemaless);
+ }
+
+ return code;
+}
+
+static int32_t jsonToDatabaseOptions(const SJson* pJson, void* pObj) {
+ SDatabaseOptions* pNode = (SDatabaseOptions*)pObj;
+
+ int32_t code = tjsonGetIntValue(pJson, jkDatabaseOptionsBuffer, &pNode->buffer);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsCachelast, &pNode->cachelast);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsCompressionLevel, &pNode->compressionLevel);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeObject(pJson, jkDatabaseOptionsDaysPerFileNode, (SNode**)&pNode->pDaysPerFile);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetIntValue(pJson, jkDatabaseOptionsDaysPerFile, &pNode->daysPerFile);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetIntValue(pJson, jkDatabaseOptionsFsyncPeriod, &pNode->fsyncPeriod);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetIntValue(pJson, jkDatabaseOptionsMaxRowsPerBlock, &pNode->maxRowsPerBlock);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetIntValue(pJson, jkDatabaseOptionsMinRowsPerBlock, &pNode->minRowsPerBlock);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeList(pJson, jkDatabaseOptionsKeep, &pNode->pKeep);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetIntValue(pJson, jkDatabaseOptionsPages, &pNode->pages);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetIntValue(pJson, jkDatabaseOptionsPagesize, &pNode->pagesize);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetStringValue(pJson, jkDatabaseOptionsPrecision, pNode->precisionStr);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsReplica, &pNode->replica);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsStrict, &pNode->strict);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsWalLevel, &pNode->walLevel);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetIntValue(pJson, jkDatabaseOptionsNumOfVgroups, &pNode->numOfVgroups);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsSingleStable, &pNode->singleStable);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeList(pJson, jkDatabaseOptionsRetentions, &pNode->pRetentions);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsSchemaless, &pNode->schemaless);
+ }
+
+ return code;
+}
+
static const char* jkDataBlockDescDataBlockId = "DataBlockId";
static const char* jkDataBlockDescSlots = "Slots";
static const char* jkDataBlockTotalRowSize = "TotalRowSize";
@@ -2952,6 +3333,130 @@ static int32_t jsonToSelectStmt(const SJson* pJson, void* pObj) {
return code;
}
+static const char* jkAlterDatabaseStmtDbName = "DbName";
+static const char* jkAlterDatabaseStmtOptions = "Options";
+
+static int32_t alterDatabaseStmtToJson(const void* pObj, SJson* pJson) {
+ const SAlterDatabaseStmt* pNode = (const SAlterDatabaseStmt*)pObj;
+
+ int32_t code = tjsonAddStringToObject(pJson, jkAlterDatabaseStmtDbName, pNode->dbName);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddObject(pJson, jkAlterDatabaseStmtOptions, nodeToJson, pNode->pOptions);
+ }
+
+ return code;
+}
+
+static int32_t jsonToAlterDatabaseStmt(const SJson* pJson, void* pObj) {
+ SAlterDatabaseStmt* pNode = (SAlterDatabaseStmt*)pObj;
+
+ int32_t code = tjsonGetStringValue(pJson, jkAlterDatabaseStmtDbName, pNode->dbName);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeObject(pJson, jkAlterDatabaseStmtOptions, (SNode**)&pNode->pOptions);
+ }
+
+ return code;
+}
+
+static const char* jkAlterTableStmtDbName = "DbName";
+static const char* jkAlterTableStmtTableName = "TableName";
+static const char* jkAlterTableStmtAlterType = "AlterType";
+static const char* jkAlterTableStmtColName = "ColName";
+static const char* jkAlterTableStmtNewColName = "NewColName";
+static const char* jkAlterTableStmtOptions = "Options";
+static const char* jkAlterTableStmtNewDataType = "NewDataType";
+static const char* jkAlterTableStmtNewTagVal = "NewTagVal";
+
+static int32_t alterTableStmtToJson(const void* pObj, SJson* pJson) {
+ const SAlterTableStmt* pNode = (const SAlterTableStmt*)pObj;
+
+ int32_t code = tjsonAddStringToObject(pJson, jkAlterTableStmtDbName, pNode->dbName);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddStringToObject(pJson, jkAlterTableStmtTableName, pNode->tableName);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkAlterTableStmtAlterType, pNode->alterType);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddStringToObject(pJson, jkAlterTableStmtColName, pNode->colName);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddStringToObject(pJson, jkAlterTableStmtNewColName, pNode->newColName);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddObject(pJson, jkAlterTableStmtOptions, nodeToJson, pNode->pOptions);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddObject(pJson, jkAlterTableStmtNewDataType, dataTypeToJson, &pNode->dataType);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddObject(pJson, jkAlterTableStmtOptions, nodeToJson, pNode->pVal);
+ }
+
+ return code;
+}
+
+static int32_t jsonToAlterTableStmt(const SJson* pJson, void* pObj) {
+ SAlterTableStmt* pNode = (SAlterTableStmt*)pObj;
+
+ int32_t code = tjsonGetStringValue(pJson, jkAlterTableStmtDbName, pNode->dbName);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetStringValue(pJson, jkAlterTableStmtTableName, pNode->tableName);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetTinyIntValue(pJson, jkAlterTableStmtAlterType, &pNode->alterType);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetStringValue(pJson, jkAlterTableStmtColName, pNode->colName);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetStringValue(pJson, jkAlterTableStmtNewColName, pNode->newColName);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeObject(pJson, jkAlterTableStmtOptions, (SNode**)&pNode->pOptions);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonToObject(pJson, jkAlterTableStmtNewDataType, jsonToDataType, &pNode->dataType);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeObject(pJson, jkAlterTableStmtOptions, (SNode**)&pNode->pVal);
+ }
+
+ return code;
+}
+
+static const char* jkAlterDnodeStmtDnodeId = "DnodeId";
+static const char* jkAlterDnodeStmtConfig = "Config";
+static const char* jkAlterDnodeStmtValue = "Value";
+
+static int32_t alterDnodeStmtToJson(const void* pObj, SJson* pJson) {
+ const SAlterDnodeStmt* pNode = (const SAlterDnodeStmt*)pObj;
+
+ int32_t code = tjsonAddIntegerToObject(pJson, jkAlterDnodeStmtDnodeId, pNode->dnodeId);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddStringToObject(pJson, jkAlterDnodeStmtConfig, pNode->config);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddStringToObject(pJson, jkAlterDnodeStmtValue, pNode->value);
+ }
+
+ return code;
+}
+
+static int32_t jsonToAlterDnodeStmt(const SJson* pJson, void* pObj) {
+ SAlterDnodeStmt* pNode = (SAlterDnodeStmt*)pObj;
+
+ int32_t code = tjsonGetIntValue(pJson, jkAlterDnodeStmtDnodeId, &pNode->dnodeId);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetStringValue(pJson, jkAlterDnodeStmtConfig, pNode->config);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetStringValue(pJson, jkAlterDnodeStmtValue, pNode->value);
+ }
+
+ return code;
+}
+
static const char* jkCreateTopicStmtTopicName = "TopicName";
static const char* jkCreateTopicStmtSubscribeDbName = "SubscribeDbName";
static const char* jkCreateTopicStmtIgnoreExists = "IgnoreExists";
@@ -2962,7 +3467,7 @@ static int32_t createTopicStmtToJson(const void* pObj, SJson* pJson) {
int32_t code = tjsonAddStringToObject(pJson, jkCreateTopicStmtTopicName, pNode->topicName);
if (TSDB_CODE_SUCCESS == code) {
- code = tjsonAddStringToObject(pJson, jkCreateTopicStmtSubscribeDbName, pNode->subscribeDbName);
+ code = tjsonAddStringToObject(pJson, jkCreateTopicStmtSubscribeDbName, pNode->subDbName);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkCreateTopicStmtIgnoreExists, pNode->ignoreExists);
@@ -2979,7 +3484,7 @@ static int32_t jsonToCreateTopicStmt(const SJson* pJson, void* pObj) {
int32_t code = tjsonGetStringValue(pJson, jkCreateTopicStmtTopicName, pNode->topicName);
if (TSDB_CODE_SUCCESS == code) {
- code = tjsonGetStringValue(pJson, jkCreateTopicStmtSubscribeDbName, pNode->subscribeDbName);
+ code = tjsonGetStringValue(pJson, jkCreateTopicStmtSubscribeDbName, pNode->subDbName);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkCreateTopicStmtIgnoreExists, &pNode->ignoreExists);
@@ -3015,8 +3520,9 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
return orderByExprNodeToJson(pObj, pJson);
case QUERY_NODE_LIMIT:
case QUERY_NODE_STATE_WINDOW:
- case QUERY_NODE_SESSION_WINDOW:
break;
+ case QUERY_NODE_SESSION_WINDOW:
+ return sessionWindowNodeToJson(pObj, pJson);
case QUERY_NODE_INTERVAL_WINDOW:
return intervalWindowNodeToJson(pObj, pJson);
case QUERY_NODE_NODE_LIST:
@@ -3035,6 +3541,8 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
break;
case QUERY_NODE_DOWNSTREAM_SOURCE:
return downstreamSourceNodeToJson(pObj, pJson);
+ case QUERY_NODE_DATABASE_OPTIONS:
+ return databaseOptionsToJson(pObj, pJson);
case QUERY_NODE_LEFT_VALUE:
return TSDB_CODE_SUCCESS; // SLeftValueNode has no fields to serialize.
case QUERY_NODE_SET_OPERATOR:
@@ -3043,8 +3551,17 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
return selectStmtToJson(pObj, pJson);
case QUERY_NODE_VNODE_MODIF_STMT:
case QUERY_NODE_CREATE_DATABASE_STMT:
+ break;
+ case QUERY_NODE_ALTER_DATABASE_STMT:
+ return alterDatabaseStmtToJson(pObj, pJson);
case QUERY_NODE_CREATE_TABLE_STMT:
+ break;
+ case QUERY_NODE_ALTER_TABLE_STMT:
+ return alterTableStmtToJson(pObj, pJson);
case QUERY_NODE_USE_DATABASE_STMT:
+ break;
+ case QUERY_NODE_ALTER_DNODE_STMT:
+ return alterDnodeStmtToJson(pObj, pJson);
case QUERY_NODE_SHOW_DATABASES_STMT:
case QUERY_NODE_SHOW_TABLES_STMT:
break;
@@ -3062,6 +3579,10 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
break;
case QUERY_NODE_LOGIC_PLAN_EXCHANGE:
return logicExchangeNodeToJson(pObj, pJson);
+ case QUERY_NODE_LOGIC_PLAN_MERGE:
+ return logicMergeNodeToJson(pObj, pJson);
+ case QUERY_NODE_LOGIC_PLAN_WINDOW:
+ return logicWindowNodeToJson(pObj, pJson);
case QUERY_NODE_LOGIC_PLAN_FILL:
return logicFillNodeToJson(pObj, pJson);
case QUERY_NODE_LOGIC_PLAN_SORT:
@@ -3088,14 +3609,19 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
return physiAggNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_EXCHANGE:
return physiExchangeNodeToJson(pObj, pJson);
+ case QUERY_NODE_PHYSICAL_PLAN_MERGE:
+ return physiMergeNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_SORT:
return physiSortNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_INTERVAL:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL:
return physiIntervalNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_FILL:
return physiFillNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW:
return physiSessionWindowNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW:
return physiStateWindowNodeToJson(pObj, pJson);
@@ -3134,6 +3660,8 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) {
return jsonToTempTableNode(pJson, pObj);
case QUERY_NODE_ORDER_BY_EXPR:
return jsonToOrderByExprNode(pJson, pObj);
+ case QUERY_NODE_SESSION_WINDOW:
+ return jsonToSessionWindowNode(pJson, pObj);
case QUERY_NODE_INTERVAL_WINDOW:
return jsonToIntervalWindowNode(pJson, pObj);
case QUERY_NODE_NODE_LIST:
@@ -3148,12 +3676,20 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) {
return jsonToSlotDescNode(pJson, pObj);
case QUERY_NODE_DOWNSTREAM_SOURCE:
return jsonToDownstreamSourceNode(pJson, pObj);
+ case QUERY_NODE_DATABASE_OPTIONS:
+ return jsonToDatabaseOptions(pJson, pObj);
case QUERY_NODE_LEFT_VALUE:
return TSDB_CODE_SUCCESS; // SLeftValueNode has no fields to deserialize.
case QUERY_NODE_SET_OPERATOR:
return jsonToSetOperator(pJson, pObj);
case QUERY_NODE_SELECT_STMT:
return jsonToSelectStmt(pJson, pObj);
+ case QUERY_NODE_ALTER_DATABASE_STMT:
+ return jsonToAlterDatabaseStmt(pJson, pObj);
+ case QUERY_NODE_ALTER_TABLE_STMT:
+ return jsonToAlterTableStmt(pJson, pObj);
+ case QUERY_NODE_ALTER_DNODE_STMT:
+ return jsonToAlterDnodeStmt(pJson, pObj);
case QUERY_NODE_CREATE_TOPIC_STMT:
return jsonToCreateTopicStmt(pJson, pObj);
case QUERY_NODE_LOGIC_PLAN_SCAN:
@@ -3162,6 +3698,10 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) {
return jsonToLogicProjectNode(pJson, pObj);
case QUERY_NODE_LOGIC_PLAN_EXCHANGE:
return jsonToLogicExchangeNode(pJson, pObj);
+ case QUERY_NODE_LOGIC_PLAN_MERGE:
+ return jsonToLogicMergeNode(pJson, pObj);
+ case QUERY_NODE_LOGIC_PLAN_WINDOW:
+ return jsonToLogicWindowNode(pJson, pObj);
case QUERY_NODE_LOGIC_PLAN_FILL:
return jsonToLogicFillNode(pJson, pObj);
case QUERY_NODE_LOGIC_PLAN_SORT:
@@ -3188,14 +3728,19 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) {
return jsonToPhysiAggNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_EXCHANGE:
return jsonToPhysiExchangeNode(pJson, pObj);
+ case QUERY_NODE_PHYSICAL_PLAN_MERGE:
+ return jsonToPhysiMergeNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_SORT:
return jsonToPhysiSortNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_INTERVAL:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL:
return jsonToPhysiIntervalNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_FILL:
return jsonToPhysiFillNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW:
return jsonToPhysiSessionWindowNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW:
return jsonToPhysiStateWindowNode(pJson, pObj);
diff --git a/source/libs/nodes/src/nodesTraverseFuncs.c b/source/libs/nodes/src/nodesTraverseFuncs.c
index e8274c3c8eaa916a6e2c3877cde6185b99a623d8..ae1ff5744bcc48eeaec661137e01eeaf01684636 100644
--- a/source/libs/nodes/src/nodesTraverseFuncs.c
+++ b/source/libs/nodes/src/nodesTraverseFuncs.c
@@ -517,6 +517,7 @@ static EDealRes dispatchPhysiPlan(SNode* pNode, ETraversalOrder order, FNodeWalk
res = walkWindowPhysi((SWinodwPhysiNode*)pNode, order, walker, pContext);
break;
case QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW:
res = walkWindowPhysi((SWinodwPhysiNode*)pNode, order, walker, pContext);
break;
case QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW: {
diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c
index 3f7003dfa3b5a911e35c8823d1d883d9cca5bea7..76f15afc8e050ee57b4bc5b774f0fd1e57338972 100644
--- a/source/libs/nodes/src/nodesUtilFuncs.c
+++ b/source/libs/nodes/src/nodesUtilFuncs.c
@@ -21,149 +21,158 @@
#include "taoserror.h"
#include "thash.h"
-int32_t nodesNodeSize(ENodeType type) {
+static SNode* makeNode(ENodeType type, size_t size) {
+ SNode* p = taosMemoryCalloc(1, size);
+ if (NULL == p) {
+ return NULL;
+ }
+ setNodeType(p, type);
+ return p;
+}
+
+SNodeptr nodesMakeNode(ENodeType type) {
switch (type) {
case QUERY_NODE_COLUMN:
- return sizeof(SColumnNode);
+ return makeNode(type, sizeof(SColumnNode));
case QUERY_NODE_VALUE:
- return sizeof(SValueNode);
+ return makeNode(type, sizeof(SValueNode));
case QUERY_NODE_OPERATOR:
- return sizeof(SOperatorNode);
+ return makeNode(type, sizeof(SOperatorNode));
case QUERY_NODE_LOGIC_CONDITION:
- return sizeof(SLogicConditionNode);
+ return makeNode(type, sizeof(SLogicConditionNode));
case QUERY_NODE_FUNCTION:
- return sizeof(SFunctionNode);
+ return makeNode(type, sizeof(SFunctionNode));
case QUERY_NODE_REAL_TABLE:
- return sizeof(SRealTableNode);
+ return makeNode(type, sizeof(SRealTableNode));
case QUERY_NODE_TEMP_TABLE:
- return sizeof(STempTableNode);
+ return makeNode(type, sizeof(STempTableNode));
case QUERY_NODE_JOIN_TABLE:
- return sizeof(SJoinTableNode);
+ return makeNode(type, sizeof(SJoinTableNode));
case QUERY_NODE_GROUPING_SET:
- return sizeof(SGroupingSetNode);
+ return makeNode(type, sizeof(SGroupingSetNode));
case QUERY_NODE_ORDER_BY_EXPR:
- return sizeof(SOrderByExprNode);
+ return makeNode(type, sizeof(SOrderByExprNode));
case QUERY_NODE_LIMIT:
- return sizeof(SLimitNode);
+ return makeNode(type, sizeof(SLimitNode));
case QUERY_NODE_STATE_WINDOW:
- return sizeof(SStateWindowNode);
+ return makeNode(type, sizeof(SStateWindowNode));
case QUERY_NODE_SESSION_WINDOW:
- return sizeof(SSessionWindowNode);
+ return makeNode(type, sizeof(SSessionWindowNode));
case QUERY_NODE_INTERVAL_WINDOW:
- return sizeof(SIntervalWindowNode);
+ return makeNode(type, sizeof(SIntervalWindowNode));
case QUERY_NODE_NODE_LIST:
- return sizeof(SNodeListNode);
+ return makeNode(type, sizeof(SNodeListNode));
case QUERY_NODE_FILL:
- return sizeof(SFillNode);
+ return makeNode(type, sizeof(SFillNode));
case QUERY_NODE_RAW_EXPR:
- return sizeof(SRawExprNode);
+ return makeNode(type, sizeof(SRawExprNode));
case QUERY_NODE_TARGET:
- return sizeof(STargetNode);
+ return makeNode(type, sizeof(STargetNode));
case QUERY_NODE_DATABLOCK_DESC:
- return sizeof(SDataBlockDescNode);
+ return makeNode(type, sizeof(SDataBlockDescNode));
case QUERY_NODE_SLOT_DESC:
- return sizeof(SSlotDescNode);
+ return makeNode(type, sizeof(SSlotDescNode));
case QUERY_NODE_COLUMN_DEF:
- return sizeof(SColumnDefNode);
+ return makeNode(type, sizeof(SColumnDefNode));
case QUERY_NODE_DOWNSTREAM_SOURCE:
- return sizeof(SDownstreamSourceNode);
+ return makeNode(type, sizeof(SDownstreamSourceNode));
case QUERY_NODE_DATABASE_OPTIONS:
- return sizeof(SDatabaseOptions);
+ return makeNode(type, sizeof(SDatabaseOptions));
case QUERY_NODE_TABLE_OPTIONS:
- return sizeof(STableOptions);
+ return makeNode(type, sizeof(STableOptions));
case QUERY_NODE_INDEX_OPTIONS:
- return sizeof(SIndexOptions);
+ return makeNode(type, sizeof(SIndexOptions));
case QUERY_NODE_EXPLAIN_OPTIONS:
- return sizeof(SExplainOptions);
+ return makeNode(type, sizeof(SExplainOptions));
case QUERY_NODE_STREAM_OPTIONS:
- return sizeof(SStreamOptions);
- case QUERY_NODE_TOPIC_OPTIONS:
- return sizeof(STopicOptions);
+ return makeNode(type, sizeof(SStreamOptions));
case QUERY_NODE_LEFT_VALUE:
- return sizeof(SLeftValueNode);
+ return makeNode(type, sizeof(SLeftValueNode));
case QUERY_NODE_SET_OPERATOR:
- return sizeof(SSetOperator);
+ return makeNode(type, sizeof(SSetOperator));
case QUERY_NODE_SELECT_STMT:
- return sizeof(SSelectStmt);
+ return makeNode(type, sizeof(SSelectStmt));
case QUERY_NODE_VNODE_MODIF_STMT:
- return sizeof(SVnodeModifOpStmt);
+ return makeNode(type, sizeof(SVnodeModifOpStmt));
case QUERY_NODE_CREATE_DATABASE_STMT:
- return sizeof(SCreateDatabaseStmt);
+ return makeNode(type, sizeof(SCreateDatabaseStmt));
case QUERY_NODE_DROP_DATABASE_STMT:
- return sizeof(SDropDatabaseStmt);
+ return makeNode(type, sizeof(SDropDatabaseStmt));
case QUERY_NODE_ALTER_DATABASE_STMT:
- return sizeof(SAlterDatabaseStmt);
+ return makeNode(type, sizeof(SAlterDatabaseStmt));
case QUERY_NODE_CREATE_TABLE_STMT:
- return sizeof(SCreateTableStmt);
+ return makeNode(type, sizeof(SCreateTableStmt));
case QUERY_NODE_CREATE_SUBTABLE_CLAUSE:
- return sizeof(SCreateSubTableClause);
+ return makeNode(type, sizeof(SCreateSubTableClause));
case QUERY_NODE_CREATE_MULTI_TABLE_STMT:
- return sizeof(SCreateMultiTableStmt);
+ return makeNode(type, sizeof(SCreateMultiTableStmt));
case QUERY_NODE_DROP_TABLE_CLAUSE:
- return sizeof(SDropTableClause);
+ return makeNode(type, sizeof(SDropTableClause));
case QUERY_NODE_DROP_TABLE_STMT:
- return sizeof(SDropTableStmt);
+ return makeNode(type, sizeof(SDropTableStmt));
case QUERY_NODE_DROP_SUPER_TABLE_STMT:
- return sizeof(SDropSuperTableStmt);
+ return makeNode(type, sizeof(SDropSuperTableStmt));
case QUERY_NODE_ALTER_TABLE_STMT:
- return sizeof(SAlterTableStmt);
+ return makeNode(type, sizeof(SAlterTableStmt));
case QUERY_NODE_CREATE_USER_STMT:
- return sizeof(SCreateUserStmt);
+ return makeNode(type, sizeof(SCreateUserStmt));
case QUERY_NODE_ALTER_USER_STMT:
- return sizeof(SAlterUserStmt);
+ return makeNode(type, sizeof(SAlterUserStmt));
case QUERY_NODE_DROP_USER_STMT:
- return sizeof(SDropUserStmt);
+ return makeNode(type, sizeof(SDropUserStmt));
case QUERY_NODE_USE_DATABASE_STMT:
- return sizeof(SUseDatabaseStmt);
+ return makeNode(type, sizeof(SUseDatabaseStmt));
case QUERY_NODE_CREATE_DNODE_STMT:
- return sizeof(SCreateDnodeStmt);
+ return makeNode(type, sizeof(SCreateDnodeStmt));
case QUERY_NODE_DROP_DNODE_STMT:
- return sizeof(SDropDnodeStmt);
+ return makeNode(type, sizeof(SDropDnodeStmt));
case QUERY_NODE_ALTER_DNODE_STMT:
- return sizeof(SAlterDnodeStmt);
+ return makeNode(type, sizeof(SAlterDnodeStmt));
case QUERY_NODE_CREATE_INDEX_STMT:
- return sizeof(SCreateIndexStmt);
+ return makeNode(type, sizeof(SCreateIndexStmt));
case QUERY_NODE_DROP_INDEX_STMT:
- return sizeof(SDropIndexStmt);
+ return makeNode(type, sizeof(SDropIndexStmt));
case QUERY_NODE_CREATE_QNODE_STMT:
case QUERY_NODE_CREATE_BNODE_STMT:
case QUERY_NODE_CREATE_SNODE_STMT:
case QUERY_NODE_CREATE_MNODE_STMT:
- return sizeof(SCreateComponentNodeStmt);
+ return makeNode(type, sizeof(SCreateComponentNodeStmt));
case QUERY_NODE_DROP_QNODE_STMT:
case QUERY_NODE_DROP_BNODE_STMT:
case QUERY_NODE_DROP_SNODE_STMT:
case QUERY_NODE_DROP_MNODE_STMT:
- return sizeof(SDropComponentNodeStmt);
+ return makeNode(type, sizeof(SDropComponentNodeStmt));
case QUERY_NODE_CREATE_TOPIC_STMT:
- return sizeof(SCreateTopicStmt);
+ return makeNode(type, sizeof(SCreateTopicStmt));
case QUERY_NODE_DROP_TOPIC_STMT:
- return sizeof(SDropTopicStmt);
+ return makeNode(type, sizeof(SDropTopicStmt));
+ case QUERY_NODE_DROP_CGROUP_STMT:
+ return makeNode(type, sizeof(SDropCGroupStmt));
case QUERY_NODE_EXPLAIN_STMT:
- return sizeof(SExplainStmt);
+ return makeNode(type, sizeof(SExplainStmt));
case QUERY_NODE_DESCRIBE_STMT:
- return sizeof(SDescribeStmt);
+ return makeNode(type, sizeof(SDescribeStmt));
case QUERY_NODE_RESET_QUERY_CACHE_STMT:
- return sizeof(SNode);
+ return makeNode(type, sizeof(SNode));
case QUERY_NODE_COMPACT_STMT:
break;
case QUERY_NODE_CREATE_FUNCTION_STMT:
- return sizeof(SCreateFunctionStmt);
+ return makeNode(type, sizeof(SCreateFunctionStmt));
case QUERY_NODE_DROP_FUNCTION_STMT:
- return sizeof(SDropFunctionStmt);
+ return makeNode(type, sizeof(SDropFunctionStmt));
case QUERY_NODE_CREATE_STREAM_STMT:
- return sizeof(SCreateStreamStmt);
+ return makeNode(type, sizeof(SCreateStreamStmt));
case QUERY_NODE_DROP_STREAM_STMT:
- return sizeof(SDropStreamStmt);
+ return makeNode(type, sizeof(SDropStreamStmt));
case QUERY_NODE_MERGE_VGROUP_STMT:
case QUERY_NODE_REDISTRIBUTE_VGROUP_STMT:
case QUERY_NODE_SPLIT_VGROUP_STMT:
case QUERY_NODE_SYNCDB_STMT:
break;
case QUERY_NODE_GRANT_STMT:
- return sizeof(SGrantStmt);
+ return makeNode(type, sizeof(SGrantStmt));
case QUERY_NODE_REVOKE_STMT:
- return sizeof(SRevokeStmt);
+ return makeNode(type, sizeof(SRevokeStmt));
case QUERY_NODE_SHOW_DNODES_STMT:
case QUERY_NODE_SHOW_MNODES_STMT:
case QUERY_NODE_SHOW_MODULES_STMT:
@@ -194,89 +203,90 @@ int32_t nodesNodeSize(ENodeType type) {
case QUERY_NODE_SHOW_CREATE_TABLE_STMT:
case QUERY_NODE_SHOW_CREATE_STABLE_STMT:
case QUERY_NODE_SHOW_TRANSACTIONS_STMT:
- return sizeof(SShowStmt);
+ return makeNode(type, sizeof(SShowStmt));
case QUERY_NODE_KILL_CONNECTION_STMT:
case QUERY_NODE_KILL_QUERY_STMT:
case QUERY_NODE_KILL_TRANSACTION_STMT:
- return sizeof(SKillStmt);
+ return makeNode(type, sizeof(SKillStmt));
case QUERY_NODE_LOGIC_PLAN_SCAN:
- return sizeof(SScanLogicNode);
+ return makeNode(type, sizeof(SScanLogicNode));
case QUERY_NODE_LOGIC_PLAN_JOIN:
- return sizeof(SJoinLogicNode);
+ return makeNode(type, sizeof(SJoinLogicNode));
case QUERY_NODE_LOGIC_PLAN_AGG:
- return sizeof(SAggLogicNode);
+ return makeNode(type, sizeof(SAggLogicNode));
case QUERY_NODE_LOGIC_PLAN_PROJECT:
- return sizeof(SProjectLogicNode);
+ return makeNode(type, sizeof(SProjectLogicNode));
case QUERY_NODE_LOGIC_PLAN_VNODE_MODIF:
- return sizeof(SVnodeModifLogicNode);
+ return makeNode(type, sizeof(SVnodeModifLogicNode));
case QUERY_NODE_LOGIC_PLAN_EXCHANGE:
- return sizeof(SExchangeLogicNode);
+ return makeNode(type, sizeof(SExchangeLogicNode));
+ case QUERY_NODE_LOGIC_PLAN_MERGE:
+ return makeNode(type, sizeof(SMergeLogicNode));
case QUERY_NODE_LOGIC_PLAN_WINDOW:
- return sizeof(SWindowLogicNode);
+ return makeNode(type, sizeof(SWindowLogicNode));
case QUERY_NODE_LOGIC_PLAN_FILL:
- return sizeof(SFillLogicNode);
+ return makeNode(type, sizeof(SFillLogicNode));
case QUERY_NODE_LOGIC_PLAN_SORT:
- return sizeof(SSortLogicNode);
+ return makeNode(type, sizeof(SSortLogicNode));
case QUERY_NODE_LOGIC_PLAN_PARTITION:
- return sizeof(SPartitionLogicNode);
+ return makeNode(type, sizeof(SPartitionLogicNode));
case QUERY_NODE_LOGIC_SUBPLAN:
- return sizeof(SLogicSubplan);
+ return makeNode(type, sizeof(SLogicSubplan));
case QUERY_NODE_LOGIC_PLAN:
- return sizeof(SQueryLogicPlan);
+ return makeNode(type, sizeof(SQueryLogicPlan));
case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN:
- return sizeof(STagScanPhysiNode);
+ return makeNode(type, sizeof(STagScanPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN:
- return sizeof(STableScanPhysiNode);
+ return makeNode(type, sizeof(STableScanPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN:
- return sizeof(STableSeqScanPhysiNode);
+ return makeNode(type, sizeof(STableSeqScanPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN:
- return sizeof(SStreamScanPhysiNode);
+ return makeNode(type, sizeof(SStreamScanPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN:
- return sizeof(SSystemTableScanPhysiNode);
+ return makeNode(type, sizeof(SSystemTableScanPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_PROJECT:
- return sizeof(SProjectPhysiNode);
+ return makeNode(type, sizeof(SProjectPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_JOIN:
- return sizeof(SJoinPhysiNode);
+ return makeNode(type, sizeof(SJoinPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_AGG:
- return sizeof(SAggPhysiNode);
+ return makeNode(type, sizeof(SAggPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_EXCHANGE:
- return sizeof(SExchangePhysiNode);
+ return makeNode(type, sizeof(SExchangePhysiNode));
+ case QUERY_NODE_PHYSICAL_PLAN_MERGE:
+ return makeNode(type, sizeof(SMergePhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_SORT:
- return sizeof(SSortPhysiNode);
+ return makeNode(type, sizeof(SSortPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_INTERVAL:
- return sizeof(SIntervalPhysiNode);
+ return makeNode(type, sizeof(SIntervalPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL:
- return sizeof(SStreamIntervalPhysiNode);
+ return makeNode(type, sizeof(SStreamIntervalPhysiNode));
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL:
+ return makeNode(type, sizeof(SStreamFinalIntervalPhysiNode));
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL:
+ return makeNode(type, sizeof(SStreamSemiIntervalPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_FILL:
- return sizeof(SFillPhysiNode);
+ return makeNode(type, sizeof(SFillPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW:
- return sizeof(SSessionWinodwPhysiNode);
+ return makeNode(type, sizeof(SSessionWinodwPhysiNode));
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW:
+ return makeNode(type, sizeof(SStreamSessionWinodwPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW:
- return sizeof(SStateWinodwPhysiNode);
+ return makeNode(type, sizeof(SStateWinodwPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
- return sizeof(SPartitionPhysiNode);
+ return makeNode(type, sizeof(SPartitionPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_DISPATCH:
- return sizeof(SDataDispatcherNode);
+ return makeNode(type, sizeof(SDataDispatcherNode));
case QUERY_NODE_PHYSICAL_PLAN_INSERT:
- return sizeof(SDataInserterNode);
+ return makeNode(type, sizeof(SDataInserterNode));
case QUERY_NODE_PHYSICAL_SUBPLAN:
- return sizeof(SSubplan);
+ return makeNode(type, sizeof(SSubplan));
case QUERY_NODE_PHYSICAL_PLAN:
- return sizeof(SQueryPlan);
+ return makeNode(type, sizeof(SQueryPlan));
default:
break;
}
nodesError("nodesMakeNode unknown node = %s", nodesNodeName(type));
- return 0;
-}
-
-SNodeptr nodesMakeNode(ENodeType type) {
- SNode* p = taosMemoryCalloc(1, nodesNodeSize(type));
- if (NULL == p) {
- return NULL;
- }
- setNodeType(p, type);
- return p;
+ return NULL;
}
static void destroyVgDataBlockArray(SArray* pArray) {
@@ -664,6 +674,7 @@ void nodesDestroyNode(SNodeptr pNode) {
destroyWinodwPhysiNode((SWinodwPhysiNode*)pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW:
destroyWinodwPhysiNode((SWinodwPhysiNode*)pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_DISPATCH:
diff --git a/source/libs/parser/inc/parAst.h b/source/libs/parser/inc/parAst.h
index fc096a057c3bbe71ce844e1ac82bdde8273862d0..7dd0ef2616bf3fda27192fa7099906348753c163 100644
--- a/source/libs/parser/inc/parAst.h
+++ b/source/libs/parser/inc/parAst.h
@@ -53,12 +53,12 @@ typedef enum EDatabaseOptionType {
DB_OPTION_WAL,
DB_OPTION_VGROUPS,
DB_OPTION_SINGLE_STABLE,
- DB_OPTION_RETENTIONS
+ DB_OPTION_RETENTIONS,
+ DB_OPTION_SCHEMALESS
} EDatabaseOptionType;
typedef enum ETableOptionType {
TABLE_OPTION_COMMENT = 1,
- TABLE_OPTION_DELAY,
TABLE_OPTION_FILE_FACTOR,
TABLE_OPTION_ROLLUP,
TABLE_OPTION_TTL,
@@ -143,12 +143,12 @@ SNode* createDropTableClause(SAstCreateContext* pCxt, bool ignoreNotExists, SNod
SNode* createDropTableStmt(SAstCreateContext* pCxt, SNodeList* pTables);
SNode* createDropSuperTableStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SNode* pRealTable);
SNode* createAlterTableModifyOptions(SAstCreateContext* pCxt, SNode* pRealTable, SNode* pOptions);
-SNode* createAlterTableAddModifyCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_t alterType,
- const SToken* pColName, SDataType dataType);
-SNode* createAlterTableDropCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_t alterType, const SToken* pColName);
-SNode* createAlterTableRenameCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_t alterType,
- const SToken* pOldColName, const SToken* pNewColName);
-SNode* createAlterTableSetTag(SAstCreateContext* pCxt, SNode* pRealTable, const SToken* pTagName, SNode* pVal);
+SNode* createAlterTableAddModifyCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_t alterType, SToken* pColName,
+ SDataType dataType);
+SNode* createAlterTableDropCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_t alterType, SToken* pColName);
+SNode* createAlterTableRenameCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_t alterType, SToken* pOldColName,
+ SToken* pNewColName);
+SNode* createAlterTableSetTag(SAstCreateContext* pCxt, SNode* pRealTable, SToken* pTagName, SNode* pVal);
SNode* createUseDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName);
SNode* createShowStmt(SAstCreateContext* pCxt, ENodeType type, SNode* pDbName, SNode* pTbNamePattern);
SNode* createShowCreateDatabaseStmt(SAstCreateContext* pCxt, const SToken* pDbName);
@@ -167,8 +167,10 @@ SNode* createCreateComponentNodeStmt(SAstCreateContext* pCxt, ENodeType type, co
SNode* createDropComponentNodeStmt(SAstCreateContext* pCxt, ENodeType type, const SToken* pDnodeId);
SNode* createTopicOptions(SAstCreateContext* pCxt);
SNode* createCreateTopicStmt(SAstCreateContext* pCxt, bool ignoreExists, const SToken* pTopicName, SNode* pQuery,
- const SToken* pSubscribeDbName, SNode* pOptions);
+ const SToken* pSubDbName, SNode* pRealTable);
SNode* createDropTopicStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const SToken* pTopicName);
+SNode* createDropCGroupStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const SToken* pCGroupId,
+ const SToken* pTopicName);
SNode* createAlterLocalStmt(SAstCreateContext* pCxt, const SToken* pConfig, const SToken* pValue);
SNode* createDefaultExplainOptions(SAstCreateContext* pCxt);
SNode* setExplainVerbose(SAstCreateContext* pCxt, SNode* pOptions, const SToken* pVal);
diff --git a/source/libs/parser/inc/parInsertData.h b/source/libs/parser/inc/parInsertData.h
index e19f54dff36a696665d09796dc78eb0b6ca34068..aeebf51c96efa271799a66e9223065d4fd0314b9 100644
--- a/source/libs/parser/inc/parInsertData.h
+++ b/source/libs/parser/inc/parInsertData.h
@@ -94,7 +94,7 @@ static FORCE_INLINE void getSTSRowAppendInfo(uint8_t rowType, SParsedDataColInfo
col_id_t *colIdx) {
col_id_t schemaIdx = 0;
if (IS_DATA_COL_ORDERED(spd)) {
- schemaIdx = spd->boundColumns[idx] - PRIMARYKEY_TIMESTAMP_COL_ID;
+ schemaIdx = spd->boundColumns[idx];
if (TD_IS_TP_ROW_T(rowType)) {
*toffset = (spd->cols + schemaIdx)->toffset; // the offset of firstPart
*colIdx = schemaIdx;
@@ -104,7 +104,7 @@ static FORCE_INLINE void getSTSRowAppendInfo(uint8_t rowType, SParsedDataColInfo
}
} else {
ASSERT(idx == (spd->colIdxInfo + idx)->boundIdx);
- schemaIdx = (spd->colIdxInfo + idx)->schemaColIdx - PRIMARYKEY_TIMESTAMP_COL_ID;
+ schemaIdx = (spd->colIdxInfo + idx)->schemaColIdx;
if (TD_IS_TP_ROW_T(rowType)) {
*toffset = (spd->cols + schemaIdx)->toffset;
*colIdx = schemaIdx;
@@ -133,14 +133,15 @@ static FORCE_INLINE int32_t setBlockInfo(SSubmitBlk *pBlocks, STableDataBlocks *
int32_t schemaIdxCompar(const void *lhs, const void *rhs);
int32_t boundIdxCompar(const void *lhs, const void *rhs);
void setBoundColumnInfo(SParsedDataColInfo *pColList, SSchema *pSchema, col_id_t numOfCols);
-void destroyBlockArrayList(SArray* pDataBlockList);
-void destroyBlockHashmap(SHashObj* pDataBlockHash);
-int initRowBuilder(SRowBuilder *pBuilder, int16_t schemaVer, SParsedDataColInfo *pColInfo);
-int32_t allocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t * numOfRows);
-int32_t getDataBlockFromList(SHashObj* pHashList, void* id, int32_t idLen, int32_t size, int32_t startOffset, int32_t rowSize,
- STableMeta* pTableMeta, STableDataBlocks** dataBlocks, SArray* pBlockList, SVCreateTbReq* pCreateTbReq);
-int32_t mergeTableDataBlocks(SHashObj* pHashObj, uint8_t payloadType, SArray** pVgDataBlocks);
-int32_t buildCreateTbMsg(STableDataBlocks* pBlocks, SVCreateTbReq* pCreateTbReq);
+void destroyBlockArrayList(SArray *pDataBlockList);
+void destroyBlockHashmap(SHashObj *pDataBlockHash);
+int initRowBuilder(SRowBuilder *pBuilder, int16_t schemaVer, SParsedDataColInfo *pColInfo);
+int32_t allocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t *numOfRows);
+int32_t getDataBlockFromList(SHashObj *pHashList, void *id, int32_t idLen, int32_t size, int32_t startOffset,
+ int32_t rowSize, STableMeta *pTableMeta, STableDataBlocks **dataBlocks, SArray *pBlockList,
+ SVCreateTbReq *pCreateTbReq);
+int32_t mergeTableDataBlocks(SHashObj *pHashObj, uint8_t payloadType, SArray **pVgDataBlocks);
+int32_t buildCreateTbMsg(STableDataBlocks *pBlocks, SVCreateTbReq *pCreateTbReq);
int32_t allocateMemForSize(STableDataBlocks *pDataBlock, int32_t allSize);
diff --git a/source/libs/parser/inc/parInt.h b/source/libs/parser/inc/parInt.h
index 2ad1ebc1121d96f243fff9d55980b26bffdf6c04..8ec20cde5a07b54a5609a6097316e4ae5e538a83 100644
--- a/source/libs/parser/inc/parInt.h
+++ b/source/libs/parser/inc/parInt.h
@@ -24,12 +24,15 @@ extern "C" {
#include "parUtil.h"
#include "parser.h"
+int32_t parseInsertSyntax(SParseContext* pContext, SQuery** pQuery);
int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery);
int32_t parse(SParseContext* pParseCxt, SQuery** pQuery);
+int32_t collectMetaKey(SParseContext* pParseCxt, SQuery* pQuery);
int32_t authenticate(SParseContext* pParseCxt, SQuery* pQuery);
int32_t translate(SParseContext* pParseCxt, SQuery* pQuery);
int32_t extractResultSchema(const SNode* pRoot, int32_t* numOfCols, SSchema** pSchema);
int32_t calculateConstant(SParseContext* pParseCxt, SQuery* pQuery);
+int32_t isNotSchemalessDb(SParseContext* pContext, char *dbName);
#ifdef __cplusplus
}
diff --git a/source/libs/parser/inc/parUtil.h b/source/libs/parser/inc/parUtil.h
index f82d29d27eeb2f8b80baf56ff7c065025abcc3b5..0351023f5bc8fdbae4476e810e66f7d8bfc4e71f 100644
--- a/source/libs/parser/inc/parUtil.h
+++ b/source/libs/parser/inc/parUtil.h
@@ -20,15 +20,16 @@
extern "C" {
#endif
+#include "catalog.h"
#include "os.h"
#include "query.h"
-#define parserFatal(param, ...) qFatal("PARSER: " param, __VA_ARGS__)
-#define parserError(param, ...) qError("PARSER: " param, __VA_ARGS__)
-#define parserWarn(param, ...) qWarn("PARSER: " param, __VA_ARGS__)
-#define parserInfo(param, ...) qInfo("PARSER: " param, __VA_ARGS__)
-#define parserDebug(param, ...) qDebug("PARSER: " param, __VA_ARGS__)
-#define parserTrace(param, ...) qTrace("PARSER: " param, __VA_ARGS__)
+#define parserFatal(param, ...) qFatal("PARSER: " param, ##__VA_ARGS__)
+#define parserError(param, ...) qError("PARSER: " param, ##__VA_ARGS__)
+#define parserWarn(param, ...) qWarn("PARSER: " param, ##__VA_ARGS__)
+#define parserInfo(param, ...) qInfo("PARSER: " param, ##__VA_ARGS__)
+#define parserDebug(param, ...) qDebug("PARSER: " param, ##__VA_ARGS__)
+#define parserTrace(param, ...) qTrace("PARSER: " param, ##__VA_ARGS__)
#define PK_TS_COL_INTERNAL_NAME "_rowts"
@@ -37,6 +38,16 @@ typedef struct SMsgBuf {
char* buf;
} SMsgBuf;
+typedef struct SParseMetaCache {
+ SHashObj* pTableMeta; // key is tbFName, element is STableMeta*
+ SHashObj* pDbVgroup; // key is dbFName, element is SArray*
+ SHashObj* pTableVgroup; // key is tbFName, element is SVgroupInfo*
+ SHashObj* pDbCfg; // key is tbFName, element is SDbCfgInfo*
+ SHashObj* pDbInfo; // key is tbFName, element is SDbInfo*
+ SHashObj* pUserAuth; // key is SUserAuthInfo serialized string, element is bool indicating whether or not to pass
+ SHashObj* pUdf; // key is funcName, element is SFuncInfo*
+} SParseMetaCache;
+
int32_t generateSyntaxErrMsg(SMsgBuf* pBuf, int32_t errCode, ...);
int32_t buildInvalidOperationMsg(SMsgBuf* pMsgBuf, const char* msg);
int32_t buildSyntaxErrMsg(SMsgBuf* pBuf, const char* additionalInfo, const char* sourceStr);
@@ -47,10 +58,33 @@ int32_t getNumOfColumns(const STableMeta* pTableMeta);
int32_t getNumOfTags(const STableMeta* pTableMeta);
STableComInfo getTableInfo(const STableMeta* pTableMeta);
STableMeta* tableMetaDup(const STableMeta* pTableMeta);
-int parseJsontoTagData(const char* json, SKVRowBuilder* kvRowBuilder, SMsgBuf* errMsg, int16_t startColId);
+int32_t parseJsontoTagData(const char* json, SArray* pTagVals, STag **ppTag, SMsgBuf* pMsgBuf);
int32_t trimString(const char* src, int32_t len, char* dst, int32_t dlen);
+int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq);
+int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache);
+int32_t reserveTableMetaInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache);
+int32_t reserveTableMetaInCacheExt(const SName* pName, SParseMetaCache* pMetaCache);
+int32_t reserveDbVgInfoInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache);
+int32_t reserveTableVgroupInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache);
+int32_t reserveTableVgroupInCacheExt(const SName* pName, SParseMetaCache* pMetaCache);
+int32_t reserveDbVgVersionInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache);
+int32_t reserveDbCfgInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache);
+int32_t reserveUserAuthInCache(int32_t acctId, const char* pUser, const char* pDb, AUTH_TYPE type,
+ SParseMetaCache* pMetaCache);
+int32_t reserveUserAuthInCacheExt(const char* pUser, const SName* pName, AUTH_TYPE type, SParseMetaCache* pMetaCache);
+int32_t reserveUdfInCache(const char* pFunc, SParseMetaCache* pMetaCache);
+int32_t getTableMetaFromCache(SParseMetaCache* pMetaCache, const SName* pName, STableMeta** pMeta);
+int32_t getDbVgInfoFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SArray** pVgInfo);
+int32_t getTableVgroupFromCache(SParseMetaCache* pMetaCache, const SName* pName, SVgroupInfo* pVgroup);
+int32_t getDbVgVersionFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, int32_t* pVersion, int64_t* pDbId,
+ int32_t* pTableNum);
+int32_t getDbCfgFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SDbCfgInfo* pInfo);
+int32_t getUserAuthFromCache(SParseMetaCache* pMetaCache, const char* pUser, const char* pDbFName, AUTH_TYPE type,
+ bool* pPass);
+int32_t getUdfInfoFromCache(SParseMetaCache* pMetaCache, const char* pFunc, SFuncInfo* pInfo);
+
#ifdef __cplusplus
}
#endif
diff --git a/source/libs/parser/inc/sql.y b/source/libs/parser/inc/sql.y
index 2cba1eb043e0e4063b4a7519a3c116820412c69d..6c090a07901c1d96a567961b8a2ec3daabaaedf8 100644
--- a/source/libs/parser/inc/sql.y
+++ b/source/libs/parser/inc/sql.y
@@ -15,11 +15,15 @@
#include
#include
+#define ALLOW_FORBID_FUNC
+
#include "functionMgt.h"
#include "nodes.h"
#include "parToken.h"
#include "ttokendef.h"
#include "parAst.h"
+
+#define YYSTACKDEPTH 0
}
%syntax_error {
@@ -180,6 +184,7 @@ db_options(A) ::= db_options(B) WAL NK_INTEGER(C).
db_options(A) ::= db_options(B) VGROUPS NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_VGROUPS, &C); }
db_options(A) ::= db_options(B) SINGLE_STABLE NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_SINGLE_STABLE, &C); }
db_options(A) ::= db_options(B) RETENTIONS retention_list(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_RETENTIONS, C); }
+db_options(A) ::= db_options(B) SCHEMALESS NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_SCHEMALESS, &C); }
alter_db_options(A) ::= alter_db_option(B). { A = createAlterDatabaseOptions(pCxt); A = setAlterDatabaseOption(pCxt, A, &B); }
alter_db_options(A) ::= alter_db_options(B) alter_db_option(C). { A = setAlterDatabaseOption(pCxt, B, &C); }
@@ -312,7 +317,6 @@ tags_def(A) ::= TAGS NK_LP column_def_list(B) NK_RP.
table_options(A) ::= . { A = createDefaultTableOptions(pCxt); }
table_options(A) ::= table_options(B) COMMENT NK_STRING(C). { A = setTableOption(pCxt, B, TABLE_OPTION_COMMENT, &C); }
-table_options(A) ::= table_options(B) DELAY NK_INTEGER(C). { A = setTableOption(pCxt, B, TABLE_OPTION_DELAY, &C); }
table_options(A) ::= table_options(B) FILE_FACTOR NK_FLOAT(C). { A = setTableOption(pCxt, B, TABLE_OPTION_FILE_FACTOR, &C); }
table_options(A) ::= table_options(B) ROLLUP NK_LP func_name_list(C) NK_RP. { A = setTableOption(pCxt, B, TABLE_OPTION_ROLLUP, C); }
table_options(A) ::= table_options(B) TTL NK_INTEGER(C). { A = setTableOption(pCxt, B, TABLE_OPTION_TTL, &C); }
@@ -402,16 +406,12 @@ func_list(A) ::= func_list(B) NK_COMMA func(C).
func(A) ::= function_name(B) NK_LP expression_list(C) NK_RP. { A = createFunctionNode(pCxt, &B, C); }
/************************************************ create/drop topic ***************************************************/
-cmd ::= CREATE TOPIC not_exists_opt(A)
- topic_name(B) topic_options(D) AS query_expression(C). { pCxt->pRootNode = createCreateTopicStmt(pCxt, A, &B, C, NULL, D); }
-cmd ::= CREATE TOPIC not_exists_opt(A)
- topic_name(B) topic_options(D) AS db_name(C). { pCxt->pRootNode = createCreateTopicStmt(pCxt, A, &B, NULL, &C, D); }
+cmd ::= CREATE TOPIC not_exists_opt(A) topic_name(B) AS query_expression(C). { pCxt->pRootNode = createCreateTopicStmt(pCxt, A, &B, C, NULL, NULL); }
+cmd ::= CREATE TOPIC not_exists_opt(A) topic_name(B) AS DATABASE db_name(C). { pCxt->pRootNode = createCreateTopicStmt(pCxt, A, &B, NULL, &C, NULL); }
+cmd ::= CREATE TOPIC not_exists_opt(A) topic_name(B)
+ AS STABLE full_table_name(C). { pCxt->pRootNode = createCreateTopicStmt(pCxt, A, &B, NULL, NULL, C); }
cmd ::= DROP TOPIC exists_opt(A) topic_name(B). { pCxt->pRootNode = createDropTopicStmt(pCxt, A, &B); }
-
-topic_options(A) ::= . { A = createTopicOptions(pCxt); }
-topic_options(A) ::= topic_options(B) WITH TABLE. { ((STopicOptions*)B)->withTable = true; A = B; }
-topic_options(A) ::= topic_options(B) WITH SCHEMA. { ((STopicOptions*)B)->withSchema = true; A = B; }
-topic_options(A) ::= topic_options(B) WITH TAG. { ((STopicOptions*)B)->withTag = true; A = B; }
+cmd ::= DROP CONSUMER GROUP exists_opt(A) cgroup_name(B) ON topic_name(C). { pCxt->pRootNode = createDropCGroupStmt(pCxt, A, &B, &C); }
/************************************************ desc/describe *******************************************************/
cmd ::= DESC full_table_name(A). { pCxt->pRootNode = createDescribeStmt(pCxt, A); }
@@ -565,6 +565,10 @@ topic_name(A) ::= NK_ID(B).
%destructor stream_name { }
stream_name(A) ::= NK_ID(B). { A = B; }
+%type cgroup_name { SToken }
+%destructor cgroup_name { }
+cgroup_name(A) ::= NK_ID(B). { A = B; }
+
/************************************************ expression **********************************************************/
expression(A) ::= literal(B). { A = B; }
expression(A) ::= pseudo_column(B). { A = B; }
diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c
index f93f0218d4537218e3a3fdc07686995d9bb4935c..72a88548d2270d6d4776e2614bf05fc8c2b7ebf6 100644
--- a/source/libs/parser/src/parAstCreater.c
+++ b/source/libs/parser/src/parAstCreater.c
@@ -29,17 +29,17 @@
} \
} while (0)
-#define CHECK_RAW_EXPR_NODE(node) \
- do { \
- if (NULL == (node) || QUERY_NODE_RAW_EXPR != nodeType(node)) { \
- pCxt->errCode = TSDB_CODE_PAR_SYNTAX_ERROR; \
- return NULL; \
- } \
+#define CHECK_PARSER_STATUS(pCxt) \
+ do { \
+ if (TSDB_CODE_SUCCESS != pCxt->errCode) { \
+ return NULL; \
+ } \
} while (0)
SToken nil_token = {.type = TK_NK_NIL, .n = 0, .z = NULL};
void initAstCreateContext(SParseContext* pParseCxt, SAstCreateContext* pCxt) {
+ memset(pCxt, 0, sizeof(SAstCreateContext));
pCxt->pQueryCxt = pParseCxt;
pCxt->msgBuf.buf = pParseCxt->pMsg;
pCxt->msgBuf.len = pParseCxt->msgLen;
@@ -206,6 +206,7 @@ static bool checkComment(SAstCreateContext* pCxt, const SToken* pCommentToken, b
}
SNode* createRawExprNode(SAstCreateContext* pCxt, const SToken* pToken, SNode* pNode) {
+ CHECK_PARSER_STATUS(pCxt);
SRawExprNode* target = (SRawExprNode*)nodesMakeNode(QUERY_NODE_RAW_EXPR);
CHECK_OUT_OF_MEM(target);
target->p = pToken->z;
@@ -215,6 +216,7 @@ SNode* createRawExprNode(SAstCreateContext* pCxt, const SToken* pToken, SNode* p
}
SNode* createRawExprNodeExt(SAstCreateContext* pCxt, const SToken* pStart, const SToken* pEnd, SNode* pNode) {
+ CHECK_PARSER_STATUS(pCxt);
SRawExprNode* target = (SRawExprNode*)nodesMakeNode(QUERY_NODE_RAW_EXPR);
CHECK_OUT_OF_MEM(target);
target->p = pStart->z;
@@ -224,7 +226,7 @@ SNode* createRawExprNodeExt(SAstCreateContext* pCxt, const SToken* pStart, const
}
SNode* releaseRawExprNode(SAstCreateContext* pCxt, SNode* pNode) {
- CHECK_RAW_EXPR_NODE(pNode);
+ CHECK_PARSER_STATUS(pCxt);
SRawExprNode* pRawExpr = (SRawExprNode*)pNode;
SNode* pExpr = pRawExpr->pNode;
if (nodesIsExprNode(pExpr)) {
@@ -247,6 +249,7 @@ SToken getTokenFromRawExprNode(SAstCreateContext* pCxt, SNode* pNode) {
}
SNodeList* createNodeList(SAstCreateContext* pCxt, SNode* pNode) {
+ CHECK_PARSER_STATUS(pCxt);
SNodeList* list = nodesMakeList();
CHECK_OUT_OF_MEM(list);
pCxt->errCode = nodesListAppend(list, pNode);
@@ -254,11 +257,13 @@ SNodeList* createNodeList(SAstCreateContext* pCxt, SNode* pNode) {
}
SNodeList* addNodeToList(SAstCreateContext* pCxt, SNodeList* pList, SNode* pNode) {
+ CHECK_PARSER_STATUS(pCxt);
pCxt->errCode = nodesListAppend(pList, pNode);
return pList;
}
SNode* createColumnNode(SAstCreateContext* pCxt, SToken* pTableAlias, SToken* pColumnName) {
+ CHECK_PARSER_STATUS(pCxt);
if (!checkTableName(pCxt, pTableAlias) || !checkColumnName(pCxt, pColumnName)) {
return NULL;
}
@@ -272,6 +277,7 @@ SNode* createColumnNode(SAstCreateContext* pCxt, SToken* pTableAlias, SToken* pC
}
SNode* createValueNode(SAstCreateContext* pCxt, int32_t dataType, const SToken* pLiteral) {
+ CHECK_PARSER_STATUS(pCxt);
SValueNode* val = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE);
CHECK_OUT_OF_MEM(val);
val->literal = strndup(pLiteral->z, pLiteral->n);
@@ -291,6 +297,7 @@ SNode* createValueNode(SAstCreateContext* pCxt, int32_t dataType, const SToken*
}
SNode* createDurationValueNode(SAstCreateContext* pCxt, const SToken* pLiteral) {
+ CHECK_PARSER_STATUS(pCxt);
SValueNode* val = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE);
CHECK_OUT_OF_MEM(val);
val->literal = strndup(pLiteral->z, pLiteral->n);
@@ -304,6 +311,7 @@ SNode* createDurationValueNode(SAstCreateContext* pCxt, const SToken* pLiteral)
}
SNode* createDefaultDatabaseCondValue(SAstCreateContext* pCxt) {
+ CHECK_PARSER_STATUS(pCxt);
if (NULL == pCxt->pQueryCxt->db) {
return NULL;
}
@@ -321,6 +329,7 @@ SNode* createDefaultDatabaseCondValue(SAstCreateContext* pCxt) {
}
SNode* createPlaceholderValueNode(SAstCreateContext* pCxt, const SToken* pLiteral) {
+ CHECK_PARSER_STATUS(pCxt);
SValueNode* val = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE);
CHECK_OUT_OF_MEM(val);
val->literal = strndup(pLiteral->z, pLiteral->n);
@@ -338,6 +347,7 @@ SNode* createPlaceholderValueNode(SAstCreateContext* pCxt, const SToken* pLitera
}
SNode* createLogicConditionNode(SAstCreateContext* pCxt, ELogicConditionType type, SNode* pParam1, SNode* pParam2) {
+ CHECK_PARSER_STATUS(pCxt);
SLogicConditionNode* cond = (SLogicConditionNode*)nodesMakeNode(QUERY_NODE_LOGIC_CONDITION);
CHECK_OUT_OF_MEM(cond);
cond->condType = type;
@@ -360,6 +370,7 @@ SNode* createLogicConditionNode(SAstCreateContext* pCxt, ELogicConditionType typ
}
SNode* createOperatorNode(SAstCreateContext* pCxt, EOperatorType type, SNode* pLeft, SNode* pRight) {
+ CHECK_PARSER_STATUS(pCxt);
SOperatorNode* op = (SOperatorNode*)nodesMakeNode(QUERY_NODE_OPERATOR);
CHECK_OUT_OF_MEM(op);
op->opType = type;
@@ -369,17 +380,20 @@ SNode* createOperatorNode(SAstCreateContext* pCxt, EOperatorType type, SNode* pL
}
SNode* createBetweenAnd(SAstCreateContext* pCxt, SNode* pExpr, SNode* pLeft, SNode* pRight) {
+ CHECK_PARSER_STATUS(pCxt);
return createLogicConditionNode(pCxt, LOGIC_COND_TYPE_AND,
createOperatorNode(pCxt, OP_TYPE_GREATER_EQUAL, pExpr, pLeft),
createOperatorNode(pCxt, OP_TYPE_LOWER_EQUAL, nodesCloneNode(pExpr), pRight));
}
SNode* createNotBetweenAnd(SAstCreateContext* pCxt, SNode* pExpr, SNode* pLeft, SNode* pRight) {
+ CHECK_PARSER_STATUS(pCxt);
return createLogicConditionNode(pCxt, LOGIC_COND_TYPE_OR, createOperatorNode(pCxt, OP_TYPE_LOWER_THAN, pExpr, pLeft),
createOperatorNode(pCxt, OP_TYPE_GREATER_THAN, nodesCloneNode(pExpr), pRight));
}
static SNode* createPrimaryKeyCol(SAstCreateContext* pCxt) {
+ CHECK_PARSER_STATUS(pCxt);
SColumnNode* pCol = nodesMakeNode(QUERY_NODE_COLUMN);
CHECK_OUT_OF_MEM(pCol);
pCol->colId = PRIMARYKEY_TIMESTAMP_COL_ID;
@@ -388,6 +402,7 @@ static SNode* createPrimaryKeyCol(SAstCreateContext* pCxt) {
}
SNode* createFunctionNode(SAstCreateContext* pCxt, const SToken* pFuncName, SNodeList* pParameterList) {
+ CHECK_PARSER_STATUS(pCxt);
if (0 == strncasecmp("_rowts", pFuncName->z, pFuncName->n) || 0 == strncasecmp("_c0", pFuncName->z, pFuncName->n)) {
return createPrimaryKeyCol(pCxt);
}
@@ -399,6 +414,7 @@ SNode* createFunctionNode(SAstCreateContext* pCxt, const SToken* pFuncName, SNod
}
SNode* createCastFunctionNode(SAstCreateContext* pCxt, SNode* pExpr, SDataType dt) {
+ CHECK_PARSER_STATUS(pCxt);
SFunctionNode* func = (SFunctionNode*)nodesMakeNode(QUERY_NODE_FUNCTION);
CHECK_OUT_OF_MEM(func);
strcpy(func->functionName, "cast");
@@ -413,6 +429,7 @@ SNode* createCastFunctionNode(SAstCreateContext* pCxt, SNode* pExpr, SDataType d
}
SNode* createNodeListNode(SAstCreateContext* pCxt, SNodeList* pList) {
+ CHECK_PARSER_STATUS(pCxt);
SNodeListNode* list = (SNodeListNode*)nodesMakeNode(QUERY_NODE_NODE_LIST);
CHECK_OUT_OF_MEM(list);
list->pNodeList = pList;
@@ -420,6 +437,7 @@ SNode* createNodeListNode(SAstCreateContext* pCxt, SNodeList* pList) {
}
SNode* createNodeListNodeEx(SAstCreateContext* pCxt, SNode* p1, SNode* p2) {
+ CHECK_PARSER_STATUS(pCxt);
SNodeListNode* list = (SNodeListNode*)nodesMakeNode(QUERY_NODE_NODE_LIST);
CHECK_OUT_OF_MEM(list);
list->pNodeList = nodesMakeList();
@@ -430,6 +448,7 @@ SNode* createNodeListNodeEx(SAstCreateContext* pCxt, SNode* p1, SNode* p2) {
}
SNode* createRealTableNode(SAstCreateContext* pCxt, SToken* pDbName, SToken* pTableName, SToken* pTableAlias) {
+ CHECK_PARSER_STATUS(pCxt);
if (!checkDbName(pCxt, pDbName, true) || !checkTableName(pCxt, pTableName) || !checkTableName(pCxt, pTableAlias)) {
return NULL;
}
@@ -450,6 +469,7 @@ SNode* createRealTableNode(SAstCreateContext* pCxt, SToken* pDbName, SToken* pTa
}
SNode* createTempTableNode(SAstCreateContext* pCxt, SNode* pSubquery, const SToken* pTableAlias) {
+ CHECK_PARSER_STATUS(pCxt);
STempTableNode* tempTable = (STempTableNode*)nodesMakeNode(QUERY_NODE_TEMP_TABLE);
CHECK_OUT_OF_MEM(tempTable);
tempTable->pSubquery = pSubquery;
@@ -467,6 +487,7 @@ SNode* createTempTableNode(SAstCreateContext* pCxt, SNode* pSubquery, const STok
}
SNode* createJoinTableNode(SAstCreateContext* pCxt, EJoinType type, SNode* pLeft, SNode* pRight, SNode* pJoinCond) {
+ CHECK_PARSER_STATUS(pCxt);
SJoinTableNode* joinTable = (SJoinTableNode*)nodesMakeNode(QUERY_NODE_JOIN_TABLE);
CHECK_OUT_OF_MEM(joinTable);
joinTable->joinType = type;
@@ -477,6 +498,7 @@ SNode* createJoinTableNode(SAstCreateContext* pCxt, EJoinType type, SNode* pLeft
}
SNode* createLimitNode(SAstCreateContext* pCxt, const SToken* pLimit, const SToken* pOffset) {
+ CHECK_PARSER_STATUS(pCxt);
SLimitNode* limitNode = (SLimitNode*)nodesMakeNode(QUERY_NODE_LIMIT);
CHECK_OUT_OF_MEM(limitNode);
limitNode->limit = taosStr2Int64(pLimit->z, NULL, 10);
@@ -487,6 +509,7 @@ SNode* createLimitNode(SAstCreateContext* pCxt, const SToken* pLimit, const STok
}
SNode* createOrderByExprNode(SAstCreateContext* pCxt, SNode* pExpr, EOrder order, ENullOrder nullOrder) {
+ CHECK_PARSER_STATUS(pCxt);
SOrderByExprNode* orderByExpr = (SOrderByExprNode*)nodesMakeNode(QUERY_NODE_ORDER_BY_EXPR);
CHECK_OUT_OF_MEM(orderByExpr);
orderByExpr->pExpr = pExpr;
@@ -499,6 +522,7 @@ SNode* createOrderByExprNode(SAstCreateContext* pCxt, SNode* pExpr, EOrder order
}
SNode* createSessionWindowNode(SAstCreateContext* pCxt, SNode* pCol, SNode* pGap) {
+ CHECK_PARSER_STATUS(pCxt);
SSessionWindowNode* session = (SSessionWindowNode*)nodesMakeNode(QUERY_NODE_SESSION_WINDOW);
CHECK_OUT_OF_MEM(session);
session->pCol = (SColumnNode*)pCol;
@@ -507,6 +531,7 @@ SNode* createSessionWindowNode(SAstCreateContext* pCxt, SNode* pCol, SNode* pGap
}
SNode* createStateWindowNode(SAstCreateContext* pCxt, SNode* pExpr) {
+ CHECK_PARSER_STATUS(pCxt);
SStateWindowNode* state = (SStateWindowNode*)nodesMakeNode(QUERY_NODE_STATE_WINDOW);
CHECK_OUT_OF_MEM(state);
state->pCol = createPrimaryKeyCol(pCxt);
@@ -520,6 +545,7 @@ SNode* createStateWindowNode(SAstCreateContext* pCxt, SNode* pExpr) {
SNode* createIntervalWindowNode(SAstCreateContext* pCxt, SNode* pInterval, SNode* pOffset, SNode* pSliding,
SNode* pFill) {
+ CHECK_PARSER_STATUS(pCxt);
SIntervalWindowNode* interval = (SIntervalWindowNode*)nodesMakeNode(QUERY_NODE_INTERVAL_WINDOW);
CHECK_OUT_OF_MEM(interval);
interval->pCol = createPrimaryKeyCol(pCxt);
@@ -535,6 +561,7 @@ SNode* createIntervalWindowNode(SAstCreateContext* pCxt, SNode* pInterval, SNode
}
SNode* createFillNode(SAstCreateContext* pCxt, EFillMode mode, SNode* pValues) {
+ CHECK_PARSER_STATUS(pCxt);
SFillNode* fill = (SFillNode*)nodesMakeNode(QUERY_NODE_FILL);
CHECK_OUT_OF_MEM(fill);
fill->mode = mode;
@@ -549,6 +576,7 @@ SNode* createFillNode(SAstCreateContext* pCxt, EFillMode mode, SNode* pValues) {
}
SNode* createGroupingSetNode(SAstCreateContext* pCxt, SNode* pNode) {
+ CHECK_PARSER_STATUS(pCxt);
SGroupingSetNode* groupingSet = (SGroupingSetNode*)nodesMakeNode(QUERY_NODE_GROUPING_SET);
CHECK_OUT_OF_MEM(groupingSet);
groupingSet->groupingSetType = GP_TYPE_NORMAL;
@@ -558,9 +586,7 @@ SNode* createGroupingSetNode(SAstCreateContext* pCxt, SNode* pNode) {
}
SNode* setProjectionAlias(SAstCreateContext* pCxt, SNode* pNode, const SToken* pAlias) {
- if (NULL == pNode || TSDB_CODE_SUCCESS != pCxt->errCode) {
- return pNode;
- }
+ CHECK_PARSER_STATUS(pCxt);
int32_t len = TMIN(sizeof(((SExprNode*)pNode)->aliasName) - 1, pAlias->n);
strncpy(((SExprNode*)pNode)->aliasName, pAlias->z, len);
((SExprNode*)pNode)->aliasName[len] = '\0';
@@ -570,6 +596,7 @@ SNode* setProjectionAlias(SAstCreateContext* pCxt, SNode* pNode, const SToken* p
}
SNode* addWhereClause(SAstCreateContext* pCxt, SNode* pStmt, SNode* pWhere) {
+ CHECK_PARSER_STATUS(pCxt);
if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) {
((SSelectStmt*)pStmt)->pWhere = pWhere;
}
@@ -577,6 +604,7 @@ SNode* addWhereClause(SAstCreateContext* pCxt, SNode* pStmt, SNode* pWhere) {
}
SNode* addPartitionByClause(SAstCreateContext* pCxt, SNode* pStmt, SNodeList* pPartitionByList) {
+ CHECK_PARSER_STATUS(pCxt);
if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) {
((SSelectStmt*)pStmt)->pPartitionByList = pPartitionByList;
}
@@ -584,6 +612,7 @@ SNode* addPartitionByClause(SAstCreateContext* pCxt, SNode* pStmt, SNodeList* pP
}
SNode* addWindowClauseClause(SAstCreateContext* pCxt, SNode* pStmt, SNode* pWindow) {
+ CHECK_PARSER_STATUS(pCxt);
if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) {
((SSelectStmt*)pStmt)->pWindow = pWindow;
}
@@ -591,6 +620,7 @@ SNode* addWindowClauseClause(SAstCreateContext* pCxt, SNode* pStmt, SNode* pWind
}
SNode* addGroupByClause(SAstCreateContext* pCxt, SNode* pStmt, SNodeList* pGroupByList) {
+ CHECK_PARSER_STATUS(pCxt);
if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) {
((SSelectStmt*)pStmt)->pGroupByList = pGroupByList;
}
@@ -598,6 +628,7 @@ SNode* addGroupByClause(SAstCreateContext* pCxt, SNode* pStmt, SNodeList* pGroup
}
SNode* addHavingClause(SAstCreateContext* pCxt, SNode* pStmt, SNode* pHaving) {
+ CHECK_PARSER_STATUS(pCxt);
if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) {
((SSelectStmt*)pStmt)->pHaving = pHaving;
}
@@ -605,6 +636,7 @@ SNode* addHavingClause(SAstCreateContext* pCxt, SNode* pStmt, SNode* pHaving) {
}
SNode* addOrderByClause(SAstCreateContext* pCxt, SNode* pStmt, SNodeList* pOrderByList) {
+ CHECK_PARSER_STATUS(pCxt);
if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) {
((SSelectStmt*)pStmt)->pOrderByList = pOrderByList;
}
@@ -612,6 +644,7 @@ SNode* addOrderByClause(SAstCreateContext* pCxt, SNode* pStmt, SNodeList* pOrder
}
SNode* addSlimitClause(SAstCreateContext* pCxt, SNode* pStmt, SNode* pSlimit) {
+ CHECK_PARSER_STATUS(pCxt);
if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) {
((SSelectStmt*)pStmt)->pSlimit = (SLimitNode*)pSlimit;
}
@@ -619,6 +652,7 @@ SNode* addSlimitClause(SAstCreateContext* pCxt, SNode* pStmt, SNode* pSlimit) {
}
SNode* addLimitClause(SAstCreateContext* pCxt, SNode* pStmt, SNode* pLimit) {
+ CHECK_PARSER_STATUS(pCxt);
if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) {
((SSelectStmt*)pStmt)->pLimit = (SLimitNode*)pLimit;
}
@@ -626,6 +660,7 @@ SNode* addLimitClause(SAstCreateContext* pCxt, SNode* pStmt, SNode* pLimit) {
}
SNode* createSelectStmt(SAstCreateContext* pCxt, bool isDistinct, SNodeList* pProjectionList, SNode* pTable) {
+ CHECK_PARSER_STATUS(pCxt);
SSelectStmt* select = (SSelectStmt*)nodesMakeNode(QUERY_NODE_SELECT_STMT);
CHECK_OUT_OF_MEM(select);
select->isDistinct = isDistinct;
@@ -637,6 +672,7 @@ SNode* createSelectStmt(SAstCreateContext* pCxt, bool isDistinct, SNodeList* pPr
}
SNode* createSetOperator(SAstCreateContext* pCxt, ESetOperatorType type, SNode* pLeft, SNode* pRight) {
+ CHECK_PARSER_STATUS(pCxt);
SSetOperator* setOp = (SSetOperator*)nodesMakeNode(QUERY_NODE_SET_OPERATOR);
CHECK_OUT_OF_MEM(setOp);
setOp->opType = type;
@@ -647,6 +683,7 @@ SNode* createSetOperator(SAstCreateContext* pCxt, ESetOperatorType type, SNode*
}
SNode* createDefaultDatabaseOptions(SAstCreateContext* pCxt) {
+ CHECK_PARSER_STATUS(pCxt);
SDatabaseOptions* pOptions = nodesMakeNode(QUERY_NODE_DATABASE_OPTIONS);
CHECK_OUT_OF_MEM(pOptions);
pOptions->buffer = TSDB_DEFAULT_BUFFER_PER_VNODE;
@@ -667,10 +704,12 @@ SNode* createDefaultDatabaseOptions(SAstCreateContext* pCxt) {
pOptions->walLevel = TSDB_DEFAULT_WAL_LEVEL;
pOptions->numOfVgroups = TSDB_DEFAULT_VN_PER_DB;
pOptions->singleStable = TSDB_DEFAULT_DB_SINGLE_STABLE;
+ pOptions->schemaless = TSDB_DEFAULT_DB_SCHEMALESS;
return (SNode*)pOptions;
}
SNode* createAlterDatabaseOptions(SAstCreateContext* pCxt) {
+ CHECK_PARSER_STATUS(pCxt);
SDatabaseOptions* pOptions = nodesMakeNode(QUERY_NODE_DATABASE_OPTIONS);
CHECK_OUT_OF_MEM(pOptions);
pOptions->buffer = -1;
@@ -691,10 +730,12 @@ SNode* createAlterDatabaseOptions(SAstCreateContext* pCxt) {
pOptions->walLevel = -1;
pOptions->numOfVgroups = -1;
pOptions->singleStable = -1;
+ pOptions->schemaless = -1;
return (SNode*)pOptions;
}
SNode* setDatabaseOption(SAstCreateContext* pCxt, SNode* pOptions, EDatabaseOptionType type, void* pVal) {
+ CHECK_PARSER_STATUS(pCxt);
switch (type) {
case DB_OPTION_BUFFER:
((SDatabaseOptions*)pOptions)->buffer = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
@@ -754,6 +795,9 @@ SNode* setDatabaseOption(SAstCreateContext* pCxt, SNode* pOptions, EDatabaseOpti
case DB_OPTION_RETENTIONS:
((SDatabaseOptions*)pOptions)->pRetentions = pVal;
break;
+ case DB_OPTION_SCHEMALESS:
+ ((SDatabaseOptions*)pOptions)->schemaless = taosStr2Int8(((SToken*)pVal)->z, NULL, 10);
+ break;
default:
break;
}
@@ -761,6 +805,7 @@ SNode* setDatabaseOption(SAstCreateContext* pCxt, SNode* pOptions, EDatabaseOpti
}
SNode* setAlterDatabaseOption(SAstCreateContext* pCxt, SNode* pOptions, SAlterOption* pAlterOption) {
+ CHECK_PARSER_STATUS(pCxt);
switch (pAlterOption->type) {
case DB_OPTION_KEEP:
case DB_OPTION_RETENTIONS:
@@ -772,6 +817,7 @@ SNode* setAlterDatabaseOption(SAstCreateContext* pCxt, SNode* pOptions, SAlterOp
}
SNode* createCreateDatabaseStmt(SAstCreateContext* pCxt, bool ignoreExists, SToken* pDbName, SNode* pOptions) {
+ CHECK_PARSER_STATUS(pCxt);
if (!checkDbName(pCxt, pDbName, false)) {
return NULL;
}
@@ -784,6 +830,7 @@ SNode* createCreateDatabaseStmt(SAstCreateContext* pCxt, bool ignoreExists, STok
}
SNode* createDropDatabaseStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken* pDbName) {
+ CHECK_PARSER_STATUS(pCxt);
if (!checkDbName(pCxt, pDbName, false)) {
return NULL;
}
@@ -795,6 +842,7 @@ SNode* createDropDatabaseStmt(SAstCreateContext* pCxt, bool ignoreNotExists, STo
}
SNode* createAlterDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName, SNode* pOptions) {
+ CHECK_PARSER_STATUS(pCxt);
if (!checkDbName(pCxt, pDbName, false)) {
return NULL;
}
@@ -806,24 +854,25 @@ SNode* createAlterDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName, SNode*
}
SNode* createDefaultTableOptions(SAstCreateContext* pCxt) {
+ CHECK_PARSER_STATUS(pCxt);
STableOptions* pOptions = nodesMakeNode(QUERY_NODE_TABLE_OPTIONS);
CHECK_OUT_OF_MEM(pOptions);
- pOptions->delay = TSDB_DEFAULT_ROLLUP_DELAY;
pOptions->filesFactor = TSDB_DEFAULT_ROLLUP_FILE_FACTOR;
pOptions->ttl = TSDB_DEFAULT_TABLE_TTL;
return (SNode*)pOptions;
}
SNode* createAlterTableOptions(SAstCreateContext* pCxt) {
+ CHECK_PARSER_STATUS(pCxt);
STableOptions* pOptions = nodesMakeNode(QUERY_NODE_TABLE_OPTIONS);
CHECK_OUT_OF_MEM(pOptions);
- pOptions->delay = -1;
pOptions->filesFactor = -1;
pOptions->ttl = -1;
return (SNode*)pOptions;
}
SNode* setTableOption(SAstCreateContext* pCxt, SNode* pOptions, ETableOptionType type, void* pVal) {
+ CHECK_PARSER_STATUS(pCxt);
switch (type) {
case TABLE_OPTION_COMMENT:
if (checkComment(pCxt, (SToken*)pVal, true)) {
@@ -831,11 +880,8 @@ SNode* setTableOption(SAstCreateContext* pCxt, SNode* pOptions, ETableOptionType
sizeof(((STableOptions*)pOptions)->comment));
}
break;
- case TABLE_OPTION_DELAY:
- ((STableOptions*)pOptions)->delay = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
- break;
case TABLE_OPTION_FILE_FACTOR:
- ((STableOptions*)pOptions)->filesFactor = taosStr2Float(((SToken*)pVal)->z, NULL);
+ ((STableOptions*)pOptions)->filesFactor = taosStr2Double(((SToken*)pVal)->z, NULL);
break;
case TABLE_OPTION_ROLLUP:
((STableOptions*)pOptions)->pRollupFuncs = pVal;
@@ -853,6 +899,7 @@ SNode* setTableOption(SAstCreateContext* pCxt, SNode* pOptions, ETableOptionType
}
SNode* createColumnDefNode(SAstCreateContext* pCxt, SToken* pColName, SDataType dataType, const SToken* pComment) {
+ CHECK_PARSER_STATUS(pCxt);
if (!checkColumnName(pCxt, pColName) || !checkComment(pCxt, pComment, false)) {
return NULL;
}
@@ -879,9 +926,7 @@ SDataType createVarLenDataType(uint8_t type, const SToken* pLen) {
SNode* createCreateTableStmt(SAstCreateContext* pCxt, bool ignoreExists, SNode* pRealTable, SNodeList* pCols,
SNodeList* pTags, SNode* pOptions) {
- if (NULL == pRealTable) {
- return NULL;
- }
+ CHECK_PARSER_STATUS(pCxt);
SCreateTableStmt* pStmt = (SCreateTableStmt*)nodesMakeNode(QUERY_NODE_CREATE_TABLE_STMT);
CHECK_OUT_OF_MEM(pStmt);
strcpy(pStmt->dbName, ((SRealTableNode*)pRealTable)->table.dbName);
@@ -896,9 +941,7 @@ SNode* createCreateTableStmt(SAstCreateContext* pCxt, bool ignoreExists, SNode*
SNode* createCreateSubTableClause(SAstCreateContext* pCxt, bool ignoreExists, SNode* pRealTable, SNode* pUseRealTable,
SNodeList* pSpecificTags, SNodeList* pValsOfTags, SNode* pOptions) {
- if (NULL == pRealTable) {
- return NULL;
- }
+ CHECK_PARSER_STATUS(pCxt);
SCreateSubTableClause* pStmt = nodesMakeNode(QUERY_NODE_CREATE_SUBTABLE_CLAUSE);
CHECK_OUT_OF_MEM(pStmt);
strcpy(pStmt->dbName, ((SRealTableNode*)pRealTable)->table.dbName);
@@ -914,6 +957,7 @@ SNode* createCreateSubTableClause(SAstCreateContext* pCxt, bool ignoreExists, SN
}
SNode* createCreateMultiTableStmt(SAstCreateContext* pCxt, SNodeList* pSubTables) {
+ CHECK_PARSER_STATUS(pCxt);
SCreateMultiTableStmt* pStmt = nodesMakeNode(QUERY_NODE_CREATE_MULTI_TABLE_STMT);
CHECK_OUT_OF_MEM(pStmt);
pStmt->pSubTables = pSubTables;
@@ -921,9 +965,7 @@ SNode* createCreateMultiTableStmt(SAstCreateContext* pCxt, SNodeList* pSubTables
}
SNode* createDropTableClause(SAstCreateContext* pCxt, bool ignoreNotExists, SNode* pRealTable) {
- if (NULL == pRealTable) {
- return NULL;
- }
+ CHECK_PARSER_STATUS(pCxt);
SDropTableClause* pStmt = nodesMakeNode(QUERY_NODE_DROP_TABLE_CLAUSE);
CHECK_OUT_OF_MEM(pStmt);
strcpy(pStmt->dbName, ((SRealTableNode*)pRealTable)->table.dbName);
@@ -934,6 +976,7 @@ SNode* createDropTableClause(SAstCreateContext* pCxt, bool ignoreNotExists, SNod
}
SNode* createDropTableStmt(SAstCreateContext* pCxt, SNodeList* pTables) {
+ CHECK_PARSER_STATUS(pCxt);
SDropTableStmt* pStmt = nodesMakeNode(QUERY_NODE_DROP_TABLE_STMT);
CHECK_OUT_OF_MEM(pStmt);
pStmt->pTables = pTables;
@@ -941,6 +984,7 @@ SNode* createDropTableStmt(SAstCreateContext* pCxt, SNodeList* pTables) {
}
SNode* createDropSuperTableStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SNode* pRealTable) {
+ CHECK_PARSER_STATUS(pCxt);
SDropSuperTableStmt* pStmt = nodesMakeNode(QUERY_NODE_DROP_SUPER_TABLE_STMT);
CHECK_OUT_OF_MEM(pStmt);
strcpy(pStmt->dbName, ((SRealTableNode*)pRealTable)->table.dbName);
@@ -958,9 +1002,7 @@ static SNode* createAlterTableStmtFinalize(SNode* pRealTable, SAlterTableStmt* p
}
SNode* createAlterTableModifyOptions(SAstCreateContext* pCxt, SNode* pRealTable, SNode* pOptions) {
- if (NULL == pRealTable) {
- return NULL;
- }
+ CHECK_PARSER_STATUS(pCxt);
SAlterTableStmt* pStmt = nodesMakeNode(QUERY_NODE_ALTER_TABLE_STMT);
CHECK_OUT_OF_MEM(pStmt);
pStmt->alterType = TSDB_ALTER_TABLE_UPDATE_OPTIONS;
@@ -968,9 +1010,10 @@ SNode* createAlterTableModifyOptions(SAstCreateContext* pCxt, SNode* pRealTable,
return createAlterTableStmtFinalize(pRealTable, pStmt);
}
-SNode* createAlterTableAddModifyCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_t alterType,
- const SToken* pColName, SDataType dataType) {
- if (NULL == pRealTable) {
+SNode* createAlterTableAddModifyCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_t alterType, SToken* pColName,
+ SDataType dataType) {
+ CHECK_PARSER_STATUS(pCxt);
+ if (!checkColumnName(pCxt, pColName)) {
return NULL;
}
SAlterTableStmt* pStmt = nodesMakeNode(QUERY_NODE_ALTER_TABLE_STMT);
@@ -981,8 +1024,9 @@ SNode* createAlterTableAddModifyCol(SAstCreateContext* pCxt, SNode* pRealTable,
return createAlterTableStmtFinalize(pRealTable, pStmt);
}
-SNode* createAlterTableDropCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_t alterType, const SToken* pColName) {
- if (NULL == pRealTable) {
+SNode* createAlterTableDropCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_t alterType, SToken* pColName) {
+ CHECK_PARSER_STATUS(pCxt);
+ if (!checkColumnName(pCxt, pColName)) {
return NULL;
}
SAlterTableStmt* pStmt = nodesMakeNode(QUERY_NODE_ALTER_TABLE_STMT);
@@ -992,9 +1036,10 @@ SNode* createAlterTableDropCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_
return createAlterTableStmtFinalize(pRealTable, pStmt);
}
-SNode* createAlterTableRenameCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_t alterType,
- const SToken* pOldColName, const SToken* pNewColName) {
- if (NULL == pRealTable) {
+SNode* createAlterTableRenameCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_t alterType, SToken* pOldColName,
+ SToken* pNewColName) {
+ CHECK_PARSER_STATUS(pCxt);
+ if (!checkColumnName(pCxt, pOldColName) || !checkColumnName(pCxt, pNewColName)) {
return NULL;
}
SAlterTableStmt* pStmt = nodesMakeNode(QUERY_NODE_ALTER_TABLE_STMT);
@@ -1005,8 +1050,9 @@ SNode* createAlterTableRenameCol(SAstCreateContext* pCxt, SNode* pRealTable, int
return createAlterTableStmtFinalize(pRealTable, pStmt);
}
-SNode* createAlterTableSetTag(SAstCreateContext* pCxt, SNode* pRealTable, const SToken* pTagName, SNode* pVal) {
- if (NULL == pRealTable) {
+SNode* createAlterTableSetTag(SAstCreateContext* pCxt, SNode* pRealTable, SToken* pTagName, SNode* pVal) {
+ CHECK_PARSER_STATUS(pCxt);
+ if (!checkColumnName(pCxt, pTagName)) {
return NULL;
}
SAlterTableStmt* pStmt = nodesMakeNode(QUERY_NODE_ALTER_TABLE_STMT);
@@ -1018,6 +1064,7 @@ SNode* createAlterTableSetTag(SAstCreateContext* pCxt, SNode* pRealTable, const
}
SNode* createUseDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName) {
+ CHECK_PARSER_STATUS(pCxt);
if (!checkDbName(pCxt, pDbName, false)) {
return NULL;
}
@@ -1033,13 +1080,13 @@ static bool needDbShowStmt(ENodeType type) {
}
SNode* createShowStmt(SAstCreateContext* pCxt, ENodeType type, SNode* pDbName, SNode* pTbNamePattern) {
+ CHECK_PARSER_STATUS(pCxt);
if (needDbShowStmt(type) && NULL == pDbName && NULL == pCxt->pQueryCxt->db) {
snprintf(pCxt->pQueryCxt->pMsg, pCxt->pQueryCxt->msgLen, "db not specified");
pCxt->errCode = TSDB_CODE_PAR_SYNTAX_ERROR;
return NULL;
}
SShowStmt* pStmt = nodesMakeNode(type);
- ;
CHECK_OUT_OF_MEM(pStmt);
pStmt->pDbName = pDbName;
pStmt->pTbNamePattern = pTbNamePattern;
@@ -1047,18 +1094,21 @@ SNode* createShowStmt(SAstCreateContext* pCxt, ENodeType type, SNode* pDbName, S
}
SNode* createShowCreateDatabaseStmt(SAstCreateContext* pCxt, const SToken* pDbName) {
+ CHECK_PARSER_STATUS(pCxt);
SNode* pStmt = nodesMakeNode(QUERY_NODE_SHOW_CREATE_DATABASE_STMT);
CHECK_OUT_OF_MEM(pStmt);
return pStmt;
}
SNode* createShowCreateTableStmt(SAstCreateContext* pCxt, ENodeType type, SNode* pRealTable) {
+ CHECK_PARSER_STATUS(pCxt);
SNode* pStmt = nodesMakeNode(type);
CHECK_OUT_OF_MEM(pStmt);
return pStmt;
}
SNode* createCreateUserStmt(SAstCreateContext* pCxt, SToken* pUserName, const SToken* pPassword) {
+ CHECK_PARSER_STATUS(pCxt);
char password[TSDB_USET_PASSWORD_LEN] = {0};
if (!checkUserName(pCxt, pUserName) || !checkPassword(pCxt, pPassword, password)) {
return NULL;
@@ -1071,6 +1121,7 @@ SNode* createCreateUserStmt(SAstCreateContext* pCxt, SToken* pUserName, const ST
}
SNode* createAlterUserStmt(SAstCreateContext* pCxt, SToken* pUserName, int8_t alterType, const SToken* pVal) {
+ CHECK_PARSER_STATUS(pCxt);
if (!checkUserName(pCxt, pUserName)) {
return NULL;
}
@@ -1090,6 +1141,7 @@ SNode* createAlterUserStmt(SAstCreateContext* pCxt, SToken* pUserName, int8_t al
}
SNode* createDropUserStmt(SAstCreateContext* pCxt, SToken* pUserName) {
+ CHECK_PARSER_STATUS(pCxt);
if (!checkUserName(pCxt, pUserName)) {
return NULL;
}
@@ -1100,6 +1152,7 @@ SNode* createDropUserStmt(SAstCreateContext* pCxt, SToken* pUserName) {
}
SNode* createCreateDnodeStmt(SAstCreateContext* pCxt, const SToken* pFqdn, const SToken* pPort) {
+ CHECK_PARSER_STATUS(pCxt);
int32_t port = 0;
char fqdn[TSDB_FQDN_LEN] = {0};
if (NULL == pPort) {
@@ -1121,6 +1174,7 @@ SNode* createCreateDnodeStmt(SAstCreateContext* pCxt, const SToken* pFqdn, const
}
SNode* createDropDnodeStmt(SAstCreateContext* pCxt, const SToken* pDnode) {
+ CHECK_PARSER_STATUS(pCxt);
SDropDnodeStmt* pStmt = (SDropDnodeStmt*)nodesMakeNode(QUERY_NODE_DROP_DNODE_STMT);
CHECK_OUT_OF_MEM(pStmt);
if (TK_NK_INTEGER == pDnode->type) {
@@ -1136,6 +1190,7 @@ SNode* createDropDnodeStmt(SAstCreateContext* pCxt, const SToken* pDnode) {
SNode* createAlterDnodeStmt(SAstCreateContext* pCxt, const SToken* pDnode, const SToken* pConfig,
const SToken* pValue) {
+ CHECK_PARSER_STATUS(pCxt);
SAlterDnodeStmt* pStmt = nodesMakeNode(QUERY_NODE_ALTER_DNODE_STMT);
CHECK_OUT_OF_MEM(pStmt);
pStmt->dnodeId = taosStr2Int32(pDnode->z, NULL, 10);
@@ -1148,6 +1203,7 @@ SNode* createAlterDnodeStmt(SAstCreateContext* pCxt, const SToken* pDnode, const
SNode* createCreateIndexStmt(SAstCreateContext* pCxt, EIndexType type, bool ignoreExists, SToken* pIndexName,
SToken* pTableName, SNodeList* pCols, SNode* pOptions) {
+ CHECK_PARSER_STATUS(pCxt);
if (!checkIndexName(pCxt, pIndexName) || !checkTableName(pCxt, pTableName) || !checkDbName(pCxt, NULL, true)) {
return NULL;
}
@@ -1164,6 +1220,7 @@ SNode* createCreateIndexStmt(SAstCreateContext* pCxt, EIndexType type, bool igno
SNode* createIndexOption(SAstCreateContext* pCxt, SNodeList* pFuncs, SNode* pInterval, SNode* pOffset,
SNode* pSliding) {
+ CHECK_PARSER_STATUS(pCxt);
SIndexOptions* pOptions = nodesMakeNode(QUERY_NODE_INDEX_OPTIONS);
CHECK_OUT_OF_MEM(pOptions);
pOptions->pFuncs = pFuncs;
@@ -1174,6 +1231,7 @@ SNode* createIndexOption(SAstCreateContext* pCxt, SNodeList* pFuncs, SNode* pInt
}
SNode* createDropIndexStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken* pIndexName, SToken* pTableName) {
+ CHECK_PARSER_STATUS(pCxt);
if (!checkIndexName(pCxt, pIndexName) || !checkTableName(pCxt, pTableName)) {
return NULL;
}
@@ -1186,6 +1244,7 @@ SNode* createDropIndexStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken
}
SNode* createCreateComponentNodeStmt(SAstCreateContext* pCxt, ENodeType type, const SToken* pDnodeId) {
+ CHECK_PARSER_STATUS(pCxt);
SCreateComponentNodeStmt* pStmt = nodesMakeNode(type);
CHECK_OUT_OF_MEM(pStmt);
pStmt->dnodeId = taosStr2Int32(pDnodeId->z, NULL, 10);
@@ -1194,6 +1253,7 @@ SNode* createCreateComponentNodeStmt(SAstCreateContext* pCxt, ENodeType type, co
}
SNode* createDropComponentNodeStmt(SAstCreateContext* pCxt, ENodeType type, const SToken* pDnodeId) {
+ CHECK_PARSER_STATUS(pCxt);
SDropComponentNodeStmt* pStmt = nodesMakeNode(type);
CHECK_OUT_OF_MEM(pStmt);
pStmt->dnodeId = taosStr2Int32(pDnodeId->z, NULL, 10);
@@ -1201,30 +1261,27 @@ SNode* createDropComponentNodeStmt(SAstCreateContext* pCxt, ENodeType type, cons
return (SNode*)pStmt;
}
-SNode* createTopicOptions(SAstCreateContext* pCxt) {
- STopicOptions* pOptions = nodesMakeNode(QUERY_NODE_TOPIC_OPTIONS);
- CHECK_OUT_OF_MEM(pOptions);
- pOptions->withTable = false;
- pOptions->withSchema = false;
- pOptions->withTag = false;
- return (SNode*)pOptions;
-}
-
SNode* createCreateTopicStmt(SAstCreateContext* pCxt, bool ignoreExists, const SToken* pTopicName, SNode* pQuery,
- const SToken* pSubscribeDbName, SNode* pOptions) {
+ const SToken* pSubDbName, SNode* pRealTable) {
+ CHECK_PARSER_STATUS(pCxt);
SCreateTopicStmt* pStmt = nodesMakeNode(QUERY_NODE_CREATE_TOPIC_STMT);
CHECK_OUT_OF_MEM(pStmt);
strncpy(pStmt->topicName, pTopicName->z, pTopicName->n);
pStmt->ignoreExists = ignoreExists;
- pStmt->pQuery = pQuery;
- if (NULL != pSubscribeDbName) {
- strncpy(pStmt->subscribeDbName, pSubscribeDbName->z, pSubscribeDbName->n);
+ if (NULL != pRealTable) {
+ strcpy(pStmt->subDbName, ((SRealTableNode*)pRealTable)->table.dbName);
+ strcpy(pStmt->subSTbName, ((SRealTableNode*)pRealTable)->table.tableName);
+ nodesDestroyNode(pRealTable);
+ } else if (NULL != pSubDbName) {
+ strncpy(pStmt->subDbName, pSubDbName->z, pSubDbName->n);
+ } else {
+ pStmt->pQuery = pQuery;
}
- pStmt->pOptions = (STopicOptions*)pOptions;
return (SNode*)pStmt;
}
SNode* createDropTopicStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const SToken* pTopicName) {
+ CHECK_PARSER_STATUS(pCxt);
SDropTopicStmt* pStmt = nodesMakeNode(QUERY_NODE_DROP_TOPIC_STMT);
CHECK_OUT_OF_MEM(pStmt);
strncpy(pStmt->topicName, pTopicName->z, pTopicName->n);
@@ -1232,7 +1289,19 @@ SNode* createDropTopicStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const
return (SNode*)pStmt;
}
+SNode* createDropCGroupStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const SToken* pCGroupId,
+ const SToken* pTopicName) {
+ CHECK_PARSER_STATUS(pCxt);
+ SDropCGroupStmt* pStmt = nodesMakeNode(QUERY_NODE_DROP_CGROUP_STMT);
+ CHECK_OUT_OF_MEM(pStmt);
+ pStmt->ignoreNotExists = ignoreNotExists;
+ strncpy(pStmt->topicName, pTopicName->z, pTopicName->n);
+ strncpy(pStmt->cgroup, pCGroupId->z, pCGroupId->n);
+ return (SNode*)pStmt;
+}
+
SNode* createAlterLocalStmt(SAstCreateContext* pCxt, const SToken* pConfig, const SToken* pValue) {
+ CHECK_PARSER_STATUS(pCxt);
SAlterLocalStmt* pStmt = nodesMakeNode(QUERY_NODE_ALTER_LOCAL_STMT);
CHECK_OUT_OF_MEM(pStmt);
trimString(pConfig->z, pConfig->n, pStmt->config, sizeof(pStmt->config));
@@ -1243,6 +1312,7 @@ SNode* createAlterLocalStmt(SAstCreateContext* pCxt, const SToken* pConfig, cons
}
SNode* createDefaultExplainOptions(SAstCreateContext* pCxt) {
+ CHECK_PARSER_STATUS(pCxt);
SExplainOptions* pOptions = nodesMakeNode(QUERY_NODE_EXPLAIN_OPTIONS);
CHECK_OUT_OF_MEM(pOptions);
pOptions->verbose = TSDB_DEFAULT_EXPLAIN_VERBOSE;
@@ -1251,16 +1321,19 @@ SNode* createDefaultExplainOptions(SAstCreateContext* pCxt) {
}
SNode* setExplainVerbose(SAstCreateContext* pCxt, SNode* pOptions, const SToken* pVal) {
+ CHECK_PARSER_STATUS(pCxt);
((SExplainOptions*)pOptions)->verbose = (0 == strncasecmp(pVal->z, "true", pVal->n));
return pOptions;
}
SNode* setExplainRatio(SAstCreateContext* pCxt, SNode* pOptions, const SToken* pVal) {
+ CHECK_PARSER_STATUS(pCxt);
((SExplainOptions*)pOptions)->ratio = taosStr2Double(pVal->z, NULL);
return pOptions;
}
SNode* createExplainStmt(SAstCreateContext* pCxt, bool analyze, SNode* pOptions, SNode* pQuery) {
+ CHECK_PARSER_STATUS(pCxt);
SExplainStmt* pStmt = nodesMakeNode(QUERY_NODE_EXPLAIN_STMT);
CHECK_OUT_OF_MEM(pStmt);
pStmt->analyze = analyze;
@@ -1270,9 +1343,7 @@ SNode* createExplainStmt(SAstCreateContext* pCxt, bool analyze, SNode* pOptions,
}
SNode* createDescribeStmt(SAstCreateContext* pCxt, SNode* pRealTable) {
- if (NULL == pRealTable) {
- return NULL;
- }
+ CHECK_PARSER_STATUS(pCxt);
SDescribeStmt* pStmt = nodesMakeNode(QUERY_NODE_DESCRIBE_STMT);
CHECK_OUT_OF_MEM(pStmt);
strcpy(pStmt->dbName, ((SRealTableNode*)pRealTable)->table.dbName);
@@ -1282,12 +1353,14 @@ SNode* createDescribeStmt(SAstCreateContext* pCxt, SNode* pRealTable) {
}
SNode* createResetQueryCacheStmt(SAstCreateContext* pCxt) {
+ CHECK_PARSER_STATUS(pCxt);
SNode* pStmt = nodesMakeNode(QUERY_NODE_RESET_QUERY_CACHE_STMT);
CHECK_OUT_OF_MEM(pStmt);
return pStmt;
}
SNode* createCompactStmt(SAstCreateContext* pCxt, SNodeList* pVgroups) {
+ CHECK_PARSER_STATUS(pCxt);
SNode* pStmt = nodesMakeNode(QUERY_NODE_COMPACT_STMT);
CHECK_OUT_OF_MEM(pStmt);
return pStmt;
@@ -1295,6 +1368,7 @@ SNode* createCompactStmt(SAstCreateContext* pCxt, SNodeList* pVgroups) {
SNode* createCreateFunctionStmt(SAstCreateContext* pCxt, bool ignoreExists, bool aggFunc, const SToken* pFuncName,
const SToken* pLibPath, SDataType dataType, int32_t bufSize) {
+ CHECK_PARSER_STATUS(pCxt);
if (pLibPath->n <= 2) {
pCxt->errCode = TSDB_CODE_PAR_SYNTAX_ERROR;
return NULL;
@@ -1311,6 +1385,7 @@ SNode* createCreateFunctionStmt(SAstCreateContext* pCxt, bool ignoreExists, bool
}
SNode* createDropFunctionStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const SToken* pFuncName) {
+ CHECK_PARSER_STATUS(pCxt);
SDropFunctionStmt* pStmt = nodesMakeNode(QUERY_NODE_DROP_FUNCTION_STMT);
CHECK_OUT_OF_MEM(pStmt);
pStmt->ignoreNotExists = ignoreNotExists;
@@ -1319,6 +1394,7 @@ SNode* createDropFunctionStmt(SAstCreateContext* pCxt, bool ignoreNotExists, con
}
SNode* createStreamOptions(SAstCreateContext* pCxt) {
+ CHECK_PARSER_STATUS(pCxt);
SStreamOptions* pOptions = nodesMakeNode(QUERY_NODE_STREAM_OPTIONS);
CHECK_OUT_OF_MEM(pOptions);
pOptions->triggerType = STREAM_TRIGGER_AT_ONCE;
@@ -1327,6 +1403,7 @@ SNode* createStreamOptions(SAstCreateContext* pCxt) {
SNode* createCreateStreamStmt(SAstCreateContext* pCxt, bool ignoreExists, const SToken* pStreamName, SNode* pRealTable,
SNode* pOptions, SNode* pQuery) {
+ CHECK_PARSER_STATUS(pCxt);
SCreateStreamStmt* pStmt = nodesMakeNode(QUERY_NODE_CREATE_STREAM_STMT);
CHECK_OUT_OF_MEM(pStmt);
strncpy(pStmt->streamName, pStreamName->z, pStreamName->n);
@@ -1342,6 +1419,7 @@ SNode* createCreateStreamStmt(SAstCreateContext* pCxt, bool ignoreExists, const
}
SNode* createDropStreamStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const SToken* pStreamName) {
+ CHECK_PARSER_STATUS(pCxt);
SDropStreamStmt* pStmt = nodesMakeNode(QUERY_NODE_DROP_STREAM_STMT);
CHECK_OUT_OF_MEM(pStmt);
strncpy(pStmt->streamName, pStreamName->z, pStreamName->n);
@@ -1350,6 +1428,7 @@ SNode* createDropStreamStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const
}
SNode* createKillStmt(SAstCreateContext* pCxt, ENodeType type, const SToken* pId) {
+ CHECK_PARSER_STATUS(pCxt);
SKillStmt* pStmt = nodesMakeNode(type);
CHECK_OUT_OF_MEM(pStmt);
pStmt->targetId = taosStr2Int32(pId->z, NULL, 10);
@@ -1357,30 +1436,35 @@ SNode* createKillStmt(SAstCreateContext* pCxt, ENodeType type, const SToken* pId
}
SNode* createMergeVgroupStmt(SAstCreateContext* pCxt, const SToken* pVgId1, const SToken* pVgId2) {
+ CHECK_PARSER_STATUS(pCxt);
SNode* pStmt = nodesMakeNode(QUERY_NODE_MERGE_VGROUP_STMT);
CHECK_OUT_OF_MEM(pStmt);
return pStmt;
}
SNode* createRedistributeVgroupStmt(SAstCreateContext* pCxt, const SToken* pVgId, SNodeList* pDnodes) {
+ CHECK_PARSER_STATUS(pCxt);
SNode* pStmt = nodesMakeNode(QUERY_NODE_REDISTRIBUTE_VGROUP_STMT);
CHECK_OUT_OF_MEM(pStmt);
return pStmt;
}
SNode* createSplitVgroupStmt(SAstCreateContext* pCxt, const SToken* pVgId) {
+ CHECK_PARSER_STATUS(pCxt);
SNode* pStmt = nodesMakeNode(QUERY_NODE_SPLIT_VGROUP_STMT);
CHECK_OUT_OF_MEM(pStmt);
return pStmt;
}
SNode* createSyncdbStmt(SAstCreateContext* pCxt, const SToken* pDbName) {
+ CHECK_PARSER_STATUS(pCxt);
SNode* pStmt = nodesMakeNode(QUERY_NODE_SYNCDB_STMT);
CHECK_OUT_OF_MEM(pStmt);
return pStmt;
}
SNode* createGrantStmt(SAstCreateContext* pCxt, int64_t privileges, SToken* pDbName, SToken* pUserName) {
+ CHECK_PARSER_STATUS(pCxt);
if (!checkDbName(pCxt, pDbName, false) || !checkUserName(pCxt, pUserName)) {
return NULL;
}
@@ -1393,6 +1477,7 @@ SNode* createGrantStmt(SAstCreateContext* pCxt, int64_t privileges, SToken* pDbN
}
SNode* createRevokeStmt(SAstCreateContext* pCxt, int64_t privileges, SToken* pDbName, SToken* pUserName) {
+ CHECK_PARSER_STATUS(pCxt);
if (!checkDbName(pCxt, pDbName, false) || !checkUserName(pCxt, pUserName)) {
return NULL;
}
diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c
index 5b59d1c080978217577581184834595432d6edc7..68c9684c97ac8eba986a339b7618e51bc02d7d79 100644
--- a/source/libs/parser/src/parAstParser.c
+++ b/source/libs/parser/src/parAstParser.c
@@ -13,11 +13,12 @@
* along with this program. If not, see .
*/
+#include "functionMgt.h"
#include "os.h"
-#include "parInt.h"
-
#include "parAst.h"
+#include "parInt.h"
#include "parToken.h"
+#include "systable.h"
typedef void* (*FMalloc)(size_t);
typedef void (*FFree)(void*);
@@ -86,3 +87,317 @@ abort_parse:
taosArrayDestroy(cxt.pPlaceholderValues);
return cxt.errCode;
}
+
+typedef struct SCollectMetaKeyCxt {
+ SParseContext* pParseCxt;
+ SParseMetaCache* pMetaCache;
+} SCollectMetaKeyCxt;
+
+static void destroyCollectMetaKeyCxt(SCollectMetaKeyCxt* pCxt) {
+ if (NULL != pCxt->pMetaCache) {
+ // TODO
+ }
+}
+
+typedef struct SCollectMetaKeyFromExprCxt {
+ SCollectMetaKeyCxt* pComCxt;
+ int32_t errCode;
+} SCollectMetaKeyFromExprCxt;
+
+static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt);
+
+static EDealRes collectMetaKeyFromFunction(SCollectMetaKeyFromExprCxt* pCxt, SFunctionNode* pFunc) {
+ if (fmIsBuiltinFunc(pFunc->functionName)) {
+ return TSDB_CODE_SUCCESS;
+ }
+ return reserveUdfInCache(pFunc->functionName, pCxt->pComCxt->pMetaCache);
+}
+
+static EDealRes collectMetaKeyFromRealTable(SCollectMetaKeyFromExprCxt* pCxt, SRealTableNode* pRealTable) {
+ pCxt->errCode = reserveTableMetaInCache(pCxt->pComCxt->pParseCxt->acctId, pRealTable->table.dbName,
+ pRealTable->table.tableName, pCxt->pComCxt->pMetaCache);
+ if (TSDB_CODE_SUCCESS == pCxt->errCode) {
+ pCxt->errCode = reserveTableVgroupInCache(pCxt->pComCxt->pParseCxt->acctId, pRealTable->table.dbName,
+ pRealTable->table.tableName, pCxt->pComCxt->pMetaCache);
+ }
+ if (TSDB_CODE_SUCCESS == pCxt->errCode) {
+ pCxt->errCode = reserveUserAuthInCache(pCxt->pComCxt->pParseCxt->acctId, pCxt->pComCxt->pParseCxt->pUser,
+ pRealTable->table.dbName, AUTH_TYPE_READ, pCxt->pComCxt->pMetaCache);
+ }
+ return TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_CONTINUE : DEAL_RES_ERROR;
+}
+
+static EDealRes collectMetaKeyFromTempTable(SCollectMetaKeyFromExprCxt* pCxt, STempTableNode* pTempTable) {
+ pCxt->errCode = collectMetaKeyFromQuery(pCxt->pComCxt, pTempTable->pSubquery);
+ return TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_CONTINUE : DEAL_RES_ERROR;
+}
+
+static EDealRes collectMetaKeyFromExprImpl(SNode* pNode, void* pContext) {
+ SCollectMetaKeyFromExprCxt* pCxt = pContext;
+ switch (nodeType(pNode)) {
+ case QUERY_NODE_FUNCTION:
+ return collectMetaKeyFromFunction(pCxt, (SFunctionNode*)pNode);
+ case QUERY_NODE_REAL_TABLE:
+ return collectMetaKeyFromRealTable(pCxt, (SRealTableNode*)pNode);
+ case QUERY_NODE_TEMP_TABLE:
+ return collectMetaKeyFromTempTable(pCxt, (STempTableNode*)pNode);
+ default:
+ break;
+ }
+ return DEAL_RES_CONTINUE;
+}
+
+static int32_t collectMetaKeyFromExprs(SCollectMetaKeyCxt* pCxt, SNodeList* pList) {
+ SCollectMetaKeyFromExprCxt cxt = {.pComCxt = pCxt, .errCode = TSDB_CODE_SUCCESS};
+ nodesWalkExprs(pList, collectMetaKeyFromExprImpl, &cxt);
+ return cxt.errCode;
+}
+
+static int32_t collectMetaKeyFromSetOperator(SCollectMetaKeyCxt* pCxt, SSetOperator* pStmt) {
+ int32_t code = collectMetaKeyFromQuery(pCxt, pStmt->pLeft);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = collectMetaKeyFromQuery(pCxt, pStmt->pRight);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = collectMetaKeyFromExprs(pCxt, pStmt->pOrderByList);
+ }
+ return code;
+}
+
+static int32_t collectMetaKeyFromSelect(SCollectMetaKeyCxt* pCxt, SSelectStmt* pStmt) {
+ SCollectMetaKeyFromExprCxt cxt = {.pComCxt = pCxt, .errCode = TSDB_CODE_SUCCESS};
+ nodesWalkSelectStmt(pStmt, SQL_CLAUSE_FROM, collectMetaKeyFromExprImpl, &cxt);
+ return cxt.errCode;
+}
+
+static int32_t collectMetaKeyFromCreateTable(SCollectMetaKeyCxt* pCxt, SCreateTableStmt* pStmt) {
+ if (NULL == pStmt->pTags) {
+ return reserveTableVgroupInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, pCxt->pMetaCache);
+ } else {
+ return reserveDbCfgInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pCxt->pMetaCache);
+ }
+}
+
+static int32_t collectMetaKeyFromCreateMultiTable(SCollectMetaKeyCxt* pCxt, SCreateMultiTableStmt* pStmt) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ SNode* pNode = NULL;
+ FOREACH(pNode, pStmt->pSubTables) {
+ SCreateSubTableClause* pClause = (SCreateSubTableClause*)pNode;
+ code =
+ reserveTableMetaInCache(pCxt->pParseCxt->acctId, pClause->useDbName, pClause->useTableName, pCxt->pMetaCache);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = reserveTableVgroupInCache(pCxt->pParseCxt->acctId, pClause->dbName, pClause->tableName, pCxt->pMetaCache);
+ }
+ if (TSDB_CODE_SUCCESS != code) {
+ break;
+ }
+ }
+ return code;
+}
+
+static int32_t collectMetaKeyFromAlterTable(SCollectMetaKeyCxt* pCxt, SAlterTableStmt* pStmt) {
+ int32_t code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, pCxt->pMetaCache);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = reserveTableVgroupInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, pCxt->pMetaCache);
+ }
+ return code;
+}
+
+static int32_t collectMetaKeyFromUseDatabase(SCollectMetaKeyCxt* pCxt, SUseDatabaseStmt* pStmt) {
+ return reserveDbVgVersionInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromCreateIndex(SCollectMetaKeyCxt* pCxt, SCreateIndexStmt* pStmt) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (INDEX_TYPE_SMA == pStmt->indexType) {
+ code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, pCxt->pParseCxt->db, pStmt->tableName, pCxt->pMetaCache);
+ if (TSDB_CODE_SUCCESS == code) {
+ code =
+ reserveTableVgroupInCache(pCxt->pParseCxt->acctId, pCxt->pParseCxt->db, pStmt->tableName, pCxt->pMetaCache);
+ }
+ }
+ return code;
+}
+
+static int32_t collectMetaKeyFromCreateTopic(SCollectMetaKeyCxt* pCxt, SCreateTopicStmt* pStmt) {
+ if (NULL != pStmt->pQuery) {
+ return collectMetaKeyFromQuery(pCxt, pStmt->pQuery);
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t collectMetaKeyFromExplain(SCollectMetaKeyCxt* pCxt, SExplainStmt* pStmt) {
+ return collectMetaKeyFromQuery(pCxt, pStmt->pQuery);
+}
+
+static int32_t collectMetaKeyFromCreateStream(SCollectMetaKeyCxt* pCxt, SCreateStreamStmt* pStmt) {
+ return collectMetaKeyFromQuery(pCxt, pStmt->pQuery);
+}
+
+static int32_t collectMetaKeyFromShowDnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_DNODES,
+ pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromShowMnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_MNODES,
+ pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromShowModules(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_MODULES,
+ pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromShowQnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_QNODES,
+ pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromShowSnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_SNODES,
+ pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromShowBnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_BNODES,
+ pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromShowDatabases(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_DATABASES,
+ pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromShowFunctions(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_FUNCTIONS,
+ pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromShowIndexes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_INDEXES,
+ pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromShowStables(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_STABLES,
+ pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromShowStreams(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_STREAMS,
+ pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromShowTables(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ int32_t code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB,
+ TSDB_INS_TABLE_USER_TABLES, pCxt->pMetaCache);
+ if (TSDB_CODE_SUCCESS == code) {
+ if (NULL != pStmt->pDbName) {
+ code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, ((SValueNode*)pStmt->pDbName)->literal, pCxt->pMetaCache);
+ } else {
+ code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, pCxt->pMetaCache);
+ }
+ }
+ return code;
+}
+
+static int32_t collectMetaKeyFromShowUsers(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_USERS,
+ pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromShowLicence(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_LICENCES,
+ pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromShowVgroups(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_VGROUPS,
+ pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromShowTopics(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_TOPICS,
+ pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromShowTransactions(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_TRANS,
+ pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt) {
+ switch (nodeType(pStmt)) {
+ case QUERY_NODE_SET_OPERATOR:
+ return collectMetaKeyFromSetOperator(pCxt, (SSetOperator*)pStmt);
+ case QUERY_NODE_SELECT_STMT:
+ return collectMetaKeyFromSelect(pCxt, (SSelectStmt*)pStmt);
+ case QUERY_NODE_CREATE_TABLE_STMT:
+ return collectMetaKeyFromCreateTable(pCxt, (SCreateTableStmt*)pStmt);
+ case QUERY_NODE_CREATE_MULTI_TABLE_STMT:
+ return collectMetaKeyFromCreateMultiTable(pCxt, (SCreateMultiTableStmt*)pStmt);
+ case QUERY_NODE_ALTER_TABLE_STMT:
+ return collectMetaKeyFromAlterTable(pCxt, (SAlterTableStmt*)pStmt);
+ case QUERY_NODE_USE_DATABASE_STMT:
+ return collectMetaKeyFromUseDatabase(pCxt, (SUseDatabaseStmt*)pStmt);
+ case QUERY_NODE_CREATE_INDEX_STMT:
+ return collectMetaKeyFromCreateIndex(pCxt, (SCreateIndexStmt*)pStmt);
+ case QUERY_NODE_CREATE_TOPIC_STMT:
+ return collectMetaKeyFromCreateTopic(pCxt, (SCreateTopicStmt*)pStmt);
+ case QUERY_NODE_EXPLAIN_STMT:
+ return collectMetaKeyFromExplain(pCxt, (SExplainStmt*)pStmt);
+ case QUERY_NODE_CREATE_STREAM_STMT:
+ return collectMetaKeyFromCreateStream(pCxt, (SCreateStreamStmt*)pStmt);
+ case QUERY_NODE_SHOW_DNODES_STMT:
+ return collectMetaKeyFromShowDnodes(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_MNODES_STMT:
+ return collectMetaKeyFromShowMnodes(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_MODULES_STMT:
+ return collectMetaKeyFromShowModules(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_QNODES_STMT:
+ return collectMetaKeyFromShowQnodes(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_SNODES_STMT:
+ return collectMetaKeyFromShowSnodes(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_BNODES_STMT:
+ return collectMetaKeyFromShowBnodes(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_DATABASES_STMT:
+ return collectMetaKeyFromShowDatabases(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_FUNCTIONS_STMT:
+ return collectMetaKeyFromShowFunctions(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_INDEXES_STMT:
+ return collectMetaKeyFromShowIndexes(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_STABLES_STMT:
+ return collectMetaKeyFromShowStables(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_STREAMS_STMT:
+ return collectMetaKeyFromShowStreams(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_TABLES_STMT:
+ return collectMetaKeyFromShowTables(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_USERS_STMT:
+ return collectMetaKeyFromShowUsers(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_LICENCE_STMT:
+ return collectMetaKeyFromShowLicence(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_VGROUPS_STMT:
+ return collectMetaKeyFromShowVgroups(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_TOPICS_STMT:
+ return collectMetaKeyFromShowTopics(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_TRANSACTIONS_STMT:
+ return collectMetaKeyFromShowTransactions(pCxt, (SShowStmt*)pStmt);
+ default:
+ break;
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t collectMetaKey(SParseContext* pParseCxt, SQuery* pQuery) {
+ SCollectMetaKeyCxt cxt = {.pParseCxt = pParseCxt, .pMetaCache = taosMemoryCalloc(1, sizeof(SParseMetaCache))};
+ if (NULL == cxt.pMetaCache) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ int32_t code = collectMetaKeyFromQuery(&cxt, pQuery->pRoot);
+ if (TSDB_CODE_SUCCESS == code) {
+ TSWAP(pQuery->pMetaCache, cxt.pMetaCache);
+ }
+ destroyCollectMetaKeyCxt(&cxt);
+ return code;
+}
diff --git a/source/libs/parser/src/parAuthenticator.c b/source/libs/parser/src/parAuthenticator.c
index 250e7910d69847a130fa4f0b2132b3dcb99da8e7..2670e5710b9f5418c401e9799678c68d82c8f29d 100644
--- a/source/libs/parser/src/parAuthenticator.c
+++ b/source/libs/parser/src/parAuthenticator.c
@@ -18,23 +18,30 @@
#include "parInt.h"
typedef struct SAuthCxt {
- SParseContext* pParseCxt;
- int32_t errCode;
+ SParseContext* pParseCxt;
+ SParseMetaCache* pMetaCache;
+ int32_t errCode;
} SAuthCxt;
static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt);
-static int32_t checkAuth(SParseContext* pCxt, const char* pDbName, AUTH_TYPE type) {
- if (pCxt->isSuperUser) {
+static int32_t checkAuth(SAuthCxt* pCxt, const char* pDbName, AUTH_TYPE type) {
+ SParseContext* pParseCxt = pCxt->pParseCxt;
+ if (pParseCxt->isSuperUser) {
return TSDB_CODE_SUCCESS;
}
SName name;
- tNameSetDbName(&name, pCxt->acctId, pDbName, strlen(pDbName));
+ tNameSetDbName(&name, pParseCxt->acctId, pDbName, strlen(pDbName));
char dbFname[TSDB_DB_FNAME_LEN] = {0};
tNameGetFullDbName(&name, dbFname);
+ int32_t code = TSDB_CODE_SUCCESS;
bool pass = false;
- int32_t code =
- catalogChkAuth(pCxt->pCatalog, pCxt->pTransporter, &pCxt->mgmtEpSet, pCxt->pUser, dbFname, type, &pass);
+ if (NULL != pCxt->pMetaCache) {
+ code = getUserAuthFromCache(pCxt->pMetaCache, pParseCxt->pUser, dbFname, type, &pass);
+ } else {
+ code = catalogChkAuth(pParseCxt->pCatalog, pParseCxt->pTransporter, &pParseCxt->mgmtEpSet, pParseCxt->pUser,
+ dbFname, type, &pass);
+ }
return TSDB_CODE_SUCCESS == code ? (pass ? TSDB_CODE_SUCCESS : TSDB_CODE_PAR_PERMISSION_DENIED) : code;
}
@@ -45,7 +52,7 @@ static EDealRes authSubquery(SAuthCxt* pCxt, SNode* pStmt) {
static EDealRes authSelectImpl(SNode* pNode, void* pContext) {
SAuthCxt* pCxt = pContext;
if (QUERY_NODE_REAL_TABLE == nodeType(pNode)) {
- pCxt->errCode = checkAuth(pCxt->pParseCxt, ((SRealTableNode*)pNode)->table.dbName, AUTH_TYPE_READ);
+ pCxt->errCode = checkAuth(pCxt, ((SRealTableNode*)pNode)->table.dbName, AUTH_TYPE_READ);
return TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_CONTINUE : DEAL_RES_ERROR;
} else if (QUERY_NODE_TEMP_TABLE == nodeType(pNode)) {
return authSubquery(pCxt, ((STempTableNode*)pNode)->pSubquery);
@@ -79,87 +86,8 @@ static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt) {
return authSetOperator(pCxt, (SSetOperator*)pStmt);
case QUERY_NODE_SELECT_STMT:
return authSelect(pCxt, (SSelectStmt*)pStmt);
- case QUERY_NODE_CREATE_DATABASE_STMT:
- case QUERY_NODE_DROP_DATABASE_STMT:
- case QUERY_NODE_ALTER_DATABASE_STMT:
- case QUERY_NODE_CREATE_TABLE_STMT:
- case QUERY_NODE_CREATE_SUBTABLE_CLAUSE:
- case QUERY_NODE_CREATE_MULTI_TABLE_STMT:
- case QUERY_NODE_DROP_TABLE_CLAUSE:
- case QUERY_NODE_DROP_TABLE_STMT:
- case QUERY_NODE_DROP_SUPER_TABLE_STMT:
- case QUERY_NODE_ALTER_TABLE_STMT:
- case QUERY_NODE_CREATE_USER_STMT:
- case QUERY_NODE_ALTER_USER_STMT:
- break;
- case QUERY_NODE_DROP_USER_STMT: {
+ case QUERY_NODE_DROP_USER_STMT:
return authDropUser(pCxt, (SDropUserStmt*)pStmt);
- }
- case QUERY_NODE_USE_DATABASE_STMT:
- case QUERY_NODE_CREATE_DNODE_STMT:
- case QUERY_NODE_DROP_DNODE_STMT:
- case QUERY_NODE_ALTER_DNODE_STMT:
- case QUERY_NODE_CREATE_INDEX_STMT:
- case QUERY_NODE_DROP_INDEX_STMT:
- case QUERY_NODE_CREATE_QNODE_STMT:
- case QUERY_NODE_DROP_QNODE_STMT:
- case QUERY_NODE_CREATE_BNODE_STMT:
- case QUERY_NODE_DROP_BNODE_STMT:
- case QUERY_NODE_CREATE_SNODE_STMT:
- case QUERY_NODE_DROP_SNODE_STMT:
- case QUERY_NODE_CREATE_MNODE_STMT:
- case QUERY_NODE_DROP_MNODE_STMT:
- case QUERY_NODE_CREATE_TOPIC_STMT:
- case QUERY_NODE_DROP_TOPIC_STMT:
- case QUERY_NODE_ALTER_LOCAL_STMT:
- case QUERY_NODE_EXPLAIN_STMT:
- case QUERY_NODE_DESCRIBE_STMT:
- case QUERY_NODE_RESET_QUERY_CACHE_STMT:
- case QUERY_NODE_COMPACT_STMT:
- case QUERY_NODE_CREATE_FUNCTION_STMT:
- case QUERY_NODE_DROP_FUNCTION_STMT:
- case QUERY_NODE_CREATE_STREAM_STMT:
- case QUERY_NODE_DROP_STREAM_STMT:
- case QUERY_NODE_MERGE_VGROUP_STMT:
- case QUERY_NODE_REDISTRIBUTE_VGROUP_STMT:
- case QUERY_NODE_SPLIT_VGROUP_STMT:
- case QUERY_NODE_SYNCDB_STMT:
- case QUERY_NODE_GRANT_STMT:
- case QUERY_NODE_REVOKE_STMT:
- case QUERY_NODE_SHOW_DNODES_STMT:
- case QUERY_NODE_SHOW_MNODES_STMT:
- case QUERY_NODE_SHOW_MODULES_STMT:
- case QUERY_NODE_SHOW_QNODES_STMT:
- case QUERY_NODE_SHOW_SNODES_STMT:
- case QUERY_NODE_SHOW_BNODES_STMT:
- case QUERY_NODE_SHOW_CLUSTER_STMT:
- case QUERY_NODE_SHOW_DATABASES_STMT:
- case QUERY_NODE_SHOW_FUNCTIONS_STMT:
- case QUERY_NODE_SHOW_INDEXES_STMT:
- case QUERY_NODE_SHOW_STABLES_STMT:
- case QUERY_NODE_SHOW_STREAMS_STMT:
- case QUERY_NODE_SHOW_TABLES_STMT:
- case QUERY_NODE_SHOW_USERS_STMT:
- case QUERY_NODE_SHOW_LICENCE_STMT:
- case QUERY_NODE_SHOW_VGROUPS_STMT:
- case QUERY_NODE_SHOW_TOPICS_STMT:
- case QUERY_NODE_SHOW_CONSUMERS_STMT:
- case QUERY_NODE_SHOW_SUBSCRIBES_STMT:
- case QUERY_NODE_SHOW_SMAS_STMT:
- case QUERY_NODE_SHOW_CONFIGS_STMT:
- case QUERY_NODE_SHOW_CONNECTIONS_STMT:
- case QUERY_NODE_SHOW_QUERIES_STMT:
- case QUERY_NODE_SHOW_VNODES_STMT:
- case QUERY_NODE_SHOW_APPS_STMT:
- case QUERY_NODE_SHOW_SCORES_STMT:
- case QUERY_NODE_SHOW_VARIABLE_STMT:
- case QUERY_NODE_SHOW_CREATE_DATABASE_STMT:
- case QUERY_NODE_SHOW_CREATE_TABLE_STMT:
- case QUERY_NODE_SHOW_CREATE_STABLE_STMT:
- case QUERY_NODE_SHOW_TRANSACTIONS_STMT:
- case QUERY_NODE_KILL_CONNECTION_STMT:
- case QUERY_NODE_KILL_QUERY_STMT:
- case QUERY_NODE_KILL_TRANSACTION_STMT:
default:
break;
}
@@ -168,6 +96,6 @@ static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt) {
}
int32_t authenticate(SParseContext* pParseCxt, SQuery* pQuery) {
- SAuthCxt cxt = {.pParseCxt = pParseCxt, .errCode = TSDB_CODE_SUCCESS};
+ SAuthCxt cxt = {.pParseCxt = pParseCxt, .pMetaCache = pQuery->pMetaCache, .errCode = TSDB_CODE_SUCCESS};
return authQuery(&cxt, pQuery->pRoot);
}
diff --git a/source/libs/parser/src/parCalcConst.c b/source/libs/parser/src/parCalcConst.c
index 646ef4cf6293eb754eb04427954104d1c2de651a..42b001c1318058be96871918bea5aee0f084c82a 100644
--- a/source/libs/parser/src/parCalcConst.c
+++ b/source/libs/parser/src/parCalcConst.c
@@ -176,11 +176,11 @@ static int32_t calcConstProject(SNode* pProject, SNode** pNew) {
}
int32_t code = scalarCalculateConstants(pProject, pNew);
- if (TSDB_CODE_SUCCESS == code && QUERY_NODE_VALUE == nodeType(pNew) && NULL != pAssociation) {
+ if (TSDB_CODE_SUCCESS == code && QUERY_NODE_VALUE == nodeType(*pNew) && NULL != pAssociation) {
int32_t size = taosArrayGetSize(pAssociation);
for (int32_t i = 0; i < size; ++i) {
- SNode** pCol = taosArrayGet(pAssociation, i);
- *pCol = nodesCloneNode(pNew);
+ SNode** pCol = taosArrayGetP(pAssociation, i);
+ *pCol = nodesCloneNode(*pNew);
if (NULL == *pCol) {
return TSDB_CODE_OUT_OF_MEMORY;
}
@@ -189,11 +189,18 @@ static int32_t calcConstProject(SNode* pProject, SNode** pNew) {
return code;
}
-static int32_t calcConstProjections(SCalcConstContext* pCxt, SNodeList* pProjections, bool subquery) {
+static bool isUselessCol(bool hasSelectValFunc, SExprNode* pProj) {
+ if (hasSelectValFunc && QUERY_NODE_FUNCTION == nodeType(pProj) && fmIsSelectFunc(((SFunctionNode*)pProj)->funcId)) {
+ return false;
+ }
+ return NULL == ((SExprNode*)pProj)->pAssociation;
+}
+
+static int32_t calcConstProjections(SCalcConstContext* pCxt, SSelectStmt* pSelect, bool subquery) {
SNode* pProj = NULL;
- WHERE_EACH(pProj, pProjections) {
- if (subquery && NULL == ((SExprNode*)pProj)->pAssociation) {
- ERASE_NODE(pProjections);
+ WHERE_EACH(pProj, pSelect->pProjectionList) {
+ if (subquery && isUselessCol(pSelect->hasSelectValFunc, (SExprNode*)pProj)) {
+ ERASE_NODE(pSelect->pProjectionList);
continue;
}
SNode* pNew = NULL;
@@ -226,9 +233,9 @@ static int32_t calcConstGroupBy(SCalcConstContext* pCxt, SSelectStmt* pSelect) {
}
static int32_t calcConstSelect(SCalcConstContext* pCxt, SSelectStmt* pSelect, bool subquery) {
- int32_t code = calcConstProjections(pCxt, pSelect->pProjectionList, subquery);
+ int32_t code = calcConstFromTable(pCxt, pSelect);
if (TSDB_CODE_SUCCESS == code) {
- code = calcConstFromTable(pCxt, pSelect);
+ code = calcConstProjections(pCxt, pSelect, subquery);
}
if (TSDB_CODE_SUCCESS == code) {
code = calcConstSelectCondition(pCxt, pSelect, &pSelect->pWhere);
diff --git a/source/libs/parser/src/parInsert.c b/source/libs/parser/src/parInsert.c
index 239bd21abc53db744fb0d8def841b5d59b2eff11..422c48039743185070ca5ea5f704627d26217253 100644
--- a/source/libs/parser/src/parInsert.c
+++ b/source/libs/parser/src/parInsert.c
@@ -54,16 +54,17 @@ typedef struct SInsertParseContext {
SMsgBuf msg; // input
STableMeta* pTableMeta; // each table
SParsedDataColInfo tags; // each table
- SKVRowBuilder tagsBuilder; // each table
SVCreateTbReq createTblReq; // each table
SHashObj* pVgroupsHashObj; // global
SHashObj* pTableBlockHashObj; // global
SHashObj* pSubTableHashObj; // global
SArray* pVgDataBlocks; // global
SHashObj* pTableNameHashObj; // global
+ SHashObj* pDbFNameHashObj; // global
int32_t totalNum;
SVnodeModifOpStmt* pOutput;
SStmtCallback* pStmtCb;
+ SParseMetaCache* pMetaCache;
} SInsertParseContext;
typedef int32_t (*_row_append_fn_t)(SMsgBuf* pMsgBuf, const void* value, int32_t len, void* param);
@@ -72,9 +73,10 @@ static uint8_t TRUE_VALUE = (uint8_t)TSDB_TRUE;
static uint8_t FALSE_VALUE = (uint8_t)TSDB_FALSE;
typedef struct SKvParam {
- SKVRowBuilder* builder;
- SSchema* schema;
- char buf[TSDB_MAX_TAGS_LEN];
+ int16_t pos;
+ SArray* pTagVals;
+ SSchema* schema;
+ char buf[TSDB_MAX_TAGS_LEN];
} SKvParam;
typedef struct SMemParam {
@@ -92,15 +94,15 @@ typedef struct SMemParam {
} \
} while (0)
-static int32_t skipInsertInto(SInsertParseContext* pCxt) {
+static int32_t skipInsertInto(char** pSql, SMsgBuf* pMsg) {
SToken sToken;
- NEXT_TOKEN(pCxt->pSql, sToken);
+ NEXT_TOKEN(*pSql, sToken);
if (TK_INSERT != sToken.type) {
- return buildSyntaxErrMsg(&pCxt->msg, "keyword INSERT is expected", sToken.z);
+ return buildSyntaxErrMsg(pMsg, "keyword INSERT is expected", sToken.z);
}
- NEXT_TOKEN(pCxt->pSql, sToken);
+ NEXT_TOKEN(*pSql, sToken);
if (TK_INTO != sToken.type) {
- return buildSyntaxErrMsg(&pCxt->msg, "keyword INTO is expected", sToken.z);
+ return buildSyntaxErrMsg(pMsg, "keyword INTO is expected", sToken.z);
}
return TSDB_CODE_SUCCESS;
}
@@ -189,6 +191,7 @@ static int32_t createSName(SName* pName, SToken* pTableName, int32_t acctId, con
const char* msg1 = "name too long";
const char* msg2 = "invalid database name";
const char* msg3 = "db is not specified";
+ const char* msg4 = "invalid table name";
int32_t code = TSDB_CODE_SUCCESS;
char* p = strnchr(pTableName->z, TS_PATH_DELIMITER[0], pTableName->n, true);
@@ -207,7 +210,11 @@ static int32_t createSName(SName* pName, SToken* pTableName, int32_t acctId, con
}
int32_t tbLen = pTableName->n - dbLen - 1;
- char tbname[TSDB_TABLE_FNAME_LEN] = {0};
+ if (tbLen <= 0) {
+ return buildInvalidOperationMsg(pMsgBuf, msg4);
+ }
+
+ char tbname[TSDB_TABLE_FNAME_LEN] = {0};
strncpy(tbname, p + 1, tbLen);
/*tbLen = */ strdequote(tbname);
@@ -245,25 +252,46 @@ static int32_t createSName(SName* pName, SToken* pTableName, int32_t acctId, con
return code;
}
-static int32_t getTableMetaImpl(SInsertParseContext* pCxt, SName* name, char* dbFname, bool isStb) {
+static int32_t checkAuth(SInsertParseContext* pCxt, char* pDbFname, bool* pPass) {
+ SParseContext* pBasicCtx = pCxt->pComCxt;
+ if (NULL != pCxt->pMetaCache) {
+ return getUserAuthFromCache(pCxt->pMetaCache, pBasicCtx->pUser, pDbFname, AUTH_TYPE_WRITE, pPass);
+ }
+ return catalogChkAuth(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, pBasicCtx->pUser, pDbFname,
+ AUTH_TYPE_WRITE, pPass);
+}
+
+static int32_t getTableSchema(SInsertParseContext* pCxt, SName* pTbName, bool isStb, STableMeta** pTableMeta) {
+ SParseContext* pBasicCtx = pCxt->pComCxt;
+ if (NULL != pCxt->pMetaCache) {
+ return getTableMetaFromCache(pCxt->pMetaCache, pTbName, pTableMeta);
+ }
+ if (isStb) {
+ return catalogGetSTableMeta(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, pTbName,
+ pTableMeta);
+ }
+ return catalogGetTableMeta(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, pTbName, pTableMeta);
+}
+
+static int32_t getTableVgroup(SInsertParseContext* pCxt, SName* pTbName, SVgroupInfo* pVg) {
SParseContext* pBasicCtx = pCxt->pComCxt;
+ if (NULL != pCxt->pMetaCache) {
+ return getTableVgroupFromCache(pCxt->pMetaCache, pTbName, pVg);
+ }
+ return catalogGetTableHashVgroup(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, pTbName, pVg);
+}
+static int32_t getTableMetaImpl(SInsertParseContext* pCxt, SName* name, char* dbFname, bool isStb) {
bool pass = false;
- CHECK_CODE(catalogChkAuth(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, pBasicCtx->pUser,
- dbFname, AUTH_TYPE_WRITE, &pass));
+ CHECK_CODE(checkAuth(pCxt, dbFname, &pass));
if (!pass) {
return TSDB_CODE_PAR_PERMISSION_DENIED;
}
- if (isStb) {
- CHECK_CODE(catalogGetSTableMeta(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, name,
- &pCxt->pTableMeta));
- } else {
- CHECK_CODE(catalogGetTableMeta(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, name,
- &pCxt->pTableMeta));
- ASSERT(pCxt->pTableMeta->tableInfo.rowSize > 0);
+
+ CHECK_CODE(getTableSchema(pCxt, name, isStb, &pCxt->pTableMeta));
+ if (!isStb) {
SVgroupInfo vg;
- CHECK_CODE(
- catalogGetTableHashVgroup(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, name, &vg));
+ CHECK_CODE(getTableVgroup(pCxt, name, &vg));
CHECK_CODE(taosHashPut(pCxt->pVgroupsHashObj, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg)));
}
return TSDB_CODE_SUCCESS;
@@ -419,7 +447,7 @@ static int parseTime(char** end, SToken* pToken, int16_t timePrec, int64_t* time
return TSDB_CODE_SUCCESS;
}
-static FORCE_INLINE int32_t checkAndTrimValue(SToken* pToken, uint32_t type, char* tmpTokenBuf, SMsgBuf* pMsgBuf) {
+static FORCE_INLINE int32_t checkAndTrimValue(SToken* pToken, char* tmpTokenBuf, SMsgBuf* pMsgBuf) {
if ((pToken->type != TK_NOW && pToken->type != TK_TODAY && pToken->type != TK_NK_INTEGER &&
pToken->type != TK_NK_STRING && pToken->type != TK_NK_FLOAT && pToken->type != TK_NK_BOOL &&
pToken->type != TK_NULL && pToken->type != TK_NK_HEX && pToken->type != TK_NK_OCT &&
@@ -465,7 +493,7 @@ static int32_t parseValueToken(char** end, SToken* pToken, SSchema* pSchema, int
uint64_t uv;
char* endptr = NULL;
- int32_t code = checkAndTrimValue(pToken, pSchema->type, tmpTokenBuf, pMsgBuf);
+ int32_t code = checkAndTrimValue(pToken, tmpTokenBuf, pMsgBuf);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@@ -605,7 +633,7 @@ static int32_t parseValueToken(char** end, SToken* pToken, SSchema* pSchema, int
case TSDB_DATA_TYPE_BINARY: {
// Too long values will raise the invalid sql error message
if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) {
- return buildSyntaxErrMsg(pMsgBuf, "string data overflow", pToken->z);
+ return generateSyntaxErrMsg(pMsgBuf, TSDB_CODE_PAR_VALUE_TOO_LONG, pSchema->name);
}
return func(pMsgBuf, pToken->z, pToken->n, param);
@@ -614,14 +642,12 @@ static int32_t parseValueToken(char** end, SToken* pToken, SSchema* pSchema, int
case TSDB_DATA_TYPE_NCHAR: {
return func(pMsgBuf, pToken->z, pToken->n, param);
}
-
case TSDB_DATA_TYPE_JSON: {
if (pToken->n > (TSDB_MAX_JSON_TAG_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) {
return buildSyntaxErrMsg(pMsgBuf, "json string too long than 4095", pToken->z);
}
return func(pMsgBuf, pToken->z, pToken->n, param);
}
-
case TSDB_DATA_TYPE_TIMESTAMP: {
int64_t tmpVal;
if (parseTime(end, pToken, timePrec, &tmpVal, pMsgBuf) != TSDB_CODE_SUCCESS) {
@@ -653,6 +679,9 @@ static FORCE_INLINE int32_t MemRowAppend(SMsgBuf* pMsgBuf, const void* value, in
int32_t output = 0;
const char* rowEnd = tdRowEnd(rb->pBuf);
if (!taosMbsToUcs4(value, len, (TdUcs4*)varDataVal(rowEnd), pa->schema->bytes - VARSTR_HEADER_SIZE, &output)) {
+ if (errno == E2BIG) {
+ return generateSyntaxErrMsg(pMsgBuf, TSDB_CODE_PAR_VALUE_TOO_LONG, pa->schema->name);
+ }
char buf[512] = {0};
snprintf(buf, tListLen(buf), "%s", strerror(errno));
return buildSyntaxErrMsg(pMsgBuf, buf, value);
@@ -701,7 +730,7 @@ static int32_t parseBoundColumns(SInsertParseContext* pCxt, SParsedDataColInfo*
}
lastColIdx = index;
pColList->cols[index].valStat = VAL_STAT_HAS;
- pColList->boundColumns[pColList->numOfBound] = index + PRIMARYKEY_TIMESTAMP_COL_ID;
+ pColList->boundColumns[pColList->numOfBound] = index;
++pColList->numOfBound;
switch (pSchema[t].type) {
case TSDB_DATA_TYPE_BINARY:
@@ -743,95 +772,282 @@ static int32_t parseBoundColumns(SInsertParseContext* pCxt, SParsedDataColInfo*
return TSDB_CODE_SUCCESS;
}
-static int32_t KvRowAppend(SMsgBuf* pMsgBuf, const void* value, int32_t len, void* param) {
- SKvParam* pa = (SKvParam*)param;
+static void buildCreateTbReq(SVCreateTbReq* pTbReq, const char* tname, STag* pTag, int64_t suid) {
+ pTbReq->type = TD_CHILD_TABLE;
+ pTbReq->name = strdup(tname);
+ pTbReq->ctb.suid = suid;
+ pTbReq->ctb.pTag = (uint8_t*)pTag;
- int8_t type = pa->schema->type;
- int16_t colId = pa->schema->colId;
+ return;
+}
- if (TSDB_DATA_TYPE_JSON == type) {
- return parseJsontoTagData(value, pa->builder, pMsgBuf, colId);
- }
+static int32_t parseTagToken(char** end, SToken* pToken, SSchema* pSchema, int16_t timePrec, STagVal* val,
+ SMsgBuf* pMsgBuf) {
+ int64_t iv;
+ uint64_t uv;
+ char* endptr = NULL;
+
+ if (isNullStr(pToken)) {
+ if (TSDB_DATA_TYPE_TIMESTAMP == pSchema->type && PRIMARYKEY_TIMESTAMP_COL_ID == pSchema->colId) {
+ return buildSyntaxErrMsg(pMsgBuf, "primary timestamp should not be null", pToken->z);
+ }
- if (value == NULL) { // it is a null data
- // tdAppendColValToRow(rb, pa->schema->colId, pa->schema->type, TD_VTYPE_NULL, value, false, pa->toffset,
- // pa->colIdx);
return TSDB_CODE_SUCCESS;
}
- if (TSDB_DATA_TYPE_BINARY == type) {
- STR_WITH_SIZE_TO_VARSTR(pa->buf, value, len);
- tdAddColToKVRow(pa->builder, colId, pa->buf, varDataTLen(pa->buf));
- } else if (TSDB_DATA_TYPE_NCHAR == type) {
- // if the converted output len is over than pColumnModel->bytes, return error: 'Argument list too long'
- int32_t output = 0;
- if (!taosMbsToUcs4(value, len, (TdUcs4*)varDataVal(pa->buf), pa->schema->bytes - VARSTR_HEADER_SIZE, &output)) {
- char buf[512] = {0};
- snprintf(buf, tListLen(buf), " taosMbsToUcs4 error:%s", strerror(errno));
- return buildSyntaxErrMsg(pMsgBuf, buf, value);
+ val->cid = pSchema->colId;
+ val->type = pSchema->type;
+
+ switch (pSchema->type) {
+ case TSDB_DATA_TYPE_BOOL: {
+ if ((pToken->type == TK_NK_BOOL || pToken->type == TK_NK_STRING) && (pToken->n != 0)) {
+ if (strncmp(pToken->z, "true", pToken->n) == 0) {
+ *(int8_t*)(&val->i64) = TRUE_VALUE;
+ } else if (strncmp(pToken->z, "false", pToken->n) == 0) {
+ *(int8_t*)(&val->i64) = FALSE_VALUE;
+ } else {
+ return buildSyntaxErrMsg(pMsgBuf, "invalid bool data", pToken->z);
+ }
+ } else if (pToken->type == TK_NK_INTEGER) {
+ *(int8_t*)(&val->i64) = ((taosStr2Int64(pToken->z, NULL, 10) == 0) ? FALSE_VALUE : TRUE_VALUE);
+ } else if (pToken->type == TK_NK_FLOAT) {
+ *(int8_t*)(&val->i64) = ((taosStr2Double(pToken->z, NULL) == 0) ? FALSE_VALUE : TRUE_VALUE);
+ } else {
+ return buildSyntaxErrMsg(pMsgBuf, "invalid bool data", pToken->z);
+ }
+ break;
}
- varDataSetLen(pa->buf, output);
- tdAddColToKVRow(pa->builder, colId, pa->buf, varDataTLen(pa->buf));
- } else {
- tdAddColToKVRow(pa->builder, colId, value, TYPE_BYTES[type]);
- }
+ case TSDB_DATA_TYPE_TINYINT: {
+ if (TSDB_CODE_SUCCESS != toInteger(pToken->z, pToken->n, 10, &iv)) {
+ return buildSyntaxErrMsg(pMsgBuf, "invalid tinyint data", pToken->z);
+ } else if (!IS_VALID_TINYINT(iv)) {
+ return buildSyntaxErrMsg(pMsgBuf, "tinyint data overflow", pToken->z);
+ }
- return TSDB_CODE_SUCCESS;
-}
+ *(int8_t*)(&val->i64) = iv;
+ break;
+ }
-static int32_t buildCreateTbReq(SVCreateTbReq* pTbReq, const char* tname, SKVRow row, int64_t suid) {
- pTbReq->type = TD_CHILD_TABLE;
- pTbReq->name = strdup(tname);
- pTbReq->ctb.suid = suid;
- pTbReq->ctb.pTag = row;
+ case TSDB_DATA_TYPE_UTINYINT: {
+ if (TSDB_CODE_SUCCESS != toUInteger(pToken->z, pToken->n, 10, &uv)) {
+ return buildSyntaxErrMsg(pMsgBuf, "invalid unsigned tinyint data", pToken->z);
+ } else if (!IS_VALID_UTINYINT(uv)) {
+ return buildSyntaxErrMsg(pMsgBuf, "unsigned tinyint data overflow", pToken->z);
+ }
+ *(uint8_t*)(&val->i64) = uv;
+ break;
+ }
+
+ case TSDB_DATA_TYPE_SMALLINT: {
+ if (TSDB_CODE_SUCCESS != toInteger(pToken->z, pToken->n, 10, &iv)) {
+ return buildSyntaxErrMsg(pMsgBuf, "invalid smallint data", pToken->z);
+ } else if (!IS_VALID_SMALLINT(iv)) {
+ return buildSyntaxErrMsg(pMsgBuf, "smallint data overflow", pToken->z);
+ }
+ *(int16_t*)(&val->i64) = iv;
+ break;
+ }
+
+ case TSDB_DATA_TYPE_USMALLINT: {
+ if (TSDB_CODE_SUCCESS != toUInteger(pToken->z, pToken->n, 10, &uv)) {
+ return buildSyntaxErrMsg(pMsgBuf, "invalid unsigned smallint data", pToken->z);
+ } else if (!IS_VALID_USMALLINT(uv)) {
+ return buildSyntaxErrMsg(pMsgBuf, "unsigned smallint data overflow", pToken->z);
+ }
+ *(uint16_t*)(&val->i64) = uv;
+ break;
+ }
+
+ case TSDB_DATA_TYPE_INT: {
+ if (TSDB_CODE_SUCCESS != toInteger(pToken->z, pToken->n, 10, &iv)) {
+ return buildSyntaxErrMsg(pMsgBuf, "invalid int data", pToken->z);
+ } else if (!IS_VALID_INT(iv)) {
+ return buildSyntaxErrMsg(pMsgBuf, "int data overflow", pToken->z);
+ }
+ *(int32_t*)(&val->i64) = iv;
+ break;
+ }
+
+ case TSDB_DATA_TYPE_UINT: {
+ if (TSDB_CODE_SUCCESS != toUInteger(pToken->z, pToken->n, 10, &uv)) {
+ return buildSyntaxErrMsg(pMsgBuf, "invalid unsigned int data", pToken->z);
+ } else if (!IS_VALID_UINT(uv)) {
+ return buildSyntaxErrMsg(pMsgBuf, "unsigned int data overflow", pToken->z);
+ }
+ *(uint32_t*)(&val->i64) = uv;
+ break;
+ }
+
+ case TSDB_DATA_TYPE_BIGINT: {
+ if (TSDB_CODE_SUCCESS != toInteger(pToken->z, pToken->n, 10, &iv)) {
+ return buildSyntaxErrMsg(pMsgBuf, "invalid bigint data", pToken->z);
+ } else if (!IS_VALID_BIGINT(iv)) {
+ return buildSyntaxErrMsg(pMsgBuf, "bigint data overflow", pToken->z);
+ }
+
+ val->i64 = iv;
+ break;
+ }
+
+ case TSDB_DATA_TYPE_UBIGINT: {
+ if (TSDB_CODE_SUCCESS != toUInteger(pToken->z, pToken->n, 10, &uv)) {
+ return buildSyntaxErrMsg(pMsgBuf, "invalid unsigned bigint data", pToken->z);
+ } else if (!IS_VALID_UBIGINT(uv)) {
+ return buildSyntaxErrMsg(pMsgBuf, "unsigned bigint data overflow", pToken->z);
+ }
+ *(uint64_t*)(&val->i64) = uv;
+ break;
+ }
+
+ case TSDB_DATA_TYPE_FLOAT: {
+ double dv;
+ if (TK_NK_ILLEGAL == toDouble(pToken, &dv, &endptr)) {
+ return buildSyntaxErrMsg(pMsgBuf, "illegal float data", pToken->z);
+ }
+ if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || dv > FLT_MAX || dv < -FLT_MAX || isinf(dv) ||
+ isnan(dv)) {
+ return buildSyntaxErrMsg(pMsgBuf, "illegal float data", pToken->z);
+ }
+ *(float*)(&val->i64) = dv;
+ break;
+ }
+
+ case TSDB_DATA_TYPE_DOUBLE: {
+ double dv;
+ if (TK_NK_ILLEGAL == toDouble(pToken, &dv, &endptr)) {
+ return buildSyntaxErrMsg(pMsgBuf, "illegal double data", pToken->z);
+ }
+ if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || isinf(dv) || isnan(dv)) {
+ return buildSyntaxErrMsg(pMsgBuf, "illegal double data", pToken->z);
+ }
+
+ *(double*)(&val->i64) = dv;
+ break;
+ }
+
+ case TSDB_DATA_TYPE_BINARY: {
+ // Too long values will raise the invalid sql error message
+ if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) {
+ return generateSyntaxErrMsg(pMsgBuf, TSDB_CODE_PAR_VALUE_TOO_LONG, pSchema->name);
+ }
+ val->pData = pToken->z;
+ val->nData = pToken->n;
+ break;
+ }
+
+ case TSDB_DATA_TYPE_NCHAR: {
+ int32_t output = 0;
+ void* p = taosMemoryCalloc(1, pToken->n * TSDB_NCHAR_SIZE);
+ if (p == NULL) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ if (!taosMbsToUcs4(pToken->z, pToken->n, (TdUcs4*)(p), pToken->n * TSDB_NCHAR_SIZE, &output)) {
+ if (errno == E2BIG) {
+ taosMemoryFree(p);
+ return generateSyntaxErrMsg(pMsgBuf, TSDB_CODE_PAR_VALUE_TOO_LONG, pSchema->name);
+ }
+ char buf[512] = {0};
+ snprintf(buf, tListLen(buf), " taosMbsToUcs4 error:%s", strerror(errno));
+ taosMemoryFree(p);
+ return buildSyntaxErrMsg(pMsgBuf, buf, pToken->z);
+ }
+ val->pData = p;
+ val->nData = output;
+ break;
+ }
+ case TSDB_DATA_TYPE_TIMESTAMP: {
+ if (parseTime(end, pToken, timePrec, &iv, pMsgBuf) != TSDB_CODE_SUCCESS) {
+ return buildSyntaxErrMsg(pMsgBuf, "invalid timestamp", pToken->z);
+ }
+
+ val->i64 = iv;
+ break;
+ }
+ }
return TSDB_CODE_SUCCESS;
}
// pSql -> tag1_value, ...)
static int32_t parseTagsClause(SInsertParseContext* pCxt, SSchema* pSchema, uint8_t precision, const char* tName) {
- if (tdInitKVRowBuilder(&pCxt->tagsBuilder) < 0) {
- return TSDB_CODE_TSC_OUT_OF_MEMORY;
- }
-
- SKvParam param = {.builder = &pCxt->tagsBuilder};
- SToken sToken;
- bool isParseBindParam = false;
- char tmpTokenBuf[TSDB_MAX_BYTES_PER_ROW] = {0}; // used for deleting Escape character: \\, \', \"
+ int32_t code = TSDB_CODE_SUCCESS;
+ SArray* pTagVals = taosArrayInit(pCxt->tags.numOfBound, sizeof(STagVal));
+ SToken sToken;
+ bool isParseBindParam = false;
+ bool isJson = false;
+ STag* pTag = NULL;
for (int i = 0; i < pCxt->tags.numOfBound; ++i) {
NEXT_TOKEN_WITH_PREV(pCxt->pSql, sToken);
if (sToken.type == TK_NK_QUESTION) {
isParseBindParam = true;
if (NULL == pCxt->pStmtCb) {
- return buildSyntaxErrMsg(&pCxt->msg, "? only used in stmt", sToken.z);
+ code = buildSyntaxErrMsg(&pCxt->msg, "? only used in stmt", sToken.z);
+ goto end;
}
continue;
}
if (isParseBindParam) {
- return buildInvalidOperationMsg(&pCxt->msg, "no mix usage for ? and tag values");
+ code = buildInvalidOperationMsg(&pCxt->msg, "no mix usage for ? and tag values");
+ goto end;
}
- SSchema* pTagSchema = &pSchema[pCxt->tags.boundColumns[i] - 1]; // colId starts with 1
- param.schema = pTagSchema;
- CHECK_CODE(
- parseValueToken(&pCxt->pSql, &sToken, pTagSchema, precision, tmpTokenBuf, KvRowAppend, ¶m, &pCxt->msg));
+ SSchema* pTagSchema = &pSchema[pCxt->tags.boundColumns[i]];
+ char* tmpTokenBuf = taosMemoryCalloc(1, sToken.n); // this can be optimize with parse column
+ code = checkAndTrimValue(&sToken, tmpTokenBuf, &pCxt->msg);
+ if (code != TSDB_CODE_SUCCESS) {
+ taosMemoryFree(tmpTokenBuf);
+ goto end;
+ }
+ if (pTagSchema->type == TSDB_DATA_TYPE_JSON) {
+ if (sToken.n > (TSDB_MAX_JSON_TAG_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) {
+ code = buildSyntaxErrMsg(&pCxt->msg, "json string too long than 4095", sToken.z);
+ taosMemoryFree(tmpTokenBuf);
+ goto end;
+ }
+ code = parseJsontoTagData(sToken.z, pTagVals, &pTag, &pCxt->msg);
+ taosMemoryFree(tmpTokenBuf);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+ isJson = true;
+ } else {
+ STagVal val = {0};
+ code = parseTagToken(&pCxt->pSql, &sToken, pTagSchema, precision, &val, &pCxt->msg);
+ if (TSDB_CODE_SUCCESS != code) {
+ taosMemoryFree(tmpTokenBuf);
+ goto end;
+ }
+ if (pTagSchema->type != TSDB_DATA_TYPE_BINARY) {
+ taosMemoryFree(tmpTokenBuf);
+ }
+ taosArrayPush(pTagVals, &val);
+ }
}
if (isParseBindParam) {
- return TSDB_CODE_SUCCESS;
+ code = TSDB_CODE_SUCCESS;
+ goto end;
}
- SKVRow row = tdGetKVRowFromBuilder(&pCxt->tagsBuilder);
- if (NULL == row) {
- return buildInvalidOperationMsg(&pCxt->msg, "out of memory");
+ if (!isJson && (code = tTagNew(pTagVals, 1, false, &pTag)) != TSDB_CODE_SUCCESS) {
+ goto end;
}
- tdSortKVRowByColIdx(row);
- return buildCreateTbReq(&pCxt->createTblReq, tName, row, pCxt->pTableMeta->suid);
+ buildCreateTbReq(&pCxt->createTblReq, tName, pTag, pCxt->pTableMeta->suid);
+
+end:
+ for (int i = 0; i < taosArrayGetSize(pTagVals); ++i) {
+ STagVal* p = (STagVal*)taosArrayGet(pTagVals, i);
+ if (IS_VAR_DATA_TYPE(p->type)) {
+ taosMemoryFree(p->pData);
+ }
+ }
+ taosArrayDestroy(pTagVals);
+ return code;
}
static int32_t cloneTableMeta(STableMeta* pSrc, STableMeta** pDst) {
@@ -845,10 +1061,8 @@ static int32_t cloneTableMeta(STableMeta* pSrc, STableMeta** pDst) {
static int32_t storeTableMeta(SInsertParseContext* pCxt, SHashObj* pHash, SName* pTableName, const char* pName,
int32_t len, STableMeta* pMeta) {
- SVgroupInfo vg;
- SParseContext* pBasicCtx = pCxt->pComCxt;
- CHECK_CODE(
- catalogGetTableHashVgroup(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, pTableName, &vg));
+ SVgroupInfo vg;
+ CHECK_CODE(getTableVgroup(pCxt, pTableName, &vg));
CHECK_CODE(taosHashPut(pCxt->pVgroupsHashObj, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg)));
pMeta->uid = 0;
@@ -903,7 +1117,7 @@ static int32_t parseUsingClause(SInsertParseContext* pCxt, SName* name, char* tb
if (TK_NK_LP != sToken.type) {
return buildSyntaxErrMsg(&pCxt->msg, "( is expected", sToken.z);
}
- CHECK_CODE(parseTagsClause(pCxt, pCxt->pTableMeta->schema, getTableInfo(pCxt->pTableMeta).precision, name->tname));
+ CHECK_CODE(parseTagsClause(pCxt, pTagsSchema, getTableInfo(pCxt->pTableMeta).precision, name->tname));
NEXT_VALID_TOKEN(pCxt->pSql, sToken);
if (TK_NK_COMMA == sToken.type) {
return generateSyntaxErrMsg(&pCxt->msg, TSDB_CODE_PAR_TAGS_NOT_MATCHED);
@@ -929,7 +1143,7 @@ static int parseOneRow(SInsertParseContext* pCxt, STableDataBlocks* pDataBlocks,
// 1. set the parsed value from sql string
for (int i = 0; i < spd->numOfBound; ++i) {
NEXT_TOKEN_WITH_PREV(pCxt->pSql, sToken);
- SSchema* pSchema = &schema[spd->boundColumns[i] - 1];
+ SSchema* pSchema = &schema[spd->boundColumns[i]];
if (sToken.type == TK_NK_QUESTION) {
isParseBindParam = true;
@@ -940,6 +1154,10 @@ static int parseOneRow(SInsertParseContext* pCxt, STableDataBlocks* pDataBlocks,
continue;
}
+ if (TK_NK_RP == sToken.type) {
+ return generateSyntaxErrMsg(&pCxt->msg, TSDB_CODE_PAR_INVALID_COLUMNS_NUM);
+ }
+
if (isParseBindParam) {
return buildInvalidOperationMsg(&pCxt->msg, "no mix usage for ? and values");
}
@@ -1050,7 +1268,6 @@ void destroyCreateSubTbReq(SVCreateTbReq* pReq) {
static void destroyInsertParseContextForTable(SInsertParseContext* pCxt) {
taosMemoryFreeClear(pCxt->pTableMeta);
destroyBoundColumnInfo(&pCxt->tags);
- tdDestroyKVRowBuilder(&pCxt->tagsBuilder);
destroyCreateSubTbReq(&pCxt->createTblReq);
}
@@ -1059,6 +1276,7 @@ static void destroyInsertParseContext(SInsertParseContext* pCxt) {
taosHashCleanup(pCxt->pVgroupsHashObj);
taosHashCleanup(pCxt->pSubTableHashObj);
taosHashCleanup(pCxt->pTableNameHashObj);
+ taosHashCleanup(pCxt->pDbFNameHashObj);
destroyBlockHashmap(pCxt->pTableBlockHashObj);
destroyBlockArrayList(pCxt->pVgDataBlocks);
@@ -1070,10 +1288,9 @@ static void destroyInsertParseContext(SInsertParseContext* pCxt) {
// VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
// [...];
static int32_t parseInsertBody(SInsertParseContext* pCxt) {
- int32_t tbNum = 0;
- char tbFName[TSDB_TABLE_FNAME_LEN];
- bool autoCreateTbl = false;
- STableMeta* pMeta = NULL;
+ int32_t tbNum = 0;
+ char tbFName[TSDB_TABLE_FNAME_LEN];
+ bool autoCreateTbl = false;
// for each table
while (1) {
@@ -1088,7 +1305,7 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) {
if (sToken.type && pCxt->pSql[0]) {
return buildSyntaxErrMsg(&pCxt->msg, "invalid charactor in SQL", sToken.z);
}
-
+
if (0 == pCxt->totalNum && (!TSDB_QUERY_HAS_TYPE(pCxt->pOutput->insertType, TSDB_QUERY_TYPE_STMT_INSERT))) {
return buildInvalidOperationMsg(&pCxt->msg, "no data in sql");
}
@@ -1116,19 +1333,22 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) {
NEXT_TOKEN(pCxt->pSql, sToken);
SName name;
- createSName(&name, &tbnameToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg);
- tNameExtractFullName(&name, tbFName);
+ CHECK_CODE(createSName(&name, &tbnameToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg));
+ CHECK_CODE(isNotSchemalessDb(pCxt->pComCxt, name.dbname));
+
+ tNameExtractFullName(&name, tbFName);
CHECK_CODE(taosHashPut(pCxt->pTableNameHashObj, tbFName, strlen(tbFName), &name, sizeof(SName)));
+ char dbFName[TSDB_DB_FNAME_LEN];
+ tNameGetFullDbName(&name, dbFName);
+ CHECK_CODE(taosHashPut(pCxt->pDbFNameHashObj, dbFName, strlen(dbFName), dbFName, sizeof(dbFName)));
- // USING cluase
+ // USING clause
if (TK_USING == sToken.type) {
CHECK_CODE(parseUsingClause(pCxt, &name, tbFName));
NEXT_TOKEN(pCxt->pSql, sToken);
autoCreateTbl = true;
} else {
- char dbFName[TSDB_DB_FNAME_LEN];
- tNameGetFullDbName(&name, dbFName);
CHECK_CODE(getTableMeta(pCxt, &name, dbFName));
}
@@ -1136,12 +1356,10 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) {
CHECK_CODE(getDataBlockFromList(pCxt->pTableBlockHashObj, tbFName, strlen(tbFName), TSDB_DEFAULT_PAYLOAD_SIZE,
sizeof(SSubmitBlk), getTableInfo(pCxt->pTableMeta).rowSize, pCxt->pTableMeta,
&dataBuf, NULL, &pCxt->createTblReq));
- pMeta = pCxt->pTableMeta;
- pCxt->pTableMeta = NULL;
if (TK_NK_LP == sToken.type) {
// pSql -> field1_name, ...)
- CHECK_CODE(parseBoundColumns(pCxt, &dataBuf->boundColumnInfo, getTableColumnSchema(pMeta)));
+ CHECK_CODE(parseBoundColumns(pCxt, &dataBuf->boundColumnInfo, getTableColumnSchema(pCxt->pTableMeta)));
NEXT_TOKEN(pCxt->pSql, sToken);
}
@@ -1177,8 +1395,8 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
memcpy(tags, &pCxt->tags, sizeof(pCxt->tags));
- (*pCxt->pStmtCb->setInfoFn)(pCxt->pStmtCb->pStmt, pMeta, tags, tbFName, autoCreateTbl, pCxt->pVgroupsHashObj,
- pCxt->pTableBlockHashObj);
+ (*pCxt->pStmtCb->setInfoFn)(pCxt->pStmtCb->pStmt, pCxt->pTableMeta, tags, tbFName, autoCreateTbl,
+ pCxt->pVgroupsHashObj, pCxt->pTableBlockHashObj);
memset(&pCxt->tags, 0, sizeof(pCxt->tags));
pCxt->pVgroupsHashObj = NULL;
@@ -1195,6 +1413,23 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) {
return buildOutput(pCxt);
}
+int32_t isNotSchemalessDb(SParseContext* pContext, char *dbName){
+ SName name;
+ tNameSetDbName(&name, pContext->acctId, dbName, strlen(dbName));
+ char dbFname[TSDB_DB_FNAME_LEN] = {0};
+ tNameGetFullDbName(&name, dbFname);
+ SDbCfgInfo pInfo = {0};
+ int32_t code = catalogGetDBCfg(pContext->pCatalog, pContext->pTransporter, &pContext->mgmtEpSet, dbFname, &pInfo);
+ if (code != TSDB_CODE_SUCCESS) {
+ parserError("catalogGetDBCfg error, code:%s, dbFName:%s", tstrerror(code), dbFname);
+ return code;
+ }
+ if (pInfo.schemaless){
+ parserError("can not insert into schemaless db:%s", dbFname);
+ return TSDB_CODE_SML_INVALID_DB_CONF;
+ }
+ return TSDB_CODE_SUCCESS;
+}
// INSERT INTO
// tb_name
// [USING stb_name [(tag1_name, ...)] TAGS (tag1_value, ...)]
@@ -1209,6 +1444,7 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery) {
.pTableMeta = NULL,
.pSubTableHashObj = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR), true, HASH_NO_LOCK),
.pTableNameHashObj = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR), true, HASH_NO_LOCK),
+ .pDbFNameHashObj = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR), true, HASH_NO_LOCK),
.totalNum = 0,
.pOutput = (SVnodeModifOpStmt*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT),
.pStmtCb = pContext->pStmtCb};
@@ -1223,7 +1459,7 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery) {
}
if (NULL == context.pVgroupsHashObj || NULL == context.pTableBlockHashObj || NULL == context.pSubTableHashObj ||
- NULL == context.pTableNameHashObj || NULL == context.pOutput) {
+ NULL == context.pTableNameHashObj || NULL == context.pDbFNameHashObj || NULL == context.pOutput) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -1236,12 +1472,11 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery) {
if (NULL == *pQuery) {
return TSDB_CODE_OUT_OF_MEMORY;
}
-
- (*pQuery)->execMode = QUERY_EXEC_MODE_SCHEDULE;
- (*pQuery)->haveResultSet = false;
- (*pQuery)->msgType = TDMT_VND_SUBMIT;
- (*pQuery)->pRoot = (SNode*)context.pOutput;
}
+ (*pQuery)->execMode = QUERY_EXEC_MODE_SCHEDULE;
+ (*pQuery)->haveResultSet = false;
+ (*pQuery)->msgType = TDMT_VND_SUBMIT;
+ (*pQuery)->pRoot = (SNode*)context.pOutput;
if (NULL == (*pQuery)->pTableList) {
(*pQuery)->pTableList = taosArrayInit(taosHashGetSize(context.pTableNameHashObj), sizeof(SName));
@@ -1250,9 +1485,16 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery) {
}
}
+ if (NULL == (*pQuery)->pDbList) {
+ (*pQuery)->pDbList = taosArrayInit(taosHashGetSize(context.pDbFNameHashObj), TSDB_DB_FNAME_LEN);
+ if (NULL == (*pQuery)->pDbList) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+
context.pOutput->payloadType = PAYLOAD_TYPE_KV;
- int32_t code = skipInsertInto(&context);
+ int32_t code = skipInsertInto(&context.pSql, &context.msg);
if (TSDB_CODE_SUCCESS == code) {
code = parseInsertBody(&context);
}
@@ -1262,11 +1504,182 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery) {
taosArrayPush((*pQuery)->pTableList, pTable);
pTable = taosHashIterate(context.pTableNameHashObj, pTable);
}
+
+ char* pDb = taosHashIterate(context.pDbFNameHashObj, NULL);
+ while (NULL != pDb) {
+ taosArrayPush((*pQuery)->pDbList, pDb);
+ pDb = taosHashIterate(context.pDbFNameHashObj, pDb);
+ }
}
destroyInsertParseContext(&context);
return code;
}
+typedef struct SInsertParseSyntaxCxt {
+ SParseContext* pComCxt;
+ char* pSql;
+ SMsgBuf msg;
+ SParseMetaCache* pMetaCache;
+} SInsertParseSyntaxCxt;
+
+static int32_t skipParentheses(SInsertParseSyntaxCxt* pCxt) {
+ SToken sToken;
+ while (1) {
+ NEXT_TOKEN(pCxt->pSql, sToken);
+ if (TK_NK_RP == sToken.type) {
+ break;
+ }
+ if (0 == sToken.n) {
+ return buildSyntaxErrMsg(&pCxt->msg, ") expected", NULL);
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t skipBoundColumns(SInsertParseSyntaxCxt* pCxt) { return skipParentheses(pCxt); }
+
+// pSql -> (field1_value, ...) [(field1_value2, ...) ...]
+static int32_t skipValuesClause(SInsertParseSyntaxCxt* pCxt) {
+ int32_t numOfRows = 0;
+ SToken sToken;
+ while (1) {
+ int32_t index = 0;
+ NEXT_TOKEN_KEEP_SQL(pCxt->pSql, sToken, index);
+ if (TK_NK_LP != sToken.type) {
+ break;
+ }
+ pCxt->pSql += index;
+
+ CHECK_CODE(skipParentheses(pCxt));
+ ++numOfRows;
+ }
+ if (0 == numOfRows) {
+ return buildSyntaxErrMsg(&pCxt->msg, "no any data points", NULL);
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t skipTagsClause(SInsertParseSyntaxCxt* pCxt) { return skipParentheses(pCxt); }
+
+// pSql -> [(tag1_name, ...)] TAGS (tag1_value, ...)
+static int32_t skipUsingClause(SInsertParseSyntaxCxt* pCxt) {
+ SToken sToken;
+ NEXT_TOKEN(pCxt->pSql, sToken);
+ if (TK_NK_LP == sToken.type) {
+ CHECK_CODE(skipBoundColumns(pCxt));
+ NEXT_TOKEN(pCxt->pSql, sToken);
+ }
+
+ if (TK_TAGS != sToken.type) {
+ return buildSyntaxErrMsg(&pCxt->msg, "TAGS is expected", sToken.z);
+ }
+ // pSql -> (tag1_value, ...)
+ NEXT_TOKEN(pCxt->pSql, sToken);
+ if (TK_NK_LP != sToken.type) {
+ return buildSyntaxErrMsg(&pCxt->msg, "( is expected", sToken.z);
+ }
+ CHECK_CODE(skipTagsClause(pCxt));
+
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t collectTableMetaKey(SInsertParseSyntaxCxt* pCxt, SToken* pTbToken) {
+ SName name;
+ CHECK_CODE(createSName(&name, pTbToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg));
+ CHECK_CODE(reserveUserAuthInCacheExt(pCxt->pComCxt->pUser, &name, AUTH_TYPE_WRITE, pCxt->pMetaCache));
+ CHECK_CODE(reserveTableMetaInCacheExt(&name, pCxt->pMetaCache));
+ CHECK_CODE(reserveTableVgroupInCacheExt(&name, pCxt->pMetaCache));
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t parseInsertBodySyntax(SInsertParseSyntaxCxt* pCxt) {
+ bool hasData = false;
+ // for each table
+ while (1) {
+ SToken sToken;
+
+ // pSql -> tb_name ...
+ NEXT_TOKEN(pCxt->pSql, sToken);
+
+ // no data in the sql string anymore.
+ if (sToken.n == 0) {
+ if (sToken.type && pCxt->pSql[0]) {
+ return buildSyntaxErrMsg(&pCxt->msg, "invalid charactor in SQL", sToken.z);
+ }
+
+ if (!hasData) {
+ return buildInvalidOperationMsg(&pCxt->msg, "no data in sql");
+ }
+ break;
+ }
+
+ hasData = false;
+
+ SToken tbnameToken = sToken;
+ NEXT_TOKEN(pCxt->pSql, sToken);
+
+ // USING clause
+ if (TK_USING == sToken.type) {
+ NEXT_TOKEN(pCxt->pSql, sToken);
+ CHECK_CODE(collectTableMetaKey(pCxt, &sToken));
+ CHECK_CODE(skipUsingClause(pCxt));
+ NEXT_TOKEN(pCxt->pSql, sToken);
+ } else {
+ CHECK_CODE(collectTableMetaKey(pCxt, &tbnameToken));
+ }
+
+ if (TK_NK_LP == sToken.type) {
+ // pSql -> field1_name, ...)
+ CHECK_CODE(skipBoundColumns(pCxt));
+ NEXT_TOKEN(pCxt->pSql, sToken);
+ }
+
+ if (TK_VALUES == sToken.type) {
+ // pSql -> (field1_value, ...) [(field1_value2, ...) ...]
+ CHECK_CODE(skipValuesClause(pCxt));
+ hasData = true;
+ continue;
+ }
+
+ // FILE csv_file_path
+ if (TK_FILE == sToken.type) {
+ // pSql -> csv_file_path
+ NEXT_TOKEN(pCxt->pSql, sToken);
+ if (0 == sToken.n || (TK_NK_STRING != sToken.type && TK_NK_ID != sToken.type)) {
+ return buildSyntaxErrMsg(&pCxt->msg, "file path is required following keyword FILE", sToken.z);
+ }
+ hasData = true;
+ continue;
+ }
+
+ return buildSyntaxErrMsg(&pCxt->msg, "keyword VALUES or FILE is expected", sToken.z);
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t parseInsertSyntax(SParseContext* pContext, SQuery** pQuery) {
+ SInsertParseSyntaxCxt context = {.pComCxt = pContext,
+ .pSql = (char*)pContext->pSql,
+ .msg = {.buf = pContext->pMsg, .len = pContext->msgLen},
+ .pMetaCache = taosMemoryCalloc(1, sizeof(SParseMetaCache))};
+ if (NULL == context.pMetaCache) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ int32_t code = skipInsertInto(&context.pSql, &context.msg);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = parseInsertBodySyntax(&context);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ *pQuery = taosMemoryCalloc(1, sizeof(SQuery));
+ if (NULL == *pQuery) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ TSWAP((*pQuery)->pMetaCache, context.pMetaCache);
+ }
+ return code;
+}
+
int32_t qCreateSName(SName* pName, const char* pTableName, int32_t acctId, char* dbName, char* msgBuf,
int32_t msgBufLen) {
SMsgBuf msg = {.buf = msgBuf, .len = msgBufLen};
@@ -1323,46 +1736,93 @@ int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, char* tN
return TSDB_CODE_QRY_APP_ERROR;
}
- SKVRowBuilder tagBuilder;
- if (tdInitKVRowBuilder(&tagBuilder) < 0) {
- return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ SArray* pTagArray = taosArrayInit(tags->numOfBound, sizeof(STagVal));
+ if (!pTagArray) {
+ return buildInvalidOperationMsg(&pBuf, "out of memory");
}
+ int32_t code = TSDB_CODE_SUCCESS;
SSchema* pSchema = pDataBlock->pTableMeta->schema;
- SKvParam param = {.builder = &tagBuilder};
+
+ bool isJson = false;
+ STag* pTag = NULL;
for (int c = 0; c < tags->numOfBound; ++c) {
if (bind[c].is_null && bind[c].is_null[0]) {
- KvRowAppend(&pBuf, NULL, 0, ¶m);
continue;
}
- SSchema* pTagSchema = &pSchema[tags->boundColumns[c] - 1]; // colId starts with 1
- param.schema = pTagSchema;
-
- int32_t colLen = pTagSchema->bytes;
+ SSchema* pTagSchema = &pSchema[tags->boundColumns[c]];
+ int32_t colLen = pTagSchema->bytes;
if (IS_VAR_DATA_TYPE(pTagSchema->type)) {
colLen = bind[c].length[0];
}
+ if (pTagSchema->type == TSDB_DATA_TYPE_JSON) {
+ if (colLen > (TSDB_MAX_JSON_TAG_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) {
+ code = buildSyntaxErrMsg(&pBuf, "json string too long than 4095", bind[c].buffer);
+ goto end;
+ }
- CHECK_CODE(KvRowAppend(&pBuf, (char*)bind[c].buffer, colLen, ¶m));
+ isJson = true;
+ char* tmp = taosMemoryCalloc(1, colLen + 1);
+ memcpy(tmp, bind[c].buffer, colLen);
+ code = parseJsontoTagData(tmp, pTagArray, &pTag, &pBuf);
+ taosMemoryFree(tmp);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+ } else {
+ STagVal val = {.cid = pTagSchema->colId, .type = pTagSchema->type};
+ if (pTagSchema->type == TSDB_DATA_TYPE_BINARY) {
+ val.pData = (uint8_t*)bind[c].buffer;
+ val.nData = colLen;
+ } else if (pTagSchema->type == TSDB_DATA_TYPE_NCHAR) {
+ int32_t output = 0;
+ void* p = taosMemoryCalloc(1, colLen * TSDB_NCHAR_SIZE);
+ if (p == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ if (!taosMbsToUcs4(bind[c].buffer, colLen, (TdUcs4*)(p), colLen * TSDB_NCHAR_SIZE, &output)) {
+ if (errno == E2BIG) {
+ taosMemoryFree(p);
+ code = generateSyntaxErrMsg(&pBuf, TSDB_CODE_PAR_VALUE_TOO_LONG, pTagSchema->name);
+ goto end;
+ }
+ char buf[512] = {0};
+ snprintf(buf, tListLen(buf), " taosMbsToUcs4 error:%s", strerror(errno));
+ taosMemoryFree(p);
+ code = buildSyntaxErrMsg(&pBuf, buf, bind[c].buffer);
+ goto end;
+ }
+ val.pData = p;
+ val.nData = output;
+ } else {
+ memcpy(&val.i64, bind[c].buffer, colLen);
+ }
+ taosArrayPush(pTagArray, &val);
+ }
}
- SKVRow row = tdGetKVRowFromBuilder(&tagBuilder);
- if (NULL == row) {
- tdDestroyKVRowBuilder(&tagBuilder);
- return buildInvalidOperationMsg(&pBuf, "out of memory");
+ if (!isJson && (code = tTagNew(pTagArray, 1, false, &pTag)) != TSDB_CODE_SUCCESS) {
+ goto end;
}
- tdSortKVRowByColIdx(row);
SVCreateTbReq tbReq = {0};
- CHECK_CODE(buildCreateTbReq(&tbReq, tName, row, suid));
- CHECK_CODE(buildCreateTbMsg(pDataBlock, &tbReq));
-
+ buildCreateTbReq(&tbReq, tName, pTag, suid);
+ code = buildCreateTbMsg(pDataBlock, &tbReq);
destroyCreateSubTbReq(&tbReq);
- tdDestroyKVRowBuilder(&tagBuilder);
- return TSDB_CODE_SUCCESS;
+end:
+ for (int i = 0; i < taosArrayGetSize(pTagArray); ++i) {
+ STagVal* p = (STagVal*)taosArrayGet(pTagArray, i);
+ if (p->type == TSDB_DATA_TYPE_NCHAR) {
+ taosMemoryFree(p->pData);
+ }
+ }
+ taosArrayDestroy(pTagArray);
+
+ return code;
}
int32_t qBindStmtColsValue(void* pBlock, TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen) {
@@ -1384,7 +1844,7 @@ int32_t qBindStmtColsValue(void* pBlock, TAOS_MULTI_BIND* bind, char* msgBuf, in
tdSRowResetBuf(pBuilder, row);
for (int c = 0; c < spd->numOfBound; ++c) {
- SSchema* pColSchema = &pSchema[spd->boundColumns[c] - 1];
+ SSchema* pColSchema = &pSchema[spd->boundColumns[c]];
if (bind[c].num != rowNum) {
return buildInvalidOperationMsg(&pBuf, "row number in each bind param should be the same");
@@ -1467,7 +1927,7 @@ int32_t qBindStmtSingleColValue(void* pBlock, TAOS_MULTI_BIND* bind, char* msgBu
tdSRowGetBuf(pBuilder, row);
}
- SSchema* pColSchema = &pSchema[spd->boundColumns[colIdx] - 1];
+ SSchema* pColSchema = &pSchema[spd->boundColumns[colIdx]];
if (bind->num != rowNum) {
return buildInvalidOperationMsg(&pBuf, "row number in each bind param should be the same");
@@ -1531,18 +1991,24 @@ int32_t qBindStmtSingleColValue(void* pBlock, TAOS_MULTI_BIND* bind, char* msgBu
return TSDB_CODE_SUCCESS;
}
-int32_t buildBoundFields(SParsedDataColInfo* boundInfo, SSchema* pSchema, int32_t* fieldNum, TAOS_FIELD** fields) {
+int32_t buildBoundFields(SParsedDataColInfo* boundInfo, SSchema* pSchema, int32_t* fieldNum, TAOS_FIELD_E** fields,
+ uint8_t timePrec) {
if (fields) {
*fields = taosMemoryCalloc(boundInfo->numOfBound, sizeof(TAOS_FIELD));
if (NULL == *fields) {
return TSDB_CODE_OUT_OF_MEMORY;
}
+ SSchema* schema = &pSchema[boundInfo->boundColumns[0]];
+ if (TSDB_DATA_TYPE_TIMESTAMP == schema->type) {
+ (*fields)[0].precision = timePrec;
+ }
+
for (int32_t i = 0; i < boundInfo->numOfBound; ++i) {
- SSchema* pTagSchema = &pSchema[boundInfo->boundColumns[i] - 1];
- strcpy((*fields)[i].name, pTagSchema->name);
- (*fields)[i].type = pTagSchema->type;
- (*fields)[i].bytes = pTagSchema->bytes;
+ schema = &pSchema[boundInfo->boundColumns[i]];
+ strcpy((*fields)[i].name, schema->name);
+ (*fields)[i].type = schema->type;
+ (*fields)[i].bytes = schema->bytes;
}
}
@@ -1551,7 +2017,7 @@ int32_t buildBoundFields(SParsedDataColInfo* boundInfo, SSchema* pSchema, int32_
return TSDB_CODE_SUCCESS;
}
-int32_t qBuildStmtTagFields(void* pBlock, void* boundTags, int32_t* fieldNum, TAOS_FIELD** fields) {
+int32_t qBuildStmtTagFields(void* pBlock, void* boundTags, int32_t* fieldNum, TAOS_FIELD_E** fields) {
STableDataBlocks* pDataBlock = (STableDataBlocks*)pBlock;
SParsedDataColInfo* tags = (SParsedDataColInfo*)boundTags;
if (NULL == tags) {
@@ -1566,12 +2032,12 @@ int32_t qBuildStmtTagFields(void* pBlock, void* boundTags, int32_t* fieldNum, TA
return TSDB_CODE_SUCCESS;
}
- CHECK_CODE(buildBoundFields(tags, pSchema, fieldNum, fields));
+ CHECK_CODE(buildBoundFields(tags, pSchema, fieldNum, fields, 0));
return TSDB_CODE_SUCCESS;
}
-int32_t qBuildStmtColFields(void* pBlock, int32_t* fieldNum, TAOS_FIELD** fields) {
+int32_t qBuildStmtColFields(void* pBlock, int32_t* fieldNum, TAOS_FIELD_E** fields) {
STableDataBlocks* pDataBlock = (STableDataBlocks*)pBlock;
SSchema* pSchema = getTableColumnSchema(pDataBlock->pTableMeta);
if (pDataBlock->boundColumnInfo.numOfBound <= 0) {
@@ -1583,7 +2049,8 @@ int32_t qBuildStmtColFields(void* pBlock, int32_t* fieldNum, TAOS_FIELD** fields
return TSDB_CODE_SUCCESS;
}
- CHECK_CODE(buildBoundFields(&pDataBlock->boundColumnInfo, pSchema, fieldNum, fields));
+ CHECK_CODE(buildBoundFields(&pDataBlock->boundColumnInfo, pSchema, fieldNum, fields,
+ pDataBlock->pTableMeta->tableInfo.precision));
return TSDB_CODE_SUCCESS;
}
@@ -1592,7 +2059,6 @@ int32_t qBuildStmtColFields(void* pBlock, int32_t* fieldNum, TAOS_FIELD** fields
typedef struct SmlExecTableHandle {
SParsedDataColInfo tags; // each table
- SKVRowBuilder tagsBuilder; // each table
SVCreateTbReq createTblReq; // each table
} SmlExecTableHandle;
@@ -1604,7 +2070,6 @@ typedef struct SmlExecHandle {
static void smlDestroyTableHandle(void* pHandle) {
SmlExecTableHandle* handle = (SmlExecTableHandle*)pHandle;
- tdDestroyKVRowBuilder(&handle->tagsBuilder);
destroyBoundColumnInfo(&handle->tags);
destroyCreateSubTbReq(&handle->createTblReq);
}
@@ -1638,7 +2103,7 @@ static int32_t smlBoundColumnData(SArray* cols, SParsedDataColInfo* pColList, SS
}
lastColIdx = index;
pColList->cols[index].valStat = VAL_STAT_HAS;
- pColList->boundColumns[pColList->numOfBound] = index + PRIMARYKEY_TIMESTAMP_COL_ID;
+ pColList->boundColumns[pColList->numOfBound] = index;
++pColList->numOfBound;
switch (pSchema[t].type) {
case TSDB_DATA_TYPE_BINARY:
@@ -1680,30 +2145,68 @@ static int32_t smlBoundColumnData(SArray* cols, SParsedDataColInfo* pColList, SS
return TSDB_CODE_SUCCESS;
}
-static int32_t smlBuildTagRow(SArray* cols, SKVRowBuilder* tagsBuilder, SParsedDataColInfo* tags, SSchema* pSchema,
- SKVRow* row, SMsgBuf* msg) {
- if (tdInitKVRowBuilder(tagsBuilder) < 0) {
+/**
+ * @brief No json tag for schemaless
+ *
+ * @param cols
+ * @param tags
+ * @param pSchema
+ * @param ppTag
+ * @param msg
+ * @return int32_t
+ */
+static int32_t smlBuildTagRow(SArray* cols, SParsedDataColInfo* tags, SSchema* pSchema, STag** ppTag, SMsgBuf* msg) {
+ SArray* pTagArray = taosArrayInit(tags->numOfBound, sizeof(STagVal));
+ if (!pTagArray) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
- SKvParam param = {.builder = tagsBuilder};
+ int32_t code = TSDB_CODE_SUCCESS;
for (int i = 0; i < tags->numOfBound; ++i) {
- SSchema* pTagSchema = &pSchema[tags->boundColumns[i] - 1]; // colId starts with 1
- param.schema = pTagSchema;
- SSmlKv* kv = taosArrayGetP(cols, i);
- if (IS_VAR_DATA_TYPE(kv->type)) {
- KvRowAppend(msg, kv->value, kv->length, ¶m);
+ SSchema* pTagSchema = &pSchema[tags->boundColumns[i]];
+ SSmlKv* kv = taosArrayGetP(cols, i);
+
+ STagVal val = {.cid = pTagSchema->colId, .type = pTagSchema->type};
+ if (pTagSchema->type == TSDB_DATA_TYPE_BINARY) {
+ val.pData = (uint8_t*)kv->value;
+ val.nData = kv->length;
+ } else if (pTagSchema->type == TSDB_DATA_TYPE_NCHAR) {
+ int32_t output = 0;
+ void *p = taosMemoryCalloc(1, kv->length * TSDB_NCHAR_SIZE);
+ if(p == NULL){
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ if (!taosMbsToUcs4(kv->value, kv->length, (TdUcs4*)(p), kv->length * TSDB_NCHAR_SIZE, &output)) {
+ if (errno == E2BIG) {
+ taosMemoryFree(p);
+ code = generateSyntaxErrMsg(msg, TSDB_CODE_PAR_VALUE_TOO_LONG, pTagSchema->name);
+ goto end;
+ }
+ char buf[512] = {0};
+ snprintf(buf, tListLen(buf), " taosMbsToUcs4 error:%s", strerror(errno));
+ taosMemoryFree(p);
+ code = buildSyntaxErrMsg(msg, buf, kv->value);
+ goto end;
+ }
+ val.pData = p;
+ val.nData = output;
} else {
- KvRowAppend(msg, &(kv->value), kv->length, ¶m);
+ memcpy(&val.i64, &(kv->value), kv->length);
}
+ taosArrayPush(pTagArray, &val);
}
- *row = tdGetKVRowFromBuilder(tagsBuilder);
- if (*row == NULL) {
- return TSDB_CODE_OUT_OF_MEMORY;
+ code = tTagNew(pTagArray, 1, false, ppTag);
+end:
+ for (int i = 0; i < taosArrayGetSize(pTagArray); ++i) {
+ STagVal* p = (STagVal*)taosArrayGet(pTagArray, i);
+ if (p->type == TSDB_DATA_TYPE_NCHAR) {
+ taosMemoryFree(p->pData);
+ }
}
- tdSortKVRowByColIdx(*row);
- return TSDB_CODE_SUCCESS;
+ taosArrayDestroy(pTagArray);
+ return code;
}
int32_t smlBindData(void* handle, SArray* tags, SArray* colsSchema, SArray* cols, bool format, STableMeta* pTableMeta,
@@ -1719,14 +2222,13 @@ int32_t smlBindData(void* handle, SArray* tags, SArray* colsSchema, SArray* cols
buildInvalidOperationMsg(&pBuf, "bound tags error");
return ret;
}
- SKVRow row = NULL;
- ret = smlBuildTagRow(tags, &smlHandle->tableExecHandle.tagsBuilder, &smlHandle->tableExecHandle.tags, pTagsSchema,
- &row, &pBuf);
+ STag* pTag = NULL;
+ ret = smlBuildTagRow(tags, &smlHandle->tableExecHandle.tags, pTagsSchema, &pTag, &pBuf);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
- buildCreateTbReq(&smlHandle->tableExecHandle.createTblReq, tableName, row, pTableMeta->suid);
+ buildCreateTbReq(&smlHandle->tableExecHandle.createTblReq, tableName, pTag, pTableMeta->suid);
STableDataBlocks* pDataBlock = NULL;
ret = getDataBlockFromList(smlHandle->pBlockHash, &pTableMeta->uid, sizeof(pTableMeta->uid),
@@ -1771,7 +2273,7 @@ int32_t smlBindData(void* handle, SArray* tags, SArray* colsSchema, SArray* cols
// 1. set the parsed value from sql string
for (int c = 0, j = 0; c < spd->numOfBound; ++c) {
- SSchema* pColSchema = &pSchema[spd->boundColumns[c] - 1];
+ SSchema* pColSchema = &pSchema[spd->boundColumns[c]];
param.schema = pColSchema;
getSTSRowAppendInfo(pBuilder->rowType, spd, c, ¶m.toffset, ¶m.colIdx);
diff --git a/source/libs/parser/src/parInsertData.c b/source/libs/parser/src/parInsertData.c
index f82c792c96bb9affb839c37c7ee82358e6c84162..1960073f295e278a66eec6e49d8d2b97418a14a5 100644
--- a/source/libs/parser/src/parInsertData.c
+++ b/source/libs/parser/src/parInsertData.c
@@ -74,7 +74,7 @@ void setBoundColumnInfo(SParsedDataColInfo* pColList, SSchema* pSchema, col_id_t
default:
break;
}
- pColList->boundColumns[i] = pSchema[i].colId;
+ pColList->boundColumns[i] = i;
}
pColList->allNullLen += pColList->flen;
pColList->boundNullLen = pColList->allNullLen; // default set allNullLen
diff --git a/source/libs/parser/src/parTokenizer.c b/source/libs/parser/src/parTokenizer.c
index 8fb9780f8a5b52c62822c25eb1b52be40d30c1d9..e9539073583c6d21a100efa5b33516eb9db18393 100644
--- a/source/libs/parser/src/parTokenizer.c
+++ b/source/libs/parser/src/parTokenizer.c
@@ -61,13 +61,13 @@ static SKeyword keywordTable[] = {
{"CONNS", TK_CONNS},
{"CONNECTION", TK_CONNECTION},
{"CONNECTIONS", TK_CONNECTIONS},
+ {"CONSUMER", TK_CONSUMER},
{"COUNT", TK_COUNT},
{"CREATE", TK_CREATE},
{"DATABASE", TK_DATABASE},
{"DATABASES", TK_DATABASES},
{"DAYS", TK_DAYS},
{"DBS", TK_DBS},
- {"DELAY", TK_DELAY},
{"DESC", TK_DESC},
{"DESCRIBE", TK_DESCRIBE},
{"DISTINCT", TK_DISTINCT},
@@ -155,7 +155,7 @@ static SKeyword keywordTable[] = {
{"RETENTIONS", TK_RETENTIONS},
{"REVOKE", TK_REVOKE},
{"ROLLUP", TK_ROLLUP},
- {"SCHEMA", TK_SCHEMA},
+ {"SCHEMALESS", TK_SCHEMALESS},
{"SCORES", TK_SCORES},
{"SELECT", TK_SELECT},
{"SESSION", TK_SESSION},
@@ -212,7 +212,6 @@ static SKeyword keywordTable[] = {
{"WATERMARK", TK_WATERMARK},
{"WHERE", TK_WHERE},
{"WINDOW_CLOSE", TK_WINDOW_CLOSE},
- {"WITH", TK_WITH},
{"WRITE", TK_WRITE},
{"_C0", TK_ROWTS},
{"_QENDTS", TK_QENDTS},
@@ -605,12 +604,12 @@ uint32_t tGetToken(const char* z, uint32_t* tokenId) {
}
return i;
}
- case '[': {
- for (i = 1; z[i] && z[i - 1] != ']'; i++) {
- }
- *tokenId = TK_NK_ID;
- return i;
- }
+ // case '[': {
+ // for (i = 1; z[i] && z[i - 1] != ']'; i++) {
+ // }
+ // *tokenId = TK_NK_ID;
+ // return i;
+ // }
case 'T':
case 't':
case 'F':
diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c
index e57fc3556455d731dc937e4dcdb9cdabe12c350b..f0cba6ddc9031387db7528fc1f47706f23ee2863 100644
--- a/source/libs/parser/src/parTranslater.c
+++ b/source/libs/parser/src/parTranslater.c
@@ -40,14 +40,16 @@ typedef struct STranslateContext {
SHashObj* pDbs;
SHashObj* pTables;
SExplainOptions* pExplainOpt;
+ SParseMetaCache* pMetaCache;
} STranslateContext;
typedef struct SFullDatabaseName {
char fullDbName[TSDB_DB_FNAME_LEN];
} SFullDatabaseName;
-static int32_t translateSubquery(STranslateContext* pCxt, SNode* pNode);
-static int32_t translateQuery(STranslateContext* pCxt, SNode* pNode);
+static int32_t translateSubquery(STranslateContext* pCxt, SNode* pNode);
+static int32_t translateQuery(STranslateContext* pCxt, SNode* pNode);
+static EDealRes translateValue(STranslateContext* pCxt, SValueNode* pVal);
static bool afterGroupBy(ESqlClause clause) { return clause > SQL_CLAUSE_GROUP_BY; }
@@ -101,12 +103,17 @@ static int32_t collectUseTable(const SName* pName, SHashObj* pDbs) {
static int32_t getTableMetaImpl(STranslateContext* pCxt, const SName* pName, STableMeta** pMeta) {
SParseContext* pParCxt = pCxt->pParseCxt;
- int32_t code = collectUseDatabase(pName, pCxt->pDbs);
- if (TSDB_CODE_SUCCESS == code) {
- code = collectUseTable(pName, pCxt->pTables);
- }
- if (TSDB_CODE_SUCCESS == code) {
- code = catalogGetTableMeta(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, pName, pMeta);
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (pParCxt->async) {
+ code = getTableMetaFromCache(pCxt->pMetaCache, pName, pMeta);
+ } else {
+ code = collectUseDatabase(pName, pCxt->pDbs);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = collectUseTable(pName, pCxt->pTables);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = catalogGetTableMeta(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, pName, pMeta);
+ }
}
if (TSDB_CODE_SUCCESS != code) {
parserError("catalogGetTableMeta error, code:%s, dbName:%s, tbName:%s", tstrerror(code), pName->dbname,
@@ -125,8 +132,13 @@ static int32_t refreshGetTableMeta(STranslateContext* pCxt, const char* pDbName,
SParseContext* pParCxt = pCxt->pParseCxt;
SName name;
toName(pCxt->pParseCxt->acctId, pDbName, pTableName, &name);
- int32_t code =
- catalogRefreshGetTableMeta(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, &name, pMeta, false);
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (pParCxt->async) {
+ code = getTableMetaFromCache(pCxt->pMetaCache, &name, pMeta);
+ } else {
+ code =
+ catalogRefreshGetTableMeta(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, &name, pMeta, false);
+ }
if (TSDB_CODE_SUCCESS != code) {
parserError("catalogRefreshGetTableMeta error, code:%s, dbName:%s, tbName:%s", tstrerror(code), pDbName,
pTableName);
@@ -134,29 +146,18 @@ static int32_t refreshGetTableMeta(STranslateContext* pCxt, const char* pDbName,
return code;
}
-static int32_t getTableDistVgInfo(STranslateContext* pCxt, const SName* pName, SArray** pVgInfo) {
- SParseContext* pParCxt = pCxt->pParseCxt;
- int32_t code = collectUseDatabase(pName, pCxt->pDbs);
- if (TSDB_CODE_SUCCESS == code) {
- code = collectUseTable(pName, pCxt->pTables);
- }
- if (TSDB_CODE_SUCCESS == code) {
- code = catalogGetTableDistVgInfo(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, pName, pVgInfo);
- }
- if (TSDB_CODE_SUCCESS != code) {
- parserError("catalogGetTableDistVgInfo error, code:%s, dbName:%s, tbName:%s", tstrerror(code), pName->dbname,
- pName->tname);
- }
- return code;
-}
-
static int32_t getDBVgInfoImpl(STranslateContext* pCxt, const SName* pName, SArray** pVgInfo) {
SParseContext* pParCxt = pCxt->pParseCxt;
char fullDbName[TSDB_DB_FNAME_LEN];
tNameGetFullDbName(pName, fullDbName);
- int32_t code = collectUseDatabaseImpl(fullDbName, pCxt->pDbs);
- if (TSDB_CODE_SUCCESS == code) {
- code = catalogGetDBVgInfo(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, fullDbName, pVgInfo);
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (pParCxt->async) {
+ code = getDbVgInfoFromCache(pCxt->pMetaCache, fullDbName, pVgInfo);
+ } else {
+ code = collectUseDatabaseImpl(fullDbName, pCxt->pDbs);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = catalogGetDBVgInfo(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, fullDbName, pVgInfo);
+ }
}
if (TSDB_CODE_SUCCESS != code) {
parserError("catalogGetDBVgInfo error, code:%s, dbFName:%s", tstrerror(code), fullDbName);
@@ -174,12 +175,17 @@ static int32_t getDBVgInfo(STranslateContext* pCxt, const char* pDbName, SArray*
static int32_t getTableHashVgroupImpl(STranslateContext* pCxt, const SName* pName, SVgroupInfo* pInfo) {
SParseContext* pParCxt = pCxt->pParseCxt;
- int32_t code = collectUseDatabase(pName, pCxt->pDbs);
- if (TSDB_CODE_SUCCESS == code) {
- code = collectUseTable(pName, pCxt->pTables);
- }
- if (TSDB_CODE_SUCCESS == code) {
- code = catalogGetTableHashVgroup(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, pName, pInfo);
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (pParCxt->async) {
+ code = getTableVgroupFromCache(pCxt->pMetaCache, pName, pInfo);
+ } else {
+ code = collectUseDatabase(pName, pCxt->pDbs);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = collectUseTable(pName, pCxt->pTables);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = catalogGetTableHashVgroup(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, pName, pInfo);
+ }
}
if (TSDB_CODE_SUCCESS != code) {
parserError("catalogGetTableHashVgroup error, code:%s, dbName:%s, tbName:%s", tstrerror(code), pName->dbname,
@@ -197,9 +203,14 @@ static int32_t getTableHashVgroup(STranslateContext* pCxt, const char* pDbName,
static int32_t getDBVgVersion(STranslateContext* pCxt, const char* pDbFName, int32_t* pVersion, int64_t* pDbId,
int32_t* pTableNum) {
SParseContext* pParCxt = pCxt->pParseCxt;
- int32_t code = collectUseDatabaseImpl(pDbFName, pCxt->pDbs);
- if (TSDB_CODE_SUCCESS == code) {
- code = catalogGetDBVgVersion(pParCxt->pCatalog, pDbFName, pVersion, pDbId, pTableNum);
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (pParCxt->async) {
+ code = getDbVgVersionFromCache(pCxt->pMetaCache, pDbFName, pVersion, pDbId, pTableNum);
+ } else {
+ code = collectUseDatabaseImpl(pDbFName, pCxt->pDbs);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = catalogGetDBVgVersion(pParCxt->pCatalog, pDbFName, pVersion, pDbId, pTableNum);
+ }
}
if (TSDB_CODE_SUCCESS != code) {
parserError("catalogGetDBVgVersion error, code:%s, dbFName:%s", tstrerror(code), pDbFName);
@@ -213,9 +224,14 @@ static int32_t getDBCfg(STranslateContext* pCxt, const char* pDbName, SDbCfgInfo
tNameSetDbName(&name, pCxt->pParseCxt->acctId, pDbName, strlen(pDbName));
char dbFname[TSDB_DB_FNAME_LEN] = {0};
tNameGetFullDbName(&name, dbFname);
- int32_t code = collectUseDatabaseImpl(dbFname, pCxt->pDbs);
- if (TSDB_CODE_SUCCESS == code) {
- code = catalogGetDBCfg(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, dbFname, pInfo);
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (pParCxt->async) {
+ code = getDbCfgFromCache(pCxt->pMetaCache, dbFname, pInfo);
+ } else {
+ code = collectUseDatabaseImpl(dbFname, pCxt->pDbs);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = catalogGetDBCfg(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, dbFname, pInfo);
+ }
}
if (TSDB_CODE_SUCCESS != code) {
parserError("catalogGetDBCfg error, code:%s, dbFName:%s", tstrerror(code), dbFname);
@@ -223,7 +239,28 @@ static int32_t getDBCfg(STranslateContext* pCxt, const char* pDbName, SDbCfgInfo
return code;
}
-static int32_t initTranslateContext(SParseContext* pParseCxt, STranslateContext* pCxt) {
+static int32_t getUdfInfo(STranslateContext* pCxt, SFunctionNode* pFunc) {
+ SParseContext* pParCxt = pCxt->pParseCxt;
+ SFuncInfo funcInfo = {0};
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (pParCxt->async) {
+ code = getUdfInfoFromCache(pCxt->pMetaCache, pFunc->functionName, &funcInfo);
+ } else {
+ code = catalogGetUdfInfo(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, pFunc->functionName,
+ &funcInfo);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ pFunc->funcType = FUNCTION_TYPE_UDF;
+ pFunc->funcId = TSDB_FUNC_TYPE_AGGREGATE == funcInfo.funcType ? FUNC_AGGREGATE_UDF_ID : FUNC_SCALAR_UDF_ID;
+ pFunc->node.resType.type = funcInfo.outputType;
+ pFunc->node.resType.bytes = funcInfo.outputLen;
+ pFunc->udfBufSize = funcInfo.bufSize;
+ tFreeSFuncInfo(&funcInfo);
+ }
+ return code;
+}
+
+static int32_t initTranslateContext(SParseContext* pParseCxt, SParseMetaCache* pMetaCache, STranslateContext* pCxt) {
pCxt->pParseCxt = pParseCxt;
pCxt->errCode = TSDB_CODE_SUCCESS;
pCxt->msgBuf.buf = pParseCxt->pMsg;
@@ -231,6 +268,7 @@ static int32_t initTranslateContext(SParseContext* pParseCxt, STranslateContext*
pCxt->pNsLevel = taosArrayInit(TARRAY_MIN_SIZE, POINTER_BYTES);
pCxt->currLevel = 0;
pCxt->currClause = 0;
+ pCxt->pMetaCache = pMetaCache;
pCxt->pDbs = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
pCxt->pTables = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
if (NULL == pCxt->pNsLevel || NULL == pCxt->pDbs || NULL == pCxt->pTables) {
@@ -342,12 +380,14 @@ static void setColumnInfoBySchema(const SRealTableNode* pTable, const SSchema* p
}
}
-static void setColumnInfoByExpr(const STableNode* pTable, SExprNode* pExpr, SColumnNode* pCol) {
+static void setColumnInfoByExpr(const STableNode* pTable, SExprNode* pExpr, SColumnNode** pColRef) {
+ SColumnNode* pCol = *pColRef;
+
pCol->pProjectRef = (SNode*)pExpr;
if (NULL == pExpr->pAssociation) {
pExpr->pAssociation = taosArrayInit(TARRAY_MIN_SIZE, POINTER_BYTES);
}
- taosArrayPush(pExpr->pAssociation, &pCol);
+ taosArrayPush(pExpr->pAssociation, &pColRef);
if (NULL != pTable) {
strcpy(pCol->tableAlias, pTable->tableAlias);
} else if (QUERY_NODE_COLUMN == nodeType(pExpr)) {
@@ -385,7 +425,7 @@ static int32_t createColumnsByTable(STranslateContext* pCxt, const STableNode* p
if (NULL == pCol) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_OUT_OF_MEMORY);
}
- setColumnInfoByExpr(pTable, (SExprNode*)pNode, pCol);
+ setColumnInfoByExpr(pTable, (SExprNode*)pNode, &pCol);
nodesListAppend(pList, (SNode*)pCol);
}
}
@@ -425,19 +465,22 @@ static bool isPrimaryKey(STempTableNode* pTable, SNode* pExpr) {
return isPrimaryKeyImpl(pTable, pExpr);
}
-static bool findAndSetColumn(SColumnNode* pCol, const STableNode* pTable) {
- bool found = false;
+static int32_t findAndSetColumn(STranslateContext* pCxt, SColumnNode** pColRef, const STableNode* pTable,
+ bool* pFound) {
+ SColumnNode* pCol = *pColRef;
+ *pFound = false;
if (QUERY_NODE_REAL_TABLE == nodeType(pTable)) {
const STableMeta* pMeta = ((SRealTableNode*)pTable)->pMeta;
if (isInternalPrimaryKey(pCol)) {
setColumnInfoBySchema((SRealTableNode*)pTable, pMeta->schema, false, pCol);
- return true;
+ *pFound = true;
+ return TSDB_CODE_SUCCESS;
}
int32_t nums = pMeta->tableInfo.numOfTags + pMeta->tableInfo.numOfColumns;
for (int32_t i = 0; i < nums; ++i) {
if (0 == strcmp(pCol->colName, pMeta->schema[i].name)) {
setColumnInfoBySchema((SRealTableNode*)pTable, pMeta->schema + i, (i >= pMeta->tableInfo.numOfColumns), pCol);
- found = true;
+ *pFound = true;
break;
}
}
@@ -448,50 +491,62 @@ static bool findAndSetColumn(SColumnNode* pCol, const STableNode* pTable) {
SExprNode* pExpr = (SExprNode*)pNode;
if (0 == strcmp(pCol->colName, pExpr->aliasName) ||
(isPrimaryKey((STempTableNode*)pTable, pNode) && isInternalPrimaryKey(pCol))) {
- setColumnInfoByExpr(pTable, pExpr, pCol);
- found = true;
- break;
+ if (*pFound) {
+ return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_AMBIGUOUS_COLUMN, pCol->colName);
+ }
+ setColumnInfoByExpr(pTable, pExpr, pColRef);
+ *pFound = true;
}
}
}
- return found;
+ return TSDB_CODE_SUCCESS;
}
-static EDealRes translateColumnWithPrefix(STranslateContext* pCxt, SColumnNode* pCol) {
+static EDealRes translateColumnWithPrefix(STranslateContext* pCxt, SColumnNode** pCol) {
SArray* pTables = taosArrayGetP(pCxt->pNsLevel, pCxt->currLevel);
size_t nums = taosArrayGetSize(pTables);
bool foundTable = false;
for (size_t i = 0; i < nums; ++i) {
STableNode* pTable = taosArrayGetP(pTables, i);
- if (belongTable(pCxt->pParseCxt->db, pCol, pTable)) {
+ if (belongTable(pCxt->pParseCxt->db, (*pCol), pTable)) {
foundTable = true;
- if (findAndSetColumn(pCol, pTable)) {
+ bool foundCol = false;
+ pCxt->errCode = findAndSetColumn(pCxt, pCol, pTable, &foundCol);
+ if (TSDB_CODE_SUCCESS != pCxt->errCode) {
+ return DEAL_RES_ERROR;
+ }
+ if (foundCol) {
break;
}
- return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_COLUMN, pCol->colName);
+ return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_COLUMN, (*pCol)->colName);
}
}
if (!foundTable) {
- return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_TABLE_NOT_EXIST, pCol->tableAlias);
+ return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_TABLE_NOT_EXIST, (*pCol)->tableAlias);
}
return DEAL_RES_CONTINUE;
}
-static EDealRes translateColumnWithoutPrefix(STranslateContext* pCxt, SColumnNode* pCol) {
+static EDealRes translateColumnWithoutPrefix(STranslateContext* pCxt, SColumnNode** pCol) {
SArray* pTables = taosArrayGetP(pCxt->pNsLevel, pCxt->currLevel);
size_t nums = taosArrayGetSize(pTables);
bool found = false;
- bool isInternalPk = isInternalPrimaryKey(pCol);
+ bool isInternalPk = isInternalPrimaryKey(*pCol);
for (size_t i = 0; i < nums; ++i) {
STableNode* pTable = taosArrayGetP(pTables, i);
- if (findAndSetColumn(pCol, pTable)) {
+ bool foundCol = false;
+ pCxt->errCode = findAndSetColumn(pCxt, pCol, pTable, &foundCol);
+ if (TSDB_CODE_SUCCESS != pCxt->errCode) {
+ return DEAL_RES_ERROR;
+ }
+ if (foundCol) {
if (found) {
- return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_AMBIGUOUS_COLUMN, pCol->colName);
+ return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_AMBIGUOUS_COLUMN, (*pCol)->colName);
}
found = true;
- if (isInternalPk) {
- break;
- }
+ }
+ if (isInternalPk) {
+ break;
}
}
if (!found) {
@@ -501,18 +556,18 @@ static EDealRes translateColumnWithoutPrefix(STranslateContext* pCxt, SColumnNod
}
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_INTERNAL_PK);
} else {
- return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_COLUMN, pCol->colName);
+ return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_COLUMN, (*pCol)->colName);
}
}
return DEAL_RES_CONTINUE;
}
-static bool translateColumnUseAlias(STranslateContext* pCxt, SColumnNode* pCol) {
+static bool translateColumnUseAlias(STranslateContext* pCxt, SColumnNode** pCol) {
SNodeList* pProjectionList = pCxt->pCurrStmt->pProjectionList;
SNode* pNode;
FOREACH(pNode, pProjectionList) {
SExprNode* pExpr = (SExprNode*)pNode;
- if (0 == strcmp(pCol->colName, pExpr->aliasName)) {
+ if (0 == strcmp((*pCol)->colName, pExpr->aliasName)) {
setColumnInfoByExpr(NULL, pExpr, pCol);
return true;
}
@@ -520,14 +575,14 @@ static bool translateColumnUseAlias(STranslateContext* pCxt, SColumnNode* pCol)
return false;
}
-static EDealRes translateColumn(STranslateContext* pCxt, SColumnNode* pCol) {
+static EDealRes translateColumn(STranslateContext* pCxt, SColumnNode** pCol) {
// count(*)/first(*)/last(*) and so on
- if (0 == strcmp(pCol->colName, "*")) {
+ if (0 == strcmp((*pCol)->colName, "*")) {
return DEAL_RES_CONTINUE;
}
EDealRes res = DEAL_RES_CONTINUE;
- if ('\0' != pCol->tableAlias[0]) {
+ if ('\0' != (*pCol)->tableAlias[0]) {
res = translateColumnWithPrefix(pCxt, pCol);
} else {
bool found = false;
@@ -539,17 +594,18 @@ static EDealRes translateColumn(STranslateContext* pCxt, SColumnNode* pCol) {
return res;
}
-static int32_t parseTimeFromValueNode(SValueNode* pVal) {
- if (IS_SIGNED_NUMERIC_TYPE(pVal->node.resType.type)) {
- return TSDB_CODE_SUCCESS;
- } else if (IS_UNSIGNED_NUMERIC_TYPE(pVal->node.resType.type)) {
- pVal->datum.i = pVal->datum.u;
- return TSDB_CODE_SUCCESS;
- } else if (IS_FLOAT_TYPE(pVal->node.resType.type)) {
- pVal->datum.i = pVal->datum.d;
- return TSDB_CODE_SUCCESS;
- } else if (TSDB_DATA_TYPE_BOOL == pVal->node.resType.type) {
- pVal->datum.i = pVal->datum.b;
+static int32_t parseTimeFromValueNode(STranslateContext* pCxt, SValueNode* pVal) {
+ if (IS_NUMERIC_TYPE(pVal->node.resType.type) || TSDB_DATA_TYPE_BOOL == pVal->node.resType.type) {
+ if (DEAL_RES_ERROR == translateValue(pCxt, pVal)) {
+ return pCxt->errCode;
+ }
+ if (IS_UNSIGNED_NUMERIC_TYPE(pVal->node.resType.type)) {
+ pVal->datum.i = pVal->datum.u;
+ } else if (IS_FLOAT_TYPE(pVal->node.resType.type)) {
+ pVal->datum.i = pVal->datum.d;
+ } else if (TSDB_DATA_TYPE_BOOL == pVal->node.resType.type) {
+ pVal->datum.i = pVal->datum.b;
+ }
return TSDB_CODE_SUCCESS;
} else if (IS_VAR_DATA_TYPE(pVal->node.resType.type) || TSDB_DATA_TYPE_TIMESTAMP == pVal->node.resType.type) {
if (TSDB_CODE_SUCCESS == taosParseTime(pVal->literal, &pVal->datum.i, pVal->node.resType.bytes,
@@ -585,62 +641,52 @@ static EDealRes translateValueImpl(STranslateContext* pCxt, SValueNode* pVal, SD
*(bool*)&pVal->typeData = pVal->datum.b;
break;
case TSDB_DATA_TYPE_TINYINT: {
- char* endPtr = NULL;
- pVal->datum.i = taosStr2Int64(pVal->literal, &endPtr, 10);
+ pVal->datum.i = taosStr2Int64(pVal->literal, NULL, 10);
*(int8_t*)&pVal->typeData = pVal->datum.i;
break;
}
case TSDB_DATA_TYPE_SMALLINT: {
- char* endPtr = NULL;
- pVal->datum.i = taosStr2Int64(pVal->literal, &endPtr, 10);
+ pVal->datum.i = taosStr2Int64(pVal->literal, NULL, 10);
*(int16_t*)&pVal->typeData = pVal->datum.i;
break;
}
case TSDB_DATA_TYPE_INT: {
- char* endPtr = NULL;
- pVal->datum.i = taosStr2Int64(pVal->literal, &endPtr, 10);
+ pVal->datum.i = taosStr2Int64(pVal->literal, NULL, 10);
*(int32_t*)&pVal->typeData = pVal->datum.i;
break;
}
case TSDB_DATA_TYPE_BIGINT: {
- char* endPtr = NULL;
- pVal->datum.i = taosStr2Int64(pVal->literal, &endPtr, 10);
+ pVal->datum.i = taosStr2Int64(pVal->literal, NULL, 10);
*(int64_t*)&pVal->typeData = pVal->datum.i;
break;
}
case TSDB_DATA_TYPE_UTINYINT: {
- char* endPtr = NULL;
- pVal->datum.u = taosStr2UInt64(pVal->literal, &endPtr, 10);
+ pVal->datum.u = taosStr2UInt64(pVal->literal, NULL, 10);
*(uint8_t*)&pVal->typeData = pVal->datum.u;
break;
}
case TSDB_DATA_TYPE_USMALLINT: {
- char* endPtr = NULL;
- pVal->datum.u = taosStr2UInt64(pVal->literal, &endPtr, 10);
+ pVal->datum.u = taosStr2UInt64(pVal->literal, NULL, 10);
*(uint16_t*)&pVal->typeData = pVal->datum.u;
break;
}
case TSDB_DATA_TYPE_UINT: {
- char* endPtr = NULL;
- pVal->datum.u = taosStr2UInt64(pVal->literal, &endPtr, 10);
+ pVal->datum.u = taosStr2UInt64(pVal->literal, NULL, 10);
*(uint32_t*)&pVal->typeData = pVal->datum.u;
break;
}
case TSDB_DATA_TYPE_UBIGINT: {
- char* endPtr = NULL;
- pVal->datum.u = taosStr2UInt64(pVal->literal, &endPtr, 10);
+ pVal->datum.u = taosStr2UInt64(pVal->literal, NULL, 10);
*(uint64_t*)&pVal->typeData = pVal->datum.u;
break;
}
case TSDB_DATA_TYPE_FLOAT: {
- char* endPtr = NULL;
- pVal->datum.d = taosStr2Double(pVal->literal, &endPtr);
+ pVal->datum.d = taosStr2Double(pVal->literal, NULL);
*(float*)&pVal->typeData = pVal->datum.d;
break;
}
case TSDB_DATA_TYPE_DOUBLE: {
- char* endPtr = NULL;
- pVal->datum.d = taosStr2Double(pVal->literal, &endPtr);
+ pVal->datum.d = taosStr2Double(pVal->literal, NULL);
*(double*)&pVal->typeData = pVal->datum.d;
break;
}
@@ -656,7 +702,7 @@ static EDealRes translateValueImpl(STranslateContext* pCxt, SValueNode* pVal, SD
break;
}
case TSDB_DATA_TYPE_TIMESTAMP: {
- if (TSDB_CODE_SUCCESS != parseTimeFromValueNode(pVal)) {
+ if (TSDB_CODE_SUCCESS != parseTimeFromValueNode(pCxt, pVal)) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal);
}
*(int64_t*)&pVal->typeData = pVal->datum.i;
@@ -666,7 +712,6 @@ static EDealRes translateValueImpl(STranslateContext* pCxt, SValueNode* pVal, SD
pVal->datum.p = taosMemoryCalloc(1, targetDt.bytes + 1);
if (NULL == pVal->datum.p) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_OUT_OF_MEMORY);
- ;
}
int32_t len = 0;
@@ -720,18 +765,30 @@ static bool isMultiResFunc(SNode* pNode) {
return (QUERY_NODE_COLUMN == nodeType(pParam) ? 0 == strcmp(((SColumnNode*)pParam)->colName, "*") : false);
}
-static EDealRes translateUnaryOperator(STranslateContext* pCxt, SOperatorNode* pOp) {
+static int32_t rewriteNegativeOperator(SNode** pOp) {
+ SNode* pRes = NULL;
+ int32_t code = scalarCalculateConstants(*pOp, &pRes);
+ if (TSDB_CODE_SUCCESS == code) {
+ *pOp = pRes;
+ }
+ return code;
+}
+
+static EDealRes translateUnaryOperator(STranslateContext* pCxt, SOperatorNode** pOpRef) {
+ SOperatorNode* pOp = *pOpRef;
if (OP_TYPE_MINUS == pOp->opType) {
if (!IS_MATHABLE_TYPE(((SExprNode*)(pOp->pLeft))->resType.type)) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, ((SExprNode*)(pOp->pLeft))->aliasName);
}
pOp->node.resType.type = TSDB_DATA_TYPE_DOUBLE;
pOp->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes;
+
+ pCxt->errCode = rewriteNegativeOperator((SNode**)pOpRef);
} else {
pOp->node.resType.type = TSDB_DATA_TYPE_BOOL;
pOp->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_BOOL].bytes;
}
- return DEAL_RES_CONTINUE;
+ return TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_CONTINUE : DEAL_RES_ERROR;
}
static EDealRes translateArithmeticOperator(STranslateContext* pCxt, SOperatorNode* pOp) {
@@ -772,7 +829,8 @@ static EDealRes translateComparisonOperator(STranslateContext* pCxt, SOperatorNo
if (!IS_VAR_DATA_TYPE(((SExprNode*)(pOp->pLeft))->resType.type)) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, ((SExprNode*)(pOp->pLeft))->aliasName);
}
- if (QUERY_NODE_VALUE != nodeType(pOp->pRight) || !IS_STR_DATA_TYPE(((SExprNode*)(pOp->pRight))->resType.type)) {
+ if (QUERY_NODE_VALUE != nodeType(pOp->pRight) ||
+ ((!IS_STR_DATA_TYPE(((SExprNode*)(pOp->pRight))->resType.type)) && (((SExprNode*)(pOp->pRight))->resType.type != TSDB_DATA_TYPE_NULL))) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, ((SExprNode*)(pOp->pRight))->aliasName);
}
}
@@ -792,7 +850,9 @@ static EDealRes translateJsonOperator(STranslateContext* pCxt, SOperatorNode* pO
return DEAL_RES_CONTINUE;
}
-static EDealRes translateOperator(STranslateContext* pCxt, SOperatorNode* pOp) {
+static EDealRes translateOperator(STranslateContext* pCxt, SOperatorNode** pOpRef) {
+ SOperatorNode* pOp = *pOpRef;
+
if (isMultiResFunc(pOp->pLeft)) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, ((SExprNode*)(pOp->pLeft))->aliasName);
}
@@ -801,7 +861,7 @@ static EDealRes translateOperator(STranslateContext* pCxt, SOperatorNode* pOp) {
}
if (nodesIsUnaryOp(pOp)) {
- return translateUnaryOperator(pCxt, pOp);
+ return translateUnaryOperator(pCxt, pOpRef);
} else if (nodesIsArithmeticOp(pOp)) {
return translateArithmeticOperator(pCxt, pOp);
} else if (nodesIsComparisonOp(pOp)) {
@@ -812,7 +872,7 @@ static EDealRes translateOperator(STranslateContext* pCxt, SOperatorNode* pOp) {
return DEAL_RES_CONTINUE;
}
-static EDealRes haveAggOrNonstdFunction(SNode* pNode, void* pContext) {
+static EDealRes haveVectorFunction(SNode* pNode, void* pContext) {
if (isAggFunc(pNode)) {
*((bool*)pContext) = true;
return DEAL_RES_END;
@@ -857,17 +917,65 @@ static int32_t rewriteCountStar(STranslateContext* pCxt, SFunctionNode* pCount)
static bool hasInvalidFuncNesting(SNodeList* pParameterList) {
bool hasInvalidFunc = false;
- nodesWalkExprs(pParameterList, haveAggOrNonstdFunction, &hasInvalidFunc);
+ nodesWalkExprs(pParameterList, haveVectorFunction, &hasInvalidFunc);
return hasInvalidFunc;
}
static int32_t getFuncInfo(STranslateContext* pCxt, SFunctionNode* pFunc) {
- SFmGetFuncInfoParam param = {.pCtg = pCxt->pParseCxt->pCatalog,
- .pRpc = pCxt->pParseCxt->pTransporter,
- .pMgmtEps = &pCxt->pParseCxt->mgmtEpSet,
- .pErrBuf = pCxt->msgBuf.buf,
- .errBufLen = pCxt->msgBuf.len};
- return fmGetFuncInfo(¶m, pFunc);
+ int32_t code = fmGetFuncInfo(pFunc, pCxt->msgBuf.buf, pCxt->msgBuf.len);
+ if (TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION == code) {
+ code = getUdfInfo(pCxt, pFunc);
+ }
+ return code;
+}
+
+static int32_t translateAggFunc(STranslateContext* pCxt, SFunctionNode* pFunc) {
+ if (beforeHaving(pCxt->currClause)) {
+ return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ILLEGAL_USE_AGG_FUNCTION);
+ }
+ if (hasInvalidFuncNesting(pFunc->pParameterList)) {
+ return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_AGG_FUNC_NESTING);
+ }
+ if (pCxt->pCurrStmt->hasIndefiniteRowsFunc) {
+ return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC);
+ }
+
+ if (isCountStar(pFunc)) {
+ return rewriteCountStar(pCxt, pFunc);
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t translateScanPseudoColumnFunc(STranslateContext* pCxt, SFunctionNode* pFunc) {
+ if (0 == LIST_LENGTH(pFunc->pParameterList)) {
+ if (QUERY_NODE_REAL_TABLE != nodeType(pCxt->pCurrStmt->pFromTable)) {
+ return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TBNAME);
+ }
+ } else {
+ SValueNode* pVal = nodesListGetNode(pFunc->pParameterList, 0);
+ STableNode* pTable = NULL;
+ pCxt->errCode = findTable(pCxt, pVal->literal, &pTable);
+ if (TSDB_CODE_SUCCESS == pCxt->errCode && (NULL == pTable || QUERY_NODE_REAL_TABLE != nodeType(pTable))) {
+ return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TBNAME);
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t translateIndefiniteRowsFunc(STranslateContext* pCxt, SFunctionNode* pFunc) {
+ if (SQL_CLAUSE_SELECT != pCxt->currClause || pCxt->pCurrStmt->hasIndefiniteRowsFunc || pCxt->pCurrStmt->hasAggFuncs) {
+ return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC);
+ }
+ if (hasInvalidFuncNesting(pFunc->pParameterList)) {
+ return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_AGG_FUNC_NESTING);
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static void setFuncClassification(SSelectStmt* pSelect, SFunctionNode* pFunc) {
+ pSelect->hasAggFuncs = pSelect->hasAggFuncs ? true : fmIsAggFunc(pFunc->funcId);
+ pSelect->hasRepeatScanFuncs = pSelect->hasRepeatScanFuncs ? true : fmIsRepeatScanFunc(pFunc->funcId);
+ pSelect->hasIndefiniteRowsFunc = pSelect->hasIndefiniteRowsFunc ? true : fmIsIndefiniteRowsFunc(pFunc->funcId);
}
static EDealRes translateFunction(STranslateContext* pCxt, SFunctionNode* pFunc) {
@@ -880,48 +988,16 @@ static EDealRes translateFunction(STranslateContext* pCxt, SFunctionNode* pFunc)
pCxt->errCode = getFuncInfo(pCxt, pFunc);
if (TSDB_CODE_SUCCESS == pCxt->errCode && fmIsAggFunc(pFunc->funcId)) {
- if (beforeHaving(pCxt->currClause)) {
- return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_ILLEGAL_USE_AGG_FUNCTION);
- }
- if (hasInvalidFuncNesting(pFunc->pParameterList)) {
- return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_AGG_FUNC_NESTING);
- }
- if (pCxt->pCurrStmt->hasIndefiniteRowsFunc) {
- return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_NOT_ALLOWED_FUNC);
- }
-
- pCxt->pCurrStmt->hasAggFuncs = true;
- if (isCountStar(pFunc)) {
- pCxt->errCode = rewriteCountStar(pCxt, pFunc);
- }
-
- if (fmIsRepeatScanFunc(pFunc->funcId)) {
- pCxt->pCurrStmt->hasRepeatScanFuncs = true;
- }
+ pCxt->errCode = translateAggFunc(pCxt, pFunc);
}
if (TSDB_CODE_SUCCESS == pCxt->errCode && fmIsScanPseudoColumnFunc(pFunc->funcId)) {
- if (0 == LIST_LENGTH(pFunc->pParameterList)) {
- if (QUERY_NODE_REAL_TABLE != nodeType(pCxt->pCurrStmt->pFromTable)) {
- return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_TBNAME);
- }
- } else {
- SValueNode* pVal = nodesListGetNode(pFunc->pParameterList, 0);
- STableNode* pTable = NULL;
- pCxt->errCode = findTable(pCxt, pVal->literal, &pTable);
- if (TSDB_CODE_SUCCESS == pCxt->errCode && (NULL == pTable || QUERY_NODE_REAL_TABLE != nodeType(pTable))) {
- return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_TBNAME);
- }
- }
+ pCxt->errCode = translateScanPseudoColumnFunc(pCxt, pFunc);
}
if (TSDB_CODE_SUCCESS == pCxt->errCode && fmIsIndefiniteRowsFunc(pFunc->funcId)) {
- if (SQL_CLAUSE_SELECT != pCxt->currClause || pCxt->pCurrStmt->hasIndefiniteRowsFunc ||
- pCxt->pCurrStmt->hasAggFuncs) {
- return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_NOT_ALLOWED_FUNC);
- }
- if (hasInvalidFuncNesting(pFunc->pParameterList)) {
- return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_AGG_FUNC_NESTING);
- }
- pCxt->pCurrStmt->hasIndefiniteRowsFunc = true;
+ pCxt->errCode = translateIndefiniteRowsFunc(pCxt, pFunc);
+ }
+ if (TSDB_CODE_SUCCESS == pCxt->errCode) {
+ setFuncClassification(pCxt->pCurrStmt, pFunc);
}
return TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_CONTINUE : DEAL_RES_ERROR;
}
@@ -936,34 +1012,34 @@ static EDealRes translateLogicCond(STranslateContext* pCxt, SLogicConditionNode*
return DEAL_RES_CONTINUE;
}
-static EDealRes doTranslateExpr(SNode* pNode, void* pContext) {
+static EDealRes doTranslateExpr(SNode** pNode, void* pContext) {
STranslateContext* pCxt = (STranslateContext*)pContext;
- switch (nodeType(pNode)) {
+ switch (nodeType(*pNode)) {
case QUERY_NODE_COLUMN:
- return translateColumn(pCxt, (SColumnNode*)pNode);
+ return translateColumn(pCxt, (SColumnNode**)pNode);
case QUERY_NODE_VALUE:
- return translateValue(pCxt, (SValueNode*)pNode);
+ return translateValue(pCxt, (SValueNode*)*pNode);
case QUERY_NODE_OPERATOR:
- return translateOperator(pCxt, (SOperatorNode*)pNode);
+ return translateOperator(pCxt, (SOperatorNode**)pNode);
case QUERY_NODE_FUNCTION:
- return translateFunction(pCxt, (SFunctionNode*)pNode);
+ return translateFunction(pCxt, (SFunctionNode*)*pNode);
case QUERY_NODE_LOGIC_CONDITION:
- return translateLogicCond(pCxt, (SLogicConditionNode*)pNode);
+ return translateLogicCond(pCxt, (SLogicConditionNode*)*pNode);
case QUERY_NODE_TEMP_TABLE:
- return translateExprSubquery(pCxt, ((STempTableNode*)pNode)->pSubquery);
+ return translateExprSubquery(pCxt, ((STempTableNode*)*pNode)->pSubquery);
default:
break;
}
return DEAL_RES_CONTINUE;
}
-static int32_t translateExpr(STranslateContext* pCxt, SNode* pNode) {
- nodesWalkExprPostOrder(pNode, doTranslateExpr, pCxt);
+static int32_t translateExpr(STranslateContext* pCxt, SNode** pNode) {
+ nodesRewriteExprPostOrder(pNode, doTranslateExpr, pCxt);
return pCxt->errCode;
}
static int32_t translateExprList(STranslateContext* pCxt, SNodeList* pList) {
- nodesWalkExprsPostOrder(pList, doTranslateExpr, pCxt);
+ nodesRewriteExprsPostOrder(pList, doTranslateExpr, pCxt);
return pCxt->errCode;
}
@@ -1009,6 +1085,7 @@ static EDealRes rewriteColToSelectValFunc(STranslateContext* pCxt, SNode** pNode
}
if (TSDB_CODE_SUCCESS == pCxt->errCode) {
*pNode = (SNode*)pFunc;
+ pCxt->pCurrStmt->hasSelectValFunc = true;
} else {
nodesDestroyNode(pFunc);
}
@@ -1096,7 +1173,7 @@ typedef struct CheckAggColCoexistCxt {
STranslateContext* pTranslateCxt;
bool existAggFunc;
bool existCol;
- bool existNonstdFunc;
+ bool existIndefiniteRowsFunc;
int32_t selectFuncNum;
bool existOtherAggFunc;
} CheckAggColCoexistCxt;
@@ -1113,7 +1190,7 @@ static EDealRes doCheckAggColCoexist(SNode* pNode, void* pContext) {
return DEAL_RES_IGNORE_CHILD;
}
if (isIndefiniteRowsFunc(pNode)) {
- pCxt->existNonstdFunc = true;
+ pCxt->existIndefiniteRowsFunc = true;
return DEAL_RES_IGNORE_CHILD;
}
if (isScanPseudoColumnFunc(pNode) || QUERY_NODE_COLUMN == nodeType(pNode)) {
@@ -1129,7 +1206,7 @@ static int32_t checkAggColCoexist(STranslateContext* pCxt, SSelectStmt* pSelect)
CheckAggColCoexistCxt cxt = {.pTranslateCxt = pCxt,
.existAggFunc = false,
.existCol = false,
- .existNonstdFunc = false,
+ .existIndefiniteRowsFunc = false,
.selectFuncNum = 0,
.existOtherAggFunc = false};
nodesWalkExprs(pSelect->pProjectionList, doCheckAggColCoexist, &cxt);
@@ -1142,7 +1219,7 @@ static int32_t checkAggColCoexist(STranslateContext* pCxt, SSelectStmt* pSelect)
if ((cxt.selectFuncNum > 1 || cxt.existAggFunc || NULL != pSelect->pWindow) && cxt.existCol) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_SINGLE_GROUP);
}
- if (cxt.existNonstdFunc && cxt.existCol) {
+ if (cxt.existIndefiniteRowsFunc && cxt.existCol) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC);
}
return TSDB_CODE_SUCCESS;
@@ -1183,7 +1260,6 @@ static int32_t setSysTableVgroupList(STranslateContext* pCxt, SName* pName, SRea
int32_t code = TSDB_CODE_SUCCESS;
SArray* vgroupList = NULL;
if ('\0' != pRealTable->qualDbName[0]) {
- // todo release after mnode can be processed
if (0 != strcmp(pRealTable->qualDbName, TSDB_INFORMATION_SCHEMA_DB)) {
code = getDBVgInfo(pCxt, pRealTable->qualDbName, &vgroupList);
}
@@ -1191,7 +1267,6 @@ static int32_t setSysTableVgroupList(STranslateContext* pCxt, SName* pName, SRea
code = getDBVgInfoImpl(pCxt, pName, &vgroupList);
}
- // todo release after mnode can be processed
if (TSDB_CODE_SUCCESS == code) {
code = addMnodeToVgroupList(&pCxt->pParseCxt->mgmtEpSet, &vgroupList);
}
@@ -1212,7 +1287,7 @@ static int32_t setTableVgroupList(STranslateContext* pCxt, SName* pName, SRealTa
int32_t code = TSDB_CODE_SUCCESS;
if (TSDB_SUPER_TABLE == pRealTable->pMeta->tableType) {
SArray* vgroupList = NULL;
- code = getTableDistVgInfo(pCxt, pName, &vgroupList);
+ code = getDBVgInfoImpl(pCxt, pName, &vgroupList);
if (TSDB_CODE_SUCCESS == code) {
code = toVgroupsInfo(vgroupList, &pRealTable->pVgroupList);
}
@@ -1237,12 +1312,31 @@ static uint8_t getStmtPrecision(SNode* pStmt) {
return 0;
}
+static bool stmtIsSingleTable(SNode* pStmt) {
+ if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) {
+ return ((STableNode*)((SSelectStmt*)pStmt)->pFromTable)->singleTable;
+ }
+ return false;
+}
+
static uint8_t getJoinTablePrecision(SJoinTableNode* pJoinTable) {
uint8_t lp = ((STableNode*)pJoinTable->pLeft)->precision;
uint8_t rp = ((STableNode*)pJoinTable->pRight)->precision;
return (lp > rp ? rp : lp);
}
+static bool joinTableIsSingleTable(SJoinTableNode* pJoinTable) {
+ return (((STableNode*)pJoinTable->pLeft)->singleTable && ((STableNode*)pJoinTable->pRight)->singleTable);
+}
+
+static bool isSingleTable(SRealTableNode* pRealTable) {
+ int8_t tableType = pRealTable->pMeta->tableType;
+ if (TSDB_SYSTEM_TABLE == tableType) {
+ return 0 != strcmp(pRealTable->table.tableName, TSDB_INS_TABLE_USER_TABLES);
+ }
+ return (TSDB_CHILD_TABLE == tableType || TSDB_NORMAL_TABLE == tableType);
+}
+
static int32_t translateTable(STranslateContext* pCxt, SNode* pTable) {
int32_t code = TSDB_CODE_SUCCESS;
switch (nodeType(pTable)) {
@@ -1261,6 +1355,7 @@ static int32_t translateTable(STranslateContext* pCxt, SNode* pTable) {
code = setTableVgroupList(pCxt, &name, pRealTable);
}
pRealTable->table.precision = pRealTable->pMeta->tableInfo.precision;
+ pRealTable->table.singleTable = isSingleTable(pRealTable);
if (TSDB_CODE_SUCCESS == code) {
code = addNamespace(pCxt, pRealTable);
}
@@ -1271,6 +1366,7 @@ static int32_t translateTable(STranslateContext* pCxt, SNode* pTable) {
code = translateSubquery(pCxt, pTempTable->pSubquery);
if (TSDB_CODE_SUCCESS == code) {
pTempTable->table.precision = getStmtPrecision(pTempTable->pSubquery);
+ pTempTable->table.singleTable = stmtIsSingleTable(pTempTable->pSubquery);
code = addNamespace(pCxt, pTempTable);
}
break;
@@ -1283,7 +1379,8 @@ static int32_t translateTable(STranslateContext* pCxt, SNode* pTable) {
}
if (TSDB_CODE_SUCCESS == code) {
pJoinTable->table.precision = getJoinTablePrecision(pJoinTable);
- code = translateExpr(pCxt, pJoinTable->pOnCond);
+ pJoinTable->table.singleTable = joinTableIsSingleTable(pJoinTable);
+ code = translateExpr(pCxt, &pJoinTable->pOnCond);
}
break;
}
@@ -1516,7 +1613,7 @@ static int32_t translateOrderByPosition(STranslateContext* pCxt, SNodeList* pPro
if (NULL == pCol) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_OUT_OF_MEMORY);
}
- setColumnInfoByExpr(NULL, (SExprNode*)nodesListGetNode(pProjectionList, pos - 1), pCol);
+ setColumnInfoByExpr(NULL, (SExprNode*)nodesListGetNode(pProjectionList, pos - 1), &pCol);
((SOrderByExprNode*)pNode)->pExpr = (SNode*)pCol;
nodesDestroyNode(pExpr);
}
@@ -1562,7 +1659,7 @@ static int32_t translateHaving(STranslateContext* pCxt, SSelectStmt* pSelect) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_GROUPBY_LACK_EXPRESSION);
}
pCxt->currClause = SQL_CLAUSE_HAVING;
- int32_t code = translateExpr(pCxt, pSelect->pHaving);
+ int32_t code = translateExpr(pCxt, &pSelect->pHaving);
if (TSDB_CODE_SUCCESS == code) {
code = checkExprForGroupBy(pCxt, &pSelect->pHaving);
}
@@ -1810,7 +1907,7 @@ static int32_t translateWindow(STranslateContext* pCxt, SSelectStmt* pSelect) {
return TSDB_CODE_SUCCESS;
}
pCxt->currClause = SQL_CLAUSE_WINDOW;
- int32_t code = translateExpr(pCxt, pSelect->pWindow);
+ int32_t code = translateExpr(pCxt, &pSelect->pWindow);
if (TSDB_CODE_SUCCESS == code) {
code = checkWindow(pCxt, pSelect);
}
@@ -1822,7 +1919,7 @@ static int32_t translatePartitionBy(STranslateContext* pCxt, SNodeList* pPartiti
return translateExprList(pCxt, pPartitionByList);
}
-static int32_t translateWhere(STranslateContext* pCxt, SNode* pWhere) {
+static int32_t translateWhere(STranslateContext* pCxt, SNode** pWhere) {
pCxt->currClause = SQL_CLAUSE_WHERE;
return translateExpr(pCxt, pWhere);
}
@@ -1856,7 +1953,9 @@ static int32_t createPrimaryKeyColByTable(STranslateContext* pCxt, STableNode* p
}
pCol->colId = PRIMARYKEY_TIMESTAMP_COL_ID;
strcpy(pCol->colName, PK_TS_COL_INTERNAL_NAME);
- if (!findAndSetColumn(pCol, pTable)) {
+ bool found = false;
+ int32_t code = findAndSetColumn(pCxt, &pCol, pTable, &found);
+ if (TSDB_CODE_SUCCESS != code || !found) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TIMELINE_FUNC);
}
*pPrimaryKey = (SNode*)pCol;
@@ -1895,7 +1994,7 @@ static int32_t translateSelect(STranslateContext* pCxt, SSelectStmt* pSelect) {
pCxt->pCurrStmt = pSelect;
int32_t code = translateFrom(pCxt, pSelect);
if (TSDB_CODE_SUCCESS == code) {
- code = translateWhere(pCxt, pSelect->pWhere);
+ code = translateWhere(pCxt, &pSelect->pWhere);
}
if (TSDB_CODE_SUCCESS == code) {
code = translatePartitionBy(pCxt, pSelect->pPartitionByList);
@@ -2074,6 +2173,7 @@ static int32_t buildCreateDbReq(STranslateContext* pCxt, SCreateDatabaseStmt* pS
pReq->replications = pStmt->pOptions->replica;
pReq->strict = pStmt->pOptions->strict;
pReq->cacheLastRow = pStmt->pOptions->cachelast;
+ pReq->schemaless = pStmt->pOptions->schemaless;
pReq->ignoreExist = pStmt->ignoreExists;
return buildCreateDbRetentions(pStmt->pOptions->pRetentions, pReq);
}
@@ -2273,6 +2373,9 @@ static int32_t checkDatabaseOptions(STranslateContext* pCxt, const char* pDbName
if (TSDB_CODE_SUCCESS == code) {
code = checkDbRetentionsOption(pCxt, pOptions->pRetentions);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = checkDbEnumOption(pCxt, "schemaless", pOptions->schemaless, TSDB_DB_SCHEMALESS_ON, TSDB_DB_SCHEMALESS_OFF);
+ }
if (TSDB_CODE_SUCCESS == code) {
code = checkOptionsDependency(pCxt, pDbName, pOptions);
}
@@ -2530,10 +2633,7 @@ static int32_t checkTableSchema(STranslateContext* pCxt, SCreateTableStmt* pStmt
}
static int32_t checkCreateTable(STranslateContext* pCxt, SCreateTableStmt* pStmt) {
- int32_t code = checkRangeOption(pCxt, "delay", pStmt->pOptions->delay, TSDB_MIN_ROLLUP_DELAY, TSDB_MAX_ROLLUP_DELAY);
- if (TSDB_CODE_SUCCESS == code) {
- code = checTableFactorOption(pCxt, pStmt->pOptions->filesFactor);
- }
+ int32_t code = checTableFactorOption(pCxt, pStmt->pOptions->filesFactor);
if (TSDB_CODE_SUCCESS == code) {
code = checkTableRollupOption(pCxt, pStmt->pOptions->pRollupFuncs);
}
@@ -2546,6 +2646,11 @@ static int32_t checkCreateTable(STranslateContext* pCxt, SCreateTableStmt* pStmt
if (TSDB_CODE_SUCCESS == code) {
code = checkTableSchema(pCxt, pStmt);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ if(pCxt->pParseCxt->schemalessType == 0){
+ code = isNotSchemalessDb(pCxt->pParseCxt, pStmt->dbName);
+ }
+ }
return code;
}
@@ -2752,12 +2857,13 @@ static int32_t buildRollupAst(STranslateContext* pCxt, SCreateTableStmt* pStmt,
int32_t code = getDBCfg(pCxt, pStmt->dbName, &dbCfg);
int32_t num = taosArrayGetSize(dbCfg.pRetensions);
if (TSDB_CODE_SUCCESS != code || num < 2) {
+ taosArrayDestroy(dbCfg.pRetensions);
return code;
}
for (int32_t i = 1; i < num; ++i) {
SRetention* pRetension = taosArrayGet(dbCfg.pRetensions, i);
STranslateContext cxt = {0};
- initTranslateContext(pCxt->pParseCxt, &cxt);
+ initTranslateContext(pCxt->pParseCxt, pCxt->pMetaCache, &cxt);
code = getRollupAst(&cxt, pStmt, pRetension, dbCfg.precision, 1 == i ? &pReq->pAst1 : &pReq->pAst2,
1 == i ? &pReq->ast1Len : &pReq->ast2Len);
destroyTranslateContext(&cxt);
@@ -2765,13 +2871,14 @@ static int32_t buildRollupAst(STranslateContext* pCxt, SCreateTableStmt* pStmt,
break;
}
}
+
+ taosArrayDestroy(dbCfg.pRetensions);
return code;
}
static int32_t buildCreateStbReq(STranslateContext* pCxt, SCreateTableStmt* pStmt, SMCreateStbReq* pReq) {
pReq->igExists = pStmt->ignoreExists;
pReq->xFilesFactor = pStmt->pOptions->filesFactor;
- pReq->delay = pStmt->pOptions->delay;
pReq->ttl = pStmt->pOptions->ttl;
columnDefNodeToField(pStmt->pCols, &pReq->pColumns);
columnDefNodeToField(pStmt->pTags, &pReq->pTags);
@@ -3197,9 +3304,6 @@ static int32_t buildCreateTopicReq(STranslateContext* pCxt, SCreateTopicStmt* pS
tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->topicName, strlen(pStmt->topicName));
tNameGetFullDbName(&name, pReq->name);
pReq->igExists = pStmt->ignoreExists;
- pReq->withTbName = pStmt->pOptions->withTable;
- pReq->withSchema = pStmt->pOptions->withSchema;
- pReq->withTag = pStmt->pOptions->withTag;
pReq->sql = strdup(pCxt->pParseCxt->pSql);
if (NULL == pReq->sql) {
@@ -3208,19 +3312,26 @@ static int32_t buildCreateTopicReq(STranslateContext* pCxt, SCreateTopicStmt* pS
int32_t code = TSDB_CODE_SUCCESS;
- const char* dbName;
- if (NULL != pStmt->pQuery) {
- dbName = ((SRealTableNode*)(((SSelectStmt*)pStmt->pQuery)->pFromTable))->table.dbName;
+ if ('\0' != pStmt->subSTbName[0]) {
+ pReq->subType = TOPIC_SUB_TYPE__TABLE;
+ toName(pCxt->pParseCxt->acctId, pStmt->subDbName, pStmt->subSTbName, &name);
+ tNameGetFullDbName(&name, pReq->subDbName);
+ tNameExtractFullName(&name, pReq->subStbName);
+ } else if ('\0' != pStmt->subDbName[0]) {
+ pReq->subType = TOPIC_SUB_TYPE__DB;
+ tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->subDbName, strlen(pStmt->subDbName));
+ tNameGetFullDbName(&name, pReq->subDbName);
+ } else {
+ pReq->subType = TOPIC_SUB_TYPE__COLUMN;
+ char* dbName = ((SRealTableNode*)(((SSelectStmt*)pStmt->pQuery)->pFromTable))->table.dbName;
+ tNameSetDbName(&name, pCxt->pParseCxt->acctId, dbName, strlen(dbName));
+ tNameGetFullDbName(&name, pReq->subDbName);
pCxt->pParseCxt->topicQuery = true;
code = translateQuery(pCxt, pStmt->pQuery);
if (TSDB_CODE_SUCCESS == code) {
code = nodesNodeToString(pStmt->pQuery, false, &pReq->ast, NULL);
}
- } else {
- dbName = pStmt->subscribeDbName;
}
- tNameSetDbName(&name, pCxt->pParseCxt->acctId, dbName, strlen(dbName));
- tNameGetFullDbName(&name, pReq->subscribeDbName);
return code;
}
@@ -3266,6 +3377,18 @@ static int32_t translateDropTopic(STranslateContext* pCxt, SDropTopicStmt* pStmt
return buildCmdMsg(pCxt, TDMT_MND_DROP_TOPIC, (FSerializeFunc)tSerializeSMDropTopicReq, &dropReq);
}
+static int32_t translateDropCGroup(STranslateContext* pCxt, SDropCGroupStmt* pStmt) {
+ SMDropCgroupReq dropReq = {0};
+
+ SName name;
+ tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->topicName, strlen(pStmt->topicName));
+ tNameGetFullDbName(&name, dropReq.topic);
+ dropReq.igNotExists = pStmt->ignoreNotExists;
+ strcpy(dropReq.cgroup, pStmt->cgroup);
+
+ return buildCmdMsg(pCxt, TDMT_MND_MQ_DROP_CGROUP, (FSerializeFunc)tSerializeSMDropCgroupReq, &dropReq);
+}
+
static int32_t translateAlterLocal(STranslateContext* pCxt, SAlterLocalStmt* pStmt) {
// todo
return TSDB_CODE_SUCCESS;
@@ -3325,7 +3448,9 @@ static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt*
pReq->igExists = pStmt->ignoreExists;
SName name;
- tNameExtractFullName(toName(pCxt->pParseCxt->acctId, pCxt->pParseCxt->db, pStmt->streamName, &name), pReq->name);
+ tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->streamName, strlen(pStmt->streamName));
+ tNameGetFullDbName(&name, pReq->name);
+ // tNameExtractFullName(toName(pCxt->pParseCxt->acctId, pCxt->pParseCxt->db, pStmt->streamName, &name), pReq->name);
if ('\0' != pStmt->targetTabName[0]) {
strcpy(name.dbname, pStmt->targetDbName);
@@ -3549,6 +3674,9 @@ static int32_t translateQuery(STranslateContext* pCxt, SNode* pNode) {
case QUERY_NODE_DROP_TOPIC_STMT:
code = translateDropTopic(pCxt, (SDropTopicStmt*)pNode);
break;
+ case QUERY_NODE_DROP_CGROUP_STMT:
+ code = translateDropCGroup(pCxt, (SDropCGroupStmt*)pNode);
+ break;
case QUERY_NODE_ALTER_LOCAL_STMT:
code = translateAlterLocal(pCxt, (SAlterLocalStmt*)pNode);
break;
@@ -3700,7 +3828,6 @@ static const char* getSysDbName(ENodeType type) {
case QUERY_NODE_SHOW_QNODES_STMT:
case QUERY_NODE_SHOW_FUNCTIONS_STMT:
case QUERY_NODE_SHOW_INDEXES_STMT:
- case QUERY_NODE_SHOW_STREAMS_STMT:
case QUERY_NODE_SHOW_BNODES_STMT:
case QUERY_NODE_SHOW_SNODES_STMT:
case QUERY_NODE_SHOW_LICENCE_STMT:
@@ -3709,6 +3836,7 @@ static const char* getSysDbName(ENodeType type) {
case QUERY_NODE_SHOW_CONNECTIONS_STMT:
case QUERY_NODE_SHOW_QUERIES_STMT:
case QUERY_NODE_SHOW_TOPICS_STMT:
+ case QUERY_NODE_SHOW_STREAMS_STMT:
case QUERY_NODE_SHOW_TRANSACTIONS_STMT:
return TSDB_PERFORMANCE_SCHEMA_DB;
default:
@@ -3742,7 +3870,7 @@ static const char* getSysTableName(ENodeType type) {
case QUERY_NODE_SHOW_INDEXES_STMT:
return TSDB_INS_TABLE_USER_INDEXES;
case QUERY_NODE_SHOW_STREAMS_STMT:
- return TSDB_INS_TABLE_USER_STREAMS;
+ return TSDB_PERFS_TABLE_STREAMS;
case QUERY_NODE_SHOW_BNODES_STMT:
return TSDB_INS_TABLE_BNODES;
case QUERY_NODE_SHOW_SNODES_STMT:
@@ -3885,7 +4013,7 @@ typedef struct SVgroupCreateTableBatch {
static void destroyCreateTbReq(SVCreateTbReq* pReq) {
taosMemoryFreeClear(pReq->name);
- taosMemoryFreeClear(pReq->ntb.schema.pSchema);
+ taosMemoryFreeClear(pReq->ntb.schemaRow.pSchema);
}
static int32_t buildNormalTableBatchReq(int32_t acctId, const SCreateTableStmt* pStmt, const SVgroupInfo* pVgroupInfo,
@@ -3898,10 +4026,10 @@ static int32_t buildNormalTableBatchReq(int32_t acctId, const SCreateTableStmt*
SVCreateTbReq req = {0};
req.type = TD_NORMAL_TABLE;
req.name = strdup(pStmt->tableName);
- req.ntb.schema.nCols = LIST_LENGTH(pStmt->pCols);
- req.ntb.schema.sver = 1;
- req.ntb.schema.pSchema = taosMemoryCalloc(req.ntb.schema.nCols, sizeof(SSchema));
- if (NULL == req.name || NULL == req.ntb.schema.pSchema) {
+ req.ntb.schemaRow.nCols = LIST_LENGTH(pStmt->pCols);
+ req.ntb.schemaRow.version = 1;
+ req.ntb.schemaRow.pSchema = taosMemoryCalloc(req.ntb.schemaRow.nCols, sizeof(SSchema));
+ if (NULL == req.name || NULL == req.ntb.schemaRow.pSchema) {
destroyCreateTbReq(&req);
return TSDB_CODE_OUT_OF_MEMORY;
}
@@ -3911,7 +4039,7 @@ static int32_t buildNormalTableBatchReq(int32_t acctId, const SCreateTableStmt*
SNode* pCol;
col_id_t index = 0;
FOREACH(pCol, pStmt->pCols) {
- toSchema((SColumnDefNode*)pCol, index + 1, req.ntb.schema.pSchema + index);
+ toSchema((SColumnDefNode*)pCol, index + 1, req.ntb.schemaRow.pSchema + index);
++index;
}
pBatch->info = *pVgroupInfo;
@@ -3965,7 +4093,7 @@ static void destroyCreateTbReqBatch(SVgroupCreateTableBatch* pTbBatch) {
taosMemoryFreeClear(pTableReq->name);
if (pTableReq->type == TSDB_NORMAL_TABLE) {
- taosMemoryFreeClear(pTableReq->ntb.schema.pSchema);
+ taosMemoryFreeClear(pTableReq->ntb.schemaRow.pSchema);
} else if (pTableReq->type == TSDB_CHILD_TABLE) {
taosMemoryFreeClear(pTableReq->ctb.pTag);
}
@@ -4038,8 +4166,8 @@ static int32_t rewriteCreateTable(STranslateContext* pCxt, SQuery* pQuery) {
return code;
}
-static void addCreateTbReqIntoVgroup(int32_t acctId, SHashObj* pVgroupHashmap, SCreateSubTableClause* pStmt, SKVRow row,
- uint64_t suid, SVgroupInfo* pVgInfo) {
+static void addCreateTbReqIntoVgroup(int32_t acctId, SHashObj* pVgroupHashmap, SCreateSubTableClause* pStmt,
+ const STag* pTag, uint64_t suid, SVgroupInfo* pVgInfo) {
char dbFName[TSDB_DB_FNAME_LEN] = {0};
SName name = {.type = TSDB_DB_NAME_T, .acctId = acctId};
strcpy(name.dbname, pStmt->dbName);
@@ -4049,7 +4177,7 @@ static void addCreateTbReqIntoVgroup(int32_t acctId, SHashObj* pVgroupHashmap, S
req.type = TD_CHILD_TABLE;
req.name = strdup(pStmt->tableName);
req.ctb.suid = suid;
- req.ctb.pTag = row;
+ req.ctb.pTag = (uint8_t*)pTag;
if (pStmt->ignoreExists) {
req.flags |= TD_CREATE_IF_NOT_EXISTS;
}
@@ -4069,26 +4197,6 @@ static void addCreateTbReqIntoVgroup(int32_t acctId, SHashObj* pVgroupHashmap, S
}
}
-static int32_t addValToKVRow(STranslateContext* pCxt, SValueNode* pVal, const SSchema* pSchema,
- SKVRowBuilder* pBuilder) {
- if (pSchema->type == TSDB_DATA_TYPE_JSON) {
- if (pVal->literal && strlen(pVal->literal) > (TSDB_MAX_JSON_TAG_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) {
- return buildSyntaxErrMsg(&pCxt->msgBuf, "json string too long than 4095", pVal->literal);
- }
-
- return parseJsontoTagData(pVal->literal, pBuilder, &pCxt->msgBuf, pSchema->colId);
- }
-
- if (pVal->node.resType.type == TSDB_DATA_TYPE_NULL) {
- // todo
- } else {
- tdAddColToKVRow(pBuilder, pSchema->colId, nodesGetValueFromNode(pVal),
- IS_VAR_DATA_TYPE(pSchema->type) ? varDataTLen(pVal->datum.p) : TYPE_BYTES[pSchema->type]);
- }
-
- return TSDB_CODE_SUCCESS;
-}
-
static int32_t createValueFromFunction(STranslateContext* pCxt, SFunctionNode* pFunc, SValueNode** pVal) {
int32_t code = getFuncInfo(pCxt, pFunc);
if (TSDB_CODE_SUCCESS == code) {
@@ -4097,16 +4205,17 @@ static int32_t createValueFromFunction(STranslateContext* pCxt, SFunctionNode* p
return code;
}
-static SDataType schemaToDataType(SSchema* pSchema) {
- SDataType dt = {.type = pSchema->type, .bytes = pSchema->bytes, .precision = 0, .scale = 0};
+static SDataType schemaToDataType(uint8_t precision, SSchema* pSchema) {
+ SDataType dt = {.type = pSchema->type, .bytes = pSchema->bytes, .precision = precision, .scale = 0};
return dt;
}
-static int32_t translateTagVal(STranslateContext* pCxt, SSchema* pSchema, SNode* pNode, SValueNode** pVal) {
+static int32_t translateTagVal(STranslateContext* pCxt, uint8_t precision, SSchema* pSchema, SNode* pNode,
+ SValueNode** pVal) {
if (QUERY_NODE_FUNCTION == nodeType(pNode)) {
return createValueFromFunction(pCxt, (SFunctionNode*)pNode, pVal);
} else if (QUERY_NODE_VALUE == nodeType(pNode)) {
- return (DEAL_RES_ERROR == translateValueImpl(pCxt, (SValueNode*)pNode, schemaToDataType(pSchema))
+ return (DEAL_RES_ERROR == translateValueImpl(pCxt, (SValueNode*)pNode, schemaToDataType(precision, pSchema))
? pCxt->errCode
: TSDB_CODE_SUCCESS);
} else {
@@ -4115,15 +4224,22 @@ static int32_t translateTagVal(STranslateContext* pCxt, SSchema* pSchema, SNode*
}
static int32_t buildKVRowForBindTags(STranslateContext* pCxt, SCreateSubTableClause* pStmt, STableMeta* pSuperTableMeta,
- SKVRowBuilder* pBuilder) {
+ STag** ppTag) {
int32_t numOfTags = getNumOfTags(pSuperTableMeta);
if (LIST_LENGTH(pStmt->pValsOfTags) != LIST_LENGTH(pStmt->pSpecificTags) ||
numOfTags < LIST_LENGTH(pStmt->pValsOfTags)) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_TAGS_NOT_MATCHED);
}
+ SArray* pTagArray = taosArrayInit(LIST_LENGTH(pStmt->pValsOfTags), sizeof(STagVal));
+ if (!pTagArray) {
+ return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_TSC_OUT_OF_MEMORY);
+ }
+ int32_t code = TSDB_CODE_SUCCESS;
+ int16_t nTags = 0, nBufPos = 0;
SSchema* pTagSchema = getTableTagSchema(pSuperTableMeta);
- SNode * pTag, *pNode;
+ SNode * pTag = NULL, *pNode = NULL;
+ bool isJson = false;
FORBOTH(pTag, pStmt->pSpecificTags, pNode, pStmt->pValsOfTags) {
SColumnNode* pCol = (SColumnNode*)pTag;
SSchema* pSchema = NULL;
@@ -4134,56 +4250,125 @@ static int32_t buildKVRowForBindTags(STranslateContext* pCxt, SCreateSubTableCla
}
}
if (NULL == pSchema) {
- return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TAG_NAME, pCol->colName);
+ code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TAG_NAME, pCol->colName);
+ goto end;
}
SValueNode* pVal = NULL;
- int32_t code = translateTagVal(pCxt, pSchema, pNode, &pVal);
- if (TSDB_CODE_SUCCESS == code) {
- if (NULL == pVal) {
- pVal = (SValueNode*)pNode;
- } else {
- REPLACE_LIST2_NODE(pVal);
- }
+ code = translateTagVal(pCxt, pSuperTableMeta->tableInfo.precision, pSchema, pNode, &pVal);
+ if (TSDB_CODE_SUCCESS != code) {
+ goto end;
}
- if (TSDB_CODE_SUCCESS == code) {
- code = addValToKVRow(pCxt, pVal, pSchema, pBuilder);
+
+ if (NULL == pVal) {
+ pVal = (SValueNode*)pNode;
+ } else {
+ REPLACE_LIST2_NODE(pVal);
}
- if (TSDB_CODE_SUCCESS != code) {
- return code;
+ if (pTagSchema->type == TSDB_DATA_TYPE_JSON) {
+ if (pVal->literal && strlen(pVal->literal) > (TSDB_MAX_JSON_TAG_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) {
+ code = buildSyntaxErrMsg(&pCxt->msgBuf, "json string too long than 4095", pVal->literal);
+ goto end;
+ }
+
+ isJson = true;
+ code = parseJsontoTagData(pVal->literal, pTagArray, ppTag, &pCxt->msgBuf);
+ if(code != TSDB_CODE_SUCCESS){
+ goto end;
+ }
+ }else if (pVal->node.resType.type != TSDB_DATA_TYPE_NULL) {
+ void* nodeVal = nodesGetValueFromNode(pVal);
+ STagVal val = {.cid = pTagSchema->colId, .type = pTagSchema->type};
+ if (IS_VAR_DATA_TYPE(pTagSchema->type)) {
+ val.pData = varDataVal(nodeVal);
+ val.nData = varDataLen(nodeVal);
+ } else {
+ memcpy(&val.i64, nodeVal, pTagSchema->bytes);
+ }
+ taosArrayPush(pTagArray, &val);
}
}
+ if(!isJson) code = tTagNew(pTagArray, 1, false, ppTag);
+
+end:
+ if(isJson){
+ for (int i = 0; i < taosArrayGetSize(pTagArray); ++i) {
+ STagVal *p = (STagVal *)taosArrayGet(pTagArray, i);
+ if(IS_VAR_DATA_TYPE(p->type)){
+ taosMemoryFree(p->pData);
+ }
+ }
+ }
+ taosArrayDestroy(pTagArray);
return TSDB_CODE_SUCCESS;
}
static int32_t buildKVRowForAllTags(STranslateContext* pCxt, SCreateSubTableClause* pStmt, STableMeta* pSuperTableMeta,
- SKVRowBuilder* pBuilder) {
+ STag** ppTag) {
if (getNumOfTags(pSuperTableMeta) != LIST_LENGTH(pStmt->pValsOfTags)) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_TAGS_NOT_MATCHED);
}
- SSchema* pTagSchema = getTableTagSchema(pSuperTableMeta);
+ SSchema* pTagSchemas = getTableTagSchema(pSuperTableMeta);
SNode* pNode;
+ int32_t code = TSDB_CODE_SUCCESS;
int32_t index = 0;
+ SArray* pTagArray = taosArrayInit(LIST_LENGTH(pStmt->pValsOfTags), sizeof(STagVal));
+ if (!pTagArray) {
+ return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_TSC_OUT_OF_MEMORY);
+ }
+
+ bool isJson = false;
FOREACH(pNode, pStmt->pValsOfTags) {
SValueNode* pVal = NULL;
- int32_t code = translateTagVal(pCxt, pTagSchema + index, pNode, &pVal);
- if (TSDB_CODE_SUCCESS == code) {
- if (NULL == pVal) {
- pVal = (SValueNode*)pNode;
+ SSchema* pTagSchema = pTagSchemas + index;
+ code = translateTagVal(pCxt, pSuperTableMeta->tableInfo.precision, pTagSchema, pNode, &pVal);
+ if (TSDB_CODE_SUCCESS != code) {
+ goto end;
+ }
+ if (NULL == pVal) {
+ pVal = (SValueNode*)pNode;
+ } else {
+ REPLACE_NODE(pVal);
+ }
+ if (pTagSchema->type == TSDB_DATA_TYPE_JSON) {
+ if (pVal->literal && strlen(pVal->literal) > (TSDB_MAX_JSON_TAG_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) {
+ code = buildSyntaxErrMsg(&pCxt->msgBuf, "json string too long than 4095", pVal->literal);
+ goto end;
+ }
+
+ isJson = true;
+ code = parseJsontoTagData(pVal->literal, pTagArray, ppTag, &pCxt->msgBuf);
+ if(code != TSDB_CODE_SUCCESS){
+ goto end;
+ }
+ }else if (pVal->node.resType.type != TSDB_DATA_TYPE_NULL) {
+ char* tmpVal = nodesGetValueFromNode(pVal);
+ STagVal val = {.cid = pTagSchema->colId, .type = pTagSchema->type};
+ if (IS_VAR_DATA_TYPE(pTagSchema->type)) {
+ val.pData = varDataVal(tmpVal);
+ val.nData = varDataLen(tmpVal);
} else {
- REPLACE_NODE(pVal);
+ memcpy(&val.i64, tmpVal, pTagSchema->bytes);
}
+ taosArrayPush(pTagArray, &val);
}
- if (TSDB_CODE_SUCCESS == code) {
- code = addValToKVRow(pCxt, pVal, pTagSchema + index++, pBuilder);
- }
- if (TSDB_CODE_SUCCESS != code) {
- return code;
+ ++index;
+ }
+ if(!isJson) code = tTagNew(pTagArray, 1, false, ppTag);
+
+end:
+ if(isJson){
+ for (int i = 0; i < taosArrayGetSize(pTagArray); ++i) {
+ STagVal *p = (STagVal *)taosArrayGet(pTagArray, i);
+ if(IS_VAR_DATA_TYPE(p->type)){
+ taosMemoryFree(p->pData);
+ }
}
}
- return TSDB_CODE_SUCCESS;
+ taosArrayDestroy(pTagArray);
+ return code;
}
static int32_t checkCreateSubTable(STranslateContext* pCxt, SCreateSubTableClause* pStmt) {
@@ -4200,26 +4385,13 @@ static int32_t rewriteCreateSubTable(STranslateContext* pCxt, SCreateSubTableCla
code = getTableMeta(pCxt, pStmt->useDbName, pStmt->useTableName, &pSuperTableMeta);
}
- SKVRowBuilder kvRowBuilder = {0};
- if (TSDB_CODE_SUCCESS == code) {
- code = tdInitKVRowBuilder(&kvRowBuilder);
- }
+ STag* pTag = NULL;
if (TSDB_CODE_SUCCESS == code) {
if (NULL != pStmt->pSpecificTags) {
- code = buildKVRowForBindTags(pCxt, pStmt, pSuperTableMeta, &kvRowBuilder);
- } else {
- code = buildKVRowForAllTags(pCxt, pStmt, pSuperTableMeta, &kvRowBuilder);
- }
- }
-
- SKVRow row = NULL;
- if (TSDB_CODE_SUCCESS == code) {
- row = tdGetKVRowFromBuilder(&kvRowBuilder);
- if (NULL == row) {
- code = TSDB_CODE_OUT_OF_MEMORY;
+ code = buildKVRowForBindTags(pCxt, pStmt, pSuperTableMeta, &pTag);
} else {
- tdSortKVRowByColIdx(row);
+ code = buildKVRowForAllTags(pCxt, pStmt, pSuperTableMeta, &pTag);
}
}
@@ -4228,11 +4400,10 @@ static int32_t rewriteCreateSubTable(STranslateContext* pCxt, SCreateSubTableCla
code = getTableHashVgroup(pCxt, pStmt->dbName, pStmt->tableName, &info);
}
if (TSDB_CODE_SUCCESS == code) {
- addCreateTbReqIntoVgroup(pCxt->pParseCxt->acctId, pVgroupHashmap, pStmt, row, pSuperTableMeta->uid, &info);
+ addCreateTbReqIntoVgroup(pCxt->pParseCxt->acctId, pVgroupHashmap, pStmt, pTag, pSuperTableMeta->uid, &info);
}
taosMemoryFreeClear(pSuperTableMeta);
- tdDestroyKVRowBuilder(&kvRowBuilder);
return code;
}
@@ -4258,6 +4429,7 @@ static SArray* serializeVgroupsCreateTableBatch(int32_t acctId, SHashObj* pVgrou
}
static int32_t rewriteCreateMultiTable(STranslateContext* pCxt, SQuery* pQuery) {
+
SCreateMultiTableStmt* pStmt = (SCreateMultiTableStmt*)pQuery->pRoot;
SHashObj* pVgroupHashmap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
@@ -4268,6 +4440,10 @@ static int32_t rewriteCreateMultiTable(STranslateContext* pCxt, SQuery* pQuery)
int32_t code = TSDB_CODE_SUCCESS;
SNode* pNode;
FOREACH(pNode, pStmt->pSubTables) {
+ if(pCxt->pParseCxt->schemalessType == 0 &&
+ (code = isNotSchemalessDb(pCxt->pParseCxt, ((SCreateSubTableClause*)pNode)->dbName)) != TSDB_CODE_SUCCESS){
+ return code;
+ }
code = rewriteCreateSubTable(pCxt, (SCreateSubTableClause*)pNode, pVgroupHashmap);
if (TSDB_CODE_SUCCESS != code) {
taosHashCleanup(pVgroupHashmap);
@@ -4447,42 +4623,48 @@ static int32_t buildUpdateTagValReq(STranslateContext* pCxt, SAlterTableStmt* pS
return TSDB_CODE_OUT_OF_MEMORY;
}
- if (DEAL_RES_ERROR == translateValueImpl(pCxt, pStmt->pVal, schemaToDataType(pSchema))) {
+ if (DEAL_RES_ERROR ==
+ translateValueImpl(pCxt, pStmt->pVal, schemaToDataType(pTableMeta->tableInfo.precision, pSchema))) {
return pCxt->errCode;
}
pReq->isNull = (TSDB_DATA_TYPE_NULL == pStmt->pVal->node.resType.type);
- if(pStmt->pVal->node.resType.type == TSDB_DATA_TYPE_JSON){
- SKVRowBuilder kvRowBuilder = {0};
- int32_t code = tdInitKVRowBuilder(&kvRowBuilder);
-
- if (TSDB_CODE_SUCCESS != code) {
- return TSDB_CODE_OUT_OF_MEMORY;
- }
- if (pStmt->pVal->literal && strlen(pStmt->pVal->literal) > (TSDB_MAX_JSON_TAG_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) {
+ if (pStmt->pVal->node.resType.type == TSDB_DATA_TYPE_JSON) {
+ if (pStmt->pVal->literal &&
+ strlen(pStmt->pVal->literal) > (TSDB_MAX_JSON_TAG_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) {
return buildSyntaxErrMsg(&pCxt->msgBuf, "json string too long than 4095", pStmt->pVal->literal);
}
-
- code = parseJsontoTagData(pStmt->pVal->literal, &kvRowBuilder, &pCxt->msgBuf, pSchema->colId);
- if (TSDB_CODE_SUCCESS != code) {
- return code;
+ SArray *pTagVals = taosArrayInit(1, sizeof(STagVal));
+ int32_t code = TSDB_CODE_SUCCESS;
+ STag* pTag = NULL;
+ do{
+ code = parseJsontoTagData(pStmt->pVal->literal, pTagVals, &pTag, &pCxt->msgBuf);
+ if (TSDB_CODE_SUCCESS != code) {
+ break;
+ }
+ }while(0);
+ for (int i = 0; i < taosArrayGetSize(pTagVals); ++i) {
+ STagVal *p = (STagVal *)taosArrayGet(pTagVals, i);
+ if(IS_VAR_DATA_TYPE(p->type)){
+ taosMemoryFree(p->pData);
+ }
}
-
- SKVRow row = tdGetKVRowFromBuilder(&kvRowBuilder);
- if (NULL == row) {
- tdDestroyKVRowBuilder(&kvRowBuilder);
- return TSDB_CODE_OUT_OF_MEMORY;
+ taosArrayDestroy(pTagVals);
+ if (code != TSDB_CODE_SUCCESS){
+ return code;
}
- pReq->nTagVal = kvRowLen(row);
- pReq->pTagVal = row;
- pStmt->pVal->datum.p = row; // for free
- tdDestroyKVRowBuilder(&kvRowBuilder);
- }else{
+ pReq->nTagVal = pTag->len;
+ pReq->pTagVal = (uint8_t *)pTag;
+ pStmt->pVal->datum.p = (char*)pTag; // for free
+ } else {
pReq->nTagVal = pStmt->pVal->node.resType.bytes;
- if (TSDB_DATA_TYPE_NCHAR == pStmt->pVal->node.resType.type) {
- pReq->nTagVal = pReq->nTagVal * TSDB_NCHAR_SIZE;
- }
pReq->pTagVal = nodesGetValueFromNode(pStmt->pVal);
+
+ // data and length are seperated for new tag format STagVal
+ if (IS_VAR_DATA_TYPE(pStmt->pVal->node.resType.type)) {
+ pReq->nTagVal = varDataLen(pReq->pTagVal);
+ pReq->pTagVal = varDataVal(pReq->pTagVal);
+ }
}
return TSDB_CODE_SUCCESS;
@@ -4673,9 +4855,14 @@ static int32_t buildModifyVnodeArray(STranslateContext* pCxt, SAlterTableStmt* p
static int32_t rewriteAlterTable(STranslateContext* pCxt, SQuery* pQuery) {
SAlterTableStmt* pStmt = (SAlterTableStmt*)pQuery->pRoot;
+ int32_t code = TSDB_CODE_SUCCESS;
+ if(pCxt->pParseCxt->schemalessType == 0 &&
+ (code = isNotSchemalessDb(pCxt->pParseCxt, pStmt->dbName)) != TSDB_CODE_SUCCESS){
+ return code;
+ }
STableMeta* pTableMeta = NULL;
- int32_t code = getTableMeta(pCxt, pStmt->dbName, pStmt->tableName, &pTableMeta);
+ code = getTableMeta(pCxt, pStmt->dbName, pStmt->tableName, &pTableMeta);
if (TSDB_CODE_SUCCESS != code) {
return code;
}
@@ -4688,16 +4875,16 @@ static int32_t rewriteAlterTable(STranslateContext* pCxt, SQuery* pQuery) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COL_JSON);
}
- if (getNumOfTags(pTableMeta) == 1 && pStmt->alterType == TSDB_ALTER_TABLE_DROP_TAG) {
- return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE, "can not drop tag if there is only one tag");
+ if (getNumOfTags(pTableMeta) == 1 && pStmt->alterType == TSDB_ALTER_TABLE_DROP_TAG) {
+ return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE,
+ "can not drop tag if there is only one tag");
}
if (TSDB_SUPER_TABLE == pTableMeta->tableType) {
SSchema* pTagsSchema = getTableTagSchema(pTableMeta);
if (getNumOfTags(pTableMeta) == 1 && pTagsSchema->type == TSDB_DATA_TYPE_JSON &&
- (pStmt->alterType == TSDB_ALTER_TABLE_ADD_TAG ||
- pStmt->alterType == TSDB_ALTER_TABLE_DROP_TAG ||
- pStmt->alterType == TSDB_ALTER_TABLE_UPDATE_TAG_BYTES)) {
+ (pStmt->alterType == TSDB_ALTER_TABLE_ADD_TAG || pStmt->alterType == TSDB_ALTER_TABLE_DROP_TAG ||
+ pStmt->alterType == TSDB_ALTER_TABLE_UPDATE_TAG_BYTES)) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_ONE_JSON_TAG);
}
return TSDB_CODE_SUCCESS;
@@ -4764,6 +4951,47 @@ static int32_t rewriteQuery(STranslateContext* pCxt, SQuery* pQuery) {
return code;
}
+static int32_t toMsgType(ENodeType type) {
+ switch (type) {
+ case QUERY_NODE_CREATE_TABLE_STMT:
+ return TDMT_VND_CREATE_TABLE;
+ case QUERY_NODE_ALTER_TABLE_STMT:
+ return TDMT_VND_ALTER_TABLE;
+ case QUERY_NODE_DROP_TABLE_STMT:
+ return TDMT_VND_DROP_TABLE;
+ default:
+ break;
+ }
+ return TDMT_VND_CREATE_TABLE;
+}
+
+static int32_t setRefreshMate(STranslateContext* pCxt, SQuery* pQuery) {
+ if (NULL != pCxt->pDbs) {
+ pQuery->pDbList = taosArrayInit(taosHashGetSize(pCxt->pDbs), TSDB_DB_FNAME_LEN);
+ if (NULL == pQuery->pDbList) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ SFullDatabaseName* pDb = taosHashIterate(pCxt->pDbs, NULL);
+ while (NULL != pDb) {
+ taosArrayPush(pQuery->pDbList, pDb->fullDbName);
+ pDb = taosHashIterate(pCxt->pDbs, pDb);
+ }
+ }
+
+ if (NULL != pCxt->pTables) {
+ pQuery->pTableList = taosArrayInit(taosHashGetSize(pCxt->pTables), sizeof(SName));
+ if (NULL == pQuery->pTableList) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ SName* pTable = taosHashIterate(pCxt->pTables, NULL);
+ while (NULL != pTable) {
+ taosArrayPush(pQuery->pTableList, pTable);
+ pTable = taosHashIterate(pCxt->pTables, pTable);
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery) {
switch (nodeType(pQuery->pRoot)) {
case QUERY_NODE_SELECT_STMT:
@@ -4775,7 +5003,7 @@ static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery) {
break;
case QUERY_NODE_VNODE_MODIF_STMT:
pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
- pQuery->msgType = TDMT_VND_CREATE_TABLE;
+ pQuery->msgType = toMsgType(((SVnodeModifOpStmt*)pQuery->pRoot)->sqlNodeType);
break;
case QUERY_NODE_DESCRIBE_STMT:
pQuery->execMode = QUERY_EXEC_MODE_LOCAL;
@@ -4803,37 +5031,13 @@ static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery) {
}
}
- if (NULL != pCxt->pDbs) {
- pQuery->pDbList = taosArrayInit(taosHashGetSize(pCxt->pDbs), TSDB_DB_FNAME_LEN);
- if (NULL == pQuery->pDbList) {
- return TSDB_CODE_OUT_OF_MEMORY;
- }
- SFullDatabaseName* pDb = taosHashIterate(pCxt->pDbs, NULL);
- while (NULL != pDb) {
- taosArrayPush(pQuery->pDbList, pDb->fullDbName);
- pDb = taosHashIterate(pCxt->pDbs, pDb);
- }
- }
-
- if (NULL != pCxt->pTables) {
- pQuery->pTableList = taosArrayInit(taosHashGetSize(pCxt->pTables), sizeof(SName));
- if (NULL == pQuery->pTableList) {
- return TSDB_CODE_OUT_OF_MEMORY;
- }
- SName* pTable = taosHashIterate(pCxt->pTables, NULL);
- while (NULL != pTable) {
- taosArrayPush(pQuery->pTableList, pTable);
- pTable = taosHashIterate(pCxt->pTables, pTable);
- }
- }
-
return TSDB_CODE_SUCCESS;
}
int32_t translate(SParseContext* pParseCxt, SQuery* pQuery) {
STranslateContext cxt = {0};
- int32_t code = initTranslateContext(pParseCxt, &cxt);
+ int32_t code = initTranslateContext(pParseCxt, pQuery->pMetaCache, &cxt);
if (TSDB_CODE_SUCCESS == code) {
code = fmFuncMgtInit();
}
@@ -4846,6 +5050,7 @@ int32_t translate(SParseContext* pParseCxt, SQuery* pQuery) {
if (TSDB_CODE_SUCCESS == code) {
code = setQuery(&cxt, pQuery);
}
+ setRefreshMate(&cxt, pQuery);
destroyTranslateContext(&cxt);
return code;
}
diff --git a/source/libs/parser/src/parUtil.c b/source/libs/parser/src/parUtil.c
index 652ed10ce8b71c71c8eaa640dde145499fe69098..0a1915d6c2fc5c1c776f6e991a2e39ee6d8a9aa3 100644
--- a/source/libs/parser/src/parUtil.c
+++ b/source/libs/parser/src/parUtil.c
@@ -15,6 +15,9 @@
#include "parUtil.h"
#include "cJSON.h"
+#include "querynodes.h"
+
+#define USER_AUTH_KEY_MAX_LEN TSDB_USER_LEN + TSDB_DB_FNAME_LEN + 2
static char* getSyntaxErrFormat(int32_t errCode) {
switch (errCode) {
@@ -173,6 +176,8 @@ static char* getSyntaxErrFormat(int32_t errCode) {
return "No columns can be dropped";
case TSDB_CODE_PAR_INVALID_COL_JSON:
return "Only tag can be json type";
+ case TSDB_CODE_PAR_VALUE_TOO_LONG:
+ return "Value too long for column/tag: %s";
case TSDB_CODE_OUT_OF_MEMORY:
return "Out of memory";
default:
@@ -253,17 +258,8 @@ STableComInfo getTableInfo(const STableMeta* pTableMeta) {
return pTableMeta->tableInfo;
}
-static uint32_t getTableMetaSize(const STableMeta* pTableMeta) {
- int32_t totalCols = 0;
- if (pTableMeta->tableInfo.numOfColumns >= 0) {
- totalCols = pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags;
- }
-
- return sizeof(STableMeta) + totalCols * sizeof(SSchema);
-}
-
STableMeta* tableMetaDup(const STableMeta* pTableMeta) {
- size_t size = getTableMetaSize(pTableMeta);
+ size_t size = TABLE_META_SIZE(pTableMeta);
STableMeta* p = taosMemoryMalloc(size);
memcpy(p, pTableMeta, size);
@@ -326,33 +322,35 @@ static bool isValidateTag(char* input) {
return true;
}
-int parseJsontoTagData(const char* json, SKVRowBuilder* kvRowBuilder, SMsgBuf* pMsgBuf, int16_t startColId) {
+int32_t parseJsontoTagData(const char* json, SArray* pTagVals, STag **ppTag, SMsgBuf* pMsgBuf) {
+ int32_t retCode = TSDB_CODE_SUCCESS;
+ cJSON* root = NULL;
+ SHashObj* keyHash = NULL;
+ int32_t size = 0;
// set json NULL data
- uint8_t jsonNULL = TSDB_DATA_TYPE_NULL;
- int jsonIndex = startColId + 1;
- if (!json || strtrim((char*)json) == 0 ||strcasecmp(json, TSDB_DATA_NULL_STR_L) == 0) {
- tdAddColToKVRow(kvRowBuilder, jsonIndex, &jsonNULL, CHAR_BYTES);
- return TSDB_CODE_SUCCESS;
+ if (!json || strtrim((char*)json) == 0 || strcasecmp(json, TSDB_DATA_NULL_STR_L) == 0) {
+ retCode = TSDB_CODE_SUCCESS;
+ goto end;
}
// set json real data
- cJSON* root = cJSON_Parse(json);
+ root = cJSON_Parse(json);
if (root == NULL) {
- return buildSyntaxErrMsg(pMsgBuf, "json parse error", json);
+ retCode = buildSyntaxErrMsg(pMsgBuf, "json parse error", json);
+ goto end;
}
- int size = cJSON_GetArraySize(root);
+ size = cJSON_GetArraySize(root);
if (!cJSON_IsObject(root)) {
- return buildSyntaxErrMsg(pMsgBuf, "json error invalide value", json);
+ retCode = buildSyntaxErrMsg(pMsgBuf, "json error invalide value", json);
+ goto end;
}
- int retCode = 0;
- char* tagKV = NULL;
- SHashObj* keyHash = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, false);
- for (int i = 0; i < size; i++) {
+ keyHash = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, false);
+ for (int32_t i = 0; i < size; i++) {
cJSON* item = cJSON_GetArrayItem(root, i);
if (!item) {
- qError("json inner error:%d", i);
+ uError("json inner error:%d", i);
retCode = buildSyntaxErrMsg(pMsgBuf, "json inner error", json);
goto end;
}
@@ -363,86 +361,467 @@ int parseJsontoTagData(const char* json, SKVRowBuilder* kvRowBuilder, SMsgBuf* p
goto end;
}
size_t keyLen = strlen(jsonKey);
- if(keyLen > TSDB_MAX_JSON_KEY_LEN){
- qError("json key too long error");
- retCode = buildSyntaxErrMsg(pMsgBuf, "json key too long, more than 256", jsonKey);
+ if (keyLen > TSDB_MAX_JSON_KEY_LEN) {
+ uError("json key too long error");
+ retCode = buildSyntaxErrMsg(pMsgBuf, "json key too long, more than 256", jsonKey);
goto end;
}
if (keyLen == 0 || taosHashGet(keyHash, jsonKey, keyLen) != NULL) {
continue;
}
- // key: keyLen + VARSTR_HEADER_SIZE, value type: CHAR_BYTES, value reserved: DOUBLE_BYTES
- tagKV = taosMemoryCalloc(keyLen + VARSTR_HEADER_SIZE + CHAR_BYTES + DOUBLE_BYTES, 1);
- if (!tagKV) {
- retCode = TSDB_CODE_TSC_OUT_OF_MEMORY;
- goto end;
- }
- strncpy(varDataVal(tagKV), jsonKey, keyLen);
- varDataSetLen(tagKV, keyLen);
- if (taosHashGetSize(keyHash) == 0) {
- uint8_t jsonNotNULL = TSDB_DATA_TYPE_JSON;
- tdAddColToKVRow(kvRowBuilder, jsonIndex++, &jsonNotNULL, CHAR_BYTES); // add json type
- }
- taosHashPut(keyHash, jsonKey, keyLen, &keyLen,
- CHAR_BYTES); // add key to hash to remove dumplicate, value is useless
+ STagVal val = {0};
+ val.pKey = jsonKey;
+ taosHashPut(keyHash, jsonKey, keyLen, &keyLen, CHAR_BYTES); // add key to hash to remove dumplicate, value is useless
if (item->type == cJSON_String) { // add json value format: type|data
char* jsonValue = item->valuestring;
int32_t valLen = (int32_t)strlen(jsonValue);
- int32_t totalLen = keyLen + VARSTR_HEADER_SIZE + valLen * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE + CHAR_BYTES;
- char* tmp = taosMemoryRealloc(tagKV, totalLen);
+ char* tmp = taosMemoryCalloc(1, valLen * TSDB_NCHAR_SIZE);
if (!tmp) {
retCode = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto end;
}
- tagKV = tmp;
- char* valueType = POINTER_SHIFT(tagKV, keyLen + VARSTR_HEADER_SIZE);
- char* valueData = POINTER_SHIFT(tagKV, keyLen + VARSTR_HEADER_SIZE + CHAR_BYTES);
- *valueType = TSDB_DATA_TYPE_NCHAR;
- if (valLen > 0 && !taosMbsToUcs4(jsonValue, valLen, (TdUcs4*)varDataVal(valueData),
+ val.type = TSDB_DATA_TYPE_NCHAR;
+ if (valLen > 0 && !taosMbsToUcs4(jsonValue, valLen, (TdUcs4*)tmp,
(int32_t)(valLen * TSDB_NCHAR_SIZE), &valLen)) {
- qError("charset:%s to %s. val:%s, errno:%s, convert failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, jsonValue,
+ uError("charset:%s to %s. val:%s, errno:%s, convert failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, jsonValue,
strerror(errno));
retCode = buildSyntaxErrMsg(pMsgBuf, "charset convert json error", jsonValue);
goto end;
}
-
- varDataSetLen(valueData, valLen);
- tdAddColToKVRow(kvRowBuilder, jsonIndex++, tagKV, totalLen);
+ val.nData = valLen;
+ val.pData = tmp;
} else if (item->type == cJSON_Number) {
if (!isfinite(item->valuedouble)) {
- qError("json value is invalidate");
+ uError("json value is invalidate");
retCode = buildSyntaxErrMsg(pMsgBuf, "json value number is illegal", json);
goto end;
}
- char* valueType = POINTER_SHIFT(tagKV, keyLen + VARSTR_HEADER_SIZE);
- char* valueData = POINTER_SHIFT(tagKV, keyLen + VARSTR_HEADER_SIZE + CHAR_BYTES);
- *valueType = TSDB_DATA_TYPE_DOUBLE;
- *((double*)valueData) = item->valuedouble;
- tdAddColToKVRow(kvRowBuilder, jsonIndex++, tagKV, keyLen + VARSTR_HEADER_SIZE + CHAR_BYTES + DOUBLE_BYTES);
+ val.type = TSDB_DATA_TYPE_DOUBLE;
+ *((double*)&(val.i64)) = item->valuedouble;
} else if (item->type == cJSON_True || item->type == cJSON_False) {
- char* valueType = POINTER_SHIFT(tagKV, keyLen + VARSTR_HEADER_SIZE);
- char* valueData = POINTER_SHIFT(tagKV, keyLen + VARSTR_HEADER_SIZE + CHAR_BYTES);
- *valueType = TSDB_DATA_TYPE_BOOL;
- *valueData = (char)(item->valueint);
- tdAddColToKVRow(kvRowBuilder, jsonIndex++, tagKV, keyLen + VARSTR_HEADER_SIZE + CHAR_BYTES + CHAR_BYTES);
+ val.type = TSDB_DATA_TYPE_BOOL;
+ *((char*)&(val.i64)) = (char)(item->valueint);
} else if (item->type == cJSON_NULL) {
- char* valueType = POINTER_SHIFT(tagKV, keyLen + VARSTR_HEADER_SIZE);
- *valueType = TSDB_DATA_TYPE_NULL;
- tdAddColToKVRow(kvRowBuilder, jsonIndex++, tagKV, keyLen + VARSTR_HEADER_SIZE + CHAR_BYTES);
+ val.type = TSDB_DATA_TYPE_NULL;
} else {
retCode = buildSyntaxErrMsg(pMsgBuf, "invalidate json value", json);
goto end;
}
- }
-
- if (taosHashGetSize(keyHash) == 0) { // set json NULL true
- tdAddColToKVRow(kvRowBuilder, jsonIndex, &jsonNULL, CHAR_BYTES);
+ taosArrayPush(pTagVals, &val);
}
end:
- taosMemoryFree(tagKV);
taosHashCleanup(keyHash);
+ if(retCode == TSDB_CODE_SUCCESS){
+ tTagNew(pTagVals, 1, true, ppTag);
+ }
cJSON_Delete(root);
return retCode;
-}
\ No newline at end of file
+}
+
+static int32_t userAuthToString(int32_t acctId, const char* pUser, const char* pDb, AUTH_TYPE type, char* pStr) {
+ return sprintf(pStr, "%s*%d.%s*%d", pUser, acctId, pDb, type);
+}
+
+static int32_t userAuthToStringExt(const char* pUser, const char* pDbFName, AUTH_TYPE type, char* pStr) {
+ return sprintf(pStr, "%s*%s*%d", pUser, pDbFName, type);
+}
+
+static void stringToUserAuth(const char* pStr, int32_t len, SUserAuthInfo* pUserAuth) {
+ char* p1 = strchr(pStr, '*');
+ strncpy(pUserAuth->user, pStr, p1 - pStr);
+ ++p1;
+ char* p2 = strchr(p1, '*');
+ strncpy(pUserAuth->dbFName, p1, p2 - p1);
+ ++p2;
+ char buf[10] = {0};
+ strncpy(buf, p2, len - (p2 - pStr));
+ pUserAuth->type = taosStr2Int32(buf, NULL, 10);
+}
+
+static int32_t buildTableReq(SHashObj* pTablesHash, SArray** pTables) {
+ if (NULL != pTablesHash) {
+ *pTables = taosArrayInit(taosHashGetSize(pTablesHash), sizeof(SName));
+ if (NULL == *pTables) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ void* p = taosHashIterate(pTablesHash, NULL);
+ while (NULL != p) {
+ size_t len = 0;
+ char* pKey = taosHashGetKey(p, &len);
+ char fullName[TSDB_TABLE_FNAME_LEN] = {0};
+ strncpy(fullName, pKey, len);
+ SName name = {0};
+ tNameFromString(&name, fullName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
+ taosArrayPush(*pTables, &name);
+ p = taosHashIterate(pTablesHash, p);
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t buildDbReq(SHashObj* pDbsHash, SArray** pDbs) {
+ if (NULL != pDbsHash) {
+ *pDbs = taosArrayInit(taosHashGetSize(pDbsHash), TSDB_DB_FNAME_LEN);
+ if (NULL == *pDbs) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ void* p = taosHashIterate(pDbsHash, NULL);
+ while (NULL != p) {
+ size_t len = 0;
+ char* pKey = taosHashGetKey(p, &len);
+ char fullName[TSDB_DB_FNAME_LEN] = {0};
+ strncpy(fullName, pKey, len);
+ taosArrayPush(*pDbs, fullName);
+ p = taosHashIterate(pDbsHash, p);
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t buildTableMetaReq(SHashObj* pTableMetaHash, SArray** pTableMeta) {
+ return buildTableReq(pTableMetaHash, pTableMeta);
+}
+
+static int32_t buildDbVgroupReq(SHashObj* pDbVgroupHash, SArray** pDbVgroup) {
+ return buildDbReq(pDbVgroupHash, pDbVgroup);
+}
+
+static int32_t buildTableVgroupReq(SHashObj* pTableVgroupHash, SArray** pTableVgroup) {
+ return buildTableReq(pTableVgroupHash, pTableVgroup);
+}
+
+static int32_t buildDbCfgReq(SHashObj* pDbCfgHash, SArray** pDbCfg) { return buildDbReq(pDbCfgHash, pDbCfg); }
+
+static int32_t buildUserAuthReq(SHashObj* pUserAuthHash, SArray** pUserAuth) {
+ if (NULL != pUserAuthHash) {
+ *pUserAuth = taosArrayInit(taosHashGetSize(pUserAuthHash), sizeof(SUserAuthInfo));
+ if (NULL == *pUserAuth) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ void* p = taosHashIterate(pUserAuthHash, NULL);
+ while (NULL != p) {
+ size_t len = 0;
+ char* pKey = taosHashGetKey(p, &len);
+ SUserAuthInfo userAuth = {0};
+ stringToUserAuth(pKey, len, &userAuth);
+ taosArrayPush(*pUserAuth, &userAuth);
+ p = taosHashIterate(pUserAuthHash, p);
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t buildUdfReq(SHashObj* pUdfHash, SArray** pUdf) {
+ if (NULL != pUdfHash) {
+ *pUdf = taosArrayInit(taosHashGetSize(pUdfHash), TSDB_FUNC_NAME_LEN);
+ if (NULL == *pUdf) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ void* p = taosHashIterate(pUdfHash, NULL);
+ while (NULL != p) {
+ size_t len = 0;
+ char* pFunc = taosHashGetKey(p, &len);
+ char func[TSDB_FUNC_NAME_LEN] = {0};
+ strncpy(func, pFunc, len);
+ taosArrayPush(*pUdf, func);
+ p = taosHashIterate(pUdfHash, p);
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) {
+ int32_t code = buildTableMetaReq(pMetaCache->pTableMeta, &pCatalogReq->pTableMeta);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = buildDbVgroupReq(pMetaCache->pDbVgroup, &pCatalogReq->pDbVgroup);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = buildTableVgroupReq(pMetaCache->pTableVgroup, &pCatalogReq->pTableHash);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = buildDbCfgReq(pMetaCache->pDbCfg, &pCatalogReq->pDbCfg);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = buildUserAuthReq(pMetaCache->pUserAuth, &pCatalogReq->pUser);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = buildUdfReq(pMetaCache->pUdf, &pCatalogReq->pUdf);
+ }
+ return code;
+}
+
+static int32_t putTableMetaToCache(const SArray* pTableMetaReq, const SArray* pTableMetaData, SHashObj* pTableMeta) {
+ int32_t ntables = taosArrayGetSize(pTableMetaReq);
+ for (int32_t i = 0; i < ntables; ++i) {
+ char fullName[TSDB_TABLE_FNAME_LEN];
+ tNameExtractFullName(taosArrayGet(pTableMetaReq, i), fullName);
+ if (TSDB_CODE_SUCCESS !=
+ taosHashPut(pTableMeta, fullName, strlen(fullName), taosArrayGet(pTableMetaData, i), POINTER_BYTES)) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t putDbVgroupToCache(const SArray* pDbVgroupReq, const SArray* pDbVgroupData, SHashObj* pDbVgroup) {
+ int32_t nvgs = taosArrayGetSize(pDbVgroupReq);
+ for (int32_t i = 0; i < nvgs; ++i) {
+ char* pDbFName = taosArrayGet(pDbVgroupReq, i);
+ if (TSDB_CODE_SUCCESS !=
+ taosHashPut(pDbVgroup, pDbFName, strlen(pDbFName), taosArrayGet(pDbVgroupData, i), POINTER_BYTES)) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t putTableVgroupToCache(const SArray* pTableVgroupReq, const SArray* pTableVgroupData,
+ SHashObj* pTableVgroup) {
+ int32_t ntables = taosArrayGetSize(pTableVgroupReq);
+ for (int32_t i = 0; i < ntables; ++i) {
+ char fullName[TSDB_TABLE_FNAME_LEN];
+ tNameExtractFullName(taosArrayGet(pTableVgroupReq, i), fullName);
+ SVgroupInfo* pInfo = taosArrayGet(pTableVgroupData, i);
+ if (TSDB_CODE_SUCCESS != taosHashPut(pTableVgroup, fullName, strlen(fullName), &pInfo, POINTER_BYTES)) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t putDbCfgToCache(const SArray* pDbCfgReq, const SArray* pDbCfgData, SHashObj* pDbCfg) {
+ int32_t nvgs = taosArrayGetSize(pDbCfgReq);
+ for (int32_t i = 0; i < nvgs; ++i) {
+ char* pDbFName = taosArrayGet(pDbCfgReq, i);
+ SDbCfgInfo* pInfo = taosArrayGet(pDbCfgData, i);
+ if (TSDB_CODE_SUCCESS != taosHashPut(pDbCfg, pDbFName, strlen(pDbFName), &pInfo, POINTER_BYTES)) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t putUserAuthToCache(const SArray* pUserAuthReq, const SArray* pUserAuthData, SHashObj* pUserAuth) {
+ int32_t nvgs = taosArrayGetSize(pUserAuthReq);
+ for (int32_t i = 0; i < nvgs; ++i) {
+ SUserAuthInfo* pUser = taosArrayGet(pUserAuthReq, i);
+ char key[USER_AUTH_KEY_MAX_LEN] = {0};
+ int32_t len = userAuthToStringExt(pUser->user, pUser->dbFName, pUser->type, key);
+ if (TSDB_CODE_SUCCESS != taosHashPut(pUserAuth, key, len, taosArrayGet(pUserAuthData, i), sizeof(bool))) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t putUdfToCache(const SArray* pUdfReq, const SArray* pUdfData, SHashObj* pUdf) {
+ int32_t num = taosArrayGetSize(pUdfReq);
+ for (int32_t i = 0; i < num; ++i) {
+ char* pFunc = taosArrayGet(pUdfReq, i);
+ SFuncInfo* pInfo = taosArrayGet(pUdfData, i);
+ if (TSDB_CODE_SUCCESS != taosHashPut(pUdf, pFunc, strlen(pFunc), &pInfo, POINTER_BYTES)) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache) {
+ int32_t code = putTableMetaToCache(pCatalogReq->pTableMeta, pMetaData->pTableMeta, pMetaCache->pTableMeta);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = putDbVgroupToCache(pCatalogReq->pDbVgroup, pMetaData->pDbVgroup, pMetaCache->pDbVgroup);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = putTableVgroupToCache(pCatalogReq->pTableHash, pMetaData->pTableHash, pMetaCache->pTableVgroup);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = putDbCfgToCache(pCatalogReq->pDbCfg, pMetaData->pDbCfg, pMetaCache->pDbCfg);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = putUserAuthToCache(pCatalogReq->pUser, pMetaData->pUser, pMetaCache->pUserAuth);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = putUdfToCache(pCatalogReq->pUdf, pMetaData->pUdfList, pMetaCache->pUdf);
+ }
+ return code;
+}
+
+static int32_t reserveTableReqInCacheImpl(const char* pTbFName, int32_t len, SHashObj** pTables) {
+ if (NULL == *pTables) {
+ *pTables = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+ if (NULL == *pTables) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+ return taosHashPut(*pTables, pTbFName, len, &pTables, POINTER_BYTES);
+}
+
+static int32_t reserveTableReqInCache(int32_t acctId, const char* pDb, const char* pTable, SHashObj** pTables) {
+ char fullName[TSDB_TABLE_FNAME_LEN];
+ int32_t len = snprintf(fullName, sizeof(fullName), "%d.%s.%s", acctId, pDb, pTable);
+ return reserveTableReqInCacheImpl(fullName, len, pTables);
+}
+
+int32_t reserveTableMetaInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache) {
+ return reserveTableReqInCache(acctId, pDb, pTable, &pMetaCache->pTableMeta);
+}
+
+int32_t reserveTableMetaInCacheExt(const SName* pName, SParseMetaCache* pMetaCache) {
+ char fullName[TSDB_TABLE_FNAME_LEN];
+ tNameExtractFullName(pName, fullName);
+ return reserveTableReqInCacheImpl(fullName, strlen(fullName), &pMetaCache->pTableMeta);
+}
+
+int32_t getTableMetaFromCache(SParseMetaCache* pMetaCache, const SName* pName, STableMeta** pMeta) {
+ char fullName[TSDB_TABLE_FNAME_LEN];
+ tNameExtractFullName(pName, fullName);
+ STableMeta** pRes = taosHashGet(pMetaCache->pTableMeta, fullName, strlen(fullName));
+ if (NULL == pRes || NULL == *pRes) {
+ return TSDB_CODE_PAR_INTERNAL_ERROR;
+ }
+ *pMeta = tableMetaDup(*pRes);
+ if (NULL == *pMeta) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t reserveDbReqInCache(int32_t acctId, const char* pDb, SHashObj** pDbs) {
+ if (NULL == *pDbs) {
+ *pDbs = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+ if (NULL == *pDbs) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+ char fullName[TSDB_TABLE_FNAME_LEN];
+ int32_t len = snprintf(fullName, sizeof(fullName), "%d.%s", acctId, pDb);
+ return taosHashPut(*pDbs, fullName, len, &pDbs, POINTER_BYTES);
+}
+
+int32_t reserveDbVgInfoInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache) {
+ return reserveDbReqInCache(acctId, pDb, &pMetaCache->pDbVgroup);
+}
+
+int32_t getDbVgInfoFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SArray** pVgInfo) {
+ SArray** pRes = taosHashGet(pMetaCache->pDbVgroup, pDbFName, strlen(pDbFName));
+ if (NULL == pRes) {
+ return TSDB_CODE_PAR_INTERNAL_ERROR;
+ }
+ // *pRes is null, which is a legal value, indicating that the user DB has not been created
+ if (NULL != *pRes) {
+ *pVgInfo = taosArrayDup(*pRes);
+ if (NULL == *pVgInfo) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t reserveTableVgroupInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache) {
+ return reserveTableReqInCache(acctId, pDb, pTable, &pMetaCache->pTableVgroup);
+}
+
+int32_t reserveTableVgroupInCacheExt(const SName* pName, SParseMetaCache* pMetaCache) {
+ char fullName[TSDB_TABLE_FNAME_LEN];
+ tNameExtractFullName(pName, fullName);
+ return reserveTableReqInCacheImpl(fullName, strlen(fullName), &pMetaCache->pTableVgroup);
+}
+
+int32_t getTableVgroupFromCache(SParseMetaCache* pMetaCache, const SName* pName, SVgroupInfo* pVgroup) {
+ char fullName[TSDB_TABLE_FNAME_LEN];
+ tNameExtractFullName(pName, fullName);
+ SVgroupInfo** pRes = taosHashGet(pMetaCache->pTableVgroup, fullName, strlen(fullName));
+ if (NULL == pRes || NULL == *pRes) {
+ return TSDB_CODE_PAR_INTERNAL_ERROR;
+ }
+ memcpy(pVgroup, *pRes, sizeof(SVgroupInfo));
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t reserveDbVgVersionInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache) {
+ return reserveDbReqInCache(acctId, pDb, &pMetaCache->pDbCfg);
+}
+
+int32_t getDbVgVersionFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, int32_t* pVersion, int64_t* pDbId,
+ int32_t* pTableNum) {
+ SDbInfo** pRes = taosHashGet(pMetaCache->pDbCfg, pDbFName, strlen(pDbFName));
+ if (NULL == pRes || NULL == *pRes) {
+ return TSDB_CODE_PAR_INTERNAL_ERROR;
+ }
+ *pVersion = (*pRes)->vgVer;
+ *pDbId = (*pRes)->dbId;
+ *pTableNum = (*pRes)->tbNum;
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t reserveDbCfgInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache) {
+ return reserveDbReqInCache(acctId, pDb, &pMetaCache->pDbCfg);
+}
+
+int32_t getDbCfgFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SDbCfgInfo* pInfo) {
+ SDbCfgInfo** pRes = taosHashGet(pMetaCache->pDbCfg, pDbFName, strlen(pDbFName));
+ if (NULL == pRes || NULL == *pRes) {
+ return TSDB_CODE_PAR_INTERNAL_ERROR;
+ }
+ memcpy(pInfo, *pRes, sizeof(SDbCfgInfo));
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t reserveUserAuthInCacheImpl(const char* pKey, int32_t len, SParseMetaCache* pMetaCache) {
+ if (NULL == pMetaCache->pUserAuth) {
+ pMetaCache->pUserAuth = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+ if (NULL == pMetaCache->pUserAuth) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+ bool pass = false;
+ return taosHashPut(pMetaCache->pUserAuth, pKey, len, &pass, sizeof(pass));
+}
+
+int32_t reserveUserAuthInCache(int32_t acctId, const char* pUser, const char* pDb, AUTH_TYPE type,
+ SParseMetaCache* pMetaCache) {
+ char key[USER_AUTH_KEY_MAX_LEN] = {0};
+ int32_t len = userAuthToString(acctId, pUser, pDb, type, key);
+ return reserveUserAuthInCacheImpl(key, len, pMetaCache);
+}
+
+int32_t reserveUserAuthInCacheExt(const char* pUser, const SName* pName, AUTH_TYPE type, SParseMetaCache* pMetaCache) {
+ char dbFName[TSDB_DB_FNAME_LEN] = {0};
+ tNameGetFullDbName(pName, dbFName);
+ char key[USER_AUTH_KEY_MAX_LEN] = {0};
+ int32_t len = userAuthToStringExt(pUser, dbFName, type, key);
+ return reserveUserAuthInCacheImpl(key, len, pMetaCache);
+}
+
+int32_t getUserAuthFromCache(SParseMetaCache* pMetaCache, const char* pUser, const char* pDbFName, AUTH_TYPE type,
+ bool* pPass) {
+ char key[USER_AUTH_KEY_MAX_LEN] = {0};
+ int32_t len = userAuthToStringExt(pUser, pDbFName, type, key);
+ bool* pRes = taosHashGet(pMetaCache->pUserAuth, key, len);
+ if (NULL == pRes) {
+ return TSDB_CODE_PAR_INTERNAL_ERROR;
+ }
+ *pPass = *pRes;
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t reserveUdfInCache(const char* pFunc, SParseMetaCache* pMetaCache) {
+ if (NULL == pMetaCache->pUdf) {
+ pMetaCache->pUdf = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+ if (NULL == pMetaCache->pUdf) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+ return taosHashPut(pMetaCache->pUdf, pFunc, strlen(pFunc), &pMetaCache, POINTER_BYTES);
+}
+
+int32_t getUdfInfoFromCache(SParseMetaCache* pMetaCache, const char* pFunc, SFuncInfo* pInfo) {
+ SFuncInfo** pRes = taosHashGet(pMetaCache->pUdf, pFunc, strlen(pFunc));
+ if (NULL == pRes || NULL == *pRes) {
+ return TSDB_CODE_PAR_INTERNAL_ERROR;
+ }
+ memcpy(pInfo, *pRes, sizeof(SFuncInfo));
+ return TSDB_CODE_SUCCESS;
+}
diff --git a/source/libs/parser/src/parser.c b/source/libs/parser/src/parser.c
index 688e20063a4f02f3b077b116e1b702c428562c71..c2e1eba4727281a54b778d64afc25ea159a11880 100644
--- a/source/libs/parser/src/parser.c
+++ b/source/libs/parser/src/parser.c
@@ -19,7 +19,7 @@
#include "parInt.h"
#include "parToken.h"
-bool isInsertSql(const char* pStr, size_t length) {
+bool qIsInsertSql(const char* pStr, size_t length) {
if (NULL == pStr) {
return false;
}
@@ -34,22 +34,35 @@ bool isInsertSql(const char* pStr, size_t length) {
} while (1);
}
-static int32_t parseSqlIntoAst(SParseContext* pCxt, SQuery** pQuery) {
- int32_t code = parse(pCxt, pQuery);
- if (TSDB_CODE_SUCCESS == code) {
- code = authenticate(pCxt, *pQuery);
- }
+static int32_t analyseSemantic(SParseContext* pCxt, SQuery* pQuery) {
+ int32_t code = authenticate(pCxt, pQuery);
- if (TSDB_CODE_SUCCESS == code && (*pQuery)->placeholderNum > 0) {
- TSWAP((*pQuery)->pPrepareRoot, (*pQuery)->pRoot);
+ if (TSDB_CODE_SUCCESS == code && pQuery->placeholderNum > 0) {
+ TSWAP(pQuery->pPrepareRoot, pQuery->pRoot);
return TSDB_CODE_SUCCESS;
}
if (TSDB_CODE_SUCCESS == code) {
- code = translate(pCxt, *pQuery);
+ code = translate(pCxt, pQuery);
}
if (TSDB_CODE_SUCCESS == code) {
- code = calculateConstant(pCxt, *pQuery);
+ code = calculateConstant(pCxt, pQuery);
+ }
+ return code;
+}
+
+static int32_t parseSqlIntoAst(SParseContext* pCxt, SQuery** pQuery) {
+ int32_t code = parse(pCxt, pQuery);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = analyseSemantic(pCxt, *pQuery);
+ }
+ return code;
+}
+
+static int32_t parseSqlSyntax(SParseContext* pCxt, SQuery** pQuery) {
+ int32_t code = parse(pCxt, pQuery);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = collectMetaKey(pCxt, *pQuery);
}
return code;
}
@@ -63,28 +76,8 @@ static int32_t setValueByBindParam(SValueNode* pVal, TAOS_MULTI_BIND* pParam) {
int32_t inputSize = (NULL != pParam->length ? *(pParam->length) : tDataTypes[pParam->buffer_type].bytes);
pVal->node.resType.type = pParam->buffer_type;
pVal->node.resType.bytes = inputSize;
+
switch (pParam->buffer_type) {
- case TSDB_DATA_TYPE_BOOL:
- pVal->datum.b = *((bool*)pParam->buffer);
- break;
- case TSDB_DATA_TYPE_TINYINT:
- pVal->datum.i = *((int8_t*)pParam->buffer);
- break;
- case TSDB_DATA_TYPE_SMALLINT:
- pVal->datum.i = *((int16_t*)pParam->buffer);
- break;
- case TSDB_DATA_TYPE_INT:
- pVal->datum.i = *((int32_t*)pParam->buffer);
- break;
- case TSDB_DATA_TYPE_BIGINT:
- pVal->datum.i = *((int64_t*)pParam->buffer);
- break;
- case TSDB_DATA_TYPE_FLOAT:
- pVal->datum.d = *((float*)pParam->buffer);
- break;
- case TSDB_DATA_TYPE_DOUBLE:
- pVal->datum.d = *((double*)pParam->buffer);
- break;
case TSDB_DATA_TYPE_VARCHAR:
case TSDB_DATA_TYPE_VARBINARY:
pVal->datum.p = taosMemoryCalloc(1, pVal->node.resType.bytes + VARSTR_HEADER_SIZE + 1);
@@ -93,6 +86,7 @@ static int32_t setValueByBindParam(SValueNode* pVal, TAOS_MULTI_BIND* pParam) {
}
varDataSetLen(pVal->datum.p, pVal->node.resType.bytes);
strncpy(varDataVal(pVal->datum.p), (const char*)pParam->buffer, pVal->node.resType.bytes);
+ pVal->node.resType.bytes += VARSTR_HEADER_SIZE;
break;
case TSDB_DATA_TYPE_NCHAR: {
pVal->node.resType.bytes *= TSDB_NCHAR_SIZE;
@@ -107,31 +101,16 @@ static int32_t setValueByBindParam(SValueNode* pVal, TAOS_MULTI_BIND* pParam) {
return errno;
}
varDataSetLen(pVal->datum.p, output);
- pVal->node.resType.bytes = output;
+ pVal->node.resType.bytes = output + VARSTR_HEADER_SIZE;
break;
}
- case TSDB_DATA_TYPE_TIMESTAMP:
- pVal->datum.i = *((int64_t*)pParam->buffer);
- break;
- case TSDB_DATA_TYPE_UTINYINT:
- pVal->datum.u = *((uint8_t*)pParam->buffer);
- break;
- case TSDB_DATA_TYPE_USMALLINT:
- pVal->datum.u = *((uint16_t*)pParam->buffer);
- break;
- case TSDB_DATA_TYPE_UINT:
- pVal->datum.u = *((uint32_t*)pParam->buffer);
- break;
- case TSDB_DATA_TYPE_UBIGINT:
- pVal->datum.u = *((uint64_t*)pParam->buffer);
- break;
- case TSDB_DATA_TYPE_JSON:
- case TSDB_DATA_TYPE_DECIMAL:
- case TSDB_DATA_TYPE_BLOB:
- case TSDB_DATA_TYPE_MEDIUMBLOB:
- // todo
- default:
+ default: {
+ int32_t code = nodesSetValueNodeValue(pVal, pParam->buffer);
+ if (code) {
+ return code;
+ }
break;
+ }
}
pVal->translate = true;
return TSDB_CODE_SUCCESS;
@@ -169,7 +148,7 @@ static void rewriteExprAlias(SNode* pRoot) {
int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery) {
int32_t code = TSDB_CODE_SUCCESS;
- if (isInsertSql(pCxt->pSql, pCxt->sqlLen)) {
+ if (qIsInsertSql(pCxt->pSql, pCxt->sqlLen)) {
code = parseInsertSql(pCxt, pQuery);
} else {
code = parseSqlIntoAst(pCxt, pQuery);
@@ -178,12 +157,47 @@ int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery) {
return code;
}
+int32_t qParseSqlSyntax(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq* pCatalogReq) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (qIsInsertSql(pCxt->pSql, pCxt->sqlLen)) {
+ code = parseInsertSyntax(pCxt, pQuery);
+ } else {
+ code = parseSqlSyntax(pCxt, pQuery);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = buildCatalogReq((*pQuery)->pMetaCache, pCatalogReq);
+ }
+ terrno = code;
+ return code;
+}
+
+int32_t qAnalyseSqlSemantic(SParseContext* pCxt, const struct SCatalogReq* pCatalogReq,
+ const struct SMetaData* pMetaData, SQuery* pQuery) {
+ int32_t code = putMetaDataToCache(pCatalogReq, pMetaData, pQuery->pMetaCache);
+ if (NULL == pQuery->pRoot) {
+ return parseInsertSql(pCxt, &pQuery);
+ }
+ return analyseSemantic(pCxt, pQuery);
+}
+
void qDestroyQuery(SQuery* pQueryNode) { nodesDestroyNode(pQueryNode); }
int32_t qExtractResultSchema(const SNode* pRoot, int32_t* numOfCols, SSchema** pSchema) {
return extractResultSchema(pRoot, numOfCols, pSchema);
}
+int32_t qSetSTableIdForRSma(SNode* pStmt, int64_t uid) {
+ if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) {
+ SNode* pTable = ((SSelectStmt*)pStmt)->pFromTable;
+ if (QUERY_NODE_REAL_TABLE == nodeType(pTable)) {
+ ((SRealTableNode*)pTable)->pMeta->uid = uid;
+ ((SRealTableNode*)pTable)->pMeta->suid = uid;
+ return TSDB_CODE_SUCCESS;
+ }
+ }
+ return TSDB_CODE_FAILED;
+}
+
int32_t qStmtBindParams(SQuery* pQuery, TAOS_MULTI_BIND* pParams, int32_t colIdx) {
int32_t code = TSDB_CODE_SUCCESS;
diff --git a/source/libs/parser/src/sql.c b/source/libs/parser/src/sql.c
index 0854bb83e471151e83efd066192ee576561b28ee..ff4fe4032e9be6ab95696bf41d6e1f398983e7b1 100644
--- a/source/libs/parser/src/sql.c
+++ b/source/libs/parser/src/sql.c
@@ -32,11 +32,15 @@
#include
#include
+#define ALLOW_FORBID_FUNC
+
#include "functionMgt.h"
#include "nodes.h"
#include "parToken.h"
#include "ttokendef.h"
#include "parAst.h"
+
+#define YYSTACKDEPTH 0
/**************** End of %include directives **********************************/
/* These constants specify the various numeric values for terminal symbols
** in a format understandable to "makeheaders". This section is blank unless
@@ -100,25 +104,25 @@
#endif
/************* Begin control #defines *****************************************/
#define YYCODETYPE unsigned short int
-#define YYNOCODE 358
+#define YYNOCODE 357
#define YYACTIONTYPE unsigned short int
#define ParseTOKENTYPE SToken
typedef union {
int yyinit;
ParseTOKENTYPE yy0;
- EOrder yy14;
- ENullOrder yy17;
- SNodeList* yy60;
- SToken yy105;
- int32_t yy140;
- SNode* yy172;
- EFillMode yy202;
- SDataType yy248;
- EOperatorType yy572;
- int64_t yy593;
- SAlterOption yy609;
- bool yy617;
- EJoinType yy636;
+ SAlterOption yy53;
+ ENullOrder yy109;
+ SToken yy113;
+ EJoinType yy120;
+ int64_t yy123;
+ bool yy131;
+ EOrder yy428;
+ SDataType yy490;
+ EFillMode yy522;
+ int32_t yy550;
+ EOperatorType yy632;
+ SNodeList* yy670;
+ SNode* yy686;
} YYMINORTYPE;
#ifndef YYSTACKDEPTH
#define YYSTACKDEPTH 100
@@ -134,17 +138,18 @@ typedef union {
#define ParseCTX_FETCH
#define ParseCTX_STORE
#define YYFALLBACK 1
-#define YYNSTATE 605
-#define YYNRULE 452
-#define YYNTOKEN 238
-#define YY_MAX_SHIFT 604
-#define YY_MIN_SHIFTREDUCE 893
-#define YY_MAX_SHIFTREDUCE 1344
-#define YY_ERROR_ACTION 1345
-#define YY_ACCEPT_ACTION 1346
-#define YY_NO_ACTION 1347
-#define YY_MIN_REDUCE 1348
-#define YY_MAX_REDUCE 1799
+#define YYNSTATE 612
+#define YYNRULE 451
+#define YYNRULE_WITH_ACTION 451
+#define YYNTOKEN 237
+#define YY_MAX_SHIFT 611
+#define YY_MIN_SHIFTREDUCE 898
+#define YY_MAX_SHIFTREDUCE 1348
+#define YY_ERROR_ACTION 1349
+#define YY_ACCEPT_ACTION 1350
+#define YY_NO_ACTION 1351
+#define YY_MIN_REDUCE 1352
+#define YY_MAX_REDUCE 1802
/************* End control #defines *******************************************/
#define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0])))
@@ -211,601 +216,622 @@ typedef union {
** yy_default[] Default action for each state.
**
*********** Begin parsing tables **********************************************/
-#define YY_ACTTAB_COUNT (2154)
+#define YY_ACTTAB_COUNT (2125)
static const YYACTIONTYPE yy_action[] = {
- /* 0 */ 1467, 1777, 1777, 1646, 383, 1634, 384, 1380, 292, 11,
- /* 10 */ 10, 343, 35, 33, 1776, 146, 24, 923, 1774, 1774,
- /* 20 */ 301, 391, 1159, 384, 1380, 1631, 36, 34, 32, 31,
- /* 30 */ 30, 1662, 26, 36, 34, 32, 31, 30, 518, 503,
- /* 40 */ 1627, 1633, 36, 34, 32, 31, 30, 1157, 1346, 502,
- /* 50 */ 1777, 522, 130, 1617, 1360, 927, 928, 518, 14, 483,
- /* 60 */ 35, 33, 1285, 145, 1165, 28, 223, 1774, 301, 1675,
- /* 70 */ 1159, 349, 80, 1647, 505, 1649, 1650, 501, 77, 522,
- /* 80 */ 1, 62, 1715, 1777, 1181, 519, 273, 1711, 518, 1261,
- /* 90 */ 1634, 113, 398, 309, 108, 1157, 1775, 104, 1777, 1470,
- /* 100 */ 1774, 1646, 601, 1473, 419, 271, 14, 317, 35, 33,
- /* 110 */ 1631, 147, 1165, 1158, 1478, 1774, 301, 38, 1159, 36,
- /* 120 */ 34, 32, 31, 30, 388, 1627, 1633, 56, 2, 1662,
- /* 130 */ 1181, 36, 34, 32, 31, 30, 522, 503, 36, 34,
- /* 140 */ 32, 31, 30, 1157, 55, 1523, 1777, 502, 39, 131,
- /* 150 */ 601, 1617, 291, 1435, 14, 1371, 1160, 1521, 1662, 145,
- /* 160 */ 1165, 1158, 559, 1774, 1450, 274, 472, 1675, 140, 1341,
- /* 170 */ 132, 1647, 505, 1649, 1650, 501, 2, 522, 1163, 1164,
- /* 180 */ 1517, 1209, 1210, 1212, 1213, 1214, 1215, 1216, 498, 520,
- /* 190 */ 1224, 1225, 1226, 1227, 1228, 1229, 1247, 1410, 601, 519,
- /* 200 */ 1299, 471, 1183, 55, 1160, 1617, 1456, 1196, 148, 1158,
- /* 210 */ 473, 347, 447, 94, 484, 1791, 93, 92, 91, 90,
- /* 220 */ 89, 88, 87, 86, 85, 1729, 1163, 1164, 1478, 1209,
- /* 230 */ 1210, 1212, 1213, 1214, 1215, 1216, 498, 520, 1224, 1225,
- /* 240 */ 1226, 1227, 1228, 1229, 1454, 148, 1248, 479, 1523, 1726,
- /* 250 */ 1340, 1777, 1160, 597, 596, 306, 1309, 433, 432, 55,
- /* 260 */ 1521, 1523, 431, 398, 145, 109, 428, 1253, 1774, 427,
- /* 270 */ 426, 425, 148, 1522, 1163, 1164, 112, 1209, 1210, 1212,
- /* 280 */ 1213, 1214, 1215, 1216, 498, 520, 1224, 1225, 1226, 1227,
- /* 290 */ 1228, 1229, 35, 33, 1349, 465, 1307, 1308, 1310, 1311,
- /* 300 */ 301, 556, 1159, 27, 299, 1242, 1243, 1244, 1245, 1246,
- /* 310 */ 1250, 1251, 1252, 110, 939, 94, 62, 1469, 93, 92,
- /* 320 */ 91, 90, 89, 88, 87, 86, 85, 1157, 143, 1722,
- /* 330 */ 1723, 148, 1727, 519, 1184, 519, 479, 1631, 1474, 417,
- /* 340 */ 35, 33, 1230, 506, 1165, 348, 304, 104, 301, 1568,
- /* 350 */ 1159, 55, 1627, 1633, 424, 36, 34, 32, 31, 30,
- /* 360 */ 8, 479, 1478, 522, 1478, 112, 36, 34, 32, 31,
- /* 370 */ 30, 1556, 433, 432, 1635, 1157, 154, 431, 156, 1646,
- /* 380 */ 109, 428, 601, 519, 427, 426, 425, 148, 35, 33,
- /* 390 */ 112, 556, 1165, 1158, 1631, 358, 301, 305, 1159, 1523,
- /* 400 */ 60, 274, 110, 59, 1182, 128, 312, 1662, 9, 1627,
- /* 410 */ 1633, 1521, 1478, 1185, 1480, 503, 481, 142, 1722, 1723,
- /* 420 */ 522, 1727, 468, 1157, 1196, 502, 342, 110, 341, 1617,
- /* 430 */ 601, 313, 1247, 64, 289, 1397, 1160, 188, 1559, 1561,
- /* 440 */ 1165, 1158, 144, 1722, 1723, 1675, 1727, 548, 263, 1647,
- /* 450 */ 505, 1649, 1650, 501, 1370, 522, 9, 434, 1163, 1164,
- /* 460 */ 334, 1209, 1210, 1212, 1213, 1214, 1215, 1216, 498, 520,
- /* 470 */ 1224, 1225, 1226, 1227, 1228, 1229, 283, 311, 601, 148,
- /* 480 */ 336, 332, 1248, 1120, 1160, 128, 475, 455, 148, 1158,
- /* 490 */ 373, 1122, 474, 469, 1480, 1463, 519, 36, 34, 32,
- /* 500 */ 31, 30, 1369, 1253, 1617, 1465, 1163, 1164, 359, 1209,
- /* 510 */ 1210, 1212, 1213, 1214, 1215, 1216, 498, 520, 1224, 1225,
- /* 520 */ 1226, 1227, 1228, 1229, 284, 1478, 282, 281, 1368, 421,
- /* 530 */ 558, 1292, 1160, 423, 158, 157, 456, 1183, 214, 27,
- /* 540 */ 299, 1242, 1243, 1244, 1245, 1246, 1250, 1251, 1252, 1646,
- /* 550 */ 382, 1121, 1617, 386, 1163, 1164, 422, 1209, 1210, 1212,
- /* 560 */ 1213, 1214, 1215, 1216, 498, 520, 1224, 1225, 1226, 1227,
- /* 570 */ 1228, 1229, 35, 33, 270, 1777, 1181, 1662, 1617, 519,
- /* 580 */ 301, 314, 1159, 366, 1249, 482, 378, 1461, 145, 128,
- /* 590 */ 554, 397, 1774, 390, 423, 502, 386, 1235, 1480, 1617,
- /* 600 */ 940, 54, 939, 1183, 379, 1254, 70, 1157, 1478, 553,
- /* 610 */ 552, 1367, 551, 550, 549, 1675, 1366, 422, 81, 1647,
- /* 620 */ 505, 1649, 1650, 501, 1165, 522, 1365, 1471, 1715, 941,
- /* 630 */ 127, 1186, 294, 1711, 141, 1407, 32, 31, 30, 191,
- /* 640 */ 2, 25, 1029, 545, 544, 543, 1033, 542, 1035, 1036,
- /* 650 */ 541, 1038, 538, 1743, 1044, 535, 1046, 1047, 532, 529,
- /* 660 */ 497, 1617, 601, 1364, 1363, 1362, 1617, 1359, 1358, 1357,
- /* 670 */ 1356, 1355, 1354, 1158, 377, 1353, 1617, 372, 371, 370,
- /* 680 */ 369, 368, 365, 364, 363, 362, 361, 357, 356, 355,
- /* 690 */ 354, 353, 352, 351, 350, 577, 576, 575, 316, 1211,
- /* 700 */ 574, 573, 572, 114, 567, 566, 565, 564, 563, 562,
- /* 710 */ 561, 560, 121, 1617, 1617, 1617, 1160, 1617, 1617, 1617,
- /* 720 */ 1617, 1617, 1617, 1352, 7, 1617, 128, 1351, 430, 429,
- /* 730 */ 1646, 571, 569, 1560, 1561, 1481, 927, 928, 1163, 1164,
- /* 740 */ 1729, 1209, 1210, 1212, 1213, 1214, 1215, 1216, 498, 520,
- /* 750 */ 1224, 1225, 1226, 1227, 1228, 1229, 199, 129, 1662, 519,
- /* 760 */ 1284, 1646, 252, 1729, 1725, 1211, 503, 991, 1159, 519,
- /* 770 */ 1183, 1475, 1606, 1617, 250, 53, 502, 1617, 52, 506,
- /* 780 */ 1617, 1597, 1734, 1280, 993, 1569, 483, 1724, 1478, 1662,
- /* 790 */ 1144, 1145, 487, 1157, 479, 159, 1675, 482, 1478, 80,
- /* 800 */ 1647, 505, 1649, 1650, 501, 246, 522, 502, 1508, 1715,
- /* 810 */ 1165, 1617, 519, 273, 1711, 570, 454, 324, 179, 55,
- /* 820 */ 1168, 177, 485, 112, 516, 1777, 181, 1675, 490, 180,
- /* 830 */ 81, 1647, 505, 1649, 1650, 501, 1637, 522, 145, 519,
- /* 840 */ 1715, 1478, 1774, 483, 294, 1711, 141, 183, 601, 185,
- /* 850 */ 182, 517, 184, 445, 495, 337, 79, 1646, 215, 1158,
- /* 860 */ 110, 519, 519, 547, 461, 1742, 443, 964, 1478, 47,
- /* 870 */ 272, 118, 1639, 236, 315, 212, 1722, 478, 1392, 477,
- /* 880 */ 11, 10, 1777, 1390, 965, 1662, 1171, 58, 57, 346,
- /* 890 */ 1478, 1478, 153, 503, 1280, 147, 1361, 340, 46, 1774,
- /* 900 */ 436, 1167, 1160, 502, 202, 439, 37, 1617, 1436, 269,
- /* 910 */ 37, 457, 330, 37, 326, 322, 150, 225, 1343, 1344,
- /* 920 */ 1646, 1455, 116, 1675, 1163, 1164, 81, 1647, 505, 1649,
- /* 930 */ 1650, 501, 1211, 522, 218, 117, 1715, 466, 1306, 76,
- /* 940 */ 294, 1711, 1790, 448, 204, 118, 1255, 148, 1662, 72,
- /* 950 */ 1217, 1749, 1663, 1115, 1381, 46, 503, 227, 527, 209,
- /* 960 */ 416, 174, 511, 480, 1518, 1283, 502, 1170, 1745, 217,
- /* 970 */ 1617, 117, 1646, 139, 1239, 233, 220, 488, 222, 415,
- /* 980 */ 411, 407, 403, 173, 118, 1022, 1675, 119, 117, 81,
- /* 990 */ 1647, 505, 1649, 1650, 501, 245, 522, 1453, 1050, 1715,
- /* 1000 */ 1662, 3, 1181, 294, 1711, 1790, 319, 63, 503, 323,
- /* 1010 */ 171, 1054, 991, 554, 1772, 491, 280, 279, 502, 241,
- /* 1020 */ 1128, 155, 1617, 360, 1061, 1558, 367, 1059, 120, 375,
- /* 1030 */ 374, 1646, 553, 552, 380, 551, 550, 549, 1675, 1187,
- /* 1040 */ 376, 81, 1647, 505, 1649, 1650, 501, 438, 522, 381,
- /* 1050 */ 389, 1715, 1190, 162, 392, 294, 1711, 1790, 393, 1662,
- /* 1060 */ 1189, 164, 446, 394, 166, 395, 1733, 503, 170, 1188,
- /* 1070 */ 165, 396, 167, 399, 169, 61, 187, 502, 418, 172,
- /* 1080 */ 1165, 1617, 1646, 420, 1468, 176, 1464, 483, 441, 554,
- /* 1090 */ 84, 242, 163, 435, 288, 1601, 178, 1675, 186, 1646,
- /* 1100 */ 259, 1647, 505, 1649, 1650, 501, 122, 522, 553, 552,
- /* 1110 */ 1662, 551, 550, 549, 123, 1466, 1462, 124, 503, 125,
- /* 1120 */ 449, 189, 51, 453, 450, 50, 1777, 1662, 502, 243,
- /* 1130 */ 458, 192, 1617, 194, 1186, 503, 197, 459, 483, 147,
- /* 1140 */ 1746, 467, 509, 1774, 1756, 502, 6, 200, 1675, 1617,
- /* 1150 */ 463, 259, 1647, 505, 1649, 1650, 501, 1646, 522, 464,
- /* 1160 */ 476, 5, 1736, 203, 1755, 1675, 293, 210, 82, 1647,
- /* 1170 */ 505, 1649, 1650, 501, 1280, 522, 111, 1777, 1715, 470,
- /* 1180 */ 208, 1185, 1714, 1711, 1348, 1662, 40, 211, 492, 1646,
- /* 1190 */ 145, 1773, 135, 503, 1774, 1730, 295, 489, 18, 1567,
- /* 1200 */ 1793, 507, 508, 502, 1566, 512, 303, 1617, 103, 102,
- /* 1210 */ 101, 100, 99, 98, 97, 96, 95, 1662, 216, 479,
- /* 1220 */ 1696, 513, 229, 1675, 219, 500, 82, 1647, 505, 1649,
- /* 1230 */ 1650, 501, 514, 522, 231, 502, 1715, 69, 486, 1617,
- /* 1240 */ 494, 1711, 1646, 493, 221, 244, 1479, 71, 112, 525,
- /* 1250 */ 1451, 247, 600, 238, 48, 1675, 134, 253, 267, 1647,
- /* 1260 */ 505, 1649, 1650, 501, 499, 522, 496, 1687, 483, 290,
- /* 1270 */ 1662, 260, 249, 254, 251, 1611, 1610, 318, 503, 1607,
- /* 1280 */ 320, 321, 1153, 1154, 151, 110, 325, 1605, 502, 327,
- /* 1290 */ 328, 329, 1617, 1604, 331, 1603, 333, 1602, 335, 1646,
- /* 1300 */ 212, 1722, 478, 1587, 477, 152, 339, 1777, 1675, 338,
- /* 1310 */ 1131, 82, 1647, 505, 1649, 1650, 501, 1581, 522, 1130,
- /* 1320 */ 145, 1715, 1580, 344, 1774, 345, 1712, 1662, 604, 1579,
- /* 1330 */ 1578, 1551, 1098, 1550, 1549, 503, 1548, 1547, 1546, 1545,
- /* 1340 */ 1544, 1543, 240, 1100, 115, 502, 1646, 1542, 1541, 1617,
- /* 1350 */ 1540, 1539, 462, 1538, 105, 1537, 1536, 1535, 1534, 1533,
- /* 1360 */ 593, 589, 585, 581, 239, 1675, 1532, 1531, 268, 1647,
- /* 1370 */ 505, 1649, 1650, 501, 1662, 522, 1530, 1529, 1528, 1527,
- /* 1380 */ 1526, 1525, 503, 1524, 1409, 1646, 1377, 138, 78, 160,
- /* 1390 */ 1376, 234, 502, 1595, 1589, 1573, 1617, 106, 385, 930,
- /* 1400 */ 161, 929, 107, 1564, 1457, 387, 168, 1408, 1406, 401,
- /* 1410 */ 400, 958, 1675, 1662, 1404, 132, 1647, 505, 1649, 1650,
- /* 1420 */ 501, 503, 522, 1402, 1400, 515, 402, 1389, 406, 404,
- /* 1430 */ 1388, 502, 175, 405, 410, 1617, 1646, 414, 298, 408,
- /* 1440 */ 1375, 409, 1459, 1458, 413, 412, 1398, 1064, 1065, 990,
- /* 1450 */ 460, 1675, 989, 195, 268, 1647, 505, 1649, 1650, 501,
- /* 1460 */ 1792, 522, 45, 988, 1662, 1646, 987, 568, 1393, 570,
- /* 1470 */ 285, 1136, 500, 190, 286, 1391, 984, 287, 983, 1374,
- /* 1480 */ 982, 440, 502, 437, 442, 1373, 1617, 444, 1594, 83,
- /* 1490 */ 1588, 1138, 451, 1662, 1572, 1571, 126, 1646, 1563, 4,
- /* 1500 */ 65, 503, 1675, 196, 37, 267, 1647, 505, 1649, 1650,
- /* 1510 */ 501, 502, 522, 49, 1688, 1617, 452, 193, 300, 15,
- /* 1520 */ 201, 43, 1305, 206, 41, 1662, 1298, 133, 207, 205,
- /* 1530 */ 22, 1675, 23, 503, 268, 1647, 505, 1649, 1650, 501,
- /* 1540 */ 1637, 522, 1277, 502, 66, 213, 198, 1617, 1276, 42,
- /* 1550 */ 302, 136, 1334, 1646, 16, 17, 13, 1323, 1329, 10,
- /* 1560 */ 1328, 19, 296, 1675, 1333, 1332, 268, 1647, 505, 1649,
- /* 1570 */ 1650, 501, 297, 522, 1219, 137, 149, 29, 1204, 510,
- /* 1580 */ 1218, 1662, 12, 20, 1646, 21, 226, 1240, 1562, 503,
- /* 1590 */ 504, 224, 230, 232, 72, 1636, 235, 1303, 1175, 502,
- /* 1600 */ 1221, 228, 67, 1617, 68, 526, 1678, 521, 44, 310,
- /* 1610 */ 1051, 1048, 1662, 1646, 524, 528, 530, 531, 533, 1675,
- /* 1620 */ 503, 1045, 255, 1647, 505, 1649, 1650, 501, 534, 522,
- /* 1630 */ 502, 1039, 536, 537, 1617, 1037, 539, 1043, 1042, 540,
- /* 1640 */ 1041, 1662, 1040, 1028, 73, 74, 1060, 75, 1057, 503,
- /* 1650 */ 1675, 1056, 546, 262, 1647, 505, 1649, 1650, 501, 502,
- /* 1660 */ 522, 956, 1646, 1617, 555, 557, 237, 997, 308, 307,
- /* 1670 */ 978, 977, 973, 1646, 1058, 976, 994, 975, 1173, 1675,
- /* 1680 */ 974, 972, 264, 1647, 505, 1649, 1650, 501, 971, 522,
- /* 1690 */ 1662, 992, 968, 967, 966, 963, 962, 1405, 503, 961,
- /* 1700 */ 578, 1662, 579, 1166, 580, 1403, 582, 583, 502, 503,
- /* 1710 */ 584, 1401, 1617, 586, 587, 588, 1399, 590, 592, 502,
- /* 1720 */ 1165, 591, 1387, 1617, 1646, 1386, 594, 1372, 1675, 595,
- /* 1730 */ 598, 256, 1647, 505, 1649, 1650, 501, 1161, 522, 1675,
- /* 1740 */ 599, 603, 265, 1647, 505, 1649, 1650, 501, 248, 522,
- /* 1750 */ 602, 1347, 1662, 1646, 1347, 1347, 1347, 1347, 523, 1347,
- /* 1760 */ 503, 1347, 1347, 1347, 1347, 1347, 1347, 1347, 1347, 1169,
- /* 1770 */ 502, 1347, 1347, 1347, 1617, 1347, 1347, 1347, 1347, 1347,
- /* 1780 */ 1347, 1662, 1347, 1347, 1347, 1646, 1347, 1347, 1347, 503,
- /* 1790 */ 1675, 1347, 1347, 257, 1647, 505, 1649, 1650, 501, 502,
- /* 1800 */ 522, 1347, 1347, 1617, 1347, 1347, 1347, 1347, 1347, 1347,
- /* 1810 */ 1347, 1347, 1174, 1662, 1347, 1347, 1347, 1347, 1347, 1675,
- /* 1820 */ 1347, 503, 266, 1647, 505, 1649, 1650, 501, 1347, 522,
- /* 1830 */ 1347, 502, 1347, 1347, 1177, 1617, 1347, 1347, 1347, 1347,
- /* 1840 */ 1347, 1646, 1347, 1347, 1347, 520, 1224, 1225, 1347, 1347,
- /* 1850 */ 1347, 1675, 1347, 1347, 258, 1647, 505, 1649, 1650, 501,
- /* 1860 */ 1347, 522, 1347, 1347, 1347, 1347, 1347, 1347, 1347, 1662,
- /* 1870 */ 1347, 1347, 1646, 1347, 1347, 1347, 1347, 503, 1347, 1347,
- /* 1880 */ 1347, 1347, 1347, 1347, 1347, 1347, 1347, 502, 1347, 1347,
- /* 1890 */ 1347, 1617, 1347, 1347, 1347, 1347, 1347, 1347, 1347, 1347,
- /* 1900 */ 1662, 1646, 1347, 1347, 1347, 1347, 1347, 1675, 503, 1347,
- /* 1910 */ 1658, 1647, 505, 1649, 1650, 501, 1347, 522, 502, 1347,
- /* 1920 */ 1347, 1347, 1617, 1347, 1347, 1347, 1347, 1347, 1347, 1662,
- /* 1930 */ 1347, 1347, 1646, 1347, 1347, 1347, 1347, 503, 1675, 1347,
- /* 1940 */ 1347, 1657, 1647, 505, 1649, 1650, 501, 502, 522, 1347,
- /* 1950 */ 1347, 1617, 1347, 1347, 1347, 1347, 1347, 1347, 1347, 1347,
- /* 1960 */ 1662, 1646, 1347, 1347, 1347, 1347, 1347, 1675, 503, 1347,
- /* 1970 */ 1656, 1647, 505, 1649, 1650, 501, 1347, 522, 502, 1347,
- /* 1980 */ 1347, 1347, 1617, 1347, 1347, 1347, 1347, 1347, 1347, 1662,
- /* 1990 */ 1347, 1347, 1347, 1347, 1347, 1347, 1347, 503, 1675, 1347,
- /* 2000 */ 1347, 277, 1647, 505, 1649, 1650, 501, 502, 522, 1347,
- /* 2010 */ 1347, 1617, 1646, 1347, 1347, 1347, 1347, 1347, 1347, 1347,
- /* 2020 */ 1347, 1347, 1347, 1646, 1347, 1347, 1347, 1675, 1347, 1347,
- /* 2030 */ 276, 1647, 505, 1649, 1650, 501, 1347, 522, 1347, 1347,
- /* 2040 */ 1662, 1347, 1347, 1347, 1347, 1347, 1347, 1347, 503, 1347,
- /* 2050 */ 1347, 1662, 1347, 1347, 1347, 1347, 1347, 1347, 502, 503,
- /* 2060 */ 1347, 1347, 1617, 1347, 1347, 1347, 1347, 1347, 1347, 502,
- /* 2070 */ 1347, 1347, 1347, 1617, 1347, 1347, 1347, 1646, 1675, 1347,
- /* 2080 */ 1347, 278, 1647, 505, 1649, 1650, 501, 1347, 522, 1675,
- /* 2090 */ 1347, 1347, 275, 1647, 505, 1649, 1650, 501, 1347, 522,
- /* 2100 */ 1347, 1347, 1347, 1347, 1347, 1662, 1347, 1347, 1347, 1347,
- /* 2110 */ 1347, 1347, 1347, 503, 1347, 1347, 1347, 1347, 1347, 1347,
- /* 2120 */ 1347, 1347, 1347, 502, 1347, 1347, 1347, 1617, 1347, 1347,
- /* 2130 */ 1347, 1347, 1347, 1347, 1347, 1347, 1347, 1347, 1347, 1347,
- /* 2140 */ 1347, 1347, 1347, 1675, 1347, 1347, 261, 1647, 505, 1649,
- /* 2150 */ 1650, 501, 1347, 522,
+ /* 0 */ 132, 1780, 345, 1636, 1440, 1636, 294, 385, 311, 386,
+ /* 10 */ 1384, 78, 35, 33, 1779, 1472, 24, 1649, 1777, 131,
+ /* 20 */ 303, 1364, 1162, 1633, 114, 1633, 36, 34, 32, 31,
+ /* 30 */ 30, 1780, 1475, 36, 34, 32, 31, 30, 1629, 1635,
+ /* 40 */ 1629, 1635, 1780, 525, 147, 1665, 928, 1160, 1777, 529,
+ /* 50 */ 525, 529, 1350, 489, 393, 146, 386, 1384, 14, 1777,
+ /* 60 */ 35, 33, 1289, 509, 1168, 56, 384, 1619, 303, 388,
+ /* 70 */ 1162, 36, 34, 32, 31, 30, 36, 34, 32, 31,
+ /* 80 */ 30, 1, 77, 1678, 932, 933, 82, 1650, 512, 1652,
+ /* 90 */ 1653, 508, 73, 529, 1375, 1160, 1718, 1414, 1780, 1296,
+ /* 100 */ 296, 1714, 142, 608, 39, 1186, 14, 1353, 35, 33,
+ /* 110 */ 319, 1778, 1168, 1161, 220, 1777, 303, 277, 1162, 462,
+ /* 120 */ 468, 1745, 36, 34, 32, 31, 30, 71, 95, 2,
+ /* 130 */ 1374, 94, 93, 92, 91, 90, 89, 88, 87, 86,
+ /* 140 */ 525, 1200, 55, 1160, 1619, 315, 1303, 307, 1476, 1251,
+ /* 150 */ 1780, 608, 1563, 1565, 14, 129, 1163, 438, 437, 1780,
+ /* 160 */ 1168, 1161, 436, 146, 1485, 110, 433, 1777, 277, 432,
+ /* 170 */ 431, 430, 146, 945, 497, 944, 1777, 2, 1166, 1167,
+ /* 180 */ 1619, 1213, 1214, 1216, 1217, 1218, 1219, 1220, 505, 527,
+ /* 190 */ 1228, 1229, 1230, 1231, 1232, 1233, 286, 1239, 1252, 608,
+ /* 200 */ 1251, 38, 946, 1186, 1163, 55, 62, 95, 149, 1161,
+ /* 210 */ 94, 93, 92, 91, 90, 89, 88, 87, 86, 1257,
+ /* 220 */ 1732, 36, 34, 32, 31, 30, 1166, 1167, 1479, 1213,
+ /* 230 */ 1214, 1216, 1217, 1218, 1219, 1220, 505, 527, 1228, 1229,
+ /* 240 */ 1230, 1231, 1232, 1233, 1729, 287, 1373, 285, 284, 1252,
+ /* 250 */ 426, 403, 1163, 1372, 428, 27, 301, 1246, 1247, 1248,
+ /* 260 */ 1249, 1250, 1254, 1255, 1256, 513, 1188, 1215, 306, 149,
+ /* 270 */ 1257, 1572, 28, 228, 1166, 1167, 427, 1213, 1214, 1216,
+ /* 280 */ 1217, 1218, 1219, 1220, 505, 527, 1228, 1229, 1230, 1231,
+ /* 290 */ 1232, 1233, 35, 33, 1352, 1313, 1619, 64, 292, 1461,
+ /* 300 */ 303, 192, 1162, 1619, 526, 351, 27, 301, 1246, 1247,
+ /* 310 */ 1248, 1249, 1250, 1254, 1255, 1256, 349, 1189, 104, 103,
+ /* 320 */ 102, 101, 100, 99, 98, 97, 96, 1160, 149, 452,
+ /* 330 */ 560, 1649, 149, 1483, 472, 1311, 1312, 1314, 1315, 275,
+ /* 340 */ 35, 33, 1234, 1162, 1168, 486, 313, 1665, 303, 559,
+ /* 350 */ 1162, 558, 557, 556, 129, 479, 403, 1345, 526, 1665,
+ /* 360 */ 498, 8, 1371, 1485, 1560, 1215, 526, 489, 1160, 1780,
+ /* 370 */ 350, 157, 1527, 392, 113, 1160, 388, 509, 360, 293,
+ /* 380 */ 1186, 1619, 146, 608, 1525, 1168, 1777, 1483, 35, 33,
+ /* 390 */ 478, 219, 1168, 1161, 1370, 1483, 303, 1678, 1162, 1459,
+ /* 400 */ 82, 1650, 512, 1652, 1653, 508, 55, 529, 26, 9,
+ /* 410 */ 1718, 111, 1619, 1288, 296, 1714, 142, 141, 36, 34,
+ /* 420 */ 32, 31, 30, 1160, 608, 488, 143, 1725, 1726, 1521,
+ /* 430 */ 1730, 608, 62, 1369, 1161, 1746, 1163, 1344, 438, 437,
+ /* 440 */ 1168, 1161, 204, 436, 1619, 109, 110, 433, 11, 10,
+ /* 450 */ 432, 431, 430, 480, 1478, 1368, 562, 9, 1166, 1167,
+ /* 460 */ 475, 1213, 1214, 1216, 1217, 1218, 1219, 1220, 505, 527,
+ /* 470 */ 1228, 1229, 1230, 1231, 1232, 1233, 1187, 1163, 316, 608,
+ /* 480 */ 344, 336, 343, 1619, 1163, 1460, 129, 997, 149, 1161,
+ /* 490 */ 36, 34, 32, 31, 30, 1485, 1527, 604, 603, 1166,
+ /* 500 */ 1167, 338, 334, 308, 999, 1619, 1166, 1167, 1525, 1213,
+ /* 510 */ 1214, 1216, 1217, 1218, 1219, 1220, 505, 527, 1228, 1229,
+ /* 520 */ 1230, 1231, 1232, 1233, 36, 34, 32, 31, 30, 1265,
+ /* 530 */ 481, 476, 1163, 149, 7, 1035, 552, 551, 550, 1039,
+ /* 540 */ 549, 1041, 1042, 548, 1044, 545, 1367, 1050, 542, 1052,
+ /* 550 */ 1053, 539, 536, 1366, 1166, 1167, 1649, 1213, 1214, 1216,
+ /* 560 */ 1217, 1218, 1219, 1220, 505, 527, 1228, 1229, 1230, 1231,
+ /* 570 */ 1232, 1233, 35, 33, 274, 375, 1184, 1411, 560, 526,
+ /* 580 */ 303, 526, 1162, 368, 1665, 1732, 380, 250, 155, 390,
+ /* 590 */ 1513, 105, 507, 361, 1200, 1184, 1619, 559, 424, 558,
+ /* 600 */ 557, 556, 509, 1619, 381, 1527, 1619, 1160, 1483, 1728,
+ /* 610 */ 1483, 1363, 314, 60, 1253, 513, 59, 1525, 1287, 159,
+ /* 620 */ 158, 1573, 1678, 128, 1168, 270, 1650, 512, 1652, 1653,
+ /* 630 */ 508, 506, 529, 503, 1690, 1258, 486, 584, 583, 582,
+ /* 640 */ 318, 2, 581, 580, 579, 115, 574, 573, 572, 571,
+ /* 650 */ 570, 569, 568, 567, 122, 563, 1362, 32, 31, 30,
+ /* 660 */ 1186, 1619, 1361, 608, 1360, 113, 435, 434, 562, 578,
+ /* 670 */ 576, 25, 1359, 1161, 379, 1458, 1358, 374, 373, 372,
+ /* 680 */ 371, 370, 367, 366, 365, 364, 363, 359, 358, 357,
+ /* 690 */ 356, 355, 354, 353, 352, 486, 1564, 1565, 526, 932,
+ /* 700 */ 933, 1732, 111, 1527, 1284, 198, 1619, 54, 1357, 1356,
+ /* 710 */ 402, 526, 1619, 1355, 1619, 1526, 1163, 144, 1725, 1726,
+ /* 720 */ 1185, 1730, 1619, 105, 113, 1727, 1619, 1483, 129, 1649,
+ /* 730 */ 429, 55, 566, 65, 1455, 1365, 1474, 1486, 1166, 1167,
+ /* 740 */ 1483, 1213, 1214, 1216, 1217, 1218, 1219, 1220, 505, 527,
+ /* 750 */ 1228, 1229, 1230, 1231, 1232, 1233, 1633, 1665, 1619, 1619,
+ /* 760 */ 1649, 111, 1608, 1619, 428, 510, 969, 944, 560, 1737,
+ /* 770 */ 1284, 1629, 1635, 1147, 1148, 509, 145, 1725, 1726, 1619,
+ /* 780 */ 1730, 555, 529, 970, 490, 526, 427, 559, 1665, 558,
+ /* 790 */ 557, 556, 422, 502, 577, 1678, 510, 1480, 81, 1650,
+ /* 800 */ 512, 1652, 1653, 508, 494, 529, 509, 326, 1718, 1468,
+ /* 810 */ 1619, 526, 276, 1714, 1483, 490, 183, 185, 1637, 181,
+ /* 820 */ 184, 1649, 1215, 1599, 1780, 187, 1678, 1470, 186, 81,
+ /* 830 */ 1650, 512, 1652, 1653, 508, 339, 529, 148, 1633, 1718,
+ /* 840 */ 1483, 1777, 1639, 276, 1714, 130, 310, 309, 526, 1665,
+ /* 850 */ 256, 565, 450, 1629, 1635, 1780, 1176, 510, 149, 1466,
+ /* 860 */ 460, 195, 254, 53, 529, 448, 52, 509, 146, 504,
+ /* 870 */ 526, 1619, 1777, 443, 526, 189, 119, 1483, 188, 1641,
+ /* 880 */ 46, 1169, 523, 160, 1649, 207, 524, 1678, 451, 526,
+ /* 890 */ 82, 1650, 512, 1652, 1653, 508, 1171, 529, 1168, 1483,
+ /* 900 */ 1718, 241, 191, 1483, 296, 1714, 1793, 1401, 55, 1396,
+ /* 910 */ 1394, 526, 1665, 1243, 446, 1752, 554, 464, 1483, 440,
+ /* 920 */ 510, 1310, 1441, 317, 190, 37, 209, 492, 1170, 439,
+ /* 930 */ 509, 441, 444, 37, 1619, 1347, 1348, 530, 46, 223,
+ /* 940 */ 1483, 37, 11, 10, 80, 230, 117, 1172, 459, 51,
+ /* 950 */ 1678, 473, 50, 82, 1650, 512, 1652, 1653, 508, 453,
+ /* 960 */ 529, 214, 1174, 1718, 1666, 421, 1259, 296, 1714, 1793,
+ /* 970 */ 1649, 1385, 118, 119, 1221, 58, 57, 348, 1775, 249,
+ /* 980 */ 154, 1522, 1120, 222, 1748, 342, 232, 518, 495, 534,
+ /* 990 */ 1177, 118, 119, 487, 1173, 225, 1184, 273, 1665, 3,
+ /* 1000 */ 332, 1649, 328, 324, 151, 321, 510, 227, 325, 120,
+ /* 1010 */ 282, 997, 1180, 238, 1028, 1131, 509, 246, 118, 283,
+ /* 1020 */ 1619, 362, 1562, 527, 1228, 1229, 156, 369, 377, 1665,
+ /* 1030 */ 1056, 376, 1060, 1066, 378, 149, 1678, 510, 382, 82,
+ /* 1040 */ 1650, 512, 1652, 1653, 508, 1190, 529, 509, 383, 1718,
+ /* 1050 */ 1064, 1619, 391, 296, 1714, 1793, 490, 1193, 486, 121,
+ /* 1060 */ 394, 163, 1649, 395, 1736, 165, 1192, 1678, 1194, 397,
+ /* 1070 */ 261, 1650, 512, 1652, 1653, 508, 396, 529, 168, 399,
+ /* 1080 */ 170, 400, 1191, 401, 173, 61, 425, 113, 404, 176,
+ /* 1090 */ 1665, 1473, 423, 180, 1168, 291, 1780, 1469, 510, 85,
+ /* 1100 */ 247, 454, 1603, 455, 182, 193, 490, 458, 509, 148,
+ /* 1110 */ 123, 124, 1619, 1777, 1471, 1467, 125, 490, 196, 126,
+ /* 1120 */ 461, 199, 202, 1189, 111, 1649, 1759, 466, 1678, 474,
+ /* 1130 */ 516, 261, 1650, 512, 1652, 1653, 508, 1758, 529, 217,
+ /* 1140 */ 1725, 485, 465, 484, 6, 483, 1780, 471, 463, 205,
+ /* 1150 */ 208, 470, 295, 1665, 477, 213, 1649, 1780, 1739, 148,
+ /* 1160 */ 1284, 510, 5, 1777, 1749, 1188, 112, 1733, 40, 136,
+ /* 1170 */ 146, 509, 499, 496, 1777, 1619, 215, 18, 1571, 1570,
+ /* 1180 */ 1796, 514, 519, 297, 1665, 515, 305, 520, 234, 216,
+ /* 1190 */ 521, 1678, 510, 236, 83, 1650, 512, 1652, 1653, 508,
+ /* 1200 */ 1699, 529, 509, 248, 1718, 70, 1619, 72, 1717, 1714,
+ /* 1210 */ 1649, 1484, 251, 607, 532, 1456, 1776, 221, 47, 1649,
+ /* 1220 */ 135, 493, 1678, 243, 224, 83, 1650, 512, 1652, 1653,
+ /* 1230 */ 508, 500, 529, 226, 262, 1718, 272, 263, 1665, 501,
+ /* 1240 */ 1714, 253, 255, 1613, 1612, 320, 510, 1665, 1609, 322,
+ /* 1250 */ 323, 1156, 1157, 152, 327, 510, 509, 1607, 329, 330,
+ /* 1260 */ 1619, 331, 1606, 333, 1605, 509, 335, 1604, 337, 1619,
+ /* 1270 */ 1589, 153, 340, 341, 1134, 1133, 1678, 346, 347, 133,
+ /* 1280 */ 1650, 512, 1652, 1653, 508, 1678, 529, 1583, 83, 1650,
+ /* 1290 */ 512, 1652, 1653, 508, 1582, 529, 611, 1649, 1718, 1103,
+ /* 1300 */ 1555, 1554, 1553, 1715, 1581, 1580, 1552, 1551, 1649, 1550,
+ /* 1310 */ 245, 1549, 1548, 1547, 1546, 1545, 1544, 1543, 1542, 1541,
+ /* 1320 */ 1540, 1539, 106, 491, 1794, 1665, 1538, 1537, 600, 596,
+ /* 1330 */ 592, 588, 244, 510, 1536, 116, 1665, 1535, 1534, 1533,
+ /* 1340 */ 1532, 1531, 1530, 509, 510, 1105, 1529, 1619, 161, 935,
+ /* 1350 */ 469, 1528, 1413, 1381, 509, 1380, 1597, 79, 1619, 107,
+ /* 1360 */ 239, 934, 108, 1678, 1591, 139, 271, 1650, 512, 1652,
+ /* 1370 */ 1653, 508, 387, 529, 1678, 389, 1649, 266, 1650, 512,
+ /* 1380 */ 1652, 1653, 508, 162, 529, 1579, 167, 169, 1578, 1568,
+ /* 1390 */ 1462, 172, 963, 522, 1412, 1410, 407, 405, 1408, 411,
+ /* 1400 */ 1406, 1404, 406, 415, 1665, 409, 410, 413, 414, 419,
+ /* 1410 */ 418, 1393, 510, 1392, 417, 482, 1070, 179, 467, 1379,
+ /* 1420 */ 1464, 200, 509, 1463, 1069, 1649, 1619, 996, 995, 994,
+ /* 1430 */ 993, 990, 575, 45, 577, 1402, 1649, 288, 1397, 1139,
+ /* 1440 */ 289, 194, 1678, 989, 988, 133, 1650, 512, 1652, 1653,
+ /* 1450 */ 508, 442, 529, 1665, 1395, 290, 445, 1378, 447, 1377,
+ /* 1460 */ 1596, 510, 449, 84, 1665, 201, 456, 1577, 1141, 1590,
+ /* 1470 */ 1576, 509, 507, 1575, 1567, 1619, 212, 49, 300, 41,
+ /* 1480 */ 66, 457, 509, 4, 15, 134, 1619, 1649, 37, 48,
+ /* 1490 */ 1795, 1678, 206, 43, 271, 1650, 512, 1652, 1653, 508,
+ /* 1500 */ 1639, 529, 1678, 211, 1309, 270, 1650, 512, 1652, 1653,
+ /* 1510 */ 508, 210, 529, 197, 1691, 1665, 203, 10, 22, 23,
+ /* 1520 */ 42, 1302, 67, 510, 178, 218, 1649, 1281, 1280, 127,
+ /* 1530 */ 137, 1338, 1327, 509, 17, 1333, 140, 1619, 19, 1649,
+ /* 1540 */ 302, 1332, 420, 416, 412, 408, 177, 298, 1337, 1336,
+ /* 1550 */ 299, 1244, 29, 1678, 1665, 138, 271, 1650, 512, 1652,
+ /* 1560 */ 1653, 508, 510, 529, 1223, 1222, 12, 1665, 20, 1208,
+ /* 1570 */ 150, 63, 509, 21, 175, 510, 1619, 229, 1307, 304,
+ /* 1580 */ 231, 1566, 16, 235, 1178, 509, 13, 511, 1649, 1619,
+ /* 1590 */ 233, 517, 1678, 68, 69, 271, 1650, 512, 1652, 1653,
+ /* 1600 */ 508, 237, 529, 1638, 240, 1678, 1225, 73, 257, 1650,
+ /* 1610 */ 512, 1652, 1653, 508, 1681, 529, 1665, 1649, 528, 44,
+ /* 1620 */ 531, 1057, 533, 312, 510, 535, 537, 1054, 1049, 538,
+ /* 1630 */ 540, 174, 1051, 166, 509, 171, 541, 398, 1619, 543,
+ /* 1640 */ 1045, 546, 544, 1034, 547, 1665, 1043, 1048, 1047, 553,
+ /* 1650 */ 74, 75, 1065, 510, 1678, 164, 1649, 265, 1650, 512,
+ /* 1660 */ 1652, 1653, 508, 509, 529, 76, 1063, 1619, 1062, 961,
+ /* 1670 */ 1046, 561, 985, 1003, 564, 242, 983, 982, 981, 980,
+ /* 1680 */ 979, 978, 977, 1678, 1665, 976, 267, 1650, 512, 1652,
+ /* 1690 */ 1653, 508, 510, 529, 998, 973, 972, 971, 968, 967,
+ /* 1700 */ 966, 1000, 509, 1409, 585, 1649, 1619, 586, 587, 1407,
+ /* 1710 */ 589, 590, 591, 1405, 593, 594, 1649, 595, 1403, 597,
+ /* 1720 */ 599, 598, 1678, 1391, 601, 258, 1650, 512, 1652, 1653,
+ /* 1730 */ 508, 1390, 529, 1665, 602, 1376, 605, 606, 1351, 1351,
+ /* 1740 */ 609, 510, 1164, 252, 1665, 610, 1351, 1351, 1351, 1351,
+ /* 1750 */ 1351, 509, 510, 1351, 1351, 1619, 1351, 1351, 1351, 1351,
+ /* 1760 */ 1351, 1351, 509, 1351, 1351, 1649, 1619, 1351, 1351, 1351,
+ /* 1770 */ 1351, 1678, 1351, 1351, 268, 1650, 512, 1652, 1653, 508,
+ /* 1780 */ 1649, 529, 1678, 1351, 1351, 259, 1650, 512, 1652, 1653,
+ /* 1790 */ 508, 1351, 529, 1665, 1351, 1351, 1351, 1649, 1351, 1351,
+ /* 1800 */ 1351, 510, 1351, 1351, 1351, 1351, 1351, 1351, 1665, 1351,
+ /* 1810 */ 1351, 509, 1649, 1351, 1351, 1619, 510, 1351, 1351, 1351,
+ /* 1820 */ 1351, 1351, 1351, 1351, 1351, 1665, 509, 1351, 1351, 1649,
+ /* 1830 */ 1619, 1678, 1351, 510, 269, 1650, 512, 1652, 1653, 508,
+ /* 1840 */ 1665, 529, 1351, 509, 1351, 1351, 1678, 1619, 510, 260,
+ /* 1850 */ 1650, 512, 1652, 1653, 508, 1351, 529, 1665, 509, 1351,
+ /* 1860 */ 1351, 1351, 1619, 1678, 1351, 510, 1661, 1650, 512, 1652,
+ /* 1870 */ 1653, 508, 1351, 529, 1351, 509, 1649, 1351, 1678, 1619,
+ /* 1880 */ 1351, 1660, 1650, 512, 1652, 1653, 508, 1351, 529, 1351,
+ /* 1890 */ 1351, 1351, 1351, 1351, 1351, 1678, 1351, 1351, 1659, 1650,
+ /* 1900 */ 512, 1652, 1653, 508, 1665, 529, 1351, 1351, 1649, 1351,
+ /* 1910 */ 1351, 1351, 510, 1351, 1351, 1351, 1351, 1351, 1351, 1351,
+ /* 1920 */ 1351, 1351, 509, 1351, 1351, 1649, 1619, 1351, 1351, 1351,
+ /* 1930 */ 1351, 1351, 1351, 1351, 1351, 1351, 1665, 1351, 1351, 1351,
+ /* 1940 */ 1351, 1351, 1678, 1351, 510, 280, 1650, 512, 1652, 1653,
+ /* 1950 */ 508, 1351, 529, 1665, 509, 1351, 1351, 1649, 1619, 1351,
+ /* 1960 */ 1351, 510, 1351, 1351, 1351, 1351, 1351, 1351, 1351, 1351,
+ /* 1970 */ 1351, 509, 1351, 1351, 1678, 1619, 1351, 279, 1650, 512,
+ /* 1980 */ 1652, 1653, 508, 1351, 529, 1665, 1351, 1351, 1351, 1351,
+ /* 1990 */ 1351, 1678, 1351, 510, 281, 1650, 512, 1652, 1653, 508,
+ /* 2000 */ 1351, 529, 1351, 509, 1351, 1351, 1649, 1619, 1351, 1351,
+ /* 2010 */ 1351, 1351, 1351, 1351, 1351, 1351, 1351, 1351, 1351, 486,
+ /* 2020 */ 1351, 1351, 1351, 1678, 1351, 1351, 278, 1650, 512, 1652,
+ /* 2030 */ 1653, 508, 1351, 529, 1665, 1351, 1351, 1351, 1351, 1351,
+ /* 2040 */ 1351, 1351, 510, 1351, 1351, 1351, 1351, 1351, 113, 1351,
+ /* 2050 */ 1351, 1351, 509, 1351, 1351, 1351, 1619, 1351, 1351, 1351,
+ /* 2060 */ 1351, 1351, 1351, 1351, 1351, 1351, 1351, 490, 1351, 1351,
+ /* 2070 */ 1351, 1351, 1678, 1351, 1351, 264, 1650, 512, 1652, 1653,
+ /* 2080 */ 508, 1351, 529, 1351, 1351, 111, 1351, 1351, 1351, 1351,
+ /* 2090 */ 1351, 1351, 1351, 1351, 1351, 1351, 1351, 1351, 1351, 1351,
+ /* 2100 */ 217, 1725, 485, 1351, 484, 1351, 1351, 1780, 1351, 1351,
+ /* 2110 */ 1351, 1351, 1351, 1351, 1351, 1351, 1351, 1351, 1351, 1351,
+ /* 2120 */ 146, 1351, 1351, 1351, 1777,
};
static const YYCODETYPE yy_lookahead[] = {
- /* 0 */ 270, 336, 336, 241, 244, 271, 246, 247, 274, 1,
- /* 10 */ 2, 297, 12, 13, 349, 349, 2, 4, 353, 353,
- /* 20 */ 20, 244, 22, 246, 247, 291, 12, 13, 14, 15,
- /* 30 */ 16, 269, 2, 12, 13, 14, 15, 16, 20, 277,
- /* 40 */ 306, 307, 12, 13, 14, 15, 16, 47, 238, 287,
- /* 50 */ 336, 317, 240, 291, 242, 42, 43, 20, 58, 297,
- /* 60 */ 12, 13, 14, 349, 64, 321, 322, 353, 20, 307,
- /* 70 */ 22, 248, 310, 311, 312, 313, 314, 315, 251, 317,
- /* 80 */ 80, 253, 320, 336, 20, 248, 324, 325, 20, 81,
- /* 90 */ 271, 264, 57, 274, 266, 47, 349, 260, 336, 272,
- /* 100 */ 353, 241, 102, 275, 267, 282, 58, 297, 12, 13,
- /* 110 */ 291, 349, 64, 113, 277, 353, 20, 80, 22, 12,
- /* 120 */ 13, 14, 15, 16, 14, 306, 307, 4, 80, 269,
- /* 130 */ 20, 12, 13, 14, 15, 16, 317, 277, 12, 13,
- /* 140 */ 14, 15, 16, 47, 80, 269, 336, 287, 80, 254,
- /* 150 */ 102, 291, 276, 258, 58, 241, 156, 281, 269, 349,
- /* 160 */ 64, 113, 257, 353, 259, 58, 277, 307, 268, 148,
- /* 170 */ 310, 311, 312, 313, 314, 315, 80, 317, 178, 179,
- /* 180 */ 280, 181, 182, 183, 184, 185, 186, 187, 188, 189,
- /* 190 */ 190, 191, 192, 193, 194, 195, 89, 0, 102, 248,
- /* 200 */ 81, 312, 20, 80, 156, 291, 0, 81, 208, 113,
- /* 210 */ 20, 260, 297, 21, 354, 355, 24, 25, 26, 27,
- /* 220 */ 28, 29, 30, 31, 32, 308, 178, 179, 277, 181,
+ /* 0 */ 253, 335, 295, 270, 257, 270, 273, 243, 273, 245,
+ /* 10 */ 246, 250, 12, 13, 348, 269, 2, 240, 352, 239,
+ /* 20 */ 20, 241, 22, 290, 263, 290, 12, 13, 14, 15,
+ /* 30 */ 16, 335, 271, 12, 13, 14, 15, 16, 305, 306,
+ /* 40 */ 305, 306, 335, 20, 348, 268, 4, 47, 352, 316,
+ /* 50 */ 20, 316, 237, 276, 243, 348, 245, 246, 58, 352,
+ /* 60 */ 12, 13, 14, 286, 64, 4, 244, 290, 20, 247,
+ /* 70 */ 22, 12, 13, 14, 15, 16, 12, 13, 14, 15,
+ /* 80 */ 16, 81, 81, 306, 42, 43, 309, 310, 311, 312,
+ /* 90 */ 313, 314, 91, 316, 240, 47, 319, 0, 335, 14,
+ /* 100 */ 323, 324, 325, 103, 81, 20, 58, 0, 12, 13,
+ /* 110 */ 295, 348, 64, 113, 337, 352, 20, 58, 22, 295,
+ /* 120 */ 343, 344, 12, 13, 14, 15, 16, 250, 21, 81,
+ /* 130 */ 240, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+ /* 140 */ 20, 82, 81, 47, 290, 278, 82, 260, 271, 90,
+ /* 150 */ 335, 103, 285, 286, 58, 268, 156, 60, 61, 335,
+ /* 160 */ 64, 113, 65, 348, 277, 68, 69, 352, 58, 72,
+ /* 170 */ 73, 74, 348, 20, 41, 22, 352, 81, 178, 179,
+ /* 180 */ 290, 181, 182, 183, 184, 185, 186, 187, 188, 189,
+ /* 190 */ 190, 191, 192, 193, 194, 195, 35, 14, 139, 103,
+ /* 200 */ 90, 81, 49, 20, 156, 81, 252, 21, 208, 113,
+ /* 210 */ 24, 25, 26, 27, 28, 29, 30, 31, 32, 160,
+ /* 220 */ 307, 12, 13, 14, 15, 16, 178, 179, 274, 181,
/* 230 */ 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
- /* 240 */ 192, 193, 194, 195, 0, 208, 139, 248, 269, 332,
- /* 250 */ 229, 336, 156, 249, 250, 276, 178, 60, 61, 80,
- /* 260 */ 281, 269, 65, 57, 349, 68, 69, 160, 353, 72,
- /* 270 */ 73, 74, 208, 281, 178, 179, 277, 181, 182, 183,
+ /* 240 */ 192, 193, 194, 195, 331, 84, 240, 86, 87, 139,
+ /* 250 */ 89, 57, 156, 240, 93, 196, 197, 198, 199, 200,
+ /* 260 */ 201, 202, 203, 204, 205, 286, 20, 182, 289, 208,
+ /* 270 */ 160, 292, 320, 321, 178, 179, 115, 181, 182, 183,
/* 280 */ 184, 185, 186, 187, 188, 189, 190, 191, 192, 193,
- /* 290 */ 194, 195, 12, 13, 0, 217, 218, 219, 220, 221,
- /* 300 */ 20, 57, 22, 196, 197, 198, 199, 200, 201, 202,
- /* 310 */ 203, 204, 205, 314, 22, 21, 253, 271, 24, 25,
- /* 320 */ 26, 27, 28, 29, 30, 31, 32, 47, 329, 330,
- /* 330 */ 331, 208, 333, 248, 20, 248, 248, 291, 275, 47,
- /* 340 */ 12, 13, 14, 287, 64, 260, 290, 260, 20, 293,
- /* 350 */ 22, 80, 306, 307, 267, 12, 13, 14, 15, 16,
- /* 360 */ 80, 248, 277, 317, 277, 277, 12, 13, 14, 15,
- /* 370 */ 16, 277, 60, 61, 271, 47, 55, 65, 284, 241,
- /* 380 */ 68, 69, 102, 248, 72, 73, 74, 208, 12, 13,
- /* 390 */ 277, 57, 64, 113, 291, 260, 20, 261, 22, 269,
- /* 400 */ 79, 58, 314, 82, 20, 269, 276, 269, 80, 306,
- /* 410 */ 307, 281, 277, 20, 278, 277, 328, 329, 330, 331,
- /* 420 */ 317, 333, 143, 47, 81, 287, 155, 314, 157, 291,
- /* 430 */ 102, 279, 89, 165, 166, 0, 156, 169, 286, 287,
- /* 440 */ 64, 113, 329, 330, 331, 307, 333, 91, 310, 311,
- /* 450 */ 312, 313, 314, 315, 241, 317, 80, 22, 178, 179,
- /* 460 */ 151, 181, 182, 183, 184, 185, 186, 187, 188, 189,
- /* 470 */ 190, 191, 192, 193, 194, 195, 35, 261, 102, 208,
- /* 480 */ 171, 172, 139, 79, 156, 269, 348, 248, 208, 113,
- /* 490 */ 75, 87, 213, 214, 278, 270, 248, 12, 13, 14,
- /* 500 */ 15, 16, 241, 160, 291, 270, 178, 179, 260, 181,
+ /* 290 */ 194, 195, 12, 13, 0, 178, 290, 165, 166, 0,
+ /* 300 */ 20, 169, 22, 290, 247, 247, 196, 197, 198, 199,
+ /* 310 */ 200, 201, 202, 203, 204, 205, 259, 20, 24, 25,
+ /* 320 */ 26, 27, 28, 29, 30, 31, 32, 47, 208, 295,
+ /* 330 */ 93, 240, 208, 276, 217, 218, 219, 220, 221, 281,
+ /* 340 */ 12, 13, 14, 22, 64, 247, 260, 268, 20, 112,
+ /* 350 */ 22, 114, 115, 116, 268, 276, 57, 148, 247, 268,
+ /* 360 */ 227, 81, 240, 277, 276, 182, 247, 276, 47, 335,
+ /* 370 */ 259, 283, 268, 244, 276, 47, 247, 286, 259, 275,
+ /* 380 */ 20, 290, 348, 103, 280, 64, 352, 276, 12, 13,
+ /* 390 */ 311, 145, 64, 113, 240, 276, 20, 306, 22, 0,
+ /* 400 */ 309, 310, 311, 312, 313, 314, 81, 316, 2, 81,
+ /* 410 */ 319, 313, 290, 4, 323, 324, 325, 267, 12, 13,
+ /* 420 */ 14, 15, 16, 47, 103, 327, 328, 329, 330, 279,
+ /* 430 */ 332, 103, 252, 240, 113, 344, 156, 228, 60, 61,
+ /* 440 */ 64, 113, 145, 65, 290, 265, 68, 69, 1, 2,
+ /* 450 */ 72, 73, 74, 20, 274, 240, 57, 81, 178, 179,
+ /* 460 */ 143, 181, 182, 183, 184, 185, 186, 187, 188, 189,
+ /* 470 */ 190, 191, 192, 193, 194, 195, 20, 156, 260, 103,
+ /* 480 */ 155, 151, 157, 290, 156, 0, 268, 47, 208, 113,
+ /* 490 */ 12, 13, 14, 15, 16, 277, 268, 248, 249, 178,
+ /* 500 */ 179, 171, 172, 275, 64, 290, 178, 179, 280, 181,
/* 510 */ 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
- /* 520 */ 192, 193, 194, 195, 83, 277, 85, 86, 241, 88,
- /* 530 */ 64, 14, 156, 92, 119, 120, 297, 20, 145, 196,
- /* 540 */ 197, 198, 199, 200, 201, 202, 203, 204, 205, 241,
- /* 550 */ 245, 147, 291, 248, 178, 179, 115, 181, 182, 183,
+ /* 520 */ 192, 193, 194, 195, 12, 13, 14, 15, 16, 82,
+ /* 530 */ 213, 214, 156, 208, 37, 94, 95, 96, 97, 98,
+ /* 540 */ 99, 100, 101, 102, 103, 104, 240, 106, 107, 108,
+ /* 550 */ 109, 110, 111, 240, 178, 179, 240, 181, 182, 183,
/* 560 */ 184, 185, 186, 187, 188, 189, 190, 191, 192, 193,
- /* 570 */ 194, 195, 12, 13, 18, 336, 20, 269, 291, 248,
- /* 580 */ 20, 261, 22, 27, 139, 277, 30, 270, 349, 269,
- /* 590 */ 92, 260, 353, 245, 92, 287, 248, 14, 278, 291,
- /* 600 */ 20, 3, 22, 20, 48, 160, 251, 47, 277, 111,
- /* 610 */ 112, 241, 114, 115, 116, 307, 241, 115, 310, 311,
- /* 620 */ 312, 313, 314, 315, 64, 317, 241, 272, 320, 49,
- /* 630 */ 145, 20, 324, 325, 326, 0, 14, 15, 16, 270,
- /* 640 */ 80, 196, 93, 94, 95, 96, 97, 98, 99, 100,
- /* 650 */ 101, 102, 103, 345, 105, 106, 107, 108, 109, 110,
- /* 660 */ 270, 291, 102, 241, 241, 241, 291, 241, 241, 241,
- /* 670 */ 241, 241, 241, 113, 118, 241, 291, 121, 122, 123,
+ /* 570 */ 194, 195, 12, 13, 18, 75, 20, 0, 93, 247,
+ /* 580 */ 20, 247, 22, 27, 268, 307, 30, 261, 55, 14,
+ /* 590 */ 264, 259, 276, 259, 82, 20, 290, 112, 266, 114,
+ /* 600 */ 115, 116, 286, 290, 48, 268, 290, 47, 276, 331,
+ /* 610 */ 276, 240, 275, 80, 139, 286, 83, 280, 209, 119,
+ /* 620 */ 120, 292, 306, 145, 64, 309, 310, 311, 312, 313,
+ /* 630 */ 314, 315, 316, 317, 318, 160, 247, 60, 61, 62,
+ /* 640 */ 63, 81, 65, 66, 67, 68, 69, 70, 71, 72,
+ /* 650 */ 73, 74, 75, 76, 77, 78, 240, 14, 15, 16,
+ /* 660 */ 20, 290, 240, 103, 240, 276, 254, 255, 57, 254,
+ /* 670 */ 255, 196, 240, 113, 118, 0, 240, 121, 122, 123,
/* 680 */ 124, 125, 126, 127, 128, 129, 130, 131, 132, 133,
- /* 690 */ 134, 135, 136, 137, 138, 60, 61, 62, 63, 182,
- /* 700 */ 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
- /* 710 */ 75, 76, 77, 291, 291, 291, 156, 291, 291, 291,
- /* 720 */ 291, 291, 291, 241, 37, 291, 269, 241, 255, 256,
- /* 730 */ 241, 255, 256, 286, 287, 278, 42, 43, 178, 179,
- /* 740 */ 308, 181, 182, 183, 184, 185, 186, 187, 188, 189,
- /* 750 */ 190, 191, 192, 193, 194, 195, 145, 18, 269, 248,
- /* 760 */ 4, 241, 23, 308, 332, 182, 277, 47, 22, 248,
- /* 770 */ 20, 260, 0, 291, 35, 36, 287, 291, 39, 287,
- /* 780 */ 291, 260, 206, 207, 64, 293, 297, 332, 277, 269,
- /* 790 */ 167, 168, 41, 47, 248, 56, 307, 277, 277, 310,
- /* 800 */ 311, 312, 313, 314, 315, 262, 317, 287, 265, 320,
- /* 810 */ 64, 291, 248, 324, 325, 41, 300, 45, 84, 80,
- /* 820 */ 47, 87, 224, 277, 260, 336, 84, 307, 41, 87,
- /* 830 */ 310, 311, 312, 313, 314, 315, 44, 317, 349, 248,
- /* 840 */ 320, 277, 353, 297, 324, 325, 326, 84, 102, 84,
- /* 850 */ 87, 260, 87, 21, 58, 81, 117, 241, 338, 113,
- /* 860 */ 314, 248, 248, 270, 344, 345, 34, 47, 277, 145,
- /* 870 */ 146, 41, 80, 260, 260, 329, 330, 331, 0, 333,
- /* 880 */ 1, 2, 336, 0, 64, 269, 113, 148, 149, 150,
- /* 890 */ 277, 277, 153, 277, 207, 349, 242, 158, 41, 353,
- /* 900 */ 22, 47, 156, 287, 41, 22, 41, 291, 258, 170,
- /* 910 */ 41, 81, 173, 41, 175, 176, 177, 41, 193, 194,
- /* 920 */ 241, 0, 41, 307, 178, 179, 310, 311, 312, 313,
- /* 930 */ 314, 315, 182, 317, 356, 41, 320, 347, 81, 80,
- /* 940 */ 324, 325, 326, 304, 81, 41, 81, 208, 269, 90,
- /* 950 */ 81, 335, 269, 81, 247, 41, 277, 81, 41, 341,
- /* 960 */ 249, 33, 81, 334, 280, 209, 287, 113, 309, 350,
- /* 970 */ 291, 41, 241, 45, 178, 81, 350, 226, 350, 51,
- /* 980 */ 52, 53, 54, 55, 41, 81, 307, 41, 41, 310,
- /* 990 */ 311, 312, 313, 314, 315, 81, 317, 0, 81, 320,
- /* 1000 */ 269, 337, 20, 324, 325, 326, 248, 79, 277, 45,
- /* 1010 */ 82, 81, 47, 92, 335, 228, 255, 305, 287, 298,
- /* 1020 */ 154, 40, 291, 248, 81, 248, 285, 81, 81, 139,
- /* 1030 */ 283, 241, 111, 112, 248, 114, 115, 116, 307, 20,
- /* 1040 */ 283, 310, 311, 312, 313, 314, 315, 4, 317, 243,
- /* 1050 */ 243, 320, 20, 253, 302, 324, 325, 326, 287, 269,
- /* 1060 */ 20, 253, 19, 295, 253, 277, 335, 277, 140, 20,
- /* 1070 */ 142, 288, 144, 248, 253, 253, 33, 287, 243, 253,
- /* 1080 */ 64, 291, 241, 269, 269, 269, 269, 297, 45, 92,
- /* 1090 */ 248, 302, 164, 50, 243, 291, 269, 307, 55, 241,
- /* 1100 */ 310, 311, 312, 313, 314, 315, 269, 317, 111, 112,
- /* 1110 */ 269, 114, 115, 116, 269, 269, 269, 269, 277, 269,
- /* 1120 */ 163, 251, 79, 287, 301, 82, 336, 269, 287, 295,
- /* 1130 */ 277, 251, 291, 251, 20, 277, 251, 288, 297, 349,
- /* 1140 */ 309, 216, 215, 353, 346, 287, 223, 292, 307, 291,
- /* 1150 */ 211, 310, 311, 312, 313, 314, 315, 241, 317, 291,
- /* 1160 */ 222, 210, 343, 292, 346, 307, 291, 339, 310, 311,
- /* 1170 */ 312, 313, 314, 315, 207, 317, 277, 336, 320, 291,
- /* 1180 */ 342, 20, 324, 325, 0, 269, 40, 327, 227, 241,
- /* 1190 */ 349, 352, 340, 277, 353, 308, 230, 225, 80, 292,
- /* 1200 */ 357, 291, 291, 287, 292, 142, 291, 291, 24, 25,
- /* 1210 */ 26, 27, 28, 29, 30, 31, 32, 269, 351, 248,
- /* 1220 */ 323, 289, 277, 307, 351, 277, 310, 311, 312, 313,
- /* 1230 */ 314, 315, 288, 317, 251, 287, 320, 251, 352, 291,
- /* 1240 */ 324, 325, 241, 352, 351, 265, 277, 80, 277, 273,
- /* 1250 */ 259, 248, 243, 251, 299, 307, 303, 263, 310, 311,
- /* 1260 */ 312, 313, 314, 315, 316, 317, 318, 319, 297, 296,
- /* 1270 */ 269, 263, 252, 263, 239, 0, 0, 72, 277, 0,
- /* 1280 */ 47, 174, 47, 47, 47, 314, 174, 0, 287, 47,
- /* 1290 */ 47, 174, 291, 0, 47, 0, 47, 0, 47, 241,
- /* 1300 */ 329, 330, 331, 0, 333, 80, 159, 336, 307, 160,
- /* 1310 */ 113, 310, 311, 312, 313, 314, 315, 0, 317, 156,
- /* 1320 */ 349, 320, 0, 152, 353, 151, 325, 269, 19, 0,
- /* 1330 */ 0, 0, 44, 0, 0, 277, 0, 0, 0, 0,
- /* 1340 */ 0, 0, 33, 22, 40, 287, 241, 0, 0, 291,
- /* 1350 */ 0, 0, 294, 0, 45, 0, 0, 0, 0, 0,
- /* 1360 */ 51, 52, 53, 54, 55, 307, 0, 0, 310, 311,
- /* 1370 */ 312, 313, 314, 315, 269, 317, 0, 0, 0, 0,
- /* 1380 */ 0, 0, 277, 0, 0, 241, 0, 41, 79, 40,
- /* 1390 */ 0, 82, 287, 0, 0, 0, 291, 37, 44, 14,
- /* 1400 */ 38, 14, 37, 0, 0, 44, 37, 0, 0, 45,
- /* 1410 */ 47, 59, 307, 269, 0, 310, 311, 312, 313, 314,
- /* 1420 */ 315, 277, 317, 0, 0, 116, 37, 0, 37, 47,
- /* 1430 */ 0, 287, 87, 45, 37, 291, 241, 37, 294, 47,
- /* 1440 */ 0, 45, 0, 0, 45, 47, 0, 22, 47, 47,
- /* 1450 */ 141, 307, 47, 144, 310, 311, 312, 313, 314, 315,
- /* 1460 */ 355, 317, 89, 47, 269, 241, 47, 41, 0, 41,
- /* 1470 */ 22, 162, 277, 164, 22, 0, 47, 22, 47, 0,
- /* 1480 */ 47, 47, 287, 48, 22, 0, 291, 22, 0, 20,
- /* 1490 */ 0, 47, 22, 269, 0, 0, 161, 241, 0, 41,
- /* 1500 */ 80, 277, 307, 37, 41, 310, 311, 312, 313, 314,
- /* 1510 */ 315, 287, 317, 145, 319, 291, 145, 142, 294, 212,
- /* 1520 */ 81, 41, 81, 41, 206, 269, 81, 80, 44, 80,
- /* 1530 */ 80, 307, 41, 277, 310, 311, 312, 313, 314, 315,
- /* 1540 */ 44, 317, 81, 287, 80, 44, 140, 291, 81, 41,
- /* 1550 */ 294, 44, 81, 241, 212, 41, 212, 81, 47, 2,
- /* 1560 */ 47, 41, 47, 307, 47, 47, 310, 311, 312, 313,
- /* 1570 */ 314, 315, 47, 317, 81, 44, 44, 80, 22, 143,
- /* 1580 */ 81, 269, 80, 80, 241, 80, 80, 178, 0, 277,
- /* 1590 */ 180, 81, 37, 140, 90, 44, 44, 81, 22, 287,
- /* 1600 */ 81, 80, 80, 291, 80, 47, 80, 80, 80, 47,
- /* 1610 */ 81, 81, 269, 241, 91, 80, 47, 80, 47, 307,
- /* 1620 */ 277, 81, 310, 311, 312, 313, 314, 315, 80, 317,
- /* 1630 */ 287, 81, 47, 80, 291, 81, 47, 104, 104, 80,
- /* 1640 */ 104, 269, 104, 22, 80, 80, 47, 80, 47, 277,
- /* 1650 */ 307, 22, 92, 310, 311, 312, 313, 314, 315, 287,
- /* 1660 */ 317, 59, 241, 291, 58, 78, 41, 64, 12, 13,
- /* 1670 */ 47, 47, 22, 241, 113, 47, 64, 47, 22, 307,
- /* 1680 */ 47, 47, 310, 311, 312, 313, 314, 315, 47, 317,
- /* 1690 */ 269, 47, 47, 47, 47, 47, 47, 0, 277, 47,
- /* 1700 */ 47, 269, 45, 47, 37, 0, 47, 45, 287, 277,
- /* 1710 */ 37, 0, 291, 47, 45, 37, 0, 47, 37, 287,
- /* 1720 */ 64, 45, 0, 291, 241, 0, 47, 0, 307, 46,
- /* 1730 */ 22, 310, 311, 312, 313, 314, 315, 22, 317, 307,
- /* 1740 */ 21, 20, 310, 311, 312, 313, 314, 315, 22, 317,
- /* 1750 */ 21, 358, 269, 241, 358, 358, 358, 358, 102, 358,
- /* 1760 */ 277, 358, 358, 358, 358, 358, 358, 358, 358, 113,
- /* 1770 */ 287, 358, 358, 358, 291, 358, 358, 358, 358, 358,
- /* 1780 */ 358, 269, 358, 358, 358, 241, 358, 358, 358, 277,
- /* 1790 */ 307, 358, 358, 310, 311, 312, 313, 314, 315, 287,
- /* 1800 */ 317, 358, 358, 291, 358, 358, 358, 358, 358, 358,
- /* 1810 */ 358, 358, 156, 269, 358, 358, 358, 358, 358, 307,
- /* 1820 */ 358, 277, 310, 311, 312, 313, 314, 315, 358, 317,
- /* 1830 */ 358, 287, 358, 358, 178, 291, 358, 358, 358, 358,
- /* 1840 */ 358, 241, 358, 358, 358, 189, 190, 191, 358, 358,
- /* 1850 */ 358, 307, 358, 358, 310, 311, 312, 313, 314, 315,
- /* 1860 */ 358, 317, 358, 358, 358, 358, 358, 358, 358, 269,
- /* 1870 */ 358, 358, 241, 358, 358, 358, 358, 277, 358, 358,
- /* 1880 */ 358, 358, 358, 358, 358, 358, 358, 287, 358, 358,
- /* 1890 */ 358, 291, 358, 358, 358, 358, 358, 358, 358, 358,
- /* 1900 */ 269, 241, 358, 358, 358, 358, 358, 307, 277, 358,
- /* 1910 */ 310, 311, 312, 313, 314, 315, 358, 317, 287, 358,
- /* 1920 */ 358, 358, 291, 358, 358, 358, 358, 358, 358, 269,
- /* 1930 */ 358, 358, 241, 358, 358, 358, 358, 277, 307, 358,
- /* 1940 */ 358, 310, 311, 312, 313, 314, 315, 287, 317, 358,
- /* 1950 */ 358, 291, 358, 358, 358, 358, 358, 358, 358, 358,
- /* 1960 */ 269, 241, 358, 358, 358, 358, 358, 307, 277, 358,
- /* 1970 */ 310, 311, 312, 313, 314, 315, 358, 317, 287, 358,
- /* 1980 */ 358, 358, 291, 358, 358, 358, 358, 358, 358, 269,
- /* 1990 */ 358, 358, 358, 358, 358, 358, 358, 277, 307, 358,
- /* 2000 */ 358, 310, 311, 312, 313, 314, 315, 287, 317, 358,
- /* 2010 */ 358, 291, 241, 358, 358, 358, 358, 358, 358, 358,
- /* 2020 */ 358, 358, 358, 241, 358, 358, 358, 307, 358, 358,
- /* 2030 */ 310, 311, 312, 313, 314, 315, 358, 317, 358, 358,
- /* 2040 */ 269, 358, 358, 358, 358, 358, 358, 358, 277, 358,
- /* 2050 */ 358, 269, 358, 358, 358, 358, 358, 358, 287, 277,
- /* 2060 */ 358, 358, 291, 358, 358, 358, 358, 358, 358, 287,
- /* 2070 */ 358, 358, 358, 291, 358, 358, 358, 241, 307, 358,
- /* 2080 */ 358, 310, 311, 312, 313, 314, 315, 358, 317, 307,
- /* 2090 */ 358, 358, 310, 311, 312, 313, 314, 315, 358, 317,
- /* 2100 */ 358, 358, 358, 358, 358, 269, 358, 358, 358, 358,
- /* 2110 */ 358, 358, 358, 277, 358, 358, 358, 358, 358, 358,
- /* 2120 */ 358, 358, 358, 287, 358, 358, 358, 291, 358, 358,
- /* 2130 */ 358, 358, 358, 358, 358, 358, 358, 358, 358, 358,
- /* 2140 */ 358, 358, 358, 307, 358, 358, 310, 311, 312, 313,
- /* 2150 */ 314, 315, 358, 317,
+ /* 690 */ 134, 135, 136, 137, 138, 247, 285, 286, 247, 42,
+ /* 700 */ 43, 307, 313, 268, 207, 55, 290, 3, 240, 240,
+ /* 710 */ 259, 247, 290, 240, 290, 280, 156, 328, 329, 330,
+ /* 720 */ 20, 332, 290, 259, 276, 331, 290, 276, 268, 240,
+ /* 730 */ 266, 81, 256, 83, 258, 241, 270, 277, 178, 179,
+ /* 740 */ 276, 181, 182, 183, 184, 185, 186, 187, 188, 189,
+ /* 750 */ 190, 191, 192, 193, 194, 195, 290, 268, 290, 290,
+ /* 760 */ 240, 313, 0, 290, 93, 276, 47, 22, 93, 206,
+ /* 770 */ 207, 305, 306, 167, 168, 286, 328, 329, 330, 290,
+ /* 780 */ 332, 92, 316, 64, 295, 247, 115, 112, 268, 114,
+ /* 790 */ 115, 116, 47, 58, 41, 306, 276, 259, 309, 310,
+ /* 800 */ 311, 312, 313, 314, 41, 316, 286, 45, 319, 269,
+ /* 810 */ 290, 247, 323, 324, 276, 295, 85, 85, 270, 88,
+ /* 820 */ 88, 240, 182, 259, 335, 85, 306, 269, 88, 309,
+ /* 830 */ 310, 311, 312, 313, 314, 82, 316, 348, 290, 319,
+ /* 840 */ 276, 352, 44, 323, 324, 18, 12, 13, 247, 268,
+ /* 850 */ 23, 64, 21, 305, 306, 335, 22, 276, 208, 269,
+ /* 860 */ 259, 269, 35, 36, 316, 34, 39, 286, 348, 269,
+ /* 870 */ 247, 290, 352, 4, 247, 85, 41, 276, 88, 81,
+ /* 880 */ 41, 47, 259, 56, 240, 41, 259, 306, 19, 247,
+ /* 890 */ 309, 310, 311, 312, 313, 314, 47, 316, 64, 276,
+ /* 900 */ 319, 259, 33, 276, 323, 324, 325, 0, 81, 0,
+ /* 910 */ 0, 247, 268, 178, 45, 334, 269, 82, 276, 50,
+ /* 920 */ 276, 82, 257, 259, 55, 41, 82, 223, 47, 22,
+ /* 930 */ 286, 22, 22, 41, 290, 193, 194, 103, 41, 355,
+ /* 940 */ 276, 41, 1, 2, 117, 41, 41, 113, 299, 80,
+ /* 950 */ 306, 346, 83, 309, 310, 311, 312, 313, 314, 303,
+ /* 960 */ 316, 340, 113, 319, 268, 248, 82, 323, 324, 325,
+ /* 970 */ 240, 246, 41, 41, 82, 148, 149, 150, 334, 82,
+ /* 980 */ 153, 279, 82, 349, 308, 158, 82, 82, 225, 41,
+ /* 990 */ 156, 41, 41, 333, 113, 349, 20, 170, 268, 336,
+ /* 1000 */ 173, 240, 175, 176, 177, 247, 276, 349, 45, 41,
+ /* 1010 */ 304, 47, 178, 82, 82, 154, 286, 297, 41, 254,
+ /* 1020 */ 290, 247, 247, 189, 190, 191, 40, 284, 139, 268,
+ /* 1030 */ 82, 282, 82, 82, 282, 208, 306, 276, 247, 309,
+ /* 1040 */ 310, 311, 312, 313, 314, 20, 316, 286, 242, 319,
+ /* 1050 */ 82, 290, 242, 323, 324, 325, 295, 20, 247, 82,
+ /* 1060 */ 301, 252, 240, 286, 334, 252, 20, 306, 20, 296,
+ /* 1070 */ 309, 310, 311, 312, 313, 314, 294, 316, 252, 294,
+ /* 1080 */ 252, 276, 20, 287, 252, 252, 268, 276, 247, 252,
+ /* 1090 */ 268, 268, 242, 268, 64, 242, 335, 268, 276, 247,
+ /* 1100 */ 301, 163, 290, 300, 268, 250, 295, 286, 286, 348,
+ /* 1110 */ 268, 268, 290, 352, 268, 268, 268, 295, 250, 268,
+ /* 1120 */ 247, 250, 250, 20, 313, 240, 345, 287, 306, 216,
+ /* 1130 */ 215, 309, 310, 311, 312, 313, 314, 345, 316, 328,
+ /* 1140 */ 329, 330, 276, 332, 222, 147, 335, 290, 294, 291,
+ /* 1150 */ 291, 211, 290, 268, 290, 341, 240, 335, 342, 348,
+ /* 1160 */ 207, 276, 210, 352, 308, 20, 276, 307, 40, 339,
+ /* 1170 */ 348, 286, 226, 224, 352, 290, 338, 81, 291, 291,
+ /* 1180 */ 356, 290, 142, 229, 268, 290, 290, 288, 276, 326,
+ /* 1190 */ 287, 306, 276, 250, 309, 310, 311, 312, 313, 314,
+ /* 1200 */ 322, 316, 286, 264, 319, 250, 290, 81, 323, 324,
+ /* 1210 */ 240, 276, 247, 242, 272, 258, 351, 350, 298, 240,
+ /* 1220 */ 302, 351, 306, 250, 350, 309, 310, 311, 312, 313,
+ /* 1230 */ 314, 351, 316, 350, 262, 319, 262, 262, 268, 323,
+ /* 1240 */ 324, 251, 238, 0, 0, 72, 276, 268, 0, 47,
+ /* 1250 */ 174, 47, 47, 47, 174, 276, 286, 0, 47, 47,
+ /* 1260 */ 290, 174, 0, 47, 0, 286, 47, 0, 47, 290,
+ /* 1270 */ 0, 81, 160, 159, 113, 156, 306, 152, 151, 309,
+ /* 1280 */ 310, 311, 312, 313, 314, 306, 316, 0, 309, 310,
+ /* 1290 */ 311, 312, 313, 314, 0, 316, 19, 240, 319, 44,
+ /* 1300 */ 0, 0, 0, 324, 0, 0, 0, 0, 240, 0,
+ /* 1310 */ 33, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 1320 */ 0, 0, 45, 353, 354, 268, 0, 0, 51, 52,
+ /* 1330 */ 53, 54, 55, 276, 0, 40, 268, 0, 0, 0,
+ /* 1340 */ 0, 0, 0, 286, 276, 22, 0, 290, 40, 14,
+ /* 1350 */ 293, 0, 0, 0, 286, 0, 0, 80, 290, 37,
+ /* 1360 */ 83, 14, 37, 306, 0, 41, 309, 310, 311, 312,
+ /* 1370 */ 313, 314, 44, 316, 306, 44, 240, 309, 310, 311,
+ /* 1380 */ 312, 313, 314, 38, 316, 0, 37, 147, 0, 0,
+ /* 1390 */ 0, 37, 59, 116, 0, 0, 37, 47, 0, 37,
+ /* 1400 */ 0, 0, 45, 37, 268, 47, 45, 47, 45, 37,
+ /* 1410 */ 45, 0, 276, 0, 47, 347, 47, 88, 141, 0,
+ /* 1420 */ 0, 144, 286, 0, 22, 240, 290, 47, 47, 47,
+ /* 1430 */ 47, 47, 41, 90, 41, 0, 240, 22, 0, 162,
+ /* 1440 */ 22, 164, 306, 47, 47, 309, 310, 311, 312, 313,
+ /* 1450 */ 314, 48, 316, 268, 0, 22, 47, 0, 22, 0,
+ /* 1460 */ 0, 276, 22, 20, 268, 37, 22, 0, 47, 0,
+ /* 1470 */ 0, 286, 276, 0, 0, 290, 44, 145, 293, 206,
+ /* 1480 */ 81, 145, 286, 41, 212, 81, 290, 240, 41, 145,
+ /* 1490 */ 354, 306, 82, 41, 309, 310, 311, 312, 313, 314,
+ /* 1500 */ 44, 316, 306, 41, 82, 309, 310, 311, 312, 313,
+ /* 1510 */ 314, 81, 316, 142, 318, 268, 140, 2, 81, 41,
+ /* 1520 */ 41, 82, 81, 276, 33, 44, 240, 82, 82, 161,
+ /* 1530 */ 44, 82, 82, 286, 41, 47, 45, 290, 41, 240,
+ /* 1540 */ 293, 47, 51, 52, 53, 54, 55, 47, 47, 47,
+ /* 1550 */ 47, 178, 81, 306, 268, 44, 309, 310, 311, 312,
+ /* 1560 */ 313, 314, 276, 316, 82, 82, 81, 268, 81, 22,
+ /* 1570 */ 44, 80, 286, 81, 83, 276, 290, 82, 82, 293,
+ /* 1580 */ 81, 0, 212, 37, 22, 286, 212, 180, 240, 290,
+ /* 1590 */ 81, 143, 306, 81, 81, 309, 310, 311, 312, 313,
+ /* 1600 */ 314, 140, 316, 44, 44, 306, 82, 91, 309, 310,
+ /* 1610 */ 311, 312, 313, 314, 81, 316, 268, 240, 81, 81,
+ /* 1620 */ 92, 82, 47, 47, 276, 81, 47, 82, 105, 81,
+ /* 1630 */ 47, 140, 82, 142, 286, 144, 81, 146, 290, 47,
+ /* 1640 */ 82, 47, 81, 22, 81, 268, 82, 105, 105, 93,
+ /* 1650 */ 81, 81, 47, 276, 306, 164, 240, 309, 310, 311,
+ /* 1660 */ 312, 313, 314, 286, 316, 81, 113, 290, 22, 59,
+ /* 1670 */ 105, 58, 47, 64, 79, 41, 47, 47, 47, 47,
+ /* 1680 */ 47, 22, 47, 306, 268, 47, 309, 310, 311, 312,
+ /* 1690 */ 313, 314, 276, 316, 47, 47, 47, 47, 47, 47,
+ /* 1700 */ 47, 64, 286, 0, 47, 240, 290, 45, 37, 0,
+ /* 1710 */ 47, 45, 37, 0, 47, 45, 240, 37, 0, 47,
+ /* 1720 */ 37, 45, 306, 0, 47, 309, 310, 311, 312, 313,
+ /* 1730 */ 314, 0, 316, 268, 46, 0, 22, 21, 357, 357,
+ /* 1740 */ 21, 276, 22, 22, 268, 20, 357, 357, 357, 357,
+ /* 1750 */ 357, 286, 276, 357, 357, 290, 357, 357, 357, 357,
+ /* 1760 */ 357, 357, 286, 357, 357, 240, 290, 357, 357, 357,
+ /* 1770 */ 357, 306, 357, 357, 309, 310, 311, 312, 313, 314,
+ /* 1780 */ 240, 316, 306, 357, 357, 309, 310, 311, 312, 313,
+ /* 1790 */ 314, 357, 316, 268, 357, 357, 357, 240, 357, 357,
+ /* 1800 */ 357, 276, 357, 357, 357, 357, 357, 357, 268, 357,
+ /* 1810 */ 357, 286, 240, 357, 357, 290, 276, 357, 357, 357,
+ /* 1820 */ 357, 357, 357, 357, 357, 268, 286, 357, 357, 240,
+ /* 1830 */ 290, 306, 357, 276, 309, 310, 311, 312, 313, 314,
+ /* 1840 */ 268, 316, 357, 286, 357, 357, 306, 290, 276, 309,
+ /* 1850 */ 310, 311, 312, 313, 314, 357, 316, 268, 286, 357,
+ /* 1860 */ 357, 357, 290, 306, 357, 276, 309, 310, 311, 312,
+ /* 1870 */ 313, 314, 357, 316, 357, 286, 240, 357, 306, 290,
+ /* 1880 */ 357, 309, 310, 311, 312, 313, 314, 357, 316, 357,
+ /* 1890 */ 357, 357, 357, 357, 357, 306, 357, 357, 309, 310,
+ /* 1900 */ 311, 312, 313, 314, 268, 316, 357, 357, 240, 357,
+ /* 1910 */ 357, 357, 276, 357, 357, 357, 357, 357, 357, 357,
+ /* 1920 */ 357, 357, 286, 357, 357, 240, 290, 357, 357, 357,
+ /* 1930 */ 357, 357, 357, 357, 357, 357, 268, 357, 357, 357,
+ /* 1940 */ 357, 357, 306, 357, 276, 309, 310, 311, 312, 313,
+ /* 1950 */ 314, 357, 316, 268, 286, 357, 357, 240, 290, 357,
+ /* 1960 */ 357, 276, 357, 357, 357, 357, 357, 357, 357, 357,
+ /* 1970 */ 357, 286, 357, 357, 306, 290, 357, 309, 310, 311,
+ /* 1980 */ 312, 313, 314, 357, 316, 268, 357, 357, 357, 357,
+ /* 1990 */ 357, 306, 357, 276, 309, 310, 311, 312, 313, 314,
+ /* 2000 */ 357, 316, 357, 286, 357, 357, 240, 290, 357, 357,
+ /* 2010 */ 357, 357, 357, 357, 357, 357, 357, 357, 357, 247,
+ /* 2020 */ 357, 357, 357, 306, 357, 357, 309, 310, 311, 312,
+ /* 2030 */ 313, 314, 357, 316, 268, 357, 357, 357, 357, 357,
+ /* 2040 */ 357, 357, 276, 357, 357, 357, 357, 357, 276, 357,
+ /* 2050 */ 357, 357, 286, 357, 357, 357, 290, 357, 357, 357,
+ /* 2060 */ 357, 357, 357, 357, 357, 357, 357, 295, 357, 357,
+ /* 2070 */ 357, 357, 306, 357, 357, 309, 310, 311, 312, 313,
+ /* 2080 */ 314, 357, 316, 357, 357, 313, 357, 357, 357, 357,
+ /* 2090 */ 357, 357, 357, 357, 357, 357, 357, 357, 357, 357,
+ /* 2100 */ 328, 329, 330, 357, 332, 357, 357, 335, 357, 357,
+ /* 2110 */ 357, 357, 357, 357, 357, 357, 357, 357, 357, 357,
+ /* 2120 */ 348, 357, 357, 357, 352, 237, 237, 237, 237, 237,
+ /* 2130 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237,
+ /* 2140 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237,
+ /* 2150 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237,
+ /* 2160 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237,
+ /* 2170 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237,
+ /* 2180 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237,
+ /* 2190 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237,
+ /* 2200 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237,
+ /* 2210 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237,
+ /* 2220 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237,
+ /* 2230 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237,
+ /* 2240 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237,
+ /* 2250 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237,
+ /* 2260 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237,
+ /* 2270 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237,
+ /* 2280 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237,
+ /* 2290 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237,
+ /* 2300 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237,
+ /* 2310 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237,
+ /* 2320 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237,
+ /* 2330 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237,
+ /* 2340 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237,
+ /* 2350 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237,
+ /* 2360 */ 237, 237,
};
-#define YY_SHIFT_COUNT (604)
+#define YY_SHIFT_COUNT (611)
#define YY_SHIFT_MIN (0)
-#define YY_SHIFT_MAX (1729)
+#define YY_SHIFT_MAX (1735)
static const unsigned short int yy_shift_ofst[] = {
- /* 0 */ 739, 0, 0, 48, 96, 96, 96, 96, 280, 280,
+ /* 0 */ 827, 0, 0, 48, 96, 96, 96, 96, 280, 280,
/* 10 */ 96, 96, 328, 376, 560, 376, 376, 376, 376, 376,
/* 20 */ 376, 376, 376, 376, 376, 376, 376, 376, 376, 376,
- /* 30 */ 376, 376, 376, 376, 376, 376, 376, 376, 37, 37,
- /* 40 */ 68, 68, 68, 1656, 1656, 1656, 1656, 64, 271, 179,
- /* 50 */ 18, 18, 13, 13, 123, 179, 179, 18, 18, 18,
- /* 60 */ 18, 18, 18, 35, 18, 182, 190, 314, 182, 18,
- /* 70 */ 18, 182, 18, 182, 182, 314, 182, 18, 334, 556,
- /* 80 */ 343, 107, 107, 192, 312, 746, 746, 746, 746, 746,
- /* 90 */ 746, 746, 746, 746, 746, 746, 746, 746, 746, 746,
- /* 100 */ 746, 746, 746, 746, 441, 580, 110, 110, 206, 720,
- /* 110 */ 393, 393, 393, 244, 720, 384, 314, 182, 182, 314,
- /* 120 */ 356, 466, 549, 549, 549, 549, 549, 549, 549, 1309,
- /* 130 */ 294, 197, 21, 78, 268, 279, 517, 583, 694, 292,
- /* 140 */ 502, 611, 576, 687, 576, 598, 598, 598, 756, 750,
- /* 150 */ 982, 964, 965, 866, 982, 982, 981, 890, 890, 982,
- /* 160 */ 1019, 1019, 1032, 35, 314, 35, 1040, 35, 384, 1049,
- /* 170 */ 35, 35, 982, 35, 1019, 182, 182, 182, 182, 182,
- /* 180 */ 182, 182, 182, 182, 182, 182, 982, 1019, 1016, 1032,
- /* 190 */ 334, 957, 314, 334, 1040, 334, 384, 1049, 334, 1114,
- /* 200 */ 925, 927, 1016, 925, 927, 1016, 1016, 182, 923, 938,
- /* 210 */ 939, 951, 967, 384, 1161, 1146, 961, 972, 966, 961,
- /* 220 */ 972, 961, 972, 1118, 927, 1016, 1016, 927, 1016, 1063,
- /* 230 */ 384, 1049, 334, 356, 334, 384, 1167, 466, 982, 334,
- /* 240 */ 1019, 2154, 2154, 2154, 2154, 2154, 2154, 2154, 2154, 635,
- /* 250 */ 928, 1184, 1043, 921, 997, 119, 14, 30, 485, 126,
- /* 260 */ 498, 354, 354, 354, 354, 354, 354, 354, 354, 309,
- /* 270 */ 321, 415, 404, 8, 445, 622, 622, 622, 622, 772,
- /* 280 */ 774, 734, 742, 763, 765, 435, 878, 883, 832, 623,
- /* 290 */ 724, 830, 857, 863, 879, 725, 751, 787, 865, 796,
- /* 300 */ 869, 792, 872, 876, 881, 894, 904, 773, 854, 914,
- /* 310 */ 917, 930, 943, 946, 947, 859, 820, 1275, 1276, 1205,
- /* 320 */ 1279, 1233, 1107, 1235, 1236, 1237, 1112, 1287, 1242, 1243,
- /* 330 */ 1117, 1293, 1247, 1295, 1249, 1297, 1251, 1303, 1225, 1149,
- /* 340 */ 1147, 1197, 1163, 1317, 1322, 1171, 1174, 1329, 1330, 1288,
- /* 350 */ 1331, 1333, 1334, 1336, 1337, 1338, 1339, 1340, 1341, 1347,
- /* 360 */ 1348, 1350, 1351, 1353, 1355, 1356, 1357, 1358, 1304, 1359,
- /* 370 */ 1366, 1367, 1376, 1377, 1378, 1321, 1379, 1380, 1381, 1383,
- /* 380 */ 1384, 1386, 1349, 1360, 1346, 1385, 1354, 1387, 1361, 1390,
- /* 390 */ 1362, 1365, 1393, 1394, 1395, 1403, 1369, 1404, 1352, 1407,
- /* 400 */ 1408, 1363, 1364, 1389, 1414, 1382, 1388, 1391, 1423, 1392,
- /* 410 */ 1396, 1397, 1424, 1398, 1399, 1400, 1427, 1430, 1440, 1442,
- /* 420 */ 1373, 1345, 1401, 1425, 1443, 1402, 1405, 1416, 1419, 1426,
- /* 430 */ 1428, 1429, 1431, 1433, 1446, 1448, 1468, 1452, 1435, 1475,
- /* 440 */ 1455, 1434, 1479, 1462, 1485, 1465, 1469, 1488, 1368, 1444,
- /* 450 */ 1490, 1335, 1470, 1371, 1375, 1494, 1495, 1498, 1420, 1466,
- /* 460 */ 1406, 1458, 1463, 1307, 1439, 1480, 1441, 1447, 1449, 1450,
- /* 470 */ 1445, 1482, 1484, 1496, 1464, 1491, 1342, 1461, 1467, 1501,
- /* 480 */ 1318, 1508, 1507, 1471, 1514, 1344, 1476, 1511, 1513, 1515,
- /* 490 */ 1517, 1518, 1525, 1476, 1557, 1409, 1520, 1493, 1497, 1499,
- /* 500 */ 1531, 1502, 1503, 1532, 1556, 1410, 1505, 1510, 1516, 1506,
- /* 510 */ 1521, 1436, 1522, 1588, 1555, 1453, 1524, 1504, 1551, 1552,
- /* 520 */ 1526, 1519, 1527, 1576, 1528, 1523, 1529, 1558, 1562, 1535,
- /* 530 */ 1530, 1569, 1537, 1540, 1571, 1548, 1550, 1585, 1553, 1554,
- /* 540 */ 1589, 1559, 1533, 1534, 1536, 1538, 1621, 1560, 1564, 1565,
- /* 550 */ 1599, 1567, 1561, 1601, 1629, 1602, 1606, 1603, 1587, 1625,
- /* 560 */ 1623, 1624, 1628, 1630, 1633, 1650, 1634, 1641, 1612, 1426,
- /* 570 */ 1644, 1428, 1645, 1646, 1647, 1648, 1649, 1652, 1697, 1653,
- /* 580 */ 1657, 1667, 1705, 1659, 1662, 1673, 1711, 1666, 1669, 1678,
- /* 590 */ 1716, 1670, 1676, 1681, 1722, 1679, 1683, 1725, 1727, 1708,
- /* 600 */ 1719, 1715, 1726, 1729, 1721,
+ /* 30 */ 376, 376, 376, 376, 376, 376, 376, 376, 120, 120,
+ /* 40 */ 23, 23, 23, 834, 834, 834, 834, 325, 650, 124,
+ /* 50 */ 30, 30, 42, 42, 61, 124, 124, 30, 30, 30,
+ /* 60 */ 30, 30, 30, 194, 30, 30, 360, 433, 456, 360,
+ /* 70 */ 30, 30, 360, 30, 360, 360, 456, 360, 30, 611,
+ /* 80 */ 556, 59, 110, 110, 186, 378, 321, 321, 321, 321,
+ /* 90 */ 321, 321, 321, 321, 321, 321, 321, 321, 321, 321,
+ /* 100 */ 321, 321, 321, 321, 321, 161, 153, 575, 575, 299,
+ /* 110 */ 440, 246, 246, 246, 399, 440, 700, 456, 360, 360,
+ /* 120 */ 456, 689, 787, 441, 441, 441, 441, 441, 441, 441,
+ /* 130 */ 1277, 107, 97, 209, 117, 132, 317, 85, 183, 657,
+ /* 140 */ 745, 671, 297, 563, 497, 563, 704, 704, 704, 409,
+ /* 150 */ 640, 976, 963, 964, 861, 976, 976, 986, 889, 889,
+ /* 160 */ 976, 1025, 1025, 1037, 194, 456, 194, 1046, 1048, 194,
+ /* 170 */ 1046, 194, 700, 1062, 194, 194, 976, 194, 1025, 360,
+ /* 180 */ 360, 360, 360, 360, 360, 360, 360, 360, 360, 360,
+ /* 190 */ 976, 1025, 1030, 1037, 611, 938, 456, 611, 976, 1046,
+ /* 200 */ 611, 700, 1062, 611, 1103, 913, 915, 1030, 913, 915,
+ /* 210 */ 1030, 1030, 360, 922, 998, 940, 952, 953, 700, 1145,
+ /* 220 */ 1128, 946, 949, 954, 946, 949, 946, 949, 1096, 915,
+ /* 230 */ 1030, 1030, 915, 1030, 1040, 700, 1062, 611, 689, 611,
+ /* 240 */ 700, 1126, 787, 976, 611, 1025, 2125, 2125, 2125, 2125,
+ /* 250 */ 2125, 2125, 2125, 577, 1491, 294, 869, 64, 14, 406,
+ /* 260 */ 478, 512, 485, 675, 21, 21, 21, 21, 21, 21,
+ /* 270 */ 21, 21, 237, 330, 533, 500, 447, 475, 643, 643,
+ /* 280 */ 643, 643, 762, 753, 731, 732, 740, 790, 907, 909,
+ /* 290 */ 910, 831, 606, 835, 839, 844, 941, 742, 763, 133,
+ /* 300 */ 884, 735, 892, 798, 900, 904, 905, 931, 932, 849,
+ /* 310 */ 881, 897, 948, 950, 951, 968, 977, 1, 719, 1243,
+ /* 320 */ 1244, 1173, 1248, 1202, 1076, 1204, 1205, 1206, 1080, 1257,
+ /* 330 */ 1211, 1212, 1087, 1262, 1216, 1264, 1219, 1267, 1221, 1270,
+ /* 340 */ 1190, 1112, 1114, 1161, 1119, 1287, 1294, 1125, 1127, 1304,
+ /* 350 */ 1305, 1255, 1300, 1301, 1302, 1306, 1307, 1309, 1311, 1312,
+ /* 360 */ 1313, 1314, 1315, 1316, 1317, 1318, 1319, 1320, 1321, 1326,
+ /* 370 */ 1295, 1327, 1334, 1337, 1338, 1339, 1340, 1323, 1341, 1342,
+ /* 380 */ 1346, 1351, 1352, 1353, 1308, 1322, 1324, 1335, 1328, 1347,
+ /* 390 */ 1331, 1355, 1345, 1325, 1356, 1364, 1385, 1349, 1240, 1388,
+ /* 400 */ 1389, 1354, 1390, 1333, 1394, 1395, 1350, 1357, 1359, 1398,
+ /* 410 */ 1358, 1361, 1362, 1400, 1360, 1363, 1366, 1401, 1367, 1365,
+ /* 420 */ 1372, 1411, 1413, 1419, 1420, 1343, 1329, 1369, 1402, 1423,
+ /* 430 */ 1380, 1381, 1382, 1383, 1391, 1393, 1384, 1396, 1397, 1435,
+ /* 440 */ 1415, 1438, 1418, 1403, 1454, 1433, 1409, 1457, 1436, 1459,
+ /* 450 */ 1440, 1443, 1460, 1332, 1421, 1469, 1368, 1444, 1336, 1371,
+ /* 460 */ 1467, 1470, 1473, 1344, 1474, 1399, 1428, 1376, 1442, 1447,
+ /* 470 */ 1272, 1410, 1452, 1422, 1404, 1430, 1437, 1439, 1462, 1432,
+ /* 480 */ 1456, 1441, 1478, 1370, 1445, 1446, 1481, 1273, 1479, 1486,
+ /* 490 */ 1449, 1493, 1374, 1450, 1488, 1494, 1500, 1501, 1502, 1503,
+ /* 500 */ 1450, 1515, 1373, 1497, 1482, 1471, 1483, 1511, 1485, 1487,
+ /* 510 */ 1526, 1547, 1407, 1492, 1495, 1496, 1499, 1509, 1448, 1512,
+ /* 520 */ 1581, 1546, 1461, 1513, 1516, 1559, 1560, 1533, 1524, 1537,
+ /* 530 */ 1562, 1538, 1528, 1539, 1575, 1576, 1544, 1545, 1579, 1548,
+ /* 540 */ 1550, 1583, 1555, 1558, 1592, 1561, 1564, 1594, 1563, 1523,
+ /* 550 */ 1542, 1543, 1565, 1621, 1556, 1569, 1570, 1605, 1584, 1553,
+ /* 560 */ 1646, 1610, 1613, 1625, 1609, 1595, 1634, 1629, 1630, 1631,
+ /* 570 */ 1632, 1633, 1659, 1635, 1638, 1637, 1391, 1647, 1393, 1648,
+ /* 580 */ 1649, 1650, 1651, 1652, 1653, 1703, 1657, 1662, 1671, 1709,
+ /* 590 */ 1663, 1666, 1675, 1713, 1667, 1670, 1680, 1718, 1672, 1676,
+ /* 600 */ 1683, 1723, 1677, 1688, 1731, 1735, 1714, 1716, 1720, 1721,
+ /* 610 */ 1719, 1725,
};
-#define YY_REDUCE_COUNT (248)
-#define YY_REDUCE_MIN (-335)
-#define YY_REDUCE_MAX (1836)
+#define YY_REDUCE_COUNT (252)
+#define YY_REDUCE_MIN (-334)
+#define YY_REDUCE_MAX (1772)
static const short yy_reduce_ofst[] = {
- /* 0 */ -190, -238, 489, 520, 308, 616, 679, 731, 790, 841,
- /* 10 */ 858, 916, 948, -140, 1001, 1058, 138, 1105, 1144, 1195,
- /* 20 */ 1224, 1256, 1312, 1343, 1372, 1421, 1432, 1483, 1512, 1544,
- /* 30 */ 1600, 1631, 1660, 1691, 1720, 1771, 1782, 1836, 546, 971,
- /* 40 */ 88, -1, 113, -266, -181, 46, 103, 239, -286, -85,
- /* 50 */ -163, 87, -240, -223, -335, -334, -253, -49, 85, 135,
- /* 60 */ 248, 331, 511, -172, 521, -124, -111, 56, 136, 564,
- /* 70 */ 591, -21, 613, 216, 130, 152, 320, 614, -173, -177,
- /* 80 */ -256, -256, -256, -188, -105, -86, 213, 261, 287, 370,
- /* 90 */ 375, 385, 422, 423, 424, 426, 427, 428, 429, 430,
- /* 100 */ 431, 434, 482, 486, -100, 4, 305, 348, 63, 473,
- /* 110 */ -83, 432, 455, 355, 476, 94, 492, 457, -8, 447,
- /* 120 */ 543, -95, -270, 225, 235, 317, 369, 390, 593, 516,
- /* 130 */ 654, 650, 578, 590, 639, 618, 683, 683, 707, 711,
- /* 140 */ 684, 659, 629, 629, 629, 619, 626, 628, 664, 683,
- /* 150 */ 758, 712, 761, 721, 775, 777, 741, 747, 757, 786,
- /* 160 */ 806, 807, 752, 800, 771, 808, 768, 811, 788, 783,
- /* 170 */ 821, 822, 825, 826, 835, 814, 815, 816, 817, 827,
- /* 180 */ 837, 845, 846, 847, 848, 850, 842, 851, 804, 789,
- /* 190 */ 870, 823, 836, 880, 834, 882, 853, 849, 885, 831,
- /* 200 */ 798, 855, 868, 818, 871, 875, 888, 683, 819, 838,
- /* 210 */ 852, 828, 629, 899, 887, 860, 839, 867, 843, 886,
- /* 220 */ 873, 891, 893, 897, 907, 910, 911, 912, 915, 932,
- /* 230 */ 945, 944, 983, 980, 986, 969, 976, 991, 1003, 1002,
- /* 240 */ 1009, 955, 953, 973, 994, 1008, 1010, 1020, 1035,
+ /* 0 */ -185, 489, 520, -223, 91, 581, 644, 730, 761, 822,
+ /* 10 */ 885, 916, 316, 970, 979, 1057, 1068, 1136, 1185, 1196,
+ /* 20 */ 1247, 1286, 1299, 1348, 1377, 1416, 1465, 1476, 1525, 1540,
+ /* 30 */ 1557, 1572, 1589, 1636, 1668, 1685, 1717, 1766, 811, 1772,
+ /* 40 */ 98, 389, 448, -267, -265, 466, 548, -293, -176, 34,
+ /* 50 */ 332, 464, -236, -189, -334, -304, -237, 57, 111, 119,
+ /* 60 */ 334, 451, 538, 180, 564, 601, 104, 79, -21, -113,
+ /* 70 */ 623, 627, 228, 642, 86, 337, -133, 218, 664, -239,
+ /* 80 */ 58, -48, -48, -48, -220, -253, -146, -110, 6, 13,
+ /* 90 */ 122, 154, 193, 215, 306, 313, 371, 416, 422, 424,
+ /* 100 */ 432, 436, 468, 469, 473, 150, 249, -178, 129, -46,
+ /* 110 */ 412, -87, 278, 394, -123, 415, 88, 329, 460, 435,
+ /* 120 */ 411, 326, 476, -254, 540, 558, 590, 592, 600, 647,
+ /* 130 */ 649, 494, 665, 584, 605, 656, 621, 696, 696, 725,
+ /* 140 */ 717, 702, 676, 660, 660, 660, 634, 646, 658, 663,
+ /* 150 */ 696, 758, 706, 765, 720, 774, 775, 743, 749, 752,
+ /* 160 */ 791, 806, 810, 759, 809, 777, 813, 782, 773, 826,
+ /* 170 */ 785, 828, 805, 796, 832, 833, 841, 837, 850, 818,
+ /* 180 */ 823, 825, 829, 836, 842, 843, 846, 847, 848, 851,
+ /* 190 */ 852, 853, 812, 799, 855, 803, 821, 868, 873, 854,
+ /* 200 */ 871, 866, 840, 872, 856, 781, 858, 857, 792, 859,
+ /* 210 */ 862, 864, 696, 816, 814, 830, 838, 660, 890, 860,
+ /* 220 */ 863, 865, 867, 824, 870, 874, 880, 883, 878, 887,
+ /* 230 */ 891, 895, 888, 896, 899, 912, 903, 943, 939, 955,
+ /* 240 */ 935, 942, 957, 965, 973, 971, 920, 918, 972, 974,
+ /* 250 */ 975, 990, 1004,
};
static const YYACTIONTYPE yy_default[] = {
- /* 0 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 10 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 20 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 30 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 40 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 50 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 60 */ 1345, 1345, 1345, 1414, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 70 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1412, 1552,
- /* 80 */ 1345, 1717, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 90 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 100 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1414, 1345,
- /* 110 */ 1728, 1728, 1728, 1412, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 120 */ 1507, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1590,
- /* 130 */ 1345, 1345, 1794, 1345, 1596, 1752, 1345, 1345, 1345, 1345,
- /* 140 */ 1460, 1744, 1720, 1734, 1721, 1779, 1779, 1779, 1737, 1345,
- /* 150 */ 1345, 1345, 1345, 1582, 1345, 1345, 1557, 1554, 1554, 1345,
- /* 160 */ 1345, 1345, 1345, 1414, 1345, 1414, 1345, 1414, 1345, 1345,
- /* 170 */ 1414, 1414, 1345, 1414, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 180 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 190 */ 1412, 1592, 1345, 1412, 1345, 1412, 1345, 1345, 1412, 1345,
- /* 200 */ 1759, 1757, 1345, 1759, 1757, 1345, 1345, 1345, 1771, 1767,
- /* 210 */ 1750, 1748, 1734, 1345, 1345, 1345, 1785, 1781, 1797, 1785,
- /* 220 */ 1781, 1785, 1781, 1345, 1757, 1345, 1345, 1757, 1345, 1565,
- /* 230 */ 1345, 1345, 1412, 1345, 1412, 1345, 1476, 1345, 1345, 1412,
- /* 240 */ 1345, 1584, 1598, 1574, 1510, 1510, 1510, 1415, 1350, 1345,
- /* 250 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 260 */ 1472, 1661, 1770, 1769, 1693, 1692, 1691, 1689, 1660, 1345,
- /* 270 */ 1345, 1345, 1345, 1345, 1345, 1654, 1655, 1653, 1652, 1345,
- /* 280 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 290 */ 1345, 1345, 1345, 1345, 1718, 1345, 1782, 1786, 1345, 1345,
- /* 300 */ 1345, 1638, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 310 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 320 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 330 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 340 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 350 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 360 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 370 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 380 */ 1345, 1345, 1345, 1345, 1379, 1345, 1345, 1345, 1345, 1345,
- /* 390 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 400 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 410 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 420 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1441,
- /* 430 */ 1440, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 440 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 450 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 460 */ 1345, 1741, 1751, 1345, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 470 */ 1345, 1345, 1345, 1638, 1345, 1768, 1345, 1727, 1723, 1345,
- /* 480 */ 1345, 1719, 1345, 1345, 1780, 1345, 1345, 1345, 1345, 1345,
- /* 490 */ 1345, 1345, 1345, 1345, 1713, 1345, 1686, 1345, 1345, 1345,
- /* 500 */ 1345, 1345, 1345, 1345, 1345, 1648, 1345, 1345, 1345, 1345,
- /* 510 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1637, 1345,
- /* 520 */ 1677, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1504,
- /* 530 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 540 */ 1345, 1345, 1489, 1487, 1486, 1485, 1345, 1482, 1345, 1345,
- /* 550 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1434,
- /* 560 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1425,
- /* 570 */ 1345, 1424, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 580 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 590 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345,
- /* 600 */ 1345, 1345, 1345, 1345, 1345,
+ /* 0 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 10 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 20 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 30 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 40 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 50 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 60 */ 1349, 1349, 1349, 1418, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 70 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1416,
+ /* 80 */ 1556, 1349, 1720, 1349, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 90 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 100 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1418,
+ /* 110 */ 1349, 1731, 1731, 1731, 1416, 1349, 1349, 1349, 1349, 1349,
+ /* 120 */ 1349, 1512, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 130 */ 1592, 1349, 1349, 1797, 1349, 1598, 1755, 1349, 1349, 1349,
+ /* 140 */ 1349, 1465, 1747, 1723, 1737, 1724, 1782, 1782, 1782, 1740,
+ /* 150 */ 1349, 1349, 1349, 1349, 1584, 1349, 1349, 1561, 1558, 1558,
+ /* 160 */ 1349, 1349, 1349, 1349, 1418, 1349, 1418, 1349, 1349, 1418,
+ /* 170 */ 1349, 1418, 1349, 1349, 1418, 1418, 1349, 1418, 1349, 1349,
+ /* 180 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 190 */ 1349, 1349, 1349, 1349, 1416, 1594, 1349, 1416, 1349, 1349,
+ /* 200 */ 1416, 1349, 1349, 1416, 1349, 1762, 1760, 1349, 1762, 1760,
+ /* 210 */ 1349, 1349, 1349, 1774, 1770, 1753, 1751, 1737, 1349, 1349,
+ /* 220 */ 1349, 1788, 1784, 1800, 1788, 1784, 1788, 1784, 1349, 1760,
+ /* 230 */ 1349, 1349, 1760, 1349, 1569, 1349, 1349, 1416, 1349, 1416,
+ /* 240 */ 1349, 1481, 1349, 1349, 1416, 1349, 1586, 1600, 1515, 1515,
+ /* 250 */ 1515, 1419, 1354, 1349, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 260 */ 1349, 1349, 1349, 1349, 1664, 1773, 1772, 1696, 1695, 1694,
+ /* 270 */ 1692, 1663, 1477, 1349, 1349, 1349, 1349, 1349, 1657, 1658,
+ /* 280 */ 1656, 1655, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 290 */ 1349, 1349, 1349, 1349, 1349, 1349, 1721, 1349, 1785, 1789,
+ /* 300 */ 1349, 1349, 1349, 1640, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 310 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 320 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 330 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 340 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 350 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 360 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 370 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 380 */ 1349, 1349, 1349, 1349, 1349, 1349, 1383, 1349, 1349, 1349,
+ /* 390 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 400 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 410 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 420 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 430 */ 1349, 1349, 1349, 1349, 1446, 1445, 1349, 1349, 1349, 1349,
+ /* 440 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 450 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 460 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1744, 1754,
+ /* 470 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 480 */ 1640, 1349, 1771, 1349, 1730, 1726, 1349, 1349, 1722, 1349,
+ /* 490 */ 1349, 1783, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 500 */ 1349, 1716, 1349, 1689, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 510 */ 1349, 1349, 1651, 1349, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 520 */ 1349, 1349, 1349, 1349, 1349, 1639, 1349, 1680, 1349, 1349,
+ /* 530 */ 1349, 1349, 1349, 1349, 1349, 1349, 1509, 1349, 1349, 1349,
+ /* 540 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1494,
+ /* 550 */ 1492, 1491, 1490, 1349, 1487, 1349, 1349, 1349, 1349, 1349,
+ /* 560 */ 1349, 1349, 1349, 1349, 1349, 1349, 1438, 1349, 1349, 1349,
+ /* 570 */ 1349, 1349, 1349, 1349, 1349, 1349, 1429, 1349, 1428, 1349,
+ /* 580 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 590 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 600 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349,
+ /* 610 */ 1349, 1349,
};
/********** End of lemon-generated parsing tables *****************************/
@@ -903,6 +929,7 @@ static const YYCODETYPE yyFallback[] = {
0, /* VGROUPS => nothing */
0, /* SINGLE_STABLE => nothing */
0, /* RETENTIONS => nothing */
+ 0, /* SCHEMALESS => nothing */
0, /* NK_COLON => nothing */
0, /* TABLE => nothing */
0, /* NK_LP => nothing */
@@ -936,7 +963,6 @@ static const YYCODETYPE yyFallback[] = {
0, /* BLOB => nothing */
0, /* VARBINARY => nothing */
0, /* DECIMAL => nothing */
- 0, /* DELAY => nothing */
0, /* FILE_FACTOR => nothing */
0, /* NK_FLOAT => nothing */
0, /* ROLLUP => nothing */
@@ -971,8 +997,8 @@ static const YYCODETYPE yyFallback[] = {
0, /* INTERVAL => nothing */
0, /* TOPIC => nothing */
0, /* AS => nothing */
- 0, /* WITH => nothing */
- 0, /* SCHEMA => nothing */
+ 0, /* CONSUMER => nothing */
+ 0, /* GROUP => nothing */
0, /* DESC => nothing */
0, /* DESCRIBE => nothing */
0, /* RESET => nothing */
@@ -1047,7 +1073,6 @@ static const YYCODETYPE yyFallback[] = {
0, /* PREV => nothing */
0, /* LINEAR => nothing */
0, /* NEXT => nothing */
- 0, /* GROUP => nothing */
0, /* HAVING => nothing */
0, /* ORDER => nothing */
0, /* SLIMIT => nothing */
@@ -1057,12 +1082,12 @@ static const YYCODETYPE yyFallback[] = {
0, /* ASC => nothing */
0, /* NULLS => nothing */
0, /* ID => nothing */
- 231, /* NK_BITNOT => ID */
- 231, /* INSERT => ID */
- 231, /* VALUES => ID */
- 231, /* IMPORT => ID */
- 231, /* NK_SEMI => ID */
- 231, /* FILE => ID */
+ 230, /* NK_BITNOT => ID */
+ 230, /* INSERT => ID */
+ 230, /* VALUES => ID */
+ 230, /* IMPORT => ID */
+ 230, /* NK_SEMI => ID */
+ 230, /* FILE => ID */
};
#endif /* YYFALLBACK */
@@ -1228,40 +1253,40 @@ static const char *const yyTokenName[] = {
/* 75 */ "VGROUPS",
/* 76 */ "SINGLE_STABLE",
/* 77 */ "RETENTIONS",
- /* 78 */ "NK_COLON",
- /* 79 */ "TABLE",
- /* 80 */ "NK_LP",
- /* 81 */ "NK_RP",
- /* 82 */ "STABLE",
- /* 83 */ "ADD",
- /* 84 */ "COLUMN",
- /* 85 */ "MODIFY",
- /* 86 */ "RENAME",
- /* 87 */ "TAG",
- /* 88 */ "SET",
- /* 89 */ "NK_EQ",
- /* 90 */ "USING",
- /* 91 */ "TAGS",
- /* 92 */ "COMMENT",
- /* 93 */ "BOOL",
- /* 94 */ "TINYINT",
- /* 95 */ "SMALLINT",
- /* 96 */ "INT",
- /* 97 */ "INTEGER",
- /* 98 */ "BIGINT",
- /* 99 */ "FLOAT",
- /* 100 */ "DOUBLE",
- /* 101 */ "BINARY",
- /* 102 */ "TIMESTAMP",
- /* 103 */ "NCHAR",
- /* 104 */ "UNSIGNED",
- /* 105 */ "JSON",
- /* 106 */ "VARCHAR",
- /* 107 */ "MEDIUMBLOB",
- /* 108 */ "BLOB",
- /* 109 */ "VARBINARY",
- /* 110 */ "DECIMAL",
- /* 111 */ "DELAY",
+ /* 78 */ "SCHEMALESS",
+ /* 79 */ "NK_COLON",
+ /* 80 */ "TABLE",
+ /* 81 */ "NK_LP",
+ /* 82 */ "NK_RP",
+ /* 83 */ "STABLE",
+ /* 84 */ "ADD",
+ /* 85 */ "COLUMN",
+ /* 86 */ "MODIFY",
+ /* 87 */ "RENAME",
+ /* 88 */ "TAG",
+ /* 89 */ "SET",
+ /* 90 */ "NK_EQ",
+ /* 91 */ "USING",
+ /* 92 */ "TAGS",
+ /* 93 */ "COMMENT",
+ /* 94 */ "BOOL",
+ /* 95 */ "TINYINT",
+ /* 96 */ "SMALLINT",
+ /* 97 */ "INT",
+ /* 98 */ "INTEGER",
+ /* 99 */ "BIGINT",
+ /* 100 */ "FLOAT",
+ /* 101 */ "DOUBLE",
+ /* 102 */ "BINARY",
+ /* 103 */ "TIMESTAMP",
+ /* 104 */ "NCHAR",
+ /* 105 */ "UNSIGNED",
+ /* 106 */ "JSON",
+ /* 107 */ "VARCHAR",
+ /* 108 */ "MEDIUMBLOB",
+ /* 109 */ "BLOB",
+ /* 110 */ "VARBINARY",
+ /* 111 */ "DECIMAL",
/* 112 */ "FILE_FACTOR",
/* 113 */ "NK_FLOAT",
/* 114 */ "ROLLUP",
@@ -1296,8 +1321,8 @@ static const char *const yyTokenName[] = {
/* 143 */ "INTERVAL",
/* 144 */ "TOPIC",
/* 145 */ "AS",
- /* 146 */ "WITH",
- /* 147 */ "SCHEMA",
+ /* 146 */ "CONSUMER",
+ /* 147 */ "GROUP",
/* 148 */ "DESC",
/* 149 */ "DESCRIBE",
/* 150 */ "RESET",
@@ -1372,142 +1397,141 @@ static const char *const yyTokenName[] = {
/* 219 */ "PREV",
/* 220 */ "LINEAR",
/* 221 */ "NEXT",
- /* 222 */ "GROUP",
- /* 223 */ "HAVING",
- /* 224 */ "ORDER",
- /* 225 */ "SLIMIT",
- /* 226 */ "SOFFSET",
- /* 227 */ "LIMIT",
- /* 228 */ "OFFSET",
- /* 229 */ "ASC",
- /* 230 */ "NULLS",
- /* 231 */ "ID",
- /* 232 */ "NK_BITNOT",
- /* 233 */ "INSERT",
- /* 234 */ "VALUES",
- /* 235 */ "IMPORT",
- /* 236 */ "NK_SEMI",
- /* 237 */ "FILE",
- /* 238 */ "cmd",
- /* 239 */ "account_options",
- /* 240 */ "alter_account_options",
- /* 241 */ "literal",
- /* 242 */ "alter_account_option",
- /* 243 */ "user_name",
- /* 244 */ "privileges",
- /* 245 */ "priv_level",
- /* 246 */ "priv_type_list",
- /* 247 */ "priv_type",
- /* 248 */ "db_name",
- /* 249 */ "dnode_endpoint",
- /* 250 */ "dnode_host_name",
- /* 251 */ "not_exists_opt",
- /* 252 */ "db_options",
- /* 253 */ "exists_opt",
- /* 254 */ "alter_db_options",
- /* 255 */ "integer_list",
- /* 256 */ "variable_list",
- /* 257 */ "retention_list",
- /* 258 */ "alter_db_option",
- /* 259 */ "retention",
- /* 260 */ "full_table_name",
- /* 261 */ "column_def_list",
- /* 262 */ "tags_def_opt",
- /* 263 */ "table_options",
- /* 264 */ "multi_create_clause",
- /* 265 */ "tags_def",
- /* 266 */ "multi_drop_clause",
- /* 267 */ "alter_table_clause",
- /* 268 */ "alter_table_options",
- /* 269 */ "column_name",
- /* 270 */ "type_name",
- /* 271 */ "signed_literal",
- /* 272 */ "create_subtable_clause",
- /* 273 */ "specific_tags_opt",
- /* 274 */ "literal_list",
- /* 275 */ "drop_table_clause",
- /* 276 */ "col_name_list",
- /* 277 */ "table_name",
- /* 278 */ "column_def",
- /* 279 */ "func_name_list",
- /* 280 */ "alter_table_option",
- /* 281 */ "col_name",
- /* 282 */ "db_name_cond_opt",
- /* 283 */ "like_pattern_opt",
- /* 284 */ "table_name_cond",
- /* 285 */ "from_db_opt",
- /* 286 */ "func_name",
- /* 287 */ "function_name",
- /* 288 */ "index_name",
- /* 289 */ "index_options",
- /* 290 */ "func_list",
- /* 291 */ "duration_literal",
- /* 292 */ "sliding_opt",
- /* 293 */ "func",
- /* 294 */ "expression_list",
- /* 295 */ "topic_name",
- /* 296 */ "topic_options",
- /* 297 */ "query_expression",
- /* 298 */ "analyze_opt",
- /* 299 */ "explain_options",
- /* 300 */ "agg_func_opt",
- /* 301 */ "bufsize_opt",
- /* 302 */ "stream_name",
- /* 303 */ "stream_options",
- /* 304 */ "into_opt",
- /* 305 */ "dnode_list",
- /* 306 */ "signed",
- /* 307 */ "literal_func",
- /* 308 */ "table_alias",
- /* 309 */ "column_alias",
- /* 310 */ "expression",
- /* 311 */ "pseudo_column",
- /* 312 */ "column_reference",
- /* 313 */ "function_expression",
- /* 314 */ "subquery",
- /* 315 */ "star_func",
- /* 316 */ "star_func_para_list",
- /* 317 */ "noarg_func",
- /* 318 */ "other_para_list",
- /* 319 */ "star_func_para",
- /* 320 */ "predicate",
- /* 321 */ "compare_op",
- /* 322 */ "in_op",
- /* 323 */ "in_predicate_value",
- /* 324 */ "boolean_value_expression",
- /* 325 */ "boolean_primary",
- /* 326 */ "common_expression",
- /* 327 */ "from_clause",
- /* 328 */ "table_reference_list",
- /* 329 */ "table_reference",
- /* 330 */ "table_primary",
- /* 331 */ "joined_table",
- /* 332 */ "alias_opt",
- /* 333 */ "parenthesized_joined_table",
- /* 334 */ "join_type",
- /* 335 */ "search_condition",
- /* 336 */ "query_specification",
- /* 337 */ "set_quantifier_opt",
- /* 338 */ "select_list",
- /* 339 */ "where_clause_opt",
- /* 340 */ "partition_by_clause_opt",
- /* 341 */ "twindow_clause_opt",
- /* 342 */ "group_by_clause_opt",
- /* 343 */ "having_clause_opt",
- /* 344 */ "select_sublist",
- /* 345 */ "select_item",
- /* 346 */ "fill_opt",
- /* 347 */ "fill_mode",
- /* 348 */ "group_by_list",
- /* 349 */ "query_expression_body",
- /* 350 */ "order_by_clause_opt",
- /* 351 */ "slimit_clause_opt",
- /* 352 */ "limit_clause_opt",
- /* 353 */ "query_primary",
- /* 354 */ "sort_specification_list",
- /* 355 */ "sort_specification",
- /* 356 */ "ordering_specification_opt",
- /* 357 */ "null_ordering_opt",
+ /* 222 */ "HAVING",
+ /* 223 */ "ORDER",
+ /* 224 */ "SLIMIT",
+ /* 225 */ "SOFFSET",
+ /* 226 */ "LIMIT",
+ /* 227 */ "OFFSET",
+ /* 228 */ "ASC",
+ /* 229 */ "NULLS",
+ /* 230 */ "ID",
+ /* 231 */ "NK_BITNOT",
+ /* 232 */ "INSERT",
+ /* 233 */ "VALUES",
+ /* 234 */ "IMPORT",
+ /* 235 */ "NK_SEMI",
+ /* 236 */ "FILE",
+ /* 237 */ "cmd",
+ /* 238 */ "account_options",
+ /* 239 */ "alter_account_options",
+ /* 240 */ "literal",
+ /* 241 */ "alter_account_option",
+ /* 242 */ "user_name",
+ /* 243 */ "privileges",
+ /* 244 */ "priv_level",
+ /* 245 */ "priv_type_list",
+ /* 246 */ "priv_type",
+ /* 247 */ "db_name",
+ /* 248 */ "dnode_endpoint",
+ /* 249 */ "dnode_host_name",
+ /* 250 */ "not_exists_opt",
+ /* 251 */ "db_options",
+ /* 252 */ "exists_opt",
+ /* 253 */ "alter_db_options",
+ /* 254 */ "integer_list",
+ /* 255 */ "variable_list",
+ /* 256 */ "retention_list",
+ /* 257 */ "alter_db_option",
+ /* 258 */ "retention",
+ /* 259 */ "full_table_name",
+ /* 260 */ "column_def_list",
+ /* 261 */ "tags_def_opt",
+ /* 262 */ "table_options",
+ /* 263 */ "multi_create_clause",
+ /* 264 */ "tags_def",
+ /* 265 */ "multi_drop_clause",
+ /* 266 */ "alter_table_clause",
+ /* 267 */ "alter_table_options",
+ /* 268 */ "column_name",
+ /* 269 */ "type_name",
+ /* 270 */ "signed_literal",
+ /* 271 */ "create_subtable_clause",
+ /* 272 */ "specific_tags_opt",
+ /* 273 */ "literal_list",
+ /* 274 */ "drop_table_clause",
+ /* 275 */ "col_name_list",
+ /* 276 */ "table_name",
+ /* 277 */ "column_def",
+ /* 278 */ "func_name_list",
+ /* 279 */ "alter_table_option",
+ /* 280 */ "col_name",
+ /* 281 */ "db_name_cond_opt",
+ /* 282 */ "like_pattern_opt",
+ /* 283 */ "table_name_cond",
+ /* 284 */ "from_db_opt",
+ /* 285 */ "func_name",
+ /* 286 */ "function_name",
+ /* 287 */ "index_name",
+ /* 288 */ "index_options",
+ /* 289 */ "func_list",
+ /* 290 */ "duration_literal",
+ /* 291 */ "sliding_opt",
+ /* 292 */ "func",
+ /* 293 */ "expression_list",
+ /* 294 */ "topic_name",
+ /* 295 */ "query_expression",
+ /* 296 */ "cgroup_name",
+ /* 297 */ "analyze_opt",
+ /* 298 */ "explain_options",
+ /* 299 */ "agg_func_opt",
+ /* 300 */ "bufsize_opt",
+ /* 301 */ "stream_name",
+ /* 302 */ "stream_options",
+ /* 303 */ "into_opt",
+ /* 304 */ "dnode_list",
+ /* 305 */ "signed",
+ /* 306 */ "literal_func",
+ /* 307 */ "table_alias",
+ /* 308 */ "column_alias",
+ /* 309 */ "expression",
+ /* 310 */ "pseudo_column",
+ /* 311 */ "column_reference",
+ /* 312 */ "function_expression",
+ /* 313 */ "subquery",
+ /* 314 */ "star_func",
+ /* 315 */ "star_func_para_list",
+ /* 316 */ "noarg_func",
+ /* 317 */ "other_para_list",
+ /* 318 */ "star_func_para",
+ /* 319 */ "predicate",
+ /* 320 */ "compare_op",
+ /* 321 */ "in_op",
+ /* 322 */ "in_predicate_value",
+ /* 323 */ "boolean_value_expression",
+ /* 324 */ "boolean_primary",
+ /* 325 */ "common_expression",
+ /* 326 */ "from_clause",
+ /* 327 */ "table_reference_list",
+ /* 328 */ "table_reference",
+ /* 329 */ "table_primary",
+ /* 330 */ "joined_table",
+ /* 331 */ "alias_opt",
+ /* 332 */ "parenthesized_joined_table",
+ /* 333 */ "join_type",
+ /* 334 */ "search_condition",
+ /* 335 */ "query_specification",
+ /* 336 */ "set_quantifier_opt",
+ /* 337 */ "select_list",
+ /* 338 */ "where_clause_opt",
+ /* 339 */ "partition_by_clause_opt",
+ /* 340 */ "twindow_clause_opt",
+ /* 341 */ "group_by_clause_opt",
+ /* 342 */ "having_clause_opt",
+ /* 343 */ "select_sublist",
+ /* 344 */ "select_item",
+ /* 345 */ "fill_opt",
+ /* 346 */ "fill_mode",
+ /* 347 */ "group_by_list",
+ /* 348 */ "query_expression_body",
+ /* 349 */ "order_by_clause_opt",
+ /* 350 */ "slimit_clause_opt",
+ /* 351 */ "limit_clause_opt",
+ /* 352 */ "query_primary",
+ /* 353 */ "sort_specification_list",
+ /* 354 */ "sort_specification",
+ /* 355 */ "ordering_specification_opt",
+ /* 356 */ "null_ordering_opt",
};
#endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */
@@ -1602,84 +1626,84 @@ static const char *const yyRuleName[] = {
/* 84 */ "db_options ::= db_options VGROUPS NK_INTEGER",
/* 85 */ "db_options ::= db_options SINGLE_STABLE NK_INTEGER",
/* 86 */ "db_options ::= db_options RETENTIONS retention_list",
- /* 87 */ "alter_db_options ::= alter_db_option",
- /* 88 */ "alter_db_options ::= alter_db_options alter_db_option",
- /* 89 */ "alter_db_option ::= BUFFER NK_INTEGER",
- /* 90 */ "alter_db_option ::= CACHELAST NK_INTEGER",
- /* 91 */ "alter_db_option ::= FSYNC NK_INTEGER",
- /* 92 */ "alter_db_option ::= KEEP integer_list",
- /* 93 */ "alter_db_option ::= KEEP variable_list",
- /* 94 */ "alter_db_option ::= PAGES NK_INTEGER",
- /* 95 */ "alter_db_option ::= REPLICA NK_INTEGER",
- /* 96 */ "alter_db_option ::= STRICT NK_INTEGER",
- /* 97 */ "alter_db_option ::= WAL NK_INTEGER",
- /* 98 */ "integer_list ::= NK_INTEGER",
- /* 99 */ "integer_list ::= integer_list NK_COMMA NK_INTEGER",
- /* 100 */ "variable_list ::= NK_VARIABLE",
- /* 101 */ "variable_list ::= variable_list NK_COMMA NK_VARIABLE",
- /* 102 */ "retention_list ::= retention",
- /* 103 */ "retention_list ::= retention_list NK_COMMA retention",
- /* 104 */ "retention ::= NK_VARIABLE NK_COLON NK_VARIABLE",
- /* 105 */ "cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options",
- /* 106 */ "cmd ::= CREATE TABLE multi_create_clause",
- /* 107 */ "cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options",
- /* 108 */ "cmd ::= DROP TABLE multi_drop_clause",
- /* 109 */ "cmd ::= DROP STABLE exists_opt full_table_name",
- /* 110 */ "cmd ::= ALTER TABLE alter_table_clause",
- /* 111 */ "cmd ::= ALTER STABLE alter_table_clause",
- /* 112 */ "alter_table_clause ::= full_table_name alter_table_options",
- /* 113 */ "alter_table_clause ::= full_table_name ADD COLUMN column_name type_name",
- /* 114 */ "alter_table_clause ::= full_table_name DROP COLUMN column_name",
- /* 115 */ "alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name",
- /* 116 */ "alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name",
- /* 117 */ "alter_table_clause ::= full_table_name ADD TAG column_name type_name",
- /* 118 */ "alter_table_clause ::= full_table_name DROP TAG column_name",
- /* 119 */ "alter_table_clause ::= full_table_name MODIFY TAG column_name type_name",
- /* 120 */ "alter_table_clause ::= full_table_name RENAME TAG column_name column_name",
- /* 121 */ "alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal",
- /* 122 */ "multi_create_clause ::= create_subtable_clause",
- /* 123 */ "multi_create_clause ::= multi_create_clause create_subtable_clause",
- /* 124 */ "create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_tags_opt TAGS NK_LP literal_list NK_RP table_options",
- /* 125 */ "multi_drop_clause ::= drop_table_clause",
- /* 126 */ "multi_drop_clause ::= multi_drop_clause drop_table_clause",
- /* 127 */ "drop_table_clause ::= exists_opt full_table_name",
- /* 128 */ "specific_tags_opt ::=",
- /* 129 */ "specific_tags_opt ::= NK_LP col_name_list NK_RP",
- /* 130 */ "full_table_name ::= table_name",
- /* 131 */ "full_table_name ::= db_name NK_DOT table_name",
- /* 132 */ "column_def_list ::= column_def",
- /* 133 */ "column_def_list ::= column_def_list NK_COMMA column_def",
- /* 134 */ "column_def ::= column_name type_name",
- /* 135 */ "column_def ::= column_name type_name COMMENT NK_STRING",
- /* 136 */ "type_name ::= BOOL",
- /* 137 */ "type_name ::= TINYINT",
- /* 138 */ "type_name ::= SMALLINT",
- /* 139 */ "type_name ::= INT",
- /* 140 */ "type_name ::= INTEGER",
- /* 141 */ "type_name ::= BIGINT",
- /* 142 */ "type_name ::= FLOAT",
- /* 143 */ "type_name ::= DOUBLE",
- /* 144 */ "type_name ::= BINARY NK_LP NK_INTEGER NK_RP",
- /* 145 */ "type_name ::= TIMESTAMP",
- /* 146 */ "type_name ::= NCHAR NK_LP NK_INTEGER NK_RP",
- /* 147 */ "type_name ::= TINYINT UNSIGNED",
- /* 148 */ "type_name ::= SMALLINT UNSIGNED",
- /* 149 */ "type_name ::= INT UNSIGNED",
- /* 150 */ "type_name ::= BIGINT UNSIGNED",
- /* 151 */ "type_name ::= JSON",
- /* 152 */ "type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP",
- /* 153 */ "type_name ::= MEDIUMBLOB",
- /* 154 */ "type_name ::= BLOB",
- /* 155 */ "type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP",
- /* 156 */ "type_name ::= DECIMAL",
- /* 157 */ "type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP",
- /* 158 */ "type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP",
- /* 159 */ "tags_def_opt ::=",
- /* 160 */ "tags_def_opt ::= tags_def",
- /* 161 */ "tags_def ::= TAGS NK_LP column_def_list NK_RP",
- /* 162 */ "table_options ::=",
- /* 163 */ "table_options ::= table_options COMMENT NK_STRING",
- /* 164 */ "table_options ::= table_options DELAY NK_INTEGER",
+ /* 87 */ "db_options ::= db_options SCHEMALESS NK_INTEGER",
+ /* 88 */ "alter_db_options ::= alter_db_option",
+ /* 89 */ "alter_db_options ::= alter_db_options alter_db_option",
+ /* 90 */ "alter_db_option ::= BUFFER NK_INTEGER",
+ /* 91 */ "alter_db_option ::= CACHELAST NK_INTEGER",
+ /* 92 */ "alter_db_option ::= FSYNC NK_INTEGER",
+ /* 93 */ "alter_db_option ::= KEEP integer_list",
+ /* 94 */ "alter_db_option ::= KEEP variable_list",
+ /* 95 */ "alter_db_option ::= PAGES NK_INTEGER",
+ /* 96 */ "alter_db_option ::= REPLICA NK_INTEGER",
+ /* 97 */ "alter_db_option ::= STRICT NK_INTEGER",
+ /* 98 */ "alter_db_option ::= WAL NK_INTEGER",
+ /* 99 */ "integer_list ::= NK_INTEGER",
+ /* 100 */ "integer_list ::= integer_list NK_COMMA NK_INTEGER",
+ /* 101 */ "variable_list ::= NK_VARIABLE",
+ /* 102 */ "variable_list ::= variable_list NK_COMMA NK_VARIABLE",
+ /* 103 */ "retention_list ::= retention",
+ /* 104 */ "retention_list ::= retention_list NK_COMMA retention",
+ /* 105 */ "retention ::= NK_VARIABLE NK_COLON NK_VARIABLE",
+ /* 106 */ "cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options",
+ /* 107 */ "cmd ::= CREATE TABLE multi_create_clause",
+ /* 108 */ "cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options",
+ /* 109 */ "cmd ::= DROP TABLE multi_drop_clause",
+ /* 110 */ "cmd ::= DROP STABLE exists_opt full_table_name",
+ /* 111 */ "cmd ::= ALTER TABLE alter_table_clause",
+ /* 112 */ "cmd ::= ALTER STABLE alter_table_clause",
+ /* 113 */ "alter_table_clause ::= full_table_name alter_table_options",
+ /* 114 */ "alter_table_clause ::= full_table_name ADD COLUMN column_name type_name",
+ /* 115 */ "alter_table_clause ::= full_table_name DROP COLUMN column_name",
+ /* 116 */ "alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name",
+ /* 117 */ "alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name",
+ /* 118 */ "alter_table_clause ::= full_table_name ADD TAG column_name type_name",
+ /* 119 */ "alter_table_clause ::= full_table_name DROP TAG column_name",
+ /* 120 */ "alter_table_clause ::= full_table_name MODIFY TAG column_name type_name",
+ /* 121 */ "alter_table_clause ::= full_table_name RENAME TAG column_name column_name",
+ /* 122 */ "alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal",
+ /* 123 */ "multi_create_clause ::= create_subtable_clause",
+ /* 124 */ "multi_create_clause ::= multi_create_clause create_subtable_clause",
+ /* 125 */ "create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_tags_opt TAGS NK_LP literal_list NK_RP table_options",
+ /* 126 */ "multi_drop_clause ::= drop_table_clause",
+ /* 127 */ "multi_drop_clause ::= multi_drop_clause drop_table_clause",
+ /* 128 */ "drop_table_clause ::= exists_opt full_table_name",
+ /* 129 */ "specific_tags_opt ::=",
+ /* 130 */ "specific_tags_opt ::= NK_LP col_name_list NK_RP",
+ /* 131 */ "full_table_name ::= table_name",
+ /* 132 */ "full_table_name ::= db_name NK_DOT table_name",
+ /* 133 */ "column_def_list ::= column_def",
+ /* 134 */ "column_def_list ::= column_def_list NK_COMMA column_def",
+ /* 135 */ "column_def ::= column_name type_name",
+ /* 136 */ "column_def ::= column_name type_name COMMENT NK_STRING",
+ /* 137 */ "type_name ::= BOOL",
+ /* 138 */ "type_name ::= TINYINT",
+ /* 139 */ "type_name ::= SMALLINT",
+ /* 140 */ "type_name ::= INT",
+ /* 141 */ "type_name ::= INTEGER",
+ /* 142 */ "type_name ::= BIGINT",
+ /* 143 */ "type_name ::= FLOAT",
+ /* 144 */ "type_name ::= DOUBLE",
+ /* 145 */ "type_name ::= BINARY NK_LP NK_INTEGER NK_RP",
+ /* 146 */ "type_name ::= TIMESTAMP",
+ /* 147 */ "type_name ::= NCHAR NK_LP NK_INTEGER NK_RP",
+ /* 148 */ "type_name ::= TINYINT UNSIGNED",
+ /* 149 */ "type_name ::= SMALLINT UNSIGNED",
+ /* 150 */ "type_name ::= INT UNSIGNED",
+ /* 151 */ "type_name ::= BIGINT UNSIGNED",
+ /* 152 */ "type_name ::= JSON",
+ /* 153 */ "type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP",
+ /* 154 */ "type_name ::= MEDIUMBLOB",
+ /* 155 */ "type_name ::= BLOB",
+ /* 156 */ "type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP",
+ /* 157 */ "type_name ::= DECIMAL",
+ /* 158 */ "type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP",
+ /* 159 */ "type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP",
+ /* 160 */ "tags_def_opt ::=",
+ /* 161 */ "tags_def_opt ::= tags_def",
+ /* 162 */ "tags_def ::= TAGS NK_LP column_def_list NK_RP",
+ /* 163 */ "table_options ::=",
+ /* 164 */ "table_options ::= table_options COMMENT NK_STRING",
/* 165 */ "table_options ::= table_options FILE_FACTOR NK_FLOAT",
/* 166 */ "table_options ::= table_options ROLLUP NK_LP func_name_list NK_RP",
/* 167 */ "table_options ::= table_options TTL NK_INTEGER",
@@ -1738,235 +1762,234 @@ static const char *const yyRuleName[] = {
/* 220 */ "func_list ::= func",
/* 221 */ "func_list ::= func_list NK_COMMA func",
/* 222 */ "func ::= function_name NK_LP expression_list NK_RP",
- /* 223 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name topic_options AS query_expression",
- /* 224 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name topic_options AS db_name",
- /* 225 */ "cmd ::= DROP TOPIC exists_opt topic_name",
- /* 226 */ "topic_options ::=",
- /* 227 */ "topic_options ::= topic_options WITH TABLE",
- /* 228 */ "topic_options ::= topic_options WITH SCHEMA",
- /* 229 */ "topic_options ::= topic_options WITH TAG",
- /* 230 */ "cmd ::= DESC full_table_name",
- /* 231 */ "cmd ::= DESCRIBE full_table_name",
- /* 232 */ "cmd ::= RESET QUERY CACHE",
- /* 233 */ "cmd ::= EXPLAIN analyze_opt explain_options query_expression",
- /* 234 */ "analyze_opt ::=",
- /* 235 */ "analyze_opt ::= ANALYZE",
- /* 236 */ "explain_options ::=",
- /* 237 */ "explain_options ::= explain_options VERBOSE NK_BOOL",
- /* 238 */ "explain_options ::= explain_options RATIO NK_FLOAT",
- /* 239 */ "cmd ::= COMPACT VNODES IN NK_LP integer_list NK_RP",
- /* 240 */ "cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt",
- /* 241 */ "cmd ::= DROP FUNCTION exists_opt function_name",
- /* 242 */ "agg_func_opt ::=",
- /* 243 */ "agg_func_opt ::= AGGREGATE",
- /* 244 */ "bufsize_opt ::=",
- /* 245 */ "bufsize_opt ::= BUFSIZE NK_INTEGER",
- /* 246 */ "cmd ::= CREATE STREAM not_exists_opt stream_name stream_options into_opt AS query_expression",
- /* 247 */ "cmd ::= DROP STREAM exists_opt stream_name",
- /* 248 */ "into_opt ::=",
- /* 249 */ "into_opt ::= INTO full_table_name",
- /* 250 */ "stream_options ::=",
- /* 251 */ "stream_options ::= stream_options TRIGGER AT_ONCE",
- /* 252 */ "stream_options ::= stream_options TRIGGER WINDOW_CLOSE",
- /* 253 */ "stream_options ::= stream_options WATERMARK duration_literal",
- /* 254 */ "cmd ::= KILL CONNECTION NK_INTEGER",
- /* 255 */ "cmd ::= KILL QUERY NK_INTEGER",
- /* 256 */ "cmd ::= KILL TRANSACTION NK_INTEGER",
- /* 257 */ "cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER",
- /* 258 */ "cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list",
- /* 259 */ "cmd ::= SPLIT VGROUP NK_INTEGER",
- /* 260 */ "dnode_list ::= DNODE NK_INTEGER",
- /* 261 */ "dnode_list ::= dnode_list DNODE NK_INTEGER",
- /* 262 */ "cmd ::= SYNCDB db_name REPLICA",
- /* 263 */ "cmd ::= query_expression",
- /* 264 */ "literal ::= NK_INTEGER",
- /* 265 */ "literal ::= NK_FLOAT",
- /* 266 */ "literal ::= NK_STRING",
- /* 267 */ "literal ::= NK_BOOL",
- /* 268 */ "literal ::= TIMESTAMP NK_STRING",
- /* 269 */ "literal ::= duration_literal",
- /* 270 */ "literal ::= NULL",
- /* 271 */ "literal ::= NK_QUESTION",
- /* 272 */ "duration_literal ::= NK_VARIABLE",
- /* 273 */ "signed ::= NK_INTEGER",
- /* 274 */ "signed ::= NK_PLUS NK_INTEGER",
- /* 275 */ "signed ::= NK_MINUS NK_INTEGER",
- /* 276 */ "signed ::= NK_FLOAT",
- /* 277 */ "signed ::= NK_PLUS NK_FLOAT",
- /* 278 */ "signed ::= NK_MINUS NK_FLOAT",
- /* 279 */ "signed_literal ::= signed",
- /* 280 */ "signed_literal ::= NK_STRING",
- /* 281 */ "signed_literal ::= NK_BOOL",
- /* 282 */ "signed_literal ::= TIMESTAMP NK_STRING",
- /* 283 */ "signed_literal ::= duration_literal",
- /* 284 */ "signed_literal ::= NULL",
- /* 285 */ "signed_literal ::= literal_func",
- /* 286 */ "literal_list ::= signed_literal",
- /* 287 */ "literal_list ::= literal_list NK_COMMA signed_literal",
- /* 288 */ "db_name ::= NK_ID",
- /* 289 */ "table_name ::= NK_ID",
- /* 290 */ "column_name ::= NK_ID",
- /* 291 */ "function_name ::= NK_ID",
- /* 292 */ "table_alias ::= NK_ID",
- /* 293 */ "column_alias ::= NK_ID",
- /* 294 */ "user_name ::= NK_ID",
- /* 295 */ "index_name ::= NK_ID",
- /* 296 */ "topic_name ::= NK_ID",
- /* 297 */ "stream_name ::= NK_ID",
- /* 298 */ "expression ::= literal",
- /* 299 */ "expression ::= pseudo_column",
- /* 300 */ "expression ::= column_reference",
- /* 301 */ "expression ::= function_expression",
- /* 302 */ "expression ::= subquery",
- /* 303 */ "expression ::= NK_LP expression NK_RP",
- /* 304 */ "expression ::= NK_PLUS expression",
- /* 305 */ "expression ::= NK_MINUS expression",
- /* 306 */ "expression ::= expression NK_PLUS expression",
- /* 307 */ "expression ::= expression NK_MINUS expression",
- /* 308 */ "expression ::= expression NK_STAR expression",
- /* 309 */ "expression ::= expression NK_SLASH expression",
- /* 310 */ "expression ::= expression NK_REM expression",
- /* 311 */ "expression ::= column_reference NK_ARROW NK_STRING",
- /* 312 */ "expression_list ::= expression",
- /* 313 */ "expression_list ::= expression_list NK_COMMA expression",
- /* 314 */ "column_reference ::= column_name",
- /* 315 */ "column_reference ::= table_name NK_DOT column_name",
- /* 316 */ "pseudo_column ::= ROWTS",
- /* 317 */ "pseudo_column ::= TBNAME",
- /* 318 */ "pseudo_column ::= table_name NK_DOT TBNAME",
- /* 319 */ "pseudo_column ::= QSTARTTS",
- /* 320 */ "pseudo_column ::= QENDTS",
- /* 321 */ "pseudo_column ::= WSTARTTS",
- /* 322 */ "pseudo_column ::= WENDTS",
- /* 323 */ "pseudo_column ::= WDURATION",
- /* 324 */ "function_expression ::= function_name NK_LP expression_list NK_RP",
- /* 325 */ "function_expression ::= star_func NK_LP star_func_para_list NK_RP",
- /* 326 */ "function_expression ::= CAST NK_LP expression AS type_name NK_RP",
- /* 327 */ "function_expression ::= literal_func",
- /* 328 */ "literal_func ::= noarg_func NK_LP NK_RP",
- /* 329 */ "literal_func ::= NOW",
- /* 330 */ "noarg_func ::= NOW",
- /* 331 */ "noarg_func ::= TODAY",
- /* 332 */ "noarg_func ::= TIMEZONE",
- /* 333 */ "star_func ::= COUNT",
- /* 334 */ "star_func ::= FIRST",
- /* 335 */ "star_func ::= LAST",
- /* 336 */ "star_func ::= LAST_ROW",
- /* 337 */ "star_func_para_list ::= NK_STAR",
- /* 338 */ "star_func_para_list ::= other_para_list",
- /* 339 */ "other_para_list ::= star_func_para",
- /* 340 */ "other_para_list ::= other_para_list NK_COMMA star_func_para",
- /* 341 */ "star_func_para ::= expression",
- /* 342 */ "star_func_para ::= table_name NK_DOT NK_STAR",
- /* 343 */ "predicate ::= expression compare_op expression",
- /* 344 */ "predicate ::= expression BETWEEN expression AND expression",
- /* 345 */ "predicate ::= expression NOT BETWEEN expression AND expression",
- /* 346 */ "predicate ::= expression IS NULL",
- /* 347 */ "predicate ::= expression IS NOT NULL",
- /* 348 */ "predicate ::= expression in_op in_predicate_value",
- /* 349 */ "compare_op ::= NK_LT",
- /* 350 */ "compare_op ::= NK_GT",
- /* 351 */ "compare_op ::= NK_LE",
- /* 352 */ "compare_op ::= NK_GE",
- /* 353 */ "compare_op ::= NK_NE",
- /* 354 */ "compare_op ::= NK_EQ",
- /* 355 */ "compare_op ::= LIKE",
- /* 356 */ "compare_op ::= NOT LIKE",
- /* 357 */ "compare_op ::= MATCH",
- /* 358 */ "compare_op ::= NMATCH",
- /* 359 */ "compare_op ::= CONTAINS",
- /* 360 */ "in_op ::= IN",
- /* 361 */ "in_op ::= NOT IN",
- /* 362 */ "in_predicate_value ::= NK_LP expression_list NK_RP",
- /* 363 */ "boolean_value_expression ::= boolean_primary",
- /* 364 */ "boolean_value_expression ::= NOT boolean_primary",
- /* 365 */ "boolean_value_expression ::= boolean_value_expression OR boolean_value_expression",
- /* 366 */ "boolean_value_expression ::= boolean_value_expression AND boolean_value_expression",
- /* 367 */ "boolean_primary ::= predicate",
- /* 368 */ "boolean_primary ::= NK_LP boolean_value_expression NK_RP",
- /* 369 */ "common_expression ::= expression",
- /* 370 */ "common_expression ::= boolean_value_expression",
- /* 371 */ "from_clause ::= FROM table_reference_list",
- /* 372 */ "table_reference_list ::= table_reference",
- /* 373 */ "table_reference_list ::= table_reference_list NK_COMMA table_reference",
- /* 374 */ "table_reference ::= table_primary",
- /* 375 */ "table_reference ::= joined_table",
- /* 376 */ "table_primary ::= table_name alias_opt",
- /* 377 */ "table_primary ::= db_name NK_DOT table_name alias_opt",
- /* 378 */ "table_primary ::= subquery alias_opt",
- /* 379 */ "table_primary ::= parenthesized_joined_table",
- /* 380 */ "alias_opt ::=",
- /* 381 */ "alias_opt ::= table_alias",
- /* 382 */ "alias_opt ::= AS table_alias",
- /* 383 */ "parenthesized_joined_table ::= NK_LP joined_table NK_RP",
- /* 384 */ "parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP",
- /* 385 */ "joined_table ::= table_reference join_type JOIN table_reference ON search_condition",
- /* 386 */ "join_type ::=",
- /* 387 */ "join_type ::= INNER",
- /* 388 */ "query_specification ::= SELECT set_quantifier_opt select_list from_clause where_clause_opt partition_by_clause_opt twindow_clause_opt group_by_clause_opt having_clause_opt",
- /* 389 */ "set_quantifier_opt ::=",
- /* 390 */ "set_quantifier_opt ::= DISTINCT",
- /* 391 */ "set_quantifier_opt ::= ALL",
- /* 392 */ "select_list ::= NK_STAR",
- /* 393 */ "select_list ::= select_sublist",
- /* 394 */ "select_sublist ::= select_item",
- /* 395 */ "select_sublist ::= select_sublist NK_COMMA select_item",
- /* 396 */ "select_item ::= common_expression",
- /* 397 */ "select_item ::= common_expression column_alias",
- /* 398 */ "select_item ::= common_expression AS column_alias",
- /* 399 */ "select_item ::= table_name NK_DOT NK_STAR",
- /* 400 */ "where_clause_opt ::=",
- /* 401 */ "where_clause_opt ::= WHERE search_condition",
- /* 402 */ "partition_by_clause_opt ::=",
- /* 403 */ "partition_by_clause_opt ::= PARTITION BY expression_list",
- /* 404 */ "twindow_clause_opt ::=",
- /* 405 */ "twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP",
- /* 406 */ "twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP",
- /* 407 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt",
- /* 408 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt",
- /* 409 */ "sliding_opt ::=",
- /* 410 */ "sliding_opt ::= SLIDING NK_LP duration_literal NK_RP",
- /* 411 */ "fill_opt ::=",
- /* 412 */ "fill_opt ::= FILL NK_LP fill_mode NK_RP",
- /* 413 */ "fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP",
- /* 414 */ "fill_mode ::= NONE",
- /* 415 */ "fill_mode ::= PREV",
- /* 416 */ "fill_mode ::= NULL",
- /* 417 */ "fill_mode ::= LINEAR",
- /* 418 */ "fill_mode ::= NEXT",
- /* 419 */ "group_by_clause_opt ::=",
- /* 420 */ "group_by_clause_opt ::= GROUP BY group_by_list",
- /* 421 */ "group_by_list ::= expression",
- /* 422 */ "group_by_list ::= group_by_list NK_COMMA expression",
- /* 423 */ "having_clause_opt ::=",
- /* 424 */ "having_clause_opt ::= HAVING search_condition",
- /* 425 */ "query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt",
- /* 426 */ "query_expression_body ::= query_primary",
- /* 427 */ "query_expression_body ::= query_expression_body UNION ALL query_expression_body",
- /* 428 */ "query_expression_body ::= query_expression_body UNION query_expression_body",
- /* 429 */ "query_primary ::= query_specification",
- /* 430 */ "query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP",
- /* 431 */ "order_by_clause_opt ::=",
- /* 432 */ "order_by_clause_opt ::= ORDER BY sort_specification_list",
- /* 433 */ "slimit_clause_opt ::=",
- /* 434 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER",
- /* 435 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER",
- /* 436 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER",
- /* 437 */ "limit_clause_opt ::=",
- /* 438 */ "limit_clause_opt ::= LIMIT NK_INTEGER",
- /* 439 */ "limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER",
- /* 440 */ "limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER",
- /* 441 */ "subquery ::= NK_LP query_expression NK_RP",
- /* 442 */ "search_condition ::= common_expression",
- /* 443 */ "sort_specification_list ::= sort_specification",
- /* 444 */ "sort_specification_list ::= sort_specification_list NK_COMMA sort_specification",
- /* 445 */ "sort_specification ::= expression ordering_specification_opt null_ordering_opt",
- /* 446 */ "ordering_specification_opt ::=",
- /* 447 */ "ordering_specification_opt ::= ASC",
- /* 448 */ "ordering_specification_opt ::= DESC",
- /* 449 */ "null_ordering_opt ::=",
- /* 450 */ "null_ordering_opt ::= NULLS FIRST",
- /* 451 */ "null_ordering_opt ::= NULLS LAST",
+ /* 223 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name AS query_expression",
+ /* 224 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name AS DATABASE db_name",
+ /* 225 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name AS STABLE full_table_name",
+ /* 226 */ "cmd ::= DROP TOPIC exists_opt topic_name",
+ /* 227 */ "cmd ::= DROP CONSUMER GROUP exists_opt cgroup_name ON topic_name",
+ /* 228 */ "cmd ::= DESC full_table_name",
+ /* 229 */ "cmd ::= DESCRIBE full_table_name",
+ /* 230 */ "cmd ::= RESET QUERY CACHE",
+ /* 231 */ "cmd ::= EXPLAIN analyze_opt explain_options query_expression",
+ /* 232 */ "analyze_opt ::=",
+ /* 233 */ "analyze_opt ::= ANALYZE",
+ /* 234 */ "explain_options ::=",
+ /* 235 */ "explain_options ::= explain_options VERBOSE NK_BOOL",
+ /* 236 */ "explain_options ::= explain_options RATIO NK_FLOAT",
+ /* 237 */ "cmd ::= COMPACT VNODES IN NK_LP integer_list NK_RP",
+ /* 238 */ "cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt",
+ /* 239 */ "cmd ::= DROP FUNCTION exists_opt function_name",
+ /* 240 */ "agg_func_opt ::=",
+ /* 241 */ "agg_func_opt ::= AGGREGATE",
+ /* 242 */ "bufsize_opt ::=",
+ /* 243 */ "bufsize_opt ::= BUFSIZE NK_INTEGER",
+ /* 244 */ "cmd ::= CREATE STREAM not_exists_opt stream_name stream_options into_opt AS query_expression",
+ /* 245 */ "cmd ::= DROP STREAM exists_opt stream_name",
+ /* 246 */ "into_opt ::=",
+ /* 247 */ "into_opt ::= INTO full_table_name",
+ /* 248 */ "stream_options ::=",
+ /* 249 */ "stream_options ::= stream_options TRIGGER AT_ONCE",
+ /* 250 */ "stream_options ::= stream_options TRIGGER WINDOW_CLOSE",
+ /* 251 */ "stream_options ::= stream_options WATERMARK duration_literal",
+ /* 252 */ "cmd ::= KILL CONNECTION NK_INTEGER",
+ /* 253 */ "cmd ::= KILL QUERY NK_INTEGER",
+ /* 254 */ "cmd ::= KILL TRANSACTION NK_INTEGER",
+ /* 255 */ "cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER",
+ /* 256 */ "cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list",
+ /* 257 */ "cmd ::= SPLIT VGROUP NK_INTEGER",
+ /* 258 */ "dnode_list ::= DNODE NK_INTEGER",
+ /* 259 */ "dnode_list ::= dnode_list DNODE NK_INTEGER",
+ /* 260 */ "cmd ::= SYNCDB db_name REPLICA",
+ /* 261 */ "cmd ::= query_expression",
+ /* 262 */ "literal ::= NK_INTEGER",
+ /* 263 */ "literal ::= NK_FLOAT",
+ /* 264 */ "literal ::= NK_STRING",
+ /* 265 */ "literal ::= NK_BOOL",
+ /* 266 */ "literal ::= TIMESTAMP NK_STRING",
+ /* 267 */ "literal ::= duration_literal",
+ /* 268 */ "literal ::= NULL",
+ /* 269 */ "literal ::= NK_QUESTION",
+ /* 270 */ "duration_literal ::= NK_VARIABLE",
+ /* 271 */ "signed ::= NK_INTEGER",
+ /* 272 */ "signed ::= NK_PLUS NK_INTEGER",
+ /* 273 */ "signed ::= NK_MINUS NK_INTEGER",
+ /* 274 */ "signed ::= NK_FLOAT",
+ /* 275 */ "signed ::= NK_PLUS NK_FLOAT",
+ /* 276 */ "signed ::= NK_MINUS NK_FLOAT",
+ /* 277 */ "signed_literal ::= signed",
+ /* 278 */ "signed_literal ::= NK_STRING",
+ /* 279 */ "signed_literal ::= NK_BOOL",
+ /* 280 */ "signed_literal ::= TIMESTAMP NK_STRING",
+ /* 281 */ "signed_literal ::= duration_literal",
+ /* 282 */ "signed_literal ::= NULL",
+ /* 283 */ "signed_literal ::= literal_func",
+ /* 284 */ "literal_list ::= signed_literal",
+ /* 285 */ "literal_list ::= literal_list NK_COMMA signed_literal",
+ /* 286 */ "db_name ::= NK_ID",
+ /* 287 */ "table_name ::= NK_ID",
+ /* 288 */ "column_name ::= NK_ID",
+ /* 289 */ "function_name ::= NK_ID",
+ /* 290 */ "table_alias ::= NK_ID",
+ /* 291 */ "column_alias ::= NK_ID",
+ /* 292 */ "user_name ::= NK_ID",
+ /* 293 */ "index_name ::= NK_ID",
+ /* 294 */ "topic_name ::= NK_ID",
+ /* 295 */ "stream_name ::= NK_ID",
+ /* 296 */ "cgroup_name ::= NK_ID",
+ /* 297 */ "expression ::= literal",
+ /* 298 */ "expression ::= pseudo_column",
+ /* 299 */ "expression ::= column_reference",
+ /* 300 */ "expression ::= function_expression",
+ /* 301 */ "expression ::= subquery",
+ /* 302 */ "expression ::= NK_LP expression NK_RP",
+ /* 303 */ "expression ::= NK_PLUS expression",
+ /* 304 */ "expression ::= NK_MINUS expression",
+ /* 305 */ "expression ::= expression NK_PLUS expression",
+ /* 306 */ "expression ::= expression NK_MINUS expression",
+ /* 307 */ "expression ::= expression NK_STAR expression",
+ /* 308 */ "expression ::= expression NK_SLASH expression",
+ /* 309 */ "expression ::= expression NK_REM expression",
+ /* 310 */ "expression ::= column_reference NK_ARROW NK_STRING",
+ /* 311 */ "expression_list ::= expression",
+ /* 312 */ "expression_list ::= expression_list NK_COMMA expression",
+ /* 313 */ "column_reference ::= column_name",
+ /* 314 */ "column_reference ::= table_name NK_DOT column_name",
+ /* 315 */ "pseudo_column ::= ROWTS",
+ /* 316 */ "pseudo_column ::= TBNAME",
+ /* 317 */ "pseudo_column ::= table_name NK_DOT TBNAME",
+ /* 318 */ "pseudo_column ::= QSTARTTS",
+ /* 319 */ "pseudo_column ::= QENDTS",
+ /* 320 */ "pseudo_column ::= WSTARTTS",
+ /* 321 */ "pseudo_column ::= WENDTS",
+ /* 322 */ "pseudo_column ::= WDURATION",
+ /* 323 */ "function_expression ::= function_name NK_LP expression_list NK_RP",
+ /* 324 */ "function_expression ::= star_func NK_LP star_func_para_list NK_RP",
+ /* 325 */ "function_expression ::= CAST NK_LP expression AS type_name NK_RP",
+ /* 326 */ "function_expression ::= literal_func",
+ /* 327 */ "literal_func ::= noarg_func NK_LP NK_RP",
+ /* 328 */ "literal_func ::= NOW",
+ /* 329 */ "noarg_func ::= NOW",
+ /* 330 */ "noarg_func ::= TODAY",
+ /* 331 */ "noarg_func ::= TIMEZONE",
+ /* 332 */ "star_func ::= COUNT",
+ /* 333 */ "star_func ::= FIRST",
+ /* 334 */ "star_func ::= LAST",
+ /* 335 */ "star_func ::= LAST_ROW",
+ /* 336 */ "star_func_para_list ::= NK_STAR",
+ /* 337 */ "star_func_para_list ::= other_para_list",
+ /* 338 */ "other_para_list ::= star_func_para",
+ /* 339 */ "other_para_list ::= other_para_list NK_COMMA star_func_para",
+ /* 340 */ "star_func_para ::= expression",
+ /* 341 */ "star_func_para ::= table_name NK_DOT NK_STAR",
+ /* 342 */ "predicate ::= expression compare_op expression",
+ /* 343 */ "predicate ::= expression BETWEEN expression AND expression",
+ /* 344 */ "predicate ::= expression NOT BETWEEN expression AND expression",
+ /* 345 */ "predicate ::= expression IS NULL",
+ /* 346 */ "predicate ::= expression IS NOT NULL",
+ /* 347 */ "predicate ::= expression in_op in_predicate_value",
+ /* 348 */ "compare_op ::= NK_LT",
+ /* 349 */ "compare_op ::= NK_GT",
+ /* 350 */ "compare_op ::= NK_LE",
+ /* 351 */ "compare_op ::= NK_GE",
+ /* 352 */ "compare_op ::= NK_NE",
+ /* 353 */ "compare_op ::= NK_EQ",
+ /* 354 */ "compare_op ::= LIKE",
+ /* 355 */ "compare_op ::= NOT LIKE",
+ /* 356 */ "compare_op ::= MATCH",
+ /* 357 */ "compare_op ::= NMATCH",
+ /* 358 */ "compare_op ::= CONTAINS",
+ /* 359 */ "in_op ::= IN",
+ /* 360 */ "in_op ::= NOT IN",
+ /* 361 */ "in_predicate_value ::= NK_LP expression_list NK_RP",
+ /* 362 */ "boolean_value_expression ::= boolean_primary",
+ /* 363 */ "boolean_value_expression ::= NOT boolean_primary",
+ /* 364 */ "boolean_value_expression ::= boolean_value_expression OR boolean_value_expression",
+ /* 365 */ "boolean_value_expression ::= boolean_value_expression AND boolean_value_expression",
+ /* 366 */ "boolean_primary ::= predicate",
+ /* 367 */ "boolean_primary ::= NK_LP boolean_value_expression NK_RP",
+ /* 368 */ "common_expression ::= expression",
+ /* 369 */ "common_expression ::= boolean_value_expression",
+ /* 370 */ "from_clause ::= FROM table_reference_list",
+ /* 371 */ "table_reference_list ::= table_reference",
+ /* 372 */ "table_reference_list ::= table_reference_list NK_COMMA table_reference",
+ /* 373 */ "table_reference ::= table_primary",
+ /* 374 */ "table_reference ::= joined_table",
+ /* 375 */ "table_primary ::= table_name alias_opt",
+ /* 376 */ "table_primary ::= db_name NK_DOT table_name alias_opt",
+ /* 377 */ "table_primary ::= subquery alias_opt",
+ /* 378 */ "table_primary ::= parenthesized_joined_table",
+ /* 379 */ "alias_opt ::=",
+ /* 380 */ "alias_opt ::= table_alias",
+ /* 381 */ "alias_opt ::= AS table_alias",
+ /* 382 */ "parenthesized_joined_table ::= NK_LP joined_table NK_RP",
+ /* 383 */ "parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP",
+ /* 384 */ "joined_table ::= table_reference join_type JOIN table_reference ON search_condition",
+ /* 385 */ "join_type ::=",
+ /* 386 */ "join_type ::= INNER",
+ /* 387 */ "query_specification ::= SELECT set_quantifier_opt select_list from_clause where_clause_opt partition_by_clause_opt twindow_clause_opt group_by_clause_opt having_clause_opt",
+ /* 388 */ "set_quantifier_opt ::=",
+ /* 389 */ "set_quantifier_opt ::= DISTINCT",
+ /* 390 */ "set_quantifier_opt ::= ALL",
+ /* 391 */ "select_list ::= NK_STAR",
+ /* 392 */ "select_list ::= select_sublist",
+ /* 393 */ "select_sublist ::= select_item",
+ /* 394 */ "select_sublist ::= select_sublist NK_COMMA select_item",
+ /* 395 */ "select_item ::= common_expression",
+ /* 396 */ "select_item ::= common_expression column_alias",
+ /* 397 */ "select_item ::= common_expression AS column_alias",
+ /* 398 */ "select_item ::= table_name NK_DOT NK_STAR",
+ /* 399 */ "where_clause_opt ::=",
+ /* 400 */ "where_clause_opt ::= WHERE search_condition",
+ /* 401 */ "partition_by_clause_opt ::=",
+ /* 402 */ "partition_by_clause_opt ::= PARTITION BY expression_list",
+ /* 403 */ "twindow_clause_opt ::=",
+ /* 404 */ "twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP",
+ /* 405 */ "twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP",
+ /* 406 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt",
+ /* 407 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt",
+ /* 408 */ "sliding_opt ::=",
+ /* 409 */ "sliding_opt ::= SLIDING NK_LP duration_literal NK_RP",
+ /* 410 */ "fill_opt ::=",
+ /* 411 */ "fill_opt ::= FILL NK_LP fill_mode NK_RP",
+ /* 412 */ "fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP",
+ /* 413 */ "fill_mode ::= NONE",
+ /* 414 */ "fill_mode ::= PREV",
+ /* 415 */ "fill_mode ::= NULL",
+ /* 416 */ "fill_mode ::= LINEAR",
+ /* 417 */ "fill_mode ::= NEXT",
+ /* 418 */ "group_by_clause_opt ::=",
+ /* 419 */ "group_by_clause_opt ::= GROUP BY group_by_list",
+ /* 420 */ "group_by_list ::= expression",
+ /* 421 */ "group_by_list ::= group_by_list NK_COMMA expression",
+ /* 422 */ "having_clause_opt ::=",
+ /* 423 */ "having_clause_opt ::= HAVING search_condition",
+ /* 424 */ "query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt",
+ /* 425 */ "query_expression_body ::= query_primary",
+ /* 426 */ "query_expression_body ::= query_expression_body UNION ALL query_expression_body",
+ /* 427 */ "query_expression_body ::= query_expression_body UNION query_expression_body",
+ /* 428 */ "query_primary ::= query_specification",
+ /* 429 */ "query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP",
+ /* 430 */ "order_by_clause_opt ::=",
+ /* 431 */ "order_by_clause_opt ::= ORDER BY sort_specification_list",
+ /* 432 */ "slimit_clause_opt ::=",
+ /* 433 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER",
+ /* 434 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER",
+ /* 435 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER",
+ /* 436 */ "limit_clause_opt ::=",
+ /* 437 */ "limit_clause_opt ::= LIMIT NK_INTEGER",
+ /* 438 */ "limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER",
+ /* 439 */ "limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER",
+ /* 440 */ "subquery ::= NK_LP query_expression NK_RP",
+ /* 441 */ "search_condition ::= common_expression",
+ /* 442 */ "sort_specification_list ::= sort_specification",
+ /* 443 */ "sort_specification_list ::= sort_specification_list NK_COMMA sort_specification",
+ /* 444 */ "sort_specification ::= expression ordering_specification_opt null_ordering_opt",
+ /* 445 */ "ordering_specification_opt ::=",
+ /* 446 */ "ordering_specification_opt ::= ASC",
+ /* 447 */ "ordering_specification_opt ::= DESC",
+ /* 448 */ "null_ordering_opt ::=",
+ /* 449 */ "null_ordering_opt ::= NULLS FIRST",
+ /* 450 */ "null_ordering_opt ::= NULLS LAST",
};
#endif /* NDEBUG */
@@ -2093,174 +2116,174 @@ static void yy_destructor(
*/
/********* Begin destructor definitions ***************************************/
/* Default NON-TERMINAL Destructor */
- case 238: /* cmd */
- case 241: /* literal */
- case 252: /* db_options */
- case 254: /* alter_db_options */
- case 259: /* retention */
- case 260: /* full_table_name */
- case 263: /* table_options */
- case 267: /* alter_table_clause */
- case 268: /* alter_table_options */
- case 271: /* signed_literal */
- case 272: /* create_subtable_clause */
- case 275: /* drop_table_clause */
- case 278: /* column_def */
- case 281: /* col_name */
- case 282: /* db_name_cond_opt */
- case 283: /* like_pattern_opt */
- case 284: /* table_name_cond */
- case 285: /* from_db_opt */
- case 286: /* func_name */
- case 289: /* index_options */
- case 291: /* duration_literal */
- case 292: /* sliding_opt */
- case 293: /* func */
- case 296: /* topic_options */
- case 297: /* query_expression */
- case 299: /* explain_options */
- case 303: /* stream_options */
- case 304: /* into_opt */
- case 306: /* signed */
- case 307: /* literal_func */
- case 310: /* expression */
- case 311: /* pseudo_column */
- case 312: /* column_reference */
- case 313: /* function_expression */
- case 314: /* subquery */
- case 319: /* star_func_para */
- case 320: /* predicate */
- case 323: /* in_predicate_value */
- case 324: /* boolean_value_expression */
- case 325: /* boolean_primary */
- case 326: /* common_expression */
- case 327: /* from_clause */
- case 328: /* table_reference_list */
- case 329: /* table_reference */
- case 330: /* table_primary */
- case 331: /* joined_table */
- case 333: /* parenthesized_joined_table */
- case 335: /* search_condition */
- case 336: /* query_specification */
- case 339: /* where_clause_opt */
- case 341: /* twindow_clause_opt */
- case 343: /* having_clause_opt */
- case 345: /* select_item */
- case 346: /* fill_opt */
- case 349: /* query_expression_body */
- case 351: /* slimit_clause_opt */
- case 352: /* limit_clause_opt */
- case 353: /* query_primary */
- case 355: /* sort_specification */
+ case 237: /* cmd */
+ case 240: /* literal */
+ case 251: /* db_options */
+ case 253: /* alter_db_options */
+ case 258: /* retention */
+ case 259: /* full_table_name */
+ case 262: /* table_options */
+ case 266: /* alter_table_clause */
+ case 267: /* alter_table_options */
+ case 270: /* signed_literal */
+ case 271: /* create_subtable_clause */
+ case 274: /* drop_table_clause */
+ case 277: /* column_def */
+ case 280: /* col_name */
+ case 281: /* db_name_cond_opt */
+ case 282: /* like_pattern_opt */
+ case 283: /* table_name_cond */
+ case 284: /* from_db_opt */
+ case 285: /* func_name */
+ case 288: /* index_options */
+ case 290: /* duration_literal */
+ case 291: /* sliding_opt */
+ case 292: /* func */
+ case 295: /* query_expression */
+ case 298: /* explain_options */
+ case 302: /* stream_options */
+ case 303: /* into_opt */
+ case 305: /* signed */
+ case 306: /* literal_func */
+ case 309: /* expression */
+ case 310: /* pseudo_column */
+ case 311: /* column_reference */
+ case 312: /* function_expression */
+ case 313: /* subquery */
+ case 318: /* star_func_para */
+ case 319: /* predicate */
+ case 322: /* in_predicate_value */
+ case 323: /* boolean_value_expression */
+ case 324: /* boolean_primary */
+ case 325: /* common_expression */
+ case 326: /* from_clause */
+ case 327: /* table_reference_list */
+ case 328: /* table_reference */
+ case 329: /* table_primary */
+ case 330: /* joined_table */
+ case 332: /* parenthesized_joined_table */
+ case 334: /* search_condition */
+ case 335: /* query_specification */
+ case 338: /* where_clause_opt */
+ case 340: /* twindow_clause_opt */
+ case 342: /* having_clause_opt */
+ case 344: /* select_item */
+ case 345: /* fill_opt */
+ case 348: /* query_expression_body */
+ case 350: /* slimit_clause_opt */
+ case 351: /* limit_clause_opt */
+ case 352: /* query_primary */
+ case 354: /* sort_specification */
{
- nodesDestroyNode((yypminor->yy172));
+ nodesDestroyNode((yypminor->yy686));
}
break;
- case 239: /* account_options */
- case 240: /* alter_account_options */
- case 242: /* alter_account_option */
- case 301: /* bufsize_opt */
+ case 238: /* account_options */
+ case 239: /* alter_account_options */
+ case 241: /* alter_account_option */
+ case 300: /* bufsize_opt */
{
}
break;
- case 243: /* user_name */
- case 245: /* priv_level */
- case 248: /* db_name */
- case 249: /* dnode_endpoint */
- case 250: /* dnode_host_name */
- case 269: /* column_name */
- case 277: /* table_name */
- case 287: /* function_name */
- case 288: /* index_name */
- case 295: /* topic_name */
- case 302: /* stream_name */
- case 308: /* table_alias */
- case 309: /* column_alias */
- case 315: /* star_func */
- case 317: /* noarg_func */
- case 332: /* alias_opt */
+ case 242: /* user_name */
+ case 244: /* priv_level */
+ case 247: /* db_name */
+ case 248: /* dnode_endpoint */
+ case 249: /* dnode_host_name */
+ case 268: /* column_name */
+ case 276: /* table_name */
+ case 286: /* function_name */
+ case 287: /* index_name */
+ case 294: /* topic_name */
+ case 296: /* cgroup_name */
+ case 301: /* stream_name */
+ case 307: /* table_alias */
+ case 308: /* column_alias */
+ case 314: /* star_func */
+ case 316: /* noarg_func */
+ case 331: /* alias_opt */
{
}
break;
- case 244: /* privileges */
- case 246: /* priv_type_list */
- case 247: /* priv_type */
+ case 243: /* privileges */
+ case 245: /* priv_type_list */
+ case 246: /* priv_type */
{
}
break;
- case 251: /* not_exists_opt */
- case 253: /* exists_opt */
- case 298: /* analyze_opt */
- case 300: /* agg_func_opt */
- case 337: /* set_quantifier_opt */
+ case 250: /* not_exists_opt */
+ case 252: /* exists_opt */
+ case 297: /* analyze_opt */
+ case 299: /* agg_func_opt */
+ case 336: /* set_quantifier_opt */
{
}
break;
- case 255: /* integer_list */
- case 256: /* variable_list */
- case 257: /* retention_list */
- case 261: /* column_def_list */
- case 262: /* tags_def_opt */
- case 264: /* multi_create_clause */
- case 265: /* tags_def */
- case 266: /* multi_drop_clause */
- case 273: /* specific_tags_opt */
- case 274: /* literal_list */
- case 276: /* col_name_list */
- case 279: /* func_name_list */
- case 290: /* func_list */
- case 294: /* expression_list */
- case 305: /* dnode_list */
- case 316: /* star_func_para_list */
- case 318: /* other_para_list */
- case 338: /* select_list */
- case 340: /* partition_by_clause_opt */
- case 342: /* group_by_clause_opt */
- case 344: /* select_sublist */
- case 348: /* group_by_list */
- case 350: /* order_by_clause_opt */
- case 354: /* sort_specification_list */
+ case 254: /* integer_list */
+ case 255: /* variable_list */
+ case 256: /* retention_list */
+ case 260: /* column_def_list */
+ case 261: /* tags_def_opt */
+ case 263: /* multi_create_clause */
+ case 264: /* tags_def */
+ case 265: /* multi_drop_clause */
+ case 272: /* specific_tags_opt */
+ case 273: /* literal_list */
+ case 275: /* col_name_list */
+ case 278: /* func_name_list */
+ case 289: /* func_list */
+ case 293: /* expression_list */
+ case 304: /* dnode_list */
+ case 315: /* star_func_para_list */
+ case 317: /* other_para_list */
+ case 337: /* select_list */
+ case 339: /* partition_by_clause_opt */
+ case 341: /* group_by_clause_opt */
+ case 343: /* select_sublist */
+ case 347: /* group_by_list */
+ case 349: /* order_by_clause_opt */
+ case 353: /* sort_specification_list */
{
- nodesDestroyList((yypminor->yy60));
+ nodesDestroyList((yypminor->yy670));
}
break;
- case 258: /* alter_db_option */
- case 280: /* alter_table_option */
+ case 257: /* alter_db_option */
+ case 279: /* alter_table_option */
{
}
break;
- case 270: /* type_name */
+ case 269: /* type_name */
{
}
break;
- case 321: /* compare_op */
- case 322: /* in_op */
+ case 320: /* compare_op */
+ case 321: /* in_op */
{
}
break;
- case 334: /* join_type */
+ case 333: /* join_type */
{
}
break;
- case 347: /* fill_mode */
+ case 346: /* fill_mode */
{
}
break;
- case 356: /* ordering_specification_opt */
+ case 355: /* ordering_specification_opt */
{
}
break;
- case 357: /* null_ordering_opt */
+ case 356: /* null_ordering_opt */
{
}
@@ -2388,15 +2411,18 @@ static YYACTIONTYPE yy_find_shift_action(
do{
i = yy_shift_ofst[stateno];
assert( i>=0 );
- /* assert( i+YYNTOKEN<=(int)YY_NLOOKAHEAD ); */
+ assert( i<=YY_ACTTAB_COUNT );
+ assert( i+YYNTOKEN<=(int)YY_NLOOKAHEAD );
assert( iLookAhead!=YYNOCODE );
assert( iLookAhead < YYNTOKEN );
i += iLookAhead;
- if( i>=YY_NLOOKAHEAD || yy_lookahead[i]!=iLookAhead ){
+ assert( i<(int)YY_NLOOKAHEAD );
+ if( yy_lookahead[i]!=iLookAhead ){
#ifdef YYFALLBACK
YYCODETYPE iFallback; /* Fallback token */
- if( iLookAhead %s\n",
@@ -2411,16 +2437,8 @@ static YYACTIONTYPE yy_find_shift_action(
#ifdef YYWILDCARD
{
int j = i - iLookAhead + YYWILDCARD;
- if(
-#if YY_SHIFT_MIN+YYWILDCARD<0
- j>=0 &&
-#endif
-#if YY_SHIFT_MAX+YYWILDCARD>=YY_ACTTAB_COUNT
- j0
- ){
+ assert( j<(int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0])) );
+ if( yy_lookahead[j]==YYWILDCARD && iLookAhead>0 ){
#ifndef NDEBUG
if( yyTraceFILE ){
fprintf(yyTraceFILE, "%sWILDCARD %s => %s\n",
@@ -2434,6 +2452,7 @@ static YYACTIONTYPE yy_find_shift_action(
#endif /* YYWILDCARD */
return yy_default[stateno];
}else{
+ assert( i>=0 && iyytos;
#ifndef NDEBUG
if( yyTraceFILE && yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ){
- yysize = yyRuleInfo[yyruleno].nrhs;
+ yysize = yyRuleInfoNRhs[yyruleno];
if( yysize ){
- fprintf(yyTraceFILE, "%sReduce %d [%s], go to state %d.\n",
+ fprintf(yyTraceFILE, "%sReduce %d [%s]%s, pop back to state %d.\n",
yyTracePrompt,
- yyruleno, yyRuleName[yyruleno], yymsp[yysize].stateno);
+ yyruleno, yyRuleName[yyruleno],
+ yyrulenoyytos - yypParser->yystack)>yypParser->yyhwm ){
yypParser->yyhwm++;
@@ -3099,11 +3572,11 @@ static YYACTIONTYPE yy_reduce(
YYMINORTYPE yylhsminor;
case 0: /* cmd ::= CREATE ACCOUNT NK_ID PASS NK_STRING account_options */
{ pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); }
- yy_destructor(yypParser,239,&yymsp[0].minor);
+ yy_destructor(yypParser,238,&yymsp[0].minor);
break;
case 1: /* cmd ::= ALTER ACCOUNT NK_ID alter_account_options */
{ pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); }
- yy_destructor(yypParser,240,&yymsp[0].minor);
+ yy_destructor(yypParser,239,&yymsp[0].minor);
break;
case 2: /* account_options ::= */
{ }
@@ -3117,20 +3590,20 @@ static YYACTIONTYPE yy_reduce(
case 9: /* account_options ::= account_options USERS literal */ yytestcase(yyruleno==9);
case 10: /* account_options ::= account_options CONNS literal */ yytestcase(yyruleno==10);
case 11: /* account_options ::= account_options STATE literal */ yytestcase(yyruleno==11);
-{ yy_destructor(yypParser,239,&yymsp[-2].minor);
+{ yy_destructor(yypParser,238,&yymsp[-2].minor);
{ }
- yy_destructor(yypParser,241,&yymsp[0].minor);
+ yy_destructor(yypParser,240,&yymsp[0].minor);
}
break;
case 12: /* alter_account_options ::= alter_account_option */
-{ yy_destructor(yypParser,242,&yymsp[0].minor);
+{ yy_destructor(yypParser,241,&yymsp[0].minor);
{ }
}
break;
case 13: /* alter_account_options ::= alter_account_options alter_account_option */
-{ yy_destructor(yypParser,240,&yymsp[-1].minor);
+{ yy_destructor(yypParser,239,&yymsp[-1].minor);
{ }
- yy_destructor(yypParser,242,&yymsp[0].minor);
+ yy_destructor(yypParser,241,&yymsp[0].minor);
}
break;
case 14: /* alter_account_option ::= PASS literal */
@@ -3144,63 +3617,63 @@ static YYACTIONTYPE yy_reduce(
case 22: /* alter_account_option ::= CONNS literal */ yytestcase(yyruleno==22);
case 23: /* alter_account_option ::= STATE literal */ yytestcase(yyruleno==23);
{ }
- yy_destructor(yypParser,241,&yymsp[0].minor);
+ yy_destructor(yypParser,240,&yymsp[0].minor);
break;
case 24: /* cmd ::= CREATE USER user_name PASS NK_STRING */
-{ pCxt->pRootNode = createCreateUserStmt(pCxt, &yymsp[-2].minor.yy105, &yymsp[0].minor.yy0); }
+{ pCxt->pRootNode = createCreateUserStmt(pCxt, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy0); }
break;
case 25: /* cmd ::= ALTER USER user_name PASS NK_STRING */
-{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy105, TSDB_ALTER_USER_PASSWD, &yymsp[0].minor.yy0); }
+{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy113, TSDB_ALTER_USER_PASSWD, &yymsp[0].minor.yy0); }
break;
case 26: /* cmd ::= ALTER USER user_name PRIVILEGE NK_STRING */
-{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy105, TSDB_ALTER_USER_PRIVILEGES, &yymsp[0].minor.yy0); }
+{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy113, TSDB_ALTER_USER_PRIVILEGES, &yymsp[0].minor.yy0); }
break;
case 27: /* cmd ::= DROP USER user_name */
-{ pCxt->pRootNode = createDropUserStmt(pCxt, &yymsp[0].minor.yy105); }
+{ pCxt->pRootNode = createDropUserStmt(pCxt, &yymsp[0].minor.yy113); }
break;
case 28: /* cmd ::= GRANT privileges ON priv_level TO user_name */
-{ pCxt->pRootNode = createGrantStmt(pCxt, yymsp[-4].minor.yy593, &yymsp[-2].minor.yy105, &yymsp[0].minor.yy105); }
+{ pCxt->pRootNode = createGrantStmt(pCxt, yymsp[-4].minor.yy123, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy113); }
break;
case 29: /* cmd ::= REVOKE privileges ON priv_level FROM user_name */
-{ pCxt->pRootNode = createRevokeStmt(pCxt, yymsp[-4].minor.yy593, &yymsp[-2].minor.yy105, &yymsp[0].minor.yy105); }
+{ pCxt->pRootNode = createRevokeStmt(pCxt, yymsp[-4].minor.yy123, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy113); }
break;
case 30: /* privileges ::= ALL */
-{ yymsp[0].minor.yy593 = PRIVILEGE_TYPE_ALL; }
+{ yymsp[0].minor.yy123 = PRIVILEGE_TYPE_ALL; }
break;
case 31: /* privileges ::= priv_type_list */
case 32: /* priv_type_list ::= priv_type */ yytestcase(yyruleno==32);
-{ yylhsminor.yy593 = yymsp[0].minor.yy593; }
- yymsp[0].minor.yy593 = yylhsminor.yy593;
+{ yylhsminor.yy123 = yymsp[0].minor.yy123; }
+ yymsp[0].minor.yy123 = yylhsminor.yy123;
break;
case 33: /* priv_type_list ::= priv_type_list NK_COMMA priv_type */
-{ yylhsminor.yy593 = yymsp[-2].minor.yy593 | yymsp[0].minor.yy593; }
- yymsp[-2].minor.yy593 = yylhsminor.yy593;
+{ yylhsminor.yy123 = yymsp[-2].minor.yy123 | yymsp[0].minor.yy123; }
+ yymsp[-2].minor.yy123 = yylhsminor.yy123;
break;
case 34: /* priv_type ::= READ */
-{ yymsp[0].minor.yy593 = PRIVILEGE_TYPE_READ; }
+{ yymsp[0].minor.yy123 = PRIVILEGE_TYPE_READ; }
break;
case 35: /* priv_type ::= WRITE */
-{ yymsp[0].minor.yy593 = PRIVILEGE_TYPE_WRITE; }
+{ yymsp[0].minor.yy123 = PRIVILEGE_TYPE_WRITE; }
break;
case 36: /* priv_level ::= NK_STAR NK_DOT NK_STAR */
-{ yylhsminor.yy105 = yymsp[-2].minor.yy0; }
- yymsp[-2].minor.yy105 = yylhsminor.yy105;
+{ yylhsminor.yy113 = yymsp[-2].minor.yy0; }
+ yymsp[-2].minor.yy113 = yylhsminor.yy113;
break;
case 37: /* priv_level ::= db_name NK_DOT NK_STAR */
-{ yylhsminor.yy105 = yymsp[-2].minor.yy105; }
- yymsp[-2].minor.yy105 = yylhsminor.yy105;
+{ yylhsminor.yy113 = yymsp[-2].minor.yy113; }
+ yymsp[-2].minor.yy113 = yylhsminor.yy113;
break;
case 38: /* cmd ::= CREATE DNODE dnode_endpoint */
-{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[0].minor.yy105, NULL); }
+{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[0].minor.yy113, NULL); }
break;
case 39: /* cmd ::= CREATE DNODE dnode_host_name PORT NK_INTEGER */
-{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[-2].minor.yy105, &yymsp[0].minor.yy0); }
+{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy0); }
break;
case 40: /* cmd ::= DROP DNODE NK_INTEGER */
{ pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[0].minor.yy0); }
break;
case 41: /* cmd ::= DROP DNODE dnode_endpoint */
-{ pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[0].minor.yy105); }
+{ pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[0].minor.yy113); }
break;
case 42: /* cmd ::= ALTER DNODE NK_INTEGER NK_STRING */
{ pCxt->pRootNode = createAlterDnodeStmt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0, NULL); }
@@ -3217,25 +3690,26 @@ static YYACTIONTYPE yy_reduce(
case 46: /* dnode_endpoint ::= NK_STRING */
case 47: /* dnode_host_name ::= NK_ID */ yytestcase(yyruleno==47);
case 48: /* dnode_host_name ::= NK_IPTOKEN */ yytestcase(yyruleno==48);
- case 288: /* db_name ::= NK_ID */ yytestcase(yyruleno==288);
- case 289: /* table_name ::= NK_ID */ yytestcase(yyruleno==289);
- case 290: /* column_name ::= NK_ID */ yytestcase(yyruleno==290);
- case 291: /* function_name ::= NK_ID */ yytestcase(yyruleno==291);
- case 292: /* table_alias ::= NK_ID */ yytestcase(yyruleno==292);
- case 293: /* column_alias ::= NK_ID */ yytestcase(yyruleno==293);
- case 294: /* user_name ::= NK_ID */ yytestcase(yyruleno==294);
- case 295: /* index_name ::= NK_ID */ yytestcase(yyruleno==295);
- case 296: /* topic_name ::= NK_ID */ yytestcase(yyruleno==296);
- case 297: /* stream_name ::= NK_ID */ yytestcase(yyruleno==297);
- case 330: /* noarg_func ::= NOW */ yytestcase(yyruleno==330);
- case 331: /* noarg_func ::= TODAY */ yytestcase(yyruleno==331);
- case 332: /* noarg_func ::= TIMEZONE */ yytestcase(yyruleno==332);
- case 333: /* star_func ::= COUNT */ yytestcase(yyruleno==333);
- case 334: /* star_func ::= FIRST */ yytestcase(yyruleno==334);
- case 335: /* star_func ::= LAST */ yytestcase(yyruleno==335);
- case 336: /* star_func ::= LAST_ROW */ yytestcase(yyruleno==336);
-{ yylhsminor.yy105 = yymsp[0].minor.yy0; }
- yymsp[0].minor.yy105 = yylhsminor.yy105;
+ case 286: /* db_name ::= NK_ID */ yytestcase(yyruleno==286);
+ case 287: /* table_name ::= NK_ID */ yytestcase(yyruleno==287);
+ case 288: /* column_name ::= NK_ID */ yytestcase(yyruleno==288);
+ case 289: /* function_name ::= NK_ID */ yytestcase(yyruleno==289);
+ case 290: /* table_alias ::= NK_ID */ yytestcase(yyruleno==290);
+ case 291: /* column_alias ::= NK_ID */ yytestcase(yyruleno==291);
+ case 292: /* user_name ::= NK_ID */ yytestcase(yyruleno==292);
+ case 293: /* index_name ::= NK_ID */ yytestcase(yyruleno==293);
+ case 294: /* topic_name ::= NK_ID */ yytestcase(yyruleno==294);
+ case 295: /* stream_name ::= NK_ID */ yytestcase(yyruleno==295);
+ case 296: /* cgroup_name ::= NK_ID */ yytestcase(yyruleno==296);
+ case 329: /* noarg_func ::= NOW */ yytestcase(yyruleno==329);
+ case 330: /* noarg_func ::= TODAY */ yytestcase(yyruleno==330);
+ case 331: /* noarg_func ::= TIMEZONE */ yytestcase(yyruleno==331);
+ case 332: /* star_func ::= COUNT */ yytestcase(yyruleno==332);
+ case 333: /* star_func ::= FIRST */ yytestcase(yyruleno==333);
+ case 334: /* star_func ::= LAST */ yytestcase(yyruleno==334);
+ case 335: /* star_func ::= LAST_ROW */ yytestcase(yyruleno==335);
+{ yylhsminor.yy113 = yymsp[0].minor.yy0; }
+ yymsp[0].minor.yy113 = yylhsminor.yy113;
break;
case 49: /* cmd ::= ALTER LOCAL NK_STRING */
{ pCxt->pRootNode = createAlterLocalStmt(pCxt, &yymsp[0].minor.yy0, NULL); }
@@ -3268,400 +3742,400 @@ static YYACTIONTYPE yy_reduce(
{ pCxt->pRootNode = createDropComponentNodeStmt(pCxt, QUERY_NODE_DROP_MNODE_STMT, &yymsp[0].minor.yy0); }
break;
case 59: /* cmd ::= CREATE DATABASE not_exists_opt db_name db_options */
-{ pCxt->pRootNode = createCreateDatabaseStmt(pCxt, yymsp[-2].minor.yy617, &yymsp[-1].minor.yy105, yymsp[0].minor.yy172); }
+{ pCxt->pRootNode = createCreateDatabaseStmt(pCxt, yymsp[-2].minor.yy131, &yymsp[-1].minor.yy113, yymsp[0].minor.yy686); }
break;
case 60: /* cmd ::= DROP DATABASE exists_opt db_name */
-{ pCxt->pRootNode = createDropDatabaseStmt(pCxt, yymsp[-1].minor.yy617, &yymsp[0].minor.yy105); }
+{ pCxt->pRootNode = createDropDatabaseStmt(pCxt, yymsp[-1].minor.yy131, &yymsp[0].minor.yy113); }
break;
case 61: /* cmd ::= USE db_name */
-{ pCxt->pRootNode = createUseDatabaseStmt(pCxt, &yymsp[0].minor.yy105); }
+{ pCxt->pRootNode = createUseDatabaseStmt(pCxt, &yymsp[0].minor.yy113); }
break;
case 62: /* cmd ::= ALTER DATABASE db_name alter_db_options */
-{ pCxt->pRootNode = createAlterDatabaseStmt(pCxt, &yymsp[-1].minor.yy105, yymsp[0].minor.yy172); }
+{ pCxt->pRootNode = createAlterDatabaseStmt(pCxt, &yymsp[-1].minor.yy113, yymsp[0].minor.yy686); }
break;
case 63: /* not_exists_opt ::= IF NOT EXISTS */
-{ yymsp[-2].minor.yy617 = true; }
+{ yymsp[-2].minor.yy131 = true; }
break;
case 64: /* not_exists_opt ::= */
case 66: /* exists_opt ::= */ yytestcase(yyruleno==66);
- case 234: /* analyze_opt ::= */ yytestcase(yyruleno==234);
- case 242: /* agg_func_opt ::= */ yytestcase(yyruleno==242);
- case 389: /* set_quantifier_opt ::= */ yytestcase(yyruleno==389);
-{ yymsp[1].minor.yy617 = false; }
+ case 232: /* analyze_opt ::= */ yytestcase(yyruleno==232);
+ case 240: /* agg_func_opt ::= */ yytestcase(yyruleno==240);
+ case 388: /* set_quantifier_opt ::= */ yytestcase(yyruleno==388);
+{ yymsp[1].minor.yy131 = false; }
break;
case 65: /* exists_opt ::= IF EXISTS */
-{ yymsp[-1].minor.yy617 = true; }
+{ yymsp[-1].minor.yy131 = true; }
break;
case 67: /* db_options ::= */
-{ yymsp[1].minor.yy172 = createDefaultDatabaseOptions(pCxt); }
+{ yymsp[1].minor.yy686 = createDefaultDatabaseOptions(pCxt); }
break;
case 68: /* db_options ::= db_options BUFFER NK_INTEGER */
-{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_BUFFER, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_BUFFER, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
case 69: /* db_options ::= db_options CACHELAST NK_INTEGER */
-{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_CACHELAST, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_CACHELAST, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
case 70: /* db_options ::= db_options COMP NK_INTEGER */
-{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_COMP, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_COMP, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
case 71: /* db_options ::= db_options DAYS NK_INTEGER */
case 72: /* db_options ::= db_options DAYS NK_VARIABLE */ yytestcase(yyruleno==72);
-{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_DAYS, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_DAYS, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
case 73: /* db_options ::= db_options FSYNC NK_INTEGER */
-{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_FSYNC, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_FSYNC, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
case 74: /* db_options ::= db_options MAXROWS NK_INTEGER */
-{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_MAXROWS, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_MAXROWS, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
case 75: /* db_options ::= db_options MINROWS NK_INTEGER */
-{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_MINROWS, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_MINROWS, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
case 76: /* db_options ::= db_options KEEP integer_list */
case 77: /* db_options ::= db_options KEEP variable_list */ yytestcase(yyruleno==77);
-{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_KEEP, yymsp[0].minor.yy60); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_KEEP, yymsp[0].minor.yy670); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
case 78: /* db_options ::= db_options PAGES NK_INTEGER */
-{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_PAGES, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_PAGES, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
case 79: /* db_options ::= db_options PAGESIZE NK_INTEGER */
-{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_PAGESIZE, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_PAGESIZE, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
case 80: /* db_options ::= db_options PRECISION NK_STRING */
-{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_PRECISION, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_PRECISION, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
case 81: /* db_options ::= db_options REPLICA NK_INTEGER */
-{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_REPLICA, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_REPLICA, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
case 82: /* db_options ::= db_options STRICT NK_INTEGER */
-{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_STRICT, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_STRICT, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
case 83: /* db_options ::= db_options WAL NK_INTEGER */
-{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_WAL, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_WAL, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
case 84: /* db_options ::= db_options VGROUPS NK_INTEGER */
-{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_VGROUPS, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_VGROUPS, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
case 85: /* db_options ::= db_options SINGLE_STABLE NK_INTEGER */
-{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_SINGLE_STABLE, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_SINGLE_STABLE, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
case 86: /* db_options ::= db_options RETENTIONS retention_list */
-{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_RETENTIONS, yymsp[0].minor.yy60); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_RETENTIONS, yymsp[0].minor.yy670); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
+ break;
+ case 87: /* db_options ::= db_options SCHEMALESS NK_INTEGER */
+{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_SCHEMALESS, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
- case 87: /* alter_db_options ::= alter_db_option */
-{ yylhsminor.yy172 = createAlterDatabaseOptions(pCxt); yylhsminor.yy172 = setAlterDatabaseOption(pCxt, yylhsminor.yy172, &yymsp[0].minor.yy609); }
- yymsp[0].minor.yy172 = yylhsminor.yy172;
+ case 88: /* alter_db_options ::= alter_db_option */
+{ yylhsminor.yy686 = createAlterDatabaseOptions(pCxt); yylhsminor.yy686 = setAlterDatabaseOption(pCxt, yylhsminor.yy686, &yymsp[0].minor.yy53); }
+ yymsp[0].minor.yy686 = yylhsminor.yy686;
break;
- case 88: /* alter_db_options ::= alter_db_options alter_db_option */
-{ yylhsminor.yy172 = setAlterDatabaseOption(pCxt, yymsp[-1].minor.yy172, &yymsp[0].minor.yy609); }
- yymsp[-1].minor.yy172 = yylhsminor.yy172;
+ case 89: /* alter_db_options ::= alter_db_options alter_db_option */
+{ yylhsminor.yy686 = setAlterDatabaseOption(pCxt, yymsp[-1].minor.yy686, &yymsp[0].minor.yy53); }
+ yymsp[-1].minor.yy686 = yylhsminor.yy686;
break;
- case 89: /* alter_db_option ::= BUFFER NK_INTEGER */
-{ yymsp[-1].minor.yy609.type = DB_OPTION_BUFFER; yymsp[-1].minor.yy609.val = yymsp[0].minor.yy0; }
+ case 90: /* alter_db_option ::= BUFFER NK_INTEGER */
+{ yymsp[-1].minor.yy53.type = DB_OPTION_BUFFER; yymsp[-1].minor.yy53.val = yymsp[0].minor.yy0; }
break;
- case 90: /* alter_db_option ::= CACHELAST NK_INTEGER */
-{ yymsp[-1].minor.yy609.type = DB_OPTION_CACHELAST; yymsp[-1].minor.yy609.val = yymsp[0].minor.yy0; }
+ case 91: /* alter_db_option ::= CACHELAST NK_INTEGER */
+{ yymsp[-1].minor.yy53.type = DB_OPTION_CACHELAST; yymsp[-1].minor.yy53.val = yymsp[0].minor.yy0; }
break;
- case 91: /* alter_db_option ::= FSYNC NK_INTEGER */
-{ yymsp[-1].minor.yy609.type = DB_OPTION_FSYNC; yymsp[-1].minor.yy609.val = yymsp[0].minor.yy0; }
+ case 92: /* alter_db_option ::= FSYNC NK_INTEGER */
+{ yymsp[-1].minor.yy53.type = DB_OPTION_FSYNC; yymsp[-1].minor.yy53.val = yymsp[0].minor.yy0; }
break;
- case 92: /* alter_db_option ::= KEEP integer_list */
- case 93: /* alter_db_option ::= KEEP variable_list */ yytestcase(yyruleno==93);
-{ yymsp[-1].minor.yy609.type = DB_OPTION_KEEP; yymsp[-1].minor.yy609.pList = yymsp[0].minor.yy60; }
+ case 93: /* alter_db_option ::= KEEP integer_list */
+ case 94: /* alter_db_option ::= KEEP variable_list */ yytestcase(yyruleno==94);
+{ yymsp[-1].minor.yy53.type = DB_OPTION_KEEP; yymsp[-1].minor.yy53.pList = yymsp[0].minor.yy670; }
break;
- case 94: /* alter_db_option ::= PAGES NK_INTEGER */
-{ yymsp[-1].minor.yy609.type = DB_OPTION_PAGES; yymsp[-1].minor.yy609.val = yymsp[0].minor.yy0; }
+ case 95: /* alter_db_option ::= PAGES NK_INTEGER */
+{ yymsp[-1].minor.yy53.type = DB_OPTION_PAGES; yymsp[-1].minor.yy53.val = yymsp[0].minor.yy0; }
break;
- case 95: /* alter_db_option ::= REPLICA NK_INTEGER */
-{ yymsp[-1].minor.yy609.type = DB_OPTION_REPLICA; yymsp[-1].minor.yy609.val = yymsp[0].minor.yy0; }
+ case 96: /* alter_db_option ::= REPLICA NK_INTEGER */
+{ yymsp[-1].minor.yy53.type = DB_OPTION_REPLICA; yymsp[-1].minor.yy53.val = yymsp[0].minor.yy0; }
break;
- case 96: /* alter_db_option ::= STRICT NK_INTEGER */
-{ yymsp[-1].minor.yy609.type = DB_OPTION_STRICT; yymsp[-1].minor.yy609.val = yymsp[0].minor.yy0; }
+ case 97: /* alter_db_option ::= STRICT NK_INTEGER */
+{ yymsp[-1].minor.yy53.type = DB_OPTION_STRICT; yymsp[-1].minor.yy53.val = yymsp[0].minor.yy0; }
break;
- case 97: /* alter_db_option ::= WAL NK_INTEGER */
-{ yymsp[-1].minor.yy609.type = DB_OPTION_WAL; yymsp[-1].minor.yy609.val = yymsp[0].minor.yy0; }
+ case 98: /* alter_db_option ::= WAL NK_INTEGER */
+{ yymsp[-1].minor.yy53.type = DB_OPTION_WAL; yymsp[-1].minor.yy53.val = yymsp[0].minor.yy0; }
break;
- case 98: /* integer_list ::= NK_INTEGER */
-{ yylhsminor.yy60 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy60 = yylhsminor.yy60;
+ case 99: /* integer_list ::= NK_INTEGER */
+{ yylhsminor.yy670 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy670 = yylhsminor.yy670;
break;
- case 99: /* integer_list ::= integer_list NK_COMMA NK_INTEGER */
- case 261: /* dnode_list ::= dnode_list DNODE NK_INTEGER */ yytestcase(yyruleno==261);
-{ yylhsminor.yy60 = addNodeToList(pCxt, yymsp[-2].minor.yy60, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); }
- yymsp[-2].minor.yy60 = yylhsminor.yy60;
+ case 100: /* integer_list ::= integer_list NK_COMMA NK_INTEGER */
+ case 259: /* dnode_list ::= dnode_list DNODE NK_INTEGER */ yytestcase(yyruleno==259);
+{ yylhsminor.yy670 = addNodeToList(pCxt, yymsp[-2].minor.yy670, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); }
+ yymsp[-2].minor.yy670 = yylhsminor.yy670;
break;
- case 100: /* variable_list ::= NK_VARIABLE */
-{ yylhsminor.yy60 = createNodeList(pCxt, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy60 = yylhsminor.yy60;
+ case 101: /* variable_list ::= NK_VARIABLE */
+{ yylhsminor.yy670 = createNodeList(pCxt, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy670 = yylhsminor.yy670;
break;
- case 101: /* variable_list ::= variable_list NK_COMMA NK_VARIABLE */
-{ yylhsminor.yy60 = addNodeToList(pCxt, yymsp[-2].minor.yy60, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
- yymsp[-2].minor.yy60 = yylhsminor.yy60;
+ case 102: /* variable_list ::= variable_list NK_COMMA NK_VARIABLE */
+{ yylhsminor.yy670 = addNodeToList(pCxt, yymsp[-2].minor.yy670, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
+ yymsp[-2].minor.yy670 = yylhsminor.yy670;
break;
- case 102: /* retention_list ::= retention */
- case 122: /* multi_create_clause ::= create_subtable_clause */ yytestcase(yyruleno==122);
- case 125: /* multi_drop_clause ::= drop_table_clause */ yytestcase(yyruleno==125);
- case 132: /* column_def_list ::= column_def */ yytestcase(yyruleno==132);
+ case 103: /* retention_list ::= retention */
+ case 123: /* multi_create_clause ::= create_subtable_clause */ yytestcase(yyruleno==123);
+ case 126: /* multi_drop_clause ::= drop_table_clause */ yytestcase(yyruleno==126);
+ case 133: /* column_def_list ::= column_def */ yytestcase(yyruleno==133);
case 173: /* col_name_list ::= col_name */ yytestcase(yyruleno==173);
case 211: /* func_name_list ::= func_name */ yytestcase(yyruleno==211);
case 220: /* func_list ::= func */ yytestcase(yyruleno==220);
- case 286: /* literal_list ::= signed_literal */ yytestcase(yyruleno==286);
- case 339: /* other_para_list ::= star_func_para */ yytestcase(yyruleno==339);
- case 394: /* select_sublist ::= select_item */ yytestcase(yyruleno==394);
- case 443: /* sort_specification_list ::= sort_specification */ yytestcase(yyruleno==443);
-{ yylhsminor.yy60 = createNodeList(pCxt, yymsp[0].minor.yy172); }
- yymsp[0].minor.yy60 = yylhsminor.yy60;
- break;
- case 103: /* retention_list ::= retention_list NK_COMMA retention */
- case 133: /* column_def_list ::= column_def_list NK_COMMA column_def */ yytestcase(yyruleno==133);
+ case 284: /* literal_list ::= signed_literal */ yytestcase(yyruleno==284);
+ case 338: /* other_para_list ::= star_func_para */ yytestcase(yyruleno==338);
+ case 393: /* select_sublist ::= select_item */ yytestcase(yyruleno==393);
+ case 442: /* sort_specification_list ::= sort_specification */ yytestcase(yyruleno==442);
+{ yylhsminor.yy670 = createNodeList(pCxt, yymsp[0].minor.yy686); }
+ yymsp[0].minor.yy670 = yylhsminor.yy670;
+ break;
+ case 104: /* retention_list ::= retention_list NK_COMMA retention */
+ case 134: /* column_def_list ::= column_def_list NK_COMMA column_def */ yytestcase(yyruleno==134);
case 174: /* col_name_list ::= col_name_list NK_COMMA col_name */ yytestcase(yyruleno==174);
case 212: /* func_name_list ::= func_name_list NK_COMMA func_name */ yytestcase(yyruleno==212);
case 221: /* func_list ::= func_list NK_COMMA func */ yytestcase(yyruleno==221);
- case 287: /* literal_list ::= literal_list NK_COMMA signed_literal */ yytestcase(yyruleno==287);
- case 340: /* other_para_list ::= other_para_list NK_COMMA star_func_para */ yytestcase(yyruleno==340);
- case 395: /* select_sublist ::= select_sublist NK_COMMA select_item */ yytestcase(yyruleno==395);
- case 444: /* sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ yytestcase(yyruleno==444);
-{ yylhsminor.yy60 = addNodeToList(pCxt, yymsp[-2].minor.yy60, yymsp[0].minor.yy172); }
- yymsp[-2].minor.yy60 = yylhsminor.yy60;
- break;
- case 104: /* retention ::= NK_VARIABLE NK_COLON NK_VARIABLE */
-{ yylhsminor.yy172 = createNodeListNodeEx(pCxt, createDurationValueNode(pCxt, &yymsp[-2].minor.yy0), createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+ case 285: /* literal_list ::= literal_list NK_COMMA signed_literal */ yytestcase(yyruleno==285);
+ case 339: /* other_para_list ::= other_para_list NK_COMMA star_func_para */ yytestcase(yyruleno==339);
+ case 394: /* select_sublist ::= select_sublist NK_COMMA select_item */ yytestcase(yyruleno==394);
+ case 443: /* sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ yytestcase(yyruleno==443);
+{ yylhsminor.yy670 = addNodeToList(pCxt, yymsp[-2].minor.yy670, yymsp[0].minor.yy686); }
+ yymsp[-2].minor.yy670 = yylhsminor.yy670;
+ break;
+ case 105: /* retention ::= NK_VARIABLE NK_COLON NK_VARIABLE */
+{ yylhsminor.yy686 = createNodeListNodeEx(pCxt, createDurationValueNode(pCxt, &yymsp[-2].minor.yy0), createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
- case 105: /* cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options */
- case 107: /* cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options */ yytestcase(yyruleno==107);
-{ pCxt->pRootNode = createCreateTableStmt(pCxt, yymsp[-6].minor.yy617, yymsp[-5].minor.yy172, yymsp[-3].minor.yy60, yymsp[-1].minor.yy60, yymsp[0].minor.yy172); }
+ case 106: /* cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options */
+ case 108: /* cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options */ yytestcase(yyruleno==108);
+{ pCxt->pRootNode = createCreateTableStmt(pCxt, yymsp[-6].minor.yy131, yymsp[-5].minor.yy686, yymsp[-3].minor.yy670, yymsp[-1].minor.yy670, yymsp[0].minor.yy686); }
break;
- case 106: /* cmd ::= CREATE TABLE multi_create_clause */
-{ pCxt->pRootNode = createCreateMultiTableStmt(pCxt, yymsp[0].minor.yy60); }
+ case 107: /* cmd ::= CREATE TABLE multi_create_clause */
+{ pCxt->pRootNode = createCreateMultiTableStmt(pCxt, yymsp[0].minor.yy670); }
break;
- case 108: /* cmd ::= DROP TABLE multi_drop_clause */
-{ pCxt->pRootNode = createDropTableStmt(pCxt, yymsp[0].minor.yy60); }
+ case 109: /* cmd ::= DROP TABLE multi_drop_clause */
+{ pCxt->pRootNode = createDropTableStmt(pCxt, yymsp[0].minor.yy670); }
break;
- case 109: /* cmd ::= DROP STABLE exists_opt full_table_name */
-{ pCxt->pRootNode = createDropSuperTableStmt(pCxt, yymsp[-1].minor.yy617, yymsp[0].minor.yy172); }
+ case 110: /* cmd ::= DROP STABLE exists_opt full_table_name */
+{ pCxt->pRootNode = createDropSuperTableStmt(pCxt, yymsp[-1].minor.yy131, yymsp[0].minor.yy686); }
break;
- case 110: /* cmd ::= ALTER TABLE alter_table_clause */
- case 111: /* cmd ::= ALTER STABLE alter_table_clause */ yytestcase(yyruleno==111);
- case 263: /* cmd ::= query_expression */ yytestcase(yyruleno==263);
-{ pCxt->pRootNode = yymsp[0].minor.yy172; }
+ case 111: /* cmd ::= ALTER TABLE alter_table_clause */
+ case 112: /* cmd ::= ALTER STABLE alter_table_clause */ yytestcase(yyruleno==112);
+ case 261: /* cmd ::= query_expression */ yytestcase(yyruleno==261);
+{ pCxt->pRootNode = yymsp[0].minor.yy686; }
break;
- case 112: /* alter_table_clause ::= full_table_name alter_table_options */
-{ yylhsminor.yy172 = createAlterTableModifyOptions(pCxt, yymsp[-1].minor.yy172, yymsp[0].minor.yy172); }
- yymsp[-1].minor.yy172 = yylhsminor.yy172;
+ case 113: /* alter_table_clause ::= full_table_name alter_table_options */
+{ yylhsminor.yy686 = createAlterTableModifyOptions(pCxt, yymsp[-1].minor.yy686, yymsp[0].minor.yy686); }
+ yymsp[-1].minor.yy686 = yylhsminor.yy686;
break;
- case 113: /* alter_table_clause ::= full_table_name ADD COLUMN column_name type_name */
-{ yylhsminor.yy172 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy172, TSDB_ALTER_TABLE_ADD_COLUMN, &yymsp[-1].minor.yy105, yymsp[0].minor.yy248); }
- yymsp[-4].minor.yy172 = yylhsminor.yy172;
+ case 114: /* alter_table_clause ::= full_table_name ADD COLUMN column_name type_name */
+{ yylhsminor.yy686 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy686, TSDB_ALTER_TABLE_ADD_COLUMN, &yymsp[-1].minor.yy113, yymsp[0].minor.yy490); }
+ yymsp[-4].minor.yy686 = yylhsminor.yy686;
break;
- case 114: /* alter_table_clause ::= full_table_name DROP COLUMN column_name */
-{ yylhsminor.yy172 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy172, TSDB_ALTER_TABLE_DROP_COLUMN, &yymsp[0].minor.yy105); }
- yymsp[-3].minor.yy172 = yylhsminor.yy172;
+ case 115: /* alter_table_clause ::= full_table_name DROP COLUMN column_name */
+{ yylhsminor.yy686 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy686, TSDB_ALTER_TABLE_DROP_COLUMN, &yymsp[0].minor.yy113); }
+ yymsp[-3].minor.yy686 = yylhsminor.yy686;
break;
- case 115: /* alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name */
-{ yylhsminor.yy172 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy172, TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES, &yymsp[-1].minor.yy105, yymsp[0].minor.yy248); }
- yymsp[-4].minor.yy172 = yylhsminor.yy172;
+ case 116: /* alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name */
+{ yylhsminor.yy686 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy686, TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES, &yymsp[-1].minor.yy113, yymsp[0].minor.yy490); }
+ yymsp[-4].minor.yy686 = yylhsminor.yy686;
break;
- case 116: /* alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name */
-{ yylhsminor.yy172 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy172, TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME, &yymsp[-1].minor.yy105, &yymsp[0].minor.yy105); }
- yymsp[-4].minor.yy172 = yylhsminor.yy172;
+ case 117: /* alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name */
+{ yylhsminor.yy686 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy686, TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME, &yymsp[-1].minor.yy113, &yymsp[0].minor.yy113); }
+ yymsp[-4].minor.yy686 = yylhsminor.yy686;
break;
- case 117: /* alter_table_clause ::= full_table_name ADD TAG column_name type_name */
-{ yylhsminor.yy172 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy172, TSDB_ALTER_TABLE_ADD_TAG, &yymsp[-1].minor.yy105, yymsp[0].minor.yy248); }
- yymsp[-4].minor.yy172 = yylhsminor.yy172;
+ case 118: /* alter_table_clause ::= full_table_name ADD TAG column_name type_name */
+{ yylhsminor.yy686 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy686, TSDB_ALTER_TABLE_ADD_TAG, &yymsp[-1].minor.yy113, yymsp[0].minor.yy490); }
+ yymsp[-4].minor.yy686 = yylhsminor.yy686;
break;
- case 118: /* alter_table_clause ::= full_table_name DROP TAG column_name */
-{ yylhsminor.yy172 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy172, TSDB_ALTER_TABLE_DROP_TAG, &yymsp[0].minor.yy105); }
- yymsp[-3].minor.yy172 = yylhsminor.yy172;
+ case 119: /* alter_table_clause ::= full_table_name DROP TAG column_name */
+{ yylhsminor.yy686 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy686, TSDB_ALTER_TABLE_DROP_TAG, &yymsp[0].minor.yy113); }
+ yymsp[-3].minor.yy686 = yylhsminor.yy686;
break;
- case 119: /* alter_table_clause ::= full_table_name MODIFY TAG column_name type_name */
-{ yylhsminor.yy172 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy172, TSDB_ALTER_TABLE_UPDATE_TAG_BYTES, &yymsp[-1].minor.yy105, yymsp[0].minor.yy248); }
- yymsp[-4].minor.yy172 = yylhsminor.yy172;
+ case 120: /* alter_table_clause ::= full_table_name MODIFY TAG column_name type_name */
+{ yylhsminor.yy686 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy686, TSDB_ALTER_TABLE_UPDATE_TAG_BYTES, &yymsp[-1].minor.yy113, yymsp[0].minor.yy490); }
+ yymsp[-4].minor.yy686 = yylhsminor.yy686;
break;
- case 120: /* alter_table_clause ::= full_table_name RENAME TAG column_name column_name */
-{ yylhsminor.yy172 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy172, TSDB_ALTER_TABLE_UPDATE_TAG_NAME, &yymsp[-1].minor.yy105, &yymsp[0].minor.yy105); }
- yymsp[-4].minor.yy172 = yylhsminor.yy172;
+ case 121: /* alter_table_clause ::= full_table_name RENAME TAG column_name column_name */
+{ yylhsminor.yy686 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy686, TSDB_ALTER_TABLE_UPDATE_TAG_NAME, &yymsp[-1].minor.yy113, &yymsp[0].minor.yy113); }
+ yymsp[-4].minor.yy686 = yylhsminor.yy686;
break;
- case 121: /* alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal */
-{ yylhsminor.yy172 = createAlterTableSetTag(pCxt, yymsp[-5].minor.yy172, &yymsp[-2].minor.yy105, yymsp[0].minor.yy172); }
- yymsp[-5].minor.yy172 = yylhsminor.yy172;
+ case 122: /* alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal */
+{ yylhsminor.yy686 = createAlterTableSetTag(pCxt, yymsp[-5].minor.yy686, &yymsp[-2].minor.yy113, yymsp[0].minor.yy686); }
+ yymsp[-5].minor.yy686 = yylhsminor.yy686;
break;
- case 123: /* multi_create_clause ::= multi_create_clause create_subtable_clause */
- case 126: /* multi_drop_clause ::= multi_drop_clause drop_table_clause */ yytestcase(yyruleno==126);
-{ yylhsminor.yy60 = addNodeToList(pCxt, yymsp[-1].minor.yy60, yymsp[0].minor.yy172); }
- yymsp[-1].minor.yy60 = yylhsminor.yy60;
+ case 124: /* multi_create_clause ::= multi_create_clause create_subtable_clause */
+ case 127: /* multi_drop_clause ::= multi_drop_clause drop_table_clause */ yytestcase(yyruleno==127);
+{ yylhsminor.yy670 = addNodeToList(pCxt, yymsp[-1].minor.yy670, yymsp[0].minor.yy686); }
+ yymsp[-1].minor.yy670 = yylhsminor.yy670;
break;
- case 124: /* create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_tags_opt TAGS NK_LP literal_list NK_RP table_options */
-{ yylhsminor.yy172 = createCreateSubTableClause(pCxt, yymsp[-9].minor.yy617, yymsp[-8].minor.yy172, yymsp[-6].minor.yy172, yymsp[-5].minor.yy60, yymsp[-2].minor.yy60, yymsp[0].minor.yy172); }
- yymsp[-9].minor.yy172 = yylhsminor.yy172;
+ case 125: /* create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_tags_opt TAGS NK_LP literal_list NK_RP table_options */
+{ yylhsminor.yy686 = createCreateSubTableClause(pCxt, yymsp[-9].minor.yy131, yymsp[-8].minor.yy686, yymsp[-6].minor.yy686, yymsp[-5].minor.yy670, yymsp[-2].minor.yy670, yymsp[0].minor.yy686); }
+ yymsp[-9].minor.yy686 = yylhsminor.yy686;
break;
- case 127: /* drop_table_clause ::= exists_opt full_table_name */
-{ yylhsminor.yy172 = createDropTableClause(pCxt, yymsp[-1].minor.yy617, yymsp[0].minor.yy172); }
- yymsp[-1].minor.yy172 = yylhsminor.yy172;
+ case 128: /* drop_table_clause ::= exists_opt full_table_name */
+{ yylhsminor.yy686 = createDropTableClause(pCxt, yymsp[-1].minor.yy131, yymsp[0].minor.yy686); }
+ yymsp[-1].minor.yy686 = yylhsminor.yy686;
break;
- case 128: /* specific_tags_opt ::= */
- case 159: /* tags_def_opt ::= */ yytestcase(yyruleno==159);
- case 402: /* partition_by_clause_opt ::= */ yytestcase(yyruleno==402);
- case 419: /* group_by_clause_opt ::= */ yytestcase(yyruleno==419);
- case 431: /* order_by_clause_opt ::= */ yytestcase(yyruleno==431);
-{ yymsp[1].minor.yy60 = NULL; }
+ case 129: /* specific_tags_opt ::= */
+ case 160: /* tags_def_opt ::= */ yytestcase(yyruleno==160);
+ case 401: /* partition_by_clause_opt ::= */ yytestcase(yyruleno==401);
+ case 418: /* group_by_clause_opt ::= */ yytestcase(yyruleno==418);
+ case 430: /* order_by_clause_opt ::= */ yytestcase(yyruleno==430);
+{ yymsp[1].minor.yy670 = NULL; }
break;
- case 129: /* specific_tags_opt ::= NK_LP col_name_list NK_RP */
-{ yymsp[-2].minor.yy60 = yymsp[-1].minor.yy60; }
+ case 130: /* specific_tags_opt ::= NK_LP col_name_list NK_RP */
+{ yymsp[-2].minor.yy670 = yymsp[-1].minor.yy670; }
break;
- case 130: /* full_table_name ::= table_name */
-{ yylhsminor.yy172 = createRealTableNode(pCxt, NULL, &yymsp[0].minor.yy105, NULL); }
- yymsp[0].minor.yy172 = yylhsminor.yy172;
+ case 131: /* full_table_name ::= table_name */
+{ yylhsminor.yy686 = createRealTableNode(pCxt, NULL, &yymsp[0].minor.yy113, NULL); }
+ yymsp[0].minor.yy686 = yylhsminor.yy686;
break;
- case 131: /* full_table_name ::= db_name NK_DOT table_name */
-{ yylhsminor.yy172 = createRealTableNode(pCxt, &yymsp[-2].minor.yy105, &yymsp[0].minor.yy105, NULL); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+ case 132: /* full_table_name ::= db_name NK_DOT table_name */
+{ yylhsminor.yy686 = createRealTableNode(pCxt, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy113, NULL); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
- case 134: /* column_def ::= column_name type_name */
-{ yylhsminor.yy172 = createColumnDefNode(pCxt, &yymsp[-1].minor.yy105, yymsp[0].minor.yy248, NULL); }
- yymsp[-1].minor.yy172 = yylhsminor.yy172;
+ case 135: /* column_def ::= column_name type_name */
+{ yylhsminor.yy686 = createColumnDefNode(pCxt, &yymsp[-1].minor.yy113, yymsp[0].minor.yy490, NULL); }
+ yymsp[-1].minor.yy686 = yylhsminor.yy686;
break;
- case 135: /* column_def ::= column_name type_name COMMENT NK_STRING */
-{ yylhsminor.yy172 = createColumnDefNode(pCxt, &yymsp[-3].minor.yy105, yymsp[-2].minor.yy248, &yymsp[0].minor.yy0); }
- yymsp[-3].minor.yy172 = yylhsminor.yy172;
+ case 136: /* column_def ::= column_name type_name COMMENT NK_STRING */
+{ yylhsminor.yy686 = createColumnDefNode(pCxt, &yymsp[-3].minor.yy113, yymsp[-2].minor.yy490, &yymsp[0].minor.yy0); }
+ yymsp[-3].minor.yy686 = yylhsminor.yy686;
break;
- case 136: /* type_name ::= BOOL */
-{ yymsp[0].minor.yy248 = createDataType(TSDB_DATA_TYPE_BOOL); }
+ case 137: /* type_name ::= BOOL */
+{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_BOOL); }
break;
- case 137: /* type_name ::= TINYINT */
-{ yymsp[0].minor.yy248 = createDataType(TSDB_DATA_TYPE_TINYINT); }
+ case 138: /* type_name ::= TINYINT */
+{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_TINYINT); }
break;
- case 138: /* type_name ::= SMALLINT */
-{ yymsp[0].minor.yy248 = createDataType(TSDB_DATA_TYPE_SMALLINT); }
+ case 139: /* type_name ::= SMALLINT */
+{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_SMALLINT); }
break;
- case 139: /* type_name ::= INT */
- case 140: /* type_name ::= INTEGER */ yytestcase(yyruleno==140);
-{ yymsp[0].minor.yy248 = createDataType(TSDB_DATA_TYPE_INT); }
+ case 140: /* type_name ::= INT */
+ case 141: /* type_name ::= INTEGER */ yytestcase(yyruleno==141);
+{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_INT); }
break;
- case 141: /* type_name ::= BIGINT */
-{ yymsp[0].minor.yy248 = createDataType(TSDB_DATA_TYPE_BIGINT); }
+ case 142: /* type_name ::= BIGINT */
+{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_BIGINT); }
break;
- case 142: /* type_name ::= FLOAT */
-{ yymsp[0].minor.yy248 = createDataType(TSDB_DATA_TYPE_FLOAT); }
+ case 143: /* type_name ::= FLOAT */
+{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_FLOAT); }
break;
- case 143: /* type_name ::= DOUBLE */
-{ yymsp[0].minor.yy248 = createDataType(TSDB_DATA_TYPE_DOUBLE); }
+ case 144: /* type_name ::= DOUBLE */
+{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_DOUBLE); }
break;
- case 144: /* type_name ::= BINARY NK_LP NK_INTEGER NK_RP */
-{ yymsp[-3].minor.yy248 = createVarLenDataType(TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy0); }
+ case 145: /* type_name ::= BINARY NK_LP NK_INTEGER NK_RP */
+{ yymsp[-3].minor.yy490 = createVarLenDataType(TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy0); }
break;
- case 145: /* type_name ::= TIMESTAMP */
-{ yymsp[0].minor.yy248 = createDataType(TSDB_DATA_TYPE_TIMESTAMP); }
+ case 146: /* type_name ::= TIMESTAMP */
+{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_TIMESTAMP); }
break;
- case 146: /* type_name ::= NCHAR NK_LP NK_INTEGER NK_RP */
-{ yymsp[-3].minor.yy248 = createVarLenDataType(TSDB_DATA_TYPE_NCHAR, &yymsp[-1].minor.yy0); }
+ case 147: /* type_name ::= NCHAR NK_LP NK_INTEGER NK_RP */
+{ yymsp[-3].minor.yy490 = createVarLenDataType(TSDB_DATA_TYPE_NCHAR, &yymsp[-1].minor.yy0); }
break;
- case 147: /* type_name ::= TINYINT UNSIGNED */
-{ yymsp[-1].minor.yy248 = createDataType(TSDB_DATA_TYPE_UTINYINT); }
+ case 148: /* type_name ::= TINYINT UNSIGNED */
+{ yymsp[-1].minor.yy490 = createDataType(TSDB_DATA_TYPE_UTINYINT); }
break;
- case 148: /* type_name ::= SMALLINT UNSIGNED */
-{ yymsp[-1].minor.yy248 = createDataType(TSDB_DATA_TYPE_USMALLINT); }
+ case 149: /* type_name ::= SMALLINT UNSIGNED */
+{ yymsp[-1].minor.yy490 = createDataType(TSDB_DATA_TYPE_USMALLINT); }
break;
- case 149: /* type_name ::= INT UNSIGNED */
-{ yymsp[-1].minor.yy248 = createDataType(TSDB_DATA_TYPE_UINT); }
+ case 150: /* type_name ::= INT UNSIGNED */
+{ yymsp[-1].minor.yy490 = createDataType(TSDB_DATA_TYPE_UINT); }
break;
- case 150: /* type_name ::= BIGINT UNSIGNED */
-{ yymsp[-1].minor.yy248 = createDataType(TSDB_DATA_TYPE_UBIGINT); }
+ case 151: /* type_name ::= BIGINT UNSIGNED */
+{ yymsp[-1].minor.yy490 = createDataType(TSDB_DATA_TYPE_UBIGINT); }
break;
- case 151: /* type_name ::= JSON */
-{ yymsp[0].minor.yy248 = createDataType(TSDB_DATA_TYPE_JSON); }
+ case 152: /* type_name ::= JSON */
+{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_JSON); }
break;
- case 152: /* type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP */
-{ yymsp[-3].minor.yy248 = createVarLenDataType(TSDB_DATA_TYPE_VARCHAR, &yymsp[-1].minor.yy0); }
+ case 153: /* type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP */
+{ yymsp[-3].minor.yy490 = createVarLenDataType(TSDB_DATA_TYPE_VARCHAR, &yymsp[-1].minor.yy0); }
break;
- case 153: /* type_name ::= MEDIUMBLOB */
-{ yymsp[0].minor.yy248 = createDataType(TSDB_DATA_TYPE_MEDIUMBLOB); }
+ case 154: /* type_name ::= MEDIUMBLOB */
+{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_MEDIUMBLOB); }
break;
- case 154: /* type_name ::= BLOB */
-{ yymsp[0].minor.yy248 = createDataType(TSDB_DATA_TYPE_BLOB); }
+ case 155: /* type_name ::= BLOB */
+{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_BLOB); }
break;
- case 155: /* type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP */
-{ yymsp[-3].minor.yy248 = createVarLenDataType(TSDB_DATA_TYPE_VARBINARY, &yymsp[-1].minor.yy0); }
+ case 156: /* type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP */
+{ yymsp[-3].minor.yy490 = createVarLenDataType(TSDB_DATA_TYPE_VARBINARY, &yymsp[-1].minor.yy0); }
break;
- case 156: /* type_name ::= DECIMAL */
-{ yymsp[0].minor.yy248 = createDataType(TSDB_DATA_TYPE_DECIMAL); }
+ case 157: /* type_name ::= DECIMAL */
+{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_DECIMAL); }
break;
- case 157: /* type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP */
-{ yymsp[-3].minor.yy248 = createDataType(TSDB_DATA_TYPE_DECIMAL); }
+ case 158: /* type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP */
+{ yymsp[-3].minor.yy490 = createDataType(TSDB_DATA_TYPE_DECIMAL); }
break;
- case 158: /* type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP */
-{ yymsp[-5].minor.yy248 = createDataType(TSDB_DATA_TYPE_DECIMAL); }
+ case 159: /* type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP */
+{ yymsp[-5].minor.yy490 = createDataType(TSDB_DATA_TYPE_DECIMAL); }
break;
- case 160: /* tags_def_opt ::= tags_def */
- case 338: /* star_func_para_list ::= other_para_list */ yytestcase(yyruleno==338);
- case 393: /* select_list ::= select_sublist */ yytestcase(yyruleno==393);
-{ yylhsminor.yy60 = yymsp[0].minor.yy60; }
- yymsp[0].minor.yy60 = yylhsminor.yy60;
+ case 161: /* tags_def_opt ::= tags_def */
+ case 337: /* star_func_para_list ::= other_para_list */ yytestcase(yyruleno==337);
+ case 392: /* select_list ::= select_sublist */ yytestcase(yyruleno==392);
+{ yylhsminor.yy670 = yymsp[0].minor.yy670; }
+ yymsp[0].minor.yy670 = yylhsminor.yy670;
break;
- case 161: /* tags_def ::= TAGS NK_LP column_def_list NK_RP */
-{ yymsp[-3].minor.yy60 = yymsp[-1].minor.yy60; }
+ case 162: /* tags_def ::= TAGS NK_LP column_def_list NK_RP */
+{ yymsp[-3].minor.yy670 = yymsp[-1].minor.yy670; }
break;
- case 162: /* table_options ::= */
-{ yymsp[1].minor.yy172 = createDefaultTableOptions(pCxt); }
+ case 163: /* table_options ::= */
+{ yymsp[1].minor.yy686 = createDefaultTableOptions(pCxt); }
break;
- case 163: /* table_options ::= table_options COMMENT NK_STRING */
-{ yylhsminor.yy172 = setTableOption(pCxt, yymsp[-2].minor.yy172, TABLE_OPTION_COMMENT, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
- break;
- case 164: /* table_options ::= table_options DELAY NK_INTEGER */
-{ yylhsminor.yy172 = setTableOption(pCxt, yymsp[-2].minor.yy172, TABLE_OPTION_DELAY, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+ case 164: /* table_options ::= table_options COMMENT NK_STRING */
+{ yylhsminor.yy686 = setTableOption(pCxt, yymsp[-2].minor.yy686, TABLE_OPTION_COMMENT, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
case 165: /* table_options ::= table_options FILE_FACTOR NK_FLOAT */
-{ yylhsminor.yy172 = setTableOption(pCxt, yymsp[-2].minor.yy172, TABLE_OPTION_FILE_FACTOR, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+{ yylhsminor.yy686 = setTableOption(pCxt, yymsp[-2].minor.yy686, TABLE_OPTION_FILE_FACTOR, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
case 166: /* table_options ::= table_options ROLLUP NK_LP func_name_list NK_RP */
-{ yylhsminor.yy172 = setTableOption(pCxt, yymsp[-4].minor.yy172, TABLE_OPTION_ROLLUP, yymsp[-1].minor.yy60); }
- yymsp[-4].minor.yy172 = yylhsminor.yy172;
+{ yylhsminor.yy686 = setTableOption(pCxt, yymsp[-4].minor.yy686, TABLE_OPTION_ROLLUP, yymsp[-1].minor.yy670); }
+ yymsp[-4].minor.yy686 = yylhsminor.yy686;
break;
case 167: /* table_options ::= table_options TTL NK_INTEGER */
-{ yylhsminor.yy172 = setTableOption(pCxt, yymsp[-2].minor.yy172, TABLE_OPTION_TTL, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+{ yylhsminor.yy686 = setTableOption(pCxt, yymsp[-2].minor.yy686, TABLE_OPTION_TTL, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
case 168: /* table_options ::= table_options SMA NK_LP col_name_list NK_RP */
-{ yylhsminor.yy172 = setTableOption(pCxt, yymsp[-4].minor.yy172, TABLE_OPTION_SMA, yymsp[-1].minor.yy60); }
- yymsp[-4].minor.yy172 = yylhsminor.yy172;
+{ yylhsminor.yy686 = setTableOption(pCxt, yymsp[-4].minor.yy686, TABLE_OPTION_SMA, yymsp[-1].minor.yy670); }
+ yymsp[-4].minor.yy686 = yylhsminor.yy686;
break;
case 169: /* alter_table_options ::= alter_table_option */
-{ yylhsminor.yy172 = createAlterTableOptions(pCxt); yylhsminor.yy172 = setTableOption(pCxt, yylhsminor.yy172, yymsp[0].minor.yy609.type, &yymsp[0].minor.yy609.val); }
- yymsp[0].minor.yy172 = yylhsminor.yy172;
+{ yylhsminor.yy686 = createAlterTableOptions(pCxt); yylhsminor.yy686 = setTableOption(pCxt, yylhsminor.yy686, yymsp[0].minor.yy53.type, &yymsp[0].minor.yy53.val); }
+ yymsp[0].minor.yy686 = yylhsminor.yy686;
break;
case 170: /* alter_table_options ::= alter_table_options alter_table_option */
-{ yylhsminor.yy172 = setTableOption(pCxt, yymsp[-1].minor.yy172, yymsp[0].minor.yy609.type, &yymsp[0].minor.yy609.val); }
- yymsp[-1].minor.yy172 = yylhsminor.yy172;
+{ yylhsminor.yy686 = setTableOption(pCxt, yymsp[-1].minor.yy686, yymsp[0].minor.yy53.type, &yymsp[0].minor.yy53.val); }
+ yymsp[-1].minor.yy686 = yylhsminor.yy686;
break;
case 171: /* alter_table_option ::= COMMENT NK_STRING */
-{ yymsp[-1].minor.yy609.type = TABLE_OPTION_COMMENT; yymsp[-1].minor.yy609.val = yymsp[0].minor.yy0; }
+{ yymsp[-1].minor.yy53.type = TABLE_OPTION_COMMENT; yymsp[-1].minor.yy53.val = yymsp[0].minor.yy0; }
break;
case 172: /* alter_table_option ::= TTL NK_INTEGER */
-{ yymsp[-1].minor.yy609.type = TABLE_OPTION_TTL; yymsp[-1].minor.yy609.val = yymsp[0].minor.yy0; }
+{ yymsp[-1].minor.yy53.type = TABLE_OPTION_TTL; yymsp[-1].minor.yy53.val = yymsp[0].minor.yy0; }
break;
case 175: /* col_name ::= column_name */
-{ yylhsminor.yy172 = createColumnNode(pCxt, NULL, &yymsp[0].minor.yy105); }
- yymsp[0].minor.yy172 = yylhsminor.yy172;
+{ yylhsminor.yy686 = createColumnNode(pCxt, NULL, &yymsp[0].minor.yy113); }
+ yymsp[0].minor.yy686 = yylhsminor.yy686;
break;
case 176: /* cmd ::= SHOW DNODES */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_DNODES_STMT, NULL, NULL); }
@@ -3673,13 +4147,13 @@ static YYACTIONTYPE yy_reduce(
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_DATABASES_STMT, NULL, NULL); }
break;
case 179: /* cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt */
-{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_TABLES_STMT, yymsp[-2].minor.yy172, yymsp[0].minor.yy172); }
+{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_TABLES_STMT, yymsp[-2].minor.yy686, yymsp[0].minor.yy686); }
break;
case 180: /* cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt */
-{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_STABLES_STMT, yymsp[-2].minor.yy172, yymsp[0].minor.yy172); }
+{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_STABLES_STMT, yymsp[-2].minor.yy686, yymsp[0].minor.yy686); }
break;
case 181: /* cmd ::= SHOW db_name_cond_opt VGROUPS */
-{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_VGROUPS_STMT, yymsp[-1].minor.yy172, NULL); }
+{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_VGROUPS_STMT, yymsp[-1].minor.yy686, NULL); }
break;
case 182: /* cmd ::= SHOW MNODES */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_MNODES_STMT, NULL, NULL); }
@@ -3694,7 +4168,7 @@ static YYACTIONTYPE yy_reduce(
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_FUNCTIONS_STMT, NULL, NULL); }
break;
case 186: /* cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt */
-{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_INDEXES_STMT, yymsp[-1].minor.yy172, yymsp[0].minor.yy172); }
+{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_INDEXES_STMT, yymsp[-1].minor.yy686, yymsp[0].minor.yy686); }
break;
case 187: /* cmd ::= SHOW STREAMS */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_STREAMS_STMT, NULL, NULL); }
@@ -3713,13 +4187,13 @@ static YYACTIONTYPE yy_reduce(
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_LICENCE_STMT, NULL, NULL); }
break;
case 193: /* cmd ::= SHOW CREATE DATABASE db_name */
-{ pCxt->pRootNode = createShowCreateDatabaseStmt(pCxt, &yymsp[0].minor.yy105); }
+{ pCxt->pRootNode = createShowCreateDatabaseStmt(pCxt, &yymsp[0].minor.yy113); }
break;
case 194: /* cmd ::= SHOW CREATE TABLE full_table_name */
-{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_TABLE_STMT, yymsp[0].minor.yy172); }
+{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_TABLE_STMT, yymsp[0].minor.yy686); }
break;
case 195: /* cmd ::= SHOW CREATE STABLE full_table_name */
-{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_STABLE_STMT, yymsp[0].minor.yy172); }
+{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_STABLE_STMT, yymsp[0].minor.yy686); }
break;
case 196: /* cmd ::= SHOW QUERIES */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_QUERIES_STMT, NULL, NULL); }
@@ -3747,683 +4221,674 @@ static YYACTIONTYPE yy_reduce(
break;
case 204: /* db_name_cond_opt ::= */
case 209: /* from_db_opt ::= */ yytestcase(yyruleno==209);
-{ yymsp[1].minor.yy172 = createDefaultDatabaseCondValue(pCxt); }
+{ yymsp[1].minor.yy686 = createDefaultDatabaseCondValue(pCxt); }
break;
case 205: /* db_name_cond_opt ::= db_name NK_DOT */
-{ yylhsminor.yy172 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy105); }
- yymsp[-1].minor.yy172 = yylhsminor.yy172;
+{ yylhsminor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy113); }
+ yymsp[-1].minor.yy686 = yylhsminor.yy686;
break;
case 206: /* like_pattern_opt ::= */
case 217: /* index_options ::= */ yytestcase(yyruleno==217);
- case 248: /* into_opt ::= */ yytestcase(yyruleno==248);
- case 400: /* where_clause_opt ::= */ yytestcase(yyruleno==400);
- case 404: /* twindow_clause_opt ::= */ yytestcase(yyruleno==404);
- case 409: /* sliding_opt ::= */ yytestcase(yyruleno==409);
- case 411: /* fill_opt ::= */ yytestcase(yyruleno==411);
- case 423: /* having_clause_opt ::= */ yytestcase(yyruleno==423);
- case 433: /* slimit_clause_opt ::= */ yytestcase(yyruleno==433);
- case 437: /* limit_clause_opt ::= */ yytestcase(yyruleno==437);
-{ yymsp[1].minor.yy172 = NULL; }
+ case 246: /* into_opt ::= */ yytestcase(yyruleno==246);
+ case 399: /* where_clause_opt ::= */ yytestcase(yyruleno==399);
+ case 403: /* twindow_clause_opt ::= */ yytestcase(yyruleno==403);
+ case 408: /* sliding_opt ::= */ yytestcase(yyruleno==408);
+ case 410: /* fill_opt ::= */ yytestcase(yyruleno==410);
+ case 422: /* having_clause_opt ::= */ yytestcase(yyruleno==422);
+ case 432: /* slimit_clause_opt ::= */ yytestcase(yyruleno==432);
+ case 436: /* limit_clause_opt ::= */ yytestcase(yyruleno==436);
+{ yymsp[1].minor.yy686 = NULL; }
break;
case 207: /* like_pattern_opt ::= LIKE NK_STRING */
-{ yymsp[-1].minor.yy172 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); }
+{ yymsp[-1].minor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); }
break;
case 208: /* table_name_cond ::= table_name */
-{ yylhsminor.yy172 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy105); }
- yymsp[0].minor.yy172 = yylhsminor.yy172;
+{ yylhsminor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy113); }
+ yymsp[0].minor.yy686 = yylhsminor.yy686;
break;
case 210: /* from_db_opt ::= FROM db_name */
-{ yymsp[-1].minor.yy172 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy105); }
+{ yymsp[-1].minor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy113); }
break;
case 213: /* func_name ::= function_name */
-{ yylhsminor.yy172 = createFunctionNode(pCxt, &yymsp[0].minor.yy105, NULL); }
- yymsp[0].minor.yy172 = yylhsminor.yy172;
+{ yylhsminor.yy686 = createFunctionNode(pCxt, &yymsp[0].minor.yy113, NULL); }
+ yymsp[0].minor.yy686 = yylhsminor.yy686;
break;
case 214: /* cmd ::= CREATE SMA INDEX not_exists_opt index_name ON table_name index_options */
-{ pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_SMA, yymsp[-4].minor.yy617, &yymsp[-3].minor.yy105, &yymsp[-1].minor.yy105, NULL, yymsp[0].minor.yy172); }
+{ pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_SMA, yymsp[-4].minor.yy131, &yymsp[-3].minor.yy113, &yymsp[-1].minor.yy113, NULL, yymsp[0].minor.yy686); }
break;
case 215: /* cmd ::= CREATE FULLTEXT INDEX not_exists_opt index_name ON table_name NK_LP col_name_list NK_RP */
-{ pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_FULLTEXT, yymsp[-6].minor.yy617, &yymsp[-5].minor.yy105, &yymsp[-3].minor.yy105, yymsp[-1].minor.yy60, NULL); }
+{ pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_FULLTEXT, yymsp[-6].minor.yy131, &yymsp[-5].minor.yy113, &yymsp[-3].minor.yy113, yymsp[-1].minor.yy670, NULL); }
break;
case 216: /* cmd ::= DROP INDEX exists_opt index_name ON table_name */
-{ pCxt->pRootNode = createDropIndexStmt(pCxt, yymsp[-3].minor.yy617, &yymsp[-2].minor.yy105, &yymsp[0].minor.yy105); }
+{ pCxt->pRootNode = createDropIndexStmt(pCxt, yymsp[-3].minor.yy131, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy113); }
break;
case 218: /* index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt */
-{ yymsp[-8].minor.yy172 = createIndexOption(pCxt, yymsp[-6].minor.yy60, releaseRawExprNode(pCxt, yymsp[-2].minor.yy172), NULL, yymsp[0].minor.yy172); }
+{ yymsp[-8].minor.yy686 = createIndexOption(pCxt, yymsp[-6].minor.yy670, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), NULL, yymsp[0].minor.yy686); }
break;
case 219: /* index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt */
-{ yymsp[-10].minor.yy172 = createIndexOption(pCxt, yymsp[-8].minor.yy60, releaseRawExprNode(pCxt, yymsp[-4].minor.yy172), releaseRawExprNode(pCxt, yymsp[-2].minor.yy172), yymsp[0].minor.yy172); }
+{ yymsp[-10].minor.yy686 = createIndexOption(pCxt, yymsp[-8].minor.yy670, releaseRawExprNode(pCxt, yymsp[-4].minor.yy686), releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), yymsp[0].minor.yy686); }
break;
case 222: /* func ::= function_name NK_LP expression_list NK_RP */
-{ yylhsminor.yy172 = createFunctionNode(pCxt, &yymsp[-3].minor.yy105, yymsp[-1].minor.yy60); }
- yymsp[-3].minor.yy172 = yylhsminor.yy172;
- break;
- case 223: /* cmd ::= CREATE TOPIC not_exists_opt topic_name topic_options AS query_expression */
-{ pCxt->pRootNode = createCreateTopicStmt(pCxt, yymsp[-4].minor.yy617, &yymsp[-3].minor.yy105, yymsp[0].minor.yy172, NULL, yymsp[-2].minor.yy172); }
- break;
- case 224: /* cmd ::= CREATE TOPIC not_exists_opt topic_name topic_options AS db_name */
-{ pCxt->pRootNode = createCreateTopicStmt(pCxt, yymsp[-4].minor.yy617, &yymsp[-3].minor.yy105, NULL, &yymsp[0].minor.yy105, yymsp[-2].minor.yy172); }
+{ yylhsminor.yy686 = createFunctionNode(pCxt, &yymsp[-3].minor.yy113, yymsp[-1].minor.yy670); }
+ yymsp[-3].minor.yy686 = yylhsminor.yy686;
break;
- case 225: /* cmd ::= DROP TOPIC exists_opt topic_name */
-{ pCxt->pRootNode = createDropTopicStmt(pCxt, yymsp[-1].minor.yy617, &yymsp[0].minor.yy105); }
+ case 223: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS query_expression */
+{ pCxt->pRootNode = createCreateTopicStmt(pCxt, yymsp[-3].minor.yy131, &yymsp[-2].minor.yy113, yymsp[0].minor.yy686, NULL, NULL); }
break;
- case 226: /* topic_options ::= */
-{ yymsp[1].minor.yy172 = createTopicOptions(pCxt); }
+ case 224: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS DATABASE db_name */
+{ pCxt->pRootNode = createCreateTopicStmt(pCxt, yymsp[-4].minor.yy131, &yymsp[-3].minor.yy113, NULL, &yymsp[0].minor.yy113, NULL); }
break;
- case 227: /* topic_options ::= topic_options WITH TABLE */
-{ ((STopicOptions*)yymsp[-2].minor.yy172)->withTable = true; yylhsminor.yy172 = yymsp[-2].minor.yy172; }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+ case 225: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS STABLE full_table_name */
+{ pCxt->pRootNode = createCreateTopicStmt(pCxt, yymsp[-4].minor.yy131, &yymsp[-3].minor.yy113, NULL, NULL, yymsp[0].minor.yy686); }
break;
- case 228: /* topic_options ::= topic_options WITH SCHEMA */
-{ ((STopicOptions*)yymsp[-2].minor.yy172)->withSchema = true; yylhsminor.yy172 = yymsp[-2].minor.yy172; }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+ case 226: /* cmd ::= DROP TOPIC exists_opt topic_name */
+{ pCxt->pRootNode = createDropTopicStmt(pCxt, yymsp[-1].minor.yy131, &yymsp[0].minor.yy113); }
break;
- case 229: /* topic_options ::= topic_options WITH TAG */
-{ ((STopicOptions*)yymsp[-2].minor.yy172)->withTag = true; yylhsminor.yy172 = yymsp[-2].minor.yy172; }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+ case 227: /* cmd ::= DROP CONSUMER GROUP exists_opt cgroup_name ON topic_name */
+{ pCxt->pRootNode = createDropCGroupStmt(pCxt, yymsp[-3].minor.yy131, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy113); }
break;
- case 230: /* cmd ::= DESC full_table_name */
- case 231: /* cmd ::= DESCRIBE full_table_name */ yytestcase(yyruleno==231);
-{ pCxt->pRootNode = createDescribeStmt(pCxt, yymsp[0].minor.yy172); }
+ case 228: /* cmd ::= DESC full_table_name */
+ case 229: /* cmd ::= DESCRIBE full_table_name */ yytestcase(yyruleno==229);
+{ pCxt->pRootNode = createDescribeStmt(pCxt, yymsp[0].minor.yy686); }
break;
- case 232: /* cmd ::= RESET QUERY CACHE */
+ case 230: /* cmd ::= RESET QUERY CACHE */
{ pCxt->pRootNode = createResetQueryCacheStmt(pCxt); }
break;
- case 233: /* cmd ::= EXPLAIN analyze_opt explain_options query_expression */
-{ pCxt->pRootNode = createExplainStmt(pCxt, yymsp[-2].minor.yy617, yymsp[-1].minor.yy172, yymsp[0].minor.yy172); }
+ case 231: /* cmd ::= EXPLAIN analyze_opt explain_options query_expression */
+{ pCxt->pRootNode = createExplainStmt(pCxt, yymsp[-2].minor.yy131, yymsp[-1].minor.yy686, yymsp[0].minor.yy686); }
break;
- case 235: /* analyze_opt ::= ANALYZE */
- case 243: /* agg_func_opt ::= AGGREGATE */ yytestcase(yyruleno==243);
- case 390: /* set_quantifier_opt ::= DISTINCT */ yytestcase(yyruleno==390);
-{ yymsp[0].minor.yy617 = true; }
+ case 233: /* analyze_opt ::= ANALYZE */
+ case 241: /* agg_func_opt ::= AGGREGATE */ yytestcase(yyruleno==241);
+ case 389: /* set_quantifier_opt ::= DISTINCT */ yytestcase(yyruleno==389);
+{ yymsp[0].minor.yy131 = true; }
break;
- case 236: /* explain_options ::= */
-{ yymsp[1].minor.yy172 = createDefaultExplainOptions(pCxt); }
+ case 234: /* explain_options ::= */
+{ yymsp[1].minor.yy686 = createDefaultExplainOptions(pCxt); }
break;
- case 237: /* explain_options ::= explain_options VERBOSE NK_BOOL */
-{ yylhsminor.yy172 = setExplainVerbose(pCxt, yymsp[-2].minor.yy172, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+ case 235: /* explain_options ::= explain_options VERBOSE NK_BOOL */
+{ yylhsminor.yy686 = setExplainVerbose(pCxt, yymsp[-2].minor.yy686, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
- case 238: /* explain_options ::= explain_options RATIO NK_FLOAT */
-{ yylhsminor.yy172 = setExplainRatio(pCxt, yymsp[-2].minor.yy172, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+ case 236: /* explain_options ::= explain_options RATIO NK_FLOAT */
+{ yylhsminor.yy686 = setExplainRatio(pCxt, yymsp[-2].minor.yy686, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
- case 239: /* cmd ::= COMPACT VNODES IN NK_LP integer_list NK_RP */
-{ pCxt->pRootNode = createCompactStmt(pCxt, yymsp[-1].minor.yy60); }
+ case 237: /* cmd ::= COMPACT VNODES IN NK_LP integer_list NK_RP */
+{ pCxt->pRootNode = createCompactStmt(pCxt, yymsp[-1].minor.yy670); }
break;
- case 240: /* cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt */
-{ pCxt->pRootNode = createCreateFunctionStmt(pCxt, yymsp[-6].minor.yy617, yymsp[-8].minor.yy617, &yymsp[-5].minor.yy105, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy248, yymsp[0].minor.yy140); }
+ case 238: /* cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt */
+{ pCxt->pRootNode = createCreateFunctionStmt(pCxt, yymsp[-6].minor.yy131, yymsp[-8].minor.yy131, &yymsp[-5].minor.yy113, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy490, yymsp[0].minor.yy550); }
break;
- case 241: /* cmd ::= DROP FUNCTION exists_opt function_name */
-{ pCxt->pRootNode = createDropFunctionStmt(pCxt, yymsp[-1].minor.yy617, &yymsp[0].minor.yy105); }
+ case 239: /* cmd ::= DROP FUNCTION exists_opt function_name */
+{ pCxt->pRootNode = createDropFunctionStmt(pCxt, yymsp[-1].minor.yy131, &yymsp[0].minor.yy113); }
break;
- case 244: /* bufsize_opt ::= */
-{ yymsp[1].minor.yy140 = 0; }
+ case 242: /* bufsize_opt ::= */
+{ yymsp[1].minor.yy550 = 0; }
break;
- case 245: /* bufsize_opt ::= BUFSIZE NK_INTEGER */
-{ yymsp[-1].minor.yy140 = taosStr2Int32(yymsp[0].minor.yy0.z, NULL, 10); }
+ case 243: /* bufsize_opt ::= BUFSIZE NK_INTEGER */
+{ yymsp[-1].minor.yy550 = taosStr2Int32(yymsp[0].minor.yy0.z, NULL, 10); }
break;
- case 246: /* cmd ::= CREATE STREAM not_exists_opt stream_name stream_options into_opt AS query_expression */
-{ pCxt->pRootNode = createCreateStreamStmt(pCxt, yymsp[-5].minor.yy617, &yymsp[-4].minor.yy105, yymsp[-2].minor.yy172, yymsp[-3].minor.yy172, yymsp[0].minor.yy172); }
+ case 244: /* cmd ::= CREATE STREAM not_exists_opt stream_name stream_options into_opt AS query_expression */
+{ pCxt->pRootNode = createCreateStreamStmt(pCxt, yymsp[-5].minor.yy131, &yymsp[-4].minor.yy113, yymsp[-2].minor.yy686, yymsp[-3].minor.yy686, yymsp[0].minor.yy686); }
break;
- case 247: /* cmd ::= DROP STREAM exists_opt stream_name */
-{ pCxt->pRootNode = createDropStreamStmt(pCxt, yymsp[-1].minor.yy617, &yymsp[0].minor.yy105); }
+ case 245: /* cmd ::= DROP STREAM exists_opt stream_name */
+{ pCxt->pRootNode = createDropStreamStmt(pCxt, yymsp[-1].minor.yy131, &yymsp[0].minor.yy113); }
break;
- case 249: /* into_opt ::= INTO full_table_name */
- case 371: /* from_clause ::= FROM table_reference_list */ yytestcase(yyruleno==371);
- case 401: /* where_clause_opt ::= WHERE search_condition */ yytestcase(yyruleno==401);
- case 424: /* having_clause_opt ::= HAVING search_condition */ yytestcase(yyruleno==424);
-{ yymsp[-1].minor.yy172 = yymsp[0].minor.yy172; }
+ case 247: /* into_opt ::= INTO full_table_name */
+ case 370: /* from_clause ::= FROM table_reference_list */ yytestcase(yyruleno==370);
+ case 400: /* where_clause_opt ::= WHERE search_condition */ yytestcase(yyruleno==400);
+ case 423: /* having_clause_opt ::= HAVING search_condition */ yytestcase(yyruleno==423);
+{ yymsp[-1].minor.yy686 = yymsp[0].minor.yy686; }
break;
- case 250: /* stream_options ::= */
-{ yymsp[1].minor.yy172 = createStreamOptions(pCxt); }
+ case 248: /* stream_options ::= */
+{ yymsp[1].minor.yy686 = createStreamOptions(pCxt); }
break;
- case 251: /* stream_options ::= stream_options TRIGGER AT_ONCE */
-{ ((SStreamOptions*)yymsp[-2].minor.yy172)->triggerType = STREAM_TRIGGER_AT_ONCE; yylhsminor.yy172 = yymsp[-2].minor.yy172; }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+ case 249: /* stream_options ::= stream_options TRIGGER AT_ONCE */
+{ ((SStreamOptions*)yymsp[-2].minor.yy686)->triggerType = STREAM_TRIGGER_AT_ONCE; yylhsminor.yy686 = yymsp[-2].minor.yy686; }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
- case 252: /* stream_options ::= stream_options TRIGGER WINDOW_CLOSE */
-{ ((SStreamOptions*)yymsp[-2].minor.yy172)->triggerType = STREAM_TRIGGER_WINDOW_CLOSE; yylhsminor.yy172 = yymsp[-2].minor.yy172; }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+ case 250: /* stream_options ::= stream_options TRIGGER WINDOW_CLOSE */
+{ ((SStreamOptions*)yymsp[-2].minor.yy686)->triggerType = STREAM_TRIGGER_WINDOW_CLOSE; yylhsminor.yy686 = yymsp[-2].minor.yy686; }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
- case 253: /* stream_options ::= stream_options WATERMARK duration_literal */
-{ ((SStreamOptions*)yymsp[-2].minor.yy172)->pWatermark = releaseRawExprNode(pCxt, yymsp[0].minor.yy172); yylhsminor.yy172 = yymsp[-2].minor.yy172; }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+ case 251: /* stream_options ::= stream_options WATERMARK duration_literal */
+{ ((SStreamOptions*)yymsp[-2].minor.yy686)->pWatermark = releaseRawExprNode(pCxt, yymsp[0].minor.yy686); yylhsminor.yy686 = yymsp[-2].minor.yy686; }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
- case 254: /* cmd ::= KILL CONNECTION NK_INTEGER */
+ case 252: /* cmd ::= KILL CONNECTION NK_INTEGER */
{ pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_CONNECTION_STMT, &yymsp[0].minor.yy0); }
break;
- case 255: /* cmd ::= KILL QUERY NK_INTEGER */
+ case 253: /* cmd ::= KILL QUERY NK_INTEGER */
{ pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_QUERY_STMT, &yymsp[0].minor.yy0); }
break;
- case 256: /* cmd ::= KILL TRANSACTION NK_INTEGER */
+ case 254: /* cmd ::= KILL TRANSACTION NK_INTEGER */
{ pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_TRANSACTION_STMT, &yymsp[0].minor.yy0); }
break;
- case 257: /* cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */
+ case 255: /* cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */
{ pCxt->pRootNode = createMergeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); }
break;
- case 258: /* cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */
-{ pCxt->pRootNode = createRedistributeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, yymsp[0].minor.yy60); }
+ case 256: /* cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */
+{ pCxt->pRootNode = createRedistributeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, yymsp[0].minor.yy670); }
break;
- case 259: /* cmd ::= SPLIT VGROUP NK_INTEGER */
+ case 257: /* cmd ::= SPLIT VGROUP NK_INTEGER */
{ pCxt->pRootNode = createSplitVgroupStmt(pCxt, &yymsp[0].minor.yy0); }
break;
- case 260: /* dnode_list ::= DNODE NK_INTEGER */
-{ yymsp[-1].minor.yy60 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); }
- break;
- case 262: /* cmd ::= SYNCDB db_name REPLICA */
-{ pCxt->pRootNode = createSyncdbStmt(pCxt, &yymsp[-1].minor.yy105); }
- break;
- case 264: /* literal ::= NK_INTEGER */
-{ yylhsminor.yy172 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy172 = yylhsminor.yy172;
- break;
- case 265: /* literal ::= NK_FLOAT */
-{ yylhsminor.yy172 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy172 = yylhsminor.yy172;
- break;
- case 266: /* literal ::= NK_STRING */
-{ yylhsminor.yy172 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy172 = yylhsminor.yy172;
- break;
- case 267: /* literal ::= NK_BOOL */
-{ yylhsminor.yy172 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy172 = yylhsminor.yy172;
- break;
- case 268: /* literal ::= TIMESTAMP NK_STRING */
-{ yylhsminor.yy172 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0)); }
- yymsp[-1].minor.yy172 = yylhsminor.yy172;
- break;
- case 269: /* literal ::= duration_literal */
- case 279: /* signed_literal ::= signed */ yytestcase(yyruleno==279);
- case 298: /* expression ::= literal */ yytestcase(yyruleno==298);
- case 299: /* expression ::= pseudo_column */ yytestcase(yyruleno==299);
- case 300: /* expression ::= column_reference */ yytestcase(yyruleno==300);
- case 301: /* expression ::= function_expression */ yytestcase(yyruleno==301);
- case 302: /* expression ::= subquery */ yytestcase(yyruleno==302);
- case 327: /* function_expression ::= literal_func */ yytestcase(yyruleno==327);
- case 363: /* boolean_value_expression ::= boolean_primary */ yytestcase(yyruleno==363);
- case 367: /* boolean_primary ::= predicate */ yytestcase(yyruleno==367);
- case 369: /* common_expression ::= expression */ yytestcase(yyruleno==369);
- case 370: /* common_expression ::= boolean_value_expression */ yytestcase(yyruleno==370);
- case 372: /* table_reference_list ::= table_reference */ yytestcase(yyruleno==372);
- case 374: /* table_reference ::= table_primary */ yytestcase(yyruleno==374);
- case 375: /* table_reference ::= joined_table */ yytestcase(yyruleno==375);
- case 379: /* table_primary ::= parenthesized_joined_table */ yytestcase(yyruleno==379);
- case 426: /* query_expression_body ::= query_primary */ yytestcase(yyruleno==426);
- case 429: /* query_primary ::= query_specification */ yytestcase(yyruleno==429);
-{ yylhsminor.yy172 = yymsp[0].minor.yy172; }
- yymsp[0].minor.yy172 = yylhsminor.yy172;
- break;
- case 270: /* literal ::= NULL */
-{ yylhsminor.yy172 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy172 = yylhsminor.yy172;
- break;
- case 271: /* literal ::= NK_QUESTION */
-{ yylhsminor.yy172 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy172 = yylhsminor.yy172;
- break;
- case 272: /* duration_literal ::= NK_VARIABLE */
-{ yylhsminor.yy172 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy172 = yylhsminor.yy172;
- break;
- case 273: /* signed ::= NK_INTEGER */
-{ yylhsminor.yy172 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0); }
- yymsp[0].minor.yy172 = yylhsminor.yy172;
- break;
- case 274: /* signed ::= NK_PLUS NK_INTEGER */
-{ yymsp[-1].minor.yy172 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0); }
- break;
- case 275: /* signed ::= NK_MINUS NK_INTEGER */
+ case 258: /* dnode_list ::= DNODE NK_INTEGER */
+{ yymsp[-1].minor.yy670 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); }
+ break;
+ case 260: /* cmd ::= SYNCDB db_name REPLICA */
+{ pCxt->pRootNode = createSyncdbStmt(pCxt, &yymsp[-1].minor.yy113); }
+ break;
+ case 262: /* literal ::= NK_INTEGER */
+{ yylhsminor.yy686 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy686 = yylhsminor.yy686;
+ break;
+ case 263: /* literal ::= NK_FLOAT */
+{ yylhsminor.yy686 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy686 = yylhsminor.yy686;
+ break;
+ case 264: /* literal ::= NK_STRING */
+{ yylhsminor.yy686 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy686 = yylhsminor.yy686;
+ break;
+ case 265: /* literal ::= NK_BOOL */
+{ yylhsminor.yy686 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy686 = yylhsminor.yy686;
+ break;
+ case 266: /* literal ::= TIMESTAMP NK_STRING */
+{ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0)); }
+ yymsp[-1].minor.yy686 = yylhsminor.yy686;
+ break;
+ case 267: /* literal ::= duration_literal */
+ case 277: /* signed_literal ::= signed */ yytestcase(yyruleno==277);
+ case 297: /* expression ::= literal */ yytestcase(yyruleno==297);
+ case 298: /* expression ::= pseudo_column */ yytestcase(yyruleno==298);
+ case 299: /* expression ::= column_reference */ yytestcase(yyruleno==299);
+ case 300: /* expression ::= function_expression */ yytestcase(yyruleno==300);
+ case 301: /* expression ::= subquery */ yytestcase(yyruleno==301);
+ case 326: /* function_expression ::= literal_func */ yytestcase(yyruleno==326);
+ case 362: /* boolean_value_expression ::= boolean_primary */ yytestcase(yyruleno==362);
+ case 366: /* boolean_primary ::= predicate */ yytestcase(yyruleno==366);
+ case 368: /* common_expression ::= expression */ yytestcase(yyruleno==368);
+ case 369: /* common_expression ::= boolean_value_expression */ yytestcase(yyruleno==369);
+ case 371: /* table_reference_list ::= table_reference */ yytestcase(yyruleno==371);
+ case 373: /* table_reference ::= table_primary */ yytestcase(yyruleno==373);
+ case 374: /* table_reference ::= joined_table */ yytestcase(yyruleno==374);
+ case 378: /* table_primary ::= parenthesized_joined_table */ yytestcase(yyruleno==378);
+ case 425: /* query_expression_body ::= query_primary */ yytestcase(yyruleno==425);
+ case 428: /* query_primary ::= query_specification */ yytestcase(yyruleno==428);
+{ yylhsminor.yy686 = yymsp[0].minor.yy686; }
+ yymsp[0].minor.yy686 = yylhsminor.yy686;
+ break;
+ case 268: /* literal ::= NULL */
+{ yylhsminor.yy686 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy686 = yylhsminor.yy686;
+ break;
+ case 269: /* literal ::= NK_QUESTION */
+{ yylhsminor.yy686 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy686 = yylhsminor.yy686;
+ break;
+ case 270: /* duration_literal ::= NK_VARIABLE */
+{ yylhsminor.yy686 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy686 = yylhsminor.yy686;
+ break;
+ case 271: /* signed ::= NK_INTEGER */
+{ yylhsminor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0); }
+ yymsp[0].minor.yy686 = yylhsminor.yy686;
+ break;
+ case 272: /* signed ::= NK_PLUS NK_INTEGER */
+{ yymsp[-1].minor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0); }
+ break;
+ case 273: /* signed ::= NK_MINUS NK_INTEGER */
{
SToken t = yymsp[-1].minor.yy0;
t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z;
- yylhsminor.yy172 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &t);
+ yylhsminor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &t);
}
- yymsp[-1].minor.yy172 = yylhsminor.yy172;
+ yymsp[-1].minor.yy686 = yylhsminor.yy686;
break;
- case 276: /* signed ::= NK_FLOAT */
-{ yylhsminor.yy172 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); }
- yymsp[0].minor.yy172 = yylhsminor.yy172;
+ case 274: /* signed ::= NK_FLOAT */
+{ yylhsminor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); }
+ yymsp[0].minor.yy686 = yylhsminor.yy686;
break;
- case 277: /* signed ::= NK_PLUS NK_FLOAT */
-{ yymsp[-1].minor.yy172 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); }
+ case 275: /* signed ::= NK_PLUS NK_FLOAT */
+{ yymsp[-1].minor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); }
break;
- case 278: /* signed ::= NK_MINUS NK_FLOAT */
+ case 276: /* signed ::= NK_MINUS NK_FLOAT */
{
SToken t = yymsp[-1].minor.yy0;
t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z;
- yylhsminor.yy172 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &t);
+ yylhsminor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &t);
}
- yymsp[-1].minor.yy172 = yylhsminor.yy172;
+ yymsp[-1].minor.yy686 = yylhsminor.yy686;
break;
- case 280: /* signed_literal ::= NK_STRING */
-{ yylhsminor.yy172 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); }
- yymsp[0].minor.yy172 = yylhsminor.yy172;
+ case 278: /* signed_literal ::= NK_STRING */
+{ yylhsminor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); }
+ yymsp[0].minor.yy686 = yylhsminor.yy686;
break;
- case 281: /* signed_literal ::= NK_BOOL */
-{ yylhsminor.yy172 = createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0); }
- yymsp[0].minor.yy172 = yylhsminor.yy172;
+ case 279: /* signed_literal ::= NK_BOOL */
+{ yylhsminor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0); }
+ yymsp[0].minor.yy686 = yylhsminor.yy686;
break;
- case 282: /* signed_literal ::= TIMESTAMP NK_STRING */
-{ yymsp[-1].minor.yy172 = createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0); }
+ case 280: /* signed_literal ::= TIMESTAMP NK_STRING */
+{ yymsp[-1].minor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0); }
break;
- case 283: /* signed_literal ::= duration_literal */
- case 285: /* signed_literal ::= literal_func */ yytestcase(yyruleno==285);
- case 341: /* star_func_para ::= expression */ yytestcase(yyruleno==341);
- case 396: /* select_item ::= common_expression */ yytestcase(yyruleno==396);
- case 442: /* search_condition ::= common_expression */ yytestcase(yyruleno==442);
-{ yylhsminor.yy172 = releaseRawExprNode(pCxt, yymsp[0].minor.yy172); }
- yymsp[0].minor.yy172 = yylhsminor.yy172;
+ case 281: /* signed_literal ::= duration_literal */
+ case 283: /* signed_literal ::= literal_func */ yytestcase(yyruleno==283);
+ case 340: /* star_func_para ::= expression */ yytestcase(yyruleno==340);
+ case 395: /* select_item ::= common_expression */ yytestcase(yyruleno==395);
+ case 441: /* search_condition ::= common_expression */ yytestcase(yyruleno==441);
+{ yylhsminor.yy686 = releaseRawExprNode(pCxt, yymsp[0].minor.yy686); }
+ yymsp[0].minor.yy686 = yylhsminor.yy686;
break;
- case 284: /* signed_literal ::= NULL */
-{ yylhsminor.yy172 = createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0); }
- yymsp[0].minor.yy172 = yylhsminor.yy172;
+ case 282: /* signed_literal ::= NULL */
+{ yylhsminor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0); }
+ yymsp[0].minor.yy686 = yylhsminor.yy686;
break;
- case 303: /* expression ::= NK_LP expression NK_RP */
- case 368: /* boolean_primary ::= NK_LP boolean_value_expression NK_RP */ yytestcase(yyruleno==368);
-{ yylhsminor.yy172 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, releaseRawExprNode(pCxt, yymsp[-1].minor.yy172)); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+ case 302: /* expression ::= NK_LP expression NK_RP */
+ case 367: /* boolean_primary ::= NK_LP boolean_value_expression NK_RP */ yytestcase(yyruleno==367);
+{ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, releaseRawExprNode(pCxt, yymsp[-1].minor.yy686)); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
- case 304: /* expression ::= NK_PLUS expression */
+ case 303: /* expression ::= NK_PLUS expression */
{
- SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy172);
- yylhsminor.yy172 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, releaseRawExprNode(pCxt, yymsp[0].minor.yy172));
+ SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686);
+ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, releaseRawExprNode(pCxt, yymsp[0].minor.yy686));
}
- yymsp[-1].minor.yy172 = yylhsminor.yy172;
+ yymsp[-1].minor.yy686 = yylhsminor.yy686;
break;
- case 305: /* expression ::= NK_MINUS expression */
+ case 304: /* expression ::= NK_MINUS expression */
{
- SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy172);
- yylhsminor.yy172 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, createOperatorNode(pCxt, OP_TYPE_MINUS, releaseRawExprNode(pCxt, yymsp[0].minor.yy172), NULL));
+ SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686);
+ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, createOperatorNode(pCxt, OP_TYPE_MINUS, releaseRawExprNode(pCxt, yymsp[0].minor.yy686), NULL));
}
- yymsp[-1].minor.yy172 = yylhsminor.yy172;
+ yymsp[-1].minor.yy686 = yylhsminor.yy686;
break;
- case 306: /* expression ::= expression NK_PLUS expression */
+ case 305: /* expression ::= expression NK_PLUS expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy172);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy172);
- yylhsminor.yy172 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_ADD, releaseRawExprNode(pCxt, yymsp[-2].minor.yy172), releaseRawExprNode(pCxt, yymsp[0].minor.yy172)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy686);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686);
+ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_ADD, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), releaseRawExprNode(pCxt, yymsp[0].minor.yy686)));
}
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
- case 307: /* expression ::= expression NK_MINUS expression */
+ case 306: /* expression ::= expression NK_MINUS expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy172);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy172);
- yylhsminor.yy172 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_SUB, releaseRawExprNode(pCxt, yymsp[-2].minor.yy172), releaseRawExprNode(pCxt, yymsp[0].minor.yy172)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy686);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686);
+ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_SUB, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), releaseRawExprNode(pCxt, yymsp[0].minor.yy686)));
}
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
- case 308: /* expression ::= expression NK_STAR expression */
+ case 307: /* expression ::= expression NK_STAR expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy172);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy172);
- yylhsminor.yy172 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_MULTI, releaseRawExprNode(pCxt, yymsp[-2].minor.yy172), releaseRawExprNode(pCxt, yymsp[0].minor.yy172)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy686);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686);
+ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_MULTI, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), releaseRawExprNode(pCxt, yymsp[0].minor.yy686)));
}
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
- case 309: /* expression ::= expression NK_SLASH expression */
+ case 308: /* expression ::= expression NK_SLASH expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy172);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy172);
- yylhsminor.yy172 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_DIV, releaseRawExprNode(pCxt, yymsp[-2].minor.yy172), releaseRawExprNode(pCxt, yymsp[0].minor.yy172)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy686);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686);
+ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_DIV, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), releaseRawExprNode(pCxt, yymsp[0].minor.yy686)));
}
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
- case 310: /* expression ::= expression NK_REM expression */
+ case 309: /* expression ::= expression NK_REM expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy172);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy172);
- yylhsminor.yy172 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_MOD, releaseRawExprNode(pCxt, yymsp[-2].minor.yy172), releaseRawExprNode(pCxt, yymsp[0].minor.yy172)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy686);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686);
+ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_MOD, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), releaseRawExprNode(pCxt, yymsp[0].minor.yy686)));
}
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
- case 311: /* expression ::= column_reference NK_ARROW NK_STRING */
+ case 310: /* expression ::= column_reference NK_ARROW NK_STRING */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy172);
- yylhsminor.yy172 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_JSON_GET_VALUE, releaseRawExprNode(pCxt, yymsp[-2].minor.yy172), createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy686);
+ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_JSON_GET_VALUE, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)));
}
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
- break;
- case 312: /* expression_list ::= expression */
-{ yylhsminor.yy60 = createNodeList(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy172)); }
- yymsp[0].minor.yy60 = yylhsminor.yy60;
- break;
- case 313: /* expression_list ::= expression_list NK_COMMA expression */
-{ yylhsminor.yy60 = addNodeToList(pCxt, yymsp[-2].minor.yy60, releaseRawExprNode(pCxt, yymsp[0].minor.yy172)); }
- yymsp[-2].minor.yy60 = yylhsminor.yy60;
- break;
- case 314: /* column_reference ::= column_name */
-{ yylhsminor.yy172 = createRawExprNode(pCxt, &yymsp[0].minor.yy105, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy105)); }
- yymsp[0].minor.yy172 = yylhsminor.yy172;
- break;
- case 315: /* column_reference ::= table_name NK_DOT column_name */
-{ yylhsminor.yy172 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy105, &yymsp[0].minor.yy105, createColumnNode(pCxt, &yymsp[-2].minor.yy105, &yymsp[0].minor.yy105)); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
- break;
- case 316: /* pseudo_column ::= ROWTS */
- case 317: /* pseudo_column ::= TBNAME */ yytestcase(yyruleno==317);
- case 319: /* pseudo_column ::= QSTARTTS */ yytestcase(yyruleno==319);
- case 320: /* pseudo_column ::= QENDTS */ yytestcase(yyruleno==320);
- case 321: /* pseudo_column ::= WSTARTTS */ yytestcase(yyruleno==321);
- case 322: /* pseudo_column ::= WENDTS */ yytestcase(yyruleno==322);
- case 323: /* pseudo_column ::= WDURATION */ yytestcase(yyruleno==323);
- case 329: /* literal_func ::= NOW */ yytestcase(yyruleno==329);
-{ yylhsminor.yy172 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL)); }
- yymsp[0].minor.yy172 = yylhsminor.yy172;
- break;
- case 318: /* pseudo_column ::= table_name NK_DOT TBNAME */
-{ yylhsminor.yy172 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy105, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-2].minor.yy105)))); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
- break;
- case 324: /* function_expression ::= function_name NK_LP expression_list NK_RP */
- case 325: /* function_expression ::= star_func NK_LP star_func_para_list NK_RP */ yytestcase(yyruleno==325);
-{ yylhsminor.yy172 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy105, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-3].minor.yy105, yymsp[-1].minor.yy60)); }
- yymsp[-3].minor.yy172 = yylhsminor.yy172;
- break;
- case 326: /* function_expression ::= CAST NK_LP expression AS type_name NK_RP */
-{ yylhsminor.yy172 = createRawExprNodeExt(pCxt, &yymsp[-5].minor.yy0, &yymsp[0].minor.yy0, createCastFunctionNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy172), yymsp[-1].minor.yy248)); }
- yymsp[-5].minor.yy172 = yylhsminor.yy172;
- break;
- case 328: /* literal_func ::= noarg_func NK_LP NK_RP */
-{ yylhsminor.yy172 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy105, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-2].minor.yy105, NULL)); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
- break;
- case 337: /* star_func_para_list ::= NK_STAR */
-{ yylhsminor.yy60 = createNodeList(pCxt, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy60 = yylhsminor.yy60;
- break;
- case 342: /* star_func_para ::= table_name NK_DOT NK_STAR */
- case 399: /* select_item ::= table_name NK_DOT NK_STAR */ yytestcase(yyruleno==399);
-{ yylhsminor.yy172 = createColumnNode(pCxt, &yymsp[-2].minor.yy105, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
- break;
- case 343: /* predicate ::= expression compare_op expression */
- case 348: /* predicate ::= expression in_op in_predicate_value */ yytestcase(yyruleno==348);
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
+ break;
+ case 311: /* expression_list ::= expression */
+{ yylhsminor.yy670 = createNodeList(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy686)); }
+ yymsp[0].minor.yy670 = yylhsminor.yy670;
+ break;
+ case 312: /* expression_list ::= expression_list NK_COMMA expression */
+{ yylhsminor.yy670 = addNodeToList(pCxt, yymsp[-2].minor.yy670, releaseRawExprNode(pCxt, yymsp[0].minor.yy686)); }
+ yymsp[-2].minor.yy670 = yylhsminor.yy670;
+ break;
+ case 313: /* column_reference ::= column_name */
+{ yylhsminor.yy686 = createRawExprNode(pCxt, &yymsp[0].minor.yy113, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy113)); }
+ yymsp[0].minor.yy686 = yylhsminor.yy686;
+ break;
+ case 314: /* column_reference ::= table_name NK_DOT column_name */
+{ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy113, createColumnNode(pCxt, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy113)); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
+ break;
+ case 315: /* pseudo_column ::= ROWTS */
+ case 316: /* pseudo_column ::= TBNAME */ yytestcase(yyruleno==316);
+ case 318: /* pseudo_column ::= QSTARTTS */ yytestcase(yyruleno==318);
+ case 319: /* pseudo_column ::= QENDTS */ yytestcase(yyruleno==319);
+ case 320: /* pseudo_column ::= WSTARTTS */ yytestcase(yyruleno==320);
+ case 321: /* pseudo_column ::= WENDTS */ yytestcase(yyruleno==321);
+ case 322: /* pseudo_column ::= WDURATION */ yytestcase(yyruleno==322);
+ case 328: /* literal_func ::= NOW */ yytestcase(yyruleno==328);
+{ yylhsminor.yy686 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL)); }
+ yymsp[0].minor.yy686 = yylhsminor.yy686;
+ break;
+ case 317: /* pseudo_column ::= table_name NK_DOT TBNAME */
+{ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-2].minor.yy113)))); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
+ break;
+ case 323: /* function_expression ::= function_name NK_LP expression_list NK_RP */
+ case 324: /* function_expression ::= star_func NK_LP star_func_para_list NK_RP */ yytestcase(yyruleno==324);
+{ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy113, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-3].minor.yy113, yymsp[-1].minor.yy670)); }
+ yymsp[-3].minor.yy686 = yylhsminor.yy686;
+ break;
+ case 325: /* function_expression ::= CAST NK_LP expression AS type_name NK_RP */
+{ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-5].minor.yy0, &yymsp[0].minor.yy0, createCastFunctionNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy686), yymsp[-1].minor.yy490)); }
+ yymsp[-5].minor.yy686 = yylhsminor.yy686;
+ break;
+ case 327: /* literal_func ::= noarg_func NK_LP NK_RP */
+{ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-2].minor.yy113, NULL)); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
+ break;
+ case 336: /* star_func_para_list ::= NK_STAR */
+{ yylhsminor.yy670 = createNodeList(pCxt, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy670 = yylhsminor.yy670;
+ break;
+ case 341: /* star_func_para ::= table_name NK_DOT NK_STAR */
+ case 398: /* select_item ::= table_name NK_DOT NK_STAR */ yytestcase(yyruleno==398);
+{ yylhsminor.yy686 = createColumnNode(pCxt, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
+ break;
+ case 342: /* predicate ::= expression compare_op expression */
+ case 347: /* predicate ::= expression in_op in_predicate_value */ yytestcase(yyruleno==347);
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy172);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy172);
- yylhsminor.yy172 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, yymsp[-1].minor.yy572, releaseRawExprNode(pCxt, yymsp[-2].minor.yy172), releaseRawExprNode(pCxt, yymsp[0].minor.yy172)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy686);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686);
+ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, yymsp[-1].minor.yy632, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), releaseRawExprNode(pCxt, yymsp[0].minor.yy686)));
}
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
- case 344: /* predicate ::= expression BETWEEN expression AND expression */
+ case 343: /* predicate ::= expression BETWEEN expression AND expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-4].minor.yy172);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy172);
- yylhsminor.yy172 = createRawExprNodeExt(pCxt, &s, &e, createBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-4].minor.yy172), releaseRawExprNode(pCxt, yymsp[-2].minor.yy172), releaseRawExprNode(pCxt, yymsp[0].minor.yy172)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-4].minor.yy686);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686);
+ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &e, createBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-4].minor.yy686), releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), releaseRawExprNode(pCxt, yymsp[0].minor.yy686)));
}
- yymsp[-4].minor.yy172 = yylhsminor.yy172;
+ yymsp[-4].minor.yy686 = yylhsminor.yy686;
break;
- case 345: /* predicate ::= expression NOT BETWEEN expression AND expression */
+ case 344: /* predicate ::= expression NOT BETWEEN expression AND expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-5].minor.yy172);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy172);
- yylhsminor.yy172 = createRawExprNodeExt(pCxt, &s, &e, createNotBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy172), releaseRawExprNode(pCxt, yymsp[-2].minor.yy172), releaseRawExprNode(pCxt, yymsp[0].minor.yy172)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-5].minor.yy686);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686);
+ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &e, createNotBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy686), releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), releaseRawExprNode(pCxt, yymsp[0].minor.yy686)));
}
- yymsp[-5].minor.yy172 = yylhsminor.yy172;
+ yymsp[-5].minor.yy686 = yylhsminor.yy686;
break;
- case 346: /* predicate ::= expression IS NULL */
+ case 345: /* predicate ::= expression IS NULL */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy172);
- yylhsminor.yy172 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NULL, releaseRawExprNode(pCxt, yymsp[-2].minor.yy172), NULL));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy686);
+ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NULL, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), NULL));
}
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
- case 347: /* predicate ::= expression IS NOT NULL */
+ case 346: /* predicate ::= expression IS NOT NULL */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-3].minor.yy172);
- yylhsminor.yy172 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NOT_NULL, releaseRawExprNode(pCxt, yymsp[-3].minor.yy172), NULL));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-3].minor.yy686);
+ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NOT_NULL, releaseRawExprNode(pCxt, yymsp[-3].minor.yy686), NULL));
}
- yymsp[-3].minor.yy172 = yylhsminor.yy172;
+ yymsp[-3].minor.yy686 = yylhsminor.yy686;
break;
- case 349: /* compare_op ::= NK_LT */
-{ yymsp[0].minor.yy572 = OP_TYPE_LOWER_THAN; }
+ case 348: /* compare_op ::= NK_LT */
+{ yymsp[0].minor.yy632 = OP_TYPE_LOWER_THAN; }
break;
- case 350: /* compare_op ::= NK_GT */
-{ yymsp[0].minor.yy572 = OP_TYPE_GREATER_THAN; }
+ case 349: /* compare_op ::= NK_GT */
+{ yymsp[0].minor.yy632 = OP_TYPE_GREATER_THAN; }
break;
- case 351: /* compare_op ::= NK_LE */
-{ yymsp[0].minor.yy572 = OP_TYPE_LOWER_EQUAL; }
+ case 350: /* compare_op ::= NK_LE */
+{ yymsp[0].minor.yy632 = OP_TYPE_LOWER_EQUAL; }
break;
- case 352: /* compare_op ::= NK_GE */
-{ yymsp[0].minor.yy572 = OP_TYPE_GREATER_EQUAL; }
+ case 351: /* compare_op ::= NK_GE */
+{ yymsp[0].minor.yy632 = OP_TYPE_GREATER_EQUAL; }
break;
- case 353: /* compare_op ::= NK_NE */
-{ yymsp[0].minor.yy572 = OP_TYPE_NOT_EQUAL; }
+ case 352: /* compare_op ::= NK_NE */
+{ yymsp[0].minor.yy632 = OP_TYPE_NOT_EQUAL; }
break;
- case 354: /* compare_op ::= NK_EQ */
-{ yymsp[0].minor.yy572 = OP_TYPE_EQUAL; }
+ case 353: /* compare_op ::= NK_EQ */
+{ yymsp[0].minor.yy632 = OP_TYPE_EQUAL; }
break;
- case 355: /* compare_op ::= LIKE */
-{ yymsp[0].minor.yy572 = OP_TYPE_LIKE; }
+ case 354: /* compare_op ::= LIKE */
+{ yymsp[0].minor.yy632 = OP_TYPE_LIKE; }
break;
- case 356: /* compare_op ::= NOT LIKE */
-{ yymsp[-1].minor.yy572 = OP_TYPE_NOT_LIKE; }
+ case 355: /* compare_op ::= NOT LIKE */
+{ yymsp[-1].minor.yy632 = OP_TYPE_NOT_LIKE; }
break;
- case 357: /* compare_op ::= MATCH */
-{ yymsp[0].minor.yy572 = OP_TYPE_MATCH; }
+ case 356: /* compare_op ::= MATCH */
+{ yymsp[0].minor.yy632 = OP_TYPE_MATCH; }
break;
- case 358: /* compare_op ::= NMATCH */
-{ yymsp[0].minor.yy572 = OP_TYPE_NMATCH; }
+ case 357: /* compare_op ::= NMATCH */
+{ yymsp[0].minor.yy632 = OP_TYPE_NMATCH; }
break;
- case 359: /* compare_op ::= CONTAINS */
-{ yymsp[0].minor.yy572 = OP_TYPE_JSON_CONTAINS; }
+ case 358: /* compare_op ::= CONTAINS */
+{ yymsp[0].minor.yy632 = OP_TYPE_JSON_CONTAINS; }
break;
- case 360: /* in_op ::= IN */
-{ yymsp[0].minor.yy572 = OP_TYPE_IN; }
+ case 359: /* in_op ::= IN */
+{ yymsp[0].minor.yy632 = OP_TYPE_IN; }
break;
- case 361: /* in_op ::= NOT IN */
-{ yymsp[-1].minor.yy572 = OP_TYPE_NOT_IN; }
+ case 360: /* in_op ::= NOT IN */
+{ yymsp[-1].minor.yy632 = OP_TYPE_NOT_IN; }
break;
- case 362: /* in_predicate_value ::= NK_LP expression_list NK_RP */
-{ yylhsminor.yy172 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, createNodeListNode(pCxt, yymsp[-1].minor.yy60)); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+ case 361: /* in_predicate_value ::= NK_LP expression_list NK_RP */
+{ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, createNodeListNode(pCxt, yymsp[-1].minor.yy670)); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
- case 364: /* boolean_value_expression ::= NOT boolean_primary */
+ case 363: /* boolean_value_expression ::= NOT boolean_primary */
{
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy172);
- yylhsminor.yy172 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_NOT, releaseRawExprNode(pCxt, yymsp[0].minor.yy172), NULL));
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686);
+ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_NOT, releaseRawExprNode(pCxt, yymsp[0].minor.yy686), NULL));
}
- yymsp[-1].minor.yy172 = yylhsminor.yy172;
+ yymsp[-1].minor.yy686 = yylhsminor.yy686;
break;
- case 365: /* boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */
+ case 364: /* boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy172);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy172);
- yylhsminor.yy172 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_OR, releaseRawExprNode(pCxt, yymsp[-2].minor.yy172), releaseRawExprNode(pCxt, yymsp[0].minor.yy172)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy686);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686);
+ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_OR, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), releaseRawExprNode(pCxt, yymsp[0].minor.yy686)));
}
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
- case 366: /* boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */
+ case 365: /* boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy172);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy172);
- yylhsminor.yy172 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_AND, releaseRawExprNode(pCxt, yymsp[-2].minor.yy172), releaseRawExprNode(pCxt, yymsp[0].minor.yy172)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy686);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686);
+ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_AND, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), releaseRawExprNode(pCxt, yymsp[0].minor.yy686)));
}
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
- case 373: /* table_reference_list ::= table_reference_list NK_COMMA table_reference */
-{ yylhsminor.yy172 = createJoinTableNode(pCxt, JOIN_TYPE_INNER, yymsp[-2].minor.yy172, yymsp[0].minor.yy172, NULL); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+ case 372: /* table_reference_list ::= table_reference_list NK_COMMA table_reference */
+{ yylhsminor.yy686 = createJoinTableNode(pCxt, JOIN_TYPE_INNER, yymsp[-2].minor.yy686, yymsp[0].minor.yy686, NULL); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
- case 376: /* table_primary ::= table_name alias_opt */
-{ yylhsminor.yy172 = createRealTableNode(pCxt, NULL, &yymsp[-1].minor.yy105, &yymsp[0].minor.yy105); }
- yymsp[-1].minor.yy172 = yylhsminor.yy172;
+ case 375: /* table_primary ::= table_name alias_opt */
+{ yylhsminor.yy686 = createRealTableNode(pCxt, NULL, &yymsp[-1].minor.yy113, &yymsp[0].minor.yy113); }
+ yymsp[-1].minor.yy686 = yylhsminor.yy686;
break;
- case 377: /* table_primary ::= db_name NK_DOT table_name alias_opt */
-{ yylhsminor.yy172 = createRealTableNode(pCxt, &yymsp[-3].minor.yy105, &yymsp[-1].minor.yy105, &yymsp[0].minor.yy105); }
- yymsp[-3].minor.yy172 = yylhsminor.yy172;
+ case 376: /* table_primary ::= db_name NK_DOT table_name alias_opt */
+{ yylhsminor.yy686 = createRealTableNode(pCxt, &yymsp[-3].minor.yy113, &yymsp[-1].minor.yy113, &yymsp[0].minor.yy113); }
+ yymsp[-3].minor.yy686 = yylhsminor.yy686;
break;
- case 378: /* table_primary ::= subquery alias_opt */
-{ yylhsminor.yy172 = createTempTableNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy172), &yymsp[0].minor.yy105); }
- yymsp[-1].minor.yy172 = yylhsminor.yy172;
+ case 377: /* table_primary ::= subquery alias_opt */
+{ yylhsminor.yy686 = createTempTableNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy686), &yymsp[0].minor.yy113); }
+ yymsp[-1].minor.yy686 = yylhsminor.yy686;
break;
- case 380: /* alias_opt ::= */
-{ yymsp[1].minor.yy105 = nil_token; }
+ case 379: /* alias_opt ::= */
+{ yymsp[1].minor.yy113 = nil_token; }
break;
- case 381: /* alias_opt ::= table_alias */
-{ yylhsminor.yy105 = yymsp[0].minor.yy105; }
- yymsp[0].minor.yy105 = yylhsminor.yy105;
+ case 380: /* alias_opt ::= table_alias */
+{ yylhsminor.yy113 = yymsp[0].minor.yy113; }
+ yymsp[0].minor.yy113 = yylhsminor.yy113;
break;
- case 382: /* alias_opt ::= AS table_alias */
-{ yymsp[-1].minor.yy105 = yymsp[0].minor.yy105; }
+ case 381: /* alias_opt ::= AS table_alias */
+{ yymsp[-1].minor.yy113 = yymsp[0].minor.yy113; }
break;
- case 383: /* parenthesized_joined_table ::= NK_LP joined_table NK_RP */
- case 384: /* parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ yytestcase(yyruleno==384);
-{ yymsp[-2].minor.yy172 = yymsp[-1].minor.yy172; }
+ case 382: /* parenthesized_joined_table ::= NK_LP joined_table NK_RP */
+ case 383: /* parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ yytestcase(yyruleno==383);
+{ yymsp[-2].minor.yy686 = yymsp[-1].minor.yy686; }
break;
- case 385: /* joined_table ::= table_reference join_type JOIN table_reference ON search_condition */
-{ yylhsminor.yy172 = createJoinTableNode(pCxt, yymsp[-4].minor.yy636, yymsp[-5].minor.yy172, yymsp[-2].minor.yy172, yymsp[0].minor.yy172); }
- yymsp[-5].minor.yy172 = yylhsminor.yy172;
+ case 384: /* joined_table ::= table_reference join_type JOIN table_reference ON search_condition */
+{ yylhsminor.yy686 = createJoinTableNode(pCxt, yymsp[-4].minor.yy120, yymsp[-5].minor.yy686, yymsp[-2].minor.yy686, yymsp[0].minor.yy686); }
+ yymsp[-5].minor.yy686 = yylhsminor.yy686;
break;
- case 386: /* join_type ::= */
-{ yymsp[1].minor.yy636 = JOIN_TYPE_INNER; }
+ case 385: /* join_type ::= */
+{ yymsp[1].minor.yy120 = JOIN_TYPE_INNER; }
break;
- case 387: /* join_type ::= INNER */
-{ yymsp[0].minor.yy636 = JOIN_TYPE_INNER; }
+ case 386: /* join_type ::= INNER */
+{ yymsp[0].minor.yy120 = JOIN_TYPE_INNER; }
break;
- case 388: /* query_specification ::= SELECT set_quantifier_opt select_list from_clause where_clause_opt partition_by_clause_opt twindow_clause_opt group_by_clause_opt having_clause_opt */
+ case 387: /* query_specification ::= SELECT set_quantifier_opt select_list from_clause where_clause_opt partition_by_clause_opt twindow_clause_opt group_by_clause_opt having_clause_opt */
{
- yymsp[-8].minor.yy172 = createSelectStmt(pCxt, yymsp[-7].minor.yy617, yymsp[-6].minor.yy60, yymsp[-5].minor.yy172);
- yymsp[-8].minor.yy172 = addWhereClause(pCxt, yymsp[-8].minor.yy172, yymsp[-4].minor.yy172);
- yymsp[-8].minor.yy172 = addPartitionByClause(pCxt, yymsp[-8].minor.yy172, yymsp[-3].minor.yy60);
- yymsp[-8].minor.yy172 = addWindowClauseClause(pCxt, yymsp[-8].minor.yy172, yymsp[-2].minor.yy172);
- yymsp[-8].minor.yy172 = addGroupByClause(pCxt, yymsp[-8].minor.yy172, yymsp[-1].minor.yy60);
- yymsp[-8].minor.yy172 = addHavingClause(pCxt, yymsp[-8].minor.yy172, yymsp[0].minor.yy172);
+ yymsp[-8].minor.yy686 = createSelectStmt(pCxt, yymsp[-7].minor.yy131, yymsp[-6].minor.yy670, yymsp[-5].minor.yy686);
+ yymsp[-8].minor.yy686 = addWhereClause(pCxt, yymsp[-8].minor.yy686, yymsp[-4].minor.yy686);
+ yymsp[-8].minor.yy686 = addPartitionByClause(pCxt, yymsp[-8].minor.yy686, yymsp[-3].minor.yy670);
+ yymsp[-8].minor.yy686 = addWindowClauseClause(pCxt, yymsp[-8].minor.yy686, yymsp[-2].minor.yy686);
+ yymsp[-8].minor.yy686 = addGroupByClause(pCxt, yymsp[-8].minor.yy686, yymsp[-1].minor.yy670);
+ yymsp[-8].minor.yy686 = addHavingClause(pCxt, yymsp[-8].minor.yy686, yymsp[0].minor.yy686);
}
break;
- case 391: /* set_quantifier_opt ::= ALL */
-{ yymsp[0].minor.yy617 = false; }
+ case 390: /* set_quantifier_opt ::= ALL */
+{ yymsp[0].minor.yy131 = false; }
break;
- case 392: /* select_list ::= NK_STAR */
-{ yymsp[0].minor.yy60 = NULL; }
+ case 391: /* select_list ::= NK_STAR */
+{ yymsp[0].minor.yy670 = NULL; }
break;
- case 397: /* select_item ::= common_expression column_alias */
-{ yylhsminor.yy172 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy172), &yymsp[0].minor.yy105); }
- yymsp[-1].minor.yy172 = yylhsminor.yy172;
+ case 396: /* select_item ::= common_expression column_alias */
+{ yylhsminor.yy686 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy686), &yymsp[0].minor.yy113); }
+ yymsp[-1].minor.yy686 = yylhsminor.yy686;
break;
- case 398: /* select_item ::= common_expression AS column_alias */
-{ yylhsminor.yy172 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy172), &yymsp[0].minor.yy105); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+ case 397: /* select_item ::= common_expression AS column_alias */
+{ yylhsminor.yy686 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), &yymsp[0].minor.yy113); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
- case 403: /* partition_by_clause_opt ::= PARTITION BY expression_list */
- case 420: /* group_by_clause_opt ::= GROUP BY group_by_list */ yytestcase(yyruleno==420);
- case 432: /* order_by_clause_opt ::= ORDER BY sort_specification_list */ yytestcase(yyruleno==432);
-{ yymsp[-2].minor.yy60 = yymsp[0].minor.yy60; }
+ case 402: /* partition_by_clause_opt ::= PARTITION BY expression_list */
+ case 419: /* group_by_clause_opt ::= GROUP BY group_by_list */ yytestcase(yyruleno==419);
+ case 431: /* order_by_clause_opt ::= ORDER BY sort_specification_list */ yytestcase(yyruleno==431);
+{ yymsp[-2].minor.yy670 = yymsp[0].minor.yy670; }
break;
- case 405: /* twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */
-{ yymsp[-5].minor.yy172 = createSessionWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy172), releaseRawExprNode(pCxt, yymsp[-1].minor.yy172)); }
+ case 404: /* twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */
+{ yymsp[-5].minor.yy686 = createSessionWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy686), releaseRawExprNode(pCxt, yymsp[-1].minor.yy686)); }
break;
- case 406: /* twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP */
-{ yymsp[-3].minor.yy172 = createStateWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy172)); }
+ case 405: /* twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP */
+{ yymsp[-3].minor.yy686 = createStateWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy686)); }
break;
- case 407: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */
-{ yymsp[-5].minor.yy172 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy172), NULL, yymsp[-1].minor.yy172, yymsp[0].minor.yy172); }
+ case 406: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */
+{ yymsp[-5].minor.yy686 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy686), NULL, yymsp[-1].minor.yy686, yymsp[0].minor.yy686); }
break;
- case 408: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */
-{ yymsp[-7].minor.yy172 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy172), releaseRawExprNode(pCxt, yymsp[-3].minor.yy172), yymsp[-1].minor.yy172, yymsp[0].minor.yy172); }
+ case 407: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */
+{ yymsp[-7].minor.yy686 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy686), releaseRawExprNode(pCxt, yymsp[-3].minor.yy686), yymsp[-1].minor.yy686, yymsp[0].minor.yy686); }
break;
- case 410: /* sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */
-{ yymsp[-3].minor.yy172 = releaseRawExprNode(pCxt, yymsp[-1].minor.yy172); }
+ case 409: /* sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */
+{ yymsp[-3].minor.yy686 = releaseRawExprNode(pCxt, yymsp[-1].minor.yy686); }
break;
- case 412: /* fill_opt ::= FILL NK_LP fill_mode NK_RP */
-{ yymsp[-3].minor.yy172 = createFillNode(pCxt, yymsp[-1].minor.yy202, NULL); }
+ case 411: /* fill_opt ::= FILL NK_LP fill_mode NK_RP */
+{ yymsp[-3].minor.yy686 = createFillNode(pCxt, yymsp[-1].minor.yy522, NULL); }
break;
- case 413: /* fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */
-{ yymsp[-5].minor.yy172 = createFillNode(pCxt, FILL_MODE_VALUE, createNodeListNode(pCxt, yymsp[-1].minor.yy60)); }
+ case 412: /* fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */
+{ yymsp[-5].minor.yy686 = createFillNode(pCxt, FILL_MODE_VALUE, createNodeListNode(pCxt, yymsp[-1].minor.yy670)); }
break;
- case 414: /* fill_mode ::= NONE */
-{ yymsp[0].minor.yy202 = FILL_MODE_NONE; }
+ case 413: /* fill_mode ::= NONE */
+{ yymsp[0].minor.yy522 = FILL_MODE_NONE; }
break;
- case 415: /* fill_mode ::= PREV */
-{ yymsp[0].minor.yy202 = FILL_MODE_PREV; }
+ case 414: /* fill_mode ::= PREV */
+{ yymsp[0].minor.yy522 = FILL_MODE_PREV; }
break;
- case 416: /* fill_mode ::= NULL */
-{ yymsp[0].minor.yy202 = FILL_MODE_NULL; }
+ case 415: /* fill_mode ::= NULL */
+{ yymsp[0].minor.yy522 = FILL_MODE_NULL; }
break;
- case 417: /* fill_mode ::= LINEAR */
-{ yymsp[0].minor.yy202 = FILL_MODE_LINEAR; }
+ case 416: /* fill_mode ::= LINEAR */
+{ yymsp[0].minor.yy522 = FILL_MODE_LINEAR; }
break;
- case 418: /* fill_mode ::= NEXT */
-{ yymsp[0].minor.yy202 = FILL_MODE_NEXT; }
+ case 417: /* fill_mode ::= NEXT */
+{ yymsp[0].minor.yy522 = FILL_MODE_NEXT; }
break;
- case 421: /* group_by_list ::= expression */
-{ yylhsminor.yy60 = createNodeList(pCxt, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy172))); }
- yymsp[0].minor.yy60 = yylhsminor.yy60;
+ case 420: /* group_by_list ::= expression */
+{ yylhsminor.yy670 = createNodeList(pCxt, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy686))); }
+ yymsp[0].minor.yy670 = yylhsminor.yy670;
break;
- case 422: /* group_by_list ::= group_by_list NK_COMMA expression */
-{ yylhsminor.yy60 = addNodeToList(pCxt, yymsp[-2].minor.yy60, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy172))); }
- yymsp[-2].minor.yy60 = yylhsminor.yy60;
+ case 421: /* group_by_list ::= group_by_list NK_COMMA expression */
+{ yylhsminor.yy670 = addNodeToList(pCxt, yymsp[-2].minor.yy670, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy686))); }
+ yymsp[-2].minor.yy670 = yylhsminor.yy670;
break;
- case 425: /* query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt */
+ case 424: /* query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt */
{
- yylhsminor.yy172 = addOrderByClause(pCxt, yymsp[-3].minor.yy172, yymsp[-2].minor.yy60);
- yylhsminor.yy172 = addSlimitClause(pCxt, yylhsminor.yy172, yymsp[-1].minor.yy172);
- yylhsminor.yy172 = addLimitClause(pCxt, yylhsminor.yy172, yymsp[0].minor.yy172);
+ yylhsminor.yy686 = addOrderByClause(pCxt, yymsp[-3].minor.yy686, yymsp[-2].minor.yy670);
+ yylhsminor.yy686 = addSlimitClause(pCxt, yylhsminor.yy686, yymsp[-1].minor.yy686);
+ yylhsminor.yy686 = addLimitClause(pCxt, yylhsminor.yy686, yymsp[0].minor.yy686);
}
- yymsp[-3].minor.yy172 = yylhsminor.yy172;
+ yymsp[-3].minor.yy686 = yylhsminor.yy686;
break;
- case 427: /* query_expression_body ::= query_expression_body UNION ALL query_expression_body */
-{ yylhsminor.yy172 = createSetOperator(pCxt, SET_OP_TYPE_UNION_ALL, yymsp[-3].minor.yy172, yymsp[0].minor.yy172); }
- yymsp[-3].minor.yy172 = yylhsminor.yy172;
+ case 426: /* query_expression_body ::= query_expression_body UNION ALL query_expression_body */
+{ yylhsminor.yy686 = createSetOperator(pCxt, SET_OP_TYPE_UNION_ALL, yymsp[-3].minor.yy686, yymsp[0].minor.yy686); }
+ yymsp[-3].minor.yy686 = yylhsminor.yy686;
break;
- case 428: /* query_expression_body ::= query_expression_body UNION query_expression_body */
-{ yylhsminor.yy172 = createSetOperator(pCxt, SET_OP_TYPE_UNION, yymsp[-2].minor.yy172, yymsp[0].minor.yy172); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+ case 427: /* query_expression_body ::= query_expression_body UNION query_expression_body */
+{ yylhsminor.yy686 = createSetOperator(pCxt, SET_OP_TYPE_UNION, yymsp[-2].minor.yy686, yymsp[0].minor.yy686); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
- case 430: /* query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP */
-{ yymsp[-5].minor.yy172 = yymsp[-4].minor.yy172; }
- yy_destructor(yypParser,350,&yymsp[-3].minor);
- yy_destructor(yypParser,351,&yymsp[-2].minor);
- yy_destructor(yypParser,352,&yymsp[-1].minor);
+ case 429: /* query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP */
+{ yymsp[-5].minor.yy686 = yymsp[-4].minor.yy686; }
+ yy_destructor(yypParser,349,&yymsp[-3].minor);
+ yy_destructor(yypParser,350,&yymsp[-2].minor);
+ yy_destructor(yypParser,351,&yymsp[-1].minor);
break;
- case 434: /* slimit_clause_opt ::= SLIMIT NK_INTEGER */
- case 438: /* limit_clause_opt ::= LIMIT NK_INTEGER */ yytestcase(yyruleno==438);
-{ yymsp[-1].minor.yy172 = createLimitNode(pCxt, &yymsp[0].minor.yy0, NULL); }
+ case 433: /* slimit_clause_opt ::= SLIMIT NK_INTEGER */
+ case 437: /* limit_clause_opt ::= LIMIT NK_INTEGER */ yytestcase(yyruleno==437);
+{ yymsp[-1].minor.yy686 = createLimitNode(pCxt, &yymsp[0].minor.yy0, NULL); }
break;
- case 435: /* slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */
- case 439: /* limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ yytestcase(yyruleno==439);
-{ yymsp[-3].minor.yy172 = createLimitNode(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); }
+ case 434: /* slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */
+ case 438: /* limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ yytestcase(yyruleno==438);
+{ yymsp[-3].minor.yy686 = createLimitNode(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); }
break;
- case 436: /* slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */
- case 440: /* limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ yytestcase(yyruleno==440);
-{ yymsp[-3].minor.yy172 = createLimitNode(pCxt, &yymsp[0].minor.yy0, &yymsp[-2].minor.yy0); }
+ case 435: /* slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */
+ case 439: /* limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ yytestcase(yyruleno==439);
+{ yymsp[-3].minor.yy686 = createLimitNode(pCxt, &yymsp[0].minor.yy0, &yymsp[-2].minor.yy0); }
break;
- case 441: /* subquery ::= NK_LP query_expression NK_RP */
-{ yylhsminor.yy172 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-1].minor.yy172); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+ case 440: /* subquery ::= NK_LP query_expression NK_RP */
+{ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-1].minor.yy686); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
- case 445: /* sort_specification ::= expression ordering_specification_opt null_ordering_opt */
-{ yylhsminor.yy172 = createOrderByExprNode(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy172), yymsp[-1].minor.yy14, yymsp[0].minor.yy17); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+ case 444: /* sort_specification ::= expression ordering_specification_opt null_ordering_opt */
+{ yylhsminor.yy686 = createOrderByExprNode(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), yymsp[-1].minor.yy428, yymsp[0].minor.yy109); }
+ yymsp[-2].minor.yy686 = yylhsminor.yy686;
break;
- case 446: /* ordering_specification_opt ::= */
-{ yymsp[1].minor.yy14 = ORDER_ASC; }
+ case 445: /* ordering_specification_opt ::= */
+{ yymsp[1].minor.yy428 = ORDER_ASC; }
break;
- case 447: /* ordering_specification_opt ::= ASC */
-{ yymsp[0].minor.yy14 = ORDER_ASC; }
+ case 446: /* ordering_specification_opt ::= ASC */
+{ yymsp[0].minor.yy428 = ORDER_ASC; }
break;
- case 448: /* ordering_specification_opt ::= DESC */
-{ yymsp[0].minor.yy14 = ORDER_DESC; }
+ case 447: /* ordering_specification_opt ::= DESC */
+{ yymsp[0].minor.yy428 = ORDER_DESC; }
break;
- case 449: /* null_ordering_opt ::= */
-{ yymsp[1].minor.yy17 = NULL_ORDER_DEFAULT; }
+ case 448: /* null_ordering_opt ::= */
+{ yymsp[1].minor.yy109 = NULL_ORDER_DEFAULT; }
break;
- case 450: /* null_ordering_opt ::= NULLS FIRST */
-{ yymsp[-1].minor.yy17 = NULL_ORDER_FIRST; }
+ case 449: /* null_ordering_opt ::= NULLS FIRST */
+{ yymsp[-1].minor.yy109 = NULL_ORDER_FIRST; }
break;
- case 451: /* null_ordering_opt ::= NULLS LAST */
-{ yymsp[-1].minor.yy17 = NULL_ORDER_LAST; }
+ case 450: /* null_ordering_opt ::= NULLS LAST */
+{ yymsp[-1].minor.yy109 = NULL_ORDER_LAST; }
break;
default:
break;
/********** End reduce actions ************************************************/
};
- assert( yyrulenocreateTableBuilder("information_schema", "user_streams", TSDB_SYSTEM_TABLE, 1)
- .addColumn("stream_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN);
- builder.done();
- }
{
ITableBuilder& builder = mcs->createTableBuilder("information_schema", "user_tables", TSDB_SYSTEM_TABLE, 2)
.addColumn("db_name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN)
@@ -106,6 +101,11 @@ void generatePerformanceSchema(MockCatalogService* mcs) {
.addColumn("id", TSDB_DATA_TYPE_INT);
builder.done();
}
+ {
+ ITableBuilder& builder = mcs->createTableBuilder("performance_schema", "streams", TSDB_SYSTEM_TABLE, 1)
+ .addColumn("stream_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN);
+ builder.done();
+ }
}
/*
@@ -154,6 +154,13 @@ void generateTestST1(MockCatalogService* mcs) {
builder.done();
mcs->createSubTable("test", "st1", "st1s1", 1);
mcs->createSubTable("test", "st1", "st1s2", 2);
+ mcs->createSubTable("test", "st1", "st1s3", 1);
+}
+
+void generateFunctions(MockCatalogService* mcs) {
+ mcs->createFunction("udf1", TSDB_FUNC_TYPE_SCALAR, TSDB_DATA_TYPE_INT, tDataTypes[TSDB_DATA_TYPE_INT].bytes, 0);
+ mcs->createFunction("udf2", TSDB_FUNC_TYPE_AGGREGATE, TSDB_DATA_TYPE_DOUBLE, tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes,
+ 8);
}
} // namespace
@@ -162,17 +169,17 @@ int32_t __catalogGetHandle(const char* clusterId, struct SCatalog** catalogHandl
int32_t __catalogGetTableMeta(struct SCatalog* pCatalog, void* pRpc, const SEpSet* pMgmtEps, const SName* pTableName,
STableMeta** pTableMeta) {
- return mockCatalogService->catalogGetTableMeta(pTableName, pTableMeta);
+ return g_mockCatalogService->catalogGetTableMeta(pTableName, pTableMeta);
}
int32_t __catalogGetTableHashVgroup(struct SCatalog* pCatalog, void* pRpc, const SEpSet* pMgmtEps,
const SName* pTableName, SVgroupInfo* vgInfo) {
- return mockCatalogService->catalogGetTableHashVgroup(pTableName, vgInfo);
+ return g_mockCatalogService->catalogGetTableHashVgroup(pTableName, vgInfo);
}
int32_t __catalogGetTableDistVgInfo(SCatalog* pCtg, void* pRpc, const SEpSet* pMgmtEps, const SName* pTableName,
SArray** pVgList) {
- return mockCatalogService->catalogGetTableDistVgInfo(pTableName, pVgList);
+ return g_mockCatalogService->catalogGetTableDistVgInfo(pTableName, pVgList);
}
int32_t __catalogGetDBVgVersion(SCatalog* pCtg, const char* dbFName, int32_t* version, int64_t* dbId,
@@ -181,8 +188,8 @@ int32_t __catalogGetDBVgVersion(SCatalog* pCtg, const char* dbFName, int32_t* ve
}
int32_t __catalogGetDBVgInfo(SCatalog* pCtg, void* pRpc, const SEpSet* pMgmtEps, const char* dbFName,
- SArray** vgroupList) {
- return 0;
+ SArray** pVgList) {
+ return g_mockCatalogService->catalogGetDBVgInfo(dbFName, pVgList);
}
int32_t __catalogGetDBCfg(SCatalog* pCtg, void* pRpc, const SEpSet* pMgmtEps, const char* dbFName, SDbCfgInfo* pDbCfg) {
@@ -195,8 +202,13 @@ int32_t __catalogChkAuth(SCatalog* pCtg, void* pRpc, const SEpSet* pMgmtEps, con
return 0;
}
+int32_t __catalogGetUdfInfo(SCatalog* pCtg, void* pTrans, const SEpSet* pMgmtEps, const char* funcName,
+ SFuncInfo* pInfo) {
+ return g_mockCatalogService->catalogGetUdfInfo(funcName, pInfo);
+}
+
void initMetaDataEnv() {
- mockCatalogService.reset(new MockCatalogService());
+ g_mockCatalogService.reset(new MockCatalogService());
static Stub stub;
stub.set(catalogGetHandle, __catalogGetHandle);
@@ -208,6 +220,7 @@ void initMetaDataEnv() {
stub.set(catalogGetDBVgInfo, __catalogGetDBVgInfo);
stub.set(catalogGetDBCfg, __catalogGetDBCfg);
stub.set(catalogChkAuth, __catalogChkAuth);
+ stub.set(catalogGetUdfInfo, __catalogGetUdfInfo);
// {
// AddrAny any("libcatalog.so");
// std::map result;
@@ -251,11 +264,12 @@ void initMetaDataEnv() {
}
void generateMetaData() {
- generateInformationSchema(mockCatalogService.get());
- generatePerformanceSchema(mockCatalogService.get());
- generateTestT1(mockCatalogService.get());
- generateTestST1(mockCatalogService.get());
- mockCatalogService->showTables();
+ generateInformationSchema(g_mockCatalogService.get());
+ generatePerformanceSchema(g_mockCatalogService.get());
+ generateTestT1(g_mockCatalogService.get());
+ generateTestST1(g_mockCatalogService.get());
+ generateFunctions(g_mockCatalogService.get());
+ g_mockCatalogService->showTables();
}
-void destroyMetaDataEnv() { mockCatalogService.reset(); }
+void destroyMetaDataEnv() { g_mockCatalogService.reset(); }
diff --git a/source/libs/parser/test/mockCatalogService.cpp b/source/libs/parser/test/mockCatalogService.cpp
index f86cecb9e3399bf6b5b55c59adcc6b99e1950468..4834d2d37711d537d09d0e1b12e2bd8dc9697827 100644
--- a/source/libs/parser/test/mockCatalogService.cpp
+++ b/source/libs/parser/test/mockCatalogService.cpp
@@ -18,12 +18,13 @@
#include
#include
#include
+#include
#include "tdatablock.h"
#include "tname.h"
#include "ttypes.h"
-std::unique_ptr mockCatalogService;
+std::unique_ptr g_mockCatalogService;
class TableBuilder : public ITableBuilder {
public:
@@ -120,6 +121,57 @@ class MockCatalogServiceImpl {
return copyTableVgroup(db, tNameGetTableName(pTableName), vgList);
}
+ int32_t catalogGetDBVgInfo(const char* pDbFName, SArray** pVgList) const {
+ std::string dbFName(pDbFName);
+ DbMetaCache::const_iterator it = meta_.find(dbFName.substr(std::string(pDbFName).find_last_of('.') + 1));
+ if (meta_.end() == it) {
+ return TSDB_CODE_FAILED;
+ }
+ std::set vgSet;
+ *pVgList = taosArrayInit(it->second.size(), sizeof(SVgroupInfo));
+ for (const auto& vgs : it->second) {
+ for (const auto& vg : vgs.second->vgs) {
+ if (0 == vgSet.count(vg.vgId)) {
+ taosArrayPush(*pVgList, &vg);
+ vgSet.insert(vg.vgId);
+ }
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+ }
+
+ int32_t catalogGetUdfInfo(const std::string& funcName, SFuncInfo* pInfo) const {
+ auto it = udf_.find(funcName);
+ if (udf_.end() == it) {
+ return TSDB_CODE_FAILED;
+ }
+ memcpy(pInfo, it->second.get(), sizeof(SFuncInfo));
+ return TSDB_CODE_SUCCESS;
+ }
+
+ int32_t catalogGetAllMeta(const SCatalogReq* pCatalogReq, SMetaData* pMetaData) const {
+ int32_t code = getAllTableMeta(pCatalogReq->pTableMeta, &pMetaData->pTableMeta);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = getAllTableVgroup(pCatalogReq->pTableHash, &pMetaData->pTableHash);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = getAllDbVgroup(pCatalogReq->pDbVgroup, &pMetaData->pDbVgroup);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = getAllDbCfg(pCatalogReq->pDbCfg, &pMetaData->pDbCfg);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = getAllDbInfo(pCatalogReq->pDbInfo, &pMetaData->pDbInfo);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = getAllUserAuth(pCatalogReq->pUser, &pMetaData->pUser);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = getAllUdf(pCatalogReq->pUdf, &pMetaData->pUdfList);
+ }
+ return code;
+ }
+
TableBuilder& createTableBuilder(const std::string& db, const std::string& tbname, int8_t tableType,
int32_t numOfColumns, int32_t numOfTags) {
builder_ = TableBuilder::createTableBuilder(tableType, numOfColumns, numOfTags);
@@ -155,9 +207,9 @@ class MockCatalogServiceImpl {
// number of backward fills
#define NOB(n) ((n) % 2 ? (n) / 2 + 1 : (n) / 2)
// center aligned
-#define CA(n, s) \
- std::setw(NOF((n) - (s).length())) << "" << (s) << std::setw(NOB((n) - (s).length())) << "" \
- << "|"
+#define CA(n, s) \
+ std::setw(NOF((n) - int((s).length()))) << "" << (s) << std::setw(NOB((n) - int((s).length()))) << "" \
+ << "|"
// string field length
#define SFL 20
// string field header
@@ -203,21 +255,23 @@ class MockCatalogServiceImpl {
}
}
- std::shared_ptr getTableMeta(const std::string& db, const std::string& tbname) const {
- DbMetaCache::const_iterator it = meta_.find(db);
- if (meta_.end() == it) {
- return std::shared_ptr();
- }
- TableMetaCache::const_iterator tit = it->second.find(tbname);
- if (it->second.end() == tit) {
- return std::shared_ptr();
- }
- return tit->second;
+ void createFunction(const std::string& func, int8_t funcType, int8_t outputType, int32_t outputLen, int32_t bufSize) {
+ std::shared_ptr info(new SFuncInfo);
+ strcpy(info->name, func.c_str());
+ info->funcType = funcType;
+ info->scriptType = TSDB_FUNC_SCRIPT_BIN_LIB;
+ info->outputType = outputType;
+ info->outputLen = outputLen;
+ info->bufSize = bufSize;
+ info->pCode = nullptr;
+ info->pComment = nullptr;
+ udf_.insert(std::make_pair(func, info));
}
private:
typedef std::map> TableMetaCache;
typedef std::map DbMetaCache;
+ typedef std::map> UdfMetaCache;
std::string toDbname(const std::string& dbFullName) const {
std::string::size_type n = dbFullName.find(".");
@@ -300,9 +354,128 @@ class MockCatalogServiceImpl {
return TSDB_CODE_SUCCESS;
}
+ std::shared_ptr getTableMeta(const std::string& db, const std::string& tbname) const {
+ DbMetaCache::const_iterator it = meta_.find(db);
+ if (meta_.end() == it) {
+ return std::shared_ptr();
+ }
+ TableMetaCache::const_iterator tit = it->second.find(tbname);
+ if (it->second.end() == tit) {
+ return std::shared_ptr();
+ }
+ return tit->second;
+ }
+
+ int32_t getAllTableMeta(SArray* pTableMetaReq, SArray** pTableMetaData) const {
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (NULL != pTableMetaReq) {
+ int32_t ntables = taosArrayGetSize(pTableMetaReq);
+ *pTableMetaData = taosArrayInit(ntables, POINTER_BYTES);
+ for (int32_t i = 0; i < ntables; ++i) {
+ STableMeta* pMeta = NULL;
+ code = catalogGetTableMeta((const SName*)taosArrayGet(pTableMetaReq, i), &pMeta);
+ if (TSDB_CODE_SUCCESS == code) {
+ taosArrayPush(*pTableMetaData, &pMeta);
+ } else {
+ break;
+ }
+ }
+ }
+ return code;
+ }
+
+ int32_t getAllTableVgroup(SArray* pTableVgroupReq, SArray** pTableVgroupData) const {
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (NULL != pTableVgroupReq) {
+ int32_t ntables = taosArrayGetSize(pTableVgroupReq);
+ *pTableVgroupData = taosArrayInit(ntables, sizeof(SVgroupInfo));
+ for (int32_t i = 0; i < ntables; ++i) {
+ SVgroupInfo vgInfo = {0};
+ code = catalogGetTableHashVgroup((const SName*)taosArrayGet(pTableVgroupReq, i), &vgInfo);
+ if (TSDB_CODE_SUCCESS == code) {
+ taosArrayPush(*pTableVgroupData, &vgInfo);
+ } else {
+ break;
+ }
+ }
+ }
+ return code;
+ }
+
+ int32_t getAllDbVgroup(SArray* pDbVgroupReq, SArray** pDbVgroupData) const {
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (NULL != pDbVgroupReq) {
+ int32_t ndbs = taosArrayGetSize(pDbVgroupReq);
+ *pDbVgroupData = taosArrayInit(ndbs, POINTER_BYTES);
+ for (int32_t i = 0; i < ndbs; ++i) {
+ int64_t zeroVg = 0;
+ taosArrayPush(*pDbVgroupData, &zeroVg);
+ }
+ }
+ return code;
+ }
+
+ int32_t getAllDbCfg(SArray* pDbCfgReq, SArray** pDbCfgData) const {
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (NULL != pDbCfgReq) {
+ int32_t ndbs = taosArrayGetSize(pDbCfgReq);
+ *pDbCfgData = taosArrayInit(ndbs, sizeof(SDbCfgInfo));
+ for (int32_t i = 0; i < ndbs; ++i) {
+ SDbCfgInfo dbCfg = {0};
+ taosArrayPush(*pDbCfgData, &dbCfg);
+ }
+ }
+ return code;
+ }
+
+ int32_t getAllDbInfo(SArray* pDbInfoReq, SArray** pDbInfoData) const {
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (NULL != pDbInfoReq) {
+ int32_t ndbs = taosArrayGetSize(pDbInfoReq);
+ *pDbInfoData = taosArrayInit(ndbs, sizeof(SDbCfgInfo));
+ for (int32_t i = 0; i < ndbs; ++i) {
+ SDbInfo dbInfo = {0};
+ taosArrayPush(*pDbInfoData, &dbInfo);
+ }
+ }
+ return code;
+ }
+
+ int32_t getAllUserAuth(SArray* pUserAuthReq, SArray** pUserAuthData) const {
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (NULL != pUserAuthReq) {
+ int32_t num = taosArrayGetSize(pUserAuthReq);
+ *pUserAuthData = taosArrayInit(num, sizeof(bool));
+ for (int32_t i = 0; i < num; ++i) {
+ bool pass = true;
+ taosArrayPush(*pUserAuthData, &pass);
+ }
+ }
+ return code;
+ }
+
+ int32_t getAllUdf(SArray* pUdfReq, SArray** pUdfData) const {
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (NULL != pUdfReq) {
+ int32_t num = taosArrayGetSize(pUdfReq);
+ *pUdfData = taosArrayInit(num, sizeof(SFuncInfo));
+ for (int32_t i = 0; i < num; ++i) {
+ SFuncInfo info = {0};
+ code = catalogGetUdfInfo((char*)taosArrayGet(pUdfReq, i), &info);
+ if (TSDB_CODE_SUCCESS == code) {
+ taosArrayPush(*pUdfData, &info);
+ } else {
+ break;
+ }
+ }
+ }
+ return code;
+ }
+
uint64_t id_;
std::unique_ptr builder_;
DbMetaCache meta_;
+ UdfMetaCache udf_;
};
MockCatalogService::MockCatalogService() : impl_(new MockCatalogServiceImpl()) {}
@@ -321,9 +494,9 @@ void MockCatalogService::createSubTable(const std::string& db, const std::string
void MockCatalogService::showTables() const { impl_->showTables(); }
-std::shared_ptr MockCatalogService::getTableMeta(const std::string& db,
- const std::string& tbname) const {
- return impl_->getTableMeta(db, tbname);
+void MockCatalogService::createFunction(const std::string& func, int8_t funcType, int8_t outputType, int32_t outputLen,
+ int32_t bufSize) {
+ impl_->createFunction(func, funcType, outputType, outputLen, bufSize);
}
int32_t MockCatalogService::catalogGetTableMeta(const SName* pTableName, STableMeta** pTableMeta) const {
@@ -337,3 +510,15 @@ int32_t MockCatalogService::catalogGetTableHashVgroup(const SName* pTableName, S
int32_t MockCatalogService::catalogGetTableDistVgInfo(const SName* pTableName, SArray** pVgList) const {
return impl_->catalogGetTableDistVgInfo(pTableName, pVgList);
}
+
+int32_t MockCatalogService::catalogGetDBVgInfo(const char* pDbFName, SArray** pVgList) const {
+ return impl_->catalogGetDBVgInfo(pDbFName, pVgList);
+}
+
+int32_t MockCatalogService::catalogGetUdfInfo(const std::string& funcName, SFuncInfo* pInfo) const {
+ return impl_->catalogGetUdfInfo(funcName, pInfo);
+}
+
+int32_t MockCatalogService::catalogGetAllMeta(const SCatalogReq* pCatalogReq, SMetaData* pMetaData) const {
+ return impl_->catalogGetAllMeta(pCatalogReq, pMetaData);
+}
diff --git a/source/libs/parser/test/mockCatalogService.h b/source/libs/parser/test/mockCatalogService.h
index edfc40dbc2114611707276d34bbc491714152b26..133a355c591f80f130fd8fe47d444780b88cc660 100644
--- a/source/libs/parser/test/mockCatalogService.h
+++ b/source/libs/parser/test/mockCatalogService.h
@@ -56,16 +56,19 @@ class MockCatalogService {
int32_t numOfColumns, int32_t numOfTags = 0);
void createSubTable(const std::string& db, const std::string& stbname, const std::string& tbname, int16_t vgid);
void showTables() const;
- std::shared_ptr getTableMeta(const std::string& db, const std::string& tbname) const;
+ void createFunction(const std::string& func, int8_t funcType, int8_t outputType, int32_t outputLen, int32_t bufSize);
int32_t catalogGetTableMeta(const SName* pTableName, STableMeta** pTableMeta) const;
int32_t catalogGetTableHashVgroup(const SName* pTableName, SVgroupInfo* vgInfo) const;
int32_t catalogGetTableDistVgInfo(const SName* pTableName, SArray** pVgList) const;
+ int32_t catalogGetDBVgInfo(const char* pDbFName, SArray** pVgList) const;
+ int32_t catalogGetUdfInfo(const std::string& funcName, SFuncInfo* pInfo) const;
+ int32_t catalogGetAllMeta(const SCatalogReq* pCatalogReq, SMetaData* pMetaData) const;
private:
std::unique_ptr impl_;
};
-extern std::unique_ptr mockCatalogService;
+extern std::unique_ptr g_mockCatalogService;
#endif // MOCK_CATALOG_SERVICE_H
diff --git a/source/libs/parser/test/parInitialCTest.cpp b/source/libs/parser/test/parInitialCTest.cpp
index abcb6bca8bc96f99b2fec79d2813e01524edbf6a..65d5194936811a856ef7e36de2f249e0e8bda63b 100644
--- a/source/libs/parser/test/parInitialCTest.cpp
+++ b/source/libs/parser/test/parInitialCTest.cpp
@@ -90,6 +90,7 @@ TEST_F(ParserInitialCTest, createDatabase) {
expect.walLevel = TSDB_DEFAULT_WAL_LEVEL;
expect.numOfVgroups = TSDB_DEFAULT_VN_PER_DB;
expect.numOfStables = TSDB_DEFAULT_DB_SINGLE_STABLE;
+ expect.schemaless = TSDB_DEFAULT_DB_SCHEMALESS;
};
auto setDbBufferFunc = [&](int32_t buffer) { expect.buffer = buffer; };
@@ -124,6 +125,7 @@ TEST_F(ParserInitialCTest, createDatabase) {
taosArrayPush(expect.pRetensions, &retention);
++expect.numOfRetensions;
};
+ auto setDbSchemalessFunc = [&](int8_t schemaless) { expect.schemaless = schemaless; };
setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) {
ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_CREATE_DATABASE_STMT);
@@ -149,6 +151,7 @@ TEST_F(ParserInitialCTest, createDatabase) {
ASSERT_EQ(req.replications, expect.replications);
ASSERT_EQ(req.strict, expect.strict);
ASSERT_EQ(req.cacheLastRow, expect.cacheLastRow);
+ ASSERT_EQ(req.schemaless, expect.schemaless);
ASSERT_EQ(req.ignoreExist, expect.ignoreExist);
ASSERT_EQ(req.numOfRetensions, expect.numOfRetensions);
if (expect.numOfRetensions > 0) {
@@ -188,6 +191,7 @@ TEST_F(ParserInitialCTest, createDatabase) {
setDbWalLevelFunc(2);
setDbVgroupsFunc(100);
setDbSingleStableFunc(1);
+ setDbSchemalessFunc(1);
run("CREATE DATABASE IF NOT EXISTS wxy_db "
"BUFFER 64 "
"CACHELAST 2 "
@@ -205,7 +209,8 @@ TEST_F(ParserInitialCTest, createDatabase) {
"STRICT 1 "
"WAL 2 "
"VGROUPS 100 "
- "SINGLE_STABLE 1 ");
+ "SINGLE_STABLE 1 "
+ "SCHEMALESS 1");
setCreateDbReqFunc("wxy_db", 1);
setDbDaysFunc(100);
@@ -223,7 +228,44 @@ TEST_F(ParserInitialCTest, createDnode) {
run("CREATE DNODE 1.1.1.1 PORT 9000");
}
-// todo CREATE FUNCTION
+// CREATE [AGGREGATE] FUNCTION [IF NOT EXISTS] func_name AS library_path OUTPUTTYPE type_name [BUFSIZE value]
+TEST_F(ParserInitialCTest, createFunction) {
+ useDb("root", "test");
+
+ SCreateFuncReq expect = {0};
+
+ auto setCreateFuncReqFunc = [&](const char* pUdfName, int8_t outputType, int32_t outputBytes = 0,
+ int8_t funcType = TSDB_FUNC_TYPE_SCALAR, int8_t igExists = 0, int32_t bufSize = 0) {
+ memset(&expect, 0, sizeof(SCreateFuncReq));
+ strcpy(expect.name, pUdfName);
+ expect.igExists = igExists;
+ expect.funcType = funcType;
+ expect.scriptType = TSDB_FUNC_SCRIPT_BIN_LIB;
+ expect.outputType = outputType;
+ expect.outputLen = outputBytes > 0 ? outputBytes : tDataTypes[outputType].bytes;
+ expect.bufSize = bufSize;
+ };
+
+ setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) {
+ ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_CREATE_FUNCTION_STMT);
+ SCreateFuncReq req = {0};
+ ASSERT_TRUE(TSDB_CODE_SUCCESS == tDeserializeSCreateFuncReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req));
+
+ ASSERT_EQ(std::string(req.name), std::string(expect.name));
+ ASSERT_EQ(req.igExists, expect.igExists);
+ ASSERT_EQ(req.funcType, expect.funcType);
+ ASSERT_EQ(req.scriptType, expect.scriptType);
+ ASSERT_EQ(req.outputType, expect.outputType);
+ ASSERT_EQ(req.outputLen, expect.outputLen);
+ ASSERT_EQ(req.bufSize, expect.bufSize);
+ });
+
+ setCreateFuncReqFunc("udf1", TSDB_DATA_TYPE_INT);
+ // run("CREATE FUNCTION udf1 AS './build/lib/libudf1.so' OUTPUTTYPE INT");
+
+ setCreateFuncReqFunc("udf2", TSDB_DATA_TYPE_DOUBLE, 0, TSDB_FUNC_TYPE_AGGREGATE, 1, 8);
+ // run("CREATE AGGREGATE FUNCTION IF NOT EXISTS udf2 AS './build/lib/libudf2.so' OUTPUTTYPE DOUBLE BUFSIZE 8");
+}
TEST_F(ParserInitialCTest, createIndexSma) {
useDb("root", "test");
@@ -256,14 +298,12 @@ TEST_F(ParserInitialCTest, createStable) {
auto setCreateStbReqFunc = [&](const char* pTbname, int8_t igExists = 0,
float xFilesFactor = TSDB_DEFAULT_ROLLUP_FILE_FACTOR,
- int32_t delay = TSDB_DEFAULT_ROLLUP_DELAY, int32_t ttl = TSDB_DEFAULT_TABLE_TTL,
- const char* pComment = nullptr) {
+ int32_t ttl = TSDB_DEFAULT_TABLE_TTL, const char* pComment = nullptr) {
memset(&expect, 0, sizeof(SMCreateStbReq));
int32_t len = snprintf(expect.name, sizeof(expect.name), "0.test.%s", pTbname);
expect.name[len] = '\0';
expect.igExists = igExists;
expect.xFilesFactor = xFilesFactor;
- expect.delay = delay;
expect.ttl = ttl;
if (nullptr != pComment) {
expect.comment = strdup(pComment);
@@ -351,7 +391,7 @@ TEST_F(ParserInitialCTest, createStable) {
addFieldToCreateStbReqFunc(false, "id", TSDB_DATA_TYPE_INT);
run("CREATE STABLE t1(ts TIMESTAMP, c1 INT) TAGS(id INT)");
- setCreateStbReqFunc("t1", 1, 0.1, 2, 100, "test create table");
+ setCreateStbReqFunc("t1", 1, 0.1, 100, "test create table");
addFieldToCreateStbReqFunc(true, "ts", TSDB_DATA_TYPE_TIMESTAMP, 0, 0);
addFieldToCreateStbReqFunc(true, "c1", TSDB_DATA_TYPE_INT);
addFieldToCreateStbReqFunc(true, "c2", TSDB_DATA_TYPE_UINT);
@@ -389,7 +429,7 @@ TEST_F(ParserInitialCTest, createStable) {
"TAGS (a1 TIMESTAMP, a2 INT, a3 INT UNSIGNED, a4 BIGINT, a5 BIGINT UNSIGNED, a6 FLOAT, a7 DOUBLE, "
"a8 BINARY(20), a9 SMALLINT, a10 SMALLINT UNSIGNED COMMENT 'test column comment', a11 TINYINT, "
"a12 TINYINT UNSIGNED, a13 BOOL, a14 NCHAR(30), a15 VARCHAR(50)) "
- "TTL 100 COMMENT 'test create table' SMA(c1, c2, c3) ROLLUP (MIN) FILE_FACTOR 0.1 DELAY 2");
+ "TTL 100 COMMENT 'test create table' SMA(c1, c2, c3) ROLLUP (MIN) FILE_FACTOR 0.1");
}
TEST_F(ParserInitialCTest, createStream) {
@@ -422,7 +462,7 @@ TEST_F(ParserInitialCTest, createTable) {
"TAGS (a1 TIMESTAMP, a2 INT, a3 INT UNSIGNED, a4 BIGINT, a5 BIGINT UNSIGNED, a6 FLOAT, a7 DOUBLE, a8 BINARY(20), "
"a9 SMALLINT, a10 SMALLINT UNSIGNED COMMENT 'test column comment', a11 TINYINT, a12 TINYINT UNSIGNED, a13 BOOL, "
"a14 NCHAR(30), a15 VARCHAR(50)) "
- "TTL 100 COMMENT 'test create table' SMA(c1, c2, c3) ROLLUP (MIN) FILE_FACTOR 0.1 DELAY 2");
+ "TTL 100 COMMENT 'test create table' SMA(c1, c2, c3) ROLLUP (MIN) FILE_FACTOR 0.1");
run("CREATE TABLE IF NOT EXISTS t1 USING st1 TAGS(1, 'wxy')");
@@ -435,13 +475,62 @@ TEST_F(ParserInitialCTest, createTable) {
TEST_F(ParserInitialCTest, createTopic) {
useDb("root", "test");
+ SCMCreateTopicReq expect = {0};
+
+ auto setCreateTopicReqFunc = [&](const char* pTopicName, int8_t igExists, const char* pSql, const char* pAst,
+ const char* pDbName = nullptr, const char* pTbname = nullptr) {
+ memset(&expect, 0, sizeof(SMCreateStbReq));
+ snprintf(expect.name, sizeof(expect.name), "0.%s", pTopicName);
+ expect.igExists = igExists;
+ expect.sql = (char*)pSql;
+ if (nullptr != pTbname) {
+ expect.subType = TOPIC_SUB_TYPE__TABLE;
+ snprintf(expect.subStbName, sizeof(expect.subStbName), "0.%s.%s", pDbName, pTbname);
+ } else if (nullptr != pAst) {
+ expect.subType = TOPIC_SUB_TYPE__COLUMN;
+ expect.ast = (char*)pAst;
+ } else {
+ expect.subType = TOPIC_SUB_TYPE__DB;
+ snprintf(expect.subDbName, sizeof(expect.subDbName), "0.%s", pDbName);
+ }
+ };
+
+ setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) {
+ ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_CREATE_TOPIC_STMT);
+ SCMCreateTopicReq req = {0};
+ ASSERT_TRUE(TSDB_CODE_SUCCESS ==
+ tDeserializeSCMCreateTopicReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req));
+
+ ASSERT_EQ(std::string(req.name), std::string(expect.name));
+ ASSERT_EQ(req.igExists, expect.igExists);
+ ASSERT_EQ(req.subType, expect.subType);
+ ASSERT_EQ(std::string(req.sql), std::string(expect.sql));
+ switch (expect.subType) {
+ case TOPIC_SUB_TYPE__DB:
+ ASSERT_EQ(std::string(req.subDbName), std::string(expect.subDbName));
+ break;
+ case TOPIC_SUB_TYPE__TABLE:
+ ASSERT_EQ(std::string(req.subStbName), std::string(expect.subStbName));
+ break;
+ case TOPIC_SUB_TYPE__COLUMN:
+ ASSERT_NE(req.ast, nullptr);
+ break;
+ default:
+ ASSERT_TRUE(false);
+ }
+ });
+
+ setCreateTopicReqFunc("tp1", 0, "create topic tp1 as select * from t1", "ast");
run("CREATE TOPIC tp1 AS SELECT * FROM t1");
- run("CREATE TOPIC IF NOT EXISTS tp1 AS SELECT * FROM t1");
+ setCreateTopicReqFunc("tp1", 1, "create topic if not exists tp1 as select ts, ceil(c1) from t1", "ast");
+ run("CREATE TOPIC IF NOT EXISTS tp1 AS SELECT ts, CEIL(c1) FROM t1");
- run("CREATE TOPIC tp1 AS test");
+ setCreateTopicReqFunc("tp1", 0, "create topic tp1 as database test", nullptr, "test");
+ run("CREATE TOPIC tp1 AS DATABASE test");
- run("CREATE TOPIC IF NOT EXISTS tp1 AS test");
+ setCreateTopicReqFunc("tp1", 1, "create topic if not exists tp1 as stable st1", nullptr, "test", "st1");
+ run("CREATE TOPIC IF NOT EXISTS tp1 AS STABLE st1");
}
TEST_F(ParserInitialCTest, createUser) {
diff --git a/source/libs/parser/test/parInitialDTest.cpp b/source/libs/parser/test/parInitialDTest.cpp
index 1153b238b1feb1be8167c91acb0bf7f7267a391f..5ad427d964ad1dc47a4fed64b51f89257ae53da6 100644
--- a/source/libs/parser/test/parInitialDTest.cpp
+++ b/source/libs/parser/test/parInitialDTest.cpp
@@ -19,7 +19,7 @@ using namespace std;
namespace ParserTest {
-class ParserInitialDTest : public ParserTestBase {};
+class ParserInitialDTest : public ParserDdlTest {};
// todo delete
// todo desc
@@ -29,7 +29,37 @@ class ParserInitialDTest : public ParserTestBase {};
TEST_F(ParserInitialDTest, dropBnode) {
useDb("root", "test");
- run("drop bnode on dnode 1");
+ run("DROP BNODE ON DNODE 1");
+}
+
+// DROP CONSUMER GROUP [ IF EXISTS ] cgroup_name ON topic_name
+TEST_F(ParserInitialDTest, dropCGroup) {
+ useDb("root", "test");
+
+ SMDropCgroupReq expect = {0};
+
+ auto setDropCgroupReqFunc = [&](const char* pTopicName, const char* pCGroupName, int8_t igNotExists = 0) {
+ memset(&expect, 0, sizeof(SMDropCgroupReq));
+ snprintf(expect.topic, sizeof(expect.topic), "0.%s", pTopicName);
+ strcpy(expect.cgroup, pCGroupName);
+ expect.igNotExists = igNotExists;
+ };
+
+ setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) {
+ ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_DROP_CGROUP_STMT);
+ SMDropCgroupReq req = {0};
+ ASSERT_TRUE(TSDB_CODE_SUCCESS == tDeserializeSMDropCgroupReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req));
+
+ ASSERT_EQ(std::string(req.topic), std::string(expect.topic));
+ ASSERT_EQ(std::string(req.cgroup), std::string(expect.cgroup));
+ ASSERT_EQ(req.igNotExists, expect.igNotExists);
+ });
+
+ setDropCgroupReqFunc("tp1", "cg1");
+ run("DROP CONSUMER GROUP cg1 ON tp1");
+
+ setDropCgroupReqFunc("tp1", "cg1", 1);
+ run("DROP CONSUMER GROUP IF EXISTS cg1 ON tp1");
}
// todo drop database
@@ -73,6 +103,7 @@ TEST_F(ParserInitialDTest, dropTopic) {
}
TEST_F(ParserInitialDTest, dropUser) {
+ login("root");
useDb("root", "test");
run("drop user wxy");
diff --git a/source/libs/parser/test/parInsertTest.cpp b/source/libs/parser/test/parInsertTest.cpp
index 7fafec88824111ef8b170ba25f3b092fd7ba1f1a..4d313fca766e8ab8f8d6ba404f7faf2fe833e9e6 100644
--- a/source/libs/parser/test/parInsertTest.cpp
+++ b/source/libs/parser/test/parInsertTest.cpp
@@ -15,6 +15,7 @@
#include
+#include "mockCatalogService.h"
#include "os.h"
#include "parInt.h"
@@ -57,6 +58,38 @@ class InsertTest : public Test {
return code_;
}
+ int32_t runAsync() {
+ code_ = parseInsertSyntax(&cxt_, &res_);
+ if (code_ != TSDB_CODE_SUCCESS) {
+ cout << "parseInsertSyntax code:" << toString(code_) << ", msg:" << errMagBuf_ << endl;
+ return code_;
+ }
+
+ SCatalogReq catalogReq = {0};
+ code_ = buildCatalogReq(res_->pMetaCache, &catalogReq);
+ if (code_ != TSDB_CODE_SUCCESS) {
+ cout << "buildCatalogReq code:" << toString(code_) << ", msg:" << errMagBuf_ << endl;
+ return code_;
+ }
+
+ SMetaData metaData = {0};
+ g_mockCatalogService->catalogGetAllMeta(&catalogReq, &metaData);
+
+ code_ = putMetaDataToCache(&catalogReq, &metaData, res_->pMetaCache);
+ if (code_ != TSDB_CODE_SUCCESS) {
+ cout << "putMetaDataToCache code:" << toString(code_) << ", msg:" << errMagBuf_ << endl;
+ return code_;
+ }
+
+ code_ = parseInsertSql(&cxt_, &res_);
+ if (code_ != TSDB_CODE_SUCCESS) {
+ cout << "parseInsertSql code:" << toString(code_) << ", msg:" << errMagBuf_ << endl;
+ return code_;
+ }
+
+ return code_;
+ }
+
void dumpReslut() {
SVnodeModifOpStmt* pStmt = getVnodeModifStmt(res_);
size_t num = taosArrayGetSize(pStmt->pDataBlocks);
@@ -125,7 +158,7 @@ class InsertTest : public Test {
SQuery* res_;
};
-// INSERT INTO tb_name VALUES (field1_value, ...)
+// INSERT INTO tb_name [(field1_name, ...)] VALUES (field1_value, ...)
TEST_F(InsertTest, singleTableSingleRowTest) {
setDatabase("root", "test");
@@ -133,6 +166,17 @@ TEST_F(InsertTest, singleTableSingleRowTest) {
ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
dumpReslut();
checkReslut(1, 1);
+
+ bind("insert into t1 (ts, c1, c2, c3, c4, c5) values (now, 1, 'beijing', 3, 4, 5)");
+ ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
+
+ bind("insert into t1 values (now, 1, 'beijing', 3, 4, 5)");
+ ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
+ dumpReslut();
+ checkReslut(1, 1);
+
+ bind("insert into t1 (ts, c1, c2, c3, c4, c5) values (now, 1, 'beijing', 3, 4, 5)");
+ ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
}
// INSERT INTO tb_name VALUES (field1_value, ...)(field1_value, ...)
@@ -140,11 +184,16 @@ TEST_F(InsertTest, singleTableMultiRowTest) {
setDatabase("root", "test");
bind(
- "insert into t1 values (now, 1, 'beijing', 3, 4, 5)(now+1s, 2, 'shanghai', 6, 7, 8)(now+2s, 3, 'guangzhou', 9, "
- "10, 11)");
+ "insert into t1 values (now, 1, 'beijing', 3, 4, 5)(now+1s, 2, 'shanghai', 6, 7, 8)"
+ "(now+2s, 3, 'guangzhou', 9, 10, 11)");
ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
dumpReslut();
checkReslut(1, 3);
+
+ bind(
+ "insert into t1 values (now, 1, 'beijing', 3, 4, 5)(now+1s, 2, 'shanghai', 6, 7, 8)"
+ "(now+2s, 3, 'guangzhou', 9, 10, 11)");
+ ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
}
// INSERT INTO tb1_name VALUES (field1_value, ...) tb2_name VALUES (field1_value, ...)
@@ -155,6 +204,9 @@ TEST_F(InsertTest, multiTableSingleRowTest) {
ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
dumpReslut();
checkReslut(2, 1);
+
+ bind("insert into st1s1 values (now, 1, \"beijing\") st1s2 values (now, 10, \"131028\")");
+ ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
}
// INSERT INTO tb1_name VALUES (field1_value, ...) tb2_name VALUES (field1_value, ...)
@@ -167,6 +219,11 @@ TEST_F(InsertTest, multiTableMultiRowTest) {
ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
dumpReslut();
checkReslut(2, 3, 2);
+
+ bind(
+ "insert into st1s1 values (now, 1, \"beijing\")(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")"
+ " st1s2 values (now, 10, \"131028\")(now+1s, 20, \"132028\")");
+ ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
}
// INSERT INTO
@@ -181,6 +238,21 @@ TEST_F(InsertTest, autoCreateTableTest) {
ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
dumpReslut();
checkReslut(1, 3);
+
+ bind(
+ "insert into st1s1 using st1 (tag1, tag2) tags(1, 'wxy') values (now, 1, \"beijing\")"
+ "(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")");
+ ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
+
+ bind(
+ "insert into st1s1 using st1 tags(1, 'wxy') values (now, 1, \"beijing\")(now+1s, 2, \"shanghai\")(now+2s, 3, "
+ "\"guangzhou\")");
+ ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
+
+ bind(
+ "insert into st1s1 using st1 (tag1, tag2) tags(1, 'wxy') values (now, 1, \"beijing\")"
+ "(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")");
+ ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
}
TEST_F(InsertTest, toleranceTest) {
@@ -190,4 +262,9 @@ TEST_F(InsertTest, toleranceTest) {
ASSERT_NE(run(), TSDB_CODE_SUCCESS);
bind("insert into t");
ASSERT_NE(run(), TSDB_CODE_SUCCESS);
+
+ bind("insert into");
+ ASSERT_NE(runAsync(), TSDB_CODE_SUCCESS);
+ bind("insert into t");
+ ASSERT_NE(runAsync(), TSDB_CODE_SUCCESS);
}
diff --git a/source/libs/parser/test/parSelectTest.cpp b/source/libs/parser/test/parSelectTest.cpp
index b68ef2c591e0497c6f32a9ce69c9e1f229b5f92f..a5192595f0be83afa459429748dab3d8e9b65c4e 100644
--- a/source/libs/parser/test/parSelectTest.cpp
+++ b/source/libs/parser/test/parSelectTest.cpp
@@ -44,6 +44,8 @@ TEST_F(ParserSelectTest, constant) {
"timestamp '2022-02-09 17:30:20', true, false, 15s FROM t1");
run("SELECT 123 + 45 FROM t1 WHERE 2 - 1");
+
+ run("SELECT * FROM t1 WHERE -2");
}
TEST_F(ParserSelectTest, expression) {
@@ -76,6 +78,12 @@ TEST_F(ParserSelectTest, pseudoColumnSemanticCheck) {
run("SELECT TBNAME FROM (SELECT * FROM st1s1)", TSDB_CODE_PAR_INVALID_TBNAME, PARSER_STAGE_TRANSLATE);
}
+TEST_F(ParserSelectTest, aggFunc) {
+ useDb("root", "test");
+
+ run("SELECT LEASTSQUARES(c1, -1, 1) FROM t1");
+}
+
TEST_F(ParserSelectTest, multiResFunc) {
useDb("root", "test");
@@ -121,13 +129,13 @@ TEST_F(ParserSelectTest, selectFunc) {
run("SELECT MAX(c1), c2 FROM t1 STATE_WINDOW(c3)");
}
-TEST_F(ParserSelectTest, nonstdFunc) {
+TEST_F(ParserSelectTest, IndefiniteRowsFunc) {
useDb("root", "test");
run("SELECT DIFF(c1) FROM t1");
}
-TEST_F(ParserSelectTest, nonstdFuncSemanticCheck) {
+TEST_F(ParserSelectTest, IndefiniteRowsFuncSemanticCheck) {
useDb("root", "test");
run("SELECT DIFF(c1), c2 FROM t1", TSDB_CODE_PAR_NOT_ALLOWED_FUNC, PARSER_STAGE_TRANSLATE);
@@ -141,6 +149,14 @@ TEST_F(ParserSelectTest, nonstdFuncSemanticCheck) {
// run("SELECT DIFF(c1) FROM t1 INTERVAL(10s)");
}
+TEST_F(ParserSelectTest, useDefinedFunc) {
+ useDb("root", "test");
+
+ run("SELECT udf1(c1) FROM t1");
+
+ run("SELECT udf2(c1) FROM t1 GROUP BY c2");
+}
+
TEST_F(ParserSelectTest, groupBy) {
useDb("root", "test");
@@ -236,6 +252,8 @@ TEST_F(ParserSelectTest, semanticError) {
// TSDB_CODE_PAR_AMBIGUOUS_COLUMN
run("SELECT c2 FROM t1 tt1, t1 tt2 WHERE tt1.c1 = tt2.c1", TSDB_CODE_PAR_AMBIGUOUS_COLUMN, PARSER_STAGE_TRANSLATE);
+ run("SELECT c2 FROM (SELECT c1 c2, c2 FROM t1)", TSDB_CODE_PAR_AMBIGUOUS_COLUMN, PARSER_STAGE_TRANSLATE);
+
// TSDB_CODE_PAR_WRONG_VALUE_TYPE
run("SELECT timestamp '2010a' FROM t1", TSDB_CODE_PAR_WRONG_VALUE_TYPE, PARSER_STAGE_TRANSLATE);
diff --git a/source/libs/parser/test/parTestMain.cpp b/source/libs/parser/test/parTestMain.cpp
index ebc83fb21981e56666b82ec6a5a08a63cd7f0c87..820b8cca3cdc02633982a3ea797aa605db1e3fd3 100644
--- a/source/libs/parser/test/parTestMain.cpp
+++ b/source/libs/parser/test/parTestMain.cpp
@@ -37,6 +37,7 @@ class ParserEnv : public testing::Environment {
virtual void SetUp() {
initMetaDataEnv();
generateMetaData();
+ initLog(TD_TMP_DIR_PATH "td");
}
virtual void TearDown() {
@@ -47,16 +48,55 @@ class ParserEnv : public testing::Environment {
ParserEnv() {}
virtual ~ParserEnv() {}
+
+ private:
+ void initLog(const char* path) {
+ int32_t logLevel = getLogLevel();
+ dDebugFlag = logLevel;
+ vDebugFlag = logLevel;
+ mDebugFlag = logLevel;
+ cDebugFlag = logLevel;
+ jniDebugFlag = logLevel;
+ tmrDebugFlag = logLevel;
+ uDebugFlag = logLevel;
+ rpcDebugFlag = logLevel;
+ qDebugFlag = logLevel;
+ wDebugFlag = logLevel;
+ sDebugFlag = logLevel;
+ tsdbDebugFlag = logLevel;
+ tsLogEmbedded = 1;
+ tsAsyncLog = 0;
+
+ taosRemoveDir(path);
+ taosMkDir(path);
+ tstrncpy(tsLogDir, path, PATH_MAX);
+ if (taosInitLog("taoslog", 1) != 0) {
+ std::cout << "failed to init log file" << std::endl;
+ }
+ }
};
static void parseArg(int argc, char* argv[]) {
- int opt = 0;
- const char* optstring = "";
- static struct option long_options[] = {{"dump", no_argument, NULL, 'd'}, {0, 0, 0, 0}};
+ int opt = 0;
+ const char* optstring = "";
+ // clang-format off
+ static struct option long_options[] = {
+ {"dump", no_argument, NULL, 'd'},
+ {"async", required_argument, NULL, 'a'},
+ {"skipSql", required_argument, NULL, 's'},
+ {0, 0, 0, 0}
+ };
+ // clang-format on
while ((opt = getopt_long(argc, argv, optstring, long_options, NULL)) != -1) {
switch (opt) {
case 'd':
- g_isDump = true;
+ g_dump = true;
+ break;
+ case 'a':
+ setAsyncFlag(optarg);
+ break;
+ case 's':
+ setSkipSqlNum(optarg);
break;
default:
break;
diff --git a/source/libs/parser/test/parTestUtil.cpp b/source/libs/parser/test/parTestUtil.cpp
index 250ac1c52885f10d45a4ef96321d410f115b9255..fab7ed35b1cb408a5cdd6f455994da07a26596fd 100644
--- a/source/libs/parser/test/parTestUtil.cpp
+++ b/source/libs/parser/test/parTestUtil.cpp
@@ -17,7 +17,10 @@
#include
#include
+#include
+#include "catalog.h"
+#include "mockCatalogService.h"
#include "parInt.h"
using namespace std;
@@ -41,22 +44,40 @@ namespace ParserTest {
} \
} while (0);
-bool g_isDump = false;
+bool g_dump = false;
+bool g_testAsyncApis = true;
+int32_t g_logLevel = 131;
+int32_t g_skipSql = 0;
+
+void setAsyncFlag(const char* pFlag) { g_testAsyncApis = stoi(pFlag) > 0 ? true : false; }
+void setSkipSqlNum(const char* pNum) { g_skipSql = stoi(pNum); }
struct TerminateFlag : public exception {
const char* what() const throw() { return "success and terminate"; }
};
+void setLogLevel(const char* pLogLevel) { g_logLevel = stoi(pLogLevel); }
+
+int32_t getLogLevel() { return g_logLevel; }
+
class ParserTestBaseImpl {
public:
ParserTestBaseImpl(ParserTestBase* pBase) : pBase_(pBase) {}
+ void login(const std::string& user) { caseEnv_.user_ = user; }
+
void useDb(const string& acctId, const string& db) {
caseEnv_.acctId_ = acctId;
caseEnv_.db_ = db;
+ caseEnv_.nsql_ = g_skipSql;
}
void run(const string& sql, int32_t expect, ParserStage checkStage) {
+ if (caseEnv_.nsql_ > 0) {
+ --(caseEnv_.nsql_);
+ return;
+ }
+
reset(expect, checkStage);
try {
SParseContext cxt = {0};
@@ -65,11 +86,13 @@ class ParserTestBaseImpl {
SQuery* pQuery = nullptr;
doParse(&cxt, &pQuery);
+ doAuthenticate(&cxt, pQuery);
+
doTranslate(&cxt, pQuery);
doCalculateConstant(&cxt, pQuery);
- if (g_isDump) {
+ if (g_dump) {
dump();
}
} catch (const TerminateFlag& e) {
@@ -79,12 +102,20 @@ class ParserTestBaseImpl {
dump();
throw;
}
+
+ if (g_testAsyncApis) {
+ runAsync(sql, expect, checkStage);
+ }
}
private:
struct caseEnv {
- string acctId_;
- string db_;
+ string acctId_;
+ string user_;
+ string db_;
+ int32_t nsql_;
+
+ caseEnv() : user_("wangxiaoyu"), nsql_(0) {}
};
struct stmtEnv {
@@ -144,16 +175,19 @@ class ParserTestBaseImpl {
cout << res_.calcConstAst_ << endl;
}
- void setParseContext(const string& sql, SParseContext* pCxt) {
+ void setParseContext(const string& sql, SParseContext* pCxt, bool async = false) {
stmtEnv_.sql_ = sql;
transform(stmtEnv_.sql_.begin(), stmtEnv_.sql_.end(), stmtEnv_.sql_.begin(), ::tolower);
pCxt->acctId = atoi(caseEnv_.acctId_.c_str());
pCxt->db = caseEnv_.db_.c_str();
+ pCxt->pUser = caseEnv_.user_.c_str();
+ pCxt->isSuperUser = caseEnv_.user_ == "root";
pCxt->pSql = stmtEnv_.sql_.c_str();
pCxt->sqlLen = stmtEnv_.sql_.length();
pCxt->pMsg = stmtEnv_.msgBuf_.data();
pCxt->msgLen = stmtEnv_.msgBuf_.max_size();
+ pCxt->async = async;
}
void doParse(SParseContext* pCxt, SQuery** pQuery) {
@@ -162,6 +196,25 @@ class ParserTestBaseImpl {
res_.parsedAst_ = toString((*pQuery)->pRoot);
}
+ void doCollectMetaKey(SParseContext* pCxt, SQuery* pQuery) {
+ DO_WITH_THROW(collectMetaKey, pCxt, pQuery);
+ ASSERT_NE(pQuery->pMetaCache, nullptr);
+ }
+
+ void doBuildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) {
+ DO_WITH_THROW(buildCatalogReq, pMetaCache, pCatalogReq);
+ }
+
+ void doGetAllMeta(const SCatalogReq* pCatalogReq, SMetaData* pMetaData) {
+ DO_WITH_THROW(g_mockCatalogService->catalogGetAllMeta, pCatalogReq, pMetaData);
+ }
+
+ void doPutMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache) {
+ DO_WITH_THROW(putMetaDataToCache, pCatalogReq, pMetaData, pMetaCache);
+ }
+
+ void doAuthenticate(SParseContext* pCxt, SQuery* pQuery) { DO_WITH_THROW(authenticate, pCxt, pQuery); }
+
void doTranslate(SParseContext* pCxt, SQuery* pQuery) {
DO_WITH_THROW(translate, pCxt, pQuery);
checkQuery(pQuery, PARSER_STAGE_TRANSLATE);
@@ -184,6 +237,59 @@ class ParserTestBaseImpl {
void checkQuery(const SQuery* pQuery, ParserStage stage) { pBase_->checkDdl(pQuery, stage); }
+ void runAsync(const string& sql, int32_t expect, ParserStage checkStage) {
+ reset(expect, checkStage);
+ try {
+ SParseContext cxt = {0};
+ setParseContext(sql, &cxt, true);
+
+ SQuery* pQuery = nullptr;
+ doParse(&cxt, &pQuery);
+
+ doCollectMetaKey(&cxt, pQuery);
+
+ SCatalogReq catalogReq = {0};
+ doBuildCatalogReq(pQuery->pMetaCache, &catalogReq);
+
+ string err;
+ thread t1([&]() {
+ try {
+ SMetaData metaData = {0};
+ doGetAllMeta(&catalogReq, &metaData);
+
+ doPutMetaDataToCache(&catalogReq, &metaData, pQuery->pMetaCache);
+
+ doAuthenticate(&cxt, pQuery);
+
+ doTranslate(&cxt, pQuery);
+
+ doCalculateConstant(&cxt, pQuery);
+ } catch (const TerminateFlag& e) {
+ // success and terminate
+ } catch (const runtime_error& e) {
+ err = e.what();
+ } catch (...) {
+ err = "unknown error";
+ }
+ });
+
+ t1.join();
+ if (!err.empty()) {
+ throw runtime_error(err);
+ }
+
+ if (g_dump) {
+ dump();
+ }
+ } catch (const TerminateFlag& e) {
+ // success and terminate
+ return;
+ } catch (...) {
+ dump();
+ throw;
+ }
+ }
+
caseEnv caseEnv_;
stmtEnv stmtEnv_;
stmtRes res_;
@@ -194,6 +300,8 @@ ParserTestBase::ParserTestBase() : impl_(new ParserTestBaseImpl(this)) {}
ParserTestBase::~ParserTestBase() {}
+void ParserTestBase::login(const std::string& user) { return impl_->login(user); }
+
void ParserTestBase::useDb(const std::string& acctId, const std::string& db) { impl_->useDb(acctId, db); }
void ParserTestBase::run(const std::string& sql, int32_t expect, ParserStage checkStage) {
diff --git a/source/libs/parser/test/parTestUtil.h b/source/libs/parser/test/parTestUtil.h
index c7d7ead8dbc8a5d6b7a45cde0552e9e979ea07ec..44be7a24746ecde078f69555c88e4d85344b8313 100644
--- a/source/libs/parser/test/parTestUtil.h
+++ b/source/libs/parser/test/parTestUtil.h
@@ -34,6 +34,7 @@ class ParserTestBase : public testing::Test {
ParserTestBase();
virtual ~ParserTestBase();
+ void login(const std::string& user);
void useDb(const std::string& acctId, const std::string& db);
void run(const std::string& sql, int32_t expect = TSDB_CODE_SUCCESS, ParserStage checkStage = PARSER_STAGE_ALL);
@@ -63,7 +64,12 @@ class ParserDdlTest : public ParserTestBase {
std::function checkDdl_;
};
-extern bool g_isDump;
+extern bool g_dump;
+
+extern void setAsyncFlag(const char* pFlag);
+extern void setLogLevel(const char* pLogLevel);
+extern int32_t getLogLevel();
+extern void setSkipSqlNum(const char* pNum);
} // namespace ParserTest
diff --git a/source/libs/planner/inc/planInt.h b/source/libs/planner/inc/planInt.h
index 6a18a267e2e3909fa57afc3af99105c0663b5caa..1a8c7657df4abc1661e42ea6275281981ee79086 100644
--- a/source/libs/planner/inc/planInt.h
+++ b/source/libs/planner/inc/planInt.h
@@ -36,6 +36,7 @@ extern "C" {
#define planTrace(param, ...) qTrace("PLAN: " param, __VA_ARGS__)
int32_t generateUsageErrMsg(char* pBuf, int32_t len, int32_t errCode, ...);
+int32_t createColumnByRewriteExps(SNodeList* pExprs, SNodeList** pList);
int32_t createLogicPlan(SPlanContext* pCxt, SLogicNode** pLogicNode);
int32_t optimizeLogicPlan(SPlanContext* pCxt, SLogicNode* pLogicNode);
diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c
index 4e77ae5fba32314fafed0de7538056794616c7b1..1cf7ae22f9eb5e64220ae443d5353df062a148a4 100644
--- a/source/libs/planner/src/planLogicCreater.c
+++ b/source/libs/planner/src/planLogicCreater.c
@@ -124,6 +124,7 @@ static int32_t createChildLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelec
SLogicNode* pNode = NULL;
int32_t code = func(pCxt, pSelect, &pNode);
if (TSDB_CODE_SUCCESS == code && NULL != pNode) {
+ pNode->precision = pSelect->precision;
code = pushLogicNode(pCxt, pRoot, pNode);
}
if (TSDB_CODE_SUCCESS != code) {
@@ -132,56 +133,56 @@ static int32_t createChildLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelec
return code;
}
-typedef struct SCreateColumnCxt {
- int32_t errCode;
- SNodeList* pList;
-} SCreateColumnCxt;
-
-static EDealRes doCreateColumn(SNode* pNode, void* pContext) {
- SCreateColumnCxt* pCxt = (SCreateColumnCxt*)pContext;
- switch (nodeType(pNode)) {
- case QUERY_NODE_COLUMN: {
- SNode* pCol = nodesCloneNode(pNode);
- if (NULL == pCol) {
- return DEAL_RES_ERROR;
- }
- return (TSDB_CODE_SUCCESS == nodesListAppend(pCxt->pList, pCol) ? DEAL_RES_IGNORE_CHILD : DEAL_RES_ERROR);
- }
- case QUERY_NODE_OPERATOR:
- case QUERY_NODE_LOGIC_CONDITION:
- case QUERY_NODE_FUNCTION: {
- SExprNode* pExpr = (SExprNode*)pNode;
- SColumnNode* pCol = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN);
- if (NULL == pCol) {
- return DEAL_RES_ERROR;
- }
- pCol->node.resType = pExpr->resType;
- strcpy(pCol->colName, pExpr->aliasName);
- return (TSDB_CODE_SUCCESS == nodesListAppend(pCxt->pList, pCol) ? DEAL_RES_IGNORE_CHILD : DEAL_RES_ERROR);
- }
- default:
- break;
- }
-
- return DEAL_RES_CONTINUE;
-}
-
-static int32_t createColumnByRewriteExps(SLogicPlanContext* pCxt, SNodeList* pExprs, SNodeList** pList) {
- SCreateColumnCxt cxt = {.errCode = TSDB_CODE_SUCCESS, .pList = (NULL == *pList ? nodesMakeList() : *pList)};
- if (NULL == cxt.pList) {
- return TSDB_CODE_OUT_OF_MEMORY;
- }
-
- nodesWalkExprs(pExprs, doCreateColumn, &cxt);
- if (TSDB_CODE_SUCCESS != cxt.errCode) {
- nodesDestroyList(cxt.pList);
- return cxt.errCode;
- }
- if (NULL == *pList) {
- *pList = cxt.pList;
- }
- return cxt.errCode;
-}
+// typedef struct SCreateColumnCxt {
+// int32_t errCode;
+// SNodeList* pList;
+// } SCreateColumnCxt;
+
+// static EDealRes doCreateColumn(SNode* pNode, void* pContext) {
+// SCreateColumnCxt* pCxt = (SCreateColumnCxt*)pContext;
+// switch (nodeType(pNode)) {
+// case QUERY_NODE_COLUMN: {
+// SNode* pCol = nodesCloneNode(pNode);
+// if (NULL == pCol) {
+// return DEAL_RES_ERROR;
+// }
+// return (TSDB_CODE_SUCCESS == nodesListAppend(pCxt->pList, pCol) ? DEAL_RES_IGNORE_CHILD : DEAL_RES_ERROR);
+// }
+// case QUERY_NODE_OPERATOR:
+// case QUERY_NODE_LOGIC_CONDITION:
+// case QUERY_NODE_FUNCTION: {
+// SExprNode* pExpr = (SExprNode*)pNode;
+// SColumnNode* pCol = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN);
+// if (NULL == pCol) {
+// return DEAL_RES_ERROR;
+// }
+// pCol->node.resType = pExpr->resType;
+// strcpy(pCol->colName, pExpr->aliasName);
+// return (TSDB_CODE_SUCCESS == nodesListAppend(pCxt->pList, pCol) ? DEAL_RES_IGNORE_CHILD : DEAL_RES_ERROR);
+// }
+// default:
+// break;
+// }
+
+// return DEAL_RES_CONTINUE;
+// }
+
+// static int32_t createColumnByRewriteExps(SNodeList* pExprs, SNodeList** pList) {
+// SCreateColumnCxt cxt = {.errCode = TSDB_CODE_SUCCESS, .pList = (NULL == *pList ? nodesMakeList() : *pList)};
+// if (NULL == cxt.pList) {
+// return TSDB_CODE_OUT_OF_MEMORY;
+// }
+
+// nodesWalkExprs(pExprs, doCreateColumn, &cxt);
+// if (TSDB_CODE_SUCCESS != cxt.errCode) {
+// nodesDestroyList(cxt.pList);
+// return cxt.errCode;
+// }
+// if (NULL == *pList) {
+// *pList = cxt.pList;
+// }
+// return cxt.errCode;
+// }
static EScanType getScanType(SLogicPlanContext* pCxt, SNodeList* pScanPseudoCols, SNodeList* pScanCols,
STableMeta* pMeta) {
@@ -293,10 +294,10 @@ static int32_t createScanLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
// set output
if (TSDB_CODE_SUCCESS == code) {
- code = createColumnByRewriteExps(pCxt, pScan->pScanCols, &pScan->node.pTargets);
+ code = createColumnByRewriteExps(pScan->pScanCols, &pScan->node.pTargets);
}
if (TSDB_CODE_SUCCESS == code) {
- code = createColumnByRewriteExps(pCxt, pScan->pScanPseudoCols, &pScan->node.pTargets);
+ code = createColumnByRewriteExps(pScan->pScanPseudoCols, &pScan->node.pTargets);
}
if (TSDB_CODE_SUCCESS == code) {
@@ -321,6 +322,7 @@ static int32_t createJoinLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
}
pJoin->joinType = pJoinTable->joinType;
+ pJoin->isSingleTableJoin = pJoinTable->table.singleTable;
int32_t code = TSDB_CODE_SUCCESS;
@@ -399,6 +401,7 @@ static int32_t createLogicNodeByTable(SLogicPlanContext* pCxt, SSelectStmt* pSel
nodesDestroyNode(pNode);
return TSDB_CODE_OUT_OF_MEMORY;
}
+ pNode->precision = pSelect->precision;
*pLogicNode = pNode;
}
return code;
@@ -418,7 +421,7 @@ static SColumnNode* createColumnByExpr(const char* pStmtName, SExprNode* pExpr)
}
static int32_t createAggLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect, SLogicNode** pLogicNode) {
- if (!pSelect->hasAggFuncs && !pSelect->hasIndefiniteRowsFunc && NULL == pSelect->pGroupByList) {
+ if (!pSelect->hasAggFuncs && NULL == pSelect->pGroupByList) {
return TSDB_CODE_SUCCESS;
}
@@ -442,8 +445,8 @@ static int32_t createAggLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect,
code = rewriteExprForSelect(pAgg->pGroupKeys, pSelect, SQL_CLAUSE_GROUP_BY);
}
- if (TSDB_CODE_SUCCESS == code && (pSelect->hasAggFuncs || pSelect->hasIndefiniteRowsFunc)) {
- code = nodesCollectFuncs(pSelect, SQL_CLAUSE_GROUP_BY, fmIsVectorFunc, &pAgg->pAggFuncs);
+ if (TSDB_CODE_SUCCESS == code && pSelect->hasAggFuncs) {
+ code = nodesCollectFuncs(pSelect, SQL_CLAUSE_GROUP_BY, fmIsAggFunc, &pAgg->pAggFuncs);
}
// rewrite the expression in subsequent clauses
@@ -460,10 +463,10 @@ static int32_t createAggLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect,
// set the output
if (TSDB_CODE_SUCCESS == code && NULL != pAgg->pGroupKeys) {
- code = createColumnByRewriteExps(pCxt, pAgg->pGroupKeys, &pAgg->node.pTargets);
+ code = createColumnByRewriteExps(pAgg->pGroupKeys, &pAgg->node.pTargets);
}
if (TSDB_CODE_SUCCESS == code && NULL != pAgg->pAggFuncs) {
- code = createColumnByRewriteExps(pCxt, pAgg->pAggFuncs, &pAgg->node.pTargets);
+ code = createColumnByRewriteExps(pAgg->pAggFuncs, &pAgg->node.pTargets);
}
if (TSDB_CODE_SUCCESS == code) {
@@ -484,12 +487,16 @@ static int32_t createWindowLogicNodeFinalize(SLogicPlanContext* pCxt, SSelectStm
pWindow->watermark = pCxt->pPlanCxt->watermark;
}
+ if (pCxt->pPlanCxt->rSmaQuery) {
+ pWindow->filesFactor = pCxt->pPlanCxt->filesFactor;
+ }
+
if (TSDB_CODE_SUCCESS == code) {
code = rewriteExprForSelect(pWindow->pFuncs, pSelect, SQL_CLAUSE_WINDOW);
}
if (TSDB_CODE_SUCCESS == code) {
- code = createColumnByRewriteExps(pCxt, pWindow->pFuncs, &pWindow->node.pTargets);
+ code = createColumnByRewriteExps(pWindow->pFuncs, &pWindow->node.pTargets);
}
pSelect->hasAggFuncs = false;
@@ -555,6 +562,7 @@ static int32_t createWindowLogicNodeByInterval(SLogicPlanContext* pCxt, SInterva
pWindow->sliding = (NULL != pInterval->pSliding ? ((SValueNode*)pInterval->pSliding)->datum.i : pWindow->interval);
pWindow->slidingUnit =
(NULL != pInterval->pSliding ? ((SValueNode*)pInterval->pSliding)->unit : pWindow->intervalUnit);
+ pWindow->stmInterAlgo = STREAM_INTERVAL_ALGO_SINGLE;
pWindow->pTspk = nodesCloneNode(pInterval->pCol);
if (NULL == pWindow->pTspk) {
@@ -759,7 +767,7 @@ static int32_t createDistinctLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSe
// set the output
if (TSDB_CODE_SUCCESS == code) {
- code = createColumnByRewriteExps(pCxt, pAgg->pGroupKeys, &pAgg->node.pTargets);
+ code = createColumnByRewriteExps(pAgg->pGroupKeys, &pAgg->node.pTargets);
}
if (TSDB_CODE_SUCCESS == code) {
@@ -906,7 +914,7 @@ static int32_t createSetOpAggLogicNode(SLogicPlanContext* pCxt, SSetOperator* pS
// set the output
if (TSDB_CODE_SUCCESS == code) {
- code = createColumnByRewriteExps(pCxt, pAgg->pGroupKeys, &pAgg->node.pTargets);
+ code = createColumnByRewriteExps(pAgg->pGroupKeys, &pAgg->node.pTargets);
}
if (TSDB_CODE_SUCCESS == code) {
diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c
index 4d489f68e7c4ff042e6f0d0c82bbd98a6dbbfb2b..5f88fc40e54c5e000a6e4506b30a2063acfbc8f1 100644
--- a/source/libs/planner/src/planOptimizer.c
+++ b/source/libs/planner/src/planOptimizer.c
@@ -99,7 +99,8 @@ static bool osdMayBeOptimized(SLogicNode* pNode) {
return false;
}
// todo: release after function splitting
- if (TSDB_SUPER_TABLE == ((SScanLogicNode*)pNode)->pMeta->tableType) {
+ if (TSDB_SUPER_TABLE == ((SScanLogicNode*)pNode)->pMeta->tableType &&
+ SCAN_TYPE_STREAM != ((SScanLogicNode*)pNode)->scanType) {
return false;
}
if (NULL == pNode->pParent || (QUERY_NODE_LOGIC_PLAN_WINDOW != nodeType(pNode->pParent) &&
@@ -223,6 +224,10 @@ static void setScanWindowInfo(SScanLogicNode* pScan) {
pScan->sliding = ((SWindowLogicNode*)pScan->node.pParent)->sliding;
pScan->intervalUnit = ((SWindowLogicNode*)pScan->node.pParent)->intervalUnit;
pScan->slidingUnit = ((SWindowLogicNode*)pScan->node.pParent)->slidingUnit;
+ pScan->triggerType = ((SWindowLogicNode*)pScan->node.pParent)->triggerType;
+ pScan->watermark = ((SWindowLogicNode*)pScan->node.pParent)->watermark;
+ pScan->tsColId = ((SColumnNode*)((SWindowLogicNode*)pScan->node.pParent)->pTspk)->colId;
+ pScan->filesFactor = ((SWindowLogicNode*)pScan->node.pParent)->filesFactor;
}
}
diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c
index fcba2aa2d33a926b1608c03d22489bd86fdded8a..737c0fc1d557b939162e63ec3c5d4e07ea0ebb57 100644
--- a/source/libs/planner/src/planPhysiCreater.c
+++ b/source/libs/planner/src/planPhysiCreater.c
@@ -468,6 +468,7 @@ static int32_t createTagScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubpla
return TSDB_CODE_OUT_OF_MEMORY;
}
vgroupInfoToNodeAddr(pScanLogicNode->pVgroupList->vgroups, &pSubplan->execNode);
+ SQueryNodeLoad node = {.addr = pSubplan->execNode, .load = 0};
taosArrayPush(pCxt->pExecNodeList, &pSubplan->execNode);
return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pTagScan, pPhyNode);
}
@@ -489,7 +490,8 @@ static int32_t createTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubp
pSubplan->execNodeStat.tableNum = pScanLogicNode->pVgroupList->vgroups[0].numOfTable;
}
if (pCxt->pExecNodeList) {
- taosArrayPush(pCxt->pExecNodeList, &pSubplan->execNode);
+ SQueryNodeLoad node = {.addr = pSubplan->execNode, .load = 0};
+ taosArrayPush(pCxt->pExecNodeList, &node);
}
tNameGetFullDbName(&pScanLogicNode->tableName, pSubplan->dbFName);
pTableScan->dataRequired = pScanLogicNode->dataRequired;
@@ -503,6 +505,10 @@ static int32_t createTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubp
pTableScan->sliding = pScanLogicNode->sliding;
pTableScan->intervalUnit = pScanLogicNode->intervalUnit;
pTableScan->slidingUnit = pScanLogicNode->slidingUnit;
+ pTableScan->triggerType = pScanLogicNode->triggerType;
+ pTableScan->watermark = pScanLogicNode->watermark;
+ pTableScan->tsColId = pScanLogicNode->tsColId;
+ pTableScan->filesFactor = pScanLogicNode->filesFactor;
return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pTableScan, pPhyNode);
}
@@ -520,10 +526,11 @@ static int32_t createSystemTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan*
pScan->accountId = pCxt->pPlanCxt->acctId;
if (0 == strcmp(pScanLogicNode->tableName.tname, TSDB_INS_TABLE_USER_TABLES)) {
vgroupInfoToNodeAddr(pScanLogicNode->pVgroupList->vgroups, &pSubplan->execNode);
+ SQueryNodeLoad node = {.addr = pSubplan->execNode, .load = 0};
taosArrayPush(pCxt->pExecNodeList, &pSubplan->execNode);
} else {
- SQueryNodeAddr addr = {.nodeId = MNODE_HANDLE, .epSet = pCxt->pPlanCxt->mgmtEpSet};
- taosArrayPush(pCxt->pExecNodeList, &addr);
+ SQueryNodeLoad node = {.addr = {.nodeId = MNODE_HANDLE, .epSet = pCxt->pPlanCxt->mgmtEpSet}, .load = 0};
+ taosArrayPush(pCxt->pExecNodeList, &node);
}
pScan->mgmtEpSet = pCxt->pPlanCxt->mgmtEpSet;
tNameGetFullDbName(&pScanLogicNode->tableName, pSubplan->dbFName);
@@ -832,7 +839,7 @@ static int32_t createProjectPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChild
static int32_t doCreateExchangePhysiNode(SPhysiPlanContext* pCxt, SExchangeLogicNode* pExchangeLogicNode,
SPhysiNode** pPhyNode) {
SExchangePhysiNode* pExchange = (SExchangePhysiNode*)makePhysiNode(
- pCxt, pExchangeLogicNode->precision, (SLogicNode*)pExchangeLogicNode, QUERY_NODE_PHYSICAL_PLAN_EXCHANGE);
+ pCxt, pExchangeLogicNode->node.precision, (SLogicNode*)pExchangeLogicNode, QUERY_NODE_PHYSICAL_PLAN_EXCHANGE);
if (NULL == pExchange) {
return TSDB_CODE_OUT_OF_MEMORY;
}
@@ -842,10 +849,11 @@ static int32_t doCreateExchangePhysiNode(SPhysiPlanContext* pCxt, SExchangeLogic
return TSDB_CODE_SUCCESS;
}
+
static int32_t createStreamScanPhysiNodeByExchange(SPhysiPlanContext* pCxt, SExchangeLogicNode* pExchangeLogicNode,
SPhysiNode** pPhyNode) {
SScanPhysiNode* pScan = (SScanPhysiNode*)makePhysiNode(
- pCxt, pExchangeLogicNode->precision, (SLogicNode*)pExchangeLogicNode, QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN);
+ pCxt, pExchangeLogicNode->node.precision, (SLogicNode*)pExchangeLogicNode, QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN);
if (NULL == pScan) {
return TSDB_CODE_OUT_OF_MEMORY;
}
@@ -914,6 +922,7 @@ static int32_t createWindowPhysiNodeFinalize(SPhysiPlanContext* pCxt, SNodeList*
pWindow->triggerType = pWindowLogicNode->triggerType;
pWindow->watermark = pWindowLogicNode->watermark;
+ pWindow->filesFactor = pWindowLogicNode->filesFactor;
if (TSDB_CODE_SUCCESS == code) {
*pPhyNode = (SPhysiNode*)pWindow;
@@ -924,11 +933,22 @@ static int32_t createWindowPhysiNodeFinalize(SPhysiPlanContext* pCxt, SNodeList*
return code;
}
+static ENodeType getIntervalOperatorType(bool streamQuery, EStreamIntervalAlgorithm stmAlgo) {
+ if (streamQuery) {
+ return STREAM_INTERVAL_ALGO_FINAL == stmAlgo
+ ? QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL
+ : (STREAM_INTERVAL_ALGO_SEMI == stmAlgo ? QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL
+ : QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL);
+ } else {
+ return QUERY_NODE_PHYSICAL_PLAN_INTERVAL;
+ }
+}
+
static int32_t createIntervalPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren,
SWindowLogicNode* pWindowLogicNode, SPhysiNode** pPhyNode) {
SIntervalPhysiNode* pInterval = (SIntervalPhysiNode*)makePhysiNode(
pCxt, getPrecision(pChildren), (SLogicNode*)pWindowLogicNode,
- (pCxt->pPlanCxt->streamQuery ? QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL : QUERY_NODE_PHYSICAL_PLAN_INTERVAL));
+ getIntervalOperatorType(pCxt->pPlanCxt->streamQuery, pWindowLogicNode->stmInterAlgo));
if (NULL == pInterval) {
return TSDB_CODE_OUT_OF_MEMORY;
}
@@ -945,7 +965,9 @@ static int32_t createIntervalPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChil
static int32_t createSessionWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren,
SWindowLogicNode* pWindowLogicNode, SPhysiNode** pPhyNode) {
SSessionWinodwPhysiNode* pSession = (SSessionWinodwPhysiNode*)makePhysiNode(
- pCxt, getPrecision(pChildren), (SLogicNode*)pWindowLogicNode, QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW);
+ pCxt, getPrecision(pChildren), (SLogicNode*)pWindowLogicNode,
+ (pCxt->pPlanCxt->streamQuery ? QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW
+ : QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW));
if (NULL == pSession) {
return TSDB_CODE_OUT_OF_MEMORY;
}
@@ -1128,6 +1150,54 @@ static int32_t createFillPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren
return code;
}
+static int32_t createExchangePhysiNodeByMerge(SMergePhysiNode* pMerge) {
+ SExchangePhysiNode* pExchange = nodesMakeNode(QUERY_NODE_PHYSICAL_PLAN_EXCHANGE);
+ if (NULL == pExchange) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ pExchange->srcGroupId = pMerge->srcGroupId;
+ pExchange->node.pParent = (SPhysiNode*)pMerge;
+ pExchange->node.pOutputDataBlockDesc = nodesCloneNode(pMerge->node.pOutputDataBlockDesc);
+ if (NULL == pExchange->node.pOutputDataBlockDesc) {
+ nodesDestroyNode(pExchange);
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ return nodesListMakeStrictAppend(&pMerge->node.pChildren, pExchange);
+}
+
+static int32_t createMergePhysiNode(SPhysiPlanContext* pCxt, SMergeLogicNode* pMergeLogicNode, SPhysiNode** pPhyNode) {
+ SMergePhysiNode* pMerge = (SMergePhysiNode*)makePhysiNode(
+ pCxt, pMergeLogicNode->node.precision, (SLogicNode*)pMergeLogicNode, QUERY_NODE_PHYSICAL_PLAN_MERGE);
+ if (NULL == pMerge) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
+ pMerge->numOfChannels = pMergeLogicNode->numOfChannels;
+ pMerge->srcGroupId = pMergeLogicNode->srcGroupId;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+
+ for (int32_t i = 0; i < pMerge->numOfChannels; ++i) {
+ code = createExchangePhysiNodeByMerge(pMerge);
+ if (TSDB_CODE_SUCCESS != code) {
+ break;
+ }
+ }
+
+ if (TSDB_CODE_SUCCESS == code) {
+ code = setListSlotId(pCxt, pMerge->node.pOutputDataBlockDesc->dataBlockId, -1, pMergeLogicNode->pMergeKeys,
+ &pMerge->pMergeKeys);
+ }
+
+ if (TSDB_CODE_SUCCESS == code) {
+ *pPhyNode = (SPhysiNode*)pMerge;
+ } else {
+ nodesDestroyNode(pMerge);
+ }
+
+ return code;
+}
+
static int32_t doCreatePhysiNode(SPhysiPlanContext* pCxt, SLogicNode* pLogicNode, SSubplan* pSubplan,
SNodeList* pChildren, SPhysiNode** pPhyNode) {
switch (nodeType(pLogicNode)) {
@@ -1149,6 +1219,8 @@ static int32_t doCreatePhysiNode(SPhysiPlanContext* pCxt, SLogicNode* pLogicNode
return createPartitionPhysiNode(pCxt, pChildren, (SPartitionLogicNode*)pLogicNode, pPhyNode);
case QUERY_NODE_LOGIC_PLAN_FILL:
return createFillPhysiNode(pCxt, pChildren, (SFillLogicNode*)pLogicNode, pPhyNode);
+ case QUERY_NODE_LOGIC_PLAN_MERGE:
+ return createMergePhysiNode(pCxt, (SMergeLogicNode*)pLogicNode, pPhyNode);
default:
break;
}
@@ -1179,9 +1251,13 @@ static int32_t createPhysiNode(SPhysiPlanContext* pCxt, SLogicNode* pLogicNode,
}
if (TSDB_CODE_SUCCESS == code) {
- (*pPhyNode)->pChildren = pChildren;
- SNode* pChild;
- FOREACH(pChild, (*pPhyNode)->pChildren) { ((SPhysiNode*)pChild)->pParent = (*pPhyNode); }
+ if (LIST_LENGTH(pChildren) > 0) {
+ (*pPhyNode)->pChildren = pChildren;
+ SNode* pChild;
+ FOREACH(pChild, (*pPhyNode)->pChildren) { ((SPhysiNode*)pChild)->pParent = (*pPhyNode); }
+ } else {
+ nodesDestroyList(pChildren);
+ }
} else {
nodesDestroyList(pChildren);
}
@@ -1242,7 +1318,8 @@ static int32_t createPhysiSubplan(SPhysiPlanContext* pCxt, SLogicSubplan* pLogic
SVnodeModifLogicNode* pModif = (SVnodeModifLogicNode*)pLogicSubplan->pNode;
pSubplan->msgType = pModif->msgType;
pSubplan->execNode.epSet = pModif->pVgDataBlocks->vg.epSet;
- taosArrayPush(pCxt->pExecNodeList, &pSubplan->execNode);
+ SQueryNodeLoad node = {.addr = pSubplan->execNode, .load = 0};
+ taosArrayPush(pCxt->pExecNodeList, &node);
code = createDataInserter(pCxt, pModif->pVgDataBlocks, &pSubplan->pDataSink);
} else {
pSubplan->msgType = TDMT_VND_QUERY;
diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c
index 1a97d9ab1b898b9b4a5dae1d4bade0e9b6d87bb8..e3c8b82e3988bd7943815a645332920f9d31d08b 100644
--- a/source/libs/planner/src/planSpliter.c
+++ b/source/libs/planner/src/planSpliter.c
@@ -13,20 +13,21 @@
* along with this program. If not, see .
*/
+#include "functionMgt.h"
#include "planInt.h"
#define SPLIT_FLAG_MASK(n) (1 << n)
-#define SPLIT_FLAG_STS SPLIT_FLAG_MASK(0)
-#define SPLIT_FLAG_CTJ SPLIT_FLAG_MASK(1)
+#define SPLIT_FLAG_STABLE_SPLIT SPLIT_FLAG_MASK(0)
#define SPLIT_FLAG_SET_MASK(val, mask) (val) |= (mask)
#define SPLIT_FLAG_TEST_MASK(val, mask) (((val) & (mask)) != 0)
typedef struct SSplitContext {
- uint64_t queryId;
- int32_t groupId;
- bool split;
+ SPlanContext* pPlanCxt;
+ uint64_t queryId;
+ int32_t groupId;
+ bool split;
} SSplitContext;
typedef int32_t (*FSplit)(SSplitContext* pCxt, SLogicSubplan* pSubplan);
@@ -36,29 +37,19 @@ typedef struct SSplitRule {
FSplit splitFunc;
} SSplitRule;
-typedef struct SStsInfo {
- SScanLogicNode* pScan;
- SLogicSubplan* pSubplan;
-} SStsInfo;
-
-typedef struct SCtjInfo {
- SScanLogicNode* pScan;
- SLogicSubplan* pSubplan;
-} SCtjInfo;
-
-typedef struct SUaInfo {
- SProjectLogicNode* pProject;
- SLogicSubplan* pSubplan;
-} SUaInfo;
+typedef bool (*FSplFindSplitNode)(SSplitContext* pCxt, SLogicSubplan* pSubplan, void* pInfo);
-typedef struct SUnInfo {
- SAggLogicNode* pAgg;
- SLogicSubplan* pSubplan;
-} SUnInfo;
-
-typedef bool (*FSplFindSplitNode)(SLogicSubplan* pSubplan, void* pInfo);
+static void splSetSubplanVgroups(SLogicSubplan* pSubplan, SLogicNode* pNode) {
+ if (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pNode)) {
+ TSWAP(pSubplan->pVgroupList, ((SScanLogicNode*)pNode)->pVgroupList);
+ } else {
+ if (1 == LIST_LENGTH(pNode->pChildren)) {
+ splSetSubplanVgroups(pSubplan, (SLogicNode*)nodesListGetNode(pNode->pChildren, 0));
+ }
+ }
+}
-static SLogicSubplan* splCreateScanSubplan(SSplitContext* pCxt, SScanLogicNode* pScan, int32_t flag) {
+static SLogicSubplan* splCreateScanSubplan(SSplitContext* pCxt, SLogicNode* pNode, int32_t flag) {
SLogicSubplan* pSubplan = nodesMakeNode(QUERY_NODE_LOGIC_SUBPLAN);
if (NULL == pSubplan) {
return NULL;
@@ -66,37 +57,48 @@ static SLogicSubplan* splCreateScanSubplan(SSplitContext* pCxt, SScanLogicNode*
pSubplan->id.queryId = pCxt->queryId;
pSubplan->id.groupId = pCxt->groupId;
pSubplan->subplanType = SUBPLAN_TYPE_SCAN;
- pSubplan->pNode = (SLogicNode*)nodesCloneNode(pScan);
- TSWAP(pSubplan->pVgroupList, ((SScanLogicNode*)pSubplan->pNode)->pVgroupList);
+ pSubplan->pNode = pNode;
+ pSubplan->pNode->pParent = NULL;
+ splSetSubplanVgroups(pSubplan, pNode);
SPLIT_FLAG_SET_MASK(pSubplan->splitFlag, flag);
return pSubplan;
}
-static int32_t splCreateExchangeNode(SSplitContext* pCxt, SLogicSubplan* pSubplan, SScanLogicNode* pScan,
- ESubplanType subplanType) {
+static int32_t splCreateExchangeNode(SSplitContext* pCxt, SLogicNode* pChild, SExchangeLogicNode** pOutput) {
SExchangeLogicNode* pExchange = nodesMakeNode(QUERY_NODE_LOGIC_PLAN_EXCHANGE);
if (NULL == pExchange) {
return TSDB_CODE_OUT_OF_MEMORY;
}
pExchange->srcGroupId = pCxt->groupId;
- pExchange->precision = pScan->pMeta->tableInfo.precision;
- pExchange->node.pTargets = nodesCloneList(pScan->node.pTargets);
+ pExchange->node.precision = pChild->precision;
+ pExchange->node.pTargets = nodesCloneList(pChild->pTargets);
if (NULL == pExchange->node.pTargets) {
return TSDB_CODE_OUT_OF_MEMORY;
}
- pSubplan->subplanType = SUBPLAN_TYPE_MERGE;
+ *pOutput = pExchange;
+ return TSDB_CODE_SUCCESS;
+}
- if (NULL == pScan->node.pParent) {
+static int32_t splCreateExchangeNodeForSubplan(SSplitContext* pCxt, SLogicSubplan* pSubplan, SLogicNode* pSplitNode,
+ ESubplanType subplanType) {
+ SExchangeLogicNode* pExchange = NULL;
+ if (TSDB_CODE_SUCCESS != splCreateExchangeNode(pCxt, pSplitNode, &pExchange)) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
+ pSubplan->subplanType = subplanType;
+
+ if (NULL == pSplitNode->pParent) {
pSubplan->pNode = (SLogicNode*)pExchange;
return TSDB_CODE_SUCCESS;
}
SNode* pNode;
- FOREACH(pNode, pScan->node.pParent->pChildren) {
- if (nodesEqualNode(pNode, pScan)) {
+ FOREACH(pNode, pSplitNode->pParent->pChildren) {
+ if (nodesEqualNode(pNode, pSplitNode)) {
REPLACE_NODE(pExchange);
- nodesDestroyNode(pNode);
+ pExchange->node.pParent = pSplitNode->pParent;
return TSDB_CODE_SUCCESS;
}
}
@@ -106,7 +108,7 @@ static int32_t splCreateExchangeNode(SSplitContext* pCxt, SLogicSubplan* pSubpla
static bool splMatch(SSplitContext* pCxt, SLogicSubplan* pSubplan, int32_t flag, FSplFindSplitNode func, void* pInfo) {
if (!SPLIT_FLAG_TEST_MASK(pSubplan->splitFlag, flag)) {
- if (func(pSubplan, pInfo)) {
+ if (func(pCxt, pSubplan, pInfo)) {
return true;
}
}
@@ -119,14 +121,62 @@ static bool splMatch(SSplitContext* pCxt, SLogicSubplan* pSubplan, int32_t flag,
return false;
}
-static SLogicNode* stsMatchByNode(SLogicNode* pNode) {
- if (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pNode) && NULL != ((SScanLogicNode*)pNode)->pVgroupList &&
- ((SScanLogicNode*)pNode)->pVgroupList->numOfVgroups > 1) {
+typedef struct SStableSplitInfo {
+ SLogicNode* pSplitNode;
+ SLogicSubplan* pSubplan;
+} SStableSplitInfo;
+
+static bool stbSplHasGatherExecFunc(const SNodeList* pFuncs) {
+ SNode* pFunc = NULL;
+ FOREACH(pFunc, pFuncs) {
+ if (!fmIsDistExecFunc(((SFunctionNode*)pFunc)->funcId)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool stbSplIsMultiTbScan(bool streamQuery, SScanLogicNode* pScan) {
+ return (NULL != pScan->pVgroupList && pScan->pVgroupList->numOfVgroups > 1) ||
+ (streamQuery && TSDB_SUPER_TABLE == pScan->pMeta->tableType);
+}
+
+static bool stbSplHasMultiTbScan(bool streamQuery, SLogicNode* pNode) {
+ if (1 != LIST_LENGTH(pNode->pChildren)) {
+ return false;
+ }
+ SNode* pChild = nodesListGetNode(pNode->pChildren, 0);
+ return (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pChild) && stbSplIsMultiTbScan(streamQuery, (SScanLogicNode*)pChild));
+}
+
+static bool stbSplNeedSplit(bool streamQuery, SLogicNode* pNode) {
+ switch (nodeType(pNode)) {
+ // case QUERY_NODE_LOGIC_PLAN_AGG:
+ // return !stbSplHasGatherExecFunc(((SAggLogicNode*)pNode)->pAggFuncs) && stbSplHasMultiTbScan(pNode);
+ case QUERY_NODE_LOGIC_PLAN_WINDOW: {
+ SWindowLogicNode* pWindow = (SWindowLogicNode*)pNode;
+ if (WINDOW_TYPE_INTERVAL != pWindow->winType) {
+ return false;
+ }
+ return !stbSplHasGatherExecFunc(pWindow->pFuncs) && stbSplHasMultiTbScan(streamQuery, pNode);
+ }
+ // case QUERY_NODE_LOGIC_PLAN_SORT:
+ // return stbSplHasMultiTbScan(pNode);
+ case QUERY_NODE_LOGIC_PLAN_SCAN:
+ return stbSplIsMultiTbScan(streamQuery, (SScanLogicNode*)pNode);
+ default:
+ break;
+ }
+ return false;
+}
+
+static SLogicNode* stbSplMatchByNode(bool streamQuery, SLogicNode* pNode) {
+ if (stbSplNeedSplit(streamQuery, pNode)) {
return pNode;
}
SNode* pChild;
FOREACH(pChild, pNode->pChildren) {
- SLogicNode* pSplitNode = stsMatchByNode((SLogicNode*)pChild);
+ SLogicNode* pSplitNode = stbSplMatchByNode(streamQuery, (SLogicNode*)pChild);
if (NULL != pSplitNode) {
return pSplitNode;
}
@@ -134,47 +184,244 @@ static SLogicNode* stsMatchByNode(SLogicNode* pNode) {
return NULL;
}
-static bool stsFindSplitNode(SLogicSubplan* pSubplan, SStsInfo* pInfo) {
- SLogicNode* pSplitNode = stsMatchByNode(pSubplan->pNode);
+static bool stbSplFindSplitNode(SSplitContext* pCxt, SLogicSubplan* pSubplan, SStableSplitInfo* pInfo) {
+ SLogicNode* pSplitNode = stbSplMatchByNode(pCxt->pPlanCxt->streamQuery, pSubplan->pNode);
if (NULL != pSplitNode) {
- pInfo->pScan = (SScanLogicNode*)pSplitNode;
+ pInfo->pSplitNode = pSplitNode;
pInfo->pSubplan = pSubplan;
}
return NULL != pSplitNode;
}
-static int32_t stsSplit(SSplitContext* pCxt, SLogicSubplan* pSubplan) {
- SStsInfo info = {0};
- if (!splMatch(pCxt, pSubplan, SPLIT_FLAG_STS, (FSplFindSplitNode)stsFindSplitNode, &info)) {
- return TSDB_CODE_SUCCESS;
+static int32_t stbSplRewriteFuns(const SNodeList* pFuncs, SNodeList** pPartialFuncs, SNodeList** pMergeFuncs) {
+ SNode* pNode = NULL;
+ FOREACH(pNode, pFuncs) {
+ SFunctionNode* pFunc = (SFunctionNode*)pNode;
+ SFunctionNode* pPartFunc = NULL;
+ SFunctionNode* pMergeFunc = NULL;
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (fmIsWindowPseudoColumnFunc(pFunc->funcId)) {
+ pPartFunc = nodesCloneNode(pFunc);
+ pMergeFunc = nodesCloneNode(pFunc);
+ if (NULL == pPartFunc || NULL == pMergeFunc) {
+ nodesDestroyNode(pPartFunc);
+ nodesDestroyNode(pMergeFunc);
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ }
+ } else {
+ code = fmGetDistMethod(pFunc, &pPartFunc, &pMergeFunc);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = nodesListMakeStrictAppend(pPartialFuncs, pPartFunc);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = nodesListMakeStrictAppend(pMergeFuncs, pMergeFunc);
+ }
+ if (TSDB_CODE_SUCCESS != code) {
+ nodesDestroyList(*pPartialFuncs);
+ nodesDestroyList(*pMergeFuncs);
+ return code;
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t stbSplAppendWStart(SNodeList* pFuncs, int32_t* pIndex) {
+ int32_t index = 0;
+ SNode* pFunc = NULL;
+ FOREACH(pFunc, pFuncs) {
+ if (FUNCTION_TYPE_WSTARTTS == ((SFunctionNode*)pFunc)->funcType) {
+ *pIndex = index;
+ return TSDB_CODE_SUCCESS;
+ }
+ ++index;
+ }
+
+ SFunctionNode* pWStart = nodesMakeNode(QUERY_NODE_FUNCTION);
+ if (NULL == pWStart) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ strcpy(pWStart->functionName, "_wstartts");
+ snprintf(pWStart->node.aliasName, sizeof(pWStart->node.aliasName), "%s.%p", pWStart->functionName, pWStart);
+ int32_t code = fmGetFuncInfo(pWStart, NULL, 0);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = nodesListStrictAppend(pFuncs, pWStart);
+ }
+ *pIndex = index;
+ return code;
+}
+
+static int32_t stbSplCreatePartWindowNode(SWindowLogicNode* pMergeWindow, SLogicNode** pPartWindow) {
+ SNodeList* pFunc = pMergeWindow->pFuncs;
+ pMergeWindow->pFuncs = NULL;
+ SNodeList* pTargets = pMergeWindow->node.pTargets;
+ pMergeWindow->node.pTargets = NULL;
+ SNodeList* pChildren = pMergeWindow->node.pChildren;
+ pMergeWindow->node.pChildren = NULL;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ SWindowLogicNode* pPartWin = nodesCloneNode(pMergeWindow);
+ if (NULL == pPartWin) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ }
+
+ if (TSDB_CODE_SUCCESS == code) {
+ pMergeWindow->node.pTargets = pTargets;
+ pPartWin->node.pChildren = pChildren;
+ code = stbSplRewriteFuns(pFunc, &pPartWin->pFuncs, &pMergeWindow->pFuncs);
+ }
+ int32_t index = 0;
+ if (TSDB_CODE_SUCCESS == code) {
+ code = stbSplAppendWStart(pPartWin->pFuncs, &index);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = createColumnByRewriteExps(pPartWin->pFuncs, &pPartWin->node.pTargets);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ nodesDestroyNode(pMergeWindow->pTspk);
+ pMergeWindow->pTspk = nodesCloneNode(nodesListGetNode(pPartWin->node.pTargets, index));
+ if (NULL == pMergeWindow->pTspk) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+
+ nodesDestroyList(pFunc);
+ if (TSDB_CODE_SUCCESS == code) {
+ *pPartWindow = (SLogicNode*)pPartWin;
+ } else {
+ nodesDestroyNode(pPartWin);
+ }
+
+ return code;
+}
+
+static int32_t stbSplCreateMergeNode(SSplitContext* pCxt, SLogicNode* pParent, SLogicNode* pPartChild) {
+ SMergeLogicNode* pMerge = nodesMakeNode(QUERY_NODE_LOGIC_PLAN_MERGE);
+ if (NULL == pMerge) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ pMerge->numOfChannels = ((SScanLogicNode*)nodesListGetNode(pPartChild->pChildren, 0))->pVgroupList->numOfVgroups;
+ pMerge->srcGroupId = pCxt->groupId;
+ pMerge->node.pParent = pParent;
+ pMerge->node.precision = pPartChild->precision;
+ int32_t code = nodesListMakeStrictAppend(&pMerge->pMergeKeys, nodesCloneNode(((SWindowLogicNode*)pParent)->pTspk));
+ if (TSDB_CODE_SUCCESS == code) {
+ pMerge->node.pTargets = nodesCloneList(pPartChild->pTargets);
+ if (NULL == pMerge->node.pTargets) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = nodesListMakeAppend(&pParent->pChildren, pMerge);
+ }
+
+ return code;
+}
+
+static int32_t stbSplSplitWindowNodeForBatch(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
+ SLogicNode* pPartWindow = NULL;
+ int32_t code = stbSplCreatePartWindowNode((SWindowLogicNode*)pInfo->pSplitNode, &pPartWindow);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = stbSplCreateMergeNode(pCxt, pInfo->pSplitNode, pPartWindow);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = nodesListMakeStrictAppend(&pInfo->pSubplan->pChildren,
+ splCreateScanSubplan(pCxt, pPartWindow, SPLIT_FLAG_STABLE_SPLIT));
+ }
+ pInfo->pSubplan->subplanType = SUBPLAN_TYPE_MERGE;
+ return code;
+}
+
+static int32_t stbSplCreateExchangeNode(SSplitContext* pCxt, SLogicNode* pParent, SLogicNode* pPartChild) {
+ SExchangeLogicNode* pExchange = NULL;
+ int32_t code = splCreateExchangeNode(pCxt, pPartChild, &pExchange);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = nodesListMakeAppend(&pParent->pChildren, pExchange);
+ }
+ return code;
+}
+
+static int32_t stbSplSplitWindowNodeForStream(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
+ SLogicNode* pPartWindow = NULL;
+ int32_t code = stbSplCreatePartWindowNode((SWindowLogicNode*)pInfo->pSplitNode, &pPartWindow);
+ if (TSDB_CODE_SUCCESS == code) {
+ ((SWindowLogicNode*)pPartWindow)->stmInterAlgo = STREAM_INTERVAL_ALGO_SEMI;
+ ((SWindowLogicNode*)pInfo->pSplitNode)->stmInterAlgo = STREAM_INTERVAL_ALGO_FINAL;
+ code = stbSplCreateExchangeNode(pCxt, pInfo->pSplitNode, pPartWindow);
}
- int32_t code =
- nodesListMakeStrictAppend(&info.pSubplan->pChildren, splCreateScanSubplan(pCxt, info.pScan, SPLIT_FLAG_STS));
if (TSDB_CODE_SUCCESS == code) {
- code = splCreateExchangeNode(pCxt, info.pSubplan, info.pScan, SUBPLAN_TYPE_MERGE);
+ code = nodesListMakeStrictAppend(&pInfo->pSubplan->pChildren,
+ splCreateScanSubplan(pCxt, pPartWindow, SPLIT_FLAG_STABLE_SPLIT));
}
+ pInfo->pSubplan->subplanType = SUBPLAN_TYPE_MERGE;
+ return code;
+}
+
+static int32_t stbSplSplitWindowNode(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
+ if (pCxt->pPlanCxt->streamQuery) {
+ return stbSplSplitWindowNodeForStream(pCxt, pInfo);
+ } else {
+ return stbSplSplitWindowNodeForBatch(pCxt, pInfo);
+ }
+}
+
+static int32_t stbSplSplitScanNode(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
+ int32_t code = splCreateExchangeNodeForSubplan(pCxt, pInfo->pSubplan, pInfo->pSplitNode, SUBPLAN_TYPE_MERGE);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = nodesListMakeStrictAppend(&pInfo->pSubplan->pChildren,
+ splCreateScanSubplan(pCxt, pInfo->pSplitNode, SPLIT_FLAG_STABLE_SPLIT));
+ }
+ return code;
+}
+
+static int32_t stableSplit(SSplitContext* pCxt, SLogicSubplan* pSubplan) {
+ if (pCxt->pPlanCxt->rSmaQuery) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ SStableSplitInfo info = {0};
+ if (!splMatch(pCxt, pSubplan, SPLIT_FLAG_STABLE_SPLIT, (FSplFindSplitNode)stbSplFindSplitNode, &info)) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ switch (nodeType(info.pSplitNode)) {
+ case QUERY_NODE_LOGIC_PLAN_WINDOW:
+ code = stbSplSplitWindowNode(pCxt, &info);
+ break;
+ case QUERY_NODE_LOGIC_PLAN_SCAN:
+ code = stbSplSplitScanNode(pCxt, &info);
+ break;
+ default:
+ break;
+ }
+
++(pCxt->groupId);
pCxt->split = true;
return code;
}
-static bool ctjIsSingleTable(int8_t tableType) {
- return (TSDB_CHILD_TABLE == tableType || TSDB_NORMAL_TABLE == tableType);
+typedef struct SSigTbJoinSplitInfo {
+ SJoinLogicNode* pJoin;
+ SLogicNode* pSplitNode;
+ SLogicSubplan* pSubplan;
+} SSigTbJoinSplitInfo;
+
+static bool sigTbJoinSplNeedSplit(SJoinLogicNode* pJoin) {
+ if (!pJoin->isSingleTableJoin) {
+ return false;
+ }
+ return QUERY_NODE_LOGIC_PLAN_EXCHANGE != nodeType(nodesListGetNode(pJoin->node.pChildren, 0)) &&
+ QUERY_NODE_LOGIC_PLAN_EXCHANGE != nodeType(nodesListGetNode(pJoin->node.pChildren, 1));
}
-static SLogicNode* ctjMatchByNode(SLogicNode* pNode) {
- if (QUERY_NODE_LOGIC_PLAN_JOIN == nodeType(pNode)) {
- SLogicNode* pLeft = (SLogicNode*)nodesListGetNode(pNode->pChildren, 0);
- SLogicNode* pRight = (SLogicNode*)nodesListGetNode(pNode->pChildren, 1);
- if (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pLeft) && ctjIsSingleTable(((SScanLogicNode*)pLeft)->pMeta->tableType) &&
- QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pRight) &&
- ctjIsSingleTable(((SScanLogicNode*)pRight)->pMeta->tableType)) {
- return pRight;
- }
+static SJoinLogicNode* sigTbJoinSplMatchByNode(SLogicNode* pNode) {
+ if (QUERY_NODE_LOGIC_PLAN_JOIN == nodeType(pNode) && sigTbJoinSplNeedSplit((SJoinLogicNode*)pNode)) {
+ return (SJoinLogicNode*)pNode;
}
SNode* pChild;
FOREACH(pChild, pNode->pChildren) {
- SLogicNode* pSplitNode = ctjMatchByNode((SLogicNode*)pChild);
+ SJoinLogicNode* pSplitNode = sigTbJoinSplMatchByNode((SLogicNode*)pChild);
if (NULL != pSplitNode) {
return pSplitNode;
}
@@ -182,24 +429,24 @@ static SLogicNode* ctjMatchByNode(SLogicNode* pNode) {
return NULL;
}
-static bool ctjFindSplitNode(SLogicSubplan* pSubplan, SCtjInfo* pInfo) {
- SLogicNode* pSplitNode = ctjMatchByNode(pSubplan->pNode);
- if (NULL != pSplitNode) {
- pInfo->pScan = (SScanLogicNode*)pSplitNode;
+static bool sigTbJoinSplFindSplitNode(SSplitContext* pCxt, SLogicSubplan* pSubplan, SSigTbJoinSplitInfo* pInfo) {
+ SJoinLogicNode* pJoin = sigTbJoinSplMatchByNode(pSubplan->pNode);
+ if (NULL != pJoin) {
+ pInfo->pJoin = pJoin;
+ pInfo->pSplitNode = nodesListGetNode(pJoin->node.pChildren, 1);
pInfo->pSubplan = pSubplan;
}
- return NULL != pSplitNode;
+ return NULL != pJoin;
}
-static int32_t ctjSplit(SSplitContext* pCxt, SLogicSubplan* pSubplan) {
- SCtjInfo info = {0};
- if (!splMatch(pCxt, pSubplan, SPLIT_FLAG_CTJ, (FSplFindSplitNode)ctjFindSplitNode, &info)) {
+static int32_t singleTableJoinSplit(SSplitContext* pCxt, SLogicSubplan* pSubplan) {
+ SSigTbJoinSplitInfo info = {0};
+ if (!splMatch(pCxt, pSubplan, 0, (FSplFindSplitNode)sigTbJoinSplFindSplitNode, &info)) {
return TSDB_CODE_SUCCESS;
}
- int32_t code =
- nodesListMakeStrictAppend(&info.pSubplan->pChildren, splCreateScanSubplan(pCxt, info.pScan, SPLIT_FLAG_CTJ));
+ int32_t code = splCreateExchangeNodeForSubplan(pCxt, info.pSubplan, info.pSplitNode, info.pSubplan->subplanType);
if (TSDB_CODE_SUCCESS == code) {
- code = splCreateExchangeNode(pCxt, info.pSubplan, info.pScan, info.pSubplan->subplanType);
+ code = nodesListMakeStrictAppend(&info.pSubplan->pChildren, splCreateScanSubplan(pCxt, info.pSplitNode, 0));
}
++(pCxt->groupId);
pCxt->split = true;
@@ -277,13 +524,18 @@ static int32_t unionSplitSubplan(SSplitContext* pCxt, SLogicSubplan* pUnionSubpl
return code;
}
-static SLogicNode* uaMatchByNode(SLogicNode* pNode) {
+typedef struct SUnionAllSplitInfo {
+ SProjectLogicNode* pProject;
+ SLogicSubplan* pSubplan;
+} SUnionAllSplitInfo;
+
+static SLogicNode* unAllSplMatchByNode(SLogicNode* pNode) {
if (QUERY_NODE_LOGIC_PLAN_PROJECT == nodeType(pNode) && LIST_LENGTH(pNode->pChildren) > 1) {
return pNode;
}
SNode* pChild;
FOREACH(pChild, pNode->pChildren) {
- SLogicNode* pSplitNode = uaMatchByNode((SLogicNode*)pChild);
+ SLogicNode* pSplitNode = unAllSplMatchByNode((SLogicNode*)pChild);
if (NULL != pSplitNode) {
return pSplitNode;
}
@@ -291,8 +543,8 @@ static SLogicNode* uaMatchByNode(SLogicNode* pNode) {
return NULL;
}
-static bool uaFindSplitNode(SLogicSubplan* pSubplan, SUaInfo* pInfo) {
- SLogicNode* pSplitNode = uaMatchByNode(pSubplan->pNode);
+static bool unAllSplFindSplitNode(SSplitContext* pCxt, SLogicSubplan* pSubplan, SUnionAllSplitInfo* pInfo) {
+ SLogicNode* pSplitNode = unAllSplMatchByNode(pSubplan->pNode);
if (NULL != pSplitNode) {
pInfo->pProject = (SProjectLogicNode*)pSplitNode;
pInfo->pSubplan = pSubplan;
@@ -300,13 +552,13 @@ static bool uaFindSplitNode(SLogicSubplan* pSubplan, SUaInfo* pInfo) {
return NULL != pSplitNode;
}
-static int32_t uaCreateExchangeNode(SSplitContext* pCxt, SLogicSubplan* pSubplan, SProjectLogicNode* pProject) {
+static int32_t unAllSplCreateExchangeNode(SSplitContext* pCxt, SLogicSubplan* pSubplan, SProjectLogicNode* pProject) {
SExchangeLogicNode* pExchange = nodesMakeNode(QUERY_NODE_LOGIC_PLAN_EXCHANGE);
if (NULL == pExchange) {
return TSDB_CODE_OUT_OF_MEMORY;
}
pExchange->srcGroupId = pCxt->groupId;
- // pExchange->precision = pScan->pMeta->tableInfo.precision;
+ pExchange->node.precision = pProject->node.precision;
pExchange->node.pTargets = nodesCloneList(pProject->node.pTargets);
if (NULL == pExchange->node.pTargets) {
return TSDB_CODE_OUT_OF_MEMORY;
@@ -332,28 +584,33 @@ static int32_t uaCreateExchangeNode(SSplitContext* pCxt, SLogicSubplan* pSubplan
return TSDB_CODE_FAILED;
}
-static int32_t uaSplit(SSplitContext* pCxt, SLogicSubplan* pSubplan) {
- SUaInfo info = {0};
- if (!splMatch(pCxt, pSubplan, 0, (FSplFindSplitNode)uaFindSplitNode, &info)) {
+static int32_t unionAllSplit(SSplitContext* pCxt, SLogicSubplan* pSubplan) {
+ SUnionAllSplitInfo info = {0};
+ if (!splMatch(pCxt, pSubplan, 0, (FSplFindSplitNode)unAllSplFindSplitNode, &info)) {
return TSDB_CODE_SUCCESS;
}
int32_t code = unionSplitSubplan(pCxt, info.pSubplan, (SLogicNode*)info.pProject);
if (TSDB_CODE_SUCCESS == code) {
- code = uaCreateExchangeNode(pCxt, info.pSubplan, info.pProject);
+ code = unAllSplCreateExchangeNode(pCxt, info.pSubplan, info.pProject);
}
++(pCxt->groupId);
pCxt->split = true;
return code;
}
-static SLogicNode* unMatchByNode(SLogicNode* pNode) {
+typedef struct SUnionDistinctSplitInfo {
+ SAggLogicNode* pAgg;
+ SLogicSubplan* pSubplan;
+} SUnionDistinctSplitInfo;
+
+static SLogicNode* unDistSplMatchByNode(SLogicNode* pNode) {
if (QUERY_NODE_LOGIC_PLAN_AGG == nodeType(pNode) && LIST_LENGTH(pNode->pChildren) > 1) {
return pNode;
}
SNode* pChild;
FOREACH(pChild, pNode->pChildren) {
- SLogicNode* pSplitNode = unMatchByNode((SLogicNode*)pChild);
+ SLogicNode* pSplitNode = unDistSplMatchByNode((SLogicNode*)pChild);
if (NULL != pSplitNode) {
return pSplitNode;
}
@@ -361,13 +618,13 @@ static SLogicNode* unMatchByNode(SLogicNode* pNode) {
return NULL;
}
-static int32_t unCreateExchangeNode(SSplitContext* pCxt, SLogicSubplan* pSubplan, SAggLogicNode* pAgg) {
+static int32_t unDistSplCreateExchangeNode(SSplitContext* pCxt, SLogicSubplan* pSubplan, SAggLogicNode* pAgg) {
SExchangeLogicNode* pExchange = nodesMakeNode(QUERY_NODE_LOGIC_PLAN_EXCHANGE);
if (NULL == pExchange) {
return TSDB_CODE_OUT_OF_MEMORY;
}
pExchange->srcGroupId = pCxt->groupId;
- // pExchange->precision = pScan->pMeta->tableInfo.precision;
+ pExchange->node.precision = pAgg->node.precision;
pExchange->node.pTargets = nodesCloneList(pAgg->pGroupKeys);
if (NULL == pExchange->node.pTargets) {
return TSDB_CODE_OUT_OF_MEMORY;
@@ -378,8 +635,8 @@ static int32_t unCreateExchangeNode(SSplitContext* pCxt, SLogicSubplan* pSubplan
return nodesListMakeAppend(&pAgg->node.pChildren, pExchange);
}
-static bool unFindSplitNode(SLogicSubplan* pSubplan, SUnInfo* pInfo) {
- SLogicNode* pSplitNode = unMatchByNode(pSubplan->pNode);
+static bool unDistSplFindSplitNode(SSplitContext* pCxt, SLogicSubplan* pSubplan, SUnionDistinctSplitInfo* pInfo) {
+ SLogicNode* pSplitNode = unDistSplMatchByNode(pSubplan->pNode);
if (NULL != pSplitNode) {
pInfo->pAgg = (SAggLogicNode*)pSplitNode;
pInfo->pSubplan = pSubplan;
@@ -387,25 +644,29 @@ static bool unFindSplitNode(SLogicSubplan* pSubplan, SUnInfo* pInfo) {
return NULL != pSplitNode;
}
-static int32_t unSplit(SSplitContext* pCxt, SLogicSubplan* pSubplan) {
- SUnInfo info = {0};
- if (!splMatch(pCxt, pSubplan, 0, (FSplFindSplitNode)unFindSplitNode, &info)) {
+static int32_t unionDistinctSplit(SSplitContext* pCxt, SLogicSubplan* pSubplan) {
+ SUnionDistinctSplitInfo info = {0};
+ if (!splMatch(pCxt, pSubplan, 0, (FSplFindSplitNode)unDistSplFindSplitNode, &info)) {
return TSDB_CODE_SUCCESS;
}
int32_t code = unionSplitSubplan(pCxt, info.pSubplan, (SLogicNode*)info.pAgg);
if (TSDB_CODE_SUCCESS == code) {
- code = unCreateExchangeNode(pCxt, info.pSubplan, info.pAgg);
+ code = unDistSplCreateExchangeNode(pCxt, info.pSubplan, info.pAgg);
}
++(pCxt->groupId);
pCxt->split = true;
return code;
}
-static const SSplitRule splitRuleSet[] = {{.pName = "SuperTableScan", .splitFunc = stsSplit},
- {.pName = "ChildTableJoin", .splitFunc = ctjSplit},
- {.pName = "UnionAll", .splitFunc = uaSplit},
- {.pName = "Union", .splitFunc = unSplit}};
+// clang-format off
+static const SSplitRule splitRuleSet[] = {
+ {.pName = "SuperTableSplit", .splitFunc = stableSplit},
+ {.pName = "SingleTableJoinSplit", .splitFunc = singleTableJoinSplit},
+ {.pName = "UnionAllSplit", .splitFunc = unionAllSplit},
+ {.pName = "UnionDistinctSplit", .splitFunc = unionDistinctSplit}
+};
+// clang-format on
static const int32_t splitRuleNum = (sizeof(splitRuleSet) / sizeof(SSplitRule));
@@ -416,9 +677,10 @@ static void dumpLogicSubplan(const char* pRuleName, SLogicSubplan* pSubplan) {
taosMemoryFree(pStr);
}
-static int32_t applySplitRule(SLogicSubplan* pSubplan) {
- SSplitContext cxt = {.queryId = pSubplan->id.queryId, .groupId = pSubplan->id.groupId + 1, .split = false};
- bool split = false;
+static int32_t applySplitRule(SPlanContext* pCxt, SLogicSubplan* pSubplan) {
+ SSplitContext cxt = {
+ .pPlanCxt = pCxt, .queryId = pSubplan->id.queryId, .groupId = pSubplan->id.groupId + 1, .split = false};
+ bool split = false;
do {
split = false;
for (int32_t i = 0; i < splitRuleNum; ++i) {
@@ -465,7 +727,7 @@ int32_t splitLogicPlan(SPlanContext* pCxt, SLogicNode* pLogicNode, SLogicSubplan
pSubplan->id.groupId = 1;
setLogicNodeParent(pSubplan->pNode);
- int32_t code = applySplitRule(pSubplan);
+ int32_t code = applySplitRule(pCxt, pSubplan);
if (TSDB_CODE_SUCCESS == code) {
*pLogicSubplan = pSubplan;
} else {
diff --git a/source/libs/planner/src/planUtil.c b/source/libs/planner/src/planUtil.c
index 3c83d9f53a8669535eda1dc883af2951e9470d54..63d31912f0cccdf177b87681687e0faf8168642a 100644
--- a/source/libs/planner/src/planUtil.c
+++ b/source/libs/planner/src/planUtil.c
@@ -34,3 +34,54 @@ int32_t generateUsageErrMsg(char* pBuf, int32_t len, int32_t errCode, ...) {
va_end(vArgList);
return errCode;
}
+
+typedef struct SCreateColumnCxt {
+ int32_t errCode;
+ SNodeList* pList;
+} SCreateColumnCxt;
+
+static EDealRes doCreateColumn(SNode* pNode, void* pContext) {
+ SCreateColumnCxt* pCxt = (SCreateColumnCxt*)pContext;
+ switch (nodeType(pNode)) {
+ case QUERY_NODE_COLUMN: {
+ SNode* pCol = nodesCloneNode(pNode);
+ if (NULL == pCol) {
+ return DEAL_RES_ERROR;
+ }
+ return (TSDB_CODE_SUCCESS == nodesListAppend(pCxt->pList, pCol) ? DEAL_RES_IGNORE_CHILD : DEAL_RES_ERROR);
+ }
+ case QUERY_NODE_OPERATOR:
+ case QUERY_NODE_LOGIC_CONDITION:
+ case QUERY_NODE_FUNCTION: {
+ SExprNode* pExpr = (SExprNode*)pNode;
+ SColumnNode* pCol = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN);
+ if (NULL == pCol) {
+ return DEAL_RES_ERROR;
+ }
+ pCol->node.resType = pExpr->resType;
+ strcpy(pCol->colName, pExpr->aliasName);
+ return (TSDB_CODE_SUCCESS == nodesListAppend(pCxt->pList, pCol) ? DEAL_RES_IGNORE_CHILD : DEAL_RES_ERROR);
+ }
+ default:
+ break;
+ }
+
+ return DEAL_RES_CONTINUE;
+}
+
+int32_t createColumnByRewriteExps(SNodeList* pExprs, SNodeList** pList) {
+ SCreateColumnCxt cxt = {.errCode = TSDB_CODE_SUCCESS, .pList = (NULL == *pList ? nodesMakeList() : *pList)};
+ if (NULL == cxt.pList) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
+ nodesWalkExprs(pExprs, doCreateColumn, &cxt);
+ if (TSDB_CODE_SUCCESS != cxt.errCode) {
+ nodesDestroyList(cxt.pList);
+ return cxt.errCode;
+ }
+ if (NULL == *pList) {
+ *pList = cxt.pList;
+ }
+ return cxt.errCode;
+}
diff --git a/source/libs/planner/src/planner.c b/source/libs/planner/src/planner.c
index af62c52a89baa90aaf857fa6606267a437275f87..f8d240c7b2d2162800cbc32ee7af2eeb62645d89 100644
--- a/source/libs/planner/src/planner.c
+++ b/source/libs/planner/src/planner.c
@@ -58,16 +58,19 @@ static int32_t setSubplanExecutionNode(SPhysiNode* pNode, int32_t groupId, SDown
if (QUERY_NODE_PHYSICAL_PLAN_EXCHANGE == nodeType(pNode)) {
SExchangePhysiNode* pExchange = (SExchangePhysiNode*)pNode;
if (pExchange->srcGroupId == groupId) {
- if (NULL == pExchange->pSrcEndPoints) {
- pExchange->pSrcEndPoints = nodesMakeList();
- if (NULL == pExchange->pSrcEndPoints) {
- return TSDB_CODE_OUT_OF_MEMORY;
- }
- }
- if (TSDB_CODE_SUCCESS != nodesListStrictAppend(pExchange->pSrcEndPoints, nodesCloneNode(pSource))) {
- return TSDB_CODE_OUT_OF_MEMORY;
+ return nodesListMakeStrictAppend(&pExchange->pSrcEndPoints, nodesCloneNode(pSource));
+ }
+ } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE == nodeType(pNode)) {
+ SMergePhysiNode* pMerge = (SMergePhysiNode*)pNode;
+ if (pMerge->srcGroupId == groupId) {
+ SExchangePhysiNode* pExchange =
+ (SExchangePhysiNode*)nodesListGetNode(pMerge->node.pChildren, pMerge->numOfChannels - 1);
+ if (1 == pMerge->numOfChannels) {
+ pMerge->numOfChannels = LIST_LENGTH(pMerge->node.pChildren);
+ } else {
+ --(pMerge->numOfChannels);
}
- return TSDB_CODE_SUCCESS;
+ return nodesListMakeStrictAppend(&pExchange->pSrcEndPoints, nodesCloneNode(pSource));
}
}
diff --git a/source/libs/planner/test/CMakeLists.txt b/source/libs/planner/test/CMakeLists.txt
index a21b36fef6b3eecc51bdbe4abbb7fff3dc065098..abea60b0c798a055617abf3693be25f365fbc867 100644
--- a/source/libs/planner/test/CMakeLists.txt
+++ b/source/libs/planner/test/CMakeLists.txt
@@ -32,7 +32,9 @@ if(${BUILD_WINGETOPT})
target_link_libraries(plannerTest PUBLIC wingetopt)
endif()
-add_test(
- NAME plannerTest
- COMMAND plannerTest
-)
+if(NOT TD_WINDOWS)
+ add_test(
+ NAME plannerTest
+ COMMAND plannerTest
+ )
+endif(NOT TD_WINDOWS)
diff --git a/source/libs/planner/test/planIntervalTest.cpp b/source/libs/planner/test/planIntervalTest.cpp
index c9bae46ca9438977f4078ceac82e6c7c4b3c680e..a04f47741e50f4b0b02bc86e6713636b9b4fff97 100644
--- a/source/libs/planner/test/planIntervalTest.cpp
+++ b/source/libs/planner/test/planIntervalTest.cpp
@@ -50,4 +50,10 @@ TEST_F(PlanIntervalTest, selectFunc) {
run("SELECT MAX(c1), MIN(c1) FROM t1 INTERVAL(10s)");
// select function along with the columns of select row, and with INTERVAL clause
run("SELECT MAX(c1), c2 FROM t1 INTERVAL(10s)");
-}
\ No newline at end of file
+}
+
+TEST_F(PlanIntervalTest, stable) {
+ useDb("root", "test");
+
+ run("SELECT COUNT(*) FROM st1 INTERVAL(10s)");
+}
diff --git a/source/libs/planner/test/planJoinTest.cpp b/source/libs/planner/test/planJoinTest.cpp
index eaedbd1db0036d78084026cf8864ccb977fed80f..a3c5258e33dfb7ccbb6db5bbd600a6efdd01359d 100644
--- a/source/libs/planner/test/planJoinTest.cpp
+++ b/source/libs/planner/test/planJoinTest.cpp
@@ -44,3 +44,9 @@ TEST_F(PlanJoinTest, withWhere) {
run("SELECT t1.c1, t2.c1 FROM st1s1 t1 JOIN st1s2 t2 ON t1.ts = t2.ts "
"WHERE t1.c1 > t2.c1 AND t1.c2 = 'abc' AND t2.c2 = 'qwe'");
}
+
+TEST_F(PlanJoinTest, multiJoin) {
+ useDb("root", "test");
+
+ run("SELECT t1.c1, t2.c1 FROM st1s1 t1 JOIN st1s2 t2 ON t1.ts = t2.ts JOIN st1s3 t3 ON t1.ts = t3.ts");
+}
diff --git a/source/libs/planner/test/planOtherTest.cpp b/source/libs/planner/test/planOtherTest.cpp
index 67c09d706e34ea44ab0c4070d9bbb665a15dded1..f153604e6b6b43ca601bfe662f7a21b2f36327ff 100644
--- a/source/libs/planner/test/planOtherTest.cpp
+++ b/source/libs/planner/test/planOtherTest.cpp
@@ -33,6 +33,12 @@ TEST_F(PlanOtherTest, createStream) {
"interval(10s)");
}
+TEST_F(PlanOtherTest, createStreamUseSTable) {
+ useDb("root", "test");
+
+ run("create stream if not exists s1 as select count(*) from st1 interval(10s)");
+}
+
TEST_F(PlanOtherTest, createSmaIndex) {
useDb("root", "test");
diff --git a/source/libs/planner/test/planSTableTest.cpp b/source/libs/planner/test/planSTableTest.cpp
index ed75b75e514aede02f41bf29ea044ccf833aef83..d1608cbad1155baf1bda19cf7c06a5121b0d581a 100644
--- a/source/libs/planner/test/planSTableTest.cpp
+++ b/source/libs/planner/test/planSTableTest.cpp
@@ -27,6 +27,14 @@ TEST_F(PlanSuperTableTest, pseudoCol) {
run("SELECT TBNAME, tag1, tag2 FROM st1");
}
+TEST_F(PlanSuperTableTest, pseudoColOnChildTable) {
+ useDb("root", "test");
+
+ run("SELECT TBNAME FROM st1s1");
+
+ run("SELECT TBNAME, tag1, tag2 FROM st1s1");
+}
+
TEST_F(PlanSuperTableTest, orderBy) {
useDb("root", "test");
diff --git a/source/libs/planner/test/planSubqueryTest.cpp b/source/libs/planner/test/planSubqueryTest.cpp
index 2d559c6f3b5322e4bd27bd571fc5e6829ccf262c..f82e10e9983004204544ecd16632bd2a59a37623 100644
--- a/source/libs/planner/test/planSubqueryTest.cpp
+++ b/source/libs/planner/test/planSubqueryTest.cpp
@@ -26,6 +26,8 @@ TEST_F(PlanSubqeuryTest, basic) {
run("SELECT * FROM (SELECT * FROM t1)");
run("SELECT LAST(c1) FROM (SELECT * FROM t1)");
+
+ run("SELECT c1 FROM (SELECT TODAY() AS c1 FROM t1)");
}
TEST_F(PlanSubqeuryTest, doubleGroupBy) {
diff --git a/source/libs/planner/test/planTestUtil.cpp b/source/libs/planner/test/planTestUtil.cpp
index 084762088823edee627b4ea3bad2286208d570ac..e2082d49364727719bc72f3445bcb038d5584976 100644
--- a/source/libs/planner/test/planTestUtil.cpp
+++ b/source/libs/planner/test/planTestUtil.cpp
@@ -73,7 +73,7 @@ void setDumpModule(const char* pModule) {
}
}
-void setSkipSqlNum(const char* pNum) { g_skipSql = stoi(optarg); }
+void setSkipSqlNum(const char* pNum) { g_skipSql = stoi(pNum); }
void setLogLevel(const char* pLogLevel) { g_logLevel = stoi(pLogLevel); }
diff --git a/source/libs/qcom/src/queryUtil.c b/source/libs/qcom/src/queryUtil.c
index f4ba2fca8146b37a3e25bc7173488dd85c8e48d0..a5a499aaf5bc3b38998528d1550bd9b16c1d7671 100644
--- a/source/libs/qcom/src/queryUtil.c
+++ b/source/libs/qcom/src/queryUtil.c
@@ -199,3 +199,30 @@ SSchema createSchema(int8_t type, int32_t bytes, col_id_t colId, const char* nam
tstrncpy(s.name, name, tListLen(s.name));
return s;
}
+
+void destroyQueryExecRes(SQueryExecRes* pRes) {
+ if (NULL == pRes || NULL == pRes->res) {
+ return;
+ }
+
+ switch (pRes->msgType) {
+ case TDMT_VND_ALTER_TABLE:
+ case TDMT_MND_ALTER_STB: {
+ tFreeSTableMetaRsp((STableMetaRsp *)pRes->res);
+ taosMemoryFreeClear(pRes->res);
+ break;
+ }
+ case TDMT_VND_SUBMIT: {
+ tFreeSSubmitRsp((SSubmitRsp*)pRes->res);
+ break;
+ }
+ case TDMT_VND_QUERY: {
+ taosArrayDestroy((SArray*)pRes->res);
+ break;
+ }
+ default:
+ qError("invalid exec result for request type %d", pRes->msgType);
+ }
+}
+
+
diff --git a/source/libs/qcom/src/querymsg.c b/source/libs/qcom/src/querymsg.c
index fb9319bedeabfbd3673c72dda58ca0c3686cd940..e77f2b0ca42e58744ea14de5286ae24d1c4ceb14 100644
--- a/source/libs/qcom/src/querymsg.c
+++ b/source/libs/qcom/src/querymsg.c
@@ -22,7 +22,7 @@
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wformat-truncation"
-int32_t (*queryBuildMsg[TDMT_MAX])(void *input, char **msg, int32_t msgSize, int32_t *msgLen) = {0};
+int32_t (*queryBuildMsg[TDMT_MAX])(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void*(*mallocFp)(int32_t)) = {0};
int32_t (*queryProcessMsgRsp[TDMT_MAX])(void *output, char *msg, int32_t msgSize) = {0};
int32_t queryBuildUseDbOutput(SUseDbOutput *pOut, SUseDbRsp *usedbRsp) {
@@ -58,7 +58,7 @@ int32_t queryBuildUseDbOutput(SUseDbOutput *pOut, SUseDbRsp *usedbRsp) {
return TSDB_CODE_SUCCESS;
}
-int32_t queryBuildTableMetaReqMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen) {
+int32_t queryBuildTableMetaReqMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void*(*mallcFp)(int32_t)) {
SBuildTableMetaInput *pInput = input;
if (NULL == input || NULL == msg || NULL == msgLen) {
return TSDB_CODE_TSC_INVALID_INPUT;
@@ -72,7 +72,7 @@ int32_t queryBuildTableMetaReqMsg(void *input, char **msg, int32_t msgSize, int3
tstrncpy(infoReq.tbName, pInput->tbName, TSDB_TABLE_NAME_LEN);
int32_t bufLen = tSerializeSTableInfoReq(NULL, 0, &infoReq);
- void *pBuf = rpcMallocCont(bufLen);
+ void *pBuf = (*mallcFp)(bufLen);
tSerializeSTableInfoReq(pBuf, bufLen, &infoReq);
*msg = pBuf;
@@ -81,7 +81,7 @@ int32_t queryBuildTableMetaReqMsg(void *input, char **msg, int32_t msgSize, int3
return TSDB_CODE_SUCCESS;
}
-int32_t queryBuildUseDbMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen) {
+int32_t queryBuildUseDbMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void*(*mallcFp)(int32_t)) {
SBuildUseDBInput *pInput = input;
if (NULL == pInput || NULL == msg || NULL == msgLen) {
return TSDB_CODE_TSC_INVALID_INPUT;
@@ -95,7 +95,7 @@ int32_t queryBuildUseDbMsg(void *input, char **msg, int32_t msgSize, int32_t *ms
usedbReq.numOfTable = pInput->numOfTable;
int32_t bufLen = tSerializeSUseDbReq(NULL, 0, &usedbReq);
- void *pBuf = rpcMallocCont(bufLen);
+ void *pBuf = (*mallcFp)(bufLen);
tSerializeSUseDbReq(pBuf, bufLen, &usedbReq);
*msg = pBuf;
@@ -104,7 +104,7 @@ int32_t queryBuildUseDbMsg(void *input, char **msg, int32_t msgSize, int32_t *ms
return TSDB_CODE_SUCCESS;
}
-int32_t queryBuildQnodeListMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen) {
+int32_t queryBuildQnodeListMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void*(*mallcFp)(int32_t)) {
if (NULL == msg || NULL == msgLen) {
return TSDB_CODE_TSC_INVALID_INPUT;
}
@@ -113,7 +113,7 @@ int32_t queryBuildQnodeListMsg(void *input, char **msg, int32_t msgSize, int32_t
qnodeListReq.rowNum = -1;
int32_t bufLen = tSerializeSQnodeListReq(NULL, 0, &qnodeListReq);
- void *pBuf = rpcMallocCont(bufLen);
+ void *pBuf = (*mallcFp)(bufLen);
tSerializeSQnodeListReq(pBuf, bufLen, &qnodeListReq);
*msg = pBuf;
@@ -122,7 +122,7 @@ int32_t queryBuildQnodeListMsg(void *input, char **msg, int32_t msgSize, int32_t
return TSDB_CODE_SUCCESS;
}
-int32_t queryBuildGetDBCfgMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen) {
+int32_t queryBuildGetDBCfgMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void*(*mallcFp)(int32_t)) {
if (NULL == msg || NULL == msgLen) {
return TSDB_CODE_TSC_INVALID_INPUT;
}
@@ -131,7 +131,7 @@ int32_t queryBuildGetDBCfgMsg(void *input, char **msg, int32_t msgSize, int32_t
strcpy(dbCfgReq.db, input);
int32_t bufLen = tSerializeSDbCfgReq(NULL, 0, &dbCfgReq);
- void *pBuf = rpcMallocCont(bufLen);
+ void *pBuf = (*mallcFp)(bufLen);
tSerializeSDbCfgReq(pBuf, bufLen, &dbCfgReq);
*msg = pBuf;
@@ -140,7 +140,7 @@ int32_t queryBuildGetDBCfgMsg(void *input, char **msg, int32_t msgSize, int32_t
return TSDB_CODE_SUCCESS;
}
-int32_t queryBuildGetIndexMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen) {
+int32_t queryBuildGetIndexMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void*(*mallcFp)(int32_t)) {
if (NULL == msg || NULL == msgLen) {
return TSDB_CODE_TSC_INVALID_INPUT;
}
@@ -149,7 +149,7 @@ int32_t queryBuildGetIndexMsg(void *input, char **msg, int32_t msgSize, int32_t
strcpy(indexReq.indexFName, input);
int32_t bufLen = tSerializeSUserIndexReq(NULL, 0, &indexReq);
- void *pBuf = rpcMallocCont(bufLen);
+ void *pBuf = (*mallcFp)(bufLen);
tSerializeSUserIndexReq(pBuf, bufLen, &indexReq);
*msg = pBuf;
@@ -158,7 +158,7 @@ int32_t queryBuildGetIndexMsg(void *input, char **msg, int32_t msgSize, int32_t
return TSDB_CODE_SUCCESS;
}
-int32_t queryBuildRetrieveFuncMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen) {
+int32_t queryBuildRetrieveFuncMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void*(*mallcFp)(int32_t)) {
if (NULL == msg || NULL == msgLen) {
return TSDB_CODE_TSC_INVALID_INPUT;
}
@@ -170,7 +170,7 @@ int32_t queryBuildRetrieveFuncMsg(void *input, char **msg, int32_t msgSize, int3
taosArrayPush(funcReq.pFuncNames, input);
int32_t bufLen = tSerializeSRetrieveFuncReq(NULL, 0, &funcReq);
- void *pBuf = rpcMallocCont(bufLen);
+ void *pBuf = (*mallcFp)(bufLen);
tSerializeSRetrieveFuncReq(pBuf, bufLen, &funcReq);
taosArrayDestroy(funcReq.pFuncNames);
@@ -181,7 +181,7 @@ int32_t queryBuildRetrieveFuncMsg(void *input, char **msg, int32_t msgSize, int3
return TSDB_CODE_SUCCESS;
}
-int32_t queryBuildGetUserAuthMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen) {
+int32_t queryBuildGetUserAuthMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void*(*mallcFp)(int32_t)) {
if (NULL == msg || NULL == msgLen) {
return TSDB_CODE_TSC_INVALID_INPUT;
}
@@ -190,7 +190,7 @@ int32_t queryBuildGetUserAuthMsg(void *input, char **msg, int32_t msgSize, int32
strncpy(req.user, input, sizeof(req.user));
int32_t bufLen = tSerializeSGetUserAuthReq(NULL, 0, &req);
- void *pBuf = rpcMallocCont(bufLen);
+ void *pBuf = (*mallcFp)(bufLen);
tSerializeSGetUserAuthReq(pBuf, bufLen, &req);
*msg = pBuf;
@@ -273,7 +273,7 @@ static int32_t queryConvertTableMetaMsg(STableMetaRsp *pMetaMsg) {
return TSDB_CODE_SUCCESS;
}
-int32_t queryCreateTableMetaFromMsg(STableMetaRsp *msg, bool isSuperTable, STableMeta **pMeta) {
+int32_t queryCreateTableMetaFromMsg(STableMetaRsp *msg, bool isStb, STableMeta **pMeta) {
int32_t total = msg->numOfColumns + msg->numOfTags;
int32_t metaSize = sizeof(STableMeta) + sizeof(SSchema) * total;
@@ -283,14 +283,14 @@ int32_t queryCreateTableMetaFromMsg(STableMetaRsp *msg, bool isSuperTable, STabl
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
- pTableMeta->vgId = isSuperTable ? 0 : msg->vgId;
- pTableMeta->tableType = isSuperTable ? TSDB_SUPER_TABLE : msg->tableType;
- pTableMeta->uid = isSuperTable ? msg->suid : msg->tuid;
+ pTableMeta->vgId = isStb ? 0 : msg->vgId;
+ pTableMeta->tableType = isStb ? TSDB_SUPER_TABLE : msg->tableType;
+ pTableMeta->uid = isStb ? msg->suid : msg->tuid;
pTableMeta->suid = msg->suid;
pTableMeta->sversion = msg->sversion;
pTableMeta->tversion = msg->tversion;
- if (isSuperTable) {
+ if (isStb) {
qDebug("stable %s meta returned, suid:%" PRIx64, msg->stbName, pTableMeta->suid);
}
@@ -373,7 +373,7 @@ int32_t queryProcessQnodeListRsp(void *output, char *msg, int32_t msgSize) {
return code;
}
- out.addrsList = (SArray *)output;
+ out.qnodeList = (SArray *)output;
if (tDeserializeSQnodeListRsp(msg, msgSize, &out) != 0) {
qError("invalid qnode list rsp msg, msgSize:%d", msgSize);
code = TSDB_CODE_INVALID_MSG;
diff --git a/source/libs/qworker/inc/qworkerInt.h b/source/libs/qworker/inc/qwInt.h
similarity index 83%
rename from source/libs/qworker/inc/qworkerInt.h
rename to source/libs/qworker/inc/qwInt.h
index 511327658f14a58e25460f979a4ebb197c8d4b8c..4fe3c1839310be9e264f7241fbc0cce48837a05c 100644
--- a/source/libs/qworker/inc/qworkerInt.h
+++ b/source/libs/qworker/inc/qwInt.h
@@ -26,7 +26,7 @@ extern "C" {
#include "ttimer.h"
#include "tref.h"
#include "plannodes.h"
-
+#include "executor.h"
#include "trpc.h"
#define QW_DEFAULT_SCHEDULER_NUMBER 10000
@@ -76,6 +76,8 @@ typedef struct SQWDebug {
bool dumpEnable;
} SQWDebug;
+extern SQWDebug gQWDebug;
+
typedef struct SQWMsg {
void *node;
int32_t code;
@@ -143,6 +145,32 @@ typedef struct SQWSchStatus {
SHashObj *tasksHash; // key:queryId+taskId, value: SQWTaskStatus
} SQWSchStatus;
+typedef struct SQWTimeInQ {
+ uint64_t num;
+ uint64_t total;
+} SQWTimeInQ;
+
+typedef struct SQWMsgStat {
+ SQWTimeInQ waitTime[2];
+ uint64_t queryProcessed;
+ uint64_t cqueryProcessed;
+ uint64_t fetchProcessed;
+ uint64_t fetchRspProcessed;
+ uint64_t cancelProcessed;
+ uint64_t dropProcessed;
+ uint64_t hbProcessed;
+} SQWMsgStat;
+
+typedef struct SQWRTStat {
+ uint64_t startTaskNum;
+ uint64_t stopTaskNum;
+} SQWRTStat;
+
+typedef struct SQWStat {
+ SQWMsgStat msgStat;
+ SQWRTStat rtStat;
+} SQWStat;
+
// Qnode/Vnode level task management
typedef struct SQWorker {
int64_t refId;
@@ -153,9 +181,10 @@ typedef struct SQWorker {
tmr_h hbTimer;
SRWLatch schLock;
// SRWLatch ctxLock;
- SHashObj *schHash; // key: schedulerId, value: SQWSchStatus
- SHashObj *ctxHash; // key: queryId+taskId, value: SQWTaskCtx
- SMsgCb msgCb;
+ SHashObj *schHash; // key: schedulerId, value: SQWSchStatus
+ SHashObj *ctxHash; // key: queryId+taskId, value: SQWTaskCtx
+ SMsgCb msgCb;
+ SQWStat stat;
} SQWorker;
typedef struct SQWorkerMgmt {
@@ -170,10 +199,13 @@ typedef struct SQWorkerMgmt {
#define QW_IDS() sId, qId, tId, rId
#define QW_FPARAMS() mgmt, QW_IDS()
-#define QW_GET_EVENT_VALUE(ctx, event) atomic_load_8(&(ctx)->events[event])
+#define QW_STAT_INC(_item, _n) atomic_add_fetch_64(&(_item), _n)
+#define QW_STAT_DEC(_item, _n) atomic_sub_fetch_64(&(_item), _n)
+#define QW_STAT_GET(_item) atomic_load_64(&(_item))
-#define QW_IS_EVENT_RECEIVED(ctx, event) (atomic_load_8(&(ctx)->events[event]) == QW_EVENT_RECEIVED)
-#define QW_IS_EVENT_PROCESSED(ctx, event) (atomic_load_8(&(ctx)->events[event]) == QW_EVENT_PROCESSED)
+#define QW_GET_EVENT(ctx, event) atomic_load_8(&(ctx)->events[event])
+#define QW_IS_EVENT_RECEIVED(ctx, event) (QW_GET_EVENT(ctx, event) == QW_EVENT_RECEIVED)
+#define QW_IS_EVENT_PROCESSED(ctx, event) (QW_GET_EVENT(ctx, event) == QW_EVENT_PROCESSED)
#define QW_SET_EVENT_RECEIVED(ctx, event) atomic_store_8(&(ctx)->events[event], QW_EVENT_RECEIVED)
#define QW_SET_EVENT_PROCESSED(ctx, event) atomic_store_8(&(ctx)->events[event], QW_EVENT_PROCESSED)
@@ -227,6 +259,7 @@ typedef struct SQWorkerMgmt {
#define QW_ELOG(_param, ...) qError("QW:%p " _param, mgmt, __VA_ARGS__)
#define QW_DLOG(_param, ...) qDebug("QW:%p " _param, mgmt, __VA_ARGS__)
+#define QW_TLOG(_param, ...) qTrace("QW:%p " _param, mgmt, __VA_ARGS__)
#define QW_DUMP(_param, ...) \
do { \
@@ -302,9 +335,29 @@ typedef struct SQWorkerMgmt {
extern SQWorkerMgmt gQwMgmt;
static FORCE_INLINE SQWorker *qwAcquire(int64_t refId) { return (SQWorker *)taosAcquireRef(atomic_load_32(&gQwMgmt.qwRef), refId); }
-
static FORCE_INLINE int32_t qwRelease(int64_t refId) { return taosReleaseRef(gQwMgmt.qwRef, refId); }
+char *qwPhaseStr(int32_t phase);
+char *qwBufStatusStr(int32_t bufStatus);
+int32_t qwAcquireAddScheduler(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchStatus **sch);
+void qwReleaseScheduler(int32_t rwType, SQWorker *mgmt);
+int32_t qwAddTaskStatus(QW_FPARAMS_DEF, int32_t status);
+int32_t qwAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx);
+int32_t qwGetTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx);
+int32_t qwAddAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx);
+void qwReleaseTaskCtx(SQWorker *mgmt, void *ctx);
+int32_t qwKillTaskHandle(QW_FPARAMS_DEF, SQWTaskCtx *ctx);
+int32_t qwUpdateTaskStatus(QW_FPARAMS_DEF, int8_t status);
+int32_t qwDropTask(QW_FPARAMS_DEF);
+void qwSaveTbVersionInfo(qTaskInfo_t pTaskInfo, SQWTaskCtx *ctx);
+int32_t qwOpenRef(void);
+void qwSetHbParam(int64_t refId, SQWHbParam **pParam);
+int32_t qwUpdateTimeInQueue(SQWorker *mgmt, int64_t ts, EQueueType type);
+int64_t qwGetTimeInQueue(SQWorker *mgmt, EQueueType type);
+
+void qwDbgDumpMgmtInfo(SQWorker *mgmt);
+int32_t qwDbgValidateStatus(QW_FPARAMS_DEF, int8_t oriStatus, int8_t newStatus, bool *ignore);
+
#ifdef __cplusplus
}
diff --git a/source/libs/qworker/inc/qworkerMsg.h b/source/libs/qworker/inc/qwMsg.h
similarity index 92%
rename from source/libs/qworker/inc/qworkerMsg.h
rename to source/libs/qworker/inc/qwMsg.h
index 6453cff70095b246f0ede7034da07536b1075f2f..ede085b6f912842c85dce8597374613856d80f1f 100644
--- a/source/libs/qworker/inc/qworkerMsg.h
+++ b/source/libs/qworker/inc/qwMsg.h
@@ -20,7 +20,7 @@
extern "C" {
#endif
-#include "qworkerInt.h"
+#include "qwInt.h"
#include "dataSinkMgt.h"
int32_t qwProcessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg, int8_t taskType, int8_t explain);
@@ -36,12 +36,10 @@ int32_t qwBuildAndSendFetchRsp(SRpcHandleInfo *pConn, SRetrieveTableRsp *pRsp, i
int32_t code);
void qwBuildFetchRsp(void *msg, SOutputData *input, int32_t len, bool qComplete);
int32_t qwBuildAndSendCQueryMsg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn);
-int32_t qwBuildAndSendReadyRsp(SRpcHandleInfo *pConn, int32_t code, STbVerInfo* tbInfo);
-int32_t qwBuildAndSendQueryRsp(SRpcHandleInfo *pConn, int32_t code);
+int32_t qwBuildAndSendQueryRsp(SRpcHandleInfo *pConn, int32_t code, STbVerInfo* tbInfo);
int32_t qwBuildAndSendExplainRsp(SRpcHandleInfo *pConn, SExplainExecInfo *execInfo, int32_t num);
void qwFreeFetchRsp(void *msg);
int32_t qwMallocFetchRsp(int32_t length, SRetrieveTableRsp **rsp);
-int32_t qwGetSchTasksStatus(SQWorker *mgmt, uint64_t sId, SSchedulerStatusRsp **rsp);
int32_t qwBuildAndSendHbRsp(SRpcHandleInfo *pConn, SSchedulerHbRsp *rsp, int32_t code);
int32_t qwRegisterQueryBrokenLinkArg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn);
int32_t qwRegisterHbBrokenLinkArg(SQWorker *mgmt, uint64_t sId, SRpcHandleInfo *pConn);
diff --git a/source/libs/qworker/src/qwDbg.c b/source/libs/qworker/src/qwDbg.c
new file mode 100644
index 0000000000000000000000000000000000000000..27fe22295d3706eb21a237f8d662e34b4dce9b36
--- /dev/null
+++ b/source/libs/qworker/src/qwDbg.c
@@ -0,0 +1,128 @@
+#include "qworker.h"
+#include "dataSinkMgt.h"
+#include "executor.h"
+#include "planner.h"
+#include "query.h"
+#include "qwInt.h"
+#include "qwMsg.h"
+#include "tcommon.h"
+#include "tmsg.h"
+#include "tname.h"
+
+SQWDebug gQWDebug = {.statusEnable = true, .dumpEnable = true};
+
+int32_t qwDbgValidateStatus(QW_FPARAMS_DEF, int8_t oriStatus, int8_t newStatus, bool *ignore) {
+ if (!gQWDebug.statusEnable) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ int32_t code = 0;
+
+ if (oriStatus == newStatus) {
+ if (newStatus == JOB_TASK_STATUS_EXECUTING || newStatus == JOB_TASK_STATUS_FAILED) {
+ *ignore = true;
+ return TSDB_CODE_SUCCESS;
+ }
+
+ QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
+ }
+
+ switch (oriStatus) {
+ case JOB_TASK_STATUS_NULL:
+ if (newStatus != JOB_TASK_STATUS_EXECUTING && newStatus != JOB_TASK_STATUS_FAILED &&
+ newStatus != JOB_TASK_STATUS_NOT_START) {
+ QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
+ }
+
+ break;
+ case JOB_TASK_STATUS_NOT_START:
+ if (newStatus != JOB_TASK_STATUS_CANCELLED) {
+ QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
+ }
+
+ break;
+ case JOB_TASK_STATUS_EXECUTING:
+ if (newStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED && newStatus != JOB_TASK_STATUS_SUCCEED &&
+ newStatus != JOB_TASK_STATUS_FAILED && newStatus != JOB_TASK_STATUS_CANCELLING &&
+ newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_DROPPING) {
+ QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
+ }
+
+ break;
+ case JOB_TASK_STATUS_PARTIAL_SUCCEED:
+ if (newStatus != JOB_TASK_STATUS_EXECUTING && newStatus != JOB_TASK_STATUS_SUCCEED &&
+ newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_FAILED &&
+ newStatus != JOB_TASK_STATUS_DROPPING) {
+ QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
+ }
+
+ break;
+ case JOB_TASK_STATUS_SUCCEED:
+ if (newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_DROPPING &&
+ newStatus != JOB_TASK_STATUS_FAILED) {
+ QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
+ }
+
+ break;
+ case JOB_TASK_STATUS_FAILED:
+ if (newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_DROPPING) {
+ QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
+ }
+ break;
+
+ case JOB_TASK_STATUS_CANCELLING:
+ if (newStatus != JOB_TASK_STATUS_CANCELLED) {
+ QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
+ }
+
+ break;
+ case JOB_TASK_STATUS_CANCELLED:
+ case JOB_TASK_STATUS_DROPPING:
+ if (newStatus != JOB_TASK_STATUS_FAILED && newStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED) {
+ QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
+ }
+ break;
+
+ default:
+ QW_TASK_ELOG("invalid task origStatus:%s", jobTaskStatusStr(oriStatus));
+ return TSDB_CODE_QRY_APP_ERROR;
+ }
+
+ return TSDB_CODE_SUCCESS;
+
+_return:
+
+ QW_TASK_ELOG("invalid task status update from %s to %s", jobTaskStatusStr(oriStatus), jobTaskStatusStr(newStatus));
+ QW_RET(code);
+}
+
+void qwDbgDumpSchInfo(SQWSchStatus *sch, int32_t i) {}
+
+void qwDbgDumpMgmtInfo(SQWorker *mgmt) {
+ if (!gQWDebug.dumpEnable) {
+ return;
+ }
+
+ QW_LOCK(QW_READ, &mgmt->schLock);
+
+ /*QW_DUMP("total remain schduler num:%d", taosHashGetSize(mgmt->schHash));*/
+
+ void *key = NULL;
+ size_t keyLen = 0;
+ int32_t i = 0;
+ SQWSchStatus *sch = NULL;
+
+ void *pIter = taosHashIterate(mgmt->schHash, NULL);
+ while (pIter) {
+ sch = (SQWSchStatus *)pIter;
+ qwDbgDumpSchInfo(sch, i);
+ ++i;
+ pIter = taosHashIterate(mgmt->schHash, pIter);
+ }
+
+ QW_UNLOCK(QW_READ, &mgmt->schLock);
+
+ /*QW_DUMP("total remain ctx num:%d", taosHashGetSize(mgmt->ctxHash));*/
+}
+
+
diff --git a/source/libs/qworker/src/qworkerMsg.c b/source/libs/qworker/src/qwMsg.c
similarity index 73%
rename from source/libs/qworker/src/qworkerMsg.c
rename to source/libs/qworker/src/qwMsg.c
index 0a192eb795b689285831f366aff30af4a3743b27..f8205a6bb4b2d004bc1c4f35b67eabc5635c5ca7 100644
--- a/source/libs/qworker/src/qworkerMsg.c
+++ b/source/libs/qworker/src/qwMsg.c
@@ -1,10 +1,10 @@
-#include "qworkerMsg.h"
+#include "qwMsg.h"
#include "dataSinkMgt.h"
#include "executor.h"
#include "planner.h"
#include "query.h"
#include "qworker.h"
-#include "qworkerInt.h"
+#include "qwInt.h"
#include "tcommon.h"
#include "tmsg.h"
#include "tname.h"
@@ -43,28 +43,8 @@ void qwFreeFetchRsp(void *msg) {
}
}
-int32_t qwBuildAndSendQueryRsp(SRpcHandleInfo *pConn, int32_t code) {
- SQueryTableRsp rsp = {.code = code};
-
- int32_t contLen = tSerializeSQueryTableRsp(NULL, 0, &rsp);
- void * msg = rpcMallocCont(contLen);
- tSerializeSQueryTableRsp(msg, contLen, &rsp);
-
- SRpcMsg rpcRsp = {
- .msgType = TDMT_VND_QUERY_RSP,
- .pCont = msg,
- .contLen = contLen,
- .code = code,
- .info = *pConn,
- };
-
- tmsgSendRsp(&rpcRsp);
-
- return TSDB_CODE_SUCCESS;
-}
-
-int32_t qwBuildAndSendReadyRsp(SRpcHandleInfo *pConn, int32_t code, STbVerInfo* tbInfo) {
- SResReadyRsp *pRsp = (SResReadyRsp *)rpcMallocCont(sizeof(SResReadyRsp));
+int32_t qwBuildAndSendQueryRsp(SRpcHandleInfo *pConn, int32_t code, STbVerInfo* tbInfo) {
+ SQueryTableRsp *pRsp = (SQueryTableRsp *)rpcMallocCont(sizeof(SQueryTableRsp));
pRsp->code = code;
if (tbInfo) {
strcpy(pRsp->tbFName, tbInfo->tbFName);
@@ -73,13 +53,12 @@ int32_t qwBuildAndSendReadyRsp(SRpcHandleInfo *pConn, int32_t code, STbVerInfo*
}
SRpcMsg rpcRsp = {
- .msgType = TDMT_VND_RES_READY_RSP,
+ .msgType = TDMT_VND_QUERY_RSP,
.pCont = pRsp,
.contLen = sizeof(*pRsp),
.code = code,
.info = *pConn,
};
- rpcRsp.info.ahandle = NULL;
tmsgSendRsp(&rpcRsp);
@@ -177,76 +156,6 @@ int32_t qwBuildAndSendDropRsp(SRpcHandleInfo *pConn, int32_t code) {
return TSDB_CODE_SUCCESS;
}
-int32_t qwBuildAndSendShowRsp(SRpcMsg *pMsg, int32_t code) {
- int32_t numOfCols = 6;
- SVShowTablesRsp showRsp = {0};
-
- // showRsp.showId = 1;
- showRsp.tableMeta.pSchemas = taosMemoryCalloc(numOfCols, sizeof(SSchema));
- if (showRsp.tableMeta.pSchemas == NULL) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- return -1;
- }
-
- col_id_t cols = 0;
- SSchema *pSchema = showRsp.tableMeta.pSchemas;
-
- const SSchema *s = tGetTbnameColumnSchema();
- *pSchema = createSchema(s->type, s->bytes, ++cols, "name");
- pSchema++;
-
- int32_t type = TSDB_DATA_TYPE_TIMESTAMP;
- *pSchema = createSchema(type, tDataTypes[type].bytes, ++cols, "created");
- pSchema++;
-
- type = TSDB_DATA_TYPE_SMALLINT;
- *pSchema = createSchema(type, tDataTypes[type].bytes, ++cols, "columns");
- pSchema++;
-
- *pSchema = createSchema(s->type, s->bytes, ++cols, "stable");
- pSchema++;
-
- type = TSDB_DATA_TYPE_BIGINT;
- *pSchema = createSchema(type, tDataTypes[type].bytes, ++cols, "uid");
- pSchema++;
-
- type = TSDB_DATA_TYPE_INT;
- *pSchema = createSchema(type, tDataTypes[type].bytes, ++cols, "vgId");
-
- assert(cols == numOfCols);
- showRsp.tableMeta.numOfColumns = cols;
-
- int32_t bufLen = tSerializeSShowRsp(NULL, 0, &showRsp);
- void * pBuf = rpcMallocCont(bufLen);
- tSerializeSShowRsp(pBuf, bufLen, &showRsp);
-
- SRpcMsg rpcMsg = {
- .info = pMsg->info,
- .pCont = pBuf,
- .contLen = bufLen,
- .code = code,
- };
-
- tmsgSendRsp(&rpcMsg);
- return TSDB_CODE_SUCCESS;
-}
-
-int32_t qwBuildAndSendShowFetchRsp(SRpcMsg *pMsg, SVShowTablesFetchReq *pFetchReq) {
- SVShowTablesFetchRsp *pRsp = (SVShowTablesFetchRsp *)rpcMallocCont(sizeof(SVShowTablesFetchRsp));
- int32_t handle = htonl(pFetchReq->id);
-
- pRsp->numOfRows = 0;
- SRpcMsg rpcMsg = {
- .info = pMsg->info,
- .pCont = pRsp,
- .contLen = sizeof(*pRsp),
- .code = 0,
- };
-
- tmsgSendRsp(&rpcMsg);
- return TSDB_CODE_SUCCESS;
-}
-
int32_t qwBuildAndSendCQueryMsg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn) {
SQueryContinueReq *req = (SQueryContinueReq *)rpcMallocCont(sizeof(SQueryContinueReq));
if (NULL == req) {
@@ -339,7 +248,7 @@ int32_t qwRegisterHbBrokenLinkArg(SQWorker *mgmt, uint64_t sId, SRpcHandleInfo *
return TSDB_CODE_SUCCESS;
}
-int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
+int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) {
if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) {
QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
}
@@ -348,6 +257,9 @@ int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
SSubQueryMsg *msg = pMsg->pCont;
SQWorker * mgmt = (SQWorker *)qWorkerMgmt;
+ qwUpdateTimeInQueue(mgmt, ts, QUERY_QUEUE);
+ QW_STAT_INC(mgmt->stat.msgStat.queryProcessed, 1);
+
if (NULL == msg || pMsg->contLen <= sizeof(*msg)) {
QW_ELOG("invalid query msg, msg:%p, msgLen:%d", msg, pMsg->contLen);
QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
@@ -377,7 +289,7 @@ int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
return TSDB_CODE_SUCCESS;
}
-int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
+int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) {
int32_t code = 0;
int8_t status = 0;
bool queryDone = false;
@@ -386,6 +298,9 @@ int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
SQWTaskCtx * handles = NULL;
SQWorker * mgmt = (SQWorker *)qWorkerMgmt;
+ qwUpdateTimeInQueue(mgmt, ts, QUERY_QUEUE);
+ QW_STAT_INC(mgmt->stat.msgStat.cqueryProcessed, 1);
+
if (NULL == msg || pMsg->contLen < sizeof(*msg)) {
QW_ELOG("invalid cquery msg, msg:%p, msgLen:%d", msg, pMsg->contLen);
QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
@@ -407,66 +322,7 @@ int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
return TSDB_CODE_SUCCESS;
}
-int32_t qWorkerProcessReadyMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
- if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) {
- return TSDB_CODE_QRY_INVALID_INPUT;
- }
-
- SQWorker * mgmt = (SQWorker *)qWorkerMgmt;
- SResReadyReq *msg = pMsg->pCont;
- if (NULL == msg || pMsg->contLen < sizeof(*msg)) {
- QW_ELOG("invalid task ready msg, msg:%p, msgLen:%d", msg, pMsg->contLen);
- QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
- }
-
- msg->sId = be64toh(msg->sId);
- msg->queryId = be64toh(msg->queryId);
- msg->taskId = be64toh(msg->taskId);
-
- uint64_t sId = msg->sId;
- uint64_t qId = msg->queryId;
- uint64_t tId = msg->taskId;
- int64_t rId = 0;
-
- SQWMsg qwMsg = {.node = node, .msg = NULL, .msgLen = 0, .connInfo = pMsg->info};
-
- QW_SCH_TASK_DLOG("processReady start, node:%p, handle:%p", node, pMsg->info.handle);
-
- QW_ERR_RET(qwProcessReady(QW_FPARAMS(), &qwMsg));
-
- QW_SCH_TASK_DLOG("processReady end, node:%p", node);
-
- return TSDB_CODE_SUCCESS;
-}
-
-int32_t qWorkerProcessStatusMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
- if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) {
- return TSDB_CODE_QRY_INVALID_INPUT;
- }
-
- int32_t code = 0;
- SSchTasksStatusReq *msg = pMsg->pCont;
- if (NULL == msg || pMsg->contLen < sizeof(*msg)) {
- qError("invalid task status msg");
- QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
- }
-
- SQWorker *mgmt = (SQWorker *)qWorkerMgmt;
- msg->sId = htobe64(msg->sId);
- uint64_t sId = msg->sId;
-
- SSchedulerStatusRsp *sStatus = NULL;
-
- // QW_ERR_JRET(qwGetSchTasksStatus(qWorkerMgmt, msg->sId, &sStatus));
-
-_return:
-
- // QW_ERR_RET(qwBuildAndSendStatusRsp(pMsg, sStatus));
-
- return TSDB_CODE_SUCCESS;
-}
-
-int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
+int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) {
if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) {
return TSDB_CODE_QRY_INVALID_INPUT;
}
@@ -474,6 +330,9 @@ int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
SResFetchReq *msg = pMsg->pCont;
SQWorker * mgmt = (SQWorker *)qWorkerMgmt;
+ qwUpdateTimeInQueue(mgmt, ts, FETCH_QUEUE);
+ QW_STAT_INC(mgmt->stat.msgStat.fetchProcessed, 1);
+
if (NULL == msg || pMsg->contLen < sizeof(*msg)) {
QW_ELOG("invalid fetch msg, msg:%p, msgLen:%d", msg, pMsg->contLen);
QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
@@ -499,13 +358,19 @@ int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
return TSDB_CODE_SUCCESS;
}
-int32_t qWorkerProcessFetchRsp(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
+int32_t qWorkerProcessFetchRsp(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) {
+ SQWorker * mgmt = (SQWorker *)qWorkerMgmt;
+ if (mgmt) {
+ qwUpdateTimeInQueue(mgmt, ts, FETCH_QUEUE);
+ QW_STAT_INC(mgmt->stat.msgStat.fetchRspProcessed, 1);
+ }
+
qProcessFetchRsp(NULL, pMsg, NULL);
pMsg->pCont = NULL;
return TSDB_CODE_SUCCESS;
}
-int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
+int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) {
if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) {
return TSDB_CODE_QRY_INVALID_INPUT;
}
@@ -513,6 +378,10 @@ int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
SQWorker * mgmt = (SQWorker *)qWorkerMgmt;
int32_t code = 0;
STaskCancelReq *msg = pMsg->pCont;
+
+ qwUpdateTimeInQueue(mgmt, ts, FETCH_QUEUE);
+ QW_STAT_INC(mgmt->stat.msgStat.cancelProcessed, 1);
+
if (NULL == msg || pMsg->contLen < sizeof(*msg)) {
qError("invalid task cancel msg");
QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
@@ -540,7 +409,7 @@ _return:
return TSDB_CODE_SUCCESS;
}
-int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
+int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) {
if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) {
return TSDB_CODE_QRY_INVALID_INPUT;
}
@@ -549,6 +418,9 @@ int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
STaskDropReq *msg = pMsg->pCont;
SQWorker * mgmt = (SQWorker *)qWorkerMgmt;
+ qwUpdateTimeInQueue(mgmt, ts, FETCH_QUEUE);
+ QW_STAT_INC(mgmt->stat.msgStat.dropProcessed, 1);
+
if (NULL == msg || pMsg->contLen < sizeof(*msg)) {
QW_ELOG("invalid task drop msg, msg:%p, msgLen:%d", msg, pMsg->contLen);
QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
@@ -579,7 +451,7 @@ int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
return TSDB_CODE_SUCCESS;
}
-int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
+int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) {
if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) {
return TSDB_CODE_QRY_INVALID_INPUT;
}
@@ -588,6 +460,9 @@ int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
SSchedulerHbReq req = {0};
SQWorker * mgmt = (SQWorker *)qWorkerMgmt;
+ qwUpdateTimeInQueue(mgmt, ts, FETCH_QUEUE);
+ QW_STAT_INC(mgmt->stat.msgStat.hbProcessed, 1);
+
if (NULL == pMsg->pCont) {
QW_ELOG("invalid hb msg, msg:%p, msgLen:%d", pMsg->pCont, pMsg->contLen);
QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
@@ -613,22 +488,3 @@ int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
return TSDB_CODE_SUCCESS;
}
-
-int32_t qWorkerProcessShowMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
- if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) {
- return TSDB_CODE_QRY_INVALID_INPUT;
- }
-
- int32_t code = 0;
- SVShowTablesReq *pReq = pMsg->pCont;
- QW_RET(qwBuildAndSendShowRsp(pMsg, code));
-}
-
-int32_t qWorkerProcessShowFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
- if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) {
- return TSDB_CODE_QRY_INVALID_INPUT;
- }
-
- SVShowTablesFetchReq *pFetchReq = pMsg->pCont;
- QW_RET(qwBuildAndSendShowFetchRsp(pMsg, pFetchReq));
-}
diff --git a/source/libs/qworker/src/qwUtil.c b/source/libs/qworker/src/qwUtil.c
new file mode 100644
index 0000000000000000000000000000000000000000..3d0204e355bd228836a2729cc9e52e74981c4e0f
--- /dev/null
+++ b/source/libs/qworker/src/qwUtil.c
@@ -0,0 +1,537 @@
+#include "dataSinkMgt.h"
+#include "executor.h"
+#include "planner.h"
+#include "query.h"
+#include "qwInt.h"
+#include "qwMsg.h"
+#include "qworker.h"
+#include "tcommon.h"
+#include "tmsg.h"
+#include "tname.h"
+
+char *qwPhaseStr(int32_t phase) {
+ switch (phase) {
+ case QW_PHASE_PRE_QUERY:
+ return "PRE_QUERY";
+ case QW_PHASE_POST_QUERY:
+ return "POST_QUERY";
+ case QW_PHASE_PRE_FETCH:
+ return "PRE_FETCH";
+ case QW_PHASE_POST_FETCH:
+ return "POST_FETCH";
+ case QW_PHASE_PRE_CQUERY:
+ return "PRE_CQUERY";
+ case QW_PHASE_POST_CQUERY:
+ return "POST_CQUERY";
+ default:
+ break;
+ }
+
+ return "UNKNOWN";
+}
+
+char *qwBufStatusStr(int32_t bufStatus) {
+ switch (bufStatus) {
+ case DS_BUF_LOW:
+ return "LOW";
+ case DS_BUF_FULL:
+ return "FULL";
+ case DS_BUF_EMPTY:
+ return "EMPTY";
+ default:
+ break;
+ }
+
+ return "UNKNOWN";
+}
+
+int32_t qwSetTaskStatus(QW_FPARAMS_DEF, SQWTaskStatus *task, int8_t status) {
+ int32_t code = 0;
+ int8_t origStatus = 0;
+ bool ignore = false;
+
+ while (true) {
+ origStatus = atomic_load_8(&task->status);
+
+ QW_ERR_RET(qwDbgValidateStatus(QW_FPARAMS(), origStatus, status, &ignore));
+ if (ignore) {
+ break;
+ }
+
+ if (origStatus != atomic_val_compare_exchange_8(&task->status, origStatus, status)) {
+ continue;
+ }
+
+ QW_TASK_DLOG("task status updated from %s to %s", jobTaskStatusStr(origStatus), jobTaskStatusStr(status));
+
+ break;
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t qwAddSchedulerImpl(SQWorker *mgmt, uint64_t sId, int32_t rwType) {
+ SQWSchStatus newSch = {0};
+ newSch.tasksHash =
+ taosHashInit(mgmt->cfg.maxSchTaskNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
+ if (NULL == newSch.tasksHash) {
+ QW_SCH_ELOG("taosHashInit %d failed", mgmt->cfg.maxSchTaskNum);
+ QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
+ }
+
+ QW_LOCK(QW_WRITE, &mgmt->schLock);
+ int32_t code = taosHashPut(mgmt->schHash, &sId, sizeof(sId), &newSch, sizeof(newSch));
+ if (0 != code) {
+ if (!HASH_NODE_EXIST(code)) {
+ QW_UNLOCK(QW_WRITE, &mgmt->schLock);
+
+ QW_SCH_ELOG("taosHashPut new sch to scheduleHash failed, errno:%d", errno);
+ taosHashCleanup(newSch.tasksHash);
+ QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
+ }
+
+ taosHashCleanup(newSch.tasksHash);
+ }
+ QW_UNLOCK(QW_WRITE, &mgmt->schLock);
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t qwAcquireSchedulerImpl(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchStatus **sch, int32_t nOpt) {
+ while (true) {
+ QW_LOCK(rwType, &mgmt->schLock);
+ *sch = taosHashGet(mgmt->schHash, &sId, sizeof(sId));
+ if (NULL == (*sch)) {
+ QW_UNLOCK(rwType, &mgmt->schLock);
+
+ if (QW_NOT_EXIST_ADD == nOpt) {
+ QW_ERR_RET(qwAddSchedulerImpl(mgmt, sId, rwType));
+
+ nOpt = QW_NOT_EXIST_RET_ERR;
+
+ continue;
+ } else if (QW_NOT_EXIST_RET_ERR == nOpt) {
+ QW_RET(TSDB_CODE_QRY_SCH_NOT_EXIST);
+ } else {
+ QW_SCH_ELOG("unknown notExistOpt:%d", nOpt);
+ QW_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
+ }
+ }
+
+ break;
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t qwAcquireAddScheduler(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchStatus **sch) {
+ return qwAcquireSchedulerImpl(mgmt, sId, rwType, sch, QW_NOT_EXIST_ADD);
+}
+
+int32_t qwAcquireScheduler(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchStatus **sch) {
+ return qwAcquireSchedulerImpl(mgmt, sId, rwType, sch, QW_NOT_EXIST_RET_ERR);
+}
+
+void qwReleaseScheduler(int32_t rwType, SQWorker *mgmt) { QW_UNLOCK(rwType, &mgmt->schLock); }
+
+int32_t qwAcquireTaskStatus(QW_FPARAMS_DEF, int32_t rwType, SQWSchStatus *sch, SQWTaskStatus **task) {
+ char id[sizeof(qId) + sizeof(tId)] = {0};
+ QW_SET_QTID(id, qId, tId);
+
+ QW_LOCK(rwType, &sch->tasksLock);
+ *task = taosHashGet(sch->tasksHash, id, sizeof(id));
+ if (NULL == (*task)) {
+ QW_UNLOCK(rwType, &sch->tasksLock);
+ QW_ERR_RET(TSDB_CODE_QRY_TASK_NOT_EXIST);
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t qwAddTaskStatusImpl(QW_FPARAMS_DEF, SQWSchStatus *sch, int32_t rwType, int32_t status, SQWTaskStatus **task) {
+ int32_t code = 0;
+
+ char id[sizeof(qId) + sizeof(tId)] = {0};
+ QW_SET_QTID(id, qId, tId);
+
+ SQWTaskStatus ntask = {0};
+ ntask.status = status;
+ ntask.refId = rId;
+
+ QW_LOCK(QW_WRITE, &sch->tasksLock);
+ code = taosHashPut(sch->tasksHash, id, sizeof(id), &ntask, sizeof(ntask));
+ if (0 != code) {
+ QW_UNLOCK(QW_WRITE, &sch->tasksLock);
+ if (HASH_NODE_EXIST(code)) {
+ if (rwType && task) {
+ QW_RET(qwAcquireTaskStatus(QW_FPARAMS(), rwType, sch, task));
+ } else {
+ QW_TASK_ELOG("task status already exist, newStatus:%s", jobTaskStatusStr(status));
+ QW_ERR_RET(TSDB_CODE_QRY_TASK_ALREADY_EXIST);
+ }
+ } else {
+ QW_TASK_ELOG("taosHashPut to tasksHash failed, error:%x - %s", code, tstrerror(code));
+ QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
+ }
+ }
+ QW_UNLOCK(QW_WRITE, &sch->tasksLock);
+
+ QW_TASK_DLOG("task status added, newStatus:%s", jobTaskStatusStr(status));
+
+ if (rwType && task) {
+ QW_ERR_RET(qwAcquireTaskStatus(QW_FPARAMS(), rwType, sch, task));
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t qwAddTaskStatus(QW_FPARAMS_DEF, int32_t status) {
+ SQWSchStatus *tsch = NULL;
+ int32_t code = 0;
+ QW_ERR_RET(qwAcquireAddScheduler(mgmt, sId, QW_READ, &tsch));
+
+ QW_ERR_JRET(qwAddTaskStatusImpl(QW_FPARAMS(), tsch, 0, status, NULL));
+
+_return:
+
+ qwReleaseScheduler(QW_READ, mgmt);
+
+ QW_RET(code);
+}
+
+int32_t qwAddAcquireTaskStatus(QW_FPARAMS_DEF, int32_t rwType, SQWSchStatus *sch, int32_t status,
+ SQWTaskStatus **task) {
+ return qwAddTaskStatusImpl(QW_FPARAMS(), sch, rwType, status, task);
+}
+
+void qwReleaseTaskStatus(int32_t rwType, SQWSchStatus *sch) { QW_UNLOCK(rwType, &sch->tasksLock); }
+
+int32_t qwAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) {
+ char id[sizeof(qId) + sizeof(tId)] = {0};
+ QW_SET_QTID(id, qId, tId);
+
+ *ctx = taosHashAcquire(mgmt->ctxHash, id, sizeof(id));
+ if (NULL == (*ctx)) {
+ QW_TASK_DLOG_E("task ctx not exist, may be dropped");
+ QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST);
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t qwGetTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) {
+ char id[sizeof(qId) + sizeof(tId)] = {0};
+ QW_SET_QTID(id, qId, tId);
+
+ *ctx = taosHashGet(mgmt->ctxHash, id, sizeof(id));
+ if (NULL == (*ctx)) {
+ QW_TASK_DLOG_E("task ctx not exist, may be dropped");
+ QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST);
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t qwAddTaskCtxImpl(QW_FPARAMS_DEF, bool acquire, SQWTaskCtx **ctx) {
+ char id[sizeof(qId) + sizeof(tId)] = {0};
+ QW_SET_QTID(id, qId, tId);
+
+ SQWTaskCtx nctx = {0};
+
+ int32_t code = taosHashPut(mgmt->ctxHash, id, sizeof(id), &nctx, sizeof(SQWTaskCtx));
+ if (0 != code) {
+ if (HASH_NODE_EXIST(code)) {
+ if (acquire && ctx) {
+ QW_RET(qwAcquireTaskCtx(QW_FPARAMS(), ctx));
+ } else if (ctx) {
+ QW_RET(qwGetTaskCtx(QW_FPARAMS(), ctx));
+ } else {
+ QW_TASK_ELOG_E("task ctx already exist");
+ QW_ERR_RET(TSDB_CODE_QRY_TASK_ALREADY_EXIST);
+ }
+ } else {
+ QW_TASK_ELOG("taosHashPut to ctxHash failed, error:%x", code);
+ QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
+ }
+ }
+
+ if (acquire && ctx) {
+ QW_RET(qwAcquireTaskCtx(QW_FPARAMS(), ctx));
+ } else if (ctx) {
+ QW_RET(qwGetTaskCtx(QW_FPARAMS(), ctx));
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t qwAddTaskCtx(QW_FPARAMS_DEF) { QW_RET(qwAddTaskCtxImpl(QW_FPARAMS(), false, NULL)); }
+
+int32_t qwAddAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) { return qwAddTaskCtxImpl(QW_FPARAMS(), true, ctx); }
+
+void qwReleaseTaskCtx(SQWorker *mgmt, void *ctx) { taosHashRelease(mgmt->ctxHash, ctx); }
+
+void qwFreeTaskHandle(QW_FPARAMS_DEF, qTaskInfo_t *taskHandle) {
+ // Note: free/kill may in RC
+ qTaskInfo_t otaskHandle = atomic_load_ptr(taskHandle);
+ if (otaskHandle && atomic_val_compare_exchange_ptr(taskHandle, otaskHandle, NULL)) {
+ qDestroyTask(otaskHandle);
+ }
+}
+
+int32_t qwKillTaskHandle(QW_FPARAMS_DEF, SQWTaskCtx *ctx) {
+ int32_t code = 0;
+ // Note: free/kill may in RC
+ qTaskInfo_t taskHandle = atomic_load_ptr(&ctx->taskHandle);
+ if (taskHandle && atomic_val_compare_exchange_ptr(&ctx->taskHandle, taskHandle, NULL)) {
+ code = qAsyncKillTask(taskHandle);
+ atomic_store_ptr(&ctx->taskHandle, taskHandle);
+ }
+
+ QW_RET(code);
+}
+
+void qwFreeTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx) {
+ tmsgReleaseHandle(&ctx->ctrlConnInfo, TAOS_CONN_SERVER);
+ ctx->ctrlConnInfo.handle = NULL;
+ ctx->ctrlConnInfo.refId = -1;
+
+ // NO need to release dataConnInfo
+
+ qwFreeTaskHandle(QW_FPARAMS(), &ctx->taskHandle);
+
+ if (ctx->sinkHandle) {
+ dsDestroyDataSinker(ctx->sinkHandle);
+ ctx->sinkHandle = NULL;
+ }
+
+ if (ctx->plan) {
+ nodesDestroyNode(ctx->plan);
+ ctx->plan = NULL;
+ }
+}
+
+int32_t qwDropTaskCtx(QW_FPARAMS_DEF) {
+ char id[sizeof(qId) + sizeof(tId)] = {0};
+ QW_SET_QTID(id, qId, tId);
+ SQWTaskCtx octx;
+
+ SQWTaskCtx *ctx = taosHashGet(mgmt->ctxHash, id, sizeof(id));
+ if (NULL == ctx) {
+ QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST);
+ }
+
+ octx = *ctx;
+
+ atomic_store_ptr(&ctx->taskHandle, NULL);
+ atomic_store_ptr(&ctx->sinkHandle, NULL);
+ atomic_store_ptr(&ctx->plan, NULL);
+
+ QW_SET_EVENT_PROCESSED(ctx, QW_EVENT_DROP);
+
+ if (taosHashRemove(mgmt->ctxHash, id, sizeof(id))) {
+ QW_TASK_ELOG_E("taosHashRemove from ctx hash failed");
+ QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST);
+ }
+
+ qwFreeTask(QW_FPARAMS(), &octx);
+
+ QW_TASK_DLOG_E("task ctx dropped");
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t qwDropTaskStatus(QW_FPARAMS_DEF) {
+ SQWSchStatus *sch = NULL;
+ SQWTaskStatus *task = NULL;
+ int32_t code = 0;
+
+ char id[sizeof(qId) + sizeof(tId)] = {0};
+ QW_SET_QTID(id, qId, tId);
+
+ if (qwAcquireScheduler(mgmt, sId, QW_WRITE, &sch)) {
+ QW_TASK_WLOG_E("scheduler does not exist");
+ return TSDB_CODE_SUCCESS;
+ }
+
+ if (qwAcquireTaskStatus(QW_FPARAMS(), QW_WRITE, sch, &task)) {
+ qwReleaseScheduler(QW_WRITE, mgmt);
+
+ QW_TASK_WLOG_E("task does not exist");
+ return TSDB_CODE_SUCCESS;
+ }
+
+ if (taosHashRemove(sch->tasksHash, id, sizeof(id))) {
+ QW_TASK_ELOG_E("taosHashRemove task from hash failed");
+ QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
+ }
+
+ QW_TASK_DLOG_E("task status dropped");
+
+_return:
+
+ if (task) {
+ qwReleaseTaskStatus(QW_WRITE, sch);
+ }
+ qwReleaseScheduler(QW_WRITE, mgmt);
+
+ QW_RET(code);
+}
+
+int32_t qwUpdateTaskStatus(QW_FPARAMS_DEF, int8_t status) {
+ SQWSchStatus *sch = NULL;
+ SQWTaskStatus *task = NULL;
+ int32_t code = 0;
+
+ QW_ERR_RET(qwAcquireScheduler(mgmt, sId, QW_READ, &sch));
+ QW_ERR_JRET(qwAcquireTaskStatus(QW_FPARAMS(), QW_READ, sch, &task));
+
+ QW_ERR_JRET(qwSetTaskStatus(QW_FPARAMS(), task, status));
+
+_return:
+
+ if (task) {
+ qwReleaseTaskStatus(QW_READ, sch);
+ }
+ qwReleaseScheduler(QW_READ, mgmt);
+
+ QW_RET(code);
+}
+
+int32_t qwDropTask(QW_FPARAMS_DEF) {
+ QW_ERR_RET(qwDropTaskStatus(QW_FPARAMS()));
+ QW_ERR_RET(qwDropTaskCtx(QW_FPARAMS()));
+
+ QW_TASK_DLOG_E("task is dropped");
+
+ return TSDB_CODE_SUCCESS;
+}
+
+void qwSetHbParam(int64_t refId, SQWHbParam **pParam) {
+ int32_t paramIdx = 0;
+ int32_t newParamIdx = 0;
+
+ while (true) {
+ paramIdx = atomic_load_32(&gQwMgmt.paramIdx);
+ if (paramIdx == tListLen(gQwMgmt.param)) {
+ newParamIdx = 0;
+ } else {
+ newParamIdx = paramIdx + 1;
+ }
+
+ if (paramIdx == atomic_val_compare_exchange_32(&gQwMgmt.paramIdx, paramIdx, newParamIdx)) {
+ break;
+ }
+ }
+
+ gQwMgmt.param[paramIdx].qwrId = gQwMgmt.qwRef;
+ gQwMgmt.param[paramIdx].refId = refId;
+
+ *pParam = &gQwMgmt.param[paramIdx];
+}
+
+void qwSaveTbVersionInfo(qTaskInfo_t pTaskInfo, SQWTaskCtx *ctx) {
+ char dbFName[TSDB_DB_FNAME_LEN];
+ char tbName[TSDB_TABLE_NAME_LEN];
+
+ qGetQueriedTableSchemaVersion(pTaskInfo, dbFName, tbName, &ctx->tbInfo.sversion, &ctx->tbInfo.tversion);
+
+ if (dbFName[0] && tbName[0]) {
+ sprintf(ctx->tbInfo.tbFName, "%s.%s", dbFName, tbName);
+ } else {
+ ctx->tbInfo.tbFName[0] = 0;
+ }
+}
+
+void qwCloseRef(void) {
+ taosWLockLatch(&gQwMgmt.lock);
+ if (atomic_load_32(&gQwMgmt.qwNum) <= 0 && gQwMgmt.qwRef >= 0) {
+ taosCloseRef(gQwMgmt.qwRef);
+ gQwMgmt.qwRef = -1;
+ }
+ taosWUnLockLatch(&gQwMgmt.lock);
+}
+
+void qwDestroySchStatus(SQWSchStatus *pStatus) { taosHashCleanup(pStatus->tasksHash); }
+
+void qwDestroyImpl(void *pMgmt) {
+ SQWorker *mgmt = (SQWorker *)pMgmt;
+
+ taosTmrStop(mgmt->hbTimer);
+ mgmt->hbTimer = NULL;
+ taosTmrCleanUp(mgmt->timer);
+
+ // TODO STOP ALL QUERY
+
+ // TODO FREE ALL
+
+ taosHashCleanup(mgmt->ctxHash);
+
+ void *pIter = taosHashIterate(mgmt->schHash, NULL);
+ while (pIter) {
+ SQWSchStatus *sch = (SQWSchStatus *)pIter;
+ qwDestroySchStatus(sch);
+ pIter = taosHashIterate(mgmt->schHash, pIter);
+ }
+ taosHashCleanup(mgmt->schHash);
+
+ taosMemoryFree(mgmt);
+
+ atomic_sub_fetch_32(&gQwMgmt.qwNum, 1);
+
+ qwCloseRef();
+}
+
+int32_t qwOpenRef(void) {
+ taosWLockLatch(&gQwMgmt.lock);
+ if (gQwMgmt.qwRef < 0) {
+ gQwMgmt.qwRef = taosOpenRef(100, qwDestroyImpl);
+ if (gQwMgmt.qwRef < 0) {
+ taosWUnLockLatch(&gQwMgmt.lock);
+ qError("init qworker ref failed");
+ QW_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
+ }
+ }
+ taosWUnLockLatch(&gQwMgmt.lock);
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t qwUpdateTimeInQueue(SQWorker *mgmt, int64_t ts, EQueueType type) {
+ if (ts <= 0) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ int64_t duration = taosGetTimestampUs() - ts;
+ switch (type) {
+ case QUERY_QUEUE:
+ ++mgmt->stat.msgStat.waitTime[0].num;
+ mgmt->stat.msgStat.waitTime[0].total += duration;
+ break;
+ case FETCH_QUEUE:
+ ++mgmt->stat.msgStat.waitTime[1].num;
+ mgmt->stat.msgStat.waitTime[1].total += duration;
+ break;
+ default:
+ qError("unsupported queue type %d", type);
+ return TSDB_CODE_APP_ERROR;
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int64_t qwGetTimeInQueue(SQWorker *mgmt, EQueueType type) {
+ SQWTimeInQ *pStat = NULL;
+ switch (type) {
+ case QUERY_QUEUE:
+ pStat = &mgmt->stat.msgStat.waitTime[0];
+ return pStat->num ? (pStat->total / pStat->num) : 0;
+ case FETCH_QUEUE:
+ pStat = &mgmt->stat.msgStat.waitTime[1];
+ return pStat->num ? (pStat->total / pStat->num) : 0;
+ default:
+ qError("unsupported queue type %d", type);
+ }
+
+ return -1;
+}
+
diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c
index e7a680de3c0287be37dc5084715c05e4d1283088..fd16fa53b7a6d9b4c5e460719c3fc09134ef034b 100644
--- a/source/libs/qworker/src/qworker.c
+++ b/source/libs/qworker/src/qworker.c
@@ -1,530 +1,45 @@
-#include "qworker.h"
#include "dataSinkMgt.h"
#include "executor.h"
#include "planner.h"
#include "query.h"
-#include "qworkerInt.h"
-#include "qworkerMsg.h"
+#include "qwInt.h"
+#include "qwMsg.h"
#include "tcommon.h"
#include "tmsg.h"
#include "tname.h"
+#include "qworker.h"
-SQWDebug gQWDebug = {.statusEnable = true, .dumpEnable = true};
SQWorkerMgmt gQwMgmt = {
.lock = 0,
.qwRef = -1,
.qwNum = 0,
};
-int32_t qwDbgValidateStatus(QW_FPARAMS_DEF, int8_t oriStatus, int8_t newStatus, bool *ignore) {
- if (!gQWDebug.statusEnable) {
- return TSDB_CODE_SUCCESS;
- }
-
- int32_t code = 0;
-
- if (oriStatus == newStatus) {
- if (newStatus == JOB_TASK_STATUS_EXECUTING || newStatus == JOB_TASK_STATUS_FAILED) {
- *ignore = true;
- return TSDB_CODE_SUCCESS;
- }
-
- QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
- }
-
- switch (oriStatus) {
- case JOB_TASK_STATUS_NULL:
- if (newStatus != JOB_TASK_STATUS_EXECUTING && newStatus != JOB_TASK_STATUS_FAILED &&
- newStatus != JOB_TASK_STATUS_NOT_START) {
- QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
- }
-
- break;
- case JOB_TASK_STATUS_NOT_START:
- if (newStatus != JOB_TASK_STATUS_CANCELLED) {
- QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
- }
-
- break;
- case JOB_TASK_STATUS_EXECUTING:
- if (newStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED && newStatus != JOB_TASK_STATUS_SUCCEED &&
- newStatus != JOB_TASK_STATUS_FAILED && newStatus != JOB_TASK_STATUS_CANCELLING &&
- newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_DROPPING) {
- QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
- }
-
- break;
- case JOB_TASK_STATUS_PARTIAL_SUCCEED:
- if (newStatus != JOB_TASK_STATUS_EXECUTING && newStatus != JOB_TASK_STATUS_SUCCEED &&
- newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_FAILED &&
- newStatus != JOB_TASK_STATUS_DROPPING) {
- QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
- }
-
- break;
- case JOB_TASK_STATUS_SUCCEED:
- if (newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_DROPPING &&
- newStatus != JOB_TASK_STATUS_FAILED) {
- QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
- }
-
- break;
- case JOB_TASK_STATUS_FAILED:
- if (newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_DROPPING) {
- QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
- }
- break;
-
- case JOB_TASK_STATUS_CANCELLING:
- if (newStatus != JOB_TASK_STATUS_CANCELLED) {
- QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
- }
-
- break;
- case JOB_TASK_STATUS_CANCELLED:
- case JOB_TASK_STATUS_DROPPING:
- if (newStatus != JOB_TASK_STATUS_FAILED && newStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED) {
- QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
- }
- break;
-
- default:
- QW_TASK_ELOG("invalid task origStatus:%s", jobTaskStatusStr(oriStatus));
- return TSDB_CODE_QRY_APP_ERROR;
- }
-
- return TSDB_CODE_SUCCESS;
-
-_return:
-
- QW_TASK_ELOG("invalid task status update from %s to %s", jobTaskStatusStr(oriStatus), jobTaskStatusStr(newStatus));
- QW_RET(code);
-}
-
-void qwDbgDumpSchInfo(SQWSchStatus *sch, int32_t i) {}
-
-void qwDbgDumpMgmtInfo(SQWorker *mgmt) {
- if (!gQWDebug.dumpEnable) {
- return;
- }
-
- QW_LOCK(QW_READ, &mgmt->schLock);
-
- /*QW_DUMP("total remain schduler num:%d", taosHashGetSize(mgmt->schHash));*/
-
- void *key = NULL;
- size_t keyLen = 0;
- int32_t i = 0;
- SQWSchStatus *sch = NULL;
-
- void *pIter = taosHashIterate(mgmt->schHash, NULL);
- while (pIter) {
- sch = (SQWSchStatus *)pIter;
- qwDbgDumpSchInfo(sch, i);
- ++i;
- pIter = taosHashIterate(mgmt->schHash, pIter);
- }
-
- QW_UNLOCK(QW_READ, &mgmt->schLock);
-
- /*QW_DUMP("total remain ctx num:%d", taosHashGetSize(mgmt->ctxHash));*/
-}
-
-char *qwPhaseStr(int32_t phase) {
- switch (phase) {
- case QW_PHASE_PRE_QUERY:
- return "PRE_QUERY";
- case QW_PHASE_POST_QUERY:
- return "POST_QUERY";
- case QW_PHASE_PRE_FETCH:
- return "PRE_FETCH";
- case QW_PHASE_POST_FETCH:
- return "POST_FETCH";
- case QW_PHASE_PRE_CQUERY:
- return "PRE_CQUERY";
- case QW_PHASE_POST_CQUERY:
- return "POST_CQUERY";
- default:
- break;
- }
-
- return "UNKNOWN";
-}
-
-char *qwBufStatusStr(int32_t bufStatus) {
- switch (bufStatus) {
- case DS_BUF_LOW:
- return "LOW";
- case DS_BUF_FULL:
- return "FULL";
- case DS_BUF_EMPTY:
- return "EMPTY";
- default:
- break;
- }
-
- return "UNKNOWN";
-}
-
-int32_t qwSetTaskStatus(QW_FPARAMS_DEF, SQWTaskStatus *task, int8_t status) {
- int32_t code = 0;
- int8_t origStatus = 0;
- bool ignore = false;
-
- while (true) {
- origStatus = atomic_load_8(&task->status);
-
- QW_ERR_RET(qwDbgValidateStatus(QW_FPARAMS(), origStatus, status, &ignore));
- if (ignore) {
- break;
- }
-
- if (origStatus != atomic_val_compare_exchange_8(&task->status, origStatus, status)) {
- continue;
- }
-
- QW_TASK_DLOG("task status updated from %s to %s", jobTaskStatusStr(origStatus), jobTaskStatusStr(status));
-
- break;
- }
-
- return TSDB_CODE_SUCCESS;
-}
-
-int32_t qwAddSchedulerImpl(SQWorker *mgmt, uint64_t sId, int32_t rwType) {
- SQWSchStatus newSch = {0};
- newSch.tasksHash =
- taosHashInit(mgmt->cfg.maxSchTaskNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
- if (NULL == newSch.tasksHash) {
- QW_SCH_ELOG("taosHashInit %d failed", mgmt->cfg.maxSchTaskNum);
- QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
- }
-
- QW_LOCK(QW_WRITE, &mgmt->schLock);
- int32_t code = taosHashPut(mgmt->schHash, &sId, sizeof(sId), &newSch, sizeof(newSch));
- if (0 != code) {
- if (!HASH_NODE_EXIST(code)) {
- QW_UNLOCK(QW_WRITE, &mgmt->schLock);
-
- QW_SCH_ELOG("taosHashPut new sch to scheduleHash failed, errno:%d", errno);
- taosHashCleanup(newSch.tasksHash);
- QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
- }
-
- taosHashCleanup(newSch.tasksHash);
- }
- QW_UNLOCK(QW_WRITE, &mgmt->schLock);
-
- return TSDB_CODE_SUCCESS;
-}
-
-int32_t qwAcquireSchedulerImpl(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchStatus **sch, int32_t nOpt) {
- while (true) {
- QW_LOCK(rwType, &mgmt->schLock);
- *sch = taosHashGet(mgmt->schHash, &sId, sizeof(sId));
- if (NULL == (*sch)) {
- QW_UNLOCK(rwType, &mgmt->schLock);
-
- if (QW_NOT_EXIST_ADD == nOpt) {
- QW_ERR_RET(qwAddSchedulerImpl(mgmt, sId, rwType));
-
- nOpt = QW_NOT_EXIST_RET_ERR;
-
- continue;
- } else if (QW_NOT_EXIST_RET_ERR == nOpt) {
- QW_RET(TSDB_CODE_QRY_SCH_NOT_EXIST);
- } else {
- QW_SCH_ELOG("unknown notExistOpt:%d", nOpt);
- QW_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
- }
- }
-
- break;
- }
-
- return TSDB_CODE_SUCCESS;
-}
-
-int32_t qwAcquireAddScheduler(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchStatus **sch) {
- return qwAcquireSchedulerImpl(mgmt, sId, rwType, sch, QW_NOT_EXIST_ADD);
-}
-
-int32_t qwAcquireScheduler(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchStatus **sch) {
- return qwAcquireSchedulerImpl(mgmt, sId, rwType, sch, QW_NOT_EXIST_RET_ERR);
-}
-
-void qwReleaseScheduler(int32_t rwType, SQWorker *mgmt) { QW_UNLOCK(rwType, &mgmt->schLock); }
-
-int32_t qwAcquireTaskStatus(QW_FPARAMS_DEF, int32_t rwType, SQWSchStatus *sch, SQWTaskStatus **task) {
- char id[sizeof(qId) + sizeof(tId)] = {0};
- QW_SET_QTID(id, qId, tId);
-
- QW_LOCK(rwType, &sch->tasksLock);
- *task = taosHashGet(sch->tasksHash, id, sizeof(id));
- if (NULL == (*task)) {
- QW_UNLOCK(rwType, &sch->tasksLock);
- QW_ERR_RET(TSDB_CODE_QRY_TASK_NOT_EXIST);
- }
-
- return TSDB_CODE_SUCCESS;
-}
-
-int32_t qwAddTaskStatusImpl(QW_FPARAMS_DEF, SQWSchStatus *sch, int32_t rwType, int32_t status, SQWTaskStatus **task) {
- int32_t code = 0;
-
- char id[sizeof(qId) + sizeof(tId)] = {0};
- QW_SET_QTID(id, qId, tId);
-
- SQWTaskStatus ntask = {0};
- ntask.status = status;
- ntask.refId = rId;
-
- QW_LOCK(QW_WRITE, &sch->tasksLock);
- code = taosHashPut(sch->tasksHash, id, sizeof(id), &ntask, sizeof(ntask));
- if (0 != code) {
- QW_UNLOCK(QW_WRITE, &sch->tasksLock);
- if (HASH_NODE_EXIST(code)) {
- if (rwType && task) {
- QW_RET(qwAcquireTaskStatus(QW_FPARAMS(), rwType, sch, task));
- } else {
- QW_TASK_ELOG("task status already exist, newStatus:%s", jobTaskStatusStr(status));
- QW_ERR_RET(TSDB_CODE_QRY_TASK_ALREADY_EXIST);
- }
- } else {
- QW_TASK_ELOG("taosHashPut to tasksHash failed, error:%x - %s", code, tstrerror(code));
- QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
- }
- }
- QW_UNLOCK(QW_WRITE, &sch->tasksLock);
-
- QW_TASK_DLOG("task status added, newStatus:%s", jobTaskStatusStr(status));
-
- if (rwType && task) {
- QW_ERR_RET(qwAcquireTaskStatus(QW_FPARAMS(), rwType, sch, task));
- }
-
- return TSDB_CODE_SUCCESS;
-}
-
-int32_t qwAddTaskStatus(QW_FPARAMS_DEF, int32_t status) {
- SQWSchStatus *tsch = NULL;
- int32_t code = 0;
- QW_ERR_RET(qwAcquireAddScheduler(mgmt, sId, QW_READ, &tsch));
-
- QW_ERR_JRET(qwAddTaskStatusImpl(QW_FPARAMS(), tsch, 0, status, NULL));
-
-_return:
-
- qwReleaseScheduler(QW_READ, mgmt);
-
- QW_RET(code);
-}
-
-int32_t qwAddAcquireTaskStatus(QW_FPARAMS_DEF, int32_t rwType, SQWSchStatus *sch, int32_t status,
- SQWTaskStatus **task) {
- return qwAddTaskStatusImpl(QW_FPARAMS(), sch, rwType, status, task);
-}
-
-void qwReleaseTaskStatus(int32_t rwType, SQWSchStatus *sch) { QW_UNLOCK(rwType, &sch->tasksLock); }
-
-int32_t qwAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) {
- char id[sizeof(qId) + sizeof(tId)] = {0};
- QW_SET_QTID(id, qId, tId);
-
- *ctx = taosHashAcquire(mgmt->ctxHash, id, sizeof(id));
- if (NULL == (*ctx)) {
- QW_TASK_DLOG_E("task ctx not exist, may be dropped");
- QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST);
- }
-
- return TSDB_CODE_SUCCESS;
-}
-
-int32_t qwGetTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) {
- char id[sizeof(qId) + sizeof(tId)] = {0};
- QW_SET_QTID(id, qId, tId);
-
- *ctx = taosHashGet(mgmt->ctxHash, id, sizeof(id));
- if (NULL == (*ctx)) {
- QW_TASK_DLOG_E("task ctx not exist, may be dropped");
- QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST);
- }
-
- return TSDB_CODE_SUCCESS;
-}
-
-int32_t qwAddTaskCtxImpl(QW_FPARAMS_DEF, bool acquire, SQWTaskCtx **ctx) {
- char id[sizeof(qId) + sizeof(tId)] = {0};
- QW_SET_QTID(id, qId, tId);
-
- SQWTaskCtx nctx = {0};
-
- int32_t code = taosHashPut(mgmt->ctxHash, id, sizeof(id), &nctx, sizeof(SQWTaskCtx));
- if (0 != code) {
- if (HASH_NODE_EXIST(code)) {
- if (acquire && ctx) {
- QW_RET(qwAcquireTaskCtx(QW_FPARAMS(), ctx));
- } else if (ctx) {
- QW_RET(qwGetTaskCtx(QW_FPARAMS(), ctx));
- } else {
- QW_TASK_ELOG_E("task ctx already exist");
- QW_ERR_RET(TSDB_CODE_QRY_TASK_ALREADY_EXIST);
- }
- } else {
- QW_TASK_ELOG("taosHashPut to ctxHash failed, error:%x", code);
- QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
- }
- }
-
- if (acquire && ctx) {
- QW_RET(qwAcquireTaskCtx(QW_FPARAMS(), ctx));
- } else if (ctx) {
- QW_RET(qwGetTaskCtx(QW_FPARAMS(), ctx));
- }
-
- return TSDB_CODE_SUCCESS;
-}
-
-int32_t qwAddTaskCtx(QW_FPARAMS_DEF) { QW_RET(qwAddTaskCtxImpl(QW_FPARAMS(), false, NULL)); }
-
-int32_t qwAddAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) { return qwAddTaskCtxImpl(QW_FPARAMS(), true, ctx); }
-
-void qwReleaseTaskCtx(SQWorker *mgmt, void *ctx) { taosHashRelease(mgmt->ctxHash, ctx); }
-
-void qwFreeTaskHandle(QW_FPARAMS_DEF, qTaskInfo_t *taskHandle) {
- // Note: free/kill may in RC
- qTaskInfo_t otaskHandle = atomic_load_ptr(taskHandle);
- if (otaskHandle && atomic_val_compare_exchange_ptr(taskHandle, otaskHandle, NULL)) {
- qDestroyTask(otaskHandle);
- }
-}
-
-int32_t qwKillTaskHandle(QW_FPARAMS_DEF, SQWTaskCtx *ctx) {
- int32_t code = 0;
- // Note: free/kill may in RC
- qTaskInfo_t taskHandle = atomic_load_ptr(&ctx->taskHandle);
- if (taskHandle && atomic_val_compare_exchange_ptr(&ctx->taskHandle, taskHandle, NULL)) {
- code = qAsyncKillTask(taskHandle);
- atomic_store_ptr(&ctx->taskHandle, taskHandle);
- }
-
- QW_RET(code);
-}
-
-void qwFreeTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx) {
- tmsgReleaseHandle(&ctx->ctrlConnInfo, TAOS_CONN_SERVER);
- ctx->ctrlConnInfo.handle = NULL;
- ctx->ctrlConnInfo.refId = -1;
-
- // NO need to release dataConnInfo
-
- qwFreeTaskHandle(QW_FPARAMS(), &ctx->taskHandle);
-
- if (ctx->sinkHandle) {
- dsDestroyDataSinker(ctx->sinkHandle);
- ctx->sinkHandle = NULL;
- }
-
- if (ctx->plan) {
- nodesDestroyNode(ctx->plan);
- ctx->plan = NULL;
- }
-}
-
-int32_t qwDropTaskCtx(QW_FPARAMS_DEF) {
- char id[sizeof(qId) + sizeof(tId)] = {0};
- QW_SET_QTID(id, qId, tId);
- SQWTaskCtx octx;
-
- SQWTaskCtx *ctx = taosHashGet(mgmt->ctxHash, id, sizeof(id));
- if (NULL == ctx) {
- QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST);
- }
-
- octx = *ctx;
-
- atomic_store_ptr(&ctx->taskHandle, NULL);
- atomic_store_ptr(&ctx->sinkHandle, NULL);
- atomic_store_ptr(&ctx->plan, NULL);
-
- QW_SET_EVENT_PROCESSED(ctx, QW_EVENT_DROP);
-
- if (taosHashRemove(mgmt->ctxHash, id, sizeof(id))) {
- QW_TASK_ELOG_E("taosHashRemove from ctx hash failed");
- QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST);
- }
-
- qwFreeTask(QW_FPARAMS(), &octx);
-
- QW_TASK_DLOG_E("task ctx dropped");
-
- return TSDB_CODE_SUCCESS;
-}
-
-int32_t qwDropTaskStatus(QW_FPARAMS_DEF) {
- SQWSchStatus *sch = NULL;
- SQWTaskStatus *task = NULL;
- int32_t code = 0;
-
- char id[sizeof(qId) + sizeof(tId)] = {0};
- QW_SET_QTID(id, qId, tId);
-
- if (qwAcquireScheduler(mgmt, sId, QW_WRITE, &sch)) {
- QW_TASK_WLOG_E("scheduler does not exist");
- return TSDB_CODE_SUCCESS;
- }
-
- if (qwAcquireTaskStatus(QW_FPARAMS(), QW_WRITE, sch, &task)) {
- qwReleaseScheduler(QW_WRITE, mgmt);
- QW_TASK_WLOG_E("task does not exist");
- return TSDB_CODE_SUCCESS;
- }
+int32_t qwProcessHbLinkBroken(SQWorker *mgmt, SQWMsg *qwMsg, SSchedulerHbReq *req) {
+ int32_t code = 0;
+ SSchedulerHbRsp rsp = {0};
+ SQWSchStatus *sch = NULL;
- if (taosHashRemove(sch->tasksHash, id, sizeof(id))) {
- QW_TASK_ELOG_E("taosHashRemove task from hash failed");
- QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
- }
+ QW_ERR_RET(qwAcquireAddScheduler(mgmt, req->sId, QW_READ, &sch));
- QW_TASK_DLOG_E("task status dropped");
+ QW_LOCK(QW_WRITE, &sch->hbConnLock);
-_return:
+ if (qwMsg->connInfo.handle == sch->hbConnInfo.handle) {
+ tmsgReleaseHandle(&sch->hbConnInfo, TAOS_CONN_SERVER);
+ sch->hbConnInfo.handle = NULL;
+ sch->hbConnInfo.ahandle = NULL;
- if (task) {
- qwReleaseTaskStatus(QW_WRITE, sch);
+ QW_DLOG("release hb handle due to connection broken, handle:%p", qwMsg->connInfo.handle);
+ } else {
+ QW_DLOG("ignore hb connection broken, handle:%p, currentHandle:%p", qwMsg->connInfo.handle, sch->hbConnInfo.handle);
}
- qwReleaseScheduler(QW_WRITE, mgmt);
-
- QW_RET(code);
-}
-
-int32_t qwUpdateTaskStatus(QW_FPARAMS_DEF, int8_t status) {
- SQWSchStatus *sch = NULL;
- SQWTaskStatus *task = NULL;
- int32_t code = 0;
-
- QW_ERR_RET(qwAcquireScheduler(mgmt, sId, QW_READ, &sch));
- QW_ERR_JRET(qwAcquireTaskStatus(QW_FPARAMS(), QW_READ, sch, &task));
-
- QW_ERR_JRET(qwSetTaskStatus(QW_FPARAMS(), task, status));
-_return:
+ QW_UNLOCK(QW_WRITE, &sch->hbConnLock);
- if (task) {
- qwReleaseTaskStatus(QW_READ, sch);
- }
qwReleaseScheduler(QW_READ, mgmt);
- QW_RET(code);
-}
-
-int32_t qwDropTask(QW_FPARAMS_DEF) {
- QW_ERR_RET(qwDropTaskStatus(QW_FPARAMS()));
- QW_ERR_RET(qwDropTaskCtx(QW_FPARAMS()));
-
- QW_TASK_DLOG_E("task is dropped");
-
- return TSDB_CODE_SUCCESS;
+ QW_RET(TSDB_CODE_SUCCESS);
}
int32_t qwHandleTaskComplete(QW_FPARAMS_DEF, SQWTaskCtx *ctx) {
@@ -564,7 +79,11 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryEnd) {
if (taskHandle) {
code = qExecTask(taskHandle, &pRes, &useconds);
if (code) {
- QW_TASK_ELOG("qExecTask failed, code:%x - %s", code, tstrerror(code));
+ if (code != TSDB_CODE_OPS_NOT_SUPPORT) {
+ QW_TASK_ELOG("qExecTask failed, code:%x - %s", code, tstrerror(code));
+ } else {
+ QW_TASK_DLOG("qExecTask failed, code:%x - %s", code, tstrerror(code));
+ }
QW_ERR_RET(code);
}
}
@@ -722,23 +241,9 @@ int32_t qwGetResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen, void
}
-void qwSaveTbVersionInfo(qTaskInfo_t pTaskInfo, SQWTaskCtx *ctx) {
- char dbFName[TSDB_DB_FNAME_LEN];
- char tbName[TSDB_TABLE_NAME_LEN];
-
- qGetQueriedTableSchemaVersion(pTaskInfo, dbFName, tbName, &ctx->tbInfo.sversion, &ctx->tbInfo.tversion);
-
- if (dbFName[0] && tbName[0]) {
- sprintf(ctx->tbInfo.tbFName, "%s.%s", dbFName, tbName);
- } else {
- ctx->tbInfo.tbFName[0] = 0;
- }
-}
-
int32_t qwHandlePrePhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *input, SQWPhaseOutput *output) {
int32_t code = 0;
SQWTaskCtx *ctx = NULL;
- SRpcHandleInfo *dropConnection = NULL;
SRpcHandleInfo *cancelConnection = NULL;
QW_TASK_DLOG("start to handle event at phase %s", qwPhaseStr(phase));
@@ -771,12 +276,10 @@ int32_t qwHandlePrePhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inpu
}
if (QW_IS_EVENT_RECEIVED(ctx, QW_EVENT_DROP)) {
- dropConnection = &ctx->ctrlConnInfo;
QW_ERR_JRET(qwDropTask(QW_FPARAMS()));
- dropConnection = NULL;
- qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code);
- QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code));
+ //qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code);
+ //QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code));
QW_ERR_JRET(TSDB_CODE_QRY_TASK_DROPPED);
break;
@@ -809,12 +312,10 @@ int32_t qwHandlePrePhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inpu
}
if (QW_IS_EVENT_RECEIVED(ctx, QW_EVENT_DROP)) {
- dropConnection = &ctx->ctrlConnInfo;
QW_ERR_JRET(qwDropTask(QW_FPARAMS()));
- dropConnection = NULL;
- qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code);
- QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code));
+ //qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code);
+ //QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code));
QW_ERR_JRET(TSDB_CODE_QRY_TASK_DROPPED);
}
@@ -839,11 +340,6 @@ _return:
qwReleaseTaskCtx(mgmt, ctx);
}
- if (dropConnection) {
- qwBuildAndSendDropRsp(dropConnection, code);
- QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", dropConnection->handle, code, tstrerror(code));
- }
-
if (cancelConnection) {
qwBuildAndSendCancelRsp(cancelConnection, code);
QW_TASK_DLOG("cancel rsp send, handle:%p, code:%x - %s", cancelConnection->handle, code, tstrerror(code));
@@ -862,7 +358,7 @@ int32_t qwHandlePostPhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inp
int32_t code = 0;
SQWTaskCtx *ctx = NULL;
SRpcHandleInfo connInfo = {0};
- SRpcHandleInfo *readyConnection = NULL;
+ SRpcHandleInfo *rspConnection = NULL;
QW_TASK_DLOG("start to handle event at phase %s", qwPhaseStr(phase));
@@ -883,7 +379,7 @@ int32_t qwHandlePostPhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inp
}
#else
connInfo = ctx->ctrlConnInfo;
- readyConnection = &connInfo;
+ rspConnection = &connInfo;
QW_SET_EVENT_PROCESSED(ctx, QW_EVENT_READY);
#endif
@@ -895,8 +391,8 @@ int32_t qwHandlePostPhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inp
QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
}
- qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code);
- QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code));
+ //qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code);
+ //QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code));
QW_ERR_JRET(qwDropTask(QW_FPARAMS()));
QW_ERR_JRET(TSDB_CODE_QRY_TASK_DROPPED);
@@ -916,9 +412,9 @@ _return:
qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_PARTIAL_SUCCEED);
}
- if (readyConnection) {
- qwBuildAndSendReadyRsp(readyConnection, code, ctx ? &ctx->tbInfo : NULL);
- QW_TASK_DLOG("ready msg rsped, handle:%p, code:%x - %s", readyConnection->handle, code, tstrerror(code));
+ if (rspConnection) {
+ qwBuildAndSendQueryRsp(rspConnection, code, ctx ? &ctx->tbInfo : NULL);
+ QW_TASK_DLOG("ready msg rsped, handle:%p, code:%x - %s", rspConnection->handle, code, tstrerror(code));
}
if (ctx) {
@@ -1009,69 +505,6 @@ _return:
QW_RET(TSDB_CODE_SUCCESS);
}
-int32_t qwProcessReady(QW_FPARAMS_DEF, SQWMsg *qwMsg) {
- int32_t code = 0;
- SQWTaskCtx *ctx = NULL;
- int8_t phase = 0;
- bool needRsp = true;
-
- QW_ERR_JRET(qwAcquireTaskCtx(QW_FPARAMS(), &ctx));
-
- QW_LOCK(QW_WRITE, &ctx->lock);
-
- if (QW_IS_EVENT_PROCESSED(ctx, QW_EVENT_DROP) || QW_IS_EVENT_RECEIVED(ctx, QW_EVENT_DROP)) {
- QW_TASK_WLOG_E("task is dropping or already dropped");
- QW_ERR_JRET(TSDB_CODE_QRY_TASK_DROPPED);
- }
-
- if (ctx->phase == QW_PHASE_PRE_QUERY) {
- ctx->ctrlConnInfo = qwMsg->connInfo;
- QW_SET_EVENT_RECEIVED(ctx, QW_EVENT_READY);
- needRsp = false;
- QW_TASK_DLOG_E("ready msg will not rsp now");
- goto _return;
- }
-
- QW_SET_EVENT_PROCESSED(ctx, QW_EVENT_READY);
-
- if (atomic_load_8((int8_t *)&ctx->queryEnd) || atomic_load_8((int8_t *)&ctx->queryFetched)) {
- QW_TASK_ELOG("got ready msg at wrong status, queryEnd:%d, queryFetched:%d", atomic_load_8((int8_t *)&ctx->queryEnd),
- atomic_load_8((int8_t *)&ctx->queryFetched));
- QW_ERR_JRET(TSDB_CODE_QW_MSG_ERROR);
- }
-
- if (ctx->phase == QW_PHASE_POST_QUERY) {
- code = ctx->rspCode;
- goto _return;
- }
-
- QW_TASK_ELOG("invalid phase when got ready msg, phase:%s", qwPhaseStr(ctx->phase));
-
- QW_ERR_JRET(TSDB_CODE_QRY_TASK_STATUS_ERROR);
-
-_return:
-
- if (code && ctx) {
- QW_UPDATE_RSP_CODE(ctx, code);
- }
-
- if (code) {
- qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_FAILED);
- }
-
- if (ctx) {
- QW_UNLOCK(QW_WRITE, &ctx->lock);
- qwReleaseTaskCtx(mgmt, ctx);
- }
-
- if (needRsp) {
- qwBuildAndSendReadyRsp(&qwMsg->connInfo, code, NULL);
- QW_TASK_DLOG("ready msg rsped, handle:%p, code:%x - %s", qwMsg->connInfo.handle, code, tstrerror(code));
- }
-
- QW_RET(TSDB_CODE_SUCCESS);
-}
-
int32_t qwProcessCQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) {
SQWTaskCtx *ctx = NULL;
int32_t code = 0;
@@ -1245,11 +678,6 @@ int32_t qwProcessDrop(QW_FPARAMS_DEF, SQWMsg *qwMsg) {
QW_ERR_JRET(qwKillTaskHandle(QW_FPARAMS(), ctx));
qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_DROPPING);
} else if (ctx->phase > 0) {
- if (0 == qwMsg->code) {
- qwBuildAndSendDropRsp(&qwMsg->connInfo, code);
- QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", qwMsg->connInfo.handle, code, tstrerror(code));
- }
-
QW_ERR_JRET(qwDropTask(QW_FPARAMS()));
rsped = true;
} else {
@@ -1280,37 +708,6 @@ _return:
qwReleaseTaskCtx(mgmt, ctx);
}
- if ((TSDB_CODE_SUCCESS != code) && (0 == qwMsg->code)) {
- qwBuildAndSendDropRsp(&qwMsg->connInfo, code);
- QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", qwMsg->connInfo.handle, code, tstrerror(code));
- }
-
- QW_RET(TSDB_CODE_SUCCESS);
-}
-
-int32_t qwProcessHbLinkBroken(SQWorker *mgmt, SQWMsg *qwMsg, SSchedulerHbReq *req) {
- int32_t code = 0;
- SSchedulerHbRsp rsp = {0};
- SQWSchStatus *sch = NULL;
-
- QW_ERR_RET(qwAcquireAddScheduler(mgmt, req->sId, QW_READ, &sch));
-
- QW_LOCK(QW_WRITE, &sch->hbConnLock);
-
- if (qwMsg->connInfo.handle == sch->hbConnInfo.handle) {
- tmsgReleaseHandle(&sch->hbConnInfo, TAOS_CONN_SERVER);
- sch->hbConnInfo.handle = NULL;
- sch->hbConnInfo.ahandle = NULL;
-
- QW_DLOG("release hb handle due to connection broken, handle:%p", qwMsg->connInfo.handle);
- } else {
- QW_DLOG("ignore hb connection broken, handle:%p, currentHandle:%p", qwMsg->connInfo.handle, sch->hbConnInfo.handle);
- }
-
- QW_UNLOCK(QW_WRITE, &sch->hbConnLock);
-
- qwReleaseScheduler(QW_READ, mgmt);
-
QW_RET(TSDB_CODE_SUCCESS);
}
@@ -1409,7 +806,7 @@ void qwProcessHbTimerEvent(void *param, void *tmrId) {
SQWSchStatus *sch = (SQWSchStatus *)pIter;
if (NULL == sch->hbConnInfo.handle) {
uint64_t *sId = taosHashGetKey(pIter, NULL);
- QW_DLOG("cancel send hb to sch %" PRIx64 " cause of no connection handle", *sId);
+ QW_TLOG("cancel send hb to sch %" PRIx64 " cause of no connection handle", *sId);
pIter = taosHashIterate(mgmt->schHash, pIter);
continue;
}
@@ -1441,81 +838,6 @@ _return:
qwRelease(refId);
}
-void qwCloseRef(void) {
- taosWLockLatch(&gQwMgmt.lock);
- if (atomic_load_32(&gQwMgmt.qwNum) <= 0 && gQwMgmt.qwRef >= 0) {
- taosCloseRef(gQwMgmt.qwRef);
- gQwMgmt.qwRef = -1;
- }
- taosWUnLockLatch(&gQwMgmt.lock);
-}
-
-void qwDestroySchStatus(SQWSchStatus *pStatus) { taosHashCleanup(pStatus->tasksHash); }
-
-void qwDestroyImpl(void *pMgmt) {
- SQWorker *mgmt = (SQWorker *)pMgmt;
-
- taosTmrStopA(&mgmt->hbTimer);
- taosTmrCleanUp(mgmt->timer);
-
- // TODO STOP ALL QUERY
-
- // TODO FREE ALL
-
- taosHashCleanup(mgmt->ctxHash);
-
- void *pIter = taosHashIterate(mgmt->schHash, NULL);
- while (pIter) {
- SQWSchStatus *sch = (SQWSchStatus *)pIter;
- qwDestroySchStatus(sch);
- pIter = taosHashIterate(mgmt->schHash, pIter);
- }
- taosHashCleanup(mgmt->schHash);
-
- taosMemoryFree(mgmt);
-
- atomic_sub_fetch_32(&gQwMgmt.qwNum, 1);
-
- qwCloseRef();
-}
-
-int32_t qwOpenRef(void) {
- taosWLockLatch(&gQwMgmt.lock);
- if (gQwMgmt.qwRef < 0) {
- gQwMgmt.qwRef = taosOpenRef(100, qwDestroyImpl);
- if (gQwMgmt.qwRef < 0) {
- taosWUnLockLatch(&gQwMgmt.lock);
- qError("init qworker ref failed");
- QW_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
- }
- }
- taosWUnLockLatch(&gQwMgmt.lock);
-
- return TSDB_CODE_SUCCESS;
-}
-
-void qwSetHbParam(int64_t refId, SQWHbParam **pParam) {
- int32_t paramIdx = 0;
- int32_t newParamIdx = 0;
-
- while (true) {
- paramIdx = atomic_load_32(&gQwMgmt.paramIdx);
- if (paramIdx == tListLen(gQwMgmt.param)) {
- newParamIdx = 0;
- } else {
- newParamIdx = paramIdx + 1;
- }
-
- if (paramIdx == atomic_val_compare_exchange_32(&gQwMgmt.paramIdx, paramIdx, newParamIdx)) {
- break;
- }
- }
-
- gQwMgmt.param[paramIdx].qwrId = gQwMgmt.qwRef;
- gQwMgmt.param[paramIdx].refId = refId;
-
- *pParam = &gQwMgmt.param[paramIdx];
-}
int32_t qWorkerInit(int8_t nodeType, int32_t nodeId, SQWorkerCfg *cfg, void **qWorkerMgmt, const SMsgCb *pMsgCb) {
if (NULL == qWorkerMgmt || pMsgCb->mgmt == NULL) {
@@ -1632,146 +954,30 @@ void qWorkerDestroy(void **qWorkerMgmt) {
}
}
-int32_t qwGetSchTasksStatus(SQWorker *mgmt, uint64_t sId, SSchedulerStatusRsp **rsp) {
- /*
- SQWSchStatus *sch = NULL;
- int32_t taskNum = 0;
-
- QW_ERR_RET(qwAcquireScheduler(mgmt, sId, QW_READ, &sch));
-
- sch->lastAccessTs = taosGetTimestampSec();
-
- QW_LOCK(QW_READ, &sch->tasksLock);
-
- taskNum = taosHashGetSize(sch->tasksHash);
-
- int32_t size = sizeof(SSchedulerStatusRsp) + sizeof((*rsp)->status[0]) * taskNum;
- *rsp = taosMemoryCalloc(1, size);
- if (NULL == *rsp) {
- QW_SCH_ELOG("calloc %d failed", size);
- QW_UNLOCK(QW_READ, &sch->tasksLock);
- qwReleaseScheduler(QW_READ, mgmt);
-
- return TSDB_CODE_QRY_OUT_OF_MEMORY;
- }
-
- void *key = NULL;
- size_t keyLen = 0;
- int32_t i = 0;
-
- void *pIter = taosHashIterate(sch->tasksHash, NULL);
- while (pIter) {
- SQWTaskStatus *taskStatus = (SQWTaskStatus *)pIter;
- taosHashGetKey(pIter, &key, &keyLen);
-
- QW_GET_QTID(key, (*rsp)->status[i].queryId, (*rsp)->status[i].taskId);
- (*rsp)->status[i].status = taskStatus->status;
-
- ++i;
- pIter = taosHashIterate(sch->tasksHash, pIter);
- }
-
- QW_UNLOCK(QW_READ, &sch->tasksLock);
- qwReleaseScheduler(QW_READ, mgmt);
-
- (*rsp)->num = taskNum;
- */
- return TSDB_CODE_SUCCESS;
-}
-
-int32_t qwUpdateSchLastAccess(SQWorker *mgmt, uint64_t sId, uint64_t qId, uint64_t tId) {
- SQWSchStatus *sch = NULL;
+int32_t qWorkerGetStat(SReadHandle *handle, void *qWorkerMgmt, SQWorkerStat *pStat) {
+ if (NULL == handle || NULL == qWorkerMgmt || NULL == pStat) {
+ QW_RET(TSDB_CODE_QRY_INVALID_INPUT);
+ }
- /*
- QW_ERR_RET(qwAcquireScheduler(QW_READ, mgmt, sId, &sch));
+ SQWorker *mgmt = (SQWorker *)qWorkerMgmt;
+ SDataSinkStat sinkStat = {0};
+
+ dsDataSinkGetCacheSize(&sinkStat);
+ pStat->cacheDataSize = sinkStat.cachedSize;
+
+ pStat->queryProcessed = QW_STAT_GET(mgmt->stat.msgStat.queryProcessed);
+ pStat->cqueryProcessed = QW_STAT_GET(mgmt->stat.msgStat.cqueryProcessed);
+ pStat->fetchProcessed = QW_STAT_GET(mgmt->stat.msgStat.fetchProcessed);
+ pStat->dropProcessed = QW_STAT_GET(mgmt->stat.msgStat.dropProcessed);
+ pStat->hbProcessed = QW_STAT_GET(mgmt->stat.msgStat.hbProcessed);
- sch->lastAccessTs = taosGetTimestampSec();
+ pStat->numOfQueryInQueue = handle->pMsgCb->qsizeFp(handle->pMsgCb->mgmt, mgmt->nodeId, QUERY_QUEUE);
+ pStat->numOfFetchInQueue = handle->pMsgCb->qsizeFp(handle->pMsgCb->mgmt, mgmt->nodeId, FETCH_QUEUE);
+ pStat->timeInQueryQueue = qwGetTimeInQueue((SQWorker *)qWorkerMgmt, QUERY_QUEUE);
+ pStat->timeInFetchQueue = qwGetTimeInQueue((SQWorker *)qWorkerMgmt, FETCH_QUEUE);
- qwReleaseScheduler(QW_READ, mgmt);
- */
return TSDB_CODE_SUCCESS;
}
-int32_t qwGetTaskStatus(SQWorker *mgmt, uint64_t sId, uint64_t qId, uint64_t tId, int8_t *taskStatus) {
- SQWSchStatus *sch = NULL;
- SQWTaskStatus *task = NULL;
- int32_t code = 0;
- /*
- if (qwAcquireScheduler(QW_READ, mgmt, sId, &sch)) {
- *taskStatus = JOB_TASK_STATUS_NULL;
- return TSDB_CODE_SUCCESS;
- }
-
- if (qwAcquireTask(mgmt, QW_READ, sch, queryId, taskId, &task)) {
- qwReleaseScheduler(QW_READ, mgmt);
-
- *taskStatus = JOB_TASK_STATUS_NULL;
- return TSDB_CODE_SUCCESS;
- }
-
- *taskStatus = task->status;
-
- qwReleaseTask(QW_READ, sch);
- qwReleaseScheduler(QW_READ, mgmt);
- */
-
- QW_RET(code);
-}
-
-int32_t qwCancelTask(SQWorker *mgmt, uint64_t sId, uint64_t qId, uint64_t tId) {
- SQWSchStatus *sch = NULL;
- SQWTaskStatus *task = NULL;
- int32_t code = 0;
- /*
- QW_ERR_RET(qwAcquireAddScheduler(QW_READ, mgmt, sId, &sch));
-
- QW_ERR_JRET(qwAcquireAddTask(mgmt, QW_READ, sch, qId, tId, JOB_TASK_STATUS_NOT_START, &task));
-
-
- QW_LOCK(QW_WRITE, &task->lock);
-
- task->cancel = true;
-
- int8_t oriStatus = task->status;
- int8_t newStatus = 0;
-
- if (task->status == JOB_TASK_STATUS_CANCELLED || task->status == JOB_TASK_STATUS_NOT_START || task->status ==
- JOB_TASK_STATUS_CANCELLING || task->status == JOB_TASK_STATUS_DROPPING) { QW_UNLOCK(QW_WRITE, &task->lock);
-
- qwReleaseTask(QW_READ, sch);
- qwReleaseScheduler(QW_READ, mgmt);
-
- return TSDB_CODE_SUCCESS;
- } else if (task->status == JOB_TASK_STATUS_FAILED || task->status == JOB_TASK_STATUS_SUCCEED || task->status ==
- JOB_TASK_STATUS_PARTIAL_SUCCEED) { QW_ERR_JRET(qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_CANCELLED)); } else {
- QW_ERR_JRET(qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_CANCELLING));
- }
-
- QW_UNLOCK(QW_WRITE, &task->lock);
-
- qwReleaseTask(QW_READ, sch);
- qwReleaseScheduler(QW_READ, mgmt);
-
- if (oriStatus == JOB_TASK_STATUS_EXECUTING) {
- //TODO call executer to cancel subquery async
- }
-
- return TSDB_CODE_SUCCESS;
-
- _return:
-
- if (task) {
- QW_UNLOCK(QW_WRITE, &task->lock);
-
- qwReleaseTask(QW_READ, sch);
- }
-
- if (sch) {
- qwReleaseScheduler(QW_READ, mgmt);
- }
- */
-
- QW_RET(code);
-}
diff --git a/source/libs/qworker/test/qworkerTests.cpp b/source/libs/qworker/test/qworkerTests.cpp
index b573828e7694cc2f19ddd2e31fa9b34b590fc6ed..16dcd7b6e025dd5761202308d00c20435d9a55f0 100644
--- a/source/libs/qworker/test/qworkerTests.cpp
+++ b/source/libs/qworker/test/qworkerTests.cpp
@@ -108,7 +108,7 @@ void qwtInitLogFile() {
tsAsyncLog = 0;
qDebugFlag = 159;
- strcpy(tsLogDir, "/var/log/taos");
+ strcpy(tsLogDir, TD_LOG_DIR_PATH);
if (taosInitLog(defaultLogFileNamePrefix, maxLogFileNum) < 0) {
printf("failed to open log file in directory:%s\n", tsLogDir);
@@ -127,15 +127,6 @@ void qwtBuildQueryReqMsg(SRpcMsg *queryRpc) {
queryRpc->contLen = sizeof(SSubQueryMsg) + 100;
}
-void qwtBuildReadyReqMsg(SResReadyReq *readyMsg, SRpcMsg *readyRpc) {
- readyMsg->sId = htobe64(1);
- readyMsg->queryId = htobe64(atomic_load_64(&qwtTestQueryId));
- readyMsg->taskId = htobe64(1);
- readyRpc->msgType = TDMT_VND_RES_READY;
- readyRpc->pCont = readyMsg;
- readyRpc->contLen = sizeof(SResReadyReq);
-}
-
void qwtBuildFetchReqMsg(SResFetchReq *fetchMsg, SRpcMsg *fetchRpc) {
fetchMsg->sId = htobe64(1);
fetchMsg->queryId = htobe64(atomic_load_64(&qwtTestQueryId));
@@ -154,13 +145,6 @@ void qwtBuildDropReqMsg(STaskDropReq *dropMsg, SRpcMsg *dropRpc) {
dropRpc->contLen = sizeof(STaskDropReq);
}
-void qwtBuildStatusReqMsg(SSchTasksStatusReq *statusMsg, SRpcMsg *statusRpc) {
- statusMsg->sId = htobe64(1);
- statusRpc->pCont = statusMsg;
- statusRpc->contLen = sizeof(SSchTasksStatusReq);
- statusRpc->msgType = TDMT_VND_TASKS_STATUS;
-}
-
int32_t qwtStringToPlan(const char* str, SSubplan** subplan) {
*subplan = (SSubplan *)0x1;
return 0;
@@ -222,10 +206,7 @@ void qwtRpcSendResponse(const SRpcMsg *pRsp) {
case TDMT_VND_QUERY_RSP: {
SQueryTableRsp *rsp = (SQueryTableRsp *)pRsp->pCont;
- if (0 == pRsp->code) {
- qwtBuildReadyReqMsg(&qwtreadyMsg, &qwtreadyRpc);
- qwtPutReqToFetchQueue((void *)0x1, &qwtreadyRpc);
- } else {
+ if (pRsp->code) {
qwtBuildDropReqMsg(&qwtdropMsg, &qwtdropRpc);
qwtPutReqToFetchQueue((void *)0x1, &qwtdropRpc);
}
@@ -233,19 +214,6 @@ void qwtRpcSendResponse(const SRpcMsg *pRsp) {
rpcFreeCont(rsp);
break;
}
- case TDMT_VND_RES_READY_RSP: {
- SResReadyRsp *rsp = (SResReadyRsp *)pRsp->pCont;
-
- if (0 == pRsp->code) {
- qwtBuildFetchReqMsg(&qwtfetchMsg, &qwtfetchRpc);
- qwtPutReqToFetchQueue((void *)0x1, &qwtfetchRpc);
- } else {
- qwtBuildDropReqMsg(&qwtdropMsg, &qwtdropRpc);
- qwtPutReqToFetchQueue((void *)0x1, &qwtdropRpc);
- }
- rpcFreeCont(rsp);
- break;
- }
case TDMT_VND_FETCH_RSP: {
SRetrieveTableRsp *rsp = (SRetrieveTableRsp *)pRsp->pCont;
@@ -667,7 +635,7 @@ void *queryThread(void *param) {
while (!qwtTestStop) {
qwtBuildQueryReqMsg(&queryRpc);
- qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc);
+ qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc, 0);
if (qwtTestEnableSleep) {
taosUsleep(taosRand()%5);
}
@@ -679,28 +647,6 @@ void *queryThread(void *param) {
return NULL;
}
-void *readyThread(void *param) {
- SRpcMsg readyRpc = {0};
- int32_t code = 0;
- uint32_t n = 0;
- void *mockPointer = (void *)0x1;
- void *mgmt = param;
- SResReadyReq readyMsg = {0};
-
- while (!qwtTestStop) {
- qwtBuildReadyReqMsg(&readyMsg, &readyRpc);
- code = qWorkerProcessReadyMsg(mockPointer, mgmt, &readyRpc);
- if (qwtTestEnableSleep) {
- taosUsleep(taosRand()%5);
- }
- if (++n % qwtTestPrintNum == 0) {
- printf("ready:%d\n", n);
- }
- }
-
- return NULL;
-}
-
void *fetchThread(void *param) {
SRpcMsg fetchRpc = {0};
int32_t code = 0;
@@ -711,7 +657,7 @@ void *fetchThread(void *param) {
while (!qwtTestStop) {
qwtBuildFetchReqMsg(&fetchMsg, &fetchRpc);
- code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc);
+ code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc, 0);
if (qwtTestEnableSleep) {
taosUsleep(taosRand()%5);
}
@@ -733,7 +679,7 @@ void *dropThread(void *param) {
while (!qwtTestStop) {
qwtBuildDropReqMsg(&dropMsg, &dropRpc);
- code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc);
+ code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc, 0);
if (qwtTestEnableSleep) {
taosUsleep(taosRand()%5);
}
@@ -745,29 +691,6 @@ void *dropThread(void *param) {
return NULL;
}
-void *statusThread(void *param) {
- SRpcMsg statusRpc = {0};
- int32_t code = 0;
- uint32_t n = 0;
- void *mockPointer = (void *)0x1;
- void *mgmt = param;
- SSchTasksStatusReq statusMsg = {0};
-
- while (!qwtTestStop) {
- qwtBuildStatusReqMsg(&statusMsg, &statusRpc);
- code = qWorkerProcessStatusMsg(mockPointer, mgmt, &statusRpc);
- if (qwtTestEnableSleep) {
- taosUsleep(taosRand()%5);
- }
- if (++n % qwtTestPrintNum == 0) {
- printf("status:%d\n", n);
- }
- }
-
- return NULL;
-}
-
-
void *qwtclientThread(void *param) {
int32_t code = 0;
uint32_t n = 0;
@@ -835,9 +758,9 @@ void *queryQueueThread(void *param) {
}
if (TDMT_VND_QUERY == queryRpc->msgType) {
- qWorkerProcessQueryMsg(mockPointer, mgmt, queryRpc);
+ qWorkerProcessQueryMsg(mockPointer, mgmt, queryRpc, 0);
} else if (TDMT_VND_QUERY_CONTINUE == queryRpc->msgType) {
- qWorkerProcessCQueryMsg(mockPointer, mgmt, queryRpc);
+ qWorkerProcessCQueryMsg(mockPointer, mgmt, queryRpc, 0);
} else {
printf("unknown msg in query queue, type:%d\n", queryRpc->msgType);
assert(0);
@@ -892,19 +815,13 @@ void *fetchQueueThread(void *param) {
switch (fetchRpc->msgType) {
case TDMT_VND_FETCH:
- qWorkerProcessFetchMsg(mockPointer, mgmt, fetchRpc);
- break;
- case TDMT_VND_RES_READY:
- qWorkerProcessReadyMsg(mockPointer, mgmt, fetchRpc);
- break;
- case TDMT_VND_TASKS_STATUS:
- qWorkerProcessStatusMsg(mockPointer, mgmt, fetchRpc);
+ qWorkerProcessFetchMsg(mockPointer, mgmt, fetchRpc, 0);
break;
case TDMT_VND_CANCEL_TASK:
- qWorkerProcessCancelMsg(mockPointer, mgmt, fetchRpc);
+ qWorkerProcessCancelMsg(mockPointer, mgmt, fetchRpc, 0);
break;
case TDMT_VND_DROP_TASK:
- qWorkerProcessDropMsg(mockPointer, mgmt, fetchRpc);
+ qWorkerProcessDropMsg(mockPointer, mgmt, fetchRpc, 0);
break;
default:
printf("unknown msg type:%d in fetch queue", fetchRpc->msgType);
@@ -934,15 +851,12 @@ TEST(seqTest, normalCase) {
int32_t code = 0;
void *mockPointer = (void *)0x1;
SRpcMsg queryRpc = {0};
- SRpcMsg readyRpc = {0};
SRpcMsg fetchRpc = {0};
SRpcMsg dropRpc = {0};
- SRpcMsg statusRpc = {0};
qwtInitLogFile();
qwtBuildQueryReqMsg(&queryRpc);
- qwtBuildReadyReqMsg(&qwtreadyMsg, &readyRpc);
qwtBuildFetchReqMsg(&qwtfetchMsg, &fetchRpc);
qwtBuildDropReqMsg(&qwtdropMsg, &dropRpc);
@@ -964,20 +878,16 @@ TEST(seqTest, normalCase) {
code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb);
ASSERT_EQ(code, 0);
- code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc);
+ code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc, 0);
ASSERT_EQ(code, 0);
//code = qWorkerProcessReadyMsg(mockPointer, mgmt, &readyRpc);
//ASSERT_EQ(code, 0);
- code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc);
- ASSERT_EQ(code, 0);
-
- code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc);
+ code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc, 0);
ASSERT_EQ(code, 0);
- qwtBuildStatusReqMsg(&qwtstatusMsg, &statusRpc);
- code = qWorkerProcessStatusMsg(mockPointer, mgmt, &statusRpc);
+ code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc, 0);
ASSERT_EQ(code, 0);
qWorkerDestroy(&mgmt);
@@ -989,13 +899,11 @@ TEST(seqTest, cancelFirst) {
void *mockPointer = (void *)0x1;
SRpcMsg queryRpc = {0};
SRpcMsg dropRpc = {0};
- SRpcMsg statusRpc = {0};
qwtInitLogFile();
qwtBuildQueryReqMsg(&queryRpc);
qwtBuildDropReqMsg(&qwtdropMsg, &dropRpc);
- qwtBuildStatusReqMsg(&qwtstatusMsg, &statusRpc);
stubSetStringToPlan();
stubSetRpcSendResponse();
@@ -1006,24 +914,12 @@ TEST(seqTest, cancelFirst) {
code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb);
ASSERT_EQ(code, 0);
- qwtBuildStatusReqMsg(&qwtstatusMsg, &statusRpc);
- code = qWorkerProcessStatusMsg(mockPointer, mgmt, &statusRpc);
- ASSERT_EQ(code, 0);
-
- code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc);
+ code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc, 0);
ASSERT_EQ(code, 0);
- qwtBuildStatusReqMsg(&qwtstatusMsg, &statusRpc);
- code = qWorkerProcessStatusMsg(mockPointer, mgmt, &statusRpc);
- ASSERT_EQ(code, 0);
-
- code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc);
+ code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc, 0);
ASSERT_TRUE(0 != code);
- qwtBuildStatusReqMsg(&qwtstatusMsg, &statusRpc);
- code = qWorkerProcessStatusMsg(mockPointer, mgmt, &statusRpc);
- ASSERT_EQ(code, 0);
-
qWorkerDestroy(&mgmt);
}
@@ -1063,7 +959,7 @@ TEST(seqTest, randCase) {
if (r >= 0 && r < maxr/5) {
printf("Query,%d\n", t++);
qwtBuildQueryReqMsg(&queryRpc);
- code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc);
+ code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc, 0);
} else if (r >= maxr/5 && r < maxr * 2/5) {
//printf("Ready,%d\n", t++);
//qwtBuildReadyReqMsg(&readyMsg, &readyRpc);
@@ -1074,22 +970,19 @@ TEST(seqTest, randCase) {
} else if (r >= maxr * 2/5 && r < maxr* 3/5) {
printf("Fetch,%d\n", t++);
qwtBuildFetchReqMsg(&fetchMsg, &fetchRpc);
- code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc);
+ code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc, 0);
if (qwtTestEnableSleep) {
taosUsleep(1);
}
} else if (r >= maxr * 3/5 && r < maxr * 4/5) {
printf("Drop,%d\n", t++);
qwtBuildDropReqMsg(&dropMsg, &dropRpc);
- code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc);
+ code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc, 0);
if (qwtTestEnableSleep) {
taosUsleep(1);
}
} else if (r >= maxr * 4/5 && r < maxr-1) {
printf("Status,%d\n", t++);
- qwtBuildStatusReqMsg(&statusMsg, &statusRpc);
- code = qWorkerProcessStatusMsg(mockPointer, mgmt, &statusRpc);
- ASSERT_EQ(code, 0);
if (qwtTestEnableSleep) {
taosUsleep(1);
}
@@ -1137,7 +1030,6 @@ TEST(seqTest, multithreadRand) {
//taosThreadCreate(&(t2), &thattr, readyThread, NULL);
taosThreadCreate(&(t3), &thattr, fetchThread, NULL);
taosThreadCreate(&(t4), &thattr, dropThread, NULL);
- taosThreadCreate(&(t5), &thattr, statusThread, NULL);
taosThreadCreate(&(t6), &thattr, fetchQueueThread, mgmt);
while (true) {
diff --git a/source/libs/scalar/inc/sclInt.h b/source/libs/scalar/inc/sclInt.h
index 9dbfeceb5940d4237ead01ff445529c2d7d447ac..1c2e4a358a2c256cf3ed577be568c2e93fe13cbe 100644
--- a/source/libs/scalar/inc/sclInt.h
+++ b/source/libs/scalar/inc/sclInt.h
@@ -51,7 +51,7 @@ typedef struct SScalarCtx {
int32_t doConvertDataType(SValueNode* pValueNode, SScalarParam* out);
SColumnInfoData* createColumnInfoData(SDataType* pType, int32_t numOfRows);
-void sclConvertToTsValueNode(int8_t precision, SValueNode* valueNode);
+int32_t sclConvertToTsValueNode(int8_t precision, SValueNode* valueNode);
#define GET_PARAM_TYPE(_c) ((_c)->columnData->info.type)
#define GET_PARAM_BYTES(_c) ((_c)->columnData->info.bytes)
diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c
index 4317ad325e7e0d7b468dd7929c1f4a7c9ff7c169..195ec8a57791062cbca0e4c1a39ccce1866a5095 100644
--- a/source/libs/scalar/src/filter.c
+++ b/source/libs/scalar/src/filter.c
@@ -3553,7 +3553,11 @@ EDealRes fltReviseRewriter(SNode** pNode, void* pContext) {
return DEAL_RES_CONTINUE;
}
- sclConvertToTsValueNode(stat->precision, valueNode);
+ int32_t code = sclConvertToTsValueNode(stat->precision, valueNode);
+ if (code) {
+ stat->code = code;
+ return DEAL_RES_ERROR;
+ }
return DEAL_RES_CONTINUE;
}
@@ -3687,7 +3691,7 @@ int32_t fltReviseNodes(SFilterInfo *pInfo, SNode** pNode, SFltTreeStat *pStat) {
for (int32_t i = 0; i < nodeNum; ++i) {
SValueNode *valueNode = *(SValueNode **)taosArrayGet(pStat->nodeList, i);
- sclConvertToTsValueNode(pStat->precision, valueNode);
+ FLT_ERR_JRET(sclConvertToTsValueNode(pStat->precision, valueNode));
}
_return:
diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c
index fb03eaefa4fe79034d731b74de6bd166fa0db83e..d2436b9948f2cf7bfa15d061cdc9bbfdfefd6f08 100644
--- a/source/libs/scalar/src/scalar.c
+++ b/source/libs/scalar/src/scalar.c
@@ -20,17 +20,19 @@ int32_t scalarGetOperatorParamNum(EOperatorType type) {
return 2;
}
-void sclConvertToTsValueNode(int8_t precision, SValueNode* valueNode) {
+int32_t sclConvertToTsValueNode(int8_t precision, SValueNode* valueNode) {
char *timeStr = valueNode->datum.p;
- if (convertStringToTimestamp(valueNode->node.resType.type, valueNode->datum.p, precision, &valueNode->datum.i) !=
- TSDB_CODE_SUCCESS) {
- valueNode->datum.i = 0;
+ int32_t code = convertStringToTimestamp(valueNode->node.resType.type, valueNode->datum.p, precision, &valueNode->datum.i);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
}
taosMemoryFree(timeStr);
valueNode->typeData = valueNode->datum.i;
valueNode->node.resType.type = TSDB_DATA_TYPE_TIMESTAMP;
valueNode->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes;
+
+ return TSDB_CODE_SUCCESS;
}
@@ -546,6 +548,7 @@ EDealRes sclRewriteBasedOnOptr(SNode** pNode, SScalarCtx *ctx, EOperatorType opT
EDealRes sclRewriteNonConstOperator(SNode** pNode, SScalarCtx *ctx) {
SOperatorNode *node = (SOperatorNode *)*pNode;
+ int32_t code = 0;
if (node->pLeft && (QUERY_NODE_VALUE == nodeType(node->pLeft))) {
SValueNode *valueNode = (SValueNode *)node->pLeft;
@@ -555,7 +558,11 @@ EDealRes sclRewriteNonConstOperator(SNode** pNode, SScalarCtx *ctx) {
if (IS_STR_DATA_TYPE(valueNode->node.resType.type) && node->pRight && nodesIsExprNode(node->pRight)
&& ((SExprNode*)node->pRight)->resType.type == TSDB_DATA_TYPE_TIMESTAMP) {
- sclConvertToTsValueNode(((SExprNode*)node->pRight)->resType.precision, valueNode);
+ code = sclConvertToTsValueNode(((SExprNode*)node->pRight)->resType.precision, valueNode);
+ if (code) {
+ ctx->code = code;
+ return DEAL_RES_ERROR;
+ }
}
}
@@ -567,7 +574,11 @@ EDealRes sclRewriteNonConstOperator(SNode** pNode, SScalarCtx *ctx) {
if (IS_STR_DATA_TYPE(valueNode->node.resType.type) && node->pLeft && nodesIsExprNode(node->pLeft)
&& ((SExprNode*)node->pLeft)->resType.type == TSDB_DATA_TYPE_TIMESTAMP) {
- sclConvertToTsValueNode(((SExprNode*)node->pLeft)->resType.precision, valueNode);
+ code = sclConvertToTsValueNode(((SExprNode*)node->pLeft)->resType.precision, valueNode);
+ if (code) {
+ ctx->code = code;
+ return DEAL_RES_ERROR;
+ }
}
}
diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c
index 12496eec551c3793cede8b6e065b31d4e554052e..370ea8fa2ecd6fb1dab661f931199f08e6f8f65a 100644
--- a/source/libs/scalar/src/sclfunc.c
+++ b/source/libs/scalar/src/sclfunc.c
@@ -15,7 +15,11 @@ typedef void (*_trim_fn)(char *, char*, int32_t, int32_t);
typedef int16_t (*_len_fn)(char *, int32_t);
/** Math functions **/
-static double tlog(double v, double base) {
+static double tlog(double v) {
+ return log(v);
+}
+
+static double tlog2(double v, double base) {
double a = log(v);
double b = log(base);
if (isnan(a) || isinf(a)) {
@@ -444,7 +448,8 @@ int32_t concatFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOu
for (int32_t k = 0; k < numOfRows; ++k) {
bool hasNull = false;
for (int32_t i = 0; i < inputNum; ++i) {
- if (colDataIsNull_s(pInputData[i], k)) {
+ if (colDataIsNull_s(pInputData[i], k) ||
+ GET_PARAM_TYPE(&pInput[i]) == TSDB_DATA_TYPE_NULL) {
colDataAppendNULL(pOutputData, k);
hasNull = true;
break;
@@ -520,7 +525,8 @@ int32_t concatWsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *p
char *output = outputBuf;
for (int32_t k = 0; k < numOfRows; ++k) {
- if (colDataIsNull_s(pInputData[0], k)) {
+ if (colDataIsNull_s(pInputData[0], k) ||
+ GET_PARAM_TYPE(&pInput[0]) == TSDB_DATA_TYPE_NULL) {
colDataAppendNULL(pOutputData, k);
continue;
}
@@ -528,7 +534,8 @@ int32_t concatWsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *p
int16_t dataLen = 0;
bool hasNull = false;
for (int32_t i = 1; i < inputNum; ++i) {
- if (colDataIsNull_s(pInputData[i], k)) {
+ if (colDataIsNull_s(pInputData[i], k) ||
+ GET_PARAM_TYPE(&pInput[i]) == TSDB_DATA_TYPE_NULL) {
hasNull = true;
break;
}
@@ -633,7 +640,7 @@ static int32_t doTrimFunction(SScalarParam *pInput, int32_t inputNum, SScalarPar
continue;
}
- char *input = colDataGetData(pInput[0].columnData, i);
+ char *input = colDataGetData(pInputData, i);
int32_t len = varDataLen(input);
int32_t charLen = (type == TSDB_DATA_TYPE_VARCHAR) ? len : len / TSDB_NCHAR_SIZE;
trimFn(input, output, type, charLen);
@@ -707,6 +714,7 @@ int32_t substrFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOu
int32_t castFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) {
int16_t inputType = GET_PARAM_TYPE(&pInput[0]);
+ int16_t inputLen = GET_PARAM_BYTES(&pInput[0]);
int16_t outputType = GET_PARAM_TYPE(&pOutput[0]);
int64_t outputLen = GET_PARAM_BYTES(&pOutput[0]);
@@ -718,15 +726,15 @@ int32_t castFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutp
colDataAppendNULL(pOutput->columnData, i);
continue;
}
+
char *input = colDataGetData(pInput[0].columnData, i);
switch(outputType) {
case TSDB_DATA_TYPE_BIGINT: {
if (inputType == TSDB_DATA_TYPE_BINARY) {
- memcpy(output, varDataVal(input), varDataLen(input));
- *(int64_t *)output = taosStr2Int64(output, NULL, 10);
+ *(int64_t *)output = taosStr2Int64(varDataVal(input), NULL, 10);
} else if (inputType == TSDB_DATA_TYPE_NCHAR) {
- char *newBuf = taosMemoryCalloc(1, outputLen * TSDB_NCHAR_SIZE + 1);
+ char *newBuf = taosMemoryCalloc(1, inputLen);
int32_t len = taosUcs4ToMbs((TdUcs4 *)varDataVal(input), varDataLen(input), newBuf);
if (len < 0) {
taosMemoryFree(newBuf);
@@ -742,10 +750,9 @@ int32_t castFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutp
}
case TSDB_DATA_TYPE_UBIGINT: {
if (inputType == TSDB_DATA_TYPE_BINARY) {
- memcpy(output, varDataVal(input), varDataLen(input));
- *(uint64_t *)output = taosStr2UInt64(output, NULL, 10);
+ *(uint64_t *)output = taosStr2UInt64(varDataVal(input), NULL, 10);
} else if (inputType == TSDB_DATA_TYPE_NCHAR) {
- char *newBuf = taosMemoryCalloc(1, outputLen * TSDB_NCHAR_SIZE + 1);
+ char *newBuf = taosMemoryCalloc(1, inputLen);
int32_t len = taosUcs4ToMbs((TdUcs4 *)varDataVal(input), varDataLen(input), newBuf);
if (len < 0) {
taosMemoryFree(newBuf);
@@ -849,6 +856,11 @@ int32_t castFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutp
int32_t toISO8601Function(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) {
int32_t type = GET_PARAM_TYPE(pInput);
+ char* tz;
+ int32_t tzLen;
+ tz = varDataVal(pInput[1].columnData->pData);
+ tzLen = varDataLen(pInput[1].columnData->pData);
+
for (int32_t i = 0; i < pInput[0].numOfRows; ++i) {
if (colDataIsNull_s(pInput[0].columnData, i)) {
colDataAppendNULL(pOutput->columnData, i);
@@ -880,9 +892,13 @@ int32_t toISO8601Function(SScalarParam *pInput, int32_t inputNum, SScalarParam *
}
struct tm *tmInfo = taosLocalTime((const time_t *)&timeVal, NULL);
- strftime(buf, sizeof(buf), "%Y-%m-%dT%H:%M:%S%z", tmInfo);
+ strftime(buf, sizeof(buf), "%Y-%m-%dT%H:%M:%S", tmInfo);
int32_t len = (int32_t)strlen(buf);
+ //add timezone string
+ snprintf(buf + len, tzLen + 1, "%s", tz);
+ len += tzLen;
+
if (hasFraction) {
int32_t fracLen = (int32_t)strlen(fraction) + 1;
char *tzInfo = strchr(buf, '+');
@@ -893,7 +909,7 @@ int32_t toISO8601Function(SScalarParam *pInput, int32_t inputNum, SScalarParam *
memmove(tzInfo + fracLen, tzInfo, strlen(tzInfo));
}
- char tmp[32];
+ char tmp[32] = {0};
sprintf(tmp, ".%s", fraction);
memcpy(tzInfo, tmp, fracLen);
len += fracLen;
@@ -925,10 +941,9 @@ int32_t toUnixtimestampFunction(SScalarParam *pInput, int32_t inputNum, SScalarP
int32_t ret = convertStringToTimestamp(type, input, timePrec, &timeVal);
if (ret != TSDB_CODE_SUCCESS) {
colDataAppendNULL(pOutput->columnData, i);
- continue;
+ } else {
+ colDataAppend(pOutput->columnData, i, (char *)&timeVal, false);
}
-
- colDataAppend(pOutput->columnData, i, (char *)&timeVal, false);
}
pOutput->numOfRows = pInput->numOfRows;
@@ -1366,7 +1381,11 @@ int32_t powFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutpu
}
int32_t logFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) {
- return doScalarFunctionUnique2(pInput, inputNum, pOutput, tlog);
+ if (inputNum == 1) {
+ return doScalarFunctionUnique(pInput, inputNum, pOutput, tlog);
+ } else {
+ return doScalarFunctionUnique2(pInput, inputNum, pOutput, tlog2);
+ }
}
int32_t sqrtFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) {
diff --git a/source/libs/scalar/src/sclvector.c b/source/libs/scalar/src/sclvector.c
index 0fb3712c30bb349a406a92f14346370f80112ae6..e844b3cdb6b29612aab571acfb2739e47044770b 100644
--- a/source/libs/scalar/src/sclvector.c
+++ b/source/libs/scalar/src/sclvector.c
@@ -922,23 +922,13 @@ static void doReleaseVec(SColumnInfoData* pCol, int32_t type) {
}
}
-char *getJsonValue(char *json, char *key){ //todo
- json++; // jump type
- int16_t cols = kvRowNCols(json);
- for (int i = 0; i < cols; ++i) {
- SColIdx *pColIdx = kvRowColIdxAt(json, i);
- char *data = kvRowColVal(json, pColIdx);
- if(i == 0){
- if(*data == TSDB_DATA_TYPE_NULL) {
- return NULL;
- }
- continue;
- }
- if(memcmp(key, data, varDataTLen(data)) == 0){
- return data + varDataTLen(data);
- }
+STagVal getJsonValue(char *json, char *key, bool *isExist) {
+ STagVal val = {.pKey = key};
+ bool find = tTagGet(((const STag *)json), &val); // json value is null and not exist is different
+ if(isExist){
+ *isExist = find;
}
- return NULL;
+ return val;
}
void vectorJsonArrow(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut, int32_t _ord) {
@@ -950,6 +940,8 @@ void vectorJsonArrow(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pO
pOut->numOfRows = TMAX(pLeft->numOfRows, pRight->numOfRows);
char *pRightData = colDataGetVarData(pRight->columnData, 0);
+ char *jsonKey = taosMemoryCalloc(1, varDataLen(pRightData) + 1);
+ memcpy(jsonKey, varDataVal(pRightData), varDataLen(pRightData));
for (; i >= 0 && i < pLeft->numOfRows; i += step) {
if (colDataIsNull_var(pLeft->columnData, i)) {
colDataSetNull_var(pOutputCol, i);
@@ -957,14 +949,15 @@ void vectorJsonArrow(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pO
continue;
}
char *pLeftData = colDataGetVarData(pLeft->columnData, i);
- char *value = getJsonValue(pLeftData, pRightData);
- if (!value) {
- colDataSetNull_var(pOutputCol, i);
- pOutputCol->hasNull = true;
- continue;
+ bool isExist = false;
+ STagVal value = getJsonValue(pLeftData, jsonKey, &isExist);
+ char *data = isExist ? tTagValToData(&value, true) : NULL;
+ colDataAppend(pOutputCol, i, data, data == NULL);
+ if(isExist && IS_VAR_DATA_TYPE(value.type) && data){
+ taosMemoryFree(data);
}
- colDataAppend(pOutputCol, i, value, false);
}
+ taosMemoryFree(jsonKey);
}
void vectorMathAdd(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut, int32_t _ord) {
diff --git a/source/libs/scalar/test/filter/filterTests.cpp b/source/libs/scalar/test/filter/filterTests.cpp
index 59c3104e96c0320804ba4f17dd0a013146b27a2d..7fb1ffbd64aecb2fee9d7c862f295070dbea8e09 100644
--- a/source/libs/scalar/test/filter/filterTests.cpp
+++ b/source/libs/scalar/test/filter/filterTests.cpp
@@ -60,7 +60,7 @@ void flttInitLogFile() {
tsAsyncLog = 0;
qDebugFlag = 159;
- strcpy(tsLogDir, "/var/log/taos");
+ strcpy(tsLogDir, TD_LOG_DIR_PATH);
if (taosInitLog(defaultLogFileNamePrefix, maxLogFileNum) < 0) {
printf("failed to open log file in directory:%s\n", tsLogDir);
diff --git a/source/libs/scalar/test/scalar/CMakeLists.txt b/source/libs/scalar/test/scalar/CMakeLists.txt
index 15d1c2cb4424fded0b04d1c82504768d57b21807..86b936d93ae950e27069835cffcb0e8a99768ac9 100644
--- a/source/libs/scalar/test/scalar/CMakeLists.txt
+++ b/source/libs/scalar/test/scalar/CMakeLists.txt
@@ -8,7 +8,7 @@ AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST)
ADD_EXECUTABLE(scalarTest ${SOURCE_LIST})
TARGET_LINK_LIBRARIES(
scalarTest
- PUBLIC os util common gtest qcom function nodes scalar parser
+ PUBLIC os util common gtest qcom function nodes scalar parser catalog transport
)
TARGET_INCLUDE_DIRECTORIES(
@@ -18,6 +18,6 @@ TARGET_INCLUDE_DIRECTORIES(
PRIVATE "${TD_SOURCE_DIR}/source/libs/scalar/inc"
)
add_test(
- NAME scalarTest
- COMMAND scalarTest
+ NAME scalarTest
+ COMMAND scalarTest
)
diff --git a/source/libs/scalar/test/scalar/scalarTests.cpp b/source/libs/scalar/test/scalar/scalarTests.cpp
index 3fafc83b18365d490003a792748606c8d4fce804..8a29462a2bbde2b1d4bf57d5f9ed50c983aed6e6 100644
--- a/source/libs/scalar/test/scalar/scalarTests.cpp
+++ b/source/libs/scalar/test/scalar/scalarTests.cpp
@@ -74,7 +74,7 @@ void scltInitLogFile() {
tsAsyncLog = 0;
qDebugFlag = 159;
- strcpy(tsLogDir, "/var/log/taos");
+ strcpy(tsLogDir, TD_LOG_DIR_PATH);
if (taosInitLog(defaultLogFileNamePrefix, maxLogFileNum) < 0) {
printf("failed to open log file in directory:%s\n", tsLogDir);
@@ -217,7 +217,7 @@ void scltMakeOpNode(SNode **pNode, EOperatorType opType, int32_t resType, SNode
SOperatorNode *onode = (SOperatorNode *)node;
onode->node.resType.type = resType;
onode->node.resType.bytes = tDataTypes[resType].bytes;
-
+
onode->opType = opType;
onode->pLeft = pLeft;
onode->pRight = pRight;
@@ -1035,7 +1035,7 @@ void makeJsonArrow(SSDataBlock **src, SNode **opNode, void *json, char *key){
SNode *pLeft = NULL, *pRight = NULL;
scltMakeValueNode(&pRight, TSDB_DATA_TYPE_BINARY, keyVar);
- scltMakeColumnNode(&pLeft, src, TSDB_DATA_TYPE_JSON, kvRowLen(json), 1, json);
+ scltMakeColumnNode(&pLeft, src, TSDB_DATA_TYPE_JSON, ((STag*)json)->len, 1, json);
scltMakeOpNode(opNode, OP_TYPE_JSON_GET_VALUE, TSDB_DATA_TYPE_JSON, pLeft, pRight);
}
@@ -1111,17 +1111,9 @@ TEST(columnTest, json_column_arith_op) {
char rightv[256] = {0};
memcpy(rightv, rightvTmp, strlen(rightvTmp));
- SKVRowBuilder kvRowBuilder;
- tdInitKVRowBuilder(&kvRowBuilder);
- parseJsontoTagData(rightv, &kvRowBuilder, NULL, 0);
- SKVRow row = tdGetKVRowFromBuilder(&kvRowBuilder);
- char *tmp = (char *)taosMemoryRealloc(row, kvRowLen(row)+1);
- if(tmp == NULL){
- ASSERT_TRUE(0);
- }
- memmove(tmp+1, tmp, kvRowLen(tmp));
- *tmp = TSDB_DATA_TYPE_JSON;
- row = tmp;
+ SArray *tags = taosArrayInit(1, sizeof(STagVal));
+ STag* row = NULL;
+ parseJsontoTagData(rightv, tags, &row, NULL);
const int32_t len = 8;
EOperatorType op[len] = {OP_TYPE_ADD, OP_TYPE_SUB, OP_TYPE_MULTI, OP_TYPE_DIV,
@@ -1175,7 +1167,7 @@ TEST(columnTest, json_column_arith_op) {
makeCalculate(row, key, TSDB_DATA_TYPE_INT, &input[i], eRes5[i], op[i]);
}
- tdDestroyKVRowBuilder(&kvRowBuilder);
+ taosArrayDestroy(tags);
taosMemoryFree(row);
}
@@ -1195,17 +1187,9 @@ TEST(columnTest, json_column_logic_op) {
char rightv[256] = {0};
memcpy(rightv, rightvTmp, strlen(rightvTmp));
- SKVRowBuilder kvRowBuilder;
- tdInitKVRowBuilder(&kvRowBuilder);
- parseJsontoTagData(rightv, &kvRowBuilder, NULL, 0);
- SKVRow row = tdGetKVRowFromBuilder(&kvRowBuilder);
- char *tmp = (char *)taosMemoryRealloc(row, kvRowLen(row)+1);
- if(tmp == NULL){
- ASSERT_TRUE(0);
- }
- memmove(tmp+1, tmp, kvRowLen(tmp));
- *tmp = TSDB_DATA_TYPE_JSON;
- row = tmp;
+ SArray *tags = taosArrayInit(1, sizeof(STagVal));
+ STag* row = NULL;
+ parseJsontoTagData(rightv, tags, &row, NULL);
const int32_t len = 9;
const int32_t len1 = 4;
@@ -1305,7 +1289,7 @@ TEST(columnTest, json_column_logic_op) {
taosMemoryFree(rightData);
}
- tdDestroyKVRowBuilder(&kvRowBuilder);
+ taosArrayDestroy(tags);
taosMemoryFree(row);
}
@@ -2498,7 +2482,7 @@ TEST(ScalarFunctionTest, tanFunction_column) {
code = tanFunction(pInput, 1, pOutput);
ASSERT_EQ(code, TSDB_CODE_SUCCESS);
for (int32_t i = 0; i < rowNum; ++i) {
- ASSERT_EQ(*((double *)colDataGetData(pOutput->columnData, i)), result[i]);
+ ASSERT_NEAR(*((double *)colDataGetData(pOutput->columnData, i)), result[i], 1e-15);
PRINTF("tiny_int after TAN:%f\n", *((double *)colDataGetData(pOutput->columnData, i)));
}
scltDestroyDataBlock(pInput);
@@ -2517,7 +2501,7 @@ TEST(ScalarFunctionTest, tanFunction_column) {
code = tanFunction(pInput, 1, pOutput);
ASSERT_EQ(code, TSDB_CODE_SUCCESS);
for (int32_t i = 0; i < rowNum; ++i) {
- ASSERT_EQ(*((double *)colDataGetData(pOutput->columnData, i)), result[i]);
+ ASSERT_NEAR(*((double *)colDataGetData(pOutput->columnData, i)), result[i], 1e-15);
PRINTF("float after TAN:%f\n", *((double *)colDataGetData(pOutput->columnData, i)));
}
diff --git a/source/libs/scheduler/inc/schedulerInt.h b/source/libs/scheduler/inc/schedulerInt.h
index ffac0f856dbebf7b04b44c0cdfad8df75021e650..44b3e6d396b8750a78ba9e414243f45686a121ec 100644
--- a/source/libs/scheduler/inc/schedulerInt.h
+++ b/source/libs/scheduler/inc/schedulerInt.h
@@ -39,9 +39,14 @@ enum {
SCH_WRITE,
};
+enum {
+ SCH_EXEC_CB = 1,
+ SCH_FETCH_CB,
+};
+
typedef struct SSchTrans {
- void *transInst;
- void *transHandle;
+ void *pTrans;
+ void *pHandle;
} SSchTrans;
typedef struct SSchHbTrans {
@@ -74,12 +79,19 @@ typedef struct SSchJobStat {
} SSchJobStat;
-typedef struct SSchedulerStat {
+typedef struct SSchStat {
SSchApiStat api;
SSchRuntimeStat runtime;
SSchJobStat job;
-} SSchedulerStat;
+} SSchStat;
+typedef struct SSchResInfo {
+ SQueryResult* queryRes;
+ void** fetchRes;
+ schedulerExecCallback execFp;
+ schedulerFetchCallback fetchFp;
+ void* userParam;
+} SSchResInfo;
typedef struct SSchedulerMgmt {
uint64_t taskId; // sequential taksId
@@ -89,7 +101,7 @@ typedef struct SSchedulerMgmt {
bool exit;
int32_t jobRef;
int32_t jobNum;
- SSchedulerStat stat;
+ SSchStat stat;
SHashObj *hbConnections;
} SSchedulerMgmt;
@@ -108,7 +120,7 @@ typedef struct SSchTaskCallbackParam {
typedef struct SSchHbCallbackParam {
SSchCallbackParamHeader head;
SQueryNodeEpId nodeEpId;
- void *transport;
+ void *pTrans;
} SSchHbCallbackParam;
typedef struct SSchFlowControl {
@@ -170,7 +182,7 @@ typedef struct SSchJob {
SSchJobAttr attr;
int32_t levelNum;
int32_t taskNum;
- void *transport;
+ void *pTrans;
SArray *nodeList; // qnode/vnode list, SArray
SArray *levels; // starting from 0. SArray
SNodeList *subPlans; // subplan pointer copied from DAG, no need to free it in scheduler
@@ -191,12 +203,13 @@ typedef struct SSchJob {
int32_t remoteFetch;
SSchTask *fetchTask;
int32_t errCode;
- SArray *errList; // SArray
SRWLatch resLock;
- void *queryRes;
+ SQueryExecRes execRes;
void *resData; //TODO free it or not
int32_t resNumOfRows;
+ SSchResInfo userRes;
const char *sql;
+ int32_t userCb;
SQueryProfileSummary summary;
} SSchJob;
@@ -284,23 +297,28 @@ void schFreeRpcCtx(SRpcCtx *pCtx);
int32_t schGetCallbackFp(int32_t msgType, __async_send_cb_fn_t *fp);
bool schJobNeedToStop(SSchJob *pJob, int8_t *pStatus);
int32_t schProcessOnTaskSuccess(SSchJob *pJob, SSchTask *pTask);
-int32_t schSaveJobQueryRes(SSchJob *pJob, SResReadyRsp *rsp);
+int32_t schSaveJobQueryRes(SSchJob *pJob, SQueryTableRsp *rsp);
int32_t schProcessOnExplainDone(SSchJob *pJob, SSchTask *pTask, SRetrieveTableRsp *pRsp);
void schProcessOnDataFetched(SSchJob *job);
-int32_t schGetTaskFromTaskList(SHashObj *pTaskList, uint64_t taskId, SSchTask **pTask);
+int32_t schGetTaskInJob(SSchJob *pJob, uint64_t taskId, SSchTask **pTask);
int32_t schUpdateTaskExecNodeHandle(SSchTask *pTask, void *handle, int32_t rspCode);
void schFreeRpcCtxVal(const void *arg);
int32_t schMakeBrokenLinkVal(SSchJob *pJob, SSchTask *pTask, SRpcBrokenlinkVal *brokenVal, bool isHb);
int32_t schRecordTaskExecNode(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr, void *handle);
-int32_t schExecStaticExplain(void *transport, SArray *pNodeList, SQueryPlan *pDag, int64_t *job, const char *sql,
- bool syncSchedule);
-int32_t schExecJobImpl(void *transport, SArray *pNodeList, SQueryPlan *pDag, int64_t *job, const char *sql,
- int64_t startTs, bool sync);
+int32_t schExecStaticExplainJob(void *pTrans, SArray *pNodeList, SQueryPlan *pDag, int64_t *job, const char *sql,
+ SSchResInfo *pRes, bool sync);
+int32_t schExecJobImpl(void *pTrans, SArray *pNodeList, SQueryPlan *pDag, int64_t *job, const char *sql,
+ SSchResInfo *pRes, int64_t startTs, bool sync);
int32_t schChkUpdateJobStatus(SSchJob *pJob, int8_t newStatus);
int32_t schCancelJob(SSchJob *pJob);
int32_t schProcessOnJobDropped(SSchJob *pJob, int32_t errCode);
uint64_t schGenTaskId(void);
void schCloseJobRef(void);
+int32_t schExecJob(void *pTrans, SArray *pNodeList, SQueryPlan *pDag, int64_t *pJob, const char *sql, int64_t startTs, SSchResInfo *pRes);
+int32_t schAsyncExecJob(void *pTrans, SArray *pNodeList, SQueryPlan *pDag, int64_t *pJob, const char *sql, int64_t startTs, SSchResInfo *pRes);
+int32_t schFetchRows(SSchJob *pJob);
+int32_t schAsyncFetchRows(SSchJob *pJob);
+int32_t schUpdateTaskHandle(SSchJob *pJob, SSchTask *pTask, int32_t msgType, void *handle, int32_t rspCode);
#ifdef __cplusplus
diff --git a/source/libs/scheduler/src/schDbg.c b/source/libs/scheduler/src/schDbg.c
new file mode 100644
index 0000000000000000000000000000000000000000..4b5f74114d2ae7d4ec47b09f8a48da2f3f61de8d
--- /dev/null
+++ b/source/libs/scheduler/src/schDbg.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include "query.h"
+#include "schedulerInt.h"
+
+tsem_t schdRspSem;
+
+void schdExecCallback(SQueryResult* pResult, void* param, int32_t code) {
+ if (code) {
+ pResult->code = code;
+ }
+
+ *(SQueryResult*)param = *pResult;
+
+ taosMemoryFree(pResult);
+
+ tsem_post(&schdRspSem);
+}
+
+void schdFetchCallback(void* pResult, void* param, int32_t code) {
+ SSchdFetchParam* fParam = (SSchdFetchParam*)param;
+
+ *fParam->pData = pResult;
+ *fParam->code = code;
+
+ tsem_post(&schdRspSem);
+}
+
+
diff --git a/source/libs/scheduler/src/schJob.c b/source/libs/scheduler/src/schJob.c
index 14f46463971fd103b1cf79a1c6bddd9ee0efa85c..dbad053c65ba3572a6d4740e4b991f3760e9631b 100644
--- a/source/libs/scheduler/src/schJob.c
+++ b/source/libs/scheduler/src/schJob.c
@@ -39,8 +39,8 @@ int32_t schInitTask(SSchJob *pJob, SSchTask *pTask, SSubplan *pPlan, SSchLevel *
return TSDB_CODE_SUCCESS;
}
-int32_t schInitJob(SSchJob **pSchJob, SQueryPlan *pDag, void *transport, SArray *pNodeList, const char *sql,
- int64_t startTs, bool syncSchedule) {
+int32_t schInitJob(SSchJob **pSchJob, SQueryPlan *pDag, void *pTrans, SArray *pNodeList, const char *sql,
+ SSchResInfo *pRes, int64_t startTs, bool syncSchedule) {
int32_t code = 0;
int64_t refId = -1;
SSchJob *pJob = taosMemoryCalloc(1, sizeof(SSchJob));
@@ -51,9 +51,12 @@ int32_t schInitJob(SSchJob **pSchJob, SQueryPlan *pDag, void *transport, SArray
pJob->attr.explainMode = pDag->explainInfo.mode;
pJob->attr.syncSchedule = syncSchedule;
- pJob->transport = transport;
+ pJob->pTrans = pTrans;
pJob->sql = sql;
-
+ if (pRes) {
+ pJob->userRes = *pRes;
+ }
+
if (pNodeList != NULL) {
pJob->nodeList = taosArrayDup(pNodeList);
}
@@ -339,6 +342,36 @@ int32_t schRecordTaskExecNode(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *ad
return TSDB_CODE_SUCCESS;
}
+int32_t schDropTaskExecNode(SSchJob *pJob, SSchTask *pTask, void *handle) {
+ if (NULL == pTask->execNodes) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ int32_t num = taosArrayGetSize(pTask->execNodes);
+ for (int32_t i = 0; i < num; ++i) {
+ SSchNodeInfo* pNode = taosArrayGet(pTask->execNodes, i);
+ if (pNode->handle == handle) {
+ taosArrayRemove(pTask->execNodes, i);
+ break;
+ }
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t schUpdateTaskHandle(SSchJob *pJob, SSchTask *pTask, int32_t msgType, void *handle, int32_t rspCode) {
+ SCH_SET_TASK_HANDLE(pTask, handle);
+
+ schUpdateTaskExecNodeHandle(pTask, handle, rspCode);
+
+ if (msgType == TDMT_SCH_LINK_BROKEN) {
+ schDropTaskExecNode(pJob, pTask, handle);
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+
int32_t schRecordQueryDataSrc(SSchJob *pJob, SSchTask *pTask) {
if (!SCH_IS_DATA_SRC_QRY_TASK(pTask)) {
return TSDB_CODE_SUCCESS;
@@ -458,6 +491,7 @@ int32_t schValidateAndBuildJob(SQueryPlan *pDag, SSchJob *pJob) {
SCH_ERR_JRET(schBuildTaskRalation(pJob, planToTask));
_return:
+
if (planToTask) {
taosHashCleanup(planToTask);
}
@@ -465,6 +499,34 @@ _return:
SCH_RET(code);
}
+int32_t schSetAddrsFromNodeList(SSchJob *pJob, SSchTask *pTask) {
+ int32_t addNum = 0;
+ int32_t nodeNum = 0;
+
+ if (pJob->nodeList) {
+ nodeNum = taosArrayGetSize(pJob->nodeList);
+
+ for (int32_t i = 0; i < nodeNum && addNum < SCH_MAX_CANDIDATE_EP_NUM; ++i) {
+ SQueryNodeAddr *naddr = taosArrayGet(pJob->nodeList, i);
+
+ if (NULL == taosArrayPush(pTask->candidateAddrs, naddr)) {
+ SCH_TASK_ELOG("taosArrayPush execNode to candidate addrs failed, addNum:%d, errno:%d", addNum, errno);
+ SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
+ }
+
+ ++addNum;
+ }
+ }
+
+ if (addNum <= 0) {
+ SCH_TASK_ELOG("no available execNode as candidates, nodeNum:%d", nodeNum);
+ SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+
int32_t schSetTaskCandidateAddrs(SSchJob *pJob, SSchTask *pTask) {
if (NULL != pTask->candidateAddrs) {
return TSDB_CODE_SUCCESS;
@@ -488,27 +550,7 @@ int32_t schSetTaskCandidateAddrs(SSchJob *pJob, SSchTask *pTask) {
return TSDB_CODE_SUCCESS;
}
- int32_t addNum = 0;
- int32_t nodeNum = 0;
- if (pJob->nodeList) {
- nodeNum = taosArrayGetSize(pJob->nodeList);
-
- for (int32_t i = 0; i < nodeNum && addNum < SCH_MAX_CANDIDATE_EP_NUM; ++i) {
- SQueryNodeAddr *naddr = taosArrayGet(pJob->nodeList, i);
-
- if (NULL == taosArrayPush(pTask->candidateAddrs, naddr)) {
- SCH_TASK_ELOG("taosArrayPush execNode to candidate addrs failed, addNum:%d, errno:%d", addNum, errno);
- SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
- }
-
- ++addNum;
- }
- }
-
- if (addNum <= 0) {
- SCH_TASK_ELOG("no available execNode as candidates, nodeNum:%d", nodeNum);
- SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
- }
+ SCH_ERR_RET(schSetAddrsFromNodeList(pJob, pTask));
/*
for (int32_t i = 0; i < job->dataSrcEps.numOfEps && addNum < SCH_MAX_CANDIDATE_EP_NUM; ++i) {
@@ -727,6 +769,69 @@ _return:
SCH_JOB_DLOG("job errCode updated to %x - %s", errCode, tstrerror(errCode));
}
+
+int32_t schSetJobQueryRes(SSchJob* pJob, SQueryResult* pRes) {
+ pRes->code = atomic_load_32(&pJob->errCode);
+ pRes->numOfRows = pJob->resNumOfRows;
+ pRes->res = pJob->execRes;
+ pJob->execRes.res = NULL;
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t schSetJobFetchRes(SSchJob* pJob, void** pData) {
+ int32_t code = 0;
+ if (pJob->resData && ((SRetrieveTableRsp *)pJob->resData)->completed) {
+ SCH_ERR_RET(schChkUpdateJobStatus(pJob, JOB_TASK_STATUS_SUCCEED));
+ }
+
+ while (true) {
+ *pData = atomic_load_ptr(&pJob->resData);
+ if (*pData != atomic_val_compare_exchange_ptr(&pJob->resData, *pData, NULL)) {
+ continue;
+ }
+
+ break;
+ }
+
+ if (NULL == *pData) {
+ SRetrieveTableRsp *rsp = (SRetrieveTableRsp *)taosMemoryCalloc(1, sizeof(SRetrieveTableRsp));
+ if (rsp) {
+ rsp->completed = 1;
+ }
+
+ *pData = rsp;
+ SCH_JOB_DLOG("empty res and set query complete, code:%x", code);
+ }
+
+ SCH_JOB_DLOG("fetch done, totalRows:%d", pJob->resNumOfRows);
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t schNotifyUserQueryRes(SSchJob* pJob) {
+ pJob->userRes.queryRes = taosMemoryCalloc(1, sizeof(*pJob->userRes.queryRes));
+ if (pJob->userRes.queryRes) {
+ schSetJobQueryRes(pJob, pJob->userRes.queryRes);
+ }
+
+ (*pJob->userRes.execFp)(pJob->userRes.queryRes, pJob->userRes.userParam, atomic_load_32(&pJob->errCode));
+
+ pJob->userRes.queryRes = NULL;
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t schNotifyUserFetchRes(SSchJob* pJob) {
+ void* pRes = NULL;
+
+ SCH_ERR_RET(schSetJobFetchRes(pJob, &pRes));
+
+ (*pJob->userRes.fetchFp)(pRes, pJob->userRes.userParam, atomic_load_32(&pJob->errCode));
+
+ return TSDB_CODE_SUCCESS;
+}
+
int32_t schProcessOnJobFailureImpl(SSchJob *pJob, int32_t status, int32_t errCode) {
// if already FAILED, no more processing
SCH_ERR_RET(schChkUpdateJobStatus(pJob, status));
@@ -741,6 +846,14 @@ int32_t schProcessOnJobFailureImpl(SSchJob *pJob, int32_t status, int32_t errCod
SCH_JOB_DLOG("job failed with error: %s", tstrerror(code));
+ if (!pJob->attr.syncSchedule) {
+ if (SCH_EXEC_CB == atomic_val_compare_exchange_32(&pJob->userCb, SCH_EXEC_CB, 0)) {
+ schNotifyUserQueryRes(pJob);
+ } else if (SCH_FETCH_CB == atomic_val_compare_exchange_32(&pJob->userCb, SCH_FETCH_CB, 0)) {
+ schNotifyUserFetchRes(pJob);
+ }
+ }
+
SCH_RET(code);
}
@@ -762,6 +875,10 @@ int32_t schProcessOnJobPartialSuccess(SSchJob *pJob) {
if (pJob->attr.syncSchedule) {
tsem_post(&pJob->rspSem);
+ } else if (SCH_EXEC_CB == atomic_val_compare_exchange_32(&pJob->userCb, SCH_EXEC_CB, 0)) {
+ schNotifyUserQueryRes(pJob);
+ } else if (SCH_FETCH_CB == atomic_val_compare_exchange_32(&pJob->userCb, SCH_FETCH_CB, 0)) {
+ schNotifyUserFetchRes(pJob);
}
if (atomic_load_8(&pJob->userFetch)) {
@@ -777,7 +894,12 @@ _return:
void schProcessOnDataFetched(SSchJob *job) {
atomic_val_compare_exchange_32(&job->remoteFetch, 1, 0);
- tsem_post(&job->rspSem);
+
+ if (job->attr.syncSchedule) {
+ tsem_post(&job->rspSem);
+ } else if (SCH_FETCH_CB == atomic_val_compare_exchange_32(&job->userCb, SCH_FETCH_CB, 0)) {
+ schNotifyUserFetchRes(job);
+ }
}
// Note: no more task error processing, handled in function internal
@@ -917,19 +1039,19 @@ int32_t schProcessOnTaskSuccess(SSchJob *pJob, SSchTask *pTask) {
*/
for (int32_t i = 0; i < parentNum; ++i) {
- SSchTask *par = *(SSchTask **)taosArrayGet(pTask->parents, i);
- int32_t readyNum = atomic_add_fetch_32(&par->childReady, 1);
+ SSchTask *parent = *(SSchTask **)taosArrayGet(pTask->parents, i);
+ int32_t readyNum = atomic_add_fetch_32(&parent->childReady, 1);
- SCH_LOCK(SCH_WRITE, &par->lock);
+ SCH_LOCK(SCH_WRITE, &parent->lock);
SDownstreamSourceNode source = {.type = QUERY_NODE_DOWNSTREAM_SOURCE,
.taskId = pTask->taskId,
.schedId = schMgmt.sId,
.addr = pTask->succeedAddr};
- qSetSubplanExecutionNode(par->plan, pTask->plan->id.groupId, &source);
- SCH_UNLOCK(SCH_WRITE, &par->lock);
+ qSetSubplanExecutionNode(parent->plan, pTask->plan->id.groupId, &source);
+ SCH_UNLOCK(SCH_WRITE, &parent->lock);
- if (SCH_TASK_READY_FOR_LAUNCH(readyNum, par)) {
- SCH_ERR_RET(schLaunchTask(pJob, par));
+ if (SCH_TASK_READY_FOR_LAUNCH(readyNum, parent)) {
+ SCH_ERR_RET(schLaunchTask(pJob, parent));
}
}
@@ -983,11 +1105,11 @@ int32_t schProcessOnExplainDone(SSchJob *pJob, SSchTask *pTask, SRetrieveTableRs
return TSDB_CODE_SUCCESS;
}
-int32_t schSaveJobQueryRes(SSchJob *pJob, SResReadyRsp *rsp) {
+int32_t schSaveJobQueryRes(SSchJob *pJob, SQueryTableRsp *rsp) {
if (rsp->tbFName[0]) {
- if (NULL == pJob->queryRes) {
- pJob->queryRes = taosArrayInit(pJob->taskNum, sizeof(STbVerInfo));
- if (NULL == pJob->queryRes) {
+ if (NULL == pJob->execRes.res) {
+ pJob->execRes.res = taosArrayInit(pJob->taskNum, sizeof(STbVerInfo));
+ if (NULL == pJob->execRes.res) {
SCH_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
}
@@ -997,13 +1119,14 @@ int32_t schSaveJobQueryRes(SSchJob *pJob, SResReadyRsp *rsp) {
tbInfo.sversion = rsp->sversion;
tbInfo.tversion = rsp->tversion;
- taosArrayPush((SArray *)pJob->queryRes, &tbInfo);
+ taosArrayPush((SArray *)pJob->execRes.res, &tbInfo);
+ pJob->execRes.msgType = TDMT_VND_QUERY;
}
return TSDB_CODE_SUCCESS;
}
-int32_t schGetTaskFromTaskList(SHashObj *pTaskList, uint64_t taskId, SSchTask **pTask) {
+int32_t schGetTaskFromList(SHashObj *pTaskList, uint64_t taskId, SSchTask **pTask) {
int32_t s = taosHashGetSize(pTaskList);
if (s <= 0) {
return TSDB_CODE_SUCCESS;
@@ -1019,6 +1142,21 @@ int32_t schGetTaskFromTaskList(SHashObj *pTaskList, uint64_t taskId, SSchTask **
return TSDB_CODE_SUCCESS;
}
+int32_t schGetTaskInJob(SSchJob *pJob, uint64_t taskId, SSchTask **pTask) {
+ schGetTaskFromList(pJob->execTasks, taskId, pTask);
+ if (NULL == *pTask) {
+ schGetTaskFromList(pJob->succTasks, taskId, pTask);
+
+ if (NULL == *pTask) {
+ SCH_JOB_ELOG("task not found in execList & succList, taskId:%" PRIx64, taskId);
+ SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR);
+ }
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+
int32_t schUpdateTaskExecNodeHandle(SSchTask *pTask, void *handle, int32_t rspCode) {
if (rspCode || NULL == pTask->execNodes || taosArrayGetSize(pTask->execNodes) > 1 ||
taosArrayGetSize(pTask->execNodes) <= 0) {
@@ -1212,12 +1350,9 @@ void schFreeJobImpl(void *job) {
qExplainFreeCtx(pJob->explainCtx);
- if (SCH_IS_QUERY_JOB(pJob)) {
- taosArrayDestroy((SArray *)pJob->queryRes);
- } else {
- tFreeSSubmitRsp((SSubmitRsp*)pJob->queryRes);
- }
+ destroyQueryExecRes(&pJob->execRes);
+ taosMemoryFreeClear(pJob->userRes.queryRes);
taosMemoryFreeClear(pJob->resData);
taosMemoryFreeClear(pJob);
@@ -1228,8 +1363,8 @@ void schFreeJobImpl(void *job) {
schCloseJobRef();
}
-int32_t schExecJobImpl(void *transport, SArray *pNodeList, SQueryPlan *pDag, int64_t *job, const char *sql,
- int64_t startTs, bool sync) {
+int32_t schExecJobImpl(void *pTrans, SArray *pNodeList, SQueryPlan *pDag, int64_t *job, const char *sql,
+ SSchResInfo *pRes, int64_t startTs, bool sync) {
qDebug("QID:0x%" PRIx64 " job started", pDag->queryId);
if (pNodeList == NULL || taosArrayGetSize(pNodeList) <= 0) {
@@ -1238,31 +1373,68 @@ int32_t schExecJobImpl(void *transport, SArray *pNodeList, SQueryPlan *pDag, int
int32_t code = 0;
SSchJob *pJob = NULL;
- SCH_ERR_JRET(schInitJob(&pJob, pDag, transport, pNodeList, sql, startTs, sync));
-
- SCH_ERR_JRET(schLaunchJob(pJob));
+ SCH_ERR_RET(schInitJob(&pJob, pDag, pTrans, pNodeList, sql, pRes, startTs, sync));
*job = pJob->refId;
+ SCH_ERR_JRET(schLaunchJob(pJob));
+
if (sync) {
SCH_JOB_DLOG("will wait for rsp now, job status:%s", SCH_GET_JOB_STATUS_STR(pJob));
tsem_wait(&pJob->rspSem);
+ } else {
+ pJob->userCb = SCH_EXEC_CB;
}
SCH_JOB_DLOG("job exec done, job status:%s", SCH_GET_JOB_STATUS_STR(pJob));
+_return:
+
schReleaseJob(pJob->refId);
+
+ SCH_RET(code);
+}
- return TSDB_CODE_SUCCESS;
+int32_t schExecJob(void *pTrans, SArray *pNodeList, SQueryPlan *pDag, int64_t *pJob, const char *sql,
+ int64_t startTs, SSchResInfo *pRes) {
+ int32_t code = 0;
+
+ *pJob = 0;
+
+ if (EXPLAIN_MODE_STATIC == pDag->explainInfo.mode) {
+ SCH_ERR_JRET(schExecStaticExplainJob(pTrans, pNodeList, pDag, pJob, sql, NULL, true));
+ } else {
+ SCH_ERR_JRET(schExecJobImpl(pTrans, pNodeList, pDag, pJob, sql, NULL, startTs, true));
+ }
_return:
- schFreeJobImpl(pJob);
- SCH_RET(code);
+ if (*pJob) {
+ SSchJob *job = schAcquireJob(*pJob);
+ schSetJobQueryRes(job, pRes->queryRes);
+ schReleaseJob(*pJob);
+ }
+
+ return code;
+}
+
+int32_t schAsyncExecJob(void *pTrans, SArray *pNodeList, SQueryPlan *pDag, int64_t *pJob, const char *sql,
+ int64_t startTs, SSchResInfo *pRes) {
+ int32_t code = 0;
+
+ *pJob = 0;
+
+ if (EXPLAIN_MODE_STATIC == pDag->explainInfo.mode) {
+ SCH_ERR_RET(schExecStaticExplainJob(pTrans, pNodeList, pDag, pJob, sql, pRes, false));
+ } else {
+ SCH_ERR_RET(schExecJobImpl(pTrans, pNodeList, pDag, pJob, sql, pRes, startTs, false));
+ }
+
+ return code;
}
-int32_t schExecStaticExplain(void *transport, SArray *pNodeList, SQueryPlan *pDag, int64_t *job, const char *sql,
- bool syncSchedule) {
+int32_t schExecStaticExplainJob(void *pTrans, SArray *pNodeList, SQueryPlan *pDag, int64_t *job, const char *sql,
+ SSchResInfo *pRes, bool sync) {
qDebug("QID:0x%" PRIx64 " job started", pDag->queryId);
int32_t code = 0;
@@ -1274,10 +1446,14 @@ int32_t schExecStaticExplain(void *transport, SArray *pNodeList, SQueryPlan *pDa
pJob->sql = sql;
pJob->attr.queryJob = true;
+ pJob->attr.syncSchedule = sync;
pJob->attr.explainMode = pDag->explainInfo.mode;
pJob->queryId = pDag->queryId;
pJob->subPlans = pDag->pSubplans;
-
+ if (pRes) {
+ pJob->userRes = *pRes;
+ }
+
SCH_ERR_JRET(qExecStaticExplain(pDag, (SRetrieveTableRsp **)&pJob->resData));
int64_t refId = taosAddRef(schMgmt.jobRef, pJob);
@@ -1288,7 +1464,7 @@ int32_t schExecStaticExplain(void *transport, SArray *pNodeList, SQueryPlan *pDa
if (NULL == schAcquireJob(refId)) {
SCH_JOB_ELOG("schAcquireJob job failed, refId:%" PRIx64, refId);
- SCH_RET(TSDB_CODE_SCH_STATUS_ERROR);
+ SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
}
pJob->refId = refId;
@@ -1296,12 +1472,17 @@ int32_t schExecStaticExplain(void *transport, SArray *pNodeList, SQueryPlan *pDa
SCH_JOB_DLOG("job refId:%" PRIx64, pJob->refId);
pJob->status = JOB_TASK_STATUS_PARTIAL_SUCCEED;
+
*job = pJob->refId;
SCH_JOB_DLOG("job exec done, job status:%s", SCH_GET_JOB_STATUS_STR(pJob));
+ if (!pJob->attr.syncSchedule) {
+ code = schNotifyUserQueryRes(pJob);
+ }
+
schReleaseJob(pJob->refId);
- return TSDB_CODE_SUCCESS;
+ SCH_RET(code);
_return:
@@ -1309,4 +1490,103 @@ _return:
SCH_RET(code);
}
+int32_t schFetchRows(SSchJob *pJob) {
+ int32_t code = 0;
+
+ int8_t status = SCH_GET_JOB_STATUS(pJob);
+ if (status == JOB_TASK_STATUS_DROPPING) {
+ SCH_JOB_ELOG("job is dropping, status:%s", jobTaskStatusStr(status));
+ SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
+ }
+
+ if (!SCH_JOB_NEED_FETCH(pJob)) {
+ SCH_JOB_ELOG("no need to fetch data, status:%s", SCH_GET_JOB_STATUS_STR(pJob));
+ SCH_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
+ }
+
+ if (atomic_val_compare_exchange_8(&pJob->userFetch, 0, 1) != 0) {
+ SCH_JOB_ELOG("prior fetching not finished, userFetch:%d", atomic_load_8(&pJob->userFetch));
+ SCH_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
+ }
+
+ if (JOB_TASK_STATUS_FAILED == status || JOB_TASK_STATUS_DROPPING == status) {
+ SCH_JOB_ELOG("job failed or dropping, status:%s", jobTaskStatusStr(status));
+ SCH_ERR_JRET(atomic_load_32(&pJob->errCode));
+ } else if (status == JOB_TASK_STATUS_SUCCEED) {
+ SCH_JOB_DLOG("job already succeed, status:%s", jobTaskStatusStr(status));
+ goto _return;
+ } else if (status != JOB_TASK_STATUS_PARTIAL_SUCCEED) {
+ SCH_JOB_ELOG("job status error for fetch, status:%s", jobTaskStatusStr(status));
+ SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR);
+ }
+
+ if (!(pJob->attr.explainMode == EXPLAIN_MODE_STATIC)) {
+ SCH_ERR_JRET(schFetchFromRemote(pJob));
+ tsem_wait(&pJob->rspSem);
+
+ status = SCH_GET_JOB_STATUS(pJob);
+ if (JOB_TASK_STATUS_FAILED == status || JOB_TASK_STATUS_DROPPING == status) {
+ SCH_JOB_ELOG("job failed or dropping, status:%s", jobTaskStatusStr(status));
+ SCH_ERR_JRET(atomic_load_32(&pJob->errCode));
+ }
+ }
+
+ SCH_ERR_JRET(schSetJobFetchRes(pJob, pJob->userRes.fetchRes));
+
+_return:
+
+ atomic_val_compare_exchange_8(&pJob->userFetch, 1, 0);
+
+ SCH_RET(code);
+}
+
+int32_t schAsyncFetchRows(SSchJob *pJob) {
+ int32_t code = 0;
+
+ int8_t status = SCH_GET_JOB_STATUS(pJob);
+ if (status == JOB_TASK_STATUS_DROPPING) {
+ SCH_JOB_ELOG("job is dropping, status:%s", jobTaskStatusStr(status));
+ SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
+ }
+
+ if (!SCH_JOB_NEED_FETCH(pJob)) {
+ SCH_JOB_ELOG("no need to fetch data, status:%s", SCH_GET_JOB_STATUS_STR(pJob));
+ SCH_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
+ }
+
+ if (atomic_val_compare_exchange_8(&pJob->userFetch, 0, 1) != 0) {
+ SCH_JOB_ELOG("prior fetching not finished, userFetch:%d", atomic_load_8(&pJob->userFetch));
+ SCH_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
+ }
+
+ if (JOB_TASK_STATUS_FAILED == status || JOB_TASK_STATUS_DROPPING == status) {
+ SCH_JOB_ELOG("job failed or dropping, status:%s", jobTaskStatusStr(status));
+ SCH_ERR_JRET(atomic_load_32(&pJob->errCode));
+ } else if (status == JOB_TASK_STATUS_SUCCEED) {
+ SCH_JOB_DLOG("job already succeed, status:%s", jobTaskStatusStr(status));
+ goto _return;
+ } else if (status != JOB_TASK_STATUS_PARTIAL_SUCCEED) {
+ SCH_JOB_ELOG("job status error for fetch, status:%s", jobTaskStatusStr(status));
+ SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR);
+ }
+
+ if (pJob->attr.explainMode == EXPLAIN_MODE_STATIC) {
+ SCH_ERR_JRET(schNotifyUserFetchRes(pJob));
+
+ atomic_val_compare_exchange_8(&pJob->userFetch, 1, 0);
+ } else {
+ pJob->userCb = SCH_FETCH_CB;
+
+ SCH_ERR_JRET(schFetchFromRemote(pJob));
+ }
+
+ return TSDB_CODE_SUCCESS;
+
+_return:
+
+ atomic_val_compare_exchange_8(&pJob->userFetch, 1, 0);
+
+ SCH_RET(code);
+}
+
diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c
index 6d9f6b435f5314f4a2f084b2f3e8bdf5a6f8e903..0ba91a1c85649463225bd5f6d82edad4b56714c9 100644
--- a/source/libs/scheduler/src/schRemote.c
+++ b/source/libs/scheduler/src/schRemote.c
@@ -31,7 +31,7 @@ int32_t schValidateReceivedMsgType(SSchJob *pJob, SSchTask *pTask, int32_t msgTy
case TDMT_VND_EXPLAIN_RSP:
return TSDB_CODE_SUCCESS;
case TDMT_VND_QUERY_RSP: // query_rsp may be processed later than ready_rsp
- if (lastMsgType != reqMsgType && -1 != lastMsgType && TDMT_VND_FETCH != lastMsgType) {
+ if (lastMsgType != reqMsgType && -1 != lastMsgType) {
SCH_TASK_DLOG("rsp msg type mis-match, last sent msgType:%s, rspType:%s", TMSG_INFO(lastMsgType),
TMSG_INFO(msgType));
}
@@ -41,22 +41,6 @@ int32_t schValidateReceivedMsgType(SSchJob *pJob, SSchTask *pTask, int32_t msgTy
TMSG_INFO(msgType));
}
- SCH_SET_TASK_LASTMSG_TYPE(pTask, -1);
- return TSDB_CODE_SUCCESS;
- case TDMT_VND_RES_READY_RSP:
- reqMsgType = TDMT_VND_QUERY;
- if (lastMsgType != reqMsgType && -1 != lastMsgType) {
- SCH_TASK_ELOG("rsp msg type mis-match, last sent msgType:%s, rspType:%s",
- (lastMsgType > 0 ? TMSG_INFO(lastMsgType) : "null"), TMSG_INFO(msgType));
- SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
- }
-
- if (taskStatus != JOB_TASK_STATUS_EXECUTING && taskStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED) {
- SCH_TASK_ELOG("rsp msg conflicted with task status, status:%s, rspType:%s", jobTaskStatusStr(taskStatus),
- TMSG_INFO(msgType));
- SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
- }
-
SCH_SET_TASK_LASTMSG_TYPE(pTask, -1);
return TSDB_CODE_SUCCESS;
case TDMT_VND_FETCH_RSP:
@@ -110,6 +94,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
if (schJobNeedToStop(pJob, &status)) {
SCH_TASK_ELOG("rsp not processed cause of job status, job status:%s, rspCode:0x%x", jobTaskStatusStr(status),
rspCode);
+ taosMemoryFreeClear(msg);
SCH_RET(atomic_load_32(&pJob->errCode));
}
@@ -137,6 +122,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
}
SCH_ERR_JRET(rspCode);
+ taosMemoryFreeClear(msg);
+
SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask));
break;
}
@@ -161,6 +148,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
}
SCH_ERR_JRET(rspCode);
+ taosMemoryFreeClear(msg);
+
SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask));
break;
}
@@ -173,6 +162,9 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
tDecoderClear(&coder);
SCH_ERR_JRET(code);
SCH_ERR_JRET(rsp.code);
+
+ pJob->execRes.res = rsp.pMeta;
+ pJob->execRes.msgType = TDMT_VND_ALTER_TABLE;
}
SCH_ERR_JRET(rspCode);
@@ -180,6 +172,9 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
if (NULL == msg) {
SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT);
}
+
+ taosMemoryFreeClear(msg);
+
SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask));
break;
}
@@ -212,8 +207,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
SCH_TASK_DLOG("submit succeed, affectedRows:%d", rsp->affectedRows);
SCH_LOCK(SCH_WRITE, &pJob->resLock);
- if (pJob->queryRes) {
- SSubmitRsp *sum = pJob->queryRes;
+ if (pJob->execRes.res) {
+ SSubmitRsp *sum = pJob->execRes.res;
sum->affectedRows += rsp->affectedRows;
sum->nBlocks += rsp->nBlocks;
sum->pBlocks = taosMemoryRealloc(sum->pBlocks, sum->nBlocks * sizeof(*sum->pBlocks));
@@ -221,34 +216,20 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
taosMemoryFree(rsp->pBlocks);
taosMemoryFree(rsp);
} else {
- pJob->queryRes = rsp;
+ pJob->execRes.res = rsp;
+ pJob->execRes.msgType = TDMT_VND_SUBMIT;
}
SCH_UNLOCK(SCH_WRITE, &pJob->resLock);
}
+ taosMemoryFreeClear(msg);
+
SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask));
break;
}
case TDMT_VND_QUERY_RSP: {
- SQueryTableRsp rsp = {0};
- if (msg) {
- SCH_ERR_JRET(tDeserializeSQueryTableRsp(msg, msgSize, &rsp));
- SCH_ERR_JRET(rsp.code);
- }
-
- SCH_ERR_JRET(rspCode);
-
- if (NULL == msg) {
- SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT);
- }
-
- // SCH_ERR_JRET(schBuildAndSendMsg(pJob, pTask, NULL, TDMT_VND_RES_READY));
-
- break;
- }
- case TDMT_VND_RES_READY_RSP: {
- SResReadyRsp *rsp = (SResReadyRsp *)msg;
+ SQueryTableRsp *rsp = (SQueryTableRsp *)msg;
SCH_ERR_JRET(rspCode);
if (NULL == msg) {
@@ -257,6 +238,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
SCH_ERR_JRET(rsp->code);
SCH_ERR_JRET(schSaveJobQueryRes(pJob, rsp));
+
+ taosMemoryFreeClear(msg);
SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask));
@@ -308,6 +291,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
SCH_ERR_JRET(schProcessOnExplainDone(pJob, pTask, pRsp));
}
+ taosMemoryFreeClear(msg);
+
return TSDB_CODE_SUCCESS;
}
@@ -315,6 +300,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
SCH_ERR_JRET(schFetchFromRemote(pJob));
+ taosMemoryFreeClear(msg);
+
return TSDB_CODE_SUCCESS;
}
@@ -333,6 +320,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
SCH_TASK_DLOG("got fetch rsp, rows:%d, complete:%d", htonl(rsp->numOfRows), rsp->completed);
+ msg = NULL;
+
schProcessOnDataFetched(pJob);
break;
}
@@ -355,6 +344,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
_return:
+ taosMemoryFreeClear(msg);
+
SCH_RET(schProcessOnTaskFailure(pJob, pTask, code));
}
@@ -371,31 +362,16 @@ int32_t schHandleCallback(void *param, const SDataBuf *pMsg, int32_t msgType, in
SCH_ERR_JRET(TSDB_CODE_QRY_JOB_FREED);
}
- schGetTaskFromTaskList(pJob->execTasks, pParam->taskId, &pTask);
- if (NULL == pTask) {
- if (TDMT_VND_EXPLAIN_RSP == msgType) {
- schGetTaskFromTaskList(pJob->succTasks, pParam->taskId, &pTask);
- } else {
- SCH_JOB_ELOG("task not found in execTask list, refId:%" PRIx64 ", taskId:%" PRIx64, pParam->refId,
- pParam->taskId);
- SCH_ERR_JRET(TSDB_CODE_SCH_INTERNAL_ERROR);
- }
- }
-
- if (NULL == pTask) {
- SCH_JOB_ELOG("task not found in execList & succList, refId:%" PRIx64 ", taskId:%" PRIx64, pParam->refId,
- pParam->taskId);
- SCH_ERR_JRET(TSDB_CODE_SCH_INTERNAL_ERROR);
- }
+ SCH_ERR_JRET(schGetTaskInJob(pJob, pParam->taskId, &pTask));
SCH_TASK_DLOG("rsp msg received, type:%s, handle:%p, code:%s", TMSG_INFO(msgType), pMsg->handle, tstrerror(rspCode));
- SCH_SET_TASK_HANDLE(pTask, pMsg->handle);
- schUpdateTaskExecNodeHandle(pTask, pMsg->handle, rspCode);
+ SCH_ERR_JRET(schUpdateTaskHandle(pJob, pTask, msgType, pMsg->handle, rspCode));
SCH_ERR_JRET(schHandleResponseMsg(pJob, pTask, msgType, pMsg->pData, pMsg->len, rspCode));
_return:
+
if (pJob) {
schReleaseJob(pParam->refId);
}
@@ -428,10 +404,6 @@ int32_t schHandleFetchCallback(void *param, const SDataBuf *pMsg, int32_t code)
return schHandleCallback(param, pMsg, TDMT_VND_FETCH_RSP, code);
}
-int32_t schHandleReadyCallback(void *param, const SDataBuf *pMsg, int32_t code) {
- return schHandleCallback(param, pMsg, TDMT_VND_RES_READY_RSP, code);
-}
-
int32_t schHandleExplainCallback(void *param, const SDataBuf *pMsg, int32_t code) {
return schHandleCallback(param, pMsg, TDMT_VND_EXPLAIN_RSP, code);
}
@@ -439,6 +411,7 @@ int32_t schHandleExplainCallback(void *param, const SDataBuf *pMsg, int32_t code
int32_t schHandleDropCallback(void *param, const SDataBuf *pMsg, int32_t code) {
SSchTaskCallbackParam *pParam = (SSchTaskCallbackParam *)param;
qDebug("QID:%" PRIx64 ",TID:%" PRIx64 " drop task rsp received, code:%x", pParam->queryId, pParam->taskId, code);
+ taosMemoryFreeClear(param);
return TSDB_CODE_SUCCESS;
}
@@ -450,7 +423,7 @@ int32_t schHandleLinkBrokenCallback(void *param, const SDataBuf *pMsg, int32_t c
if (head->isHbParam) {
SSchHbCallbackParam *hbParam = (SSchHbCallbackParam *)param;
- SSchTrans trans = {.transInst = hbParam->transport, .transHandle = NULL};
+ SSchTrans trans = {.pTrans = hbParam->pTrans, .pHandle = NULL};
SCH_ERR_RET(schUpdateHbConnection(&hbParam->nodeEpId, &trans));
SCH_ERR_RET(schBuildAndSendHbMsg(&hbParam->nodeEpId));
@@ -481,7 +454,7 @@ int32_t schGenerateCallBackInfo(SSchJob *pJob, SSchTask *pTask, int32_t msgType,
param->queryId = pJob->queryId;
param->refId = pJob->refId;
param->taskId = SCH_TASK_ID(pTask);
- param->transport = pJob->transport;
+ param->transport = pJob->pTrans;
msgSendInfo->param = param;
msgSendInfo->fp = fp;
@@ -516,9 +489,6 @@ int32_t schGetCallbackFp(int32_t msgType, __async_send_cb_fn_t *fp) {
case TDMT_VND_QUERY:
*fp = schHandleQueryCallback;
break;
- case TDMT_VND_RES_READY:
- *fp = schHandleReadyCallback;
- break;
case TDMT_VND_EXPLAIN:
*fp = schHandleExplainCallback;
break;
@@ -555,8 +525,10 @@ int32_t schMakeHbCallbackParam(SSchJob *pJob, SSchTask *pTask, void **pParam) {
SQueryNodeAddr *addr = taosArrayGet(pTask->candidateAddrs, pTask->candidateIdx);
param->nodeEpId.nodeId = addr->nodeId;
- memcpy(¶m->nodeEpId.ep, SCH_GET_CUR_EP(addr), sizeof(SEp));
- param->transport = pJob->transport;
+ SEp* pEp = SCH_GET_CUR_EP(addr);
+ strcpy(param->nodeEpId.ep.fqdn, pEp->fqdn);
+ param->nodeEpId.ep.port = pEp->port;
+ param->pTrans = pJob->pTrans;
*pParam = param;
@@ -565,8 +537,9 @@ int32_t schMakeHbCallbackParam(SSchJob *pJob, SSchTask *pTask, void **pParam) {
int32_t schCloneHbRpcCtx(SRpcCtx *pSrc, SRpcCtx *pDst) {
int32_t code = 0;
- memcpy(&pDst->brokenVal, &pSrc->brokenVal, sizeof(pSrc->brokenVal));
+ memcpy(pDst, pSrc, sizeof(SRpcCtx));
pDst->brokenVal.val = NULL;
+ pDst->args = NULL;
SCH_ERR_RET(schCloneSMsgSendInfo(pSrc->brokenVal.val, &pDst->brokenVal.val));
@@ -589,7 +562,7 @@ int32_t schCloneHbRpcCtx(SRpcCtx *pSrc, SRpcCtx *pDst) {
if (taosHashPut(pDst->args, msgType, sizeof(*msgType), &dst, sizeof(dst))) {
qError("taosHashPut msg %d to rpcCtx failed", *msgType);
- (*dst.freeFunc)(dst.val);
+ (*pSrc->freeFunc)(dst.val);
SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
@@ -638,18 +611,19 @@ int32_t schMakeHbRpcCtx(SSchJob *pJob, SSchTask *pTask, SRpcCtx *pCtx) {
SCH_ERR_JRET(schGetCallbackFp(TDMT_VND_QUERY_HEARTBEAT, &fp));
param->nodeEpId = epId;
- param->transport = pJob->transport;
+ param->pTrans = pJob->pTrans;
pMsgSendInfo->param = param;
pMsgSendInfo->fp = fp;
- SRpcCtxVal ctxVal = {.val = pMsgSendInfo, .clone = schCloneSMsgSendInfo, .freeFunc = schFreeRpcCtxVal};
+ SRpcCtxVal ctxVal = {.val = pMsgSendInfo, .clone = schCloneSMsgSendInfo};
if (taosHashPut(pCtx->args, &msgType, sizeof(msgType), &ctxVal, sizeof(ctxVal))) {
SCH_TASK_ELOG("taosHashPut msg %d to rpcCtx failed", msgType);
SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
SCH_ERR_JRET(schMakeBrokenLinkVal(pJob, pTask, &pCtx->brokenVal, true));
+ pCtx->freeFunc = schFreeRpcCtxVal;
return TSDB_CODE_SUCCESS;
@@ -666,7 +640,7 @@ int32_t schRegisterHbConnection(SSchJob *pJob, SSchTask *pTask, SQueryNodeEpId *
int32_t code = 0;
SSchHbTrans hb = {0};
- hb.trans.transInst = pJob->transport;
+ hb.trans.pTrans = pJob->pTrans;
SCH_ERR_RET(schMakeHbRpcCtx(pJob, pTask, &hb.rpcCtx));
@@ -743,12 +717,12 @@ int32_t schBuildAndSendHbMsg(SQueryNodeEpId *nodeEpId) {
__async_send_cb_fn_t fp = NULL;
SCH_ERR_JRET(schGetCallbackFp(msgType, &fp));
- param->transport = trans.transInst;
+ param->transport = trans.pTrans;
pMsgSendInfo->param = param;
pMsgSendInfo->msgInfo.pData = msg;
pMsgSendInfo->msgInfo.len = msgSize;
- pMsgSendInfo->msgInfo.handle = trans.transHandle;
+ pMsgSendInfo->msgInfo.handle = trans.pHandle;
pMsgSendInfo->msgType = msgType;
pMsgSendInfo->fp = fp;
@@ -756,13 +730,13 @@ int32_t schBuildAndSendHbMsg(SQueryNodeEpId *nodeEpId) {
SEpSet epSet = {.inUse = 0, .numOfEps = 1};
memcpy(&epSet.eps[0], &nodeEpId->ep, sizeof(nodeEpId->ep));
- qDebug("start to send hb msg, instance:%p, handle:%p, fqdn:%s, port:%d", trans.transInst, trans.transHandle,
+ qDebug("start to send hb msg, pTrans:%p, pHandle:%p, fqdn:%s, port:%d", trans.pTrans, trans.pHandle,
nodeEpId->ep.fqdn, nodeEpId->ep.port);
- code = asyncSendMsgToServerExt(trans.transInst, &epSet, &transporterId, pMsgSendInfo, true, &rpcCtx);
+ code = asyncSendMsgToServerExt(trans.pTrans, &epSet, &transporterId, pMsgSendInfo, true, &rpcCtx);
if (code) {
- qError("fail to send hb msg, instance:%p, handle:%p, fqdn:%s, port:%d, error:%x - %s", trans.transInst,
- trans.transHandle, nodeEpId->ep.fqdn, nodeEpId->ep.port, code, tstrerror(code));
+ qError("fail to send hb msg, pTrans:%p, pHandle:%p, fqdn:%s, port:%d, error:%x - %s", trans.pTrans,
+ trans.pHandle, nodeEpId->ep.fqdn, nodeEpId->ep.port, code, tstrerror(code));
SCH_ERR_JRET(code);
}
@@ -784,7 +758,10 @@ int32_t schEnsureHbConnection(SSchJob *pJob, SSchTask *pTask) {
SQueryNodeEpId epId = {0};
epId.nodeId = addr->nodeId;
- memcpy(&epId.ep, SCH_GET_CUR_EP(addr), sizeof(SEp));
+
+ SEp* pEp = SCH_GET_CUR_EP(addr);
+ strcpy(epId.ep.fqdn, pEp->fqdn);
+ epId.ep.port = pEp->port;
SSchHbTrans *hb = taosHashGet(schMgmt.hbConnections, &epId, sizeof(SQueryNodeEpId));
if (NULL == hb) {
@@ -812,8 +789,8 @@ int32_t schUpdateHbConnection(SQueryNodeEpId *epId, SSchTrans *trans) {
memcpy(&hb->trans, trans, sizeof(*trans));
SCH_UNLOCK(SCH_WRITE, &hb->lock);
- qDebug("hb connection updated, sId:%" PRIx64 ", nodeId:%d, fqdn:%s, port:%d, instance:%p, handle:%p", schMgmt.sId,
- epId->nodeId, epId->ep.fqdn, epId->ep.port, trans->transInst, trans->transHandle);
+ qDebug("hb connection updated, sId:%" PRIx64 ", nodeId:%d, fqdn:%s, port:%d, pTrans:%p, pHandle:%p", schMgmt.sId,
+ epId->nodeId, epId->ep.fqdn, epId->ep.port, trans->pTrans, trans->pHandle);
return TSDB_CODE_SUCCESS;
}
@@ -833,8 +810,8 @@ int32_t schHandleHbCallback(void *param, const SDataBuf *pMsg, int32_t code) {
}
SSchTrans trans = {0};
- trans.transInst = pParam->transport;
- trans.transHandle = pMsg->handle;
+ trans.pTrans = pParam->transport;
+ trans.pHandle = pMsg->handle;
SCH_ERR_JRET(schUpdateHbConnection(&rsp.epId, &trans));
@@ -879,7 +856,7 @@ int32_t schMakeCallbackParam(SSchJob *pJob, SSchTask *pTask, void **pParam) {
param->queryId = pJob->queryId;
param->refId = pJob->refId;
param->taskId = SCH_TASK_ID(pTask);
- param->transport = pJob->transport;
+ param->transport = pJob->pTrans;
*pParam = param;
@@ -911,7 +888,6 @@ int32_t schMakeBrokenLinkVal(SSchJob *pJob, SSchTask *pTask, SRpcBrokenlinkVal *
brokenVal->msgType = msgType;
brokenVal->val = pMsgSendInfo;
brokenVal->clone = schCloneSMsgSendInfo;
- brokenVal->freeFunc = schFreeRpcCtxVal;
return TSDB_CODE_SUCCESS;
@@ -925,7 +901,6 @@ _return:
int32_t schMakeQueryRpcCtx(SSchJob *pJob, SSchTask *pTask, SRpcCtx *pCtx) {
int32_t code = 0;
- SMsgSendInfo *pReadyMsgSendInfo = NULL;
SMsgSendInfo *pExplainMsgSendInfo = NULL;
pCtx->args = taosHashInit(1, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_ENTRY_LOCK);
@@ -934,24 +909,17 @@ int32_t schMakeQueryRpcCtx(SSchJob *pJob, SSchTask *pTask, SRpcCtx *pCtx) {
SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
- SCH_ERR_JRET(schGenerateCallBackInfo(pJob, pTask, TDMT_VND_RES_READY, &pReadyMsgSendInfo));
SCH_ERR_JRET(schGenerateCallBackInfo(pJob, pTask, TDMT_VND_EXPLAIN, &pExplainMsgSendInfo));
- int32_t msgType = TDMT_VND_RES_READY_RSP;
- SRpcCtxVal ctxVal = {.val = pReadyMsgSendInfo, .clone = schCloneSMsgSendInfo, .freeFunc = schFreeRpcCtxVal};
- if (taosHashPut(pCtx->args, &msgType, sizeof(msgType), &ctxVal, sizeof(ctxVal))) {
- SCH_TASK_ELOG("taosHashPut msg %d to rpcCtx failed", msgType);
- SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
- }
-
- msgType = TDMT_VND_EXPLAIN_RSP;
- ctxVal.val = pExplainMsgSendInfo;
+ int32_t msgType = TDMT_VND_EXPLAIN_RSP;
+ SRpcCtxVal ctxVal = {.val = pExplainMsgSendInfo, .clone = schCloneSMsgSendInfo};
if (taosHashPut(pCtx->args, &msgType, sizeof(msgType), &ctxVal, sizeof(ctxVal))) {
SCH_TASK_ELOG("taosHashPut msg %d to rpcCtx failed", msgType);
SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
SCH_ERR_JRET(schMakeBrokenLinkVal(pJob, pTask, &pCtx->brokenVal, false));
+ pCtx->freeFunc = schFreeRpcCtxVal;
return TSDB_CODE_SUCCESS;
@@ -959,11 +927,6 @@ _return:
taosHashCleanup(pCtx->args);
- if (pReadyMsgSendInfo) {
- taosMemoryFreeClear(pReadyMsgSendInfo->param);
- taosMemoryFreeClear(pReadyMsgSendInfo);
- }
-
if (pExplainMsgSendInfo) {
taosMemoryFreeClear(pExplainMsgSendInfo->param);
taosMemoryFreeClear(pExplainMsgSendInfo);
@@ -1034,15 +997,15 @@ int32_t schAsyncSendMsg(SSchJob *pJob, SSchTask *pTask, void *transport, SEpSet
pMsgSendInfo->msgInfo.pData = msg;
pMsgSendInfo->msgInfo.len = msgSize;
- pMsgSendInfo->msgInfo.handle = trans->transHandle;
+ pMsgSendInfo->msgInfo.handle = trans->pHandle;
pMsgSendInfo->msgType = msgType;
- qDebug("start to send %s msg to node[%d,%s,%d], refId:%" PRIx64 "instance:%p, handle:%p", TMSG_INFO(msgType),
+ qDebug("start to send %s msg to node[%d,%s,%d], refId:%" PRIx64 "pTrans:%p, pHandle:%p", TMSG_INFO(msgType),
ntohl(((SMsgHead *)msg)->vgId), epSet->eps[epSet->inUse].fqdn, epSet->eps[epSet->inUse].port, pJob->refId,
- trans->transInst, trans->transHandle);
+ trans->pTrans, trans->pHandle);
int64_t transporterId = 0;
- code = asyncSendMsgToServerExt(trans->transInst, epSet, &transporterId, pMsgSendInfo, persistHandle, ctx);
+ code = asyncSendMsgToServerExt(trans->pTrans, epSet, &transporterId, pMsgSendInfo, persistHandle, ctx);
if (code) {
SCH_ERR_JRET(code);
}
@@ -1119,24 +1082,6 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr,
persistHandle = true;
break;
}
-
- case TDMT_VND_RES_READY: {
- msgSize = sizeof(SResReadyReq);
- msg = taosMemoryCalloc(1, msgSize);
- if (NULL == msg) {
- SCH_TASK_ELOG("calloc %d failed", msgSize);
- SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
- }
-
- SResReadyReq *pMsg = msg;
-
- pMsg->header.vgId = htonl(addr->nodeId);
-
- pMsg->sId = htobe64(schMgmt.sId);
- pMsg->queryId = htobe64(pJob->queryId);
- pMsg->taskId = htobe64(pTask->taskId);
- break;
- }
case TDMT_VND_FETCH: {
msgSize = sizeof(SResFetchReq);
msg = taosMemoryCalloc(1, msgSize);
@@ -1208,12 +1153,12 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr,
SCH_SET_TASK_LASTMSG_TYPE(pTask, msgType);
- SSchTrans trans = {.transInst = pJob->transport, .transHandle = SCH_GET_TASK_HANDLE(pTask)};
+ SSchTrans trans = {.pTrans = pJob->pTrans, .pHandle = SCH_GET_TASK_HANDLE(pTask)};
SCH_ERR_JRET(schAsyncSendMsg(pJob, pTask, &trans, &epSet, msgType, msg, msgSize, persistHandle,
(rpcCtx.args ? &rpcCtx : NULL)));
if (msgType == TDMT_VND_QUERY) {
- SCH_ERR_RET(schRecordTaskExecNode(pJob, pTask, addr, trans.transHandle));
+ SCH_ERR_RET(schRecordTaskExecNode(pJob, pTask, addr, trans.pHandle));
}
return TSDB_CODE_SUCCESS;
diff --git a/source/libs/scheduler/src/schUtil.c b/source/libs/scheduler/src/schUtil.c
index 57a86ba125b5cfcc2df39e8c42cff91b6f189509..81c95ea976e0c685fa1585df6dbb42bed75fd0c8 100644
--- a/source/libs/scheduler/src/schUtil.c
+++ b/source/libs/scheduler/src/schUtil.c
@@ -41,7 +41,7 @@ uint64_t schGenUUID(void) {
static int32_t requestSerialId = 0;
if (hashId == 0) {
- char uid[64];
+ char uid[64] = {0};
int32_t code = taosGetSystemUUID(uid, tListLen(uid));
if (code != TSDB_CODE_SUCCESS) {
qError("Failed to get the system uid, reason:%s", tstrerror(TAOS_SYSTEM_ERROR(errno)));
@@ -66,6 +66,7 @@ void schFreeRpcCtxVal(const void *arg) {
SMsgSendInfo *pMsgSendInfo = (SMsgSendInfo *)arg;
taosMemoryFreeClear(pMsgSendInfo->param);
+ taosMemoryFreeClear(pMsgSendInfo->msgInfo.pData);
taosMemoryFreeClear(pMsgSendInfo);
}
@@ -77,16 +78,14 @@ void schFreeRpcCtx(SRpcCtx *pCtx) {
while (pIter) {
SRpcCtxVal *ctxVal = (SRpcCtxVal *)pIter;
- (*ctxVal->freeFunc)(ctxVal->val);
+ (*pCtx->freeFunc)(ctxVal->val);
pIter = taosHashIterate(pCtx->args, pIter);
}
taosHashCleanup(pCtx->args);
- if (pCtx->brokenVal.freeFunc) {
- (*pCtx->brokenVal.freeFunc)(pCtx->brokenVal.val);
- }
+ (*pCtx->freeFunc)(pCtx->brokenVal.val);
}
diff --git a/source/libs/scheduler/src/scheduler.c b/source/libs/scheduler/src/scheduler.c
index bd2c7e5b4926d1f2fe17252713b812ebff8c4b49..522bd8044d43709f9139092dd705ab123c7799ea 100644
--- a/source/libs/scheduler/src/scheduler.c
+++ b/source/libs/scheduler/src/scheduler.c
@@ -67,50 +67,24 @@ int32_t schedulerInit(SSchedulerCfg *cfg) {
return TSDB_CODE_SUCCESS;
}
-int32_t schedulerExecJob(void *transport, SArray *nodeList, SQueryPlan *pDag, int64_t *pJob, const char *sql,
+int32_t schedulerExecJob(void *pTrans, SArray *pNodeList, SQueryPlan *pDag, int64_t *pJob, const char *sql,
int64_t startTs, SQueryResult *pRes) {
- if (NULL == transport || NULL == pDag || NULL == pDag->pSubplans || NULL == pJob || NULL == pRes) {
+ if (NULL == pTrans || NULL == pDag || NULL == pDag->pSubplans || NULL == pJob || NULL == pRes) {
SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
}
- int32_t code = 0;
-
- *pJob = 0;
-
- if (EXPLAIN_MODE_STATIC == pDag->explainInfo.mode) {
- SCH_ERR_RET(schExecStaticExplain(transport, nodeList, pDag, pJob, sql, true));
- } else {
- SCH_ERR_JRET(schExecJobImpl(transport, nodeList, pDag, pJob, sql, startTs, true));
- }
-
-_return:
-
- if (*pJob) {
- SSchJob *job = schAcquireJob(*pJob);
-
- pRes->code = atomic_load_32(&job->errCode);
- pRes->numOfRows = job->resNumOfRows;
- pRes->res = job->queryRes;
- job->queryRes = NULL;
-
- schReleaseJob(*pJob);
- }
-
- return code;
+ SSchResInfo resInfo = {.queryRes = pRes};
+ SCH_RET(schExecJob(pTrans, pNodeList, pDag, pJob, sql, startTs, &resInfo));
}
-int32_t schedulerAsyncExecJob(void *transport, SArray *pNodeList, SQueryPlan *pDag, const char *sql, int64_t *pJob) {
- if (NULL == transport || NULL == pDag || NULL == pDag->pSubplans || NULL == pJob) {
- SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
- }
-
- if (EXPLAIN_MODE_STATIC == pDag->explainInfo.mode) {
- SCH_ERR_RET(schExecStaticExplain(transport, pNodeList, pDag, pJob, sql, false));
- } else {
- SCH_ERR_RET(schExecJobImpl(transport, pNodeList, pDag, pJob, sql, 0, false));
- }
-
- return TSDB_CODE_SUCCESS;
+int32_t schedulerAsyncExecJob(void *pTrans, SArray *pNodeList, SQueryPlan *pDag, int64_t *pJob, const char *sql,
+ int64_t startTs, schedulerExecCallback fp, void* param) {
+ if (NULL == pTrans || NULL == pDag || NULL == pDag->pSubplans || NULL == pJob || NULL == fp || NULL == param) {
+ SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
+ }
+
+ SSchResInfo resInfo = {.execFp = fp, .userParam = param};
+ SCH_RET(schAsyncExecJob(pTrans, pNodeList, pDag, pJob, sql, startTs, &resInfo));
}
int32_t schedulerFetchRows(int64_t job, void **pData) {
@@ -125,76 +99,32 @@ int32_t schedulerFetchRows(int64_t job, void **pData) {
SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
}
- int8_t status = SCH_GET_JOB_STATUS(pJob);
- if (status == JOB_TASK_STATUS_DROPPING) {
- SCH_JOB_ELOG("job is dropping, status:%s", jobTaskStatusStr(status));
- schReleaseJob(job);
- SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
- }
-
- if (!SCH_JOB_NEED_FETCH(pJob)) {
- SCH_JOB_ELOG("no need to fetch data, status:%s", SCH_GET_JOB_STATUS_STR(pJob));
- schReleaseJob(job);
- SCH_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
- }
-
- if (atomic_val_compare_exchange_8(&pJob->userFetch, 0, 1) != 0) {
- SCH_JOB_ELOG("prior fetching not finished, userFetch:%d", atomic_load_8(&pJob->userFetch));
- schReleaseJob(job);
- SCH_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
- }
-
- if (JOB_TASK_STATUS_FAILED == status || JOB_TASK_STATUS_DROPPING == status) {
- SCH_JOB_ELOG("job failed or dropping, status:%s", jobTaskStatusStr(status));
- SCH_ERR_JRET(atomic_load_32(&pJob->errCode));
- } else if (status == JOB_TASK_STATUS_SUCCEED) {
- SCH_JOB_DLOG("job already succeed, status:%s", jobTaskStatusStr(status));
- goto _return;
- } else if (status == JOB_TASK_STATUS_PARTIAL_SUCCEED) {
- if (!(pJob->attr.explainMode == EXPLAIN_MODE_STATIC)) {
- SCH_ERR_JRET(schFetchFromRemote(pJob));
- tsem_wait(&pJob->rspSem);
- }
- } else {
- SCH_JOB_ELOG("job status error for fetch, status:%s", jobTaskStatusStr(status));
- SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR);
- }
+ pJob->attr.syncSchedule = true;
+ pJob->userRes.fetchRes = pData;
+ code = schFetchRows(pJob);
- status = SCH_GET_JOB_STATUS(pJob);
-
- if (JOB_TASK_STATUS_FAILED == status || JOB_TASK_STATUS_DROPPING == status) {
- SCH_JOB_ELOG("job failed or dropping, status:%s", jobTaskStatusStr(status));
- SCH_ERR_JRET(atomic_load_32(&pJob->errCode));
- }
-
- if (pJob->resData && ((SRetrieveTableRsp *)pJob->resData)->completed) {
- SCH_ERR_JRET(schChkUpdateJobStatus(pJob, JOB_TASK_STATUS_SUCCEED));
- }
+ schReleaseJob(job);
- while (true) {
- *pData = atomic_load_ptr(&pJob->resData);
- if (*pData != atomic_val_compare_exchange_ptr(&pJob->resData, *pData, NULL)) {
- continue;
- }
+ SCH_RET(code);
+}
- break;
+int32_t schedulerAsyncFetchRows(int64_t job, schedulerFetchCallback fp, void* param) {
+ if (NULL == fp || NULL == param) {
+ SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
}
- if (NULL == *pData) {
- SRetrieveTableRsp *rsp = (SRetrieveTableRsp *)taosMemoryCalloc(1, sizeof(SRetrieveTableRsp));
- if (rsp) {
- rsp->completed = 1;
- }
-
- *pData = rsp;
- SCH_JOB_DLOG("empty res and set query complete, code:%x", code);
+ int32_t code = 0;
+ SSchJob *pJob = schAcquireJob(job);
+ if (NULL == pJob) {
+ qError("acquire job from jobRef list failed, may be dropped, refId:%" PRIx64, job);
+ SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
}
- SCH_JOB_DLOG("fetch done, totalRows:%d, code:%s", pJob->resNumOfRows, tstrerror(code));
-
-_return:
-
- atomic_val_compare_exchange_8(&pJob->userFetch, 1, 0);
+ pJob->attr.syncSchedule = false;
+ pJob->userRes.fetchFp = fp;
+ pJob->userRes.userParam = param;
+
+ code = schAsyncFetchRows(pJob);
schReleaseJob(job);
@@ -211,7 +141,7 @@ int32_t schedulerGetTasksStatus(int64_t job, SArray *pSub) {
if (pJob->status < JOB_TASK_STATUS_NOT_START || pJob->levelNum <= 0 || NULL == pJob->levels) {
qDebug("job not initialized or not executable job, refId:%" PRIx64, job);
- SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
+ SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR);
}
for (int32_t i = pJob->levelNum - 1; i >= 0; --i) {
@@ -225,7 +155,11 @@ int32_t schedulerGetTasksStatus(int64_t job, SArray *pSub) {
}
}
- return TSDB_CODE_SUCCESS;
+_return:
+
+ schReleaseJob(job);
+
+ SCH_RET(code);
}
int32_t scheduleCancelJob(int64_t job) {
diff --git a/source/libs/scheduler/test/schedulerTests.cpp b/source/libs/scheduler/test/schedulerTests.cpp
index fc0e05aaf106fb11d8daa9be9a55e510aac58ff5..4bf114ad8febb30c4fac89a391f8e0bc3389a60c 100644
--- a/source/libs/scheduler/test/schedulerTests.cpp
+++ b/source/libs/scheduler/test/schedulerTests.cpp
@@ -79,7 +79,7 @@ void schtInitLogFile() {
tsAsyncLog = 0;
qDebugFlag = 159;
- strcpy(tsLogDir, "/var/log/taos");
+ strcpy(tsLogDir, TD_LOG_DIR_PATH);
if (taosInitLog(defaultLogFileNamePrefix, maxLogFileNum) < 0) {
printf("failed to open log file in directory:%s\n", tsLogDir);
@@ -87,6 +87,11 @@ void schtInitLogFile() {
}
+void schtQueryCb(SQueryResult* pResult, void* param, int32_t code) {
+ assert(TSDB_CODE_SUCCESS == code);
+ *(int32_t*)param = 1;
+}
+
void schtBuildQueryDag(SQueryPlan *dag) {
uint64_t qId = schtQueryId;
@@ -485,6 +490,7 @@ void* schtRunJobThread(void *aa) {
SHashObj *execTasks = NULL;
SDataBuf dataBuf = {0};
uint32_t jobFinished = 0;
+ int32_t queryDone = 0;
while (!schtTestStop) {
schtBuildQueryDag(&dag);
@@ -496,7 +502,8 @@ void* schtRunJobThread(void *aa) {
qnodeAddr.port = 6031;
taosArrayPush(qnodeList, &qnodeAddr);
- code = schedulerAsyncExecJob(mockPointer, qnodeList, &dag, "select * from tb", &queryJobRefId);
+ queryDone = 0;
+ code = schedulerAsyncExecJob(mockPointer, qnodeList, &dag, &queryJobRefId, "select * from tb", 0, schtQueryCb, &queryDone);
assert(code == 0);
pJob = schAcquireJob(queryJobRefId);
@@ -535,27 +542,6 @@ void* schtRunJobThread(void *aa) {
pIter = taosHashIterate(execTasks, pIter);
}
-
- param = (SSchTaskCallbackParam *)taosMemoryCalloc(1, sizeof(*param));
- param->refId = queryJobRefId;
- param->queryId = pJob->queryId;
-
- pIter = taosHashIterate(execTasks, NULL);
- while (pIter) {
- SSchTask *task = (SSchTask *)pIter;
-
- param->taskId = task->taskId;
- SResReadyRsp rsp = {0};
- dataBuf.pData = &rsp;
- dataBuf.len = sizeof(rsp);
-
- code = schHandleCallback(param, &dataBuf, TDMT_VND_RES_READY_RSP, 0);
- assert(code == 0 || code);
-
- pIter = taosHashIterate(execTasks, pIter);
- }
-
-
param = (SSchTaskCallbackParam *)taosMemoryCalloc(1, sizeof(*param));
param->refId = queryJobRefId;
param->queryId = pJob->queryId;
@@ -576,24 +562,13 @@ void* schtRunJobThread(void *aa) {
}
- param = (SSchTaskCallbackParam *)taosMemoryCalloc(1, sizeof(*param));
- param->refId = queryJobRefId;
- param->queryId = pJob->queryId;
-
- pIter = taosHashIterate(execTasks, NULL);
- while (pIter) {
- SSchTask *task = (SSchTask *)pIter;
-
- param->taskId = task->taskId - 1;
- SResReadyRsp rsp = {0};
- dataBuf.pData = &rsp;
- dataBuf.len = sizeof(rsp);
-
- code = schHandleCallback(param, &dataBuf, TDMT_VND_RES_READY_RSP, 0);
- assert(code == 0 || code);
-
- pIter = taosHashIterate(execTasks, pIter);
- }
+ while (true) {
+ if (queryDone) {
+ break;
+ }
+
+ taosUsleep(10000);
+ }
atomic_store_32(&schtStartFetch, 1);
@@ -667,8 +642,9 @@ TEST(queryTest, normalCase) {
schtSetPlanToString();
schtSetExecNode();
schtSetAsyncSendMsgToServer();
-
- code = schedulerAsyncExecJob(mockPointer, qnodeList, &dag, "select * from tb", &job);
+
+ int32_t queryDone = 0;
+ code = schedulerAsyncExecJob(mockPointer, qnodeList, &dag, &job, "select * from tb", 0, schtQueryCb, &queryDone);
ASSERT_EQ(code, 0);
@@ -685,17 +661,6 @@ TEST(queryTest, normalCase) {
pIter = taosHashIterate(pJob->execTasks, pIter);
}
- pIter = taosHashIterate(pJob->execTasks, NULL);
- while (pIter) {
- SSchTask *task = *(SSchTask **)pIter;
-
- SResReadyRsp rsp = {0};
- code = schHandleResponseMsg(pJob, task, TDMT_VND_RES_READY_RSP, (char *)&rsp, sizeof(rsp), 0);
- printf("code:%d", code);
- ASSERT_EQ(code, 0);
- pIter = taosHashIterate(pJob->execTasks, pIter);
- }
-
pIter = taosHashIterate(pJob->execTasks, NULL);
while (pIter) {
SSchTask *task = *(SSchTask **)pIter;
@@ -707,17 +672,14 @@ TEST(queryTest, normalCase) {
pIter = taosHashIterate(pJob->execTasks, pIter);
}
- pIter = taosHashIterate(pJob->execTasks, NULL);
- while (pIter) {
- SSchTask *task = *(SSchTask **)pIter;
-
- SResReadyRsp rsp = {0};
- code = schHandleResponseMsg(pJob, task, TDMT_VND_RES_READY_RSP, (char *)&rsp, sizeof(rsp), 0);
- ASSERT_EQ(code, 0);
-
- pIter = taosHashIterate(pJob->execTasks, pIter);
- }
+ while (true) {
+ if (queryDone) {
+ break;
+ }
+ taosUsleep(10000);
+ }
+
TdThreadAttr thattr;
taosThreadAttrInit(&thattr);
@@ -773,25 +735,15 @@ TEST(queryTest, readyFirstCase) {
schtSetPlanToString();
schtSetExecNode();
schtSetAsyncSendMsgToServer();
-
- code = schedulerAsyncExecJob(mockPointer, qnodeList, &dag, "select * from tb", &job);
+
+ int32_t queryDone = 0;
+ code = schedulerAsyncExecJob(mockPointer, qnodeList, &dag, &job, "select * from tb", 0, schtQueryCb, &queryDone);
ASSERT_EQ(code, 0);
SSchJob *pJob = schAcquireJob(job);
-
- void *pIter = taosHashIterate(pJob->execTasks, NULL);
- while (pIter) {
- SSchTask *task = *(SSchTask **)pIter;
-
- SResReadyRsp rsp = {0};
- code = schHandleResponseMsg(pJob, task, TDMT_VND_RES_READY_RSP, (char *)&rsp, sizeof(rsp), 0);
- printf("code:%d", code);
- ASSERT_EQ(code, 0);
- pIter = taosHashIterate(pJob->execTasks, pIter);
- }
- pIter = taosHashIterate(pJob->execTasks, NULL);
+ void *pIter = taosHashIterate(pJob->execTasks, NULL);
while (pIter) {
SSchTask *task = *(SSchTask **)pIter;
@@ -802,17 +754,6 @@ TEST(queryTest, readyFirstCase) {
pIter = taosHashIterate(pJob->execTasks, pIter);
}
- pIter = taosHashIterate(pJob->execTasks, NULL);
- while (pIter) {
- SSchTask *task = *(SSchTask **)pIter;
-
- SResReadyRsp rsp = {0};
- code = schHandleResponseMsg(pJob, task, TDMT_VND_RES_READY_RSP, (char *)&rsp, sizeof(rsp), 0);
- ASSERT_EQ(code, 0);
-
- pIter = taosHashIterate(pJob->execTasks, pIter);
- }
-
pIter = taosHashIterate(pJob->execTasks, NULL);
while (pIter) {
SSchTask *task = *(SSchTask **)pIter;
@@ -824,6 +765,13 @@ TEST(queryTest, readyFirstCase) {
pIter = taosHashIterate(pJob->execTasks, pIter);
}
+ while (true) {
+ if (queryDone) {
+ break;
+ }
+
+ taosUsleep(10000);
+ }
TdThreadAttr thattr;
@@ -885,16 +833,17 @@ TEST(queryTest, flowCtrlCase) {
schtSetPlanToString();
schtSetExecNode();
schtSetAsyncSendMsgToServer();
-
- code = schedulerAsyncExecJob(mockPointer, qnodeList, &dag, "select * from tb", &job);
+
+ int32_t queryDone = 0;
+ code = schedulerAsyncExecJob(mockPointer, qnodeList, &dag, &job, "select * from tb", 0, schtQueryCb, &queryDone);
ASSERT_EQ(code, 0);
SSchJob *pJob = schAcquireJob(job);
- bool queryDone = false;
+ bool qDone = false;
- while (!queryDone) {
+ while (!qDone) {
void *pIter = taosHashIterate(pJob->execTasks, NULL);
if (NULL == pIter) {
break;
@@ -909,13 +858,9 @@ TEST(queryTest, flowCtrlCase) {
SQueryTableRsp rsp = {0};
code = schHandleResponseMsg(pJob, task, TDMT_VND_QUERY_RSP, (char *)&rsp, sizeof(rsp), 0);
- ASSERT_EQ(code, 0);
- } else if (task->lastMsgType == TDMT_VND_RES_READY) {
- SResReadyRsp rsp = {0};
- code = schHandleResponseMsg(pJob, task, TDMT_VND_RES_READY_RSP, (char *)&rsp, sizeof(rsp), 0);
ASSERT_EQ(code, 0);
} else {
- queryDone = true;
+ qDone = true;
break;
}
@@ -923,6 +868,13 @@ TEST(queryTest, flowCtrlCase) {
}
}
+ while (true) {
+ if (queryDone) {
+ break;
+ }
+
+ taosUsleep(10000);
+ }
TdThreadAttr thattr;
taosThreadAttrInit(&thattr);
diff --git a/source/libs/stream/src/tstream.c b/source/libs/stream/src/tstream.c
index 0acec0e4e6102e0a5622abd166bd4c4025d36f69..7d406a71441faa4c8c8b15ed1b27164da6ed6f4e 100644
--- a/source/libs/stream/src/tstream.c
+++ b/source/libs/stream/src/tstream.c
@@ -35,6 +35,24 @@ void* streamDataBlockDecode(const void* buf, SStreamDataBlock* pInput) {
return (void*)buf;
}
+SStreamDataSubmit* streamSubmitRefClone(SStreamDataSubmit* pSubmit) {
+ SStreamDataSubmit* pSubmitClone = taosAllocateQitem(sizeof(SStreamDataSubmit), DEF_QITEM);
+ if (pSubmitClone == NULL) {
+ return NULL;
+ }
+ streamDataSubmitRefInc(pSubmit);
+ memcpy(pSubmitClone, pSubmit, sizeof(SStreamDataSubmit));
+ return pSubmitClone;
+}
+
+static int32_t streamBuildDispatchMsg(SStreamTask* pTask, SArray* data, SRpcMsg* pMsg, SEpSet** ppEpSet) {
+ SStreamDispatchReq req = {
+ .streamId = pTask->streamId,
+ .data = data,
+ };
+ return 0;
+}
+
static int32_t streamBuildExecMsg(SStreamTask* pTask, SArray* data, SRpcMsg* pMsg, SEpSet** ppEpSet) {
SStreamTaskExecReq req = {
.streamId = pTask->streamId,
@@ -59,7 +77,7 @@ static int32_t streamBuildExecMsg(SStreamTask* pTask, SArray* data, SRpcMsg* pMs
} else if (pTask->dispatchType == TASK_DISPATCH__SHUFFLE) {
// TODO use general name rule of schemaless
- char ctbName[TSDB_TABLE_FNAME_LEN + 22];
+ char ctbName[TSDB_TABLE_FNAME_LEN + 22] = {0};
// all groupId must be the same in an array
SSDataBlock* pBlock = taosArrayGet(data, 0);
sprintf(ctbName, "%s:%ld", pTask->shuffleDispatcher.stbFullName, pBlock->info.groupId);
@@ -141,13 +159,13 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, void* data, SArray* pRes)
SStreamDataSubmit* pSubmit = (SStreamDataSubmit*)data;
ASSERT(pSubmit->type == STREAM_INPUT__DATA_SUBMIT);
- qSetStreamInput(exec, pSubmit->data, STREAM_DATA_TYPE_SUBMIT_BLOCK);
+ qSetStreamInput(exec, pSubmit->data, STREAM_DATA_TYPE_SUBMIT_BLOCK, false);
} else if (pTask->inputType == STREAM_INPUT__DATA_BLOCK) {
SStreamDataBlock* pBlock = (SStreamDataBlock*)data;
ASSERT(pBlock->type == STREAM_INPUT__DATA_BLOCK);
SArray* blocks = pBlock->blocks;
- qSetMultiStreamInput(exec, blocks->pData, blocks->size, STREAM_DATA_TYPE_SSDATA_BLOCK);
+ qSetMultiStreamInput(exec, blocks->pData, blocks->size, STREAM_DATA_TYPE_SSDATA_BLOCK, false);
}
// exec
@@ -158,109 +176,64 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, void* data, SArray* pRes)
ASSERT(false);
}
if (output == NULL) break;
- taosArrayPush(pRes, output);
+ // TODO: do we need free memory?
+ SSDataBlock* outputCopy = createOneDataBlock(output, true);
+ taosArrayPush(pRes, outputCopy);
}
// destroy
if (pTask->inputType == STREAM_INPUT__DATA_SUBMIT) {
streamDataSubmitRefDec((SStreamDataSubmit*)data);
+ taosFreeQitem(data);
} else {
taosArrayDestroyEx(((SStreamDataBlock*)data)->blocks, (FDelete)tDeleteSSDataBlock);
+ taosFreeQitem(data);
}
return 0;
}
+static SArray* streamExecForQall(SStreamTask* pTask, SArray* pRes) {
+ while (1) {
+ void* data = NULL;
+ taosGetQitem(pTask->inputQAll, &data);
+ if (data == NULL) break;
+
+ streamTaskExecImpl(pTask, data, pRes);
+
+ if (taosArrayGetSize(pRes) != 0) {
+ SStreamDataBlock* qRes = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);
+ qRes->type = STREAM_INPUT__DATA_BLOCK;
+ qRes->blocks = pRes;
+ taosWriteQitem(pTask->outputQ, qRes);
+ return taosArrayInit(0, sizeof(SSDataBlock));
+ }
+ }
+ return pRes;
+}
+
// TODO: handle version
int32_t streamExec(SStreamTask* pTask, SMsgCb* pMsgCb) {
SArray* pRes = taosArrayInit(0, sizeof(SSDataBlock));
if (pRes == NULL) return -1;
while (1) {
int8_t execStatus = atomic_val_compare_exchange_8(&pTask->status, TASK_STATUS__IDLE, TASK_STATUS__EXECUTING);
- void* exec = pTask->exec.executor;
if (execStatus == TASK_STATUS__IDLE) {
// first run, from qall, handle failure from last exec
- while (1) {
- void* data = NULL;
- taosGetQitem(pTask->inputQAll, &data);
- if (data == NULL) break;
-
- streamTaskExecImpl(pTask, data, pRes);
-
- taosFreeQitem(data);
-
- if (taosArrayGetSize(pRes) != 0) {
- SStreamDataBlock* resQ = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);
- resQ->type = STREAM_INPUT__DATA_BLOCK;
- resQ->blocks = pRes;
- taosWriteQitem(pTask->outputQ, resQ);
- pRes = taosArrayInit(0, sizeof(SSDataBlock));
- if (pRes == NULL) goto FAIL;
- }
- }
+ pRes = streamExecForQall(pTask, pRes);
+ if (pRes == NULL) goto FAIL;
+
// second run, from inputQ
taosReadAllQitems(pTask->inputQ, pTask->inputQAll);
- while (1) {
- void* data = NULL;
- taosGetQitem(pTask->inputQAll, &data);
- if (data == NULL) break;
-
- streamTaskExecImpl(pTask, data, pRes);
-
- taosFreeQitem(data);
-
- if (taosArrayGetSize(pRes) != 0) {
- SStreamDataBlock* resQ = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);
- resQ->type = STREAM_INPUT__DATA_BLOCK;
- resQ->blocks = pRes;
- taosWriteQitem(pTask->outputQ, resQ);
- pRes = taosArrayInit(0, sizeof(SSDataBlock));
- if (pRes == NULL) goto FAIL;
- }
- }
- // set status closing
- atomic_store_8(&pTask->status, TASK_STATUS__CLOSING);
- // third run, make sure all inputQ is cleared
- taosReadAllQitems(pTask->inputQ, pTask->inputQAll);
- while (1) {
- void* data = NULL;
- taosGetQitem(pTask->inputQAll, &data);
- if (data == NULL) break;
-
- streamTaskExecImpl(pTask, data, pRes);
-
- taosFreeQitem(data);
-
- if (taosArrayGetSize(pRes) != 0) {
- SStreamDataBlock* resQ = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);
- resQ->type = STREAM_INPUT__DATA_BLOCK;
- resQ->blocks = pRes;
- taosWriteQitem(pTask->outputQ, resQ);
- pRes = taosArrayInit(0, sizeof(SSDataBlock));
- if (pRes == NULL) goto FAIL;
- }
- }
+ pRes = streamExecForQall(pTask, pRes);
+ if (pRes == NULL) goto FAIL;
+
// set status closing
atomic_store_8(&pTask->status, TASK_STATUS__CLOSING);
- // third run, make sure all inputQ is cleared
+
+ // third run, make sure inputQ and qall are cleared
taosReadAllQitems(pTask->inputQ, pTask->inputQAll);
- while (1) {
- void* data = NULL;
- taosGetQitem(pTask->inputQAll, &data);
- if (data == NULL) break;
-
- streamTaskExecImpl(pTask, data, pRes);
-
- taosFreeQitem(data);
-
- if (taosArrayGetSize(pRes) != 0) {
- SStreamDataBlock* resQ = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);
- resQ->type = STREAM_INPUT__DATA_BLOCK;
- resQ->blocks = pRes;
- taosWriteQitem(pTask->outputQ, resQ);
- pRes = taosArrayInit(0, sizeof(SSDataBlock));
- if (pRes == NULL) goto FAIL;
- }
- }
+ pRes = streamExecForQall(pTask, pRes);
+ if (pRes == NULL) goto FAIL;
atomic_store_8(&pTask->status, TASK_STATUS__IDLE);
break;
@@ -322,13 +295,13 @@ int32_t streamSink(SStreamTask* pTask, SMsgCb* pMsgCb) {
}
int32_t qType;
- if (pTask->dispatchMsgType == TDMT_VND_TASK_PIPE_EXEC || pTask->dispatchMsgType == TDMT_SND_TASK_PIPE_EXEC) {
+ if (pTask->dispatchMsgType == TDMT_VND_TASK_DISPATCH || pTask->dispatchMsgType == TDMT_SND_TASK_DISPATCH) {
qType = FETCH_QUEUE;
- } else if (pTask->dispatchMsgType == TDMT_VND_TASK_MERGE_EXEC ||
- pTask->dispatchMsgType == TDMT_SND_TASK_MERGE_EXEC) {
- qType = MERGE_QUEUE;
- } else if (pTask->dispatchMsgType == TDMT_VND_TASK_WRITE_EXEC) {
- qType = WRITE_QUEUE;
+ /*} else if (pTask->dispatchMsgType == TDMT_VND_TASK_MERGE_EXEC ||*/
+ /*pTask->dispatchMsgType == TDMT_SND_TASK_MERGE_EXEC) {*/
+ /*qType = MERGE_QUEUE;*/
+ /*} else if (pTask->dispatchMsgType == TDMT_VND_TASK_WRITE_EXEC) {*/
+ /*qType = WRITE_QUEUE;*/
} else {
ASSERT(0);
}
@@ -451,6 +424,26 @@ int32_t streamProcessRecoverRsp(SStreamTask* pTask, SStreamTaskRecoverRsp* pRsp)
return 0;
}
+int32_t tEncodeStreamDispatchReq(SEncoder* pEncoder, const SStreamDispatchReq* pReq) {
+ if (tStartEncode(pEncoder) < 0) return -1;
+ if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1;
+ if (tEncodeI32(pEncoder, pReq->taskId) < 0) return -1;
+ if (tEncodeI32(pEncoder, pReq->sourceTaskId) < 0) return -1;
+ if (tEncodeI32(pEncoder, pReq->sourceVg) < 0) return -1;
+ tEndEncode(pEncoder);
+ return 0;
+}
+
+int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq) {
+ if (tStartDecode(pDecoder) < 0) return -1;
+ if (tDecodeI64(pDecoder, &pReq->streamId) < 0) return -1;
+ if (tDecodeI32(pDecoder, &pReq->taskId) < 0) return -1;
+ if (tDecodeI32(pDecoder, &pReq->sourceTaskId) < 0) return -1;
+ if (tDecodeI32(pDecoder, &pReq->sourceVg) < 0) return -1;
+ tEndDecode(pDecoder);
+ return 0;
+}
+
int32_t tEncodeSStreamTaskExecReq(void** buf, const SStreamTaskExecReq* pReq) {
int32_t tlen = 0;
tlen += taosEncodeFixedI64(buf, pReq->streamId);
@@ -502,6 +495,7 @@ int32_t tEncodeSStreamTask(SEncoder* pEncoder, const SStreamTask* pTask) {
if (pTask->sinkType == TASK_SINK__TABLE) {
if (tEncodeI64(pEncoder, pTask->tbSink.stbUid) < 0) return -1;
+ if (tEncodeCStr(pEncoder, pTask->tbSink.stbFullName) < 0) return -1;
if (tEncodeSSchemaWrapper(pEncoder, pTask->tbSink.pSchemaWrapper) < 0) return -1;
} else if (pTask->sinkType == TASK_SINK__SMA) {
if (tEncodeI64(pEncoder, pTask->smaSink.smaId) < 0) return -1;
@@ -548,6 +542,7 @@ int32_t tDecodeSStreamTask(SDecoder* pDecoder, SStreamTask* pTask) {
if (pTask->sinkType == TASK_SINK__TABLE) {
if (tDecodeI64(pDecoder, &pTask->tbSink.stbUid) < 0) return -1;
+ if (tDecodeCStrTo(pDecoder, pTask->tbSink.stbFullName) < 0) return -1;
pTask->tbSink.pSchemaWrapper = taosMemoryCalloc(1, sizeof(SSchemaWrapper));
if (pTask->tbSink.pSchemaWrapper == NULL) return -1;
if (tDecodeSSchemaWrapper(pDecoder, pTask->tbSink.pSchemaWrapper) < 0) return -1;
diff --git a/source/libs/stream/src/tstreamUpdate.c b/source/libs/stream/src/tstreamUpdate.c
index d21dadfe559d3dca81b34e3f6ade817ab1278a3b..ada391b40a76af148e07789375a756a6590648b3 100644
--- a/source/libs/stream/src/tstreamUpdate.c
+++ b/source/libs/stream/src/tstreamUpdate.c
@@ -42,7 +42,7 @@ static void windowSBfAdd(SUpdateInfo *pInfo, uint64_t count) {
}
static void windowSBfDelete(SUpdateInfo *pInfo, uint64_t count) {
- if (count < pInfo->numSBFs - 1) {
+ if (count < pInfo->numSBFs) {
for (uint64_t i = 0; i < count; ++i) {
SScalableBf *pTsSBFs = taosArrayGetP(pInfo->pTsSBFs, 0);
tScalableBfDestroy(pTsSBFs);
@@ -72,12 +72,14 @@ static int64_t adjustInterval(int64_t interval, int32_t precision) {
return val;
}
-static int64_t adjustWatermark(int64_t interval, int64_t watermark) {
- if (watermark <= 0 || watermark > MAX_NUM_SCALABLE_BF * interval) {
- watermark = MAX_NUM_SCALABLE_BF * interval;
- } else if (watermark < MIN_NUM_SCALABLE_BF * interval) {
- watermark = MIN_NUM_SCALABLE_BF * interval;
- }
+static int64_t adjustWatermark(int64_t adjInterval, int64_t originInt, int64_t watermark) {
+ if (watermark <= adjInterval) {
+ watermark = TMAX(originInt/adjInterval, 1) * adjInterval;
+ } else if (watermark > MAX_NUM_SCALABLE_BF * adjInterval) {
+ watermark = MAX_NUM_SCALABLE_BF * adjInterval;
+ }/* else if (watermark < MIN_NUM_SCALABLE_BF * adjInterval) {
+ watermark = MIN_NUM_SCALABLE_BF * adjInterval;
+ }*/ // Todo(liuyao) save window info to tdb
return watermark;
}
@@ -94,7 +96,7 @@ SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t waterma
pInfo->pTsSBFs = NULL;
pInfo->minTS = -1;
pInfo->interval = adjustInterval(interval, precision);
- pInfo->watermark = adjustWatermark(pInfo->interval, watermark);
+ pInfo->watermark = adjustWatermark(pInfo->interval, interval, watermark);
uint64_t bfSize = (uint64_t)(pInfo->watermark / pInfo->interval);
@@ -127,7 +129,10 @@ static SScalableBf *getSBf(SUpdateInfo *pInfo, TSKEY ts) {
if (pInfo->minTS < 0) {
pInfo->minTS = (TSKEY)(ts / pInfo->interval * pInfo->interval);
}
- uint64_t index = (uint64_t)((ts - pInfo->minTS) / pInfo->interval);
+ int64_t index = (int64_t)((ts - pInfo->minTS) / pInfo->interval);
+ if (index < 0) {
+ return NULL;
+ }
if (index >= pInfo->numSBFs) {
uint64_t count = index + 1 - pInfo->numSBFs;
windowSBfDelete(pInfo, count);
@@ -146,13 +151,18 @@ static SScalableBf *getSBf(SUpdateInfo *pInfo, TSKEY ts) {
bool updateInfoIsUpdated(SUpdateInfo *pInfo, tb_uid_t tableId, TSKEY ts) {
int32_t res = TSDB_CODE_FAILED;
uint64_t index = ((uint64_t)tableId) % pInfo->numBuckets;
+ TSKEY maxTs = *(TSKEY *)taosArrayGet(pInfo->pTsBuckets, index);
+ if (ts < maxTs - pInfo->watermark) {
+ // this window has been closed.
+ return true;
+ }
+
SScalableBf *pSBf = getSBf(pInfo, ts);
// pSBf may be a null pointer
if (pSBf) {
res = tScalableBfPut(pSBf, &ts, sizeof(TSKEY));
}
- TSKEY maxTs = *(TSKEY *)taosArrayGet(pInfo->pTsBuckets, index);
if (maxTs < ts) {
taosArraySet(pInfo->pTsBuckets, index, &ts);
return false;
diff --git a/source/libs/sync/inc/syncInt.h b/source/libs/sync/inc/syncInt.h
index 9246041b815e401f1c7638e5cba07160048a36f4..4100aa021672602bd55738067febe80db7790e11 100644
--- a/source/libs/sync/inc/syncInt.h
+++ b/source/libs/sync/inc/syncInt.h
@@ -55,6 +55,8 @@ typedef struct SVotesRespond SVotesRespond;
typedef struct SSyncIndexMgr SSyncIndexMgr;
typedef struct SRaftCfg SRaftCfg;
typedef struct SSyncRespMgr SSyncRespMgr;
+typedef struct SSyncSnapshotSender SSyncSnapshotSender;
+typedef struct SSyncSnapshotReceiver SSyncSnapshotReceiver;
typedef struct SSyncNode {
// init by SSyncInfo
@@ -148,9 +150,11 @@ typedef struct SSyncNode {
SSyncRespMgr* pSyncRespMgr;
// restore state
- bool restoreFinish;
- //sem_t restoreSem;
- SSnapshot* pSnapshot;
+ // sem_t restoreSem;
+ bool restoreFinish;
+ SSnapshot* pSnapshot;
+ SSyncSnapshotSender* pSender;
+ SSyncSnapshotReceiver* pReceiver;
} SSyncNode;
@@ -182,7 +186,7 @@ int32_t syncNodeSendMsgByInfo(const SNodeInfo* nodeInfo, SSyncNode* pSyncNode, S
cJSON* syncNode2Json(const SSyncNode* pSyncNode);
char* syncNode2Str(const SSyncNode* pSyncNode);
char* syncNode2SimpleStr(const SSyncNode* pSyncNode);
-void syncNodeUpdateConfig(SSyncNode* pSyncNode, SSyncCfg* newConfig);
+void syncNodeUpdateConfig(SSyncNode* pSyncNode, SSyncCfg* newConfig, bool* isDrop);
SSyncNode* syncNodeAcquire(int64_t rid);
void syncNodeRelease(SSyncNode* pNode);
diff --git a/source/libs/sync/inc/syncRaftCfg.h b/source/libs/sync/inc/syncRaftCfg.h
index bfc64cb7b6b02f4a693ccc82117f57c77bf7f82c..1061e8bdc4b248511eb3a580b76056cbc830f02b 100644
--- a/source/libs/sync/inc/syncRaftCfg.h
+++ b/source/libs/sync/inc/syncRaftCfg.h
@@ -27,10 +27,13 @@ extern "C" {
#include "syncInt.h"
#include "taosdef.h"
+#define CONFIG_FILE_LEN 1024
+
typedef struct SRaftCfg {
SSyncCfg cfg;
TdFilePtr pFile;
char path[TSDB_FILENAME_LEN * 2];
+ int8_t isStandBy;
} SRaftCfg;
SRaftCfg *raftCfgOpen(const char *path);
@@ -42,10 +45,12 @@ char * syncCfg2Str(SSyncCfg *pSyncCfg);
int32_t syncCfgFromJson(const cJSON *pRoot, SSyncCfg *pSyncCfg);
int32_t syncCfgFromStr(const char *s, SSyncCfg *pSyncCfg);
-cJSON *raftCfg2Json(SRaftCfg *pRaftCfg);
-char * raftCfg2Str(SRaftCfg *pRaftCfg);
+cJSON * raftCfg2Json(SRaftCfg *pRaftCfg);
+char * raftCfg2Str(SRaftCfg *pRaftCfg);
+int32_t raftCfgFromJson(const cJSON *pRoot, SRaftCfg *pRaftCfg);
+int32_t raftCfgFromStr(const char *s, SRaftCfg *pRaftCfg);
-int32_t syncCfgCreateFile(SSyncCfg *pCfg, const char *path);
+int32_t raftCfgCreateFile(SSyncCfg *pCfg, int8_t isStandBy, const char *path);
// for debug ----------------------
void syncCfgPrint(SSyncCfg *pCfg);
diff --git a/source/libs/sync/inc/syncRaftLog.h b/source/libs/sync/inc/syncRaftLog.h
index 7db62e14d597608f04fd313e597251ec2503f933..df5cd3f36c4138e608e70bd22972d54baff48a50 100644
--- a/source/libs/sync/inc/syncRaftLog.h
+++ b/source/libs/sync/inc/syncRaftLog.h
@@ -32,20 +32,21 @@ typedef struct SSyncLogStoreData {
SWal* pWal;
} SSyncLogStoreData;
-SSyncLogStore* logStoreCreate(SSyncNode* pSyncNode);
-void logStoreDestory(SSyncLogStore* pLogStore);
-int32_t logStoreAppendEntry(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry);
-SSyncRaftEntry* logStoreGetEntry(SSyncLogStore* pLogStore, SyncIndex index);
-int32_t logStoreTruncate(SSyncLogStore* pLogStore, SyncIndex fromIndex);
-SyncIndex logStoreLastIndex(SSyncLogStore* pLogStore);
-SyncTerm logStoreLastTerm(SSyncLogStore* pLogStore);
-int32_t logStoreUpdateCommitIndex(SSyncLogStore* pLogStore, SyncIndex index);
-SyncIndex logStoreGetCommitIndex(SSyncLogStore* pLogStore);
-SSyncRaftEntry* logStoreGetLastEntry(SSyncLogStore* pLogStore);
-cJSON* logStore2Json(SSyncLogStore* pLogStore);
-char* logStore2Str(SSyncLogStore* pLogStore);
-cJSON* logStoreSimple2Json(SSyncLogStore* pLogStore);
-char* logStoreSimple2Str(SSyncLogStore* pLogStore);
+SSyncLogStore* logStoreCreate(SSyncNode* pSyncNode);
+void logStoreDestory(SSyncLogStore* pLogStore);
+cJSON* logStore2Json(SSyncLogStore* pLogStore);
+char* logStore2Str(SSyncLogStore* pLogStore);
+cJSON* logStoreSimple2Json(SSyncLogStore* pLogStore);
+char* logStoreSimple2Str(SSyncLogStore* pLogStore);
+
+// SSyncRaftEntry* logStoreGetLastEntry(SSyncLogStore* pLogStore);
+// SyncIndex logStoreLastIndex(SSyncLogStore* pLogStore);
+// SyncTerm logStoreLastTerm(SSyncLogStore* pLogStore);
+// SSyncRaftEntry* logStoreGetEntry(SSyncLogStore* pLogStore, SyncIndex index);
+// int32_t logStoreAppendEntry(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry);
+// int32_t logStoreTruncate(SSyncLogStore* pLogStore, SyncIndex fromIndex);
+// int32_t logStoreUpdateCommitIndex(SSyncLogStore* pLogStore, SyncIndex index);
+// SyncIndex logStoreGetCommitIndex(SSyncLogStore* pLogStore);
// for debug
void logStorePrint(SSyncLogStore* pLogStore);
diff --git a/source/libs/sync/inc/syncSnapshot.h b/source/libs/sync/inc/syncSnapshot.h
index fd2119ce659b553124aa9a310c3790b29363628c..43d1c0c0c38bc9836fdb9e3210f141af44376700 100644
--- a/source/libs/sync/inc/syncSnapshot.h
+++ b/source/libs/sync/inc/syncSnapshot.h
@@ -23,11 +23,42 @@ extern "C" {
#include
#include
#include
+#include "cJSON.h"
#include "syncInt.h"
#include "taosdef.h"
-int32_t takeSnapshot(SSyncFSM *pFsm, SSnapshot *pSnapshot);
-int32_t restoreSnapshot(SSyncFSM *pFsm, SSnapshot *pSnapshot);
+typedef struct SSyncSnapshotSender {
+ int32_t sending;
+ int32_t received;
+ bool finish;
+ void * pCurrentBlock;
+ int32_t blockLen;
+ int64_t sendingMS;
+ SSnapshot *pSnapshot;
+ SSyncNode *pSyncNode;
+} SSyncSnapshotSender;
+
+SSyncSnapshotSender *snapshotSenderCreate(SSyncNode *pSyncNode);
+void snapshotSenderDestroy(SSyncSnapshotSender *pSender);
+int32_t snapshotSend(SSyncSnapshotSender *pSender);
+cJSON * snapshotSender2Json(SSyncSnapshotSender *pSender);
+char * snapshotSender2Str(SSyncSnapshotSender *pSender);
+
+typedef struct SSyncSnapshotReceiver {
+ bool start;
+ int32_t received;
+ int32_t progressIndex;
+ void * pCurrentBlock;
+ int32_t len;
+ SSnapshot *pSnapshot;
+ SSyncNode *pSyncNode;
+} SSyncSnapshotReceiver;
+
+SSyncSnapshotReceiver *snapshotReceiverCreate(SSyncNode *pSyncNode);
+void snapshotReceiverDestroy(SSyncSnapshotReceiver *pReceiver);
+int32_t snapshotReceive(SSyncSnapshotReceiver *pReceiver);
+cJSON * snapshotReceiver2Json(SSyncSnapshotReceiver *pReceiver);
+char * snapshotReceiver2Str(SSyncSnapshotReceiver *pReceiver);
#ifdef __cplusplus
}
diff --git a/source/libs/sync/inc/syncVoteMgr.h b/source/libs/sync/inc/syncVoteMgr.h
index 5bc240e9219a8bd1402683e1025ee15f32048e6b..716d2f620c09bdf0b842f7661e5f238d2821644f 100644
--- a/source/libs/sync/inc/syncVoteMgr.h
+++ b/source/libs/sync/inc/syncVoteMgr.h
@@ -42,6 +42,7 @@ typedef struct SVotesGranted {
SVotesGranted *voteGrantedCreate(SSyncNode *pSyncNode);
void voteGrantedDestroy(SVotesGranted *pVotesGranted);
+void voteGrantedUpdate(SVotesGranted *pVotesGranted, SSyncNode *pSyncNode);
bool voteGrantedMajority(SVotesGranted *pVotesGranted);
void voteGrantedVote(SVotesGranted *pVotesGranted, SyncRequestVoteReply *pMsg);
void voteGrantedReset(SVotesGranted *pVotesGranted, SyncTerm term);
@@ -65,6 +66,7 @@ typedef struct SVotesRespond {
SVotesRespond *votesRespondCreate(SSyncNode *pSyncNode);
void votesRespondDestory(SVotesRespond *pVotesRespond);
+void votesRespondUpdate(SVotesRespond *pVotesRespond, SSyncNode *pSyncNode);
bool votesResponded(SVotesRespond *pVotesRespond, const SRaftId *pRaftId);
void votesRespondAdd(SVotesRespond *pVotesRespond, const SyncRequestVoteReply *pMsg);
void votesRespondReset(SVotesRespond *pVotesRespond, SyncTerm term);
diff --git a/source/libs/sync/src/syncAppendEntries.c b/source/libs/sync/src/syncAppendEntries.c
index fa735e71c029e22d67e7b2681ff1fc7144527061..3afe7b15e213c0da3760c7a8ef1f313d145cd31f 100644
--- a/source/libs/sync/src/syncAppendEntries.c
+++ b/source/libs/sync/src/syncAppendEntries.c
@@ -89,7 +89,7 @@
int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
int32_t ret = 0;
- char logBuf[128];
+ char logBuf[128] = {0};
snprintf(logBuf, sizeof(logBuf), "==syncNodeOnAppendEntriesCb== term:%lu", ths->pRaftStore->currentTerm);
syncAppendEntriesLog2(logBuf, pMsg);
@@ -107,7 +107,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
SyncTerm localPreLogTerm = 0;
if (pMsg->prevLogIndex >= SYNC_INDEX_BEGIN && pMsg->prevLogIndex <= ths->pLogStore->getLastIndex(ths->pLogStore)) {
- SSyncRaftEntry* pEntry = logStoreGetEntry(ths->pLogStore, pMsg->prevLogIndex);
+ SSyncRaftEntry* pEntry = ths->pLogStore->getEntry(ths->pLogStore, pMsg->prevLogIndex);
assert(pEntry != NULL);
localPreLogTerm = pEntry->term;
syncEntryDestory(pEntry);
@@ -175,7 +175,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
bool conflict = false;
SyncIndex extraIndex = pMsg->prevLogIndex + 1;
- SSyncRaftEntry* pExtraEntry = logStoreGetEntry(ths->pLogStore, extraIndex);
+ SSyncRaftEntry* pExtraEntry = ths->pLogStore->getEntry(ths->pLogStore, extraIndex);
assert(pExtraEntry != NULL);
SSyncRaftEntry* pAppendEntry = syncEntryDeserialize(pMsg->data, pMsg->dataLen);
@@ -197,7 +197,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
// notice! reverse roll back!
for (SyncIndex index = delEnd; index >= delBegin; --index) {
if (ths->pFsm->FpRollBackCb != NULL) {
- SSyncRaftEntry* pRollBackEntry = logStoreGetEntry(ths->pLogStore, index);
+ SSyncRaftEntry* pRollBackEntry = ths->pLogStore->getEntry(ths->pLogStore, index);
assert(pRollBackEntry != NULL);
// if (pRollBackEntry->msgType != TDMT_VND_SYNC_NOOP) {
@@ -333,7 +333,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
cbMeta.seqNum = pEntry->seqNum;
cbMeta.term = pEntry->term;
cbMeta.currentTerm = ths->pRaftStore->currentTerm;
- ths->pFsm->FpCommitCb(ths->pFsm, &rpcMsg, cbMeta);
+ cbMeta.flag = 0x11;
bool needExecute = true;
if (ths->pSnapshot != NULL && cbMeta.index <= ths->pSnapshot->lastApplyIndex) {
@@ -347,23 +347,64 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
// config change
if (pEntry->originalRpcType == TDMT_VND_SYNC_CONFIG_CHANGE) {
+ SSyncCfg oldSyncCfg = ths->pRaftCfg->cfg;
+
SSyncCfg newSyncCfg;
int32_t ret = syncCfgFromStr(rpcMsg.pCont, &newSyncCfg);
ASSERT(ret == 0);
- syncNodeUpdateConfig(ths, &newSyncCfg);
- if (ths->state == TAOS_SYNC_STATE_LEADER) {
- syncNodeBecomeLeader(ths);
- } else {
- syncNodeBecomeFollower(ths);
+ // update new config myIndex
+ bool hit = false;
+ for (int i = 0; i < newSyncCfg.replicaNum; ++i) {
+ if (strcmp(ths->myNodeInfo.nodeFqdn, (newSyncCfg.nodeInfo)[i].nodeFqdn) == 0 &&
+ ths->myNodeInfo.nodePort == (newSyncCfg.nodeInfo)[i].nodePort) {
+ newSyncCfg.myIndex = i;
+ hit = true;
+ break;
+ }
+ }
+
+ SReConfigCbMeta cbMeta = {0};
+ bool isDrop;
+
+ // I am in newConfig
+ if (hit) {
+ syncNodeUpdateConfig(ths, &newSyncCfg, &isDrop);
+
+ // change isStandBy to normal
+ if (!isDrop) {
+ if (ths->state == TAOS_SYNC_STATE_LEADER) {
+ syncNodeBecomeLeader(ths);
+ } else {
+ syncNodeBecomeFollower(ths);
+ }
+ }
+
+ char* sOld = syncCfg2Str(&oldSyncCfg);
+ char* sNew = syncCfg2Str(&newSyncCfg);
+ sInfo("==config change== 0x11 old:%s new:%s isDrop:%d \n", sOld, sNew, isDrop);
+ taosMemoryFree(sOld);
+ taosMemoryFree(sNew);
+ }
+
+ // always call FpReConfigCb
+ if (ths->pFsm->FpReConfigCb != NULL) {
+ cbMeta.code = 0;
+ cbMeta.currentTerm = ths->pRaftStore->currentTerm;
+ cbMeta.index = pEntry->index;
+ cbMeta.term = pEntry->term;
+ cbMeta.oldCfg = oldSyncCfg;
+ cbMeta.flag = 0x11;
+ cbMeta.isDrop = isDrop;
+ ths->pFsm->FpReConfigCb(ths->pFsm, newSyncCfg, cbMeta);
}
}
// restore finish
if (pEntry->index == ths->pLogStore->getLastIndex(ths->pLogStore)) {
if (ths->restoreFinish == false) {
- if (ths->pFsm->FpRestoreFinish != NULL) {
- ths->pFsm->FpRestoreFinish(ths->pFsm);
+ if (ths->pFsm->FpRestoreFinishCb != NULL) {
+ ths->pFsm->FpRestoreFinishCb(ths->pFsm);
}
ths->restoreFinish = true;
sInfo("==syncNodeOnAppendEntriesCb== restoreFinish set true %p vgId:%d", ths, ths->vgId);
diff --git a/source/libs/sync/src/syncAppendEntriesReply.c b/source/libs/sync/src/syncAppendEntriesReply.c
index 77d85e29151205edd31deed1c40f5dbffca90849..4e6d870e194a223bd35d5671dc17532bd5e8626e 100644
--- a/source/libs/sync/src/syncAppendEntriesReply.c
+++ b/source/libs/sync/src/syncAppendEntriesReply.c
@@ -38,7 +38,7 @@
int32_t syncNodeOnAppendEntriesReplyCb(SSyncNode* ths, SyncAppendEntriesReply* pMsg) {
int32_t ret = 0;
- char logBuf[128];
+ char logBuf[128] = {0};
snprintf(logBuf, sizeof(logBuf), "==syncNodeOnAppendEntriesReplyCb== term:%lu", ths->pRaftStore->currentTerm);
syncAppendEntriesReplyLog2(logBuf, pMsg);
@@ -57,7 +57,7 @@ int32_t syncNodeOnAppendEntriesReplyCb(SSyncNode* ths, SyncAppendEntriesReply* p
// }
if (pMsg->term > ths->pRaftStore->currentTerm) {
- char logBuf[128];
+ char logBuf[128] = {0};
snprintf(logBuf, sizeof(logBuf), "syncNodeOnAppendEntriesReplyCb error term, receive:%lu current:%lu", pMsg->term,
ths->pRaftStore->currentTerm);
syncNodeLog2(logBuf, ths);
diff --git a/source/libs/sync/src/syncCommit.c b/source/libs/sync/src/syncCommit.c
index 18c6f8930ac73f2bdc5d9e3d860f8b2f8dec0188..4a1a40a2d7ddd47d9d6ec30a683f284dacc70fa7 100644
--- a/source/libs/sync/src/syncCommit.c
+++ b/source/libs/sync/src/syncCommit.c
@@ -111,6 +111,7 @@ void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) {
cbMeta.seqNum = pEntry->seqNum;
cbMeta.term = pEntry->term;
cbMeta.currentTerm = pSyncNode->pRaftStore->currentTerm;
+ cbMeta.flag = 0x1;
bool needExecute = true;
if (pSyncNode->pSnapshot != NULL && cbMeta.index <= pSyncNode->pSnapshot->lastApplyIndex) {
@@ -124,23 +125,63 @@ void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) {
// config change
if (pEntry->originalRpcType == TDMT_VND_SYNC_CONFIG_CHANGE) {
+ SSyncCfg oldSyncCfg = pSyncNode->pRaftCfg->cfg;
+
SSyncCfg newSyncCfg;
int32_t ret = syncCfgFromStr(rpcMsg.pCont, &newSyncCfg);
ASSERT(ret == 0);
- syncNodeUpdateConfig(pSyncNode, &newSyncCfg);
+ // update new config myIndex
+ bool hit = false;
+ for (int i = 0; i < newSyncCfg.replicaNum; ++i) {
+ if (strcmp(pSyncNode->myNodeInfo.nodeFqdn, (newSyncCfg.nodeInfo)[i].nodeFqdn) == 0 &&
+ pSyncNode->myNodeInfo.nodePort == (newSyncCfg.nodeInfo)[i].nodePort) {
+ newSyncCfg.myIndex = i;
+ hit = true;
+ break;
+ }
+ }
+
if (pSyncNode->state == TAOS_SYNC_STATE_LEADER) {
- syncNodeBecomeLeader(pSyncNode);
- } else {
- syncNodeBecomeFollower(pSyncNode);
+ ASSERT(hit == true);
+ }
+
+ bool isDrop;
+ syncNodeUpdateConfig(pSyncNode, &newSyncCfg, &isDrop);
+
+ // change isStandBy to normal
+ if (!isDrop) {
+ if (pSyncNode->state == TAOS_SYNC_STATE_LEADER) {
+ syncNodeBecomeLeader(pSyncNode);
+ } else {
+ syncNodeBecomeFollower(pSyncNode);
+ }
+ }
+
+ char* sOld = syncCfg2Str(&oldSyncCfg);
+ char* sNew = syncCfg2Str(&newSyncCfg);
+ sInfo("==config change== 0x1 old:%s new:%s isDrop:%d \n", sOld, sNew, isDrop);
+ taosMemoryFree(sOld);
+ taosMemoryFree(sNew);
+
+ if (pSyncNode->pFsm->FpReConfigCb != NULL) {
+ SReConfigCbMeta cbMeta = {0};
+ cbMeta.code = 0;
+ cbMeta.currentTerm = pSyncNode->pRaftStore->currentTerm;
+ cbMeta.index = pEntry->index;
+ cbMeta.term = pEntry->term;
+ cbMeta.oldCfg = oldSyncCfg;
+ cbMeta.flag = 0x1;
+ cbMeta.isDrop = isDrop;
+ pSyncNode->pFsm->FpReConfigCb(pSyncNode->pFsm, newSyncCfg, cbMeta);
}
}
// restore finish
if (pEntry->index == pSyncNode->pLogStore->getLastIndex(pSyncNode->pLogStore)) {
if (pSyncNode->restoreFinish == false) {
- if (pSyncNode->pFsm->FpRestoreFinish != NULL) {
- pSyncNode->pFsm->FpRestoreFinish(pSyncNode->pFsm);
+ if (pSyncNode->pFsm->FpRestoreFinishCb != NULL) {
+ pSyncNode->pFsm->FpRestoreFinishCb(pSyncNode->pFsm);
}
pSyncNode->restoreFinish = true;
sInfo("==syncMaybeAdvanceCommitIndex== restoreFinish set true %p vgId:%d", pSyncNode, pSyncNode->vgId);
diff --git a/source/libs/sync/src/syncIO.c b/source/libs/sync/src/syncIO.c
index 39760c32e83eddc060aeb9669fb252eaca816e54..e30a39e6342c4b7df77ee9cfdbe4f29333e36c16 100644
--- a/source/libs/sync/src/syncIO.c
+++ b/source/libs/sync/src/syncIO.c
@@ -74,7 +74,7 @@ int32_t syncIOSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) {
{
syncUtilMsgNtoH(pMsg->pCont);
- char logBuf[256];
+ char logBuf[256] = {0};
snprintf(logBuf, sizeof(logBuf), "==syncIOSendMsg== %s:%d", pEpSet->eps[0].fqdn, pEpSet->eps[0].port);
syncRpcMsgLog2(logBuf, pMsg);
@@ -89,7 +89,7 @@ int32_t syncIOSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) {
int32_t syncIOEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) {
int32_t ret = 0;
- char logBuf[128];
+ char logBuf[128] = {0};
syncRpcMsgLog2((char *)"==syncIOEqMsg==", pMsg);
SRpcMsg *pTemp;
diff --git a/source/libs/sync/src/syncIndexMgr.c b/source/libs/sync/src/syncIndexMgr.c
index 5809cedb9038758744d20b8e6ee2270bd0720e47..4d556d21dde7e56c2048cc314f86ad0a8949bc37 100644
--- a/source/libs/sync/src/syncIndexMgr.c
+++ b/source/libs/sync/src/syncIndexMgr.c
@@ -60,7 +60,9 @@ void syncIndexMgrSetIndex(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId,
return;
}
}
- assert(0);
+
+ // maybe config change
+ // assert(0);
}
SyncIndex syncIndexMgrGetIndex(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId) {
@@ -74,7 +76,7 @@ SyncIndex syncIndexMgrGetIndex(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaf
}
cJSON *syncIndexMgr2Json(SSyncIndexMgr *pSyncIndexMgr) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON *pRoot = cJSON_CreateObject();
if (pSyncIndexMgr != NULL) {
diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c
index 821ed364e840f268f1c0da76f95536a6b2342676..66806dbd0c8e6b40ac9331884ff0447263f0eaaa 100644
--- a/source/libs/sync/src/syncMain.c
+++ b/source/libs/sync/src/syncMain.c
@@ -100,6 +100,21 @@ void syncStart(int64_t rid) {
if (pSyncNode == NULL) {
return;
}
+
+ if (pSyncNode->pRaftCfg->isStandBy) {
+ syncNodeStartStandBy(pSyncNode);
+ } else {
+ syncNodeStart(pSyncNode);
+ }
+
+ taosReleaseRef(tsNodeRefId, pSyncNode->rid);
+}
+
+void syncStartNormal(int64_t rid) {
+ SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid);
+ if (pSyncNode == NULL) {
+ return;
+ }
syncNodeStart(pSyncNode);
taosReleaseRef(tsNodeRefId, pSyncNode->rid);
@@ -126,9 +141,38 @@ void syncStop(int64_t rid) {
taosRemoveRef(tsNodeRefId, rid);
}
+int32_t syncSetStandby(int64_t rid) {
+ SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid);
+ if (pSyncNode == NULL) {
+ return -1;
+ }
+
+ if (pSyncNode->state != TAOS_SYNC_STATE_LEADER) {
+ taosReleaseRef(tsNodeRefId, pSyncNode->rid);
+ return -1;
+ }
+
+ // state change
+ pSyncNode->state = TAOS_SYNC_STATE_FOLLOWER;
+ syncNodeStopHeartbeatTimer(pSyncNode);
+
+ // reset elect timer, long enough
+ int32_t electMS = TIMER_MAX_MS;
+ int32_t ret = syncNodeRestartElectTimer(pSyncNode, electMS);
+ ASSERT(ret == 0);
+
+ pSyncNode->pRaftCfg->isStandBy = 1;
+ raftCfgPersist(pSyncNode->pRaftCfg);
+
+ taosReleaseRef(tsNodeRefId, pSyncNode->rid);
+ return 0;
+}
+
int32_t syncReconfig(int64_t rid, const SSyncCfg* pSyncCfg) {
int32_t ret = 0;
char* configChange = syncCfg2Str((SSyncCfg*)pSyncCfg);
+ sInfo("==syncReconfig== newconfig:%s", configChange);
+
SRpcMsg rpcMsg = {0};
rpcMsg.msgType = TDMT_VND_SYNC_CONFIG_CHANGE;
rpcMsg.info.noResp = 1;
@@ -157,6 +201,18 @@ ESyncState syncGetMyRole(int64_t rid) {
return state;
}
+bool syncIsRestoreFinish(int64_t rid) {
+ SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid);
+ if (pSyncNode == NULL) {
+ return false;
+ }
+ assert(rid == pSyncNode->rid);
+ bool b = pSyncNode->restoreFinish;
+
+ taosReleaseRef(tsNodeRefId, pSyncNode->rid);
+ return b;
+}
+
const char* syncGetMyRoleStr(int64_t rid) {
const char* s = syncUtilState2String(syncGetMyRole(rid));
return s;
@@ -306,10 +362,9 @@ int32_t syncPropose(int64_t rid, const SRpcMsg* pMsg, bool isWeak) {
sTrace("syncPropose msgType:%d ", pMsg->msgType);
int32_t ret = TAOS_SYNC_PROPOSE_SUCCESS;
- SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid);
- if (pSyncNode == NULL) {
- return TAOS_SYNC_PROPOSE_OTHER_ERROR;
- }
+ SSyncNode* pSyncNode = taosAcquireRef(tsNodeRefId, rid);
+ if (pSyncNode == NULL) return TAOS_SYNC_PROPOSE_OTHER_ERROR;
+
assert(rid == pSyncNode->rid);
if (pSyncNode->state == TAOS_SYNC_STATE_LEADER) {
@@ -321,14 +376,13 @@ int32_t syncPropose(int64_t rid, const SRpcMsg* pMsg, bool isWeak) {
SyncClientRequest* pSyncMsg = syncClientRequestBuild2(pMsg, seqNum, isWeak, pSyncNode->vgId);
SRpcMsg rpcMsg;
syncClientRequest2RpcMsg(pSyncMsg, &rpcMsg);
- if (pSyncNode->FpEqMsg != NULL) {
- pSyncNode->FpEqMsg(pSyncNode->msgcb, &rpcMsg);
+
+ if (pSyncNode->FpEqMsg != NULL && (*pSyncNode->FpEqMsg)(pSyncNode->msgcb, &rpcMsg) == 0) {
+ ret = TAOS_SYNC_PROPOSE_SUCCESS;
} else {
sTrace("syncPropose pSyncNode->FpEqMsg is NULL");
}
syncClientRequestDestroy(pSyncMsg);
- ret = TAOS_SYNC_PROPOSE_SUCCESS;
-
} else {
sTrace("syncPropose not leader, %s", syncUtilState2String(pSyncNode->state));
ret = TAOS_SYNC_PROPOSE_NOT_LEADER;
@@ -339,7 +393,9 @@ int32_t syncPropose(int64_t rid, const SRpcMsg* pMsg, bool isWeak) {
}
// open/close --------------
-SSyncNode* syncNodeOpen(const SSyncInfo* pSyncInfo) {
+SSyncNode* syncNodeOpen(const SSyncInfo* pOldSyncInfo) {
+ SSyncInfo* pSyncInfo = (SSyncInfo*)pOldSyncInfo;
+
SSyncNode* pSyncNode = (SSyncNode*)taosMemoryMalloc(sizeof(SSyncNode));
assert(pSyncNode != NULL);
memset(pSyncNode, 0, sizeof(SSyncNode));
@@ -351,11 +407,25 @@ SSyncNode* syncNodeOpen(const SSyncInfo* pSyncInfo) {
sError("failed to create dir:%s since %s", pSyncInfo->path, terrstr());
return NULL;
}
+ }
+ snprintf(pSyncNode->configPath, sizeof(pSyncNode->configPath), "%s/raft_config.json", pSyncInfo->path);
+ if (!taosCheckExistFile(pSyncNode->configPath)) {
// create raft config file
- snprintf(pSyncNode->configPath, sizeof(pSyncNode->configPath), "%s/raft_config.json", pSyncInfo->path);
- ret = syncCfgCreateFile((SSyncCfg*)&(pSyncInfo->syncCfg), pSyncNode->configPath);
+ ret = raftCfgCreateFile((SSyncCfg*)&(pSyncInfo->syncCfg), pSyncInfo->isStandBy, pSyncNode->configPath);
assert(ret == 0);
+
+ } else {
+ // update syncCfg by raft_config.json
+ pSyncNode->pRaftCfg = raftCfgOpen(pSyncNode->configPath);
+ assert(pSyncNode->pRaftCfg != NULL);
+ pSyncInfo->syncCfg = pSyncNode->pRaftCfg->cfg;
+
+ char* seralized = raftCfg2Str(pSyncNode->pRaftCfg);
+ sInfo("syncNodeOpen update config :%s", seralized);
+ taosMemoryFree(seralized);
+
+ raftCfgClose(pSyncNode->pRaftCfg);
}
// init by SSyncInfo
@@ -499,7 +569,7 @@ SSyncNode* syncNodeOpen(const SSyncInfo* pSyncInfo) {
pSyncNode->pSnapshot = taosMemoryMalloc(sizeof(SSnapshot));
pSyncNode->pFsm->FpGetSnapshot(pSyncNode->pFsm, pSyncNode->pSnapshot);
}
- //tsem_init(&(pSyncNode->restoreSem), 0, 0);
+ // tsem_init(&(pSyncNode->restoreSem), 0, 0);
// start in syncNodeStart
// start raft
@@ -596,7 +666,7 @@ void syncNodeClose(SSyncNode* pSyncNode) {
taosMemoryFree(pSyncNode->pSnapshot);
}
- //tsem_destroy(&pSyncNode->restoreSem);
+ // tsem_destroy(&pSyncNode->restoreSem);
// free memory in syncFreeNode
// taosMemoryFree(pSyncNode);
@@ -745,7 +815,7 @@ int32_t syncNodeSendMsgByInfo(const SNodeInfo* nodeInfo, SSyncNode* pSyncNode, S
}
cJSON* syncNode2Json(const SSyncNode* pSyncNode) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();
if (pSyncNode != NULL) {
@@ -900,19 +970,20 @@ char* syncNode2SimpleStr(const SSyncNode* pSyncNode) {
int len = 256;
char* s = (char*)taosMemoryMalloc(len);
snprintf(s, len,
- "syncNode2SimpleStr vgId:%d currentTerm:%lu, commitIndex:%ld, state:%d %s, electTimerLogicClock:%lu, "
+ "syncNode2SimpleStr vgId:%d currentTerm:%lu, commitIndex:%ld, state:%d %s, isStandBy:%d, "
+ "electTimerLogicClock:%lu, "
"electTimerLogicClockUser:%lu, "
"electTimerMS:%d",
pSyncNode->vgId, pSyncNode->pRaftStore->currentTerm, pSyncNode->commitIndex, pSyncNode->state,
- syncUtilState2String(pSyncNode->state), pSyncNode->electTimerLogicClock, pSyncNode->electTimerLogicClockUser,
- pSyncNode->electTimerMS);
+ syncUtilState2String(pSyncNode->state), pSyncNode->pRaftCfg->isStandBy, pSyncNode->electTimerLogicClock,
+ pSyncNode->electTimerLogicClockUser, pSyncNode->electTimerMS);
return s;
}
-void syncNodeUpdateConfig(SSyncNode* pSyncNode, SSyncCfg* newConfig) {
+void syncNodeUpdateConfig(SSyncNode* pSyncNode, SSyncCfg* newConfig, bool* isDrop) {
+ SSyncCfg oldConfig = pSyncNode->pRaftCfg->cfg;
pSyncNode->pRaftCfg->cfg = *newConfig;
- int32_t ret = raftCfgPersist(pSyncNode->pRaftCfg);
- ASSERT(ret == 0);
+ int32_t ret = 0;
// init internal
pSyncNode->myNodeInfo = pSyncNode->pRaftCfg->cfg.nodeInfo[pSyncNode->pRaftCfg->cfg.myIndex];
@@ -939,7 +1010,34 @@ void syncNodeUpdateConfig(SSyncNode* pSyncNode, SSyncCfg* newConfig) {
syncIndexMgrUpdate(pSyncNode->pNextIndex, pSyncNode);
syncIndexMgrUpdate(pSyncNode->pMatchIndex, pSyncNode);
+ voteGrantedUpdate(pSyncNode->pVotesGranted, pSyncNode);
+ votesRespondUpdate(pSyncNode->pVotesRespond, pSyncNode);
+
+ // isDrop
+ *isDrop = true;
+ bool IamInOld, IamInNew;
+ for (int i = 0; i < oldConfig.replicaNum; ++i) {
+ if (strcmp((oldConfig.nodeInfo)[i].nodeFqdn, pSyncNode->myNodeInfo.nodeFqdn) == 0 &&
+ (oldConfig.nodeInfo)[i].nodePort == pSyncNode->myNodeInfo.nodePort) {
+ *isDrop = false;
+ break;
+ }
+ }
+
+ for (int i = 0; i < newConfig->replicaNum; ++i) {
+ if (strcmp((newConfig->nodeInfo)[i].nodeFqdn, pSyncNode->myNodeInfo.nodeFqdn) == 0 &&
+ (newConfig->nodeInfo)[i].nodePort == pSyncNode->myNodeInfo.nodePort) {
+ *isDrop = false;
+ break;
+ }
+ }
+
+ if (!(*isDrop)) {
+ // change isStandBy to normal
+ pSyncNode->pRaftCfg->isStandBy = 0;
+ }
+ raftCfgPersist(pSyncNode->pRaftCfg);
syncNodeLog2("==syncNodeUpdateConfig==", pSyncNode);
}
@@ -1240,7 +1338,7 @@ static int32_t syncNodeAppendNoop(SSyncNode* ths) {
// on message ----
int32_t syncNodeOnPingCb(SSyncNode* ths, SyncPing* pMsg) {
// log state
- char logBuf[1024];
+ char logBuf[1024] = {0};
snprintf(logBuf, sizeof(logBuf),
"==syncNodeOnPingCb== vgId:%d, state: %d, %s, term:%lu electTimerLogicClock:%lu, "
"electTimerLogicClockUser:%lu, electTimerMS:%d",
@@ -1352,12 +1450,12 @@ static void syncFreeNode(void* param) {
const char* syncStr(ESyncState state) {
switch (state) {
case TAOS_SYNC_STATE_FOLLOWER:
- return "FOLLOWER";
+ return "follower";
case TAOS_SYNC_STATE_CANDIDATE:
- return "CANDIDATE";
+ return "candidate";
case TAOS_SYNC_STATE_LEADER:
- return "LEADER";
+ return "leader";
default:
- return "ERROR";
+ return "error";
}
}
diff --git a/source/libs/sync/src/syncMessage.c b/source/libs/sync/src/syncMessage.c
index 04a989439a678dbd95b80a3a3a7ef1d65897b636..fae069f2e6b13c0073c6309f889dc7f8f92c8c6e 100644
--- a/source/libs/sync/src/syncMessage.c
+++ b/source/libs/sync/src/syncMessage.c
@@ -210,11 +210,12 @@ void syncTimeoutFromRpcMsg(const SRpcMsg* pRpcMsg, SyncTimeout* pMsg) {
SyncTimeout* syncTimeoutFromRpcMsg2(const SRpcMsg* pRpcMsg) {
SyncTimeout* pMsg = syncTimeoutDeserialize2(pRpcMsg->pCont, pRpcMsg->contLen);
+ assert(pMsg != NULL);
return pMsg;
}
cJSON* syncTimeout2Json(const SyncTimeout* pMsg) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();
if (pMsg != NULL) {
@@ -436,11 +437,12 @@ void syncPingFromRpcMsg(const SRpcMsg* pRpcMsg, SyncPing* pMsg) {
SyncPing* syncPingFromRpcMsg2(const SRpcMsg* pRpcMsg) {
SyncPing* pMsg = syncPingDeserialize2(pRpcMsg->pCont, pRpcMsg->contLen);
+ assert(pMsg != NULL);
return pMsg;
}
cJSON* syncPing2Json(const SyncPing* pMsg) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();
if (pMsg != NULL) {
@@ -454,7 +456,7 @@ cJSON* syncPing2Json(const SyncPing* pMsg) {
{
uint64_t u64 = pMsg->srcId.addr;
cJSON* pTmp = pSrcId;
- char host[128];
+ char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);
@@ -469,7 +471,7 @@ cJSON* syncPing2Json(const SyncPing* pMsg) {
{
uint64_t u64 = pMsg->destId.addr;
cJSON* pTmp = pDestId;
- char host[128];
+ char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);
@@ -695,11 +697,12 @@ void syncPingReplyFromRpcMsg(const SRpcMsg* pRpcMsg, SyncPingReply* pMsg) {
SyncPingReply* syncPingReplyFromRpcMsg2(const SRpcMsg* pRpcMsg) {
SyncPingReply* pMsg = syncPingReplyDeserialize2(pRpcMsg->pCont, pRpcMsg->contLen);
+ assert(pMsg != NULL);
return pMsg;
}
cJSON* syncPingReply2Json(const SyncPingReply* pMsg) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();
if (pMsg != NULL) {
@@ -713,7 +716,7 @@ cJSON* syncPingReply2Json(const SyncPingReply* pMsg) {
{
uint64_t u64 = pMsg->srcId.addr;
cJSON* pTmp = pSrcId;
- char host[128];
+ char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);
@@ -728,7 +731,7 @@ cJSON* syncPingReply2Json(const SyncPingReply* pMsg) {
{
uint64_t u64 = pMsg->destId.addr;
cJSON* pTmp = pDestId;
- char host[128];
+ char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);
@@ -861,11 +864,12 @@ void syncClientRequestFromRpcMsg(const SRpcMsg* pRpcMsg, SyncClientRequest* pMsg
// step 3. RpcMsg => SyncClientRequest, from queue
SyncClientRequest* syncClientRequestFromRpcMsg2(const SRpcMsg* pRpcMsg) {
SyncClientRequest* pMsg = syncClientRequestDeserialize2(pRpcMsg->pCont, pRpcMsg->contLen);
+ assert(pMsg != NULL);
return pMsg;
}
cJSON* syncClientRequest2Json(const SyncClientRequest* pMsg) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();
if (pMsg != NULL) {
@@ -986,11 +990,12 @@ void syncRequestVoteFromRpcMsg(const SRpcMsg* pRpcMsg, SyncRequestVote* pMsg) {
SyncRequestVote* syncRequestVoteFromRpcMsg2(const SRpcMsg* pRpcMsg) {
SyncRequestVote* pMsg = syncRequestVoteDeserialize2(pRpcMsg->pCont, pRpcMsg->contLen);
+ assert(pMsg != NULL);
return pMsg;
}
cJSON* syncRequestVote2Json(const SyncRequestVote* pMsg) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();
if (pMsg != NULL) {
@@ -1004,7 +1009,7 @@ cJSON* syncRequestVote2Json(const SyncRequestVote* pMsg) {
{
uint64_t u64 = pMsg->srcId.addr;
cJSON* pTmp = pSrcId;
- char host[128];
+ char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);
@@ -1018,7 +1023,7 @@ cJSON* syncRequestVote2Json(const SyncRequestVote* pMsg) {
{
uint64_t u64 = pMsg->destId.addr;
cJSON* pTmp = pDestId;
- char host[128];
+ char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);
@@ -1134,11 +1139,12 @@ void syncRequestVoteReplyFromRpcMsg(const SRpcMsg* pRpcMsg, SyncRequestVoteReply
SyncRequestVoteReply* syncRequestVoteReplyFromRpcMsg2(const SRpcMsg* pRpcMsg) {
SyncRequestVoteReply* pMsg = syncRequestVoteReplyDeserialize2(pRpcMsg->pCont, pRpcMsg->contLen);
+ assert(pMsg != NULL);
return pMsg;
}
cJSON* syncRequestVoteReply2Json(const SyncRequestVoteReply* pMsg) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();
if (pMsg != NULL) {
@@ -1152,7 +1158,7 @@ cJSON* syncRequestVoteReply2Json(const SyncRequestVoteReply* pMsg) {
{
uint64_t u64 = pMsg->srcId.addr;
cJSON* pTmp = pSrcId;
- char host[128];
+ char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);
@@ -1166,7 +1172,7 @@ cJSON* syncRequestVoteReply2Json(const SyncRequestVoteReply* pMsg) {
{
uint64_t u64 = pMsg->destId.addr;
cJSON* pTmp = pDestId;
- char host[128];
+ char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);
@@ -1281,11 +1287,12 @@ void syncAppendEntriesFromRpcMsg(const SRpcMsg* pRpcMsg, SyncAppendEntries* pMsg
SyncAppendEntries* syncAppendEntriesFromRpcMsg2(const SRpcMsg* pRpcMsg) {
SyncAppendEntries* pMsg = syncAppendEntriesDeserialize2(pRpcMsg->pCont, pRpcMsg->contLen);
+ assert(pMsg != NULL);
return pMsg;
}
cJSON* syncAppendEntries2Json(const SyncAppendEntries* pMsg) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();
if (pMsg != NULL) {
@@ -1299,7 +1306,7 @@ cJSON* syncAppendEntries2Json(const SyncAppendEntries* pMsg) {
{
uint64_t u64 = pMsg->srcId.addr;
cJSON* pTmp = pSrcId;
- char host[128];
+ char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);
@@ -1314,7 +1321,7 @@ cJSON* syncAppendEntries2Json(const SyncAppendEntries* pMsg) {
{
uint64_t u64 = pMsg->destId.addr;
cJSON* pTmp = pDestId;
- char host[128];
+ char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);
@@ -1444,11 +1451,12 @@ void syncAppendEntriesReplyFromRpcMsg(const SRpcMsg* pRpcMsg, SyncAppendEntriesR
SyncAppendEntriesReply* syncAppendEntriesReplyFromRpcMsg2(const SRpcMsg* pRpcMsg) {
SyncAppendEntriesReply* pMsg = syncAppendEntriesReplyDeserialize2(pRpcMsg->pCont, pRpcMsg->contLen);
+ assert(pMsg != NULL);
return pMsg;
}
cJSON* syncAppendEntriesReply2Json(const SyncAppendEntriesReply* pMsg) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();
if (pMsg != NULL) {
@@ -1462,7 +1470,7 @@ cJSON* syncAppendEntriesReply2Json(const SyncAppendEntriesReply* pMsg) {
{
uint64_t u64 = pMsg->srcId.addr;
cJSON* pTmp = pSrcId;
- char host[128];
+ char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);
@@ -1477,7 +1485,7 @@ cJSON* syncAppendEntriesReply2Json(const SyncAppendEntriesReply* pMsg) {
{
uint64_t u64 = pMsg->destId.addr;
cJSON* pTmp = pDestId;
- char host[128];
+ char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);
@@ -1616,7 +1624,7 @@ void syncApplyMsg2OriginalRpcMsg(const SyncApplyMsg* pMsg, SRpcMsg* pOriginalRpc
}
cJSON* syncApplyMsg2Json(const SyncApplyMsg* pMsg) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();
if (pMsg != NULL) {
diff --git a/source/libs/sync/src/syncRaftCfg.c b/source/libs/sync/src/syncRaftCfg.c
index dc540424ec48ae1489a48f27c8bcbc168e09f83a..3e1931e2c37e626b7ab049299a9b83b8a78a2cf1 100644
--- a/source/libs/sync/src/syncRaftCfg.c
+++ b/source/libs/sync/src/syncRaftCfg.c
@@ -28,11 +28,11 @@ SRaftCfg *raftCfgOpen(const char *path) {
taosLSeekFile(pCfg->pFile, 0, SEEK_SET);
- char buf[1024];
+ char buf[1024] = {0};
int len = taosReadFile(pCfg->pFile, buf, sizeof(buf));
assert(len > 0);
- int32_t ret = syncCfgFromStr(buf, &(pCfg->cfg));
+ int32_t ret = raftCfgFromStr(buf, pCfg);
assert(ret == 0);
return pCfg;
@@ -48,18 +48,26 @@ int32_t raftCfgClose(SRaftCfg *pRaftCfg) {
int32_t raftCfgPersist(SRaftCfg *pRaftCfg) {
assert(pRaftCfg != NULL);
- char *s = syncCfg2Str(&(pRaftCfg->cfg));
+ char *s = raftCfg2Str(pRaftCfg);
taosLSeekFile(pRaftCfg->pFile, 0, SEEK_SET);
- int64_t ret = taosWriteFile(pRaftCfg->pFile, s, strlen(s) + 1);
- assert(ret == strlen(s) + 1);
- taosMemoryFree(s);
+ char buf[CONFIG_FILE_LEN] = {0};
+ memset(buf, 0, sizeof(buf));
+ ASSERT(strlen(s) + 1 <= CONFIG_FILE_LEN);
+ snprintf(buf, sizeof(buf), "%s", s);
+ int64_t ret = taosWriteFile(pRaftCfg->pFile, buf, sizeof(buf));
+ assert(ret == sizeof(buf));
+
+ // int64_t ret = taosWriteFile(pRaftCfg->pFile, s, strlen(s) + 1);
+ // assert(ret == strlen(s) + 1);
+
+ taosMemoryFree(s);
taosFsyncFile(pRaftCfg->pFile);
return 0;
}
cJSON *syncCfg2Json(SSyncCfg *pSyncCfg) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON *pRoot = cJSON_CreateObject();
if (pSyncCfg != NULL) {
@@ -76,9 +84,12 @@ cJSON *syncCfg2Json(SSyncCfg *pSyncCfg) {
}
}
+ return pRoot;
+ /*
cJSON *pJson = cJSON_CreateObject();
cJSON_AddItemToObject(pJson, "SSyncCfg", pRoot);
return pJson;
+ */
}
char *syncCfg2Str(SSyncCfg *pSyncCfg) {
@@ -90,7 +101,8 @@ char *syncCfg2Str(SSyncCfg *pSyncCfg) {
int32_t syncCfgFromJson(const cJSON *pRoot, SSyncCfg *pSyncCfg) {
memset(pSyncCfg, 0, sizeof(SSyncCfg));
- cJSON *pJson = cJSON_GetObjectItem(pRoot, "SSyncCfg");
+ // cJSON *pJson = cJSON_GetObjectItem(pRoot, "SSyncCfg");
+ const cJSON *pJson = pRoot;
cJSON *pReplicaNum = cJSON_GetObjectItem(pJson, "replicaNum");
assert(cJSON_IsNumber(pReplicaNum));
@@ -133,30 +145,73 @@ int32_t syncCfgFromStr(const char *s, SSyncCfg *pSyncCfg) {
}
cJSON *raftCfg2Json(SRaftCfg *pRaftCfg) {
- cJSON *pJson = syncCfg2Json(&(pRaftCfg->cfg));
+ cJSON *pRoot = cJSON_CreateObject();
+ cJSON_AddItemToObject(pRoot, "SSyncCfg", syncCfg2Json(&(pRaftCfg->cfg)));
+ cJSON_AddNumberToObject(pRoot, "isStandBy", pRaftCfg->isStandBy);
+
+ cJSON *pJson = cJSON_CreateObject();
+ cJSON_AddItemToObject(pJson, "RaftCfg", pRoot);
return pJson;
}
char *raftCfg2Str(SRaftCfg *pRaftCfg) {
- char *s = syncCfg2Str(&(pRaftCfg->cfg));
- return s;
+ cJSON *pJson = raftCfg2Json(pRaftCfg);
+ char * serialized = cJSON_Print(pJson);
+ cJSON_Delete(pJson);
+ return serialized;
}
-int32_t syncCfgCreateFile(SSyncCfg *pCfg, const char *path) {
+int32_t raftCfgCreateFile(SSyncCfg *pCfg, int8_t isStandBy, const char *path) {
assert(pCfg != NULL);
TdFilePtr pFile = taosOpenFile(path, TD_FILE_CREATE | TD_FILE_WRITE);
assert(pFile != NULL);
- char * s = syncCfg2Str(pCfg);
- int64_t ret = taosWriteFile(pFile, s, strlen(s) + 1);
- assert(ret == strlen(s) + 1);
+ SRaftCfg raftCfg;
+ raftCfg.cfg = *pCfg;
+ raftCfg.isStandBy = isStandBy;
+ char *s = raftCfg2Str(&raftCfg);
+
+ char buf[CONFIG_FILE_LEN] = {0};
+ memset(buf, 0, sizeof(buf));
+ ASSERT(strlen(s) + 1 <= CONFIG_FILE_LEN);
+ snprintf(buf, sizeof(buf), "%s", s);
+ int64_t ret = taosWriteFile(pFile, buf, sizeof(buf));
+ assert(ret == sizeof(buf));
+
+ // int64_t ret = taosWriteFile(pFile, s, strlen(s) + 1);
+ // assert(ret == strlen(s) + 1);
taosMemoryFree(s);
taosCloseFile(&pFile);
return 0;
}
+int32_t raftCfgFromJson(const cJSON *pRoot, SRaftCfg *pRaftCfg) {
+ // memset(pRaftCfg, 0, sizeof(SRaftCfg));
+ cJSON *pJson = cJSON_GetObjectItem(pRoot, "RaftCfg");
+
+ cJSON *pJsonIsStandBy = cJSON_GetObjectItem(pJson, "isStandBy");
+ pRaftCfg->isStandBy = cJSON_GetNumberValue(pJsonIsStandBy);
+
+ cJSON * pJsonSyncCfg = cJSON_GetObjectItem(pJson, "SSyncCfg");
+ int32_t code = syncCfgFromJson(pJsonSyncCfg, &(pRaftCfg->cfg));
+ ASSERT(code == 0);
+
+ return code;
+}
+
+int32_t raftCfgFromStr(const char *s, SRaftCfg *pRaftCfg) {
+ cJSON *pRoot = cJSON_Parse(s);
+ assert(pRoot != NULL);
+
+ int32_t ret = raftCfgFromJson(pRoot, pRaftCfg);
+ assert(ret == 0);
+
+ cJSON_Delete(pRoot);
+ return 0;
+}
+
// for debug ----------------------
void syncCfgPrint(SSyncCfg *pCfg) {
char *serialized = syncCfg2Str(pCfg);
diff --git a/source/libs/sync/src/syncRaftEntry.c b/source/libs/sync/src/syncRaftEntry.c
index 21ee35eaf9c276636d754048095d6b2d44f18796..8755f71654382f3913a3c81b6ee1e9b6e91dbb69 100644
--- a/source/libs/sync/src/syncRaftEntry.c
+++ b/source/libs/sync/src/syncRaftEntry.c
@@ -107,7 +107,7 @@ SSyncRaftEntry* syncEntryDeserialize(const char* buf, uint32_t len) {
}
cJSON* syncEntry2Json(const SSyncRaftEntry* pEntry) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();
if (pEntry != NULL) {
diff --git a/source/libs/sync/src/syncRaftLog.c b/source/libs/sync/src/syncRaftLog.c
index 07a9397a580332f427ab3b206359de3ec0accf40..a6397f8cba24694d6f36847af5e877c72bd1a920 100644
--- a/source/libs/sync/src/syncRaftLog.c
+++ b/source/libs/sync/src/syncRaftLog.c
@@ -16,6 +16,15 @@
#include "syncRaftLog.h"
#include "wal.h"
+static SSyncRaftEntry* logStoreGetLastEntry(SSyncLogStore* pLogStore);
+static SyncIndex logStoreLastIndex(SSyncLogStore* pLogStore);
+static SyncTerm logStoreLastTerm(SSyncLogStore* pLogStore);
+static SSyncRaftEntry* logStoreGetEntry(SSyncLogStore* pLogStore, SyncIndex index);
+static int32_t logStoreAppendEntry(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry);
+static int32_t logStoreTruncate(SSyncLogStore* pLogStore, SyncIndex fromIndex);
+static int32_t logStoreUpdateCommitIndex(SSyncLogStore* pLogStore, SyncIndex index);
+static SyncIndex logStoreGetCommitIndex(SSyncLogStore* pLogStore);
+
SSyncLogStore* logStoreCreate(SSyncNode* pSyncNode) {
SSyncLogStore* pLogStore = taosMemoryMalloc(sizeof(SSyncLogStore));
assert(pLogStore != NULL);
@@ -78,7 +87,9 @@ SSyncRaftEntry* logStoreGetEntry(SSyncLogStore* pLogStore, SyncIndex index) {
if (index >= SYNC_INDEX_BEGIN && index <= logStoreLastIndex(pLogStore)) {
SWalReadHandle* pWalHandle = walOpenReadHandle(pWal);
- int32_t code = walReadWithHandle(pWalHandle, index);
+ ASSERT(pWalHandle != NULL);
+
+ int32_t code = walReadWithHandle(pWalHandle, index);
if (code != 0) {
int32_t err = terrno;
const char* errStr = tstrerror(err);
@@ -179,7 +190,7 @@ SSyncRaftEntry* logStoreGetLastEntry(SSyncLogStore* pLogStore) {
}
cJSON* logStore2Json(SSyncLogStore* pLogStore) {
- char u64buf[128];
+ char u64buf[128] = {0};
SSyncLogStoreData* pData = (SSyncLogStoreData*)pLogStore->data;
cJSON* pRoot = cJSON_CreateObject();
@@ -216,7 +227,7 @@ char* logStore2Str(SSyncLogStore* pLogStore) {
}
cJSON* logStoreSimple2Json(SSyncLogStore* pLogStore) {
- char u64buf[128];
+ char u64buf[128] = {0};
SSyncLogStoreData* pData = (SSyncLogStoreData*)pLogStore->data;
cJSON* pRoot = cJSON_CreateObject();
diff --git a/source/libs/sync/src/syncRaftStore.c b/source/libs/sync/src/syncRaftStore.c
index d6f2e91de7739efd535a23427168180fe2aabc86..52e815292607d69e7d364f6a11c31c184f07914a 100644
--- a/source/libs/sync/src/syncRaftStore.c
+++ b/source/libs/sync/src/syncRaftStore.c
@@ -34,7 +34,7 @@ SRaftStore *raftStoreOpen(const char *path) {
memset(pRaftStore, 0, sizeof(*pRaftStore));
snprintf(pRaftStore->path, sizeof(pRaftStore->path), "%s", path);
- char storeBuf[RAFT_STORE_BLOCK_SIZE];
+ char storeBuf[RAFT_STORE_BLOCK_SIZE] = {0};
memset(storeBuf, 0, sizeof(storeBuf));
if (!raftStoreFileExist(pRaftStore->path)) {
@@ -84,7 +84,7 @@ int32_t raftStorePersist(SRaftStore *pRaftStore) {
assert(pRaftStore != NULL);
int32_t ret;
- char storeBuf[RAFT_STORE_BLOCK_SIZE];
+ char storeBuf[RAFT_STORE_BLOCK_SIZE] = {0};
ret = raftStoreSerialize(pRaftStore, storeBuf, sizeof(storeBuf));
assert(ret == 0);
@@ -107,7 +107,7 @@ int32_t raftStoreSerialize(SRaftStore *pRaftStore, char *buf, size_t len) {
cJSON *pRoot = cJSON_CreateObject();
- char u64Buf[128];
+ char u64Buf[128] = {0};
snprintf(u64Buf, sizeof(u64Buf), "%lu", pRaftStore->currentTerm);
cJSON_AddStringToObject(pRoot, "current_term", u64Buf);
@@ -117,7 +117,7 @@ int32_t raftStoreSerialize(SRaftStore *pRaftStore, char *buf, size_t len) {
cJSON_AddNumberToObject(pRoot, "vote_for_vgid", pRaftStore->voteFor.vgId);
uint64_t u64 = pRaftStore->voteFor.addr;
- char host[128];
+ char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pRoot, "addr_host", host);
@@ -184,7 +184,7 @@ void raftStoreSetTerm(SRaftStore *pRaftStore, SyncTerm term) {
int32_t raftStoreFromJson(SRaftStore *pRaftStore, cJSON *pJson) { return 0; }
cJSON *raftStore2Json(SRaftStore *pRaftStore) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON *pRoot = cJSON_CreateObject();
if (pRaftStore != NULL) {
@@ -196,7 +196,7 @@ cJSON *raftStore2Json(SRaftStore *pRaftStore) {
cJSON_AddStringToObject(pVoteFor, "addr", u64buf);
{
uint64_t u64 = pRaftStore->voteFor.addr;
- char host[128];
+ char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pVoteFor, "addr_host", host);
diff --git a/source/libs/sync/src/syncReplication.c b/source/libs/sync/src/syncReplication.c
index 2fdb8a0e177f0f985c40b136ea29ce9f968c0fad..d17e64d936737ba7ea0dc5f33db407cfdf4bf205 100644
--- a/source/libs/sync/src/syncReplication.c
+++ b/source/libs/sync/src/syncReplication.c
@@ -75,7 +75,7 @@ int32_t syncNodeAppendEntriesPeers(SSyncNode* pSyncNode) {
// SyncIndex lastIndex = syncUtilMinIndex(pSyncNode->pLogStore->getLastIndex(pSyncNode->pLogStore), nextIndex);
SyncAppendEntries* pMsg = NULL;
- SSyncRaftEntry* pEntry = logStoreGetEntry(pSyncNode->pLogStore, nextIndex);
+ SSyncRaftEntry* pEntry = pSyncNode->pLogStore->getEntry(pSyncNode->pLogStore, nextIndex);
if (pEntry != NULL) {
pMsg = syncAppendEntriesBuild(pEntry->bytes, pSyncNode->vgId);
assert(pMsg != NULL);
diff --git a/source/libs/sync/src/syncRequestVote.c b/source/libs/sync/src/syncRequestVote.c
index 619a1546a96ad9642272b7227466d99be833be9f..265677129213c6887012ee72da9066aad25adc09 100644
--- a/source/libs/sync/src/syncRequestVote.c
+++ b/source/libs/sync/src/syncRequestVote.c
@@ -44,7 +44,7 @@
int32_t syncNodeOnRequestVoteCb(SSyncNode* ths, SyncRequestVote* pMsg) {
int32_t ret = 0;
- char logBuf[128];
+ char logBuf[128] = {0};
snprintf(logBuf, sizeof(logBuf), "==syncNodeOnRequestVoteCb== term:%lu", ths->pRaftStore->currentTerm);
syncRequestVoteLog2(logBuf, pMsg);
diff --git a/source/libs/sync/src/syncRequestVoteReply.c b/source/libs/sync/src/syncRequestVoteReply.c
index a6348dff50132f860ada45e9cc3bddfabd6d62d0..75236aee2bcec1ca9c7ae07165c427edbc1e0a04 100644
--- a/source/libs/sync/src/syncRequestVoteReply.c
+++ b/source/libs/sync/src/syncRequestVoteReply.c
@@ -39,7 +39,7 @@
int32_t syncNodeOnRequestVoteReplyCb(SSyncNode* ths, SyncRequestVoteReply* pMsg) {
int32_t ret = 0;
- char logBuf[128];
+ char logBuf[128] = {0};
snprintf(logBuf, sizeof(logBuf), "==syncNodeOnRequestVoteReplyCb== term:%lu", ths->pRaftStore->currentTerm);
syncRequestVoteReplyLog2(logBuf, pMsg);
@@ -56,7 +56,7 @@ int32_t syncNodeOnRequestVoteReplyCb(SSyncNode* ths, SyncRequestVoteReply* pMsg)
// }
if (pMsg->term > ths->pRaftStore->currentTerm) {
- char logBuf[128];
+ char logBuf[128] = {0};
snprintf(logBuf, sizeof(logBuf), "syncNodeOnRequestVoteReplyCb error term, receive:%lu current:%lu", pMsg->term,
ths->pRaftStore->currentTerm);
syncNodePrint2(logBuf, ths);
diff --git a/source/libs/sync/src/syncSnapshot.c b/source/libs/sync/src/syncSnapshot.c
index 42b2bd993b515789934268f4400fece4f040f7c5..ccb0e6071b82e43bd23a9334e294a421a336e57b 100644
--- a/source/libs/sync/src/syncSnapshot.c
+++ b/source/libs/sync/src/syncSnapshot.c
@@ -15,6 +15,22 @@
#include "syncSnapshot.h"
-int32_t takeSnapshot(SSyncFSM *pFsm, SSnapshot *pSnapshot) { return 0; }
+SSyncSnapshotSender *snapshotSenderCreate(SSyncNode *pSyncNode) { return NULL; }
-int32_t restoreSnapshot(SSyncFSM *pFsm, SSnapshot *pSnapshot) { return 0; }
\ No newline at end of file
+void snapshotSenderDestroy(SSyncSnapshotSender *pSender) {}
+
+int32_t snapshotSend(SSyncSnapshotSender *pSender) { return 0; }
+
+cJSON *snapshotSender2Json(SSyncSnapshotSender *pSender) { return NULL; }
+
+char *snapshotSender2Str(SSyncSnapshotSender *pSender) { return NULL; }
+
+SSyncSnapshotReceiver *snapshotReceiverCreate(SSyncNode *pSyncNode) { return NULL; }
+
+void snapshotReceiverDestroy(SSyncSnapshotReceiver *pReceiver) {}
+
+int32_t snapshotReceive(SSyncSnapshotReceiver *pReceiver) { return 0; }
+
+cJSON *snapshotReceiver2Json(SSyncSnapshotReceiver *pReceiver) { return NULL; }
+
+char *snapshotReceiver2Str(SSyncSnapshotReceiver *pReceiver) { return NULL; }
diff --git a/source/libs/sync/src/syncUtil.c b/source/libs/sync/src/syncUtil.c
index cf045a692611a64e75c2f4c595180f1e324e75f9..d754acd9f831ac18ce7e28b5ef2fda4b2d8650db 100644
--- a/source/libs/sync/src/syncUtil.c
+++ b/source/libs/sync/src/syncUtil.c
@@ -43,7 +43,7 @@ void syncUtilnodeInfo2EpSet(const SNodeInfo* pNodeInfo, SEpSet* pEpSet) {
}
void syncUtilraftId2EpSet(const SRaftId* raftId, SEpSet* pEpSet) {
- char host[TSDB_FQDN_LEN];
+ char host[TSDB_FQDN_LEN] = {0};
uint16_t port;
syncUtilU642Addr(raftId->addr, host, sizeof(host), &port);
@@ -62,7 +62,7 @@ void syncUtilraftId2EpSet(const SRaftId* raftId, SEpSet* pEpSet) {
void syncUtilnodeInfo2raftId(const SNodeInfo* pNodeInfo, SyncGroupId vgId, SRaftId* raftId) {
uint32_t ipv4 = taosGetIpv4FromFqdn(pNodeInfo->nodeFqdn);
assert(ipv4 != 0xFFFFFFFF);
- char ipbuf[128];
+ char ipbuf[128] = {0};
tinet_ntoa(ipbuf, ipv4);
raftId->addr = syncUtilAddr2U64(ipbuf, pNodeInfo->nodePort);
raftId->vgId = vgId;
@@ -106,7 +106,7 @@ int32_t syncUtilElectRandomMS(int32_t min, int32_t max) {
int32_t syncUtilQuorum(int32_t replicaNum) { return replicaNum / 2 + 1; }
cJSON* syncUtilNodeInfo2Json(const SNodeInfo* p) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();
cJSON_AddStringToObject(pRoot, "nodeFqdn", p->nodeFqdn);
@@ -118,12 +118,12 @@ cJSON* syncUtilNodeInfo2Json(const SNodeInfo* p) {
}
cJSON* syncUtilRaftId2Json(const SRaftId* p) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", p->addr);
cJSON_AddStringToObject(pRoot, "addr", u64buf);
- char host[128];
+ char host[128] = {0};
uint16_t port;
syncUtilU642Addr(p->addr, host, sizeof(host), &port);
cJSON_AddStringToObject(pRoot, "host", host);
diff --git a/source/libs/sync/src/syncVoteMgr.c b/source/libs/sync/src/syncVoteMgr.c
index 733dfd05b6deb88ed08df78858f358822bebbda7..528c2f26c85c17f33f0a783def69ef9f26798b1b 100644
--- a/source/libs/sync/src/syncVoteMgr.c
+++ b/source/libs/sync/src/syncVoteMgr.c
@@ -45,6 +45,17 @@ void voteGrantedDestroy(SVotesGranted *pVotesGranted) {
}
}
+void voteGrantedUpdate(SVotesGranted *pVotesGranted, SSyncNode *pSyncNode) {
+ pVotesGranted->replicas = &(pSyncNode->replicasId);
+ pVotesGranted->replicaNum = pSyncNode->replicaNum;
+ voteGrantedClearVotes(pVotesGranted);
+
+ pVotesGranted->term = 0;
+ pVotesGranted->quorum = pSyncNode->quorum;
+ pVotesGranted->toLeader = false;
+ pVotesGranted->pSyncNode = pSyncNode;
+}
+
bool voteGrantedMajority(SVotesGranted *pVotesGranted) {
bool ret = pVotesGranted->votes >= pVotesGranted->quorum;
return ret;
@@ -79,7 +90,7 @@ void voteGrantedReset(SVotesGranted *pVotesGranted, SyncTerm term) {
}
cJSON *voteGranted2Json(SVotesGranted *pVotesGranted) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON *pRoot = cJSON_CreateObject();
if (pVotesGranted != NULL) {
@@ -168,6 +179,13 @@ void votesRespondDestory(SVotesRespond *pVotesRespond) {
}
}
+void votesRespondUpdate(SVotesRespond *pVotesRespond, SSyncNode *pSyncNode) {
+ pVotesRespond->replicas = &(pSyncNode->replicasId);
+ pVotesRespond->replicaNum = pSyncNode->replicaNum;
+ pVotesRespond->term = 0;
+ pVotesRespond->pSyncNode = pSyncNode;
+}
+
bool votesResponded(SVotesRespond *pVotesRespond, const SRaftId *pRaftId) {
bool ret = false;
for (int i = 0; i < pVotesRespond->replicaNum; ++i) {
@@ -202,7 +220,7 @@ void votesRespondReset(SVotesRespond *pVotesRespond, SyncTerm term) {
}
cJSON *votesRespond2Json(SVotesRespond *pVotesRespond) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON *pRoot = cJSON_CreateObject();
if (pVotesRespond != NULL) {
diff --git a/source/libs/sync/test/syncConfigChangeTest.cpp b/source/libs/sync/test/syncConfigChangeTest.cpp
index 0850ef6343d2ce5b6719f7eb92eccc55cdafc41d..1ab3ce203ad4a3968bc45ab2382108fa7d97f40c 100644
--- a/source/libs/sync/test/syncConfigChangeTest.cpp
+++ b/source/libs/sync/test/syncConfigChangeTest.cpp
@@ -42,9 +42,10 @@ void CommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta) {
}
if (cbMeta.index > beginIndex) {
- char logBuf[256];
- snprintf(logBuf, sizeof(logBuf), "==callback== ==CommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s \n",
- pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state));
+ char logBuf[256] = {0};
+ snprintf(logBuf, sizeof(logBuf),
+ "==callback== ==CommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s flag:%lu\n", pFsm,
+ cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state), cbMeta.flag);
syncRpcMsgLog2(logBuf, (SRpcMsg*)pMsg);
} else {
sTrace("==callback== ==CommitCb== do not apply again %ld", cbMeta.index);
@@ -52,17 +53,18 @@ void CommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta) {
}
void PreCommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta) {
- char logBuf[256];
+ char logBuf[256] = {0};
snprintf(logBuf, sizeof(logBuf),
- "==callback== ==PreCommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s \n", pFsm, cbMeta.index,
- cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state));
+ "==callback== ==PreCommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s flag:%lu\n", pFsm,
+ cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state), cbMeta.flag);
syncRpcMsgLog2(logBuf, (SRpcMsg*)pMsg);
}
void RollBackCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta) {
char logBuf[256];
- snprintf(logBuf, sizeof(logBuf), "==callback== ==RollBackCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s \n",
- pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state));
+ snprintf(logBuf, sizeof(logBuf),
+ "==callback== ==RollBackCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s flag:%lu\n", pFsm,
+ cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state), cbMeta.flag);
syncRpcMsgLog2(logBuf, (SRpcMsg*)pMsg);
}
@@ -73,17 +75,27 @@ int32_t GetSnapshotCb(struct SSyncFSM* pFsm, SSnapshot* pSnapshot) {
return 0;
}
-void FpRestoreFinishCb(struct SSyncFSM* pFsm) {
- sTrace("==callback== ==FpRestoreFinishCb==");
+void RestoreFinishCb(struct SSyncFSM* pFsm) { sTrace("==callback== ==RestoreFinishCb=="); }
+
+void ReConfigCb(struct SSyncFSM* pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta) {
+ sTrace("==callback== ==ReConfigCb== flag:0x%lX, isDrop:%d, index:%ld, code:%d, currentTerm:%lu, term:%lu",
+ cbMeta.flag, cbMeta.isDrop, cbMeta.index, cbMeta.code, cbMeta.currentTerm, cbMeta.term);
}
SSyncFSM* createFsm() {
SSyncFSM* pFsm = (SSyncFSM*)taosMemoryMalloc(sizeof(SSyncFSM));
+ memset(pFsm, 0, sizeof(*pFsm));
+
pFsm->FpCommitCb = CommitCb;
pFsm->FpPreCommitCb = PreCommitCb;
pFsm->FpRollBackCb = RollBackCb;
+
pFsm->FpGetSnapshot = GetSnapshotCb;
- pFsm->FpRestoreFinish = FpRestoreFinishCb;
+ pFsm->FpRestoreFinishCb = RestoreFinishCb;
+
+
+ pFsm->FpReConfigCb = ReConfigCb;
+
return pFsm;
}
@@ -111,6 +123,7 @@ int64_t createSyncNode(int32_t replicaNum, int32_t myIndex, int32_t vgId, SWal*
syncInfo.pFsm = createFsm();
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s_sync_replica%d_index%d", path, replicaNum, myIndex);
syncInfo.pWal = pWal;
+ syncInfo.isStandBy = isStandBy;
SSyncCfg* pCfg = &syncInfo.syncCfg;
@@ -182,7 +195,7 @@ SRpcMsg* createRpcMsg(int i, int count, int myIndex) {
int main(int argc, char** argv) {
tsAsyncLog = 0;
- sDebugFlag = DEBUG_TRACE + DEBUG_SCREEN + DEBUG_FILE;
+ sDebugFlag = DEBUG_TRACE + DEBUG_SCREEN + DEBUG_FILE + DEBUG_INFO;
if (argc != 7) {
usage(argv[0]);
exit(-1);
@@ -214,17 +227,21 @@ int main(int argc, char** argv) {
int64_t rid = createSyncNode(replicaNum, myIndex, gVgId, pWal, (char*)gDir, isStandBy);
assert(rid > 0);
- if (isStandBy) {
- syncStartStandBy(rid);
- } else {
- syncStart(rid);
- }
+ syncStart(rid);
+
+ /*
+ if (isStandBy) {
+ syncStartStandBy(rid);
+ } else {
+ syncStart(rid);
+ }
+ */
SSyncNode* pSyncNode = (SSyncNode*)syncNodeAcquire(rid);
assert(pSyncNode != NULL);
if (isConfigChange) {
- configChange(rid, 3, myIndex);
+ configChange(rid, 2, myIndex);
}
//---------------------------
diff --git a/source/libs/sync/test/syncRaftCfgTest.cpp b/source/libs/sync/test/syncRaftCfgTest.cpp
index d3c06fa83e88488eb410c77c68e4ea18aff590fd..f5b24db651f9ed94a290aa2e1ea9611a11f74a04 100644
--- a/source/libs/sync/test/syncRaftCfgTest.cpp
+++ b/source/libs/sync/test/syncRaftCfgTest.cpp
@@ -15,6 +15,21 @@ void logTest() {
sFatal("--- sync log test: fatal");
}
+SRaftCfg* createRaftCfg() {
+ SRaftCfg* pCfg = (SRaftCfg*)taosMemoryMalloc(sizeof(SRaftCfg));
+ memset(pCfg, 0, sizeof(SRaftCfg));
+
+ pCfg->cfg.replicaNum = 3;
+ pCfg->cfg.myIndex = 1;
+ for (int i = 0; i < pCfg->cfg.replicaNum; ++i) {
+ ((pCfg->cfg.nodeInfo)[i]).nodePort = i * 100;
+ snprintf(((pCfg->cfg.nodeInfo)[i]).nodeFqdn, sizeof(((pCfg->cfg.nodeInfo)[i]).nodeFqdn), "100.200.300.%d", i);
+ }
+ pCfg->isStandBy = taosGetTimestampSec() % 100;
+
+ return pCfg;
+}
+
SSyncCfg* createSyncCfg() {
SSyncCfg* pCfg = (SSyncCfg*)taosMemoryMalloc(sizeof(SSyncCfg));
memset(pCfg, 0, sizeof(SSyncCfg));
@@ -56,7 +71,7 @@ void test3() {
if (taosCheckExistFile(s)) {
printf("%s file: %s already exist! \n", (char*)__FUNCTION__, s);
} else {
- syncCfgCreateFile(pCfg, s);
+ raftCfgCreateFile(pCfg, 7, s);
printf("%s create json file: %s \n", (char*)__FUNCTION__, s);
}
@@ -78,6 +93,7 @@ void test5() {
assert(pCfg != NULL);
pCfg->cfg.myIndex = taosGetTimestampSec();
+ pCfg->isStandBy += 2;
raftCfgPersist(pCfg);
printf("%s update json file: %s myIndex->%d \n", (char*)__FUNCTION__, "./test3_raft_cfg.json", pCfg->cfg.myIndex);
diff --git a/source/libs/sync/test/syncSnapshotTest.cpp b/source/libs/sync/test/syncSnapshotTest.cpp
index 8ccd69890708781dbfb5b4a3ae835acc5c17d15c..820500e2d8f8b57427fec1f20741755a2ddc2d5c 100644
--- a/source/libs/sync/test/syncSnapshotTest.cpp
+++ b/source/libs/sync/test/syncSnapshotTest.cpp
@@ -75,6 +75,7 @@ int32_t GetSnapshotCb(struct SSyncFSM *pFsm, SSnapshot *pSnapshot) {
void initFsm() {
pFsm = (SSyncFSM *)taosMemoryMalloc(sizeof(SSyncFSM));
+ memset(pFsm, 0, sizeof(*pFsm));
pFsm->FpCommitCb = CommitCb;
pFsm->FpPreCommitCb = PreCommitCb;
pFsm->FpRollBackCb = RollBackCb;
diff --git a/source/libs/sync/test/syncTest.cpp b/source/libs/sync/test/syncTest.cpp
index 76024e061effc99fe744fac4d7266a1fd94a9207..ffe8b81571beae6ead52398f1a0f1faf7067ebf0 100644
--- a/source/libs/sync/test/syncTest.cpp
+++ b/source/libs/sync/test/syncTest.cpp
@@ -49,7 +49,7 @@ void test4() {
logTest((char*)__FUNCTION__);
}
-int main() {
+int main(int argc, char** argv) {
// taosInitLog("tmp/syncTest.log", 100);
tsAsyncLog = 0;
@@ -58,6 +58,14 @@ int main() {
test3();
test4();
+ if (argc == 2) {
+ bool bTaosDirExist = taosDirExist(argv[1]);
+ printf("%s bTaosDirExist:%d \n", argv[1], bTaosDirExist);
+
+ bool bTaosCheckExistFile = taosCheckExistFile(argv[1]);
+ printf("%s bTaosCheckExistFile:%d \n", argv[1], bTaosCheckExistFile);
+ }
+
// taosCloseLog();
return 0;
}
diff --git a/source/libs/tdb/src/db/tdbTable.c b/source/libs/tdb/src/db/tdbTable.c
index 7211fe492630b4bf036c52067ca7c7ae175823b9..239aa5d7ef786b0941e857bf9e3f73a655f65d5a 100644
--- a/source/libs/tdb/src/db/tdbTable.c
+++ b/source/libs/tdb/src/db/tdbTable.c
@@ -16,7 +16,7 @@
#include "tdbInt.h"
struct STTB {
- TDB *pEnv;
+ TDB * pEnv;
SBTree *pBt;
};
@@ -25,11 +25,11 @@ struct STBC {
};
int tdbTbOpen(const char *tbname, int keyLen, int valLen, tdb_cmpr_fn_t keyCmprFn, TDB *pEnv, TTB **ppTb) {
- TTB *pTb;
+ TTB * pTb;
SPager *pPager;
int ret;
char fFullName[TDB_FILENAME_LEN];
- SPage *pPage;
+ SPage * pPage;
SPgno pgno;
*ppTb = NULL;
@@ -145,4 +145,4 @@ int tdbTbcClose(TBC *pTbc) {
return 0;
}
-int tdbTbcIsValid(TBC *pTbc) { return tdbBtcIsValid(&pTbc->btc); }
\ No newline at end of file
+int tdbTbcIsValid(TBC *pTbc) { return tdbBtcIsValid(&pTbc->btc); }
diff --git a/source/libs/tdb/src/inc/tdbInt.h b/source/libs/tdb/src/inc/tdbInt.h
index 9f0267da93fca6db1b35844e77fdf8877eb33847..6524e3c9bcd873180378b5cfea2404b1a461ac7b 100644
--- a/source/libs/tdb/src/inc/tdbInt.h
+++ b/source/libs/tdb/src/inc/tdbInt.h
@@ -55,8 +55,8 @@ typedef u32 SPgno;
#define TDB_PUT_U24(p, v) \
do { \
int tv = (v); \
- (p)[2] = tv & 0xff; \
- (p)[1] = (tv >> 8) & 0xff; \
+ (p)[1] = tv & 0xff; \
+ (p)[2] = (tv >> 8) & 0xff; \
(p)[0] = (tv >> 16) & 0xff; \
} while (0)
diff --git a/source/libs/transport/inc/transComm.h b/source/libs/transport/inc/transComm.h
index 30f799f39ec046b4819a35f9adaec06ff8f6b81f..e680e3004283684b0d95c1fd0124f33e99d59d3b 100644
--- a/source/libs/transport/inc/transComm.h
+++ b/source/libs/transport/inc/transComm.h
@@ -95,8 +95,8 @@ typedef void* queue[2];
#define QUEUE_DATA(e, type, field) ((type*)((void*)((char*)(e)-offsetof(type, field))))
#define TRANS_RETRY_COUNT_LIMIT 100 // retry count limit
-#define TRANS_RETRY_INTERVAL 15 // ms retry interval
-#define TRANS_CONN_TIMEOUT 3 // connect timeout
+#define TRANS_RETRY_INTERVAL 15 // ms retry interval
+#define TRANS_CONN_TIMEOUT 3 // connect timeout
typedef SRpcMsg STransMsg;
typedef SRpcCtx STransCtx;
@@ -104,8 +104,16 @@ typedef SRpcCtxVal STransCtxVal;
typedef SRpcInfo STrans;
typedef SRpcConnInfo STransHandleInfo;
+/*convet from fqdn to ip */
+typedef struct SCvtAddr {
+ char ip[TSDB_FQDN_LEN];
+ char fqdn[TSDB_FQDN_LEN];
+ bool cvt;
+} SCvtAddr;
+
typedef struct {
- SEpSet epSet; // ip list provided by app
+ SEpSet epSet; // ip list provided by app
+ SEpSet origEpSet;
void* ahandle; // handle provided by app
tmsg_t msgType; // message type
int8_t connType; // connection type cli/srv
@@ -115,6 +123,7 @@ typedef struct {
STransCtx appCtx; //
STransMsg* pRsp; // for synchronous API
tsem_t* pSem; // for synchronous API
+ SCvtAddr cvtAddr;
int hThrdIdx;
} STransConnCtx;
@@ -155,7 +164,7 @@ typedef struct {
#pragma pack(pop)
-typedef enum { Normal, Quit, Release, Register } STransMsgType;
+typedef enum { Normal, Quit, Release, Register, Update } STransMsgType;
typedef enum { ConnNormal, ConnAcquire, ConnRelease, ConnBroken, ConnInPool } ConnStatus;
#define container_of(ptr, type, member) ((type*)((char*)(ptr)-offsetof(type, member)))
@@ -209,6 +218,22 @@ SAsyncPool* transCreateAsyncPool(uv_loop_t* loop, int sz, void* arg, AsyncCB cb)
void transDestroyAsyncPool(SAsyncPool* pool);
int transSendAsync(SAsyncPool* pool, queue* mq);
+#define TRANS_DESTROY_ASYNC_POOL_MSG(pool, msgType, freeFunc) \
+ do { \
+ for (int i = 0; i < pool->nAsync; i++) { \
+ uv_async_t* async = &(pool->asyncs[i]); \
+ SAsyncItem* item = async->data; \
+ while (!QUEUE_IS_EMPTY(&item->qmsg)) { \
+ tTrace("destroy msg in async pool "); \
+ queue* h = QUEUE_HEAD(&item->qmsg); \
+ QUEUE_REMOVE(h); \
+ msgType* msg = QUEUE_DATA(h, msgType, q); \
+ if (msg != NULL) { \
+ freeFunc(msg); \
+ } \
+ } \
+ } \
+ } while (0)
int transInitBuffer(SConnBuffer* buf);
int transClearBuffer(SConnBuffer* buf);
int transDestroyBuffer(SConnBuffer* buf);
@@ -231,6 +256,7 @@ void transSendRecv(void* shandle, const SEpSet* pEpSet, STransMsg* pMsg, STransM
void transSendResponse(const STransMsg* msg);
void transRegisterMsg(const STransMsg* msg);
int transGetConnInfo(void* thandle, STransHandleInfo* pInfo);
+void transSetDefaultAddr(void* shandle, const char* ip, const char* fqdn);
void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads, void* fp, void* shandle);
void* transInitClient(uint32_t ip, uint32_t port, char* label, int numOfThreads, void* fp, void* shandle);
@@ -318,11 +344,30 @@ void transDQDestroy(SDelayQueue* queue);
int transDQSched(SDelayQueue* queue, void (*func)(void* arg), void* arg, uint64_t timeoutMs);
+void transPrintEpSet(SEpSet* pEpSet);
+bool transEpSetIsEqual(SEpSet* a, SEpSet* b);
/*
* init global func
*/
void transThreadOnce();
+// ref mgt
+// handle
+typedef struct SExHandle {
+ void* handle;
+ int64_t refId;
+ void* pThrd;
+} SExHandle;
+
+void transInitEnv();
+int32_t transOpenExHandleMgt(int size);
+void transCloseExHandleMgt(int32_t mgt);
+int64_t transAddExHandle(int32_t mgt, void* p);
+int32_t transRemoveExHandle(int32_t mgt, int64_t refId);
+SExHandle* transAcquireExHandle(int32_t mgt, int64_t refId);
+int32_t transReleaseExHandle(int32_t mgt, int64_t refId);
+void transDestoryExHandle(void* handle);
+
#ifdef __cplusplus
}
#endif
diff --git a/source/libs/transport/inc/transportInt.h b/source/libs/transport/inc/transportInt.h
index 8aeae1b5ade26a1a320dae37cbfe67f676f66eeb..c328629c4b1ba18564918ede4b5b9e4ecc62ad83 100644
--- a/source/libs/transport/inc/transportInt.h
+++ b/source/libs/transport/inc/transportInt.h
@@ -22,13 +22,13 @@
#include "lz4.h"
#include "os.h"
#include "taoserror.h"
+#include "tglobal.h"
#include "thash.h"
-#include "tref.h"
#include "tmsg.h"
#include "transLog.h"
+#include "tref.h"
#include "trpc.h"
#include "tutil.h"
-#include "tglobal.h"
#ifdef __cplusplus
extern "C" {
@@ -55,9 +55,9 @@ typedef struct {
bool (*retry)(int32_t code);
int index;
- int32_t refCount;
void* parent;
void* tcphandle; // returned handle from TCP initialization
+ int32_t refMgt;
TdThreadMutex mutex;
} SRpcInfo;
diff --git a/source/libs/transport/src/trans.c b/source/libs/transport/src/trans.c
index 9e71c87fa5289d2af6d71639c313d208fe6d9b37..925de2f3219672e40f270b92b754718a93f23f02 100644
--- a/source/libs/transport/src/trans.c
+++ b/source/libs/transport/src/trans.c
@@ -27,7 +27,17 @@ void (*taosUnRefHandle[])(void* handle) = {transUnrefSrvHandle, transUnrefCliHan
void (*transReleaseHandle[])(void* handle) = {transReleaseSrvHandle, transReleaseCliHandle};
+static int32_t transValidLocalFqdn(const char* localFqdn, uint32_t* ip) {
+ *ip = taosGetIpv4FromFqdn(localFqdn);
+ if (*ip == 0xFFFFFFFF) {
+ terrno = TSDB_CODE_RPC_FQDN_ERROR;
+ return -1;
+ }
+ return 0;
+}
void* rpcOpen(const SRpcInit* pInit) {
+ transInitEnv();
+
SRpcInfo* pRpc = taosMemoryCalloc(1, sizeof(SRpcInfo));
if (pRpc == NULL) {
return NULL;
@@ -35,7 +45,6 @@ void* rpcOpen(const SRpcInit* pInit) {
if (pInit->label) {
tstrncpy(pRpc->label, pInit->label, strlen(pInit->label) + 1);
}
-
// register callback handle
pRpc->cfp = pInit->cfp;
pRpc->retry = pInit->rfp;
@@ -48,10 +57,8 @@ void* rpcOpen(const SRpcInit* pInit) {
uint32_t ip = 0;
if (pInit->connType == TAOS_CONN_SERVER) {
- ip = taosGetIpv4FromFqdn(pInit->localFqdn);
- if (ip == 0xFFFFFFFF) {
- tError("invalid fqdn: %s", pInit->localFqdn);
- terrno = TSDB_CODE_RPC_FQDN_ERROR;
+ if (transValidLocalFqdn(pInit->localFqdn, &ip) != 0) {
+ tError("invalid fqdn: %s, errmsg: %s", pInit->localFqdn, terrstr());
taosMemoryFree(pRpc);
return NULL;
}
@@ -74,7 +81,9 @@ void* rpcOpen(const SRpcInit* pInit) {
void rpcClose(void* arg) {
SRpcInfo* pRpc = (SRpcInfo*)arg;
(*taosCloseHandle[pRpc->connType])(pRpc->tcphandle);
+ transCloseExHandleMgt(pRpc->refMgt);
taosMemoryFree(pRpc);
+
return;
}
@@ -149,6 +158,11 @@ void rpcReleaseHandle(void* handle, int8_t type) {
(*transReleaseHandle[type])(handle);
}
+void rpcSetDefaultAddr(void* thandle, const char* ip, const char* fqdn) {
+ // later
+ transSetDefaultAddr(thandle, ip, fqdn);
+}
+
int32_t rpcInit() {
// impl later
return 0;
diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c
index 92c5e9faf70f95741c52803be1680b97d33f21fa..580ab30b7800c05d6f8463700cc208cd850b9a8e 100644
--- a/source/libs/transport/src/transCli.c
+++ b/source/libs/transport/src/transCli.c
@@ -15,6 +15,9 @@
#ifdef USE_UV
#include "transComm.h"
+static int32_t transSCliInst = 0;
+static int32_t refMgt = 0;
+
typedef struct SCliConn {
T_REF_DECLARE()
uv_connect_t connReq;
@@ -63,7 +66,10 @@ typedef struct SCliThrdObj {
SDelayQueue* delayQueue;
uint64_t nextTimeout; // next timeout
void* pTransInst; //
- bool quit;
+
+ SCvtAddr cvtAddr;
+
+ bool quit;
} SCliThrdObj;
typedef struct SCliObj {
@@ -103,6 +109,7 @@ static void cliDestroyConn(SCliConn* pConn, bool clear /*clear tcp handle o
static void cliDestroy(uv_handle_t* handle);
static void cliSend(SCliConn* pConn);
+void cliMayCvtFqdnToIp(SEpSet* pEpSet, SCvtAddr* pCvtAddr);
/*
* set TCP connection timeout per-socket level
*/
@@ -116,7 +123,9 @@ static void cliHandleExcept(SCliConn* conn);
static void cliHandleReq(SCliMsg* pMsg, SCliThrdObj* pThrd);
static void cliHandleQuit(SCliMsg* pMsg, SCliThrdObj* pThrd);
static void cliHandleRelease(SCliMsg* pMsg, SCliThrdObj* pThrd);
-static void (*cliAsyncHandle[])(SCliMsg* pMsg, SCliThrdObj* pThrd) = {cliHandleReq, cliHandleQuit, cliHandleRelease};
+static void cliHandleUpdate(SCliMsg* pMsg, SCliThrdObj* pThrd);
+static void (*cliAsyncHandle[])(SCliMsg* pMsg, SCliThrdObj* pThrd) = {cliHandleReq, cliHandleQuit, cliHandleRelease,
+ NULL, cliHandleUpdate};
static void cliSendQuit(SCliThrdObj* thrd);
static void destroyUserdata(STransMsg* userdata);
@@ -131,6 +140,19 @@ static void destroyThrdObj(SCliThrdObj* pThrd);
static void cliWalkCb(uv_handle_t* handle, void* arg);
+static void cliReleaseUnfinishedMsg(SCliConn* conn) {
+ SCliMsg* pMsg = NULL;
+ for (int i = 0; i < transQueueSize(&conn->cliMsgs); i++) {
+ pMsg = transQueueGet(&conn->cliMsgs, i);
+ if (pMsg != NULL && pMsg->ctx != NULL) {
+ if (conn->ctx.freeFunc != NULL) {
+ conn->ctx.freeFunc(pMsg->ctx->ahandle);
+ }
+ }
+ destroyCmsg(pMsg);
+ }
+}
+
#define CLI_RELEASE_UV(loop) \
do { \
uv_walk(loop, cliWalkCb, NULL); \
@@ -161,6 +183,7 @@ static void cliWalkCb(uv_handle_t* handle, void* arg);
transUnrefCliHandle(conn); \
} \
destroyCmsg(pMsg); \
+ cliReleaseUnfinishedMsg(conn); \
addConnToPool(((SCliThrdObj*)conn->hostThrd)->pool, conn); \
return; \
} \
@@ -465,8 +488,8 @@ static void addConnToPool(void* pool, SCliConn* conn) {
STrans* pTransInst = ((SCliThrdObj*)conn->hostThrd)->pTransInst;
conn->expireTime = taosGetTimestampMs() + CONN_PERSIST_TIME(pTransInst->idleTime);
- transCtxCleanup(&conn->ctx);
transQueueClear(&conn->cliMsgs);
+ transCtxCleanup(&conn->ctx);
conn->status = ConnInPool;
char key[128] = {0};
@@ -683,6 +706,12 @@ static void cliHandleRelease(SCliMsg* pMsg, SCliThrdObj* pThrd) {
transUnrefCliHandle(conn);
}
}
+static void cliHandleUpdate(SCliMsg* pMsg, SCliThrdObj* pThrd) {
+ STransConnCtx* pCtx = pMsg->ctx;
+
+ pThrd->cvtAddr = pCtx->cvtAddr;
+ destroyCmsg(pMsg);
+}
SCliConn* cliGetConn(SCliMsg* pMsg, SCliThrdObj* pThrd) {
SCliConn* conn = NULL;
@@ -702,7 +731,17 @@ SCliConn* cliGetConn(SCliMsg* pMsg, SCliThrdObj* pThrd) {
}
return conn;
}
-
+void cliMayCvtFqdnToIp(SEpSet* pEpSet, SCvtAddr* pCvtAddr) {
+ if (pCvtAddr->cvt == false) {
+ return;
+ }
+ for (int i = 0; i < pEpSet->numOfEps && pEpSet->numOfEps == 1; i++) {
+ if (strncmp(pEpSet->eps[i].fqdn, pCvtAddr->fqdn, TSDB_FQDN_LEN) == 0) {
+ memset(pEpSet->eps[i].fqdn, 0, TSDB_FQDN_LEN);
+ memcpy(pEpSet->eps[i].fqdn, pCvtAddr->ip, TSDB_FQDN_LEN);
+ }
+ }
+}
void cliHandleReq(SCliMsg* pMsg, SCliThrdObj* pThrd) {
uint64_t et = taosGetTimestampUs();
uint64_t el = et - pMsg->st;
@@ -712,6 +751,8 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrdObj* pThrd) {
STransConnCtx* pCtx = pMsg->ctx;
STrans* pTransInst = pThrd->pTransInst;
+ cliMayCvtFqdnToIp(&pCtx->epSet, &pThrd->cvtAddr);
+
SCliConn* conn = cliGetConn(pMsg, pThrd);
if (conn != NULL) {
conn->hThrdIdx = pCtx->hThrdIdx;
@@ -808,6 +849,11 @@ void* transInitClient(uint32_t ip, uint32_t port, char* label, int numOfThreads,
}
cli->pThreadObj[i] = pThrd;
}
+ int ref = atomic_add_fetch_32(&transSCliInst, 1);
+ if (ref == 1) {
+ refMgt = transOpenExHandleMgt(50000);
+ }
+
return cli;
}
@@ -841,7 +887,6 @@ static SCliThrdObj* createThrdObj() {
pThrd->timer.data = pThrd;
pThrd->pool = createConnPool(4);
-
transDQCreate(pThrd->loop, &pThrd->delayQueue);
pThrd->quit = false;
@@ -855,6 +900,7 @@ static void destroyThrdObj(SCliThrdObj* pThrd) {
taosThreadJoin(pThrd->thread, NULL);
CLI_RELEASE_UV(pThrd->loop);
taosThreadMutexDestroy(&pThrd->msgMtx);
+ TRANS_DESTROY_ASYNC_POOL_MSG(pThrd->asyncPool, SCliMsg, destroyCmsg);
transDestroyAsyncPool(pThrd->asyncPool);
transDQDestroy(pThrd->delayQueue);
@@ -907,16 +953,22 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) {
STransConnCtx* pCtx = pMsg->ctx;
SEpSet* pEpSet = &pCtx->epSet;
+ transPrintEpSet(pEpSet);
+
+ if (pCtx->retryCount == 0) {
+ pCtx->origEpSet = pCtx->epSet;
+ }
/*
* upper layer handle retry if code equal TSDB_CODE_RPC_NETWORK_UNAVAIL
*/
tmsg_t msgType = pCtx->msgType;
- if ((pTransInst->retry != NULL && (pTransInst->retry(pResp->code))) ||
- ((pResp->code == TSDB_CODE_RPC_NETWORK_UNAVAIL) && msgType == TDMT_MND_CONNECT)) {
+ if ((pTransInst->retry != NULL && pEpSet->numOfEps > 1 && (pTransInst->retry(pResp->code))) ||
+ (pResp->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || pResp->code == TSDB_CODE_APP_NOT_READY ||
+ pResp->code == TSDB_CODE_NODE_NOT_DEPLOYED || pResp->code == TSDB_CODE_SYN_NOT_LEADER)) {
pMsg->sent = 0;
pMsg->st = taosGetTimestampUs();
pCtx->retryCount += 1;
- if (msgType == TDMT_MND_CONNECT && pResp->code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
+ if (pResp->code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
if (pCtx->retryCount < pEpSet->numOfEps) {
pEpSet->inUse = (++pEpSet->inUse) % pEpSet->numOfEps;
@@ -931,9 +983,9 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) {
if (pResp->contLen == 0) {
pEpSet->inUse = (++pEpSet->inUse) % pEpSet->numOfEps;
} else {
- SMEpSet emsg = {0};
- tDeserializeSMEpSet(pResp->pCont, pResp->contLen, &emsg);
- pCtx->epSet = emsg.epSet;
+ SEpSet epSet = {0};
+ tDeserializeSEpSet(pResp->pCont, pResp->contLen, &epSet);
+ pCtx->epSet = epSet;
}
addConnToPool(pThrd->pool, pConn);
tTrace("use remote epset, current in use: %d, retry count:%d, try limit: %d", pEpSet->inUse, pCtx->retryCount + 1,
@@ -958,7 +1010,11 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) {
pCtx->pRsp = NULL;
} else {
tTrace("%s cli conn %p handle resp", pTransInst->label, pConn);
- pTransInst->cfp(pTransInst->parent, pResp, pEpSet);
+ if (pResp->code != 0 || pCtx->retryCount == 0 || transEpSetIsEqual(&pCtx->epSet, &pCtx->origEpSet)) {
+ pTransInst->cfp(pTransInst->parent, pResp, NULL);
+ } else {
+ pTransInst->cfp(pTransInst->parent, pResp, pEpSet);
+ }
}
return 0;
}
@@ -971,6 +1027,10 @@ void transCloseClient(void* arg) {
}
taosMemoryFree(cli->pThreadObj);
taosMemoryFree(cli);
+ int ref = atomic_sub_fetch_32(&transSCliInst, 1);
+ if (ref == 0) {
+ transCloseExHandleMgt(refMgt);
+ }
}
void transRefCliHandle(void* handle) {
if (handle == NULL) {
@@ -1067,4 +1127,32 @@ void transSendRecv(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STransM
taosMemoryFree(pSem);
}
+/*
+ *
+ **/
+void transSetDefaultAddr(void* ahandle, const char* ip, const char* fqdn) {
+ STrans* pTransInst = ahandle;
+
+ SCvtAddr cvtAddr = {0};
+ if (ip != NULL && fqdn != NULL) {
+ memcpy(cvtAddr.ip, ip, strlen(ip));
+ memcpy(cvtAddr.fqdn, fqdn, strlen(fqdn));
+ cvtAddr.cvt = true;
+ }
+ for (int i = 0; i < pTransInst->numOfThreads; i++) {
+ STransConnCtx* pCtx = taosMemoryCalloc(1, sizeof(STransConnCtx));
+ pCtx->hThrdIdx = i;
+ pCtx->cvtAddr = cvtAddr;
+
+ SCliMsg* cliMsg = taosMemoryCalloc(1, sizeof(SCliMsg));
+ cliMsg->ctx = pCtx;
+ cliMsg->type = Update;
+
+ SCliThrdObj* thrd = ((SCliObj*)pTransInst->tcphandle)->pThreadObj[i];
+ tDebug("update epset at thread:%d, threadID:%" PRId64 "", i, thrd->thread);
+
+ tsem_t* pSem = pCtx->pSem;
+ transSendAsync(thrd->asyncPool, &(cliMsg->q));
+ }
+}
#endif
diff --git a/source/libs/transport/src/transComm.c b/source/libs/transport/src/transComm.c
index 7014cc481f6f3908793ea2f6fc074a04fbe7472b..a04e8b5fca05b7e02b58bbd4b5abfc528a5289e9 100644
--- a/source/libs/transport/src/transComm.c
+++ b/source/libs/transport/src/transComm.c
@@ -190,6 +190,7 @@ SAsyncPool* transCreateAsyncPool(uv_loop_t* loop, int sz, void* arg, AsyncCB cb)
}
return pool;
}
+
void transDestroyAsyncPool(SAsyncPool* pool) {
for (int i = 0; i < pool->nAsync; i++) {
uv_async_t* async = &(pool->asyncs[i]);
@@ -233,7 +234,7 @@ void transCtxCleanup(STransCtx* ctx) {
STransCtxVal* iter = taosHashIterate(ctx->args, NULL);
while (iter) {
- iter->freeFunc(iter->val);
+ ctx->freeFunc(iter->val);
iter = taosHashIterate(ctx->args, iter);
}
@@ -245,6 +246,7 @@ void transCtxMerge(STransCtx* dst, STransCtx* src) {
if (dst->args == NULL) {
dst->args = src->args;
dst->brokenVal = src->brokenVal;
+ dst->freeFunc = src->freeFunc;
src->args = NULL;
return;
}
@@ -257,7 +259,7 @@ void transCtxMerge(STransCtx* dst, STransCtx* src) {
STransCtxVal* dVal = taosHashGet(dst->args, key, klen);
if (dVal) {
- dVal->freeFunc(dVal->val);
+ dst->freeFunc(dVal->val);
}
taosHashPut(dst->args, key, klen, sVal, sizeof(*sVal));
iter = taosHashIterate(src->args, iter);
@@ -445,4 +447,64 @@ int transDQSched(SDelayQueue* queue, void (*func)(void* arg), void* arg, uint64_
uv_timer_start(queue->timer, transDQTimeout, timeoutMs, 0);
return 0;
}
+
+void transPrintEpSet(SEpSet* pEpSet) {
+ if (pEpSet == NULL) {
+ tTrace("NULL epset");
+ return;
+ }
+ tTrace("epset begin inUse: %d", pEpSet->inUse);
+ for (int i = 0; i < pEpSet->numOfEps; i++) {
+ tTrace("ip: %s, port: %d", pEpSet->eps[i].fqdn, pEpSet->eps[i].port);
+ }
+ tTrace("epset end");
+}
+bool transEpSetIsEqual(SEpSet* a, SEpSet* b) {
+ if (a->numOfEps != b->numOfEps || a->inUse != b->inUse) {
+ return false;
+ }
+ for (int i = 0; i < a->numOfEps; i++) {
+ if (strncmp(a->eps[i].fqdn, b->eps[i].fqdn, TSDB_FQDN_LEN) != 0 || a->eps[i].port != b->eps[i].port) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void transInitEnv() {
+ //
+ uv_os_setenv("UV_TCP_SINGLE_ACCEPT", "1");
+}
+int32_t transOpenExHandleMgt(int size) {
+ // added into once later
+ return taosOpenRef(size, transDestoryExHandle);
+}
+void transCloseExHandleMgt(int32_t mgt) {
+ // close ref
+ taosCloseRef(mgt);
+}
+int64_t transAddExHandle(int32_t mgt, void* p) {
+ // acquire extern handle
+ return taosAddRef(mgt, p);
+}
+int32_t transRemoveExHandle(int32_t mgt, int64_t refId) {
+ // acquire extern handle
+ return taosRemoveRef(mgt, refId);
+}
+
+SExHandle* transAcquireExHandle(int32_t mgt, int64_t refId) {
+ // acquire extern handle
+ return (SExHandle*)taosAcquireRef(mgt, refId);
+}
+
+int32_t transReleaseExHandle(int32_t mgt, int64_t refId) {
+ // release extern handle
+ return taosReleaseRef(mgt, refId);
+}
+void transDestoryExHandle(void* handle) {
+ if (handle == NULL) {
+ return;
+ }
+ taosMemoryFree(handle);
+}
#endif
diff --git a/source/libs/transport/src/transSrv.c b/source/libs/transport/src/transSvr.c
similarity index 85%
rename from source/libs/transport/src/transSrv.c
rename to source/libs/transport/src/transSvr.c
index 36f5cf98150e5636b43eb35b819d5bcd9288fe6a..608fd00b2cda7c9508275cd4487496295b9e0711 100644
--- a/source/libs/transport/src/transSrv.c
+++ b/source/libs/transport/src/transSvr.c
@@ -19,16 +19,17 @@
static TdThreadOnce transModuleInit = PTHREAD_ONCE_INIT;
-static char* notify = "a";
-static int transSrvInst = 0;
+static char* notify = "a";
+static int32_t tranSSvrInst = 0;
+static int32_t refMgt = 0;
typedef struct {
int notifyCount; //
int init; // init or not
STransMsg msg;
-} SSrvRegArg;
+} SSvrRegArg;
-typedef struct SSrvConn {
+typedef struct SSvrConn {
T_REF_DECLARE()
uv_tcp_t* pTcp;
uv_write_t pWriter;
@@ -42,7 +43,7 @@ typedef struct SSrvConn {
void* hostThrd;
STransQueue srvMsgs;
- SSrvRegArg regArg;
+ SSvrRegArg regArg;
bool broken; // conn broken;
ConnStatus status;
@@ -55,14 +56,14 @@ typedef struct SSrvConn {
char user[TSDB_UNI_LEN]; // user ID for the link
char secret[TSDB_PASSWORD_LEN];
char ckey[TSDB_PASSWORD_LEN]; // ciphering key
-} SSrvConn;
+} SSvrConn;
-typedef struct SSrvMsg {
- SSrvConn* pConn;
+typedef struct SSvrMsg {
+ SSvrConn* pConn;
STransMsg msg;
queue q;
STransMsgType type;
-} SSrvMsg;
+} SSvrMsg;
typedef struct SWorkThrdObj {
TdThread thread;
@@ -99,13 +100,6 @@ typedef struct SServerObj {
bool inited;
} SServerObj;
-// handle
-typedef struct SExHandle {
- void* handle;
- int64_t refId;
- SWorkThrdObj* pThrd;
-} SExHandle;
-
static void uvAllocConnBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf);
static void uvAllocRecvBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf);
static void uvOnRecvCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf);
@@ -127,37 +121,37 @@ static void uvWorkAfterTask(uv_work_t* req, int status);
static void uvWalkCb(uv_handle_t* handle, void* arg);
static void uvFreeCb(uv_handle_t* handle);
-static void uvStartSendRespInternal(SSrvMsg* smsg);
-static void uvPrepareSendData(SSrvMsg* msg, uv_buf_t* wb);
-static void uvStartSendResp(SSrvMsg* msg);
+static void uvStartSendRespInternal(SSvrMsg* smsg);
+static void uvPrepareSendData(SSvrMsg* msg, uv_buf_t* wb);
+static void uvStartSendResp(SSvrMsg* msg);
-static void uvNotifyLinkBrokenToApp(SSrvConn* conn);
+static void uvNotifyLinkBrokenToApp(SSvrConn* conn);
-static void destroySmsg(SSrvMsg* smsg);
+static void destroySmsg(SSvrMsg* smsg);
// check whether already read complete packet
-static SSrvConn* createConn(void* hThrd);
-static void destroyConn(SSrvConn* conn, bool clear /*clear handle or not*/);
-static void destroyConnRegArg(SSrvConn* conn);
+static SSvrConn* createConn(void* hThrd);
+static void destroyConn(SSvrConn* conn, bool clear /*clear handle or not*/);
+static void destroyConnRegArg(SSvrConn* conn);
-static int reallocConnRefHandle(SSrvConn* conn);
+static int reallocConnRefHandle(SSvrConn* conn);
-static void uvHandleQuit(SSrvMsg* msg, SWorkThrdObj* thrd);
-static void uvHandleRelease(SSrvMsg* msg, SWorkThrdObj* thrd);
-static void uvHandleResp(SSrvMsg* msg, SWorkThrdObj* thrd);
-static void uvHandleRegister(SSrvMsg* msg, SWorkThrdObj* thrd);
-static void (*transAsyncHandle[])(SSrvMsg* msg, SWorkThrdObj* thrd) = {uvHandleResp, uvHandleQuit, uvHandleRelease,
- uvHandleRegister};
+static void uvHandleQuit(SSvrMsg* msg, SWorkThrdObj* thrd);
+static void uvHandleRelease(SSvrMsg* msg, SWorkThrdObj* thrd);
+static void uvHandleResp(SSvrMsg* msg, SWorkThrdObj* thrd);
+static void uvHandleRegister(SSvrMsg* msg, SWorkThrdObj* thrd);
+static void (*transAsyncHandle[])(SSvrMsg* msg, SWorkThrdObj* thrd) = {uvHandleResp, uvHandleQuit, uvHandleRelease,
+ uvHandleRegister, NULL};
static int32_t exHandlesMgt;
-void uvInitEnv();
-void uvOpenExHandleMgt(int size);
-void uvCloseExHandleMgt();
-int64_t uvAddExHandle(void* p);
-int32_t uvRemoveExHandle(int64_t refId);
-int32_t uvReleaseExHandle(int64_t refId);
-void uvDestoryExHandle(void* handle);
-SExHandle* uvAcquireExHandle(int64_t refId);
+// void uvInitEnv();
+// void uvOpenExHandleMgt(int size);
+// void uvCloseExHandleMgt();
+// int64_t uvAddExHandle(void* p);
+// int32_t uvRemoveExHandle(int64_t refId);
+// int32_t uvReleaseExHandle(int64_t refId);
+// void uvDestoryExHandle(void* handle);
+// SExHandle* uvAcquireExHandle(int64_t refId);
static void uvDestroyConn(uv_handle_t* handle);
@@ -178,7 +172,7 @@ static bool addHandleToAcceptloop(void* arg);
tTrace("server conn %p received release request", conn); \
\
STransMsg tmsg = {.code = 0, .info.handle = (void*)conn, .info.ahandle = NULL}; \
- SSrvMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSrvMsg)); \
+ SSvrMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSvrMsg)); \
srvMsg->msg = tmsg; \
srvMsg->type = Release; \
srvMsg->pConn = conn; \
@@ -210,7 +204,7 @@ static bool addHandleToAcceptloop(void* arg);
do { \
if (refId > 0) { \
tTrace("server handle step1"); \
- SExHandle* exh2 = uvAcquireExHandle(refId); \
+ SExHandle* exh2 = transAcquireExHandle(refMgt, refId); \
if (exh2 == NULL || refId != exh2->refId) { \
tTrace("server handle %p except, may already freed, ignore msg, ref1: %" PRIu64 ", ref2 : %" PRIu64 "", exh1, \
exh2 ? exh2->refId : 0, refId); \
@@ -218,7 +212,7 @@ static bool addHandleToAcceptloop(void* arg);
} \
} else if (refId == 0) { \
tTrace("server handle step2"); \
- SExHandle* exh2 = uvAcquireExHandle(refId); \
+ SExHandle* exh2 = transAcquireExHandle(refMgt, refId); \
if (exh2 == NULL || refId != exh2->refId) { \
tTrace("server handle %p except, may already freed, ignore msg, ref1: %" PRIu64 ", ref2 : %" PRIu64 "", exh1, \
refId, exh2 ? exh2->refId : 0); \
@@ -233,18 +227,18 @@ static bool addHandleToAcceptloop(void* arg);
} while (0)
void uvAllocRecvBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf) {
- SSrvConn* conn = handle->data;
+ SSvrConn* conn = handle->data;
SConnBuffer* pBuf = &conn->readBuf;
transAllocBuffer(pBuf, buf);
}
// refers specifically to query or insert timeout
static void uvHandleActivityTimeout(uv_timer_t* handle) {
- SSrvConn* conn = handle->data;
+ SSvrConn* conn = handle->data;
tDebug("%p timeout since no activity", conn);
}
-static void uvHandleReq(SSrvConn* pConn) {
+static void uvHandleReq(SSvrConn* pConn) {
SConnBuffer* pBuf = &pConn->readBuf;
char* msg = pBuf->buf;
uint32_t msgLen = pBuf->len;
@@ -300,14 +294,14 @@ static void uvHandleReq(SSrvConn* pConn) {
// 2. once send out data, cli conn released to conn pool immediately
// 3. not mixed with persist
- transMsg.info.handle = (void*)uvAcquireExHandle(pConn->refId);
+ transMsg.info.handle = (void*)transAcquireExHandle(refMgt, pConn->refId);
transMsg.info.refId = pConn->refId;
tTrace("server handle %p conn: %p translated to app, refId: %" PRIu64 "", transMsg.info.handle, pConn, pConn->refId);
assert(transMsg.info.handle != NULL);
if (pHead->noResp == 1) {
transMsg.info.refId = -1;
}
- uvReleaseExHandle(pConn->refId);
+ transReleaseExHandle(refMgt, pConn->refId);
STrans* pTransInst = pConn->pTransInst;
(*pTransInst->cfp)(pTransInst->parent, &transMsg, NULL);
@@ -316,7 +310,7 @@ static void uvHandleReq(SSrvConn* pConn) {
void uvOnRecvCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf) {
// opt
- SSrvConn* conn = cli->data;
+ SSvrConn* conn = cli->data;
SConnBuffer* pBuf = &conn->readBuf;
if (nread > 0) {
pBuf->len += nread;
@@ -354,17 +348,17 @@ void uvAllocConnBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* b
void uvOnTimeoutCb(uv_timer_t* handle) {
// opt
- SSrvConn* pConn = handle->data;
+ SSvrConn* pConn = handle->data;
tError("server conn %p time out", pConn);
}
void uvOnSendCb(uv_write_t* req, int status) {
- SSrvConn* conn = req->data;
+ SSvrConn* conn = req->data;
// transClearBuffer(&conn->readBuf);
if (status == 0) {
tTrace("server conn %p data already was written on stream", conn);
if (!transQueueEmpty(&conn->srvMsgs)) {
- SSrvMsg* msg = transQueuePop(&conn->srvMsgs);
+ SSvrMsg* msg = transQueuePop(&conn->srvMsgs);
// if (msg->type == Release && conn->status != ConnNormal) {
// conn->status = ConnNormal;
// transUnrefSrvHandle(conn);
@@ -376,7 +370,7 @@ void uvOnSendCb(uv_write_t* req, int status) {
destroySmsg(msg);
// send second data, just use for push
if (!transQueueEmpty(&conn->srvMsgs)) {
- msg = (SSrvMsg*)transQueueGet(&conn->srvMsgs, 0);
+ msg = (SSvrMsg*)transQueueGet(&conn->srvMsgs, 0);
if (msg->type == Register && conn->status == ConnAcquire) {
conn->regArg.notifyCount = 0;
conn->regArg.init = 1;
@@ -389,7 +383,7 @@ void uvOnSendCb(uv_write_t* req, int status) {
transQueuePop(&conn->srvMsgs);
taosMemoryFree(msg);
- msg = (SSrvMsg*)transQueueGet(&conn->srvMsgs, 0);
+ msg = (SSvrMsg*)transQueueGet(&conn->srvMsgs, 0);
if (msg != NULL) {
uvStartSendRespInternal(msg);
}
@@ -415,10 +409,10 @@ static void uvOnPipeWriteCb(uv_write_t* req, int status) {
taosMemoryFree(req);
}
-static void uvPrepareSendData(SSrvMsg* smsg, uv_buf_t* wb) {
+static void uvPrepareSendData(SSvrMsg* smsg, uv_buf_t* wb) {
tTrace("server conn %p prepare to send resp", smsg->pConn);
- SSrvConn* pConn = smsg->pConn;
+ SSvrConn* pConn = smsg->pConn;
STransMsg* pMsg = &smsg->msg;
if (pMsg->pCont == 0) {
pMsg->pCont = (void*)rpcMallocCont(0);
@@ -455,17 +449,17 @@ static void uvPrepareSendData(SSrvMsg* smsg, uv_buf_t* wb) {
wb->len = len;
}
-static void uvStartSendRespInternal(SSrvMsg* smsg) {
+static void uvStartSendRespInternal(SSvrMsg* smsg) {
uv_buf_t wb;
uvPrepareSendData(smsg, &wb);
- SSrvConn* pConn = smsg->pConn;
+ SSvrConn* pConn = smsg->pConn;
// uv_timer_stop(&pConn->pTimer);
uv_write(&pConn->pWriter, (uv_stream_t*)pConn->pTcp, &wb, 1, uvOnSendCb);
}
-static void uvStartSendResp(SSrvMsg* smsg) {
+static void uvStartSendResp(SSvrMsg* smsg) {
// impl
- SSrvConn* pConn = smsg->pConn;
+ SSvrConn* pConn = smsg->pConn;
if (pConn->broken == true) {
// persist by
@@ -485,7 +479,7 @@ static void uvStartSendResp(SSrvMsg* smsg) {
return;
}
-static void destroySmsg(SSrvMsg* smsg) {
+static void destroySmsg(SSvrMsg* smsg) {
if (smsg == NULL) {
return;
}
@@ -499,7 +493,7 @@ static void destroyAllConn(SWorkThrdObj* pThrd) {
QUEUE_REMOVE(h);
QUEUE_INIT(h);
- SSrvConn* c = QUEUE_DATA(h, SSrvConn, queue);
+ SSvrConn* c = QUEUE_DATA(h, SSvrConn, queue);
while (T_REF_VAL_GET(c) >= 2) {
transUnrefSrvHandle(c);
}
@@ -509,7 +503,7 @@ static void destroyAllConn(SWorkThrdObj* pThrd) {
void uvWorkerAsyncCb(uv_async_t* handle) {
SAsyncItem* item = handle->data;
SWorkThrdObj* pThrd = item->pThrd;
- SSrvConn* conn = NULL;
+ SSvrConn* conn = NULL;
queue wq;
// batch process to avoid to lock/unlock frequently
@@ -521,7 +515,7 @@ void uvWorkerAsyncCb(uv_async_t* handle) {
queue* head = QUEUE_HEAD(&wq);
QUEUE_REMOVE(head);
- SSrvMsg* msg = QUEUE_DATA(head, SSrvMsg, q);
+ SSvrMsg* msg = QUEUE_DATA(head, SSvrMsg, q);
if (msg == NULL) {
tError("unexcept occurred, continue");
continue;
@@ -535,15 +529,15 @@ void uvWorkerAsyncCb(uv_async_t* handle) {
SExHandle* exh1 = transMsg.info.handle;
int64_t refId = transMsg.info.refId;
- SExHandle* exh2 = uvAcquireExHandle(refId);
+ SExHandle* exh2 = transAcquireExHandle(refMgt, refId);
if (exh2 == NULL || exh1 != exh2) {
tTrace("server handle except msg %p, ignore it", exh1);
- uvReleaseExHandle(refId);
+ transReleaseExHandle(refMgt, refId);
destroySmsg(msg);
continue;
}
msg->pConn = exh1->handle;
- uvReleaseExHandle(refId);
+ transReleaseExHandle(refMgt, refId);
(*transAsyncHandle[msg->type])(msg, pThrd);
}
}
@@ -649,7 +643,7 @@ void uvOnConnectionCb(uv_stream_t* q, ssize_t nread, const uv_buf_t* buf) {
uv_handle_type pending = uv_pipe_pending_type(pipe);
assert(pending == UV_TCP);
- SSrvConn* pConn = createConn(pThrd);
+ SSvrConn* pConn = createConn(pThrd);
pConn->pTransInst = pThrd->pTransInst;
/* init conn timer*/
@@ -768,10 +762,10 @@ void* transWorkerThread(void* arg) {
return NULL;
}
-static SSrvConn* createConn(void* hThrd) {
+static SSvrConn* createConn(void* hThrd) {
SWorkThrdObj* pThrd = hThrd;
- SSrvConn* pConn = (SSrvConn*)taosMemoryCalloc(1, sizeof(SSrvConn));
+ SSvrConn* pConn = (SSvrConn*)taosMemoryCalloc(1, sizeof(SSvrConn));
QUEUE_INIT(&pConn->queue);
QUEUE_PUSH(&pThrd->conn, &pConn->queue);
@@ -785,8 +779,8 @@ static SSrvConn* createConn(void* hThrd) {
SExHandle* exh = taosMemoryMalloc(sizeof(SExHandle));
exh->handle = pConn;
exh->pThrd = pThrd;
- exh->refId = uvAddExHandle(exh);
- uvAcquireExHandle(exh->refId);
+ exh->refId = transAddExHandle(refMgt, exh);
+ transAcquireExHandle(refMgt, exh->refId);
pConn->refId = exh->refId;
transRefSrvHandle(pConn);
@@ -794,7 +788,7 @@ static SSrvConn* createConn(void* hThrd) {
return pConn;
}
-static void destroyConn(SSrvConn* conn, bool clear) {
+static void destroyConn(SSvrConn* conn, bool clear) {
if (conn == NULL) {
return;
}
@@ -808,34 +802,34 @@ static void destroyConn(SSrvConn* conn, bool clear) {
// uv_shutdown(req, (uv_stream_t*)conn->pTcp, uvShutDownCb);
}
}
-static void destroyConnRegArg(SSrvConn* conn) {
+static void destroyConnRegArg(SSvrConn* conn) {
if (conn->regArg.init == 1) {
transFreeMsg(conn->regArg.msg.pCont);
conn->regArg.init = 0;
}
}
-static int reallocConnRefHandle(SSrvConn* conn) {
- uvReleaseExHandle(conn->refId);
- uvRemoveExHandle(conn->refId);
+static int reallocConnRefHandle(SSvrConn* conn) {
+ transReleaseExHandle(refMgt, conn->refId);
+ transRemoveExHandle(refMgt, conn->refId);
// avoid app continue to send msg on invalid handle
SExHandle* exh = taosMemoryMalloc(sizeof(SExHandle));
exh->handle = conn;
exh->pThrd = conn->hostThrd;
- exh->refId = uvAddExHandle(exh);
- uvAcquireExHandle(exh->refId);
+ exh->refId = transAddExHandle(refMgt, exh);
+ transAcquireExHandle(refMgt, exh->refId);
conn->refId = exh->refId;
return 0;
}
static void uvDestroyConn(uv_handle_t* handle) {
- SSrvConn* conn = handle->data;
+ SSvrConn* conn = handle->data;
if (conn == NULL) {
return;
}
SWorkThrdObj* thrd = conn->hostThrd;
- uvReleaseExHandle(conn->refId);
- uvRemoveExHandle(conn->refId);
+ transReleaseExHandle(refMgt, conn->refId);
+ transRemoveExHandle(refMgt, conn->refId);
tDebug("server conn %p destroy", conn);
// uv_timer_stop(&conn->pTimer);
@@ -883,8 +877,11 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads,
srv->port = port;
uv_loop_init(srv->loop);
- taosThreadOnce(&transModuleInit, uvInitEnv);
- transSrvInst++;
+ // taosThreadOnce(&transModuleInit, uvInitEnv);
+ int ref = atomic_add_fetch_32(&tranSSvrInst, 1);
+ if (ref == 1) {
+ refMgt = transOpenExHandleMgt(50000);
+ }
assert(0 == uv_pipe_init(srv->loop, &srv->pipeListen, 0));
#ifdef WINDOWS
@@ -923,7 +920,7 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads,
}
if (false == taosValidIpAndPort(srv->ip, srv->port)) {
terrno = TAOS_SYSTEM_ERROR(errno);
- tError("invalid ip/port, reason: %s", terrstr());
+ tError("invalid ip/port, %d:%d, reason: %s", srv->ip, srv->port, terrstr());
goto End;
}
if (false == addHandleToAcceptloop(srv)) {
@@ -944,44 +941,7 @@ End:
return NULL;
}
-void uvInitEnv() {
- uv_os_setenv("UV_TCP_SINGLE_ACCEPT", "1");
- uvOpenExHandleMgt(10000);
-}
-void uvOpenExHandleMgt(int size) {
- // added into once later
- exHandlesMgt = taosOpenRef(size, uvDestoryExHandle);
-}
-void uvCloseExHandleMgt() {
- // close ref
- taosCloseRef(exHandlesMgt);
-}
-int64_t uvAddExHandle(void* p) {
- // acquire extern handle
- return taosAddRef(exHandlesMgt, p);
-}
-int32_t uvRemoveExHandle(int64_t refId) {
- // acquire extern handle
- return taosRemoveRef(exHandlesMgt, refId);
-}
-
-SExHandle* uvAcquireExHandle(int64_t refId) {
- // acquire extern handle
- return (SExHandle*)taosAcquireRef(exHandlesMgt, refId);
-}
-
-int32_t uvReleaseExHandle(int64_t refId) {
- // release extern handle
- return taosReleaseRef(exHandlesMgt, refId);
-}
-void uvDestoryExHandle(void* handle) {
- if (handle == NULL) {
- return;
- }
- taosMemoryFree(handle);
-}
-
-void uvHandleQuit(SSrvMsg* msg, SWorkThrdObj* thrd) {
+void uvHandleQuit(SSvrMsg* msg, SWorkThrdObj* thrd) {
thrd->quit = true;
if (QUEUE_IS_EMPTY(&thrd->conn)) {
uv_walk(thrd->loop, uvWalkCb, NULL);
@@ -990,8 +950,8 @@ void uvHandleQuit(SSrvMsg* msg, SWorkThrdObj* thrd) {
}
taosMemoryFree(msg);
}
-void uvHandleRelease(SSrvMsg* msg, SWorkThrdObj* thrd) {
- SSrvConn* conn = msg->pConn;
+void uvHandleRelease(SSvrMsg* msg, SWorkThrdObj* thrd) {
+ SSvrConn* conn = msg->pConn;
if (conn->status == ConnAcquire) {
reallocConnRefHandle(conn);
if (!transQueuePush(&conn->srvMsgs, msg)) {
@@ -1004,13 +964,13 @@ void uvHandleRelease(SSrvMsg* msg, SWorkThrdObj* thrd) {
}
destroySmsg(msg);
}
-void uvHandleResp(SSrvMsg* msg, SWorkThrdObj* thrd) {
+void uvHandleResp(SSvrMsg* msg, SWorkThrdObj* thrd) {
// send msg to client
tDebug("server conn %p start to send resp (2/2)", msg->pConn);
uvStartSendResp(msg);
}
-void uvHandleRegister(SSrvMsg* msg, SWorkThrdObj* thrd) {
- SSrvConn* conn = msg->pConn;
+void uvHandleRegister(SSvrMsg* msg, SWorkThrdObj* thrd) {
+ SSvrConn* conn = msg->pConn;
tDebug("server conn %p register brokenlink callback", conn);
if (conn->status == ConnAcquire) {
if (!transQueuePush(&conn->srvMsgs, msg)) {
@@ -1036,12 +996,13 @@ void destroyWorkThrd(SWorkThrdObj* pThrd) {
}
taosThreadJoin(pThrd->thread, NULL);
SRV_RELEASE_UV(pThrd->loop);
+ TRANS_DESTROY_ASYNC_POOL_MSG(pThrd->asyncPool, SSvrMsg, destroySmsg);
transDestroyAsyncPool(pThrd->asyncPool);
taosMemoryFree(pThrd->loop);
taosMemoryFree(pThrd);
}
void sendQuitToWorkThrd(SWorkThrdObj* pThrd) {
- SSrvMsg* msg = taosMemoryCalloc(1, sizeof(SSrvMsg));
+ SSvrMsg* msg = taosMemoryCalloc(1, sizeof(SSvrMsg));
msg->type = Quit;
tDebug("server send quit msg to work thread");
transSendAsync(pThrd->asyncPool, &msg->q);
@@ -1074,11 +1035,11 @@ void transCloseServer(void* arg) {
taosMemoryFree(srv);
- transSrvInst--;
- if (transSrvInst == 0) {
- TdThreadOnce tmpInit = PTHREAD_ONCE_INIT;
- memcpy(&transModuleInit, &tmpInit, sizeof(TdThreadOnce));
- uvCloseExHandleMgt();
+ int ref = atomic_sub_fetch_32(&tranSSvrInst, 1);
+ if (ref == 0) {
+ // TdThreadOnce tmpInit = PTHREAD_ONCE_INIT;
+ // memcpy(&transModuleInit, &tmpInit, sizeof(TdThreadOnce));
+ transCloseExHandleMgt(refMgt);
}
}
@@ -1086,7 +1047,7 @@ void transRefSrvHandle(void* handle) {
if (handle == NULL) {
return;
}
- int ref = T_REF_INC((SSrvConn*)handle);
+ int ref = T_REF_INC((SSvrConn*)handle);
tDebug("server conn %p ref count: %d", handle, ref);
}
@@ -1094,10 +1055,10 @@ void transUnrefSrvHandle(void* handle) {
if (handle == NULL) {
return;
}
- int ref = T_REF_DEC((SSrvConn*)handle);
+ int ref = T_REF_DEC((SSvrConn*)handle);
tDebug("server conn %p ref count: %d", handle, ref);
if (ref == 0) {
- destroyConn((SSrvConn*)handle, true);
+ destroyConn((SSvrConn*)handle, true);
}
}
@@ -1112,17 +1073,17 @@ void transReleaseSrvHandle(void* handle) {
STransMsg tmsg = {.code = 0, .info.handle = exh, .info.ahandle = NULL, .info.refId = refId};
- SSrvMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSrvMsg));
- srvMsg->msg = tmsg;
- srvMsg->type = Release;
+ SSvrMsg* m = taosMemoryCalloc(1, sizeof(SSvrMsg));
+ m->msg = tmsg;
+ m->type = Release;
tTrace("server conn %p start to release", exh->handle);
- transSendAsync(pThrd->asyncPool, &srvMsg->q);
- uvReleaseExHandle(refId);
+ transSendAsync(pThrd->asyncPool, &m->q);
+ transReleaseExHandle(refMgt, refId);
return;
_return1:
tTrace("server handle %p failed to send to release handle", exh);
- uvReleaseExHandle(refId);
+ transReleaseExHandle(refMgt, refId);
return;
_return2:
tTrace("server handle %p failed to send to release handle", exh);
@@ -1140,17 +1101,17 @@ void transSendResponse(const STransMsg* msg) {
SWorkThrdObj* pThrd = exh->pThrd;
ASYNC_ERR_JRET(pThrd);
- SSrvMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSrvMsg));
- srvMsg->msg = tmsg;
- srvMsg->type = Normal;
+ SSvrMsg* m = taosMemoryCalloc(1, sizeof(SSvrMsg));
+ m->msg = tmsg;
+ m->type = Normal;
tDebug("server conn %p start to send resp (1/2)", exh->handle);
- transSendAsync(pThrd->asyncPool, &srvMsg->q);
- uvReleaseExHandle(refId);
+ transSendAsync(pThrd->asyncPool, &m->q);
+ transReleaseExHandle(refMgt, refId);
return;
_return1:
tTrace("server handle %p failed to send resp", exh);
rpcFreeCont(msg->pCont);
- uvReleaseExHandle(refId);
+ transReleaseExHandle(refMgt, refId);
return;
_return2:
tTrace("server handle %p failed to send resp", exh);
@@ -1168,18 +1129,18 @@ void transRegisterMsg(const STransMsg* msg) {
SWorkThrdObj* pThrd = exh->pThrd;
ASYNC_ERR_JRET(pThrd);
- SSrvMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSrvMsg));
- srvMsg->msg = tmsg;
- srvMsg->type = Register;
+ SSvrMsg* m = taosMemoryCalloc(1, sizeof(SSvrMsg));
+ m->msg = tmsg;
+ m->type = Register;
tTrace("server conn %p start to register brokenlink callback", exh->handle);
- transSendAsync(pThrd->asyncPool, &srvMsg->q);
- uvReleaseExHandle(refId);
+ transSendAsync(pThrd->asyncPool, &m->q);
+ transReleaseExHandle(refMgt, refId);
return;
_return1:
tTrace("server handle %p failed to send to register brokenlink", exh);
rpcFreeCont(msg->pCont);
- uvReleaseExHandle(refId);
+ transReleaseExHandle(refMgt, refId);
return;
_return2:
tTrace("server handle %p failed to send to register brokenlink", exh);
@@ -1192,7 +1153,7 @@ int transGetConnInfo(void* thandle, STransHandleInfo* pInfo) {
return -1;
}
SExHandle* ex = thandle;
- SSrvConn* pConn = ex->handle;
+ SSvrConn* pConn = ex->handle;
struct sockaddr_in addr = pConn->addr;
pInfo->clientIp = (uint32_t)(addr.sin_addr.s_addr);
diff --git a/source/libs/transport/test/transportTests.cpp b/source/libs/transport/test/transportTests.cpp
index a84bd94a00000b9a412b030e223e574a7a5b9794..6c8b30b6e4d5727bd7c0a0f8c6d850fb772262ad 100644
--- a/source/libs/transport/test/transportTests.cpp
+++ b/source/libs/transport/test/transportTests.cpp
@@ -156,80 +156,80 @@ int32_t cloneVal(void *src, void **dst) {
memcpy(*dst, src, sz);
return 0;
}
-TEST_F(TransCtxEnv, mergeTest) {
- int key = 1;
- {
- STransCtx *src = (STransCtx *)taosMemoryCalloc(1, sizeof(STransCtx));
- transCtxInit(src);
- {
- STransCtxVal val1 = {NULL, NULL, (void (*)(const void *))taosMemoryFree};
- val1.val = taosMemoryMalloc(12);
-
- taosHashPut(src->args, &key, sizeof(key), &val1, sizeof(val1));
- key++;
- }
- {
- STransCtxVal val1 = {NULL, NULL, (void (*)(const void *))taosMemoryFree};
- val1.val = taosMemoryMalloc(12);
- taosHashPut(src->args, &key, sizeof(key), &val1, sizeof(val1));
- key++;
- }
- transCtxMerge(ctx, src);
- taosMemoryFree(src);
- }
- EXPECT_EQ(2, taosHashGetSize(ctx->args));
- {
- STransCtx *src = (STransCtx *)taosMemoryCalloc(1, sizeof(STransCtx));
- transCtxInit(src);
- {
- STransCtxVal val1 = {NULL, NULL, (void (*)(const void *))taosMemoryFree};
- val1.val = taosMemoryMalloc(12);
-
- taosHashPut(src->args, &key, sizeof(key), &val1, sizeof(val1));
- key++;
- }
- {
- STransCtxVal val1 = {NULL, NULL, (void (*)(const void *))taosMemoryFree};
- val1.val = taosMemoryMalloc(12);
- taosHashPut(src->args, &key, sizeof(key), &val1, sizeof(val1));
- key++;
- }
- transCtxMerge(ctx, src);
- taosMemoryFree(src);
- }
- std::string val("Hello");
- EXPECT_EQ(4, taosHashGetSize(ctx->args));
- {
- key = 1;
- STransCtx *src = (STransCtx *)taosMemoryCalloc(1, sizeof(STransCtx));
- transCtxInit(src);
- {
- STransCtxVal val1 = {NULL, NULL, (void (*)(const void *))taosMemoryFree};
- val1.val = taosMemoryCalloc(1, 11);
- val1.clone = cloneVal;
- memcpy(val1.val, val.c_str(), val.size());
-
- taosHashPut(src->args, &key, sizeof(key), &val1, sizeof(val1));
- key++;
- }
- {
- STransCtxVal val1 = {NULL, NULL, (void (*)(const void *))taosMemoryFree};
- val1.val = taosMemoryCalloc(1, 11);
- val1.clone = cloneVal;
- memcpy(val1.val, val.c_str(), val.size());
- taosHashPut(src->args, &key, sizeof(key), &val1, sizeof(val1));
- key++;
- }
- transCtxMerge(ctx, src);
- taosMemoryFree(src);
- }
- EXPECT_EQ(4, taosHashGetSize(ctx->args));
-
- char *skey = (char *)transCtxDumpVal(ctx, 1);
- EXPECT_EQ(0, strcmp(skey, val.c_str()));
- taosMemoryFree(skey);
-
- skey = (char *)transCtxDumpVal(ctx, 2);
- EXPECT_EQ(0, strcmp(skey, val.c_str()));
-}
+// TEST_F(TransCtxEnv, mergeTest) {
+// int key = 1;
+// {
+// STransCtx *src = (STransCtx *)taosMemoryCalloc(1, sizeof(STransCtx));
+// transCtxInit(src);
+// {
+// STransCtxVal val1 = {NULL, NULL, (void (*)(const void *))taosMemoryFree};
+// val1.val = taosMemoryMalloc(12);
+//
+// taosHashPut(src->args, &key, sizeof(key), &val1, sizeof(val1));
+// key++;
+// }
+// {
+// STransCtxVal val1 = {NULL, NULL, (void (*)(const void *))taosMemoryFree};
+// val1.val = taosMemoryMalloc(12);
+// taosHashPut(src->args, &key, sizeof(key), &val1, sizeof(val1));
+// key++;
+// }
+// transCtxMerge(ctx, src);
+// taosMemoryFree(src);
+// }
+// EXPECT_EQ(2, taosHashGetSize(ctx->args));
+// {
+// STransCtx *src = (STransCtx *)taosMemoryCalloc(1, sizeof(STransCtx));
+// transCtxInit(src);
+// {
+// STransCtxVal val1 = {NULL, NULL, (void (*)(const void *))taosMemoryFree};
+// val1.val = taosMemoryMalloc(12);
+//
+// taosHashPut(src->args, &key, sizeof(key), &val1, sizeof(val1));
+// key++;
+// }
+// {
+// STransCtxVal val1 = {NULL, NULL, (void (*)(const void *))taosMemoryFree};
+// val1.val = taosMemoryMalloc(12);
+// taosHashPut(src->args, &key, sizeof(key), &val1, sizeof(val1));
+// key++;
+// }
+// transCtxMerge(ctx, src);
+// taosMemoryFree(src);
+// }
+// std::string val("Hello");
+// EXPECT_EQ(4, taosHashGetSize(ctx->args));
+// {
+// key = 1;
+// STransCtx *src = (STransCtx *)taosMemoryCalloc(1, sizeof(STransCtx));
+// transCtxInit(src);
+// {
+// STransCtxVal val1 = {NULL, NULL, (void (*)(const void *))taosMemoryFree};
+// val1.val = taosMemoryCalloc(1, 11);
+// val1.clone = cloneVal;
+// memcpy(val1.val, val.c_str(), val.size());
+//
+// taosHashPut(src->args, &key, sizeof(key), &val1, sizeof(val1));
+// key++;
+// }
+// {
+// STransCtxVal val1 = {NULL, NULL, (void (*)(const void *))taosMemoryFree};
+// val1.val = taosMemoryCalloc(1, 11);
+// val1.clone = cloneVal;
+// memcpy(val1.val, val.c_str(), val.size());
+// taosHashPut(src->args, &key, sizeof(key), &val1, sizeof(val1));
+// key++;
+// }
+// transCtxMerge(ctx, src);
+// taosMemoryFree(src);
+// }
+// EXPECT_EQ(4, taosHashGetSize(ctx->args));
+//
+// char *skey = (char *)transCtxDumpVal(ctx, 1);
+// EXPECT_EQ(0, strcmp(skey, val.c_str()));
+// taosMemoryFree(skey);
+//
+// skey = (char *)transCtxDumpVal(ctx, 2);
+// EXPECT_EQ(0, strcmp(skey, val.c_str()));
+//}
#endif
diff --git a/source/libs/wal/src/walWrite.c b/source/libs/wal/src/walWrite.c
index d2a43c410708249983295dca44ca06f6f75a2b70..a9a8f8a1f40fbfd6fda3ec43d4fd1bfdf61025dc 100644
--- a/source/libs/wal/src/walWrite.c
+++ b/source/libs/wal/src/walWrite.c
@@ -18,6 +18,14 @@
#include "tchecksum.h"
#include "walInt.h"
+void walRestoreFromSnapshot(SWal *pWal, int64_t ver) {
+ /*pWal->vers.firstVer = -1;*/
+ pWal->vers.lastVer = ver;
+ pWal->vers.commitVer = ver - 1;
+ pWal->vers.snapshotVer = ver - 1;
+ pWal->vers.verInSnapshotting = -1;
+}
+
int32_t walCommit(SWal *pWal, int64_t ver) {
ASSERT(pWal->vers.commitVer >= pWal->vers.snapshotVer);
ASSERT(pWal->vers.commitVer <= pWal->vers.lastVer);
diff --git a/source/os/CMakeLists.txt b/source/os/CMakeLists.txt
index b6e131d4ccc670f0d3b35e00483f33f072a314e2..e15627fe6682bb7a94f96d4e7e341a3b3b4c0637 100644
--- a/source/os/CMakeLists.txt
+++ b/source/os/CMakeLists.txt
@@ -10,7 +10,11 @@ target_include_directories(
PUBLIC "${TD_SOURCE_DIR}/contrib/msvcregex"
)
# iconv
-find_path(IconvApiIncludes iconv.h PATHS)
+if(TD_WINDOWS)
+ find_path(IconvApiIncludes iconv.h "${TD_SOURCE_DIR}/contrib/iconv")
+else()
+ find_path(IconvApiIncludes iconv.h PATHS)
+endif(TD_WINDOWS)
if(NOT IconvApiIncludes)
add_definitions(-DDISALLOW_NCHAR_WITHOUT_ICONV)
endif ()
diff --git a/source/os/src/osDir.c b/source/os/src/osDir.c
index c4b7c9386e93fb5fd87d148ff1d3e369d9871de2..cfb7b8a0e255cf32301984f9135f2d4711144d74 100644
--- a/source/os/src/osDir.c
+++ b/source/os/src/osDir.c
@@ -107,13 +107,14 @@ int32_t taosMkDir(const char *dirname) {
int32_t taosMulMkDir(const char *dirname) {
if (dirname == NULL) return -1;
char temp[1024];
+ char * pos = temp;
+ int32_t code = 0;
#ifdef WINDOWS
taosRealPath(dirname, temp, sizeof(temp));
+ if (temp[1] == ':') pos += 3;
#else
strcpy(temp, dirname);
#endif
- char * pos = temp;
- int32_t code = 0;
if (taosDirExist(temp)) return code;
@@ -203,7 +204,7 @@ void taosRemoveOldFiles(const char *dirname, int32_t keepDays) {
int32_t taosExpandDir(const char *dirname, char *outname, int32_t maxlen) {
wordexp_t full_path;
if (0 != wordexp(dirname, &full_path, 0)) {
- // printf("failed to expand path:%s since %s", dirname, strerror(errno));
+ printf("failed to expand path:%s since %s", dirname, strerror(errno));
wordfree(&full_path);
return -1;
}
diff --git a/source/os/src/osEnv.c b/source/os/src/osEnv.c
index 6746025f78be619868e53267588f8f4defe1d5cb..6ae3d8a0c0d655ae6be8bf1a23b36309962b7a65 100644
--- a/source/os/src/osEnv.c
+++ b/source/os/src/osEnv.c
@@ -70,11 +70,11 @@ void osDefaultInit() {
#elif defined(_TD_DARWIN_64)
if (configDir[0] == 0) {
- strcpy(configDir, "/tmp/taosd");
+ strcpy(configDir, "/usr/local/etc/taos");
}
strcpy(tsDataDir, "/usr/local/var/lib/taos");
strcpy(tsLogDir, "/usr/local/var/log/taos");
- strcpy(tsTempDir, "/usr/local/etc/taos");
+ strcpy(tsTempDir, "/tmp/taosd");
strcpy(tsOsName, "Darwin");
#else
diff --git a/source/os/src/osFile.c b/source/os/src/osFile.c
index e08b6681630e68d3985df19f18994890cc5abf83..c75cca79f6b82e2989b7199068db297c7b91a1eb 100644
--- a/source/os/src/osFile.c
+++ b/source/os/src/osFile.c
@@ -69,7 +69,6 @@ void taosGetTmpfilePath(const char *inputTmpDir, const char *fileNamePrefix, cha
}
strcpy(tmpPath + len, tdengineTmpFileNamePrefix);
- strcat(tmpPath, tdengineTmpFileNamePrefix);
if (strlen(tmpPath) + strlen(fileNamePrefix) + strlen("-%d-%s") < PATH_MAX) {
strcat(tmpPath, fileNamePrefix);
strcat(tmpPath, "-%d-%s");
diff --git a/source/os/src/osSemaphore.c b/source/os/src/osSemaphore.c
index d4cfe4fc39a83586a10e7b70b06c22f8e9066bb7..3b68073c7eba39fbb5434144d06757507f37a559 100644
--- a/source/os/src/osSemaphore.c
+++ b/source/os/src/osSemaphore.c
@@ -50,10 +50,15 @@ int32_t taosGetAppName(char* name, int32_t* len) {
if (sub != NULL) {
*sub = '\0';
}
- strcpy(name, filepath);
+ char* end = strrchr(filepath, TD_DIRSEP[0]);
+ if (end == NULL) {
+ end = filepath;
+ }
+
+ strcpy(name, end);
if (len != NULL) {
- *len = (int32_t)strlen(filepath);
+ *len = (int32_t)strlen(end);
}
return 0;
diff --git a/source/os/src/osSocket.c b/source/os/src/osSocket.c
index 572e2db6fdf8c9045a95ef4b7c9fbcf014f9784b..4a0d9e286629dfb4c788eb489ab41f9c6802d831 100644
--- a/source/os/src/osSocket.c
+++ b/source/os/src/osSocket.c
@@ -889,11 +889,11 @@ uint32_t taosGetIpv4FromFqdn(const char *fqdn) {
#ifdef WINDOWS
// Initialize Winsock
WSADATA wsaData;
- int iResult;
+ int iResult;
iResult = WSAStartup(MAKEWORD(2, 2), &wsaData);
if (iResult != 0) {
- printf("WSAStartup failed: %d\n", iResult);
- return 1;
+ // printf("WSAStartup failed: %d\n", iResult);
+ return 1;
}
#endif
struct addrinfo hints = {0};
@@ -913,12 +913,12 @@ uint32_t taosGetIpv4FromFqdn(const char *fqdn) {
} else {
#ifdef EAI_SYSTEM
if (ret == EAI_SYSTEM) {
- printf("failed to get the ip address, fqdn:%s, errno:%d, since:%s", fqdn, errno, strerror(errno));
+ // printf("failed to get the ip address, fqdn:%s, errno:%d, since:%s", fqdn, errno, strerror(errno));
} else {
- printf("failed to get the ip address, fqdn:%s, ret:%d, since:%s", fqdn, ret, gai_strerror(ret));
+ // printf("failed to get the ip address, fqdn:%s, ret:%d, since:%s", fqdn, ret, gai_strerror(ret));
}
#else
- printf("failed to get the ip address, fqdn:%s, ret:%d, since:%s", fqdn, ret, gai_strerror(ret));
+ // printf("failed to get the ip address, fqdn:%s, ret:%d, since:%s", fqdn, ret, gai_strerror(ret));
#endif
return 0xFFFFFFFF;
}
@@ -928,7 +928,7 @@ int32_t taosGetFqdn(char *fqdn) {
char hostname[1024];
hostname[1023] = '\0';
if (gethostname(hostname, 1023) == -1) {
- printf("failed to get hostname, reason:%s", strerror(errno));
+ // printf("failed to get hostname, reason:%s", strerror(errno));
assert(0);
return -1;
}
@@ -946,7 +946,7 @@ int32_t taosGetFqdn(char *fqdn) {
#endif // __APPLE__
int32_t ret = getaddrinfo(hostname, NULL, &hints, &result);
if (!result) {
- printf("failed to get fqdn, code:%d, reason:%s", ret, gai_strerror(ret));
+ // printf("failed to get fqdn, code:%d, reason:%s", ret, gai_strerror(ret));
assert(0);
return -1;
}
@@ -993,9 +993,7 @@ void tinet_ntoa(char *ipstr, uint32_t ip) {
sprintf(ipstr, "%d.%d.%d.%d", ip & 0xFF, (ip >> 8) & 0xFF, (ip >> 16) & 0xFF, ip >> 24);
}
-void taosIgnSIGPIPE() {
- signal(SIGPIPE, SIG_IGN);
-}
+void taosIgnSIGPIPE() { signal(SIGPIPE, SIG_IGN); }
void taosSetMaskSIGPIPE() {
#ifdef WINDOWS
diff --git a/source/os/src/osTimezone.c b/source/os/src/osTimezone.c
index 872d8e740c651cd494efbe5c783390637e95efe7..dc9527c2f2a052cf2d87b6b6c374019139390beb 100644
--- a/source/os/src/osTimezone.c
+++ b/source/os/src/osTimezone.c
@@ -32,6 +32,700 @@
#pragma warning(disable : 4091)
#include
#pragma warning(pop)
+
+char *win_tz[139][2]={{"China Standard Time", "Asia/Shanghai"},
+ {"AUS Central Standard Time", "Australia/Darwin"},
+ {"AUS Eastern Standard Time", "Australia/Sydney"},
+ {"Afghanistan Standard Time", "Asia/Kabul"},
+ {"Alaskan Standard Time", "America/Anchorage"},
+ {"Aleutian Standard Time", "America/Adak"},
+ {"Altai Standard Time", "Asia/Barnaul"},
+ {"Arab Standard Time", "Asia/Riyadh"},
+ {"Arabian Standard Time", "Asia/Dubai"},
+ {"Arabic Standard Time", "Asia/Baghdad"},
+ {"Argentina Standard Time", "America/Buenos_Aires"},
+ {"Astrakhan Standard Time", "Europe/Astrakhan"},
+ {"Atlantic Standard Time", "America/Halifax"},
+ {"Aus Central W. Standard Time", "Australia/Eucla"},
+ {"Azerbaijan Standard Time", "Asia/Baku"},
+ {"Azores Standard Time", "Atlantic/Azores"},
+ {"Bahia Standard Time", "America/Bahia"},
+ {"Bangladesh Standard Time", "Asia/Dhaka"},
+ {"Belarus Standard Time", "Europe/Minsk"},
+ {"Bougainville Standard Time", "Pacific/Bougainville"},
+ {"Canada Central Standard Time", "America/Regina"},
+ {"Cape Verde Standard Time", "Atlantic/Cape_Verde"},
+ {"Caucasus Standard Time", "Asia/Yerevan"},
+ {"Cen. Australia Standard Time", "Australia/Adelaide"},
+ {"Central America Standard Time", "America/Guatemala"},
+ {"Central Asia Standard Time", "Asia/Almaty"},
+ {"Central Brazilian Standard Time", "America/Cuiaba"},
+ {"Central Europe Standard Time", "Europe/Budapest"},
+ {"Central European Standard Time", "Europe/Warsaw"},
+ {"Central Pacific Standard Time", "Pacific/Guadalcanal"},
+ {"Central Standard Time", "America/Chicago"},
+ {"Central Standard Time (Mexico)", "America/Mexico_City"},
+ {"Chatham Islands Standard Time", "Pacific/Chatham"},
+ {"Cuba Standard Time", "America/Havana"},
+ {"Dateline Standard Time", "Etc/GMT+12"},
+ {"E. Africa Standard Time", "Africa/Nairobi"},
+ {"E. Australia Standard Time", "Australia/Brisbane"},
+ {"E. Europe Standard Time", "Europe/Chisinau"},
+ {"E. South America Standard Time", "America/Sao_Paulo"},
+ {"Easter Island Standard Time", "Pacific/Easter"},
+ {"Eastern Standard Time", "America/New_York"},
+ {"Eastern Standard Time (Mexico)", "America/Cancun"},
+ {"Egypt Standard Time", "Africa/Cairo"},
+ {"Ekaterinburg Standard Time", "Asia/Yekaterinburg"},
+ {"FLE Standard Time", "Europe/Kiev"},
+ {"Fiji Standard Time", "Pacific/Fiji"},
+ {"GMT Standard Time", "Europe/London"},
+ {"GTB Standard Time", "Europe/Bucharest"},
+ {"Georgian Standard Time", "Asia/Tbilisi"},
+ {"Greenland Standard Time", "America/Godthab"},
+ {"Greenwich Standard Time", "Atlantic/Reykjavik"},
+ {"Haiti Standard Time", "America/Port-au-Prince"},
+ {"Hawaiian Standard Time", "Pacific/Honolulu"},
+ {"India Standard Time", "Asia/Calcutta"},
+ {"Iran Standard Time", "Asia/Tehran"},
+ {"Israel Standard Time", "Asia/Jerusalem"},
+ {"Jordan Standard Time", "Asia/Amman"},
+ {"Kaliningrad Standard Time", "Europe/Kaliningrad"},
+ {"Korea Standard Time", "Asia/Seoul"},
+ {"Libya Standard Time", "Africa/Tripoli"},
+ {"Line Islands Standard Time", "Pacific/Kiritimati"},
+ {"Lord Howe Standard Time", "Australia/Lord_Howe"},
+ {"Magadan Standard Time", "Asia/Magadan"},
+ {"Magallanes Standard Time", "America/Punta_Arenas"},
+ {"Marquesas Standard Time", "Pacific/Marquesas"},
+ {"Mauritius Standard Time", "Indian/Mauritius"},
+ {"Middle East Standard Time", "Asia/Beirut"},
+ {"Montevideo Standard Time", "America/Montevideo"},
+ {"Morocco Standard Time", "Africa/Casablanca"},
+ {"Mountain Standard Time", "America/Denver"},
+ {"Mountain Standard Time (Mexico)", "America/Chihuahua"},
+ {"Myanmar Standard Time", "Asia/Rangoon"},
+ {"N. Central Asia Standard Time", "Asia/Novosibirsk"},
+ {"Namibia Standard Time", "Africa/Windhoek"},
+ {"Nepal Standard Time", "Asia/Katmandu"},
+ {"New Zealand Standard Time", "Pacific/Auckland"},
+ {"Newfoundland Standard Time", "America/St_Johns"},
+ {"Norfolk Standard Time", "Pacific/Norfolk"},
+ {"North Asia East Standard Time", "Asia/Irkutsk"},
+ {"North Asia Standard Time", "Asia/Krasnoyarsk"},
+ {"North Korea Standard Time", "Asia/Pyongyang"},
+ {"Omsk Standard Time", "Asia/Omsk"},
+ {"Pacific SA Standard Time", "America/Santiago"},
+ {"Pacific Standard Time", "America/Los_Angeles"},
+ {"Pacific Standard Time (Mexico)", "America/Tijuana"},
+ {"Pakistan Standard Time", "Asia/Karachi"},
+ {"Paraguay Standard Time", "America/Asuncion"},
+ {"Qyzylorda Standard Time", "Asia/Qyzylorda"},
+ {"Romance Standard Time", "Europe/Paris"},
+ {"Russia Time Zone 10", "Asia/Srednekolymsk"},
+ {"Russia Time Zone 11", "Asia/Kamchatka"},
+ {"Russia Time Zone 3", "Europe/Samara"},
+ {"Russian Standard Time", "Europe/Moscow"},
+ {"SA Eastern Standard Time", "America/Cayenne"},
+ {"SA Pacific Standard Time", "America/Bogota"},
+ {"SA Western Standard Time", "America/La_Paz"},
+ {"SE Asia Standard Time", "Asia/Bangkok"},
+ {"Saint Pierre Standard Time", "America/Miquelon"},
+ {"Sakhalin Standard Time", "Asia/Sakhalin"},
+ {"Samoa Standard Time", "Pacific/Apia"},
+ {"Sao Tome Standard Time", "Africa/Sao_Tome"},
+ {"Saratov Standard Time", "Europe/Saratov"},
+ {"Singapore Standard Time", "Asia/Singapore"},
+ {"South Africa Standard Time", "Africa/Johannesburg"},
+ {"South Sudan Standard Time", "Africa/Juba"},
+ {"Sri Lanka Standard Time", "Asia/Colombo"},
+ {"Sudan Standard Time", "Africa/Khartoum"},
+ {"Syria Standard Time", "Asia/Damascus"},
+ {"Taipei Standard Time", "Asia/Taipei"},
+ {"Tasmania Standard Time", "Australia/Hobart"},
+ {"Tocantins Standard Time", "America/Araguaina"},
+ {"Tokyo Standard Time", "Asia/Tokyo"},
+ {"Tomsk Standard Time", "Asia/Tomsk"},
+ {"Tonga Standard Time", "Pacific/Tongatapu"},
+ {"Transbaikal Standard Time", "Asia/Chita"},
+ {"Turkey Standard Time", "Europe/Istanbul"},
+ {"Turks And Caicos Standard Time", "America/Grand_Turk"},
+ {"US Eastern Standard Time", "America/Indianapolis"},
+ {"US Mountain Standard Time", "America/Phoenix"},
+ {"UTC", "Etc/UTC"},
+ {"UTC+12", "Etc/GMT-12"},
+ {"UTC+13", "Etc/GMT-13"},
+ {"UTC-02", "Etc/GMT+2"},
+ {"UTC-08", "Etc/GMT+8"},
+ {"UTC-09", "Etc/GMT+9"},
+ {"UTC-11", "Etc/GMT+11"},
+ {"Ulaanbaatar Standard Time", "Asia/Ulaanbaatar"},
+ {"Venezuela Standard Time", "America/Caracas"},
+ {"Vladivostok Standard Time", "Asia/Vladivostok"},
+ {"Volgograd Standard Time", "Europe/Volgograd"},
+ {"W. Australia Standard Time", "Australia/Perth"},
+ {"W. Central Africa Standard Time", "Africa/Lagos"},
+ {"W. Europe Standard Time", "Europe/Berlin"},
+ {"W. Mongolia Standard Time", "Asia/Hovd"},
+ {"West Asia Standard Time", "Asia/Tashkent"},
+ {"West Bank Standard Time", "Asia/Hebron"},
+ {"West Pacific Standard Time", "Pacific/Port_Moresby"},
+ {"Yakutsk Standard Time", "Asia/Yakutsk"},
+ {"Yukon Standard Time", "America/Whitehorse"}};
+char *tz_win[554][2]={{"Asia/Shanghai", "China Standard Time"},
+{"Africa/Abidjan", "Greenwich Standard Time"},
+{"Africa/Accra", "Greenwich Standard Time"},
+{"Africa/Addis_Ababa", "E. Africa Standard Time"},
+{"Africa/Algiers", "W. Central Africa Standard Time"},
+{"Africa/Asmera", "E. Africa Standard Time"},
+{"Africa/Bamako", "Greenwich Standard Time"},
+{"Africa/Bangui", "W. Central Africa Standard Time"},
+{"Africa/Banjul", "Greenwich Standard Time"},
+{"Africa/Bissau", "Greenwich Standard Time"},
+{"Africa/Blantyre", "South Africa Standard Time"},
+{"Africa/Brazzaville", "W. Central Africa Standard Time"},
+{"Africa/Bujumbura", "South Africa Standard Time"},
+{"Africa/Cairo", "Egypt Standard Time"},
+{"Africa/Casablanca", "Morocco Standard Time"},
+{"Africa/Ceuta", "Romance Standard Time"},
+{"Africa/Conakry", "Greenwich Standard Time"},
+{"Africa/Dakar", "Greenwich Standard Time"},
+{"Africa/Dar_es_Salaam", "E. Africa Standard Time"},
+{"Africa/Djibouti", "E. Africa Standard Time"},
+{"Africa/Douala", "W. Central Africa Standard Time"},
+{"Africa/El_Aaiun", "Morocco Standard Time"},
+{"Africa/Freetown", "Greenwich Standard Time"},
+{"Africa/Gaborone", "South Africa Standard Time"},
+{"Africa/Harare", "South Africa Standard Time"},
+{"Africa/Johannesburg", "South Africa Standard Time"},
+{"Africa/Juba", "South Sudan Standard Time"},
+{"Africa/Kampala", "E. Africa Standard Time"},
+{"Africa/Khartoum", "Sudan Standard Time"},
+{"Africa/Kigali", "South Africa Standard Time"},
+{"Africa/Kinshasa", "W. Central Africa Standard Time"},
+{"Africa/Lagos", "W. Central Africa Standard Time"},
+{"Africa/Libreville", "W. Central Africa Standard Time"},
+{"Africa/Lome", "Greenwich Standard Time"},
+{"Africa/Luanda", "W. Central Africa Standard Time"},
+{"Africa/Lubumbashi", "South Africa Standard Time"},
+{"Africa/Lusaka", "South Africa Standard Time"},
+{"Africa/Malabo", "W. Central Africa Standard Time"},
+{"Africa/Maputo", "South Africa Standard Time"},
+{"Africa/Maseru", "South Africa Standard Time"},
+{"Africa/Mbabane", "South Africa Standard Time"},
+{"Africa/Mogadishu", "E. Africa Standard Time"},
+{"Africa/Monrovia", "Greenwich Standard Time"},
+{"Africa/Nairobi", "E. Africa Standard Time"},
+{"Africa/Ndjamena", "W. Central Africa Standard Time"},
+{"Africa/Niamey", "W. Central Africa Standard Time"},
+{"Africa/Nouakchott", "Greenwich Standard Time"},
+{"Africa/Ouagadougou", "Greenwich Standard Time"},
+{"Africa/Porto-Novo", "W. Central Africa Standard Time"},
+{"Africa/Sao_Tome", "Sao Tome Standard Time"},
+{"Africa/Timbuktu", "Greenwich Standard Time"},
+{"Africa/Tripoli", "Libya Standard Time"},
+{"Africa/Tunis", "W. Central Africa Standard Time"},
+{"Africa/Windhoek", "Namibia Standard Time"},
+{"America/Adak", "Aleutian Standard Time"},
+{"America/Anchorage", "Alaskan Standard Time"},
+{"America/Anguilla", "SA Western Standard Time"},
+{"America/Antigua", "SA Western Standard Time"},
+{"America/Araguaina", "Tocantins Standard Time"},
+{"America/Argentina/La_Rioja", "Argentina Standard Time"},
+{"America/Argentina/Rio_Gallegos", "Argentina Standard Time"},
+{"America/Argentina/Salta", "Argentina Standard Time"},
+{"America/Argentina/San_Juan", "Argentina Standard Time"},
+{"America/Argentina/San_Luis", "Argentina Standard Time"},
+{"America/Argentina/Tucuman", "Argentina Standard Time"},
+{"America/Argentina/Ushuaia", "Argentina Standard Time"},
+{"America/Aruba", "SA Western Standard Time"},
+{"America/Asuncion", "Paraguay Standard Time"},
+{"America/Atka", "Aleutian Standard Time"},
+{"America/Bahia", "Bahia Standard Time"},
+{"America/Bahia_Banderas", "Central Standard Time (Mexico)"},
+{"America/Barbados", "SA Western Standard Time"},
+{"America/Belem", "SA Eastern Standard Time"},
+{"America/Belize", "Central America Standard Time"},
+{"America/Blanc-Sablon", "SA Western Standard Time"},
+{"America/Boa_Vista", "SA Western Standard Time"},
+{"America/Bogota", "SA Pacific Standard Time"},
+{"America/Boise", "Mountain Standard Time"},
+{"America/Buenos_Aires", "Argentina Standard Time"},
+{"America/Cambridge_Bay", "Mountain Standard Time"},
+{"America/Campo_Grande", "Central Brazilian Standard Time"},
+{"America/Cancun", "Eastern Standard Time (Mexico)"},
+{"America/Caracas", "Venezuela Standard Time"},
+{"America/Catamarca", "Argentina Standard Time"},
+{"America/Cayenne", "SA Eastern Standard Time"},
+{"America/Cayman", "SA Pacific Standard Time"},
+{"America/Chicago", "Central Standard Time"},
+{"America/Chihuahua", "Mountain Standard Time (Mexico)"},
+{"America/Coral_Harbour", "SA Pacific Standard Time"},
+{"America/Cordoba", "Argentina Standard Time"},
+{"America/Costa_Rica", "Central America Standard Time"},
+{"America/Creston", "US Mountain Standard Time"},
+{"America/Cuiaba", "Central Brazilian Standard Time"},
+{"America/Curacao", "SA Western Standard Time"},
+{"America/Danmarkshavn", "Greenwich Standard Time"},
+{"America/Dawson", "Yukon Standard Time"},
+{"America/Dawson_Creek", "US Mountain Standard Time"},
+{"America/Denver", "Mountain Standard Time"},
+{"America/Detroit", "Eastern Standard Time"},
+{"America/Dominica", "SA Western Standard Time"},
+{"America/Edmonton", "Mountain Standard Time"},
+{"America/Eirunepe", "SA Pacific Standard Time"},
+{"America/El_Salvador", "Central America Standard Time"},
+{"America/Ensenada", "Pacific Standard Time (Mexico)"},
+{"America/Fort_Nelson", "US Mountain Standard Time"},
+{"America/Fortaleza", "SA Eastern Standard Time"},
+{"America/Glace_Bay", "Atlantic Standard Time"},
+{"America/Godthab", "Greenland Standard Time"},
+{"America/Goose_Bay", "Atlantic Standard Time"},
+{"America/Grand_Turk", "Turks And Caicos Standard Time"},
+{"America/Grenada", "SA Western Standard Time"},
+{"America/Guadeloupe", "SA Western Standard Time"},
+{"America/Guatemala", "Central America Standard Time"},
+{"America/Guayaquil", "SA Pacific Standard Time"},
+{"America/Guyana", "SA Western Standard Time"},
+{"America/Halifax", "Atlantic Standard Time"},
+{"America/Havana", "Cuba Standard Time"},
+{"America/Hermosillo", "US Mountain Standard Time"},
+{"America/Indiana/Knox", "Central Standard Time"},
+{"America/Indiana/Marengo", "US Eastern Standard Time"},
+{"America/Indiana/Petersburg", "Eastern Standard Time"},
+{"America/Indiana/Tell_City", "Central Standard Time"},
+{"America/Indiana/Vevay", "US Eastern Standard Time"},
+{"America/Indiana/Vincennes", "Eastern Standard Time"},
+{"America/Indiana/Winamac", "Eastern Standard Time"},
+{"America/Indianapolis", "US Eastern Standard Time"},
+{"America/Inuvik", "Mountain Standard Time"},
+{"America/Iqaluit", "Eastern Standard Time"},
+{"America/Jamaica", "SA Pacific Standard Time"},
+{"America/Jujuy", "Argentina Standard Time"},
+{"America/Juneau", "Alaskan Standard Time"},
+{"America/Kentucky/Monticello", "Eastern Standard Time"},
+{"America/Knox_IN", "Central Standard Time"},
+{"America/Kralendijk", "SA Western Standard Time"},
+{"America/La_Paz", "SA Western Standard Time"},
+{"America/Lima", "SA Pacific Standard Time"},
+{"America/Los_Angeles", "Pacific Standard Time"},
+{"America/Louisville", "Eastern Standard Time"},
+{"America/Lower_Princes", "SA Western Standard Time"},
+{"America/Maceio", "SA Eastern Standard Time"},
+{"America/Managua", "Central America Standard Time"},
+{"America/Manaus", "SA Western Standard Time"},
+{"America/Marigot", "SA Western Standard Time"},
+{"America/Martinique", "SA Western Standard Time"},
+{"America/Matamoros", "Central Standard Time"},
+{"America/Mazatlan", "Mountain Standard Time (Mexico)"},
+{"America/Mendoza", "Argentina Standard Time"},
+{"America/Menominee", "Central Standard Time"},
+{"America/Merida", "Central Standard Time (Mexico)"},
+{"America/Metlakatla", "Alaskan Standard Time"},
+{"America/Mexico_City", "Central Standard Time (Mexico)"},
+{"America/Miquelon", "Saint Pierre Standard Time"},
+{"America/Moncton", "Atlantic Standard Time"},
+{"America/Monterrey", "Central Standard Time (Mexico)"},
+{"America/Montevideo", "Montevideo Standard Time"},
+{"America/Montreal", "Eastern Standard Time"},
+{"America/Montserrat", "SA Western Standard Time"},
+{"America/Nassau", "Eastern Standard Time"},
+{"America/New_York", "Eastern Standard Time"},
+{"America/Nipigon", "Eastern Standard Time"},
+{"America/Nome", "Alaskan Standard Time"},
+{"America/Noronha", "UTC-02"},
+{"America/North_Dakota/Beulah", "Central Standard Time"},
+{"America/North_Dakota/Center", "Central Standard Time"},
+{"America/North_Dakota/New_Salem", "Central Standard Time"},
+{"America/Ojinaga", "Mountain Standard Time"},
+{"America/Panama", "SA Pacific Standard Time"},
+{"America/Pangnirtung", "Eastern Standard Time"},
+{"America/Paramaribo", "SA Eastern Standard Time"},
+{"America/Phoenix", "US Mountain Standard Time"},
+{"America/Port-au-Prince", "Haiti Standard Time"},
+{"America/Port_of_Spain", "SA Western Standard Time"},
+{"America/Porto_Acre", "SA Pacific Standard Time"},
+{"America/Porto_Velho", "SA Western Standard Time"},
+{"America/Puerto_Rico", "SA Western Standard Time"},
+{"America/Punta_Arenas", "Magallanes Standard Time"},
+{"America/Rainy_River", "Central Standard Time"},
+{"America/Rankin_Inlet", "Central Standard Time"},
+{"America/Recife", "SA Eastern Standard Time"},
+{"America/Regina", "Canada Central Standard Time"},
+{"America/Resolute", "Central Standard Time"},
+{"America/Rio_Branco", "SA Pacific Standard Time"},
+{"America/Santa_Isabel", "Pacific Standard Time (Mexico)"},
+{"America/Santarem", "SA Eastern Standard Time"},
+{"America/Santiago", "Pacific SA Standard Time"},
+{"America/Santo_Domingo", "SA Western Standard Time"},
+{"America/Sao_Paulo", "E. South America Standard Time"},
+{"America/Scoresbysund", "Azores Standard Time"},
+{"America/Shiprock", "Mountain Standard Time"},
+{"America/Sitka", "Alaskan Standard Time"},
+{"America/St_Barthelemy", "SA Western Standard Time"},
+{"America/St_Johns", "Newfoundland Standard Time"},
+{"America/St_Kitts", "SA Western Standard Time"},
+{"America/St_Lucia", "SA Western Standard Time"},
+{"America/St_Thomas", "SA Western Standard Time"},
+{"America/St_Vincent", "SA Western Standard Time"},
+{"America/Swift_Current", "Canada Central Standard Time"},
+{"America/Tegucigalpa", "Central America Standard Time"},
+{"America/Thule", "Atlantic Standard Time"},
+{"America/Thunder_Bay", "Eastern Standard Time"},
+{"America/Tijuana", "Pacific Standard Time (Mexico)"},
+{"America/Toronto", "Eastern Standard Time"},
+{"America/Tortola", "SA Western Standard Time"},
+{"America/Vancouver", "Pacific Standard Time"},
+{"America/Virgin", "SA Western Standard Time"},
+{"America/Whitehorse", "Yukon Standard Time"},
+{"America/Winnipeg", "Central Standard Time"},
+{"America/Yakutat", "Alaskan Standard Time"},
+{"America/Yellowknife", "Mountain Standard Time"},
+{"Antarctica/Casey", "Central Pacific Standard Time"},
+{"Antarctica/Davis", "SE Asia Standard Time"},
+{"Antarctica/DumontDUrville", "West Pacific Standard Time"},
+{"Antarctica/Macquarie", "Tasmania Standard Time"},
+{"Antarctica/Mawson", "West Asia Standard Time"},
+{"Antarctica/McMurdo", "New Zealand Standard Time"},
+{"Antarctica/Palmer", "SA Eastern Standard Time"},
+{"Antarctica/Rothera", "SA Eastern Standard Time"},
+{"Antarctica/South_Pole", "New Zealand Standard Time"},
+{"Antarctica/Syowa", "E. Africa Standard Time"},
+{"Antarctica/Vostok", "Central Asia Standard Time"},
+{"Arctic/Longyearbyen", "W. Europe Standard Time"},
+{"Asia/Aden", "Arab Standard Time"},
+{"Asia/Almaty", "Central Asia Standard Time"},
+{"Asia/Amman", "Jordan Standard Time"},
+{"Asia/Anadyr", "Russia Time Zone 11"},
+{"Asia/Aqtau", "West Asia Standard Time"},
+{"Asia/Aqtobe", "West Asia Standard Time"},
+{"Asia/Ashgabat", "West Asia Standard Time"},
+{"Asia/Ashkhabad", "West Asia Standard Time"},
+{"Asia/Atyrau", "West Asia Standard Time"},
+{"Asia/Baghdad", "Arabic Standard Time"},
+{"Asia/Bahrain", "Arab Standard Time"},
+{"Asia/Baku", "Azerbaijan Standard Time"},
+{"Asia/Bangkok", "SE Asia Standard Time"},
+{"Asia/Barnaul", "Altai Standard Time"},
+{"Asia/Beirut", "Middle East Standard Time"},
+{"Asia/Bishkek", "Central Asia Standard Time"},
+{"Asia/Brunei", "Singapore Standard Time"},
+{"Asia/Calcutta", "India Standard Time"},
+{"Asia/Chita", "Transbaikal Standard Time"},
+{"Asia/Choibalsan", "Ulaanbaatar Standard Time"},
+{"Asia/Chongqing", "China Standard Time"},
+{"Asia/Chungking", "China Standard Time"},
+{"Asia/Colombo", "Sri Lanka Standard Time"},
+{"Asia/Dacca", "Bangladesh Standard Time"},
+{"Asia/Damascus", "Syria Standard Time"},
+{"Asia/Dhaka", "Bangladesh Standard Time"},
+{"Asia/Dili", "Tokyo Standard Time"},
+{"Asia/Dubai", "Arabian Standard Time"},
+{"Asia/Dushanbe", "West Asia Standard Time"},
+{"Asia/Famagusta", "GTB Standard Time"},
+{"Asia/Gaza", "West Bank Standard Time"},
+{"Asia/Harbin", "China Standard Time"},
+{"Asia/Hebron", "West Bank Standard Time"},
+{"Asia/Hong_Kong", "China Standard Time"},
+{"Asia/Hovd", "W. Mongolia Standard Time"},
+{"Asia/Irkutsk", "North Asia East Standard Time"},
+{"Asia/Jakarta", "SE Asia Standard Time"},
+{"Asia/Jayapura", "Tokyo Standard Time"},
+{"Asia/Jerusalem", "Israel Standard Time"},
+{"Asia/Kabul", "Afghanistan Standard Time"},
+{"Asia/Kamchatka", "Russia Time Zone 11"},
+{"Asia/Karachi", "Pakistan Standard Time"},
+{"Asia/Kashgar", "Central Asia Standard Time"},
+{"Asia/Katmandu", "Nepal Standard Time"},
+{"Asia/Khandyga", "Yakutsk Standard Time"},
+{"Asia/Krasnoyarsk", "North Asia Standard Time"},
+{"Asia/Kuala_Lumpur", "Singapore Standard Time"},
+{"Asia/Kuching", "Singapore Standard Time"},
+{"Asia/Kuwait", "Arab Standard Time"},
+{"Asia/Macao", "China Standard Time"},
+{"Asia/Macau", "China Standard Time"},
+{"Asia/Magadan", "Magadan Standard Time"},
+{"Asia/Makassar", "Singapore Standard Time"},
+{"Asia/Manila", "Singapore Standard Time"},
+{"Asia/Muscat", "Arabian Standard Time"},
+{"Asia/Nicosia", "GTB Standard Time"},
+{"Asia/Novokuznetsk", "North Asia Standard Time"},
+{"Asia/Novosibirsk", "N. Central Asia Standard Time"},
+{"Asia/Omsk", "Omsk Standard Time"},
+{"Asia/Oral", "West Asia Standard Time"},
+{"Asia/Phnom_Penh", "SE Asia Standard Time"},
+{"Asia/Pontianak", "SE Asia Standard Time"},
+{"Asia/Pyongyang", "North Korea Standard Time"},
+{"Asia/Qatar", "Arab Standard Time"},
+{"Asia/Qostanay", "Central Asia Standard Time"},
+{"Asia/Qyzylorda", "Qyzylorda Standard Time"},
+{"Asia/Rangoon", "Myanmar Standard Time"},
+{"Asia/Riyadh", "Arab Standard Time"},
+{"Asia/Saigon", "SE Asia Standard Time"},
+{"Asia/Sakhalin", "Sakhalin Standard Time"},
+{"Asia/Samarkand", "West Asia Standard Time"},
+{"Asia/Seoul", "Korea Standard Time"},
+{"Asia/Singapore", "Singapore Standard Time"},
+{"Asia/Srednekolymsk", "Russia Time Zone 10"},
+{"Asia/Taipei", "Taipei Standard Time"},
+{"Asia/Tashkent", "West Asia Standard Time"},
+{"Asia/Tbilisi", "Georgian Standard Time"},
+{"Asia/Tehran", "Iran Standard Time"},
+{"Asia/Tel_Aviv", "Israel Standard Time"},
+{"Asia/Thimbu", "Bangladesh Standard Time"},
+{"Asia/Thimphu", "Bangladesh Standard Time"},
+{"Asia/Tokyo", "Tokyo Standard Time"},
+{"Asia/Tomsk", "Tomsk Standard Time"},
+{"Asia/Ujung_Pandang", "Singapore Standard Time"},
+{"Asia/Ulaanbaatar", "Ulaanbaatar Standard Time"},
+{"Asia/Ulan_Bator", "Ulaanbaatar Standard Time"},
+{"Asia/Urumqi", "Central Asia Standard Time"},
+{"Asia/Ust-Nera", "Vladivostok Standard Time"},
+{"Asia/Vientiane", "SE Asia Standard Time"},
+{"Asia/Vladivostok", "Vladivostok Standard Time"},
+{"Asia/Yakutsk", "Yakutsk Standard Time"},
+{"Asia/Yekaterinburg", "Ekaterinburg Standard Time"},
+{"Asia/Yerevan", "Caucasus Standard Time"},
+{"Atlantic/Azores", "Azores Standard Time"},
+{"Atlantic/Bermuda", "Atlantic Standard Time"},
+{"Atlantic/Canary", "GMT Standard Time"},
+{"Atlantic/Cape_Verde", "Cape Verde Standard Time"},
+{"Atlantic/Faeroe", "GMT Standard Time"},
+{"Atlantic/Jan_Mayen", "W. Europe Standard Time"},
+{"Atlantic/Madeira", "GMT Standard Time"},
+{"Atlantic/Reykjavik", "Greenwich Standard Time"},
+{"Atlantic/South_Georgia", "UTC-02"},
+{"Atlantic/St_Helena", "Greenwich Standard Time"},
+{"Atlantic/Stanley", "SA Eastern Standard Time"},
+{"Australia/ACT", "AUS Eastern Standard Time"},
+{"Australia/Adelaide", "Cen. Australia Standard Time"},
+{"Australia/Brisbane", "E. Australia Standard Time"},
+{"Australia/Broken_Hill", "Cen. Australia Standard Time"},
+{"Australia/Canberra", "AUS Eastern Standard Time"},
+{"Australia/Currie", "Tasmania Standard Time"},
+{"Australia/Darwin", "AUS Central Standard Time"},
+{"Australia/Eucla", "Aus Central W. Standard Time"},
+{"Australia/Hobart", "Tasmania Standard Time"},
+{"Australia/LHI", "Lord Howe Standard Time"},
+{"Australia/Lindeman", "E. Australia Standard Time"},
+{"Australia/Lord_Howe", "Lord Howe Standard Time"},
+{"Australia/Melbourne", "AUS Eastern Standard Time"},
+{"Australia/NSW", "AUS Eastern Standard Time"},
+{"Australia/North", "AUS Central Standard Time"},
+{"Australia/Perth", "W. Australia Standard Time"},
+{"Australia/Queensland", "E. Australia Standard Time"},
+{"Australia/South", "Cen. Australia Standard Time"},
+{"Australia/Sydney", "AUS Eastern Standard Time"},
+{"Australia/Tasmania", "Tasmania Standard Time"},
+{"Australia/Victoria", "AUS Eastern Standard Time"},
+{"Australia/West", "W. Australia Standard Time"},
+{"Australia/Yancowinna", "Cen. Australia Standard Time"},
+{"Brazil/Acre", "SA Pacific Standard Time"},
+{"Brazil/DeNoronha", "UTC-02"},
+{"Brazil/East", "E. South America Standard Time"},
+{"Brazil/West", "SA Western Standard Time"},
+{"CST6CDT", "Central Standard Time"},
+{"Canada/Atlantic", "Atlantic Standard Time"},
+{"Canada/Central", "Central Standard Time"},
+{"Canada/Eastern", "Eastern Standard Time"},
+{"Canada/Mountain", "Mountain Standard Time"},
+{"Canada/Newfoundland", "Newfoundland Standard Time"},
+{"Canada/Pacific", "Pacific Standard Time"},
+{"Canada/Saskatchewan", "Canada Central Standard Time"},
+{"Canada/Yukon", "Yukon Standard Time"},
+{"Chile/Continental", "Pacific SA Standard Time"},
+{"Chile/EasterIsland", "Easter Island Standard Time"},
+{"Cuba", "Cuba Standard Time"},
+{"EST5EDT", "Eastern Standard Time"},
+{"Egypt", "Egypt Standard Time"},
+{"Eire", "GMT Standard Time"},
+{"Etc/GMT", "UTC"},
+{"Etc/GMT+1", "Cape Verde Standard Time"},
+{"Etc/GMT+10", "Hawaiian Standard Time"},
+{"Etc/GMT+11", "UTC-11"},
+{"Etc/GMT+12", "Dateline Standard Time"},
+{"Etc/GMT+2", "UTC-02"},
+{"Etc/GMT+3", "SA Eastern Standard Time"},
+{"Etc/GMT+4", "SA Western Standard Time"},
+{"Etc/GMT+5", "SA Pacific Standard Time"},
+{"Etc/GMT+6", "Central America Standard Time"},
+{"Etc/GMT+7", "US Mountain Standard Time"},
+{"Etc/GMT+8", "UTC-08"},
+{"Etc/GMT+9", "UTC-09"},
+{"Etc/GMT-1", "W. Central Africa Standard Time"},
+{"Etc/GMT-10", "West Pacific Standard Time"},
+{"Etc/GMT-11", "Central Pacific Standard Time"},
+{"Etc/GMT-12", "UTC+12"},
+{"Etc/GMT-13", "UTC+13"},
+{"Etc/GMT-14", "Line Islands Standard Time"},
+{"Etc/GMT-2", "South Africa Standard Time"},
+{"Etc/GMT-3", "E. Africa Standard Time"},
+{"Etc/GMT-4", "Arabian Standard Time"},
+{"Etc/GMT-5", "West Asia Standard Time"},
+{"Etc/GMT-6", "Central Asia Standard Time"},
+{"Etc/GMT-7", "SE Asia Standard Time"},
+{"Etc/GMT-8", "Singapore Standard Time"},
+{"Etc/GMT-9", "Tokyo Standard Time"},
+{"Etc/UCT", "UTC"},
+{"Etc/UTC", "UTC"},
+{"Europe/Amsterdam", "W. Europe Standard Time"},
+{"Europe/Andorra", "W. Europe Standard Time"},
+{"Europe/Astrakhan", "Astrakhan Standard Time"},
+{"Europe/Athens", "GTB Standard Time"},
+{"Europe/Belfast", "GMT Standard Time"},
+{"Europe/Belgrade", "Central Europe Standard Time"},
+{"Europe/Berlin", "W. Europe Standard Time"},
+{"Europe/Bratislava", "Central Europe Standard Time"},
+{"Europe/Brussels", "Romance Standard Time"},
+{"Europe/Bucharest", "GTB Standard Time"},
+{"Europe/Budapest", "Central Europe Standard Time"},
+{"Europe/Busingen", "W. Europe Standard Time"},
+{"Europe/Chisinau", "E. Europe Standard Time"},
+{"Europe/Copenhagen", "Romance Standard Time"},
+{"Europe/Dublin", "GMT Standard Time"},
+{"Europe/Gibraltar", "W. Europe Standard Time"},
+{"Europe/Guernsey", "GMT Standard Time"},
+{"Europe/Helsinki", "FLE Standard Time"},
+{"Europe/Isle_of_Man", "GMT Standard Time"},
+{"Europe/Istanbul", "Turkey Standard Time"},
+{"Europe/Jersey", "GMT Standard Time"},
+{"Europe/Kaliningrad", "Kaliningrad Standard Time"},
+{"Europe/Kiev", "FLE Standard Time"},
+{"Europe/Kirov", "Russian Standard Time"},
+{"Europe/Lisbon", "GMT Standard Time"},
+{"Europe/Ljubljana", "Central Europe Standard Time"},
+{"Europe/London", "GMT Standard Time"},
+{"Europe/Luxembourg", "W. Europe Standard Time"},
+{"Europe/Madrid", "Romance Standard Time"},
+{"Europe/Malta", "W. Europe Standard Time"},
+{"Europe/Mariehamn", "FLE Standard Time"},
+{"Europe/Minsk", "Belarus Standard Time"},
+{"Europe/Monaco", "W. Europe Standard Time"},
+{"Europe/Moscow", "Russian Standard Time"},
+{"Europe/Oslo", "W. Europe Standard Time"},
+{"Europe/Paris", "Romance Standard Time"},
+{"Europe/Podgorica", "Central Europe Standard Time"},
+{"Europe/Prague", "Central Europe Standard Time"},
+{"Europe/Riga", "FLE Standard Time"},
+{"Europe/Rome", "W. Europe Standard Time"},
+{"Europe/Samara", "Russia Time Zone 3"},
+{"Europe/San_Marino", "W. Europe Standard Time"},
+{"Europe/Sarajevo", "Central European Standard Time"},
+{"Europe/Saratov", "Saratov Standard Time"},
+{"Europe/Simferopol", "Russian Standard Time"},
+{"Europe/Skopje", "Central European Standard Time"},
+{"Europe/Sofia", "FLE Standard Time"},
+{"Europe/Stockholm", "W. Europe Standard Time"},
+{"Europe/Tallinn", "FLE Standard Time"},
+{"Europe/Tirane", "Central Europe Standard Time"},
+{"Europe/Tiraspol", "E. Europe Standard Time"},
+{"Europe/Ulyanovsk", "Astrakhan Standard Time"},
+{"Europe/Uzhgorod", "FLE Standard Time"},
+{"Europe/Vaduz", "W. Europe Standard Time"},
+{"Europe/Vatican", "W. Europe Standard Time"},
+{"Europe/Vienna", "W. Europe Standard Time"},
+{"Europe/Vilnius", "FLE Standard Time"},
+{"Europe/Volgograd", "Volgograd Standard Time"},
+{"Europe/Warsaw", "Central European Standard Time"},
+{"Europe/Zagreb", "Central European Standard Time"},
+{"Europe/Zaporozhye", "FLE Standard Time"},
+{"Europe/Zurich", "W. Europe Standard Time"},
+{"GB", "GMT Standard Time"},
+{"GB-Eire", "GMT Standard Time"},
+{"GMT+0", "UTC"},
+{"GMT-0", "UTC"},
+{"GMT0", "UTC"},
+{"Greenwich", "UTC"},
+{"Hongkong", "China Standard Time"},
+{"Iceland", "Greenwich Standard Time"},
+{"Indian/Antananarivo", "E. Africa Standard Time"},
+{"Indian/Chagos", "Central Asia Standard Time"},
+{"Indian/Christmas", "SE Asia Standard Time"},
+{"Indian/Cocos", "Myanmar Standard Time"},
+{"Indian/Comoro", "E. Africa Standard Time"},
+{"Indian/Kerguelen", "West Asia Standard Time"},
+{"Indian/Mahe", "Mauritius Standard Time"},
+{"Indian/Maldives", "West Asia Standard Time"},
+{"Indian/Mauritius", "Mauritius Standard Time"},
+{"Indian/Mayotte", "E. Africa Standard Time"},
+{"Indian/Reunion", "Mauritius Standard Time"},
+{"Iran", "Iran Standard Time"},
+{"Israel", "Israel Standard Time"},
+{"Jamaica", "SA Pacific Standard Time"},
+{"Japan", "Tokyo Standard Time"},
+{"Kwajalein", "UTC+12"},
+{"Libya", "Libya Standard Time"},
+{"MST7MDT", "Mountain Standard Time"},
+{"Mexico/BajaNorte", "Pacific Standard Time (Mexico)"},
+{"Mexico/BajaSur", "Mountain Standard Time (Mexico)"},
+{"Mexico/General", "Central Standard Time (Mexico)"},
+{"NZ", "New Zealand Standard Time"},
+{"NZ-CHAT", "Chatham Islands Standard Time"},
+{"Navajo", "Mountain Standard Time"},
+{"PRC", "China Standard Time"},
+{"PST8PDT", "Pacific Standard Time"},
+{"Pacific/Apia", "Samoa Standard Time"},
+{"Pacific/Auckland", "New Zealand Standard Time"},
+{"Pacific/Bougainville", "Bougainville Standard Time"},
+{"Pacific/Chatham", "Chatham Islands Standard Time"},
+{"Pacific/Easter", "Easter Island Standard Time"},
+{"Pacific/Efate", "Central Pacific Standard Time"},
+{"Pacific/Enderbury", "UTC+13"},
+{"Pacific/Fakaofo", "UTC+13"},
+{"Pacific/Fiji", "Fiji Standard Time"},
+{"Pacific/Funafuti", "UTC+12"},
+{"Pacific/Galapagos", "Central America Standard Time"},
+{"Pacific/Gambier", "UTC-09"},
+{"Pacific/Guadalcanal", "Central Pacific Standard Time"},
+{"Pacific/Guam", "West Pacific Standard Time"},
+{"Pacific/Honolulu", "Hawaiian Standard Time"},
+{"Pacific/Johnston", "Hawaiian Standard Time"},
+{"Pacific/Kiritimati", "Line Islands Standard Time"},
+{"Pacific/Kosrae", "Central Pacific Standard Time"},
+{"Pacific/Kwajalein", "UTC+12"},
+{"Pacific/Majuro", "UTC+12"},
+{"Pacific/Marquesas", "Marquesas Standard Time"},
+{"Pacific/Midway", "UTC-11"},
+{"Pacific/Nauru", "UTC+12"},
+{"Pacific/Niue", "UTC-11"},
+{"Pacific/Norfolk", "Norfolk Standard Time"},
+{"Pacific/Noumea", "Central Pacific Standard Time"},
+{"Pacific/Pago_Pago", "UTC-11"},
+{"Pacific/Palau", "Tokyo Standard Time"},
+{"Pacific/Pitcairn", "UTC-08"},
+{"Pacific/Ponape", "Central Pacific Standard Time"},
+{"Pacific/Port_Moresby", "West Pacific Standard Time"},
+{"Pacific/Rarotonga", "Hawaiian Standard Time"},
+{"Pacific/Saipan", "West Pacific Standard Time"},
+{"Pacific/Samoa", "UTC-11"},
+{"Pacific/Tahiti", "Hawaiian Standard Time"},
+{"Pacific/Tarawa", "UTC+12"},
+{"Pacific/Tongatapu", "Tonga Standard Time"},
+{"Pacific/Truk", "West Pacific Standard Time"},
+{"Pacific/Wake", "UTC+12"},
+{"Pacific/Wallis", "UTC+12"},
+{"Poland", "Central European Standard Time"},
+{"Portugal", "GMT Standard Time"},
+{"ROC", "Taipei Standard Time"},
+{"ROK", "Korea Standard Time"},
+{"Singapore", "Singapore Standard Time"},
+{"Turkey", "Turkey Standard Time"},
+{"UCT", "UTC"},
+{"US/Alaska", "Alaskan Standard Time"},
+{"US/Aleutian", "Aleutian Standard Time"},
+{"US/Arizona", "US Mountain Standard Time"},
+{"US/Central", "Central Standard Time"},
+{"US/Eastern", "Eastern Standard Time"},
+{"US/Hawaii", "Hawaiian Standard Time"},
+{"US/Indiana-Starke", "Central Standard Time"},
+{"US/Michigan", "Eastern Standard Time"},
+{"US/Mountain", "Mountain Standard Time"},
+{"US/Pacific", "Pacific Standard Time"},
+{"US/Samoa", "UTC-11"},
+{"UTC", "UTC"},
+{"Universal", "UTC"},
+{"W-SU", "Russian Standard Time"},
+{"Zulu", "UTC"}};
#elif defined(_TD_DARWIN_64)
#include
#include
@@ -61,19 +755,33 @@ void taosSetSystemTimezone(const char *inTimezoneStr, char *outTimezoneStr, int8
#ifdef WINDOWS
char winStr[TD_LOCALE_LEN * 2];
- sprintf(winStr, "TZ=%s", buf);
- putenv(winStr);
- tzset();
- /*
- * get CURRENT time zone.
- * system current time zone is affected by daylight saving time(DST)
- *
- * e.g., the local time zone of London in DST is GMT+01:00,
- * otherwise is GMT+00:00
- */
+ memset(winStr, 0, sizeof(winStr));
+ for (size_t i = 0; i < 554; i++) {
+ if (strcmp(tz_win[i][0],buf) == 0) {
+ char keyPath[100];
+ char keyValue[100];
+ DWORD keyValueSize = sizeof(keyValue);
+ sprintf(keyPath, "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Time Zones\\%s",tz_win[i][1]);
+ RegGetValue(HKEY_LOCAL_MACHINE, keyPath, "Display", RRF_RT_ANY, NULL, (PVOID)&keyValue, &keyValueSize);
+ if (keyValueSize > 0) {
+ keyValue[4] = (keyValue[4] == '+' ? '-' : '+');
+ keyValue[10] = 0;
+ sprintf(winStr, "TZ=%s:00", &(keyValue[1]));
+ }
+ break;
+ }
+ }
+ char *p = strchr(inTimezoneStr, '+');
+ if (p == NULL) p = strchr(inTimezoneStr, '-');
+ if (p == NULL) {
+ sprintf(winStr, "TZ=UTC+00:00:00");
+ } else {
+ sprintf(winStr, "TZ=UTC%c%c%c:%c%c:00", (p[0] == '+' ? '-' : '+'), p[1], p[2], p[3], p[4]);
+ }
+ _putenv(winStr);
+ _tzset();
#ifdef _MSC_VER
#if _MSC_VER >= 1900
- // see https://docs.microsoft.com/en-us/cpp/c-runtime-library/daylight-dstbias-timezone-and-tzname?view=vs-2019
int64_t timezone = _timezone;
int32_t daylight = _daylight;
char **tzname = _tzname;
@@ -83,11 +791,6 @@ void taosSetSystemTimezone(const char *inTimezoneStr, char *outTimezoneStr, int8
int32_t tz = (int32_t)((-timezone * MILLISECOND_PER_SECOND) / MILLISECOND_PER_HOUR);
*tsTimezone = tz;
tz += daylight;
- /*
- * format:
- * (CST, +0800)
- * (BST, +0100)
- */
sprintf(outTimezoneStr, "%s (%s, %s%02d00)", buf, tzname[daylight], tz >= 0 ? "+" : "-", abs(tz));
*outDaylight = daylight;
@@ -117,14 +820,36 @@ void taosSetSystemTimezone(const char *inTimezoneStr, char *outTimezoneStr, int8
}
void taosGetSystemTimezone(char *outTimezoneStr, enum TdTimezone *tsTimezone) {
-#ifdef WINDOWS
- char *tz = getenv("TZ");
- if (tz == NULL || strlen(tz) == 0) {
+#ifdef WINDOWS
+ char value[100];
+ DWORD bufferSize = sizeof(value);
+ char *buf = getenv("TZ");
+ if (buf == NULL || strlen(buf) == 0) {
+ RegGetValue(HKEY_LOCAL_MACHINE, "SYSTEM\\CurrentControlSet\\Control\\TimeZoneInformation", "TimeZoneKeyName", RRF_RT_ANY, NULL, (PVOID)&value, &bufferSize);
strcpy(outTimezoneStr, "not configured");
+ if (bufferSize > 0) {
+ for (size_t i = 0; i < 139; i++) {
+ if (strcmp(win_tz[i][0],value) == 0) {
+ strcpy(outTimezoneStr, win_tz[i][1]);
+ break;
+ }
+ }
+ }
} else {
- strcpy(outTimezoneStr, tz);
+ strcpy(outTimezoneStr, buf);
}
-
+#ifdef _MSC_VER
+#if _MSC_VER >= 1900
+ // see https://docs.microsoft.com/en-us/cpp/c-runtime-library/daylight-dstbias-timezone-and-tzname?view=vs-2019
+ int64_t timezone = _timezone;
+ int32_t daylight = _daylight;
+ char **tzname = _tzname;
+#endif
+#endif
+ int32_t tz = (int32_t)((-timezone * MILLISECOND_PER_SECOND) / MILLISECOND_PER_HOUR);
+ *tsTimezone = tz;
+ tz += daylight;
+ sprintf(outTimezoneStr, "%s (%s, %s%02d00)", outTimezoneStr, tzname[daylight], tz >= 0 ? "+" : "-", abs(tz));
#elif defined(_TD_DARWIN_64)
char buf[4096] = {0};
char *tz = NULL;
diff --git a/source/util/src/tdigest.c b/source/util/src/tdigest.c
new file mode 100644
index 0000000000000000000000000000000000000000..56b113fd8f166aae397e05ef3fed40e4df00309a
--- /dev/null
+++ b/source/util/src/tdigest.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+/*
+ * src/tdigest.c
+ *
+ * Implementation of the t-digest data structure used to compute accurate percentiles.
+ *
+ * It is based on the MergingDigest implementation found at:
+ * https://github.com/tdunning/t-digest/blob/master/src/main/java/com/tdunning/math/stats/MergingDigest.java
+ *
+ * Copyright (c) 2016, Usman Masood
+ */
+
+#include "os.h"
+#include "osMath.h"
+#include "tdigest.h"
+
+#define INTERPOLATE(x, x0, x1) (((x) - (x0)) / ((x1) - (x0)))
+//#define INTEGRATED_LOCATION(compression, q) ((compression) * (asin(2 * (q) - 1) + M_PI / 2) / M_PI)
+#define INTEGRATED_LOCATION(compression, q) ((compression) * (asin(2 * (double)(q) - 1)/M_PI + (double)1/2))
+#define FLOAT_EQ(f1, f2) (fabs((f1) - (f2)) <= FLT_EPSILON)
+
+typedef struct SMergeArgs {
+ TDigest *t;
+ SCentroid *centroids;
+ int32_t idx;
+ double weight_so_far;
+ double k1;
+ double min;
+ double max;
+}SMergeArgs;
+
+void tdigestAutoFill(TDigest* t, int32_t compression) {
+ t->centroids = (SCentroid*)((char*)t + sizeof(TDigest));
+ t->buffered_pts = (SPt*) ((char*)t + sizeof(TDigest) + sizeof(SCentroid) * (int32_t)GET_CENTROID(compression));
+}
+
+TDigest *tdigestNewFrom(void* pBuf, int32_t compression) {
+ memset(pBuf, 0, (size_t)TDIGEST_SIZE(compression));
+ TDigest* t = (TDigest*)pBuf;
+ tdigestAutoFill(t, compression);
+
+ t->compression = compression;
+ t->size = (int64_t)GET_CENTROID(compression);
+ t->threshold = (int32_t)GET_THRESHOLD(compression);
+ t->min = DOUBLE_MAX;
+ t->max = -DOUBLE_MAX;
+
+ return t;
+}
+
+static int32_t cmpCentroid(const void *a, const void *b) {
+ SCentroid *c1 = (SCentroid *) a;
+ SCentroid *c2 = (SCentroid *) b;
+ if (c1->mean < c2->mean)
+ return -1;
+ if (c1->mean > c2->mean)
+ return 1;
+ return 0;
+}
+
+
+static void mergeCentroid(SMergeArgs *args, SCentroid *merge) {
+ double k2;
+ SCentroid *c = &args->centroids[args->idx];
+
+ args->weight_so_far += merge->weight;
+ k2 = INTEGRATED_LOCATION(args->t->size,
+ args->weight_so_far / args->t->total_weight);
+ //idx++
+ if(k2 - args->k1 > 1 && c->weight > 0) {
+ if(args->idx + 1 < args->t->size
+ && merge->mean != args->centroids[args->idx].mean) {
+ args->idx++;
+ }
+ args->k1 = k2;
+ }
+
+ c = &args->centroids[args->idx];
+ if(c->mean == merge->mean) {
+ c->weight += merge->weight;
+ } else {
+ c->weight += merge->weight;
+ c->mean += (merge->mean - c->mean) * merge->weight / c->weight;
+
+ if (merge->weight > 0) {
+ args->min = TMIN(merge->mean, args->min);
+ args->max = TMAX(merge->mean, args->max);
+ }
+ }
+}
+
+void tdigestCompress(TDigest *t) {
+ SCentroid *unmerged_centroids;
+ int64_t unmerged_weight = 0;
+ int32_t num_unmerged = t->num_buffered_pts;
+ int32_t i, j;
+ SMergeArgs args;
+
+ if (t->num_buffered_pts <= 0)
+ return;
+
+ unmerged_centroids = (SCentroid*)taosMemoryMalloc(sizeof(SCentroid) * t->num_buffered_pts);
+ for (i = 0; i < num_unmerged; i++) {
+ SPt *p = t->buffered_pts + i;
+ SCentroid *c = &unmerged_centroids[i];
+ c->mean = p->value;
+ c->weight = p->weight;
+ unmerged_weight += c->weight;
+ }
+ t->num_buffered_pts = 0;
+ t->total_weight += unmerged_weight;
+
+ qsort(unmerged_centroids, num_unmerged, sizeof(SCentroid), cmpCentroid);
+ memset(&args, 0, sizeof(SMergeArgs));
+ args.centroids = (SCentroid*)taosMemoryMalloc((size_t)(sizeof(SCentroid) * t->size));
+ memset(args.centroids, 0, (size_t)(sizeof(SCentroid) * t->size));
+
+ args.t = t;
+ args.min = DOUBLE_MAX;
+ args.max = -DOUBLE_MAX;
+
+ i = 0;
+ j = 0;
+ while (i < num_unmerged && j < t->num_centroids) {
+ SCentroid *a = &unmerged_centroids[i];
+ SCentroid *b = &t->centroids[j];
+
+ if (a->mean <= b->mean) {
+ mergeCentroid(&args, a);
+ assert(args.idx < t->size);
+ i++;
+ } else {
+ mergeCentroid(&args, b);
+ assert(args.idx < t->size);
+ j++;
+ }
+ }
+
+ while (i < num_unmerged) {
+ mergeCentroid(&args, &unmerged_centroids[i++]);
+ assert(args.idx < t->size);
+ }
+ taosMemoryFree((void*)unmerged_centroids);
+
+ while (j < t->num_centroids) {
+ mergeCentroid(&args, &t->centroids[j++]);
+ assert(args.idx < t->size);
+ }
+
+ if (t->total_weight > 0) {
+ t->min = TMIN(t->min, args.min);
+ if (args.centroids[args.idx].weight <= 0) {
+ args.idx--;
+ }
+ t->num_centroids = args.idx + 1;
+ t->max = TMAX(t->max, args.max);
+ }
+
+ memcpy(t->centroids, args.centroids, sizeof(SCentroid) * t->num_centroids);
+ taosMemoryFree((void*)args.centroids);
+}
+
+void tdigestAdd(TDigest* t, double x, int64_t w) {
+ if (w == 0)
+ return;
+
+ int32_t i = t->num_buffered_pts;
+ if(i > 0 && t->buffered_pts[i-1].value == x ) {
+ t->buffered_pts[i].weight = w;
+ } else {
+ t->buffered_pts[i].value = x;
+ t->buffered_pts[i].weight = w;
+ t->num_buffered_pts++;
+ }
+
+
+ if (t->num_buffered_pts >= t->threshold)
+ tdigestCompress(t);
+}
+
+double tdigestCDF(TDigest *t, double x) {
+ if (t == NULL)
+ return 0;
+
+ int32_t i;
+ double left, right;
+ int64_t weight_so_far;
+ SCentroid *a, *b, tmp;
+
+ tdigestCompress(t);
+ if (t->num_centroids == 0)
+ return NAN;
+ if (x < t->min)
+ return 0;
+ if (x > t->max)
+ return 1;
+ if (t->num_centroids == 1) {
+ if (FLOAT_EQ(t->max, t->min))
+ return 0.5;
+
+ return INTERPOLATE(x, t->min, t->max);
+ }
+
+ weight_so_far = 0;
+ a = b = &tmp;
+ b->mean = t->min;
+ b->weight = 0;
+ right = 0;
+
+ for (i = 0; i < t->num_centroids; i++) {
+ SCentroid *c = &t->centroids[i];
+
+ left = b->mean - (a->mean + right);
+ a = b;
+ b = c;
+ right = (b->mean - a->mean) * a->weight / (a->weight + b->weight);
+
+ if (x < a->mean + right) {
+ double cdf = (weight_so_far
+ + a->weight
+ * INTERPOLATE(x, a->mean - left, a->mean + right))
+ / t->total_weight;
+ return TMAX(cdf, 0.0);
+ }
+
+ weight_so_far += a->weight;
+ }
+
+ left = b->mean - (a->mean + right);
+ a = b;
+ right = t->max - a->mean;
+
+ if (x < a->mean + right) {
+ return (weight_so_far + a->weight * INTERPOLATE(x, a->mean - left, a->mean + right))
+ / t->total_weight;
+ }
+
+ return 1;
+}
+
+double tdigestQuantile(TDigest *t, double q) {
+ if (t == NULL)
+ return 0;
+
+ int32_t i;
+ double left, right, idx;
+ int64_t weight_so_far;
+ SCentroid *a, *b, tmp;
+
+ tdigestCompress(t);
+ if (t->num_centroids == 0)
+ return NAN;
+ if (t->num_centroids == 1)
+ return t->centroids[0].mean;
+ if (FLOAT_EQ(q, 0.0))
+ return t->min;
+ if (FLOAT_EQ(q, 1.0))
+ return t->max;
+
+ idx = q * t->total_weight;
+ weight_so_far = 0;
+ b = &tmp;
+ b->mean = t->min;
+ b->weight = 0;
+ right = t->min;
+
+ for (i = 0; i < t->num_centroids; i++) {
+ SCentroid *c = &t->centroids[i];
+ a = b;
+ left = right;
+
+ b = c;
+ right = (b->weight * a->mean + a->weight * b->mean)/ (a->weight + b->weight);
+ if (idx < weight_so_far + a->weight) {
+ double p = (idx - weight_so_far) / a->weight;
+ return left * (1 - p) + right * p;
+ }
+ weight_so_far += a->weight;
+ }
+
+ left = right;
+ a = b;
+ right = t->max;
+
+ if (idx < weight_so_far + a->weight && a->weight != 0) {
+ double p = (idx - weight_so_far) / a->weight;
+ return left * (1 - p) + right * p;
+ }
+
+ return t->max;
+}
+
+void tdigestMerge(TDigest *t1, TDigest *t2) {
+ // SPoints
+ int32_t num_pts = t2->num_buffered_pts;
+ for(int32_t i = num_pts - 1; i >= 0; i--) {
+ SPt* p = t2->buffered_pts + i;
+ tdigestAdd(t1, p->value, p->weight);
+ t2->num_buffered_pts --;
+ }
+ // centroids
+ for (int32_t i = 0; i < t2->num_centroids; i++) {
+ tdigestAdd(t1, t2->centroids[i].mean, t2->centroids[i].weight);
+ }
+}
diff --git a/source/util/src/terror.c b/source/util/src/terror.c
index 7c4f0fa2dd5d170f60f583aa87723a73f72be146..1e6aec147f491ea7cf9f93c7513b9de464bf8a7b 100644
--- a/source/util/src/terror.c
+++ b/source/util/src/terror.c
@@ -74,6 +74,8 @@ TAOS_DEFINE_ERROR(TSDB_CODE_REPEAT_INIT, "Repeat initialization
TAOS_DEFINE_ERROR(TSDB_CODE_DUP_KEY, "Cannot add duplicate keys to hash")
TAOS_DEFINE_ERROR(TSDB_CODE_NEED_RETRY, "Retry needed")
TAOS_DEFINE_ERROR(TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE, "Out of memory in rpc queue")
+TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_TIMESTAMP, "Invalid timestamp format")
+TAOS_DEFINE_ERROR(TSDB_CODE_MSG_DECODE_ERROR, "Msg decode error")
TAOS_DEFINE_ERROR(TSDB_CODE_REF_NO_MEMORY, "Ref out of memory")
TAOS_DEFINE_ERROR(TSDB_CODE_REF_FULL, "too many Ref Objs")
@@ -88,6 +90,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_RPC_AUTH_FAILURE, "Authentication failur
TAOS_DEFINE_ERROR(TSDB_CODE_RPC_NETWORK_UNAVAIL, "Unable to establish connection")
TAOS_DEFINE_ERROR(TSDB_CODE_RPC_FQDN_ERROR, "Unable to resolve FQDN")
TAOS_DEFINE_ERROR(TSDB_CODE_RPC_PORT_EADDRINUSE, "Port already in use")
+TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INDIRECT_NETWORK_UNAVAIL, "Unable to establish connection")
//client
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_OPERATION, "Invalid operation")
@@ -185,9 +188,9 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_SNODE_ALREADY_EXIST, "Snode already exists"
TAOS_DEFINE_ERROR(TSDB_CODE_MND_SNODE_NOT_EXIST, "Snode not there")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_BNODE_ALREADY_EXIST, "Bnode already exists")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_BNODE_NOT_EXIST, "Bnode not there")
-TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_FEW_MNODES, "Too few mnodes")
-TAOS_DEFINE_ERROR(TSDB_CODE_MND_MNODE_DEPLOYED, "Mnode deployed in this dnode")
-TAOS_DEFINE_ERROR(TSDB_CODE_MND_CANT_DROP_MASTER, "Can't drop mnode which is master")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_FEW_MNODES, "The replicas of mnode cannot less than 1")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_MNODES, "The replicas of mnode cannot exceed 3")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_CANT_DROP_MASTER, "Can't drop mnode which is leader")
// mnode-acct
TAOS_DEFINE_ERROR(TSDB_CODE_MND_ACCT_ALREADY_EXIST, "Account already exists")
@@ -242,7 +245,6 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_SINGLE_STB_MODE_DB, "Database is single st
// mnode-infoSchema
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_SYS_TABLENAME, "Invalid system table name")
-
// mnode-func
TAOS_DEFINE_ERROR(TSDB_CODE_MND_FUNC_ALREADY_EXIST, "Func already exists")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_FUNC_NOT_EXIST, "Func not exists")
@@ -269,9 +271,13 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TOPIC, "Invalid topic")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TOPIC_QUERY, "Topic with invalid query")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TOPIC_OPTION, "Topic with invalid option")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_CONSUMER_NOT_EXIST, "Consumer not exist")
-TAOS_DEFINE_ERROR(TSDB_CODE_MND_CONSUMER_NOT_READY, "Consumer waiting for rebalance")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_CGROUP_USED, "Consumer group being used by some consumer")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOPIC_SUBSCRIBED, "Topic subscribed cannot be dropped")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_STREAM_ALREADY_EXIST, "Stream already exists")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_STREAM_NOT_EXIST, "Stream not exist")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_STREAM_OPTION, "Invalid stream option")
+
// mnode-sma
TAOS_DEFINE_ERROR(TSDB_CODE_MND_SMA_ALREADY_EXIST, "SMA already exists")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_SMA_NOT_EXIST, "SMA does not exist")
@@ -311,6 +317,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_VND_TABLE_NOT_EXIST, "Table does not exists
TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_TABLE_ACTION, "Invalid table action")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_COL_ALREADY_EXISTS, "Table column already exists")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_TABLE_COL_NOT_EXISTS, "Table column not exists")
+TAOS_DEFINE_ERROR(TSDB_CODE_VND_READ_END, "Read end")
// tsdb
@@ -318,9 +325,9 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_TABLE_ID, "Invalid table ID")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_TABLE_TYPE, "Invalid table type")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION, "Invalid table schema version")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TABLE_ALREADY_EXIST, "Table already exists")
-TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TABLE_NOT_EXIST, "Table not exists")
-TAOS_DEFINE_ERROR(TSDB_CODE_TDB_STB_ALREADY_EXIST, "Stable already exists")
-TAOS_DEFINE_ERROR(TSDB_CODE_TDB_STB_NOT_EXIST, "Stable not exists")
+TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TABLE_NOT_EXIST, "Table not exists")
+TAOS_DEFINE_ERROR(TSDB_CODE_TDB_STB_ALREADY_EXIST, "Stable already exists")
+TAOS_DEFINE_ERROR(TSDB_CODE_TDB_STB_NOT_EXIST, "Stable not exists")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_CONFIG, "Invalid configuration")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INIT_FAILED, "Tsdb init failed")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_DISKSPACE, "No diskspace for tsdb")
@@ -442,9 +449,10 @@ TAOS_DEFINE_ERROR(TSDB_CODE_QW_MSG_ERROR, "Invalid msg order")
// parser
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_TABLE_NOT_EXIST, "Table does not exist")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_PERMISSION_DENIED, "Permission denied")
+TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INTERNAL_ERROR, "Parser internal error")
//planner
-TAOS_DEFINE_ERROR(TSDB_CODE_PLAN_INTERNAL_ERROR, "planner internal error")
+TAOS_DEFINE_ERROR(TSDB_CODE_PLAN_INTERNAL_ERROR, "Planner internal error")
//udf
TAOS_DEFINE_ERROR(TSDB_CODE_UDF_STOPPING, "udf is stopping")
@@ -462,6 +470,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_UDF_INVALID_OUTPUT_TYPE, "udf invalid output ty
TAOS_DEFINE_ERROR(TSDB_CODE_SML_INVALID_PROTOCOL_TYPE, "Invalid line protocol type")
TAOS_DEFINE_ERROR(TSDB_CODE_SML_INVALID_PRECISION_TYPE, "Invalid timestamp precision type")
TAOS_DEFINE_ERROR(TSDB_CODE_SML_INVALID_DATA, "Invalid data type")
+TAOS_DEFINE_ERROR(TSDB_CODE_SML_INVALID_DB_CONF, "Invalid schemaless db config")
#ifdef TAOS_ERROR_C
};
diff --git a/source/util/src/thash.c b/source/util/src/thash.c
index 551c3b67c8642b8bceab70c9cae75aca78f73769..f564ae45b63c0d24ac649cad4ef6ae3ecb907bcd 100644
--- a/source/util/src/thash.c
+++ b/source/util/src/thash.c
@@ -708,7 +708,7 @@ SHashNode *doCreateHashNode(const void *key, size_t keyLen, const void *pData, s
pNewNode->removed = 0;
pNewNode->next = NULL;
- memcpy(GET_HASH_NODE_DATA(pNewNode), pData, dsize);
+ if (pData) memcpy(GET_HASH_NODE_DATA(pNewNode), pData, dsize);
memcpy(GET_HASH_NODE_KEY(pNewNode), key, keyLen);
return pNewNode;
@@ -774,7 +774,7 @@ static void *taosHashReleaseNode(SHashObj *pHashObj, void *p, int *slot) {
ASSERT(prevNode->next != prevNode);
} else {
pe->next = pOld->next;
- SHashNode* x = pe->next;
+ SHashNode *x = pe->next;
if (x != NULL) {
ASSERT(x->next != x);
}
diff --git a/source/util/src/tlist.c b/source/util/src/tlist.c
index 1d17b4a9e17aa7cafdd89ba273770e8751f09066..b1c018805157fe05ef6be97fa7be6df0255d5d5b 100644
--- a/source/util/src/tlist.c
+++ b/source/util/src/tlist.c
@@ -95,7 +95,7 @@ SListNode *tdListPopTail(SList *list) {
SListNode *tdListGetHead(SList *list) { return TD_DLIST_HEAD(list); }
-SListNode *tsListGetTail(SList *list) { return TD_DLIST_TAIL(list); }
+SListNode *tdListGetTail(SList *list) { return TD_DLIST_TAIL(list); }
SListNode *tdListPopNode(SList *list, SListNode *node) {
TD_DLIST_POP(list, node);
diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c
index c1fc2c48c04b1fe42ea886516772ab63eac91556..353e94a49096822fe581d7faa0df8a29a6494c12 100644
--- a/source/util/src/tlog.c
+++ b/source/util/src/tlog.c
@@ -39,7 +39,7 @@
#define LOG_BUF_MUTEX(x) ((x)->buffMutex)
typedef struct {
- char *buffer;
+ char * buffer;
int32_t buffStart;
int32_t buffEnd;
int32_t buffSize;
@@ -58,7 +58,7 @@ typedef struct {
int32_t openInProgress;
pid_t pid;
char logName[LOG_FILE_NAME_LEN];
- SLogBuff *logHandle;
+ SLogBuff * logHandle;
TdThreadMutex logMutex;
} SLogObj;
@@ -96,6 +96,7 @@ int32_t fsDebugFlag = 135;
int32_t metaDebugFlag = 135;
int32_t fnDebugFlag = 135;
int32_t smaDebugFlag = 135;
+int32_t idxDebugFlag = 135;
int64_t dbgEmptyW = 0;
int64_t dbgWN = 0;
@@ -103,7 +104,7 @@ int64_t dbgSmallWN = 0;
int64_t dbgBigWN = 0;
int64_t dbgWSize = 0;
-static void *taosAsyncOutputLog(void *param);
+static void * taosAsyncOutputLog(void *param);
static int32_t taosPushLogBuffer(SLogBuff *pLogBuf, const char *msg, int32_t msgLen);
static SLogBuff *taosLogBuffNew(int32_t bufSize);
static void taosCloseLogByFd(TdFilePtr pFile);
@@ -226,7 +227,7 @@ static void *taosThreadToOpenNewFile(void *param) {
tsLogObj.logHandle->pFile = pFile;
tsLogObj.lines = 0;
tsLogObj.openInProgress = 0;
- taosSsleep(10);
+ taosSsleep(20);
taosCloseLogByFd(pOldFile);
uInfo(" new log file:%d is opened", tsLogObj.flag);
@@ -490,7 +491,7 @@ void taosDumpData(unsigned char *msg, int32_t len) {
if (!osLogSpaceAvailable()) return;
taosUpdateLogNums(DEBUG_DUMP);
- char temp[256];
+ char temp[256] = {0};
int32_t i, pos = 0, c = 0;
for (i = 0; i < len; ++i) {
@@ -701,7 +702,7 @@ int32_t taosCompressFile(char *srcFileName, char *destFileName) {
int32_t compressSize = 163840;
int32_t ret = 0;
int32_t len = 0;
- char *data = taosMemoryMalloc(compressSize);
+ char * data = taosMemoryMalloc(compressSize);
// gzFile dstFp = NULL;
// srcFp = fopen(srcFileName, "r");
@@ -759,6 +760,7 @@ void taosSetAllDebugFlag(int32_t flag) {
fsDebugFlag = flag;
fnDebugFlag = flag;
smaDebugFlag = flag;
+ idxDebugFlag = flag;
uInfo("all debug flag are set to %d", flag);
}
diff --git a/source/util/src/tpagedbuf.c b/source/util/src/tpagedbuf.c
index 00f123370747fcc29eddbb9ad053514134d3bc8f..101ac78e1847a1db244f7dfe867f94aeec0447d4 100644
--- a/source/util/src/tpagedbuf.c
+++ b/source/util/src/tpagedbuf.c
@@ -549,11 +549,16 @@ void destroyDiskbasedBuf(SDiskbasedBuf* pBuf) {
// print the statistics information
{
SDiskbasedBufStatis* ps = &pBuf->statis;
- uDebug(
- "Get/Release pages:%d/%d, flushToDisk:%.2f Kb (%d Pages), loadFromDisk:%.2f Kb (%d Pages), avgPageSize:%.2f "
- "Kb\n",
- ps->getPages, ps->releasePages, ps->flushBytes / 1024.0f, ps->flushPages, ps->loadBytes / 1024.0f,
- ps->loadPages, ps->loadBytes / (1024.0 * ps->loadPages));
+ if (ps->loadPages == 0) {
+ uDebug(
+ "Get/Release pages:%d/%d, flushToDisk:%.2f Kb (%d Pages), loadFromDisk:%.2f Kb (%d Pages)",
+ ps->getPages, ps->releasePages, ps->flushBytes / 1024.0f, ps->flushPages, ps->loadBytes / 1024.0f, ps->loadPages);
+ } else {
+ uDebug(
+ "Get/Release pages:%d/%d, flushToDisk:%.2f Kb (%d Pages), loadFromDisk:%.2f Kb (%d Pages), avgPageSize:%.2f Kb",
+ ps->getPages, ps->releasePages, ps->flushBytes / 1024.0f, ps->flushPages, ps->loadBytes / 1024.0f,
+ ps->loadPages, ps->loadBytes / (1024.0 * ps->loadPages));
+ }
}
taosRemoveFile(pBuf->path);
diff --git a/source/util/src/tqueue.c b/source/util/src/tqueue.c
index 6a10794ea154306f3c26b9666482a7c3a5b61958..37935087fad693eed254549977182ccaca1085f2 100644
--- a/source/util/src/tqueue.c
+++ b/source/util/src/tqueue.c
@@ -26,6 +26,7 @@ typedef struct STaosQnode STaosQnode;
typedef struct STaosQnode {
STaosQnode *next;
STaosQueue *queue;
+ int64_t timestamp;
int32_t size;
int8_t itype;
int8_t reserved[3];
@@ -144,6 +145,7 @@ void *taosAllocateQitem(int32_t size, EQItype itype) {
STaosQnode *pNode = taosMemoryCalloc(1, sizeof(STaosQnode) + size);
pNode->size = size;
pNode->itype = itype;
+ pNode->timestamp = taosGetTimestampUs();
if (pNode == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@@ -393,7 +395,7 @@ void taosRemoveFromQset(STaosQset *qset, STaosQueue *queue) {
int32_t taosGetQueueNumber(STaosQset *qset) { return qset->numOfQueues; }
-int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, void **ahandle, FItem *itemFp) {
+int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, int64_t *ts, void **ahandle, FItem *itemFp) {
STaosQnode *pNode = NULL;
int32_t code = 0;
@@ -415,6 +417,7 @@ int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, void **ahandle, FI
*ppItem = pNode->item;
if (ahandle) *ahandle = queue->ahandle;
if (itemFp) *itemFp = queue->itemFp;
+ if (ts) *ts = pNode->timestamp;
queue->head = pNode->next;
if (queue->head == NULL) queue->tail = NULL;
diff --git a/source/util/src/tsched.c b/source/util/src/tsched.c
index ee1f4185613dd85f0e60d86ebd0487b07b3ceee9..691a0d34d42ca3ab04be5daf61414016436a6bb1 100644
--- a/source/util/src/tsched.c
+++ b/source/util/src/tsched.c
@@ -23,19 +23,19 @@
#define DUMP_SCHEDULER_TIME_WINDOW 30000 // every 30sec, take a snap shot of task queue.
typedef struct {
- char label[TSDB_LABEL_LEN];
- tsem_t emptySem;
- tsem_t fullSem;
+ char label[TSDB_LABEL_LEN];
+ tsem_t emptySem;
+ tsem_t fullSem;
TdThreadMutex queueMutex;
- int32_t fullSlot;
- int32_t emptySlot;
- int32_t queueSize;
- int32_t numOfThreads;
- TdThread *qthread;
- SSchedMsg *queue;
- bool stop;
- void *pTmrCtrl;
- void *pTimer;
+ int32_t fullSlot;
+ int32_t emptySlot;
+ int32_t queueSize;
+ int32_t numOfThreads;
+ TdThread *qthread;
+ SSchedMsg *queue;
+ bool stop;
+ void *pTmrCtrl;
+ void *pTimer;
} SSchedQueue;
static void *taosProcessSchedQueue(void *param);
@@ -218,7 +218,8 @@ void taosCleanUpScheduler(void *param) {
taosThreadMutexDestroy(&pSched->queueMutex);
if (pSched->pTimer) {
- taosTmrStopA(&pSched->pTimer);
+ taosTmrStop(pSched->pTimer);
+ pSched->pTimer = NULL;
}
if (pSched->queue) taosMemoryFree(pSched->queue);
diff --git a/source/util/src/tstrbuild.c b/source/util/src/tstrbuild.c
index 2aae588046402e37569f5a2bde5ed5f72fa24346..c87b889e82ece82c251ddabad1964bc1f0b3ab2f 100644
--- a/source/util/src/tstrbuild.c
+++ b/source/util/src/tstrbuild.c
@@ -69,13 +69,13 @@ void taosStringBuilderAppendString(SStringBuilder* sb, const char* str) {
void taosStringBuilderAppendNull(SStringBuilder* sb) { taosStringBuilderAppendStringLen(sb, "null", 4); }
void taosStringBuilderAppendInteger(SStringBuilder* sb, int64_t v) {
- char buf[64];
+ char buf[64] = {0};
size_t len = snprintf(buf, sizeof(buf), "%" PRId64, v);
taosStringBuilderAppendStringLen(sb, buf, TMIN(len, sizeof(buf)));
}
void taosStringBuilderAppendDouble(SStringBuilder* sb, double v) {
- char buf[512];
+ char buf[512] = {0};
size_t len = snprintf(buf, sizeof(buf), "%.9lf", v);
taosStringBuilderAppendStringLen(sb, buf, TMIN(len, sizeof(buf)));
}
diff --git a/source/util/src/tworker.c b/source/util/src/tworker.c
index dc48fc3f8d2b2e803e8f1593d5471184fa99e059..68f96c0385b6c25a4736343917e875f84d4e2c9e 100644
--- a/source/util/src/tworker.c
+++ b/source/util/src/tworker.c
@@ -75,19 +75,20 @@ static void *tQWorkerThreadFp(SQWorker *worker) {
void *msg = NULL;
void *ahandle = NULL;
int32_t code = 0;
+ int64_t ts = 0;
taosBlockSIGPIPE();
setThreadName(pool->name);
uDebug("worker:%s:%d is running", pool->name, worker->id);
while (1) {
- if (taosReadQitemFromQset(pool->qset, (void **)&msg, &ahandle, &fp) == 0) {
+ if (taosReadQitemFromQset(pool->qset, (void **)&msg, &ts, &ahandle, &fp) == 0) {
uDebug("worker:%s:%d qset:%p, got no message and exiting", pool->name, worker->id, pool->qset);
break;
}
if (fp != NULL) {
- SQueueInfo info = {.ahandle = ahandle, .workerId = worker->id, .threadNum = pool->num};
+ SQueueInfo info = {.ahandle = ahandle, .workerId = worker->id, .threadNum = pool->num, .timestamp = ts};
(*fp)(&info, msg);
}
}
@@ -162,7 +163,7 @@ int32_t tWWorkerInit(SWWorkerPool *pool) {
worker->pool = pool;
}
- uInfo("worker:%s is initialized, max:%d", pool->name, pool->max);
+ uDebug("worker:%s is initialized, max:%d", pool->name, pool->max);
return 0;
}
@@ -189,7 +190,7 @@ void tWWorkerCleanup(SWWorkerPool *pool) {
taosMemoryFreeClear(pool->workers);
taosThreadMutexDestroy(&pool->mutex);
- uInfo("worker:%s is closed", pool->name);
+ uDebug("worker:%s is closed", pool->name);
}
static void *tWWorkerThreadFp(SWWorker *worker) {
diff --git a/tests/pytest/cluster/clusterSetup.py b/tests/pytest/cluster/clusterSetup.py
index 87414303f850bcbd78468238e7b76aa3dbb3326e..809e0e9d25ed79246cbd4d83d39f262b0a678cd0 100644
--- a/tests/pytest/cluster/clusterSetup.py
+++ b/tests/pytest/cluster/clusterSetup.py
@@ -92,13 +92,13 @@ class Node:
self.conn.run("yes|./install.sh")
def configTaosd(self, taosConfigKey, taosConfigValue):
- self.conn.run("sudo echo '%s %s' >> %s" % (taosConfigKey, taosConfigValue, "/etc/taos/taos.cfg"))
+ self.conn.run("sudo echo %s %s >> %s" % (taosConfigKey, taosConfigValue, "/etc/taos/taos.cfg"))
def removeTaosConfig(self, taosConfigKey, taosConfigValue):
self.conn.run("sudo sed -in-place -e '/%s %s/d' %s" % (taosConfigKey, taosConfigValue, "/etc/taos/taos.cfg"))
def configHosts(self, ip, name):
- self.conn.run("echo '%s %s' >> %s" % (ip, name, '/etc/hosts'))
+ self.conn.run("echo %s %s >> %s" % (ip, name, '/etc/hosts'))
def removeData(self):
try:
diff --git a/tests/pytest/cq.py b/tests/pytest/cq.py
deleted file mode 100644
index 7778969619f2d0679c2596581d8d76101d41ed9f..0000000000000000000000000000000000000000
--- a/tests/pytest/cq.py
+++ /dev/null
@@ -1,169 +0,0 @@
-###################################################################
-# Copyright (c) 2016 by TAOS Technologies, Inc.
-# All rights reserved.
-#
-# This file is proprietary and confidential to TAOS Technologies.
-# No part of this file may be reproduced, stored, transmitted,
-# disclosed or used in any form or by any means other than as
-# expressly provided by the written permission from Jianhui Tao
-#
-###################################################################
-
-# -*- coding: utf-8 -*-
-import threading
-import taos
-import sys
-import json
-import time
-import random
-# query sql
-query_sql = [
-# first supertable
-"select count(*) from test.meters ;",
-"select count(*) from test.meters where t3 > 2;",
-"select count(*) from test.meters where ts <> '2020-05-13 10:00:00.002';",
-"select count(*) from test.meters where t7 like 'taos_1%';",
-"select count(*) from test.meters where t7 like '_____2';",
-"select count(*) from test.meters where t8 like '%思%';",
-"select count(*) from test.meters interval(1n) order by ts desc;",
-#"select max(c0) from test.meters group by tbname",
-"select first(ts) from test.meters where t5 >5000 and t5<5100;",
-"select last(ts) from test.meters where t5 >5000 and t5<5100;",
-"select last_row(*) from test.meters;",
-"select twa(c1) from test.t1 where ts > 1500000001000 and ts < 1500000101000" ,
-"select avg(c1) from test.meters where t5 >5000 and t5<5100;",
-"select bottom(c1, 2) from test.t1;",
-"select diff(c1) from test.t1;",
-"select leastsquares(c1, 1, 1) from test.t1 ;",
-"select max(c1) from test.meters where t5 >5000 and t5<5100;",
-"select min(c1) from test.meters where t5 >5000 and t5<5100;",
-"select c1 + c2 + c1 / c5 + c4 + c2 from test.t1;",
-"select percentile(c1, 50) from test.t1;",
-"select spread(c1) from test.t1 ;",
-"select stddev(c1) from test.t1;",
-"select sum(c1) from test.meters where t5 >5000 and t5<5100;",
-"select top(c1, 2) from test.meters where t5 >5000 and t5<5100;"
-"select twa(c4) from test.t1 where ts > 1500000001000 and ts < 1500000101000" ,
-"select avg(c4) from test.meters where t5 >5000 and t5<5100;",
-"select bottom(c4, 2) from test.t1 where t5 >5000 and t5<5100;",
-"select diff(c4) from test.t1 where t5 >5000 and t5<5100;",
-"select leastsquares(c4, 1, 1) from test.t1 ;",
-"select max(c4) from test.meters where t5 >5000 and t5<5100;",
-"select min(c4) from test.meters where t5 >5000 and t5<5100;",
-"select c5 + c2 + c4 / c5 + c4 + c2 from test.t1 ;",
-"select percentile(c5, 50) from test.t1;",
-"select spread(c5) from test.t1 ;",
-"select stddev(c5) from test.t1 where t5 >5000 and t5<5100;",
-"select sum(c5) from test.meters where t5 >5000 and t5<5100;",
-"select top(c5, 2) from test.meters where t5 >5000 and t5<5100;",
-#all vnode
-"select count(*) from test.meters where t5 >5000 and t5<5100",
-"select max(c0),avg(c1) from test.meters where t5 >5000 and t5<5100",
-"select sum(c5),avg(c1) from test.meters where t5 >5000 and t5<5100",
-"select max(c0),min(c5) from test.meters where t5 >5000 and t5<5100",
-"select min(c0),avg(c5) from test.meters where t5 >5000 and t5<5100",
-# second supertable
-"select count(*) from test.meters1 where t3 > 2;",
-"select count(*) from test.meters1 where ts <> '2020-05-13 10:00:00.002';",
-"select count(*) from test.meters where t7 like 'taos_1%';",
-"select count(*) from test.meters where t7 like '_____2';",
-"select count(*) from test.meters where t8 like '%思%';",
-"select count(*) from test.meters1 interval(1n) order by ts desc;",
-#"select max(c0) from test.meters1 group by tbname",
-"select first(ts) from test.meters1 where t5 >5000 and t5<5100;",
-"select last(ts) from test.meters1 where t5 >5000 and t5<5100;",
-"select last_row(*) from test.meters1 ;",
-"select twa(c1) from test.m1 where ts > 1500000001000 and ts < 1500000101000" ,
-"select avg(c1) from test.meters1 where t5 >5000 and t5<5100;",
-"select bottom(c1, 2) from test.m1 where t5 >5000 and t5<5100;",
-"select diff(c1) from test.m1 ;",
-"select leastsquares(c1, 1, 1) from test.m1 ;",
-"select max(c1) from test.meters1 where t5 >5000 and t5<5100;",
-"select min(c1) from test.meters1 where t5 >5000 and t5<5100;",
-"select c1 + c2 + c1 / c0 + c2 from test.m1 ;",
-"select percentile(c1, 50) from test.m1;",
-"select spread(c1) from test.m1 ;",
-"select stddev(c1) from test.m1;",
-"select sum(c1) from test.meters1 where t5 >5000 and t5<5100;",
-"select top(c1, 2) from test.meters1 where t5 >5000 and t5<5100;",
-"select twa(c5) from test.m1 where ts > 1500000001000 and ts < 1500000101000" ,
-"select avg(c5) from test.meters1 where t5 >5000 and t5<5100;",
-"select bottom(c5, 2) from test.m1;",
-"select diff(c5) from test.m1;",
-"select leastsquares(c5, 1, 1) from test.m1 ;",
-"select max(c5) from test.meters1 where t5 >5000 and t5<5100;",
-"select min(c5) from test.meters1 where t5 >5000 and t5<5100;",
-"select c5 + c2 + c4 / c5 + c0 from test.m1;",
-"select percentile(c4, 50) from test.m1;",
-"select spread(c4) from test.m1 ;",
-"select stddev(c4) from test.m1;",
-"select sum(c4) from test.meters1 where t5 >5100 and t5<5300;",
-"select top(c4, 2) from test.meters1 where t5 >5100 and t5<5300;",
-"select count(*) from test.meters1 where t5 >5100 and t5<5300",
-#all vnode
-"select count(*) from test.meters1 where t5 >5100 and t5<5300",
-"select max(c0),avg(c1) from test.meters1 where t5 >5000 and t5<5100",
-"select sum(c5),avg(c1) from test.meters1 where t5 >5000 and t5<5100",
-"select max(c0),min(c5) from test.meters1 where t5 >5000 and t5<5100",
-"select min(c0),avg(c5) from test.meters1 where t5 >5000 and t5<5100",
-#join
-# "select * from meters,meters1 where meters.ts = meters1.ts and meters.t5 = meters1.t5",
-# "select * from meters,meters1 where meters.ts = meters1.ts and meters.t7 = meters1.t7",
-# "select * from meters,meters1 where meters.ts = meters1.ts and meters.t8 = meters1.t8",
-# "select meters.ts,meters1.c2 from meters,meters1 where meters.ts = meters1.ts and meters.t8 = meters1.t8"
-]
-
-class ConcurrentInquiry:
- def initConnection(self):
- self.numOfTherads = 50
- self.ts=1500000001000
-
- def SetThreadsNum(self,num):
- self.numOfTherads=num
- def query_thread(self,threadID):
- host = "10.211.55.14"
- user = "root"
- password = "taosdata"
- conn = taos.connect(
- host,
- user,
- password,
- )
- cl = conn.cursor()
- cl.execute("use test;")
-
- print("Thread %d: starting" % threadID)
-
- while True:
- ran_query_sql=query_sql
- random.shuffle(ran_query_sql)
- for i in ran_query_sql:
- print("Thread %d : %s"% (threadID,i))
- try:
- start = time.time()
- cl.execute(i)
- cl.fetchall()
- end = time.time()
- print("time cost :",end-start)
- except Exception as e:
- print(
- "Failure thread%d, sql: %s,exception: %s" %
- (threadID, str(i),str(e)))
- exit(-1)
-
-
- print("Thread %d: finishing" % threadID)
-
-
-
- def run(self):
-
- threads = []
- for i in range(self.numOfTherads):
- thread = threading.Thread(target=self.query_thread, args=(i,))
- threads.append(thread)
- thread.start()
-
-q = ConcurrentInquiry()
-q.initConnection()
-q.run()
diff --git a/tests/pytest/dockerCluster/basic.py b/tests/pytest/dockerCluster/basic.py
index 871d69790d328f3dcea9fdfdac27a6abc3bb14bd..5188aa4a80a8faacfbc4056958bde2363a796449 100644
--- a/tests/pytest/dockerCluster/basic.py
+++ b/tests/pytest/dockerCluster/basic.py
@@ -113,7 +113,7 @@ class BuildDockerCluser:
def cfg(self, option, value, nodeIndex):
cfgPath = "%s/node%d/cfg/taos.cfg" % (self.dockerDir, nodeIndex)
- cmd = "echo '%s %s' >> %s" % (option, value, cfgPath)
+ cmd = "echo %s %s >> %s" % (option, value, cfgPath)
self.execCmd(cmd)
def updateLocalhosts(self):
@@ -122,7 +122,7 @@ class BuildDockerCluser:
print(result)
if result is None or result.isspace():
print("==========")
- cmd = "echo '172.27.0.7 tdnode1' >> /etc/hosts"
+ cmd = "echo 172.27.0.7 tdnode1 >> /etc/hosts"
display = "echo %s" % cmd
self.execCmd(display)
self.execCmd(cmd)
diff --git a/tests/pytest/fulltest.bat b/tests/pytest/fulltest.bat
new file mode 100644
index 0000000000000000000000000000000000000000..fd74f2ad029c982a3a3dd98ae0c8df264bab9c66
--- /dev/null
+++ b/tests/pytest/fulltest.bat
@@ -0,0 +1,22 @@
+
+python .\test.py -f insert\basic.py
+python .\test.py -f insert\int.py
+python .\test.py -f insert\float.py
+python .\test.py -f insert\bigint.py
+python .\test.py -f insert\bool.py
+python .\test.py -f insert\double.py
+python .\test.py -f insert\smallint.py
+python .\test.py -f insert\tinyint.py
+python .\test.py -f insert\date.py
+python .\test.py -f insert\binary.py
+python .\test.py -f insert\nchar.py
+
+python .\test.py -f query\filter.py
+python .\test.py -f query\filterCombo.py
+python .\test.py -f query\queryNormal.py
+python .\test.py -f query\queryError.py
+python .\test.py -f query\filterAllIntTypes.py
+python .\test.py -f query\filterFloatAndDouble.py
+python .\test.py -f query\filterOtherTypes.py
+python .\test.py -f query\querySort.py
+python .\test.py -f query\queryJoin.py
\ No newline at end of file
diff --git a/tests/pytest/manualTest/TD-5114/rollingUpgrade.py b/tests/pytest/manualTest/TD-5114/rollingUpgrade.py
index f634eb1208b69f263ea89db2440db40ec3e085e6..b2d5171972b9e5e0025c4e46e8dd1f257ed48e24 100644
--- a/tests/pytest/manualTest/TD-5114/rollingUpgrade.py
+++ b/tests/pytest/manualTest/TD-5114/rollingUpgrade.py
@@ -38,7 +38,7 @@ class Node:
def buildTaosd(self):
try:
print(self.conn)
- # self.conn.run('echo "1234" > /home/chr/installtest/test.log')
+ # self.conn.run('echo 1234 > /home/chr/installtest/test.log')
self.conn.run("cd /home/chr/installtest/ && tar -xvf %s " %self.verName)
self.conn.run("cd /home/chr/installtest/%s && ./install.sh " % self.installPath)
except Exception as e:
@@ -49,7 +49,7 @@ class Node:
def rebuildTaosd(self):
try:
print(self.conn)
- # self.conn.run('echo "1234" > /home/chr/installtest/test.log')
+ # self.conn.run('echo 1234 > /home/chr/installtest/test.log')
self.conn.run("cd /home/chr/installtest/%s && ./install.sh " % self.installPath)
except Exception as e:
print("Build Taosd error for node %d " % self.index)
@@ -108,7 +108,7 @@ class oneNode:
# install TDengine at 192.168.103/104/141
try:
node = Node(id, username, IP, passwd, version)
- node.conn.run('echo "start taosd"')
+ node.conn.run('echo start taosd')
node.buildTaosd()
# clear DataPath , if need clear data
node.clearData()
@@ -128,7 +128,7 @@ class oneNode:
# start TDengine
try:
node = Node(id, username, IP, passwd, version)
- node.conn.run('echo "restart taosd"')
+ node.conn.run('echo restart taosd')
# clear DataPath , if need clear data
node.clearData()
node.restartTaosd()
@@ -149,14 +149,14 @@ class oneNode:
verName = "TDengine-enterprise-server-%s-Linux-x64.tar.gz" % version
# installPath = "TDengine-enterprise-server-%s" % self.version
node131 = Node(131, 'ubuntu', '192.168.1.131', 'tbase125!', '2.0.20.0')
- node131.conn.run('echo "upgrade cluster"')
+ node131.conn.run('echo upgrade cluster')
node131.conn.run('sshpass -p tbase125! scp /nas/TDengine/v%s/enterprise/%s root@192.168.1.%d:/home/chr/installtest/' % (version,verName,id))
node131.conn.close()
# upgrade TDengine at 192.168.103/104/141
try:
node = Node(id, username, IP, passwd, version)
- node.conn.run('echo "start taosd"')
- node.conn.run('echo "1234" > /home/chr/test.log')
+ node.conn.run('echo start taosd')
+ node.conn.run('echo 1234 > /home/chr/test.log')
node.buildTaosd()
time.sleep(5)
node.startTaosd()
@@ -176,7 +176,7 @@ class oneNode:
# backCluster TDengine at 192.168.103/104/141
try:
node = Node(id, username, IP, passwd, version)
- node.conn.run('echo "rollback taos"')
+ node.conn.run('echo rollback taos')
node.rebuildTaosd()
time.sleep(5)
node.startTaosd()
diff --git a/tests/pytest/stream/cqSupportBefore1970.py b/tests/pytest/stream/cqSupportBefore1970.py
deleted file mode 100644
index 01ba5234fcabb96a4c3c7c28e405c316d6e7dc7d..0000000000000000000000000000000000000000
--- a/tests/pytest/stream/cqSupportBefore1970.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# No part of this file may be reproduced, stored, transmitted,
-# disclosed or used in any form or by any means other than as
-# expressly provided by the written permission from Jianhui Tao
-#
-###################################################################
-
-# -*- coding: utf-8 -*-
-
-import sys
-from util.log import *
-from util.cases import *
-from util.sql import *
-from util.dnodes import *
-
-
-class TDTestCase:
- def init(self, conn, logSql):
- tdLog.debug(f"start to execute {__file__}")
- tdSql.init(conn.cursor(), logSql)
-
- def insertnow(self):
-
- # timestamp list:
- # 0 -> "1970-01-01 08:00:00" | -28800000 -> "1970-01-01 00:00:00" | -946800000000 -> "1940-01-01 00:00:00"
- # -631180800000 -> "1950-01-01 00:00:00"
-
- tsp1 = 0
- tsp2 = -28800000
- tsp3 = -946800000000
- tsp4 = "1969-01-01 00:00:00.000"
-
- tdSql.execute("insert into tcq1 values (now-11d, 5)")
- tdSql.execute(f"insert into tcq1 values ({tsp1}, 4)")
- tdSql.execute(f"insert into tcq1 values ({tsp2}, 3)")
- tdSql.execute(f"insert into tcq1 values ('{tsp4}', 2)")
- tdSql.execute(f"insert into tcq1 values ({tsp3}, 1)")
-
- def waitedQuery(self, sql, expectRows, timeout):
- tdLog.info(f"sql: {sql}, try to retrieve {expectRows} rows in {timeout} seconds")
- try:
- for i in range(timeout):
- tdSql.cursor.execute(sql)
- self.queryResult = tdSql.cursor.fetchall()
- self.queryRows = len(self.queryResult)
- self.queryCols = len(tdSql.cursor.description)
- # tdLog.info("sql: %s, try to retrieve %d rows,get %d rows" % (sql, expectRows, self.queryRows))
- if self.queryRows >= expectRows:
- return (self.queryRows, i)
- time.sleep(1)
- except Exception as e:
- caller = inspect.getframeinfo(inspect.stack()[1][0])
- tdLog.notice(f"{caller.filename}({caller.lineno}) failed: sql:{sql}, {repr(e)}")
- raise Exception(repr(e))
- return (self.queryRows, timeout)
-
- def cq(self):
- tdSql.execute(
- "create table cq1 as select avg(c1) from tcq1 where ts > -946800000000 interval(10d) sliding(1d)"
- )
- self.waitedQuery("select * from cq1", 1, 120)
-
- def querycq(self):
- tdSql.query("select * from cq1")
- tdSql.checkData(0, 1, 1.0)
- tdSql.checkData(10, 1, 2.0)
-
- def run(self):
- tdSql.execute("drop database if exists dbcq")
- tdSql.execute("create database if not exists dbcq keep 36500")
- tdSql.execute("use dbcq")
-
- tdSql.execute("create table stbcq (ts timestamp, c1 int ) TAGS(t1 int)")
- tdSql.execute("create table tcq1 using stbcq tags(1)")
-
- self.insertnow()
- self.cq()
- self.querycq()
-
- # after wal and sync, check again
- tdSql.query("show dnodes")
- index = tdSql.getData(0, 0)
- tdDnodes.stop(index)
- tdDnodes.start(index)
-
- self.querycq()
-
- def stop(self):
- tdSql.close()
- tdLog.success(f"{__file__} successfully executed")
-
-
-tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/pytest/stream/history.py b/tests/pytest/stream/history.py
deleted file mode 100644
index cb8a4d598651473f907aa05a0609c9ce68c78f82..0000000000000000000000000000000000000000
--- a/tests/pytest/stream/history.py
+++ /dev/null
@@ -1,67 +0,0 @@
-###################################################################
-# Copyright (c) 2016 by TAOS Technologies, Inc.
-# All rights reserved.
-#
-# This file is proprietary and confidential to TAOS Technologies.
-# No part of this file may be reproduced, stored, transmitted,
-# disclosed or used in any form or by any means other than as
-# expressly provided by the written permission from Jianhui Tao
-#
-###################################################################
-
-# -*- coding: utf-8 -*-
-
-import sys
-import time
-import taos
-from util.log import tdLog
-from util.cases import tdCases
-from util.sql import tdSql
-
-
-class TDTestCase:
- def init(self, conn, logSql):
- tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(), logSql)
-
- def run(self):
- tdSql.prepare()
-
- tdSql.execute("create table cars(ts timestamp, s int) tags(id int)")
- tdSql.execute("create table car0 using cars tags(0)")
- tdSql.execute("create table car1 using cars tags(1)")
- tdSql.execute("create table car2 using cars tags(2)")
- tdSql.execute("create table car3 using cars tags(3)")
- tdSql.execute("create table car4 using cars tags(4)")
-
- tdSql.execute("insert into car0 values('2019-01-01 00:00:00.103', 1)")
- tdSql.execute("insert into car1 values('2019-01-01 00:00:00.234', 1)")
- tdSql.execute("insert into car0 values('2019-01-01 00:00:01.012', 1)")
- tdSql.execute("insert into car0 values('2019-01-01 00:00:02.003', 1)")
- tdSql.execute("insert into car2 values('2019-01-01 00:00:02.328', 1)")
- tdSql.execute("insert into car0 values('2019-01-01 00:00:03.139', 1)")
- tdSql.execute("insert into car0 values('2019-01-01 00:00:04.348', 1)")
- tdSql.execute("insert into car0 values('2019-01-01 00:00:05.783', 1)")
- tdSql.execute("insert into car1 values('2019-01-01 00:00:01.893', 1)")
- tdSql.execute("insert into car1 values('2019-01-01 00:00:02.712', 1)")
- tdSql.execute("insert into car1 values('2019-01-01 00:00:03.982', 1)")
- tdSql.execute("insert into car3 values('2019-01-01 00:00:01.389', 1)")
- tdSql.execute("insert into car4 values('2019-01-01 00:00:01.829', 1)")
-
- tdSql.error("create table strm as select count(*) from cars")
-
- tdSql.execute("create table strm as select count(*) from cars interval(4s)")
- tdSql.waitedQuery("select * from strm", 2, 100)
- tdSql.checkData(0, 1, 11)
- tdSql.checkData(1, 1, 2)
-
-
-
-
- def stop(self):
- tdSql.close()
- tdLog.success("%s successfully executed" % __file__)
-
-
-tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/stream/metric_1.py b/tests/pytest/stream/metric_1.py
deleted file mode 100644
index b4cccac69c8afe9c637b7a455732572c029258a7..0000000000000000000000000000000000000000
--- a/tests/pytest/stream/metric_1.py
+++ /dev/null
@@ -1,104 +0,0 @@
-###################################################################
-# Copyright (c) 2016 by TAOS Technologies, Inc.
-# All rights reserved.
-#
-# This file is proprietary and confidential to TAOS Technologies.
-# No part of this file may be reproduced, stored, transmitted,
-# disclosed or used in any form or by any means other than as
-# expressly provided by the written permission from Jianhui Tao
-#
-###################################################################
-
-# -*- coding: utf-8 -*-
-
-import sys
-import time
-import taos
-from util.log import tdLog
-from util.cases import tdCases
-from util.sql import tdSql
-
-
-class TDTestCase:
- def init(self, conn, logSql):
- tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(), logSql)
-
- def createFuncStream(self, expr, suffix, value):
- tbname = "strm_" + suffix
- tdLog.info("create stream table %s" % tbname)
- tdSql.query("select %s from stb interval(1d)" % expr)
- tdSql.checkData(0, 1, value)
- tdSql.execute("create table %s as select %s from stb interval(1d)" % (tbname, expr))
-
- def checkStreamData(self, suffix, value):
- sql = "select * from strm_" + suffix
- tdSql.waitedQuery(sql, 1, 120)
- tdSql.checkData(0, 1, value)
-
- def run(self):
- tbNum = 10
- rowNum = 20
-
- tdSql.prepare()
-
- tdLog.info("===== preparing data =====")
- tdSql.execute(
- "create table stb(ts timestamp, tbcol int, tbcol2 float) tags(tgcol int)")
- for i in range(tbNum):
- tdSql.execute("create table tb%d using stb tags(%d)" % (i, i))
- for j in range(rowNum):
- tdSql.execute(
- "insert into tb%d values (now - %dm, %d, %d)" %
- (i, 1440 - j, j, j))
- time.sleep(0.1)
-
- self.createFuncStream("count(*)", "c1", 200)
- self.createFuncStream("count(tbcol)", "c2", 200)
- self.createFuncStream("count(tbcol2)", "c3", 200)
- self.createFuncStream("avg(tbcol)", "av", 9.5)
- self.createFuncStream("sum(tbcol)", "su", 1900)
- self.createFuncStream("min(tbcol)", "mi", 0)
- self.createFuncStream("max(tbcol)", "ma", 19)
- self.createFuncStream("first(tbcol)", "fi", 0)
- self.createFuncStream("last(tbcol)", "la", 19)
- #tdSql.query("select stddev(tbcol) from stb interval(1d)")
- #tdSql.query("select leastsquares(tbcol, 1, 1) from stb interval(1d)")
- tdSql.query("select top(tbcol, 1) from stb interval(1d)")
- tdSql.query("select bottom(tbcol, 1) from stb interval(1d)")
- #tdSql.query("select percentile(tbcol, 1) from stb interval(1d)")
- #tdSql.query("select diff(tbcol) from stb interval(1d)")
-
- tdSql.query("select count(tbcol) from stb where ts < now + 4m interval(1d)")
- tdSql.checkData(0, 1, 200)
- #tdSql.execute("create table strm_wh as select count(tbcol) from stb where ts < now + 4m interval(1d)")
-
- self.createFuncStream("count(tbcol)", "as", 200)
-
- tdSql.query("select count(tbcol) from stb interval(1d) group by tgcol")
- tdSql.checkData(0, 1, 20)
-
- tdSql.query("select count(tbcol) from stb where ts < now + 4m interval(1d) group by tgcol")
- tdSql.checkData(0, 1, 20)
-
- self.checkStreamData("c1", 200)
- self.checkStreamData("c2", 200)
- self.checkStreamData("c3", 200)
- self.checkStreamData("av", 9.5)
- self.checkStreamData("su", 1900)
- self.checkStreamData("mi", 0)
- self.checkStreamData("ma", 19)
- self.checkStreamData("fi", 0)
- self.checkStreamData("la", 19)
- #self.checkStreamData("wh", 200)
- self.checkStreamData("as", 200)
-
- def stop(self):
- tdSql.close()
- tdLog.success("%s successfully executed" % __file__)
-
-
-tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
-
-
diff --git a/tests/pytest/stream/metric_n.py b/tests/pytest/stream/metric_n.py
deleted file mode 100644
index d223fe81fc79835047bac8ca2341cdbeac2e6617..0000000000000000000000000000000000000000
--- a/tests/pytest/stream/metric_n.py
+++ /dev/null
@@ -1,123 +0,0 @@
-###################################################################
-# Copyright (c) 2016 by TAOS Technologies, Inc.
-# All rights reserved.
-#
-# This file is proprietary and confidential to TAOS Technologies.
-# No part of this file may be reproduced, stored, transmitted,
-# disclosed or used in any form or by any means other than as
-# expressly provided by the written permission from Jianhui Tao
-#
-###################################################################
-
-# -*- coding: utf-8 -*-
-
-import sys
-import time
-import taos
-from util.log import tdLog
-from util.cases import tdCases
-from util.sql import tdSql
-
-
-class TDTestCase:
- def init(self, conn, logSql):
- tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(), logSql)
-
- def run(self):
- tbNum = 10
- rowNum = 20
- totalNum = tbNum * rowNum
-
- tdSql.prepare()
-
- tdLog.info("===== preparing data =====")
- tdSql.execute(
- "create table stb(ts timestamp, tbcol int, tbcol2 float) tags(tgcol int)")
- for i in range(tbNum):
- tdSql.execute("create table tb%d using stb tags(%d)" % (i, i))
- for j in range(rowNum):
- tdSql.execute(
- "insert into tb%d values (now - %dm, %d, %d)" %
- (i, 1440 - j, j, j))
- time.sleep(0.1)
-
- tdLog.info("===== step 1 =====")
- tdSql.query("select count(*), count(tbcol), count(tbcol2) from stb interval(1d)")
- tdSql.checkData(0, 1, totalNum)
- tdSql.checkData(0, 2, totalNum)
- tdSql.checkData(0, 3, totalNum)
-
- tdLog.info("===== step 2 =====")
- tdSql.execute("create table strm_c3 as select count(*), count(tbcol), count(tbcol2) from stb interval(1d)")
-
- tdLog.info("===== step 3 =====")
- tdSql.execute("create table strm_c32 as select count(*), count(tbcol) as c1, count(tbcol2) as c2, count(tbcol) as c3, count(tbcol) as c4, count(tbcol) as c5, count(tbcol) as c6, count(tbcol) as c7, count(tbcol) as c8, count(tbcol) as c9, count(tbcol) as c10, count(tbcol) as c11, count(tbcol) as c12, count(tbcol) as c13, count(tbcol) as c14, count(tbcol) as c15, count(tbcol) as c16, count(tbcol) as c17, count(tbcol) as c18, count(tbcol) as c19, count(tbcol) as c20, count(tbcol) as c21, count(tbcol) as c22, count(tbcol) as c23, count(tbcol) as c24, count(tbcol) as c25, count(tbcol) as c26, count(tbcol) as c27, count(tbcol) as c28, count(tbcol) as c29, count(tbcol) as c30 from stb interval(1d)")
-
- tdLog.info("===== step 4 =====")
- tdSql.query("select count(*), count(tbcol) as c1, count(tbcol2) as c2, count(tbcol) as c3, count(tbcol) as c4, count(tbcol) as c5, count(tbcol) as c6, count(tbcol) as c7, count(tbcol) as c8, count(tbcol) as c9, count(tbcol) as c10, count(tbcol) as c11, count(tbcol) as c12, count(tbcol) as c13, count(tbcol) as c14, count(tbcol) as c15, count(tbcol) as c16, count(tbcol) as c17, count(tbcol) as c18, count(tbcol) as c19, count(tbcol) as c20, count(tbcol) as c21, count(tbcol) as c22, count(tbcol) as c23, count(tbcol) as c24, count(tbcol) as c25, count(tbcol) as c26, count(tbcol) as c27, count(tbcol) as c28, count(tbcol) as c29, count(tbcol) as c30 from stb interval(1d)")
- tdSql.checkData(0, 1, totalNum)
- tdSql.checkData(0, 2, totalNum)
- tdSql.checkData(0, 3, totalNum)
-
- tdLog.info("===== step 5 =====")
- tdSql.execute("create table strm_c31 as select count(*), count(tbcol) as c1, count(tbcol2) as c2, count(tbcol) as c3, count(tbcol) as c4, count(tbcol) as c5, count(tbcol) as c6, count(tbcol) as c7, count(tbcol) as c8, count(tbcol) as c9, count(tbcol) as c10, count(tbcol) as c11, count(tbcol) as c12, count(tbcol) as c13, count(tbcol) as c14, count(tbcol) as c15, count(tbcol) as c16, count(tbcol) as c17, count(tbcol) as c18, count(tbcol) as c19, count(tbcol) as c20, count(tbcol) as c21, count(tbcol) as c22, count(tbcol) as c23, count(tbcol) as c24, count(tbcol) as c25, count(tbcol) as c26, count(tbcol) as c27, count(tbcol) as c28, count(tbcol) as c29, count(tbcol) as c30 from stb interval(1d)")
-
- tdLog.info("===== step 6 =====")
- tdSql.query("select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from stb interval(1d)")
- tdSql.checkData(0, 1, 9.5)
- tdSql.checkData(0, 2, 1900)
- tdSql.checkData(0, 3, 0)
- tdSql.checkData(0, 4, 19)
- tdSql.checkData(0, 5, 0)
- tdSql.checkData(0, 6, 19)
- tdSql.execute("create table strm_avg as select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from stb interval(1d)")
-
- tdLog.info("===== step 7 =====")
- tdSql.query("select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol), count(tbcol) from stb where ts < now + 4m interval(1d)")
- tdSql.checkData(0, 1, 9.5)
- tdSql.checkData(0, 2, 1900)
- tdSql.checkData(0, 3, 0)
- tdSql.checkData(0, 4, 19)
- tdSql.checkData(0, 5, 0)
- tdSql.checkData(0, 6, 19)
- tdSql.checkData(0, 7, totalNum)
-
- tdLog.info("===== step 8 =====")
- tdSql.query("select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol), count(tbcol) from stb where ts < now + 4m interval(1d)")
- tdSql.checkData(0, 1, 9.5)
- tdSql.checkData(0, 2, 1900)
- tdSql.checkData(0, 3, 0)
- tdSql.checkData(0, 4, 19)
- tdSql.checkData(0, 5, 0)
- tdSql.checkData(0, 6, 19)
- tdSql.checkData(0, 7, totalNum)
-
- tdLog.info("===== step 9 =====")
- tdSql.waitedQuery("select * from strm_c3", 1, 120)
- tdSql.checkData(0, 1, totalNum)
- tdSql.checkData(0, 2, totalNum)
- tdSql.checkData(0, 3, totalNum)
-
- tdLog.info("===== step 10 =====")
- tdSql.waitedQuery("select * from strm_c31", 1, 30)
- for i in range(1, 10):
- tdSql.checkData(0, i, totalNum)
-
- tdLog.info("===== step 11 =====")
- tdSql.waitedQuery("select * from strm_avg", 1, 20)
- tdSql.checkData(0, 1, 9.5)
- tdSql.checkData(0, 2, 1900)
- tdSql.checkData(0, 3, 0)
- tdSql.checkData(0, 4, 19)
- tdSql.checkData(0, 5, 0)
- tdSql.checkData(0, 6, 19)
-
-
- def stop(self):
- tdSql.close()
- tdLog.success("%s successfully executed" % __file__)
-
-
-tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/stream/new.py b/tests/pytest/stream/new.py
deleted file mode 100644
index 4a0e47c01ad9f9aac7ed78be0ff4fc93fc0d41ed..0000000000000000000000000000000000000000
--- a/tests/pytest/stream/new.py
+++ /dev/null
@@ -1,79 +0,0 @@
-###################################################################
-# Copyright (c) 2016 by TAOS Technologies, Inc.
-# All rights reserved.
-#
-# This file is proprietary and confidential to TAOS Technologies.
-# No part of this file may be reproduced, stored, transmitted,
-# disclosed or used in any form or by any means other than as
-# expressly provided by the written permission from Jianhui Tao
-#
-###################################################################
-
-# -*- coding: utf-8 -*-
-
-import sys
-import time
-import taos
-from util.log import tdLog
-from util.cases import tdCases
-from util.sql import tdSql
-
-
-class TDTestCase:
- def init(self, conn, logSql):
- tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(), logSql)
-
- def run(self):
- rowNum = 200
- tdSql.prepare()
-
- tdLog.info("=============== step1")
- tdSql.execute("create table mt(ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)")
- for i in range(5):
- tdSql.execute("create table tb%d using mt tags(%d)" % (i, i))
- for j in range(rowNum):
- tdSql.execute("insert into tb%d values(now + %ds, %d, %d)" % (i, j, j, j))
- time.sleep(0.1)
-
- tdLog.info("=============== step2")
- tdSql.query("select count(*), count(tbcol), count(tbcol2) from mt interval(10s)")
- tdSql.execute("create table st as select count(*), count(tbcol), count(tbcol2) from mt interval(10s)")
-
- tdLog.info("=============== step3")
- start = time.time()
- tdSql.waitedQuery("select * from st", 1, 180)
- delay = int(time.time() - start) + 80
- v = tdSql.getData(0, 3)
- if v >= 51:
- tdLog.exit("value is %d, which is larger than 51" % v)
-
- tdLog.info("=============== step4")
- for i in range(5, 10):
- tdSql.execute("create table tb%d using mt tags(%d)" % (i, i))
- for j in range(rowNum):
- tdSql.execute("insert into tb%d values(now + %ds, %d, %d)" % (i, j, j, j))
-
- tdLog.info("=============== step5")
- maxValue = 0
- for i in range(delay):
- time.sleep(1)
- tdSql.query("select * from st order by ts desc")
- v = tdSql.getData(0, 3)
- if v > maxValue:
- maxValue = v
- if v > 51:
- break
-
- if maxValue <= 51:
- tdLog.exit("value is %d, which is smaller than 51" % maxValue)
-
- def stop(self):
- tdSql.close()
- tdLog.success("%s successfully executed" % __file__)
-
-
-tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
-
-
diff --git a/tests/pytest/stream/parser.py b/tests/pytest/stream/parser.py
deleted file mode 100644
index 3b231d2b391a8a5a92cb8924134555117c5bfed2..0000000000000000000000000000000000000000
--- a/tests/pytest/stream/parser.py
+++ /dev/null
@@ -1,182 +0,0 @@
-###################################################################
-# Copyright (c) 2016 by TAOS Technologies, Inc.
-# All rights reserved.
-#
-# This file is proprietary and confidential to TAOS Technologies.
-# No part of this file may be reproduced, stored, transmitted,
-# disclosed or used in any form or by any means other than as
-# expressly provided by the written permission from Jianhui Tao
-#
-###################################################################
-
-# -*- coding: utf-8 -*-
-
-import sys
-import time
-import taos
-from util.log import tdLog
-from util.cases import tdCases
-from util.sql import tdSql
-
-
-class TDTestCase:
- def init(self, conn, logSql):
- tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(), logSql)
-
- '''
- def bug2222(self):
- tdSql.prepare()
- tdSql.execute("create table superreal(ts timestamp, addr binary(5), val float) tags (deviceNo binary(20))")
- tdSql.execute("create table real_001 using superreal tags('001')")
- tdSql.execute("create table tj_001 as select sum(val) from real_001 interval(1m)")
-
- t = datetime.datetime.now()
- for i in range(60):
- ts = t.strftime("%Y-%m-%d %H:%M")
- t += datetime.timedelta(minutes=1)
- sql = "insert into real_001 values('%s:0%d', '1', %d)" % (ts, 0, i)
- for j in range(4):
- sql += ",('%s:0%d', '%d', %d)" % (ts, j + 1, j + 1, i)
- tdSql.execute(sql)
- time.sleep(60 + random.random() * 60 - 30)
- '''
-
- def tbase300(self):
- tdLog.debug("begin tbase300")
-
- tdSql.prepare()
- tdSql.execute("create table mt(ts timestamp, c1 int, c2 int) tags(t1 int)")
- tdSql.execute("create table tb1 using mt tags(1)");
- tdSql.execute("create table tb2 using mt tags(2)");
- tdSql.execute("create table strm as select count(*), avg(c1), sum(c2), max(c1), min(c2),first(c1), last(c2) from mt interval(4s) sliding(2s)")
- #tdSql.execute("create table strm as select count(*), avg(c1), sum(c2), max(c1), min(c2), first(c1) from mt interval(4s) sliding(2s)")
- tdLog.sleep(10)
- tdSql.execute("insert into tb2 values(now, 1, 1)");
- tdSql.execute("insert into tb1 values(now, 1, 1)");
- tdLog.sleep(4)
- tdSql.query("select * from mt")
- tdSql.query("select * from strm")
- tdSql.execute("drop table tb1")
-
- tdSql.waitedQuery("select * from strm", 1, 100)
- if tdSql.queryRows < 1 or tdSql.queryRows > 2:
- tdLog.exit("rows should be 1 or 2")
-
- tdSql.execute("drop table tb2")
- tdSql.execute("drop table mt")
- tdSql.execute("drop table strm")
-
- def tbase304(self):
- tdLog.debug("begin tbase304")
- # we cannot reset query cache in server side, as a workaround,
- # set super table name to mt304, need to change back to mt later
- tdSql.execute("create table mt304 (ts timestamp, c1 int) tags(t1 int, t2 int)")
- tdSql.execute("create table tb1 using mt304 tags(1, 1)")
- tdSql.execute("create table tb2 using mt304 tags(1, -1)")
- time.sleep(0.1)
- tdSql.execute("create table strm as select count(*), avg(c1) from mt304 where t2 >= 0 interval(4s) sliding(2s)")
- tdSql.execute("insert into tb1 values (now,1)")
- tdSql.execute("insert into tb2 values (now,2)")
-
- tdSql.waitedQuery("select * from strm", 1, 100)
- if tdSql.queryRows < 1 or tdSql.queryRows > 2:
- tdLog.exit("rows should be 1 or 2")
-
- tdSql.checkData(0, 1, 1)
- tdSql.checkData(0, 2, 1.000000000)
- tdSql.execute("alter table mt304 drop tag t2")
- tdSql.execute("insert into tb2 values (now,2)")
- tdSql.execute("insert into tb1 values (now,1)")
- tdSql.query("select * from strm")
- tdSql.execute("alter table mt304 add tag t2 int")
- tdLog.sleep(1)
- tdSql.query("select * from strm")
-
- def wildcardFilterOnTags(self):
- tdLog.debug("begin wildcardFilterOnTag")
- tdSql.prepare()
- tdSql.execute("create table stb (ts timestamp, c1 int, c2 binary(10)) tags(t1 binary(10))")
- tdSql.execute("create table tb1 using stb tags('a1')")
- tdSql.execute("create table tb2 using stb tags('b2')")
- tdSql.execute("create table tb3 using stb tags('a3')")
- tdSql.execute("create table strm as select count(*), avg(c1), first(c2) from stb where t1 like 'a%' interval(4s) sliding(2s)")
- tdSql.query("describe strm")
- tdSql.checkRows(4)
-
- tdLog.sleep(1)
- tdSql.execute("insert into tb1 values (now, 0, 'tb1')")
- tdLog.sleep(4)
- tdSql.execute("insert into tb2 values (now, 2, 'tb2')")
- tdLog.sleep(4)
- tdSql.execute("insert into tb3 values (now, 0, 'tb3')")
-
- tdSql.waitedQuery("select * from strm", 4, 60)
- tdSql.checkRows(4)
- tdSql.checkData(0, 2, 0.000000000)
- if tdSql.getData(0, 3) == 'tb2':
- tdLog.exit("unexpected value of data03")
- if tdSql.getData(1, 3) == 'tb2':
- tdLog.exit("unexpected value of data13")
- if tdSql.getData(2, 3) == 'tb2':
- tdLog.exit("unexpected value of data23")
- if tdSql.getData(3, 3) == 'tb2':
- tdLog.exit("unexpected value of data33")
-
- tdLog.info("add table tb4 to see if stream still works correctly")
- # The vnode client needs to refresh metadata cache to allow strm calculate tb4's data.
- # But the current refreshing frequency is every 10 min
- # commented out the case below to save running time
- tdSql.execute("create table tb4 using stb tags('a4')")
- tdSql.execute("insert into tb4 values(now, 4, 'tb4')")
- tdSql.waitedQuery("select * from strm order by ts desc", 6, 60)
- tdSql.checkRows(6)
- tdSql.checkData(0, 2, 4)
- tdSql.checkData(0, 3, "tb4")
-
- tdLog.info("change tag values to see if stream still works correctly")
- tdSql.execute("alter table tb4 set tag t1='b4'")
- tdLog.sleep(3)
- tdSql.execute("insert into tb1 values (now, 1, 'tb1_a1')")
- tdLog.sleep(4)
- tdSql.execute("insert into tb4 values (now, -4, 'tb4_b4')")
- tdSql.waitedQuery("select * from strm order by ts desc", 8, 100)
- tdSql.checkRows(8)
- tdSql.checkData(0, 2, 1)
- tdSql.checkData(0, 3, "tb1_a1")
-
- def datatypes(self):
- tdLog.debug("begin data types")
- tdSql.prepare()
- tdSql.execute("create table stb3 (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 binary(15), c6 nchar(15), c7 bool) tags(t1 int, t2 binary(15))")
- tdSql.execute("create table tb0 using stb3 tags(0, 'tb0')")
- tdSql.execute("create table tb1 using stb3 tags(1, 'tb1')")
- tdSql.execute("create table tb2 using stb3 tags(2, 'tb2')")
- tdSql.execute("create table tb3 using stb3 tags(3, 'tb3')")
- tdSql.execute("create table tb4 using stb3 tags(4, 'tb4')")
-
- tdSql.execute("create table strm0 as select count(ts), count(c1), max(c2), min(c4), first(c5), last(c6) from stb3 where ts < now + 30s interval(4s) sliding(2s)")
- #tdSql.execute("create table strm0 as select count(ts), count(c1), max(c2), min(c4), first(c5) from stb where ts < now + 30s interval(4s) sliding(2s)")
- tdLog.sleep(1)
- tdSql.execute("insert into tb0 values (now, 0, 0, 0, 0, 'binary0', '涛思0', true) tb1 values (now, 1, 1, 1, 1, 'binary1', '涛思1', false) tb2 values (now, 2, 2, 2, 2, 'binary2', '涛思2', true) tb3 values (now, 3, 3, 3, 3, 'binary3', '涛思3', false) tb4 values (now, 4, 4, 4, 4, 'binary4', '涛思4', true) ")
-
- tdSql.waitedQuery("select * from strm0 order by ts desc", 2, 120)
- tdSql.checkRows(2)
-
- tdSql.execute("insert into tb0 values (now, 10, 10, 10, 10, 'binary0', '涛思0', true) tb1 values (now, 11, 11, 11, 11, 'binary1', '涛思1', false) tb2 values (now, 12, 12, 12, 12, 'binary2', '涛思2', true) tb3 values (now, 13, 13, 13, 13, 'binary3', '涛思3', false) tb4 values (now, 14, 14, 14, 14, 'binary4', '涛思4', true) ")
- tdSql.waitedQuery("select * from strm0 order by ts desc", 4, 120)
- tdSql.checkRows(4)
-
- def run(self):
- self.tbase300()
- self.tbase304()
- self.wildcardFilterOnTags()
- self.datatypes()
-
- def stop(self):
- tdSql.close()
- tdLog.success("%s successfully executed" % __file__)
-
-
-tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/stream/showStreamExecTimeisNull.py b/tests/pytest/stream/showStreamExecTimeisNull.py
deleted file mode 100644
index 8a2a09cec6f345d62fc821ba58f60f72d563249f..0000000000000000000000000000000000000000
--- a/tests/pytest/stream/showStreamExecTimeisNull.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# No part of this file may be reproduced, stored, transmitted,
-# disclosed or used in any form or by any means other than as
-# expressly provided by the written permission from Jianhui Tao
-#
-###################################################################
-
-# -*- coding: utf-8 -*-
-
-import sys
-from util.log import *
-from util.cases import *
-from util.sql import *
-from util.dnodes import *
-
-
-class TDTestCase:
- def init(self, conn, logSql):
- tdLog.debug(f"start to execute {__file__}")
- tdSql.init(conn.cursor(), logSql)
-
- def insertnow(self):
-
- # timestamp list:
- # 0 -> "1970-01-01 08:00:00" | -28800000 -> "1970-01-01 00:00:00" | -946800000000 -> "1940-01-01 00:00:00"
- # -631180800000 -> "1950-01-01 00:00:00"
-
- tsp1 = 0
- tsp2 = -28800000
- tsp3 = -946800000000
- tsp4 = "1969-01-01 00:00:00.000"
-
- tdSql.execute("insert into tcq1 values (now-11d, 5)")
- tdSql.execute(f"insert into tcq1 values ({tsp1}, 4)")
- tdSql.execute(f"insert into tcq1 values ({tsp2}, 3)")
- tdSql.execute(f"insert into tcq1 values ('{tsp4}', 2)")
- tdSql.execute(f"insert into tcq1 values ({tsp3}, 1)")
-
- def waitedQuery(self, sql, expectRows, timeout):
- tdLog.info(f"sql: {sql}, try to retrieve {expectRows} rows in {timeout} seconds")
- try:
- for i in range(timeout):
- tdSql.cursor.execute(sql)
- self.queryResult = tdSql.cursor.fetchall()
- self.queryRows = len(self.queryResult)
- self.queryCols = len(tdSql.cursor.description)
- # tdLog.info("sql: %s, try to retrieve %d rows,get %d rows" % (sql, expectRows, self.queryRows))
- if self.queryRows >= expectRows:
- return (self.queryRows, i)
- time.sleep(1)
- except Exception as e:
- caller = inspect.getframeinfo(inspect.stack()[1][0])
- tdLog.notice(f"{caller.filename}({caller.lineno}) failed: sql:{sql}, {repr(e)}")
- raise Exception(repr(e))
- return (self.queryRows, timeout)
-
- def showstream(self):
- tdSql.execute(
- "create table cq1 as select avg(c1) from tcq1 interval(10d) sliding(1d)"
- )
- sql = "show streams"
- timeout = 30
- exception = "ValueError('year -292275055 is out of range')"
- try:
- for i in range(timeout):
- tdSql.cursor.execute(sql)
- self.queryResult = tdSql.cursor.fetchall()
- self.queryRows = len(self.queryResult)
- self.queryCols = len(tdSql.cursor.description)
- # tdLog.info("sql: %s, try to retrieve %d rows,get %d rows" % (sql, expectRows, self.queryRows))
- if self.queryRows >= 1:
- tdSql.query(sql)
- tdSql.checkData(0, 5, None)
- return (self.queryRows, i)
- time.sleep(1)
- except Exception as e:
- tdLog.exit(f"sql: {sql} except raise {exception}, actually raise {repr(e)} ")
- # else:
- # tdLog.exit(f"sql: {sql} except raise {exception}, actually not")
-
- def run(self):
- tdSql.execute("drop database if exists dbcq")
- tdSql.execute("create database if not exists dbcq keep 36500")
- tdSql.execute("use dbcq")
-
- tdSql.execute("create table stbcq (ts timestamp, c1 int ) TAGS(t1 int)")
- tdSql.execute("create table tcq1 using stbcq tags(1)")
-
- self.insertnow()
- self.showstream()
-
-
- def stop(self):
- tdSql.close()
- tdLog.success(f"{__file__} successfully executed")
-
-
-tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/pytest/stream/stream1.py b/tests/pytest/stream/stream1.py
deleted file mode 100644
index c657379441e6da3137e3a1ceb8148ba9fa5ba9a5..0000000000000000000000000000000000000000
--- a/tests/pytest/stream/stream1.py
+++ /dev/null
@@ -1,142 +0,0 @@
-###################################################################
-# Copyright (c) 2016 by TAOS Technologies, Inc.
-# All rights reserved.
-#
-# This file is proprietary and confidential to TAOS Technologies.
-# No part of this file may be reproduced, stored, transmitted,
-# disclosed or used in any form or by any means other than as
-# expressly provided by the written permission from Jianhui Tao
-#
-###################################################################
-
-# -*- coding: utf-8 -*-
-
-import sys
-import time
-import taos
-from util.log import tdLog
-from util.cases import tdCases
-from util.sql import tdSql
-
-
-class TDTestCase:
- def init(self, conn, logSql):
- tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(), logSql)
-
- def run(self):
- tbNum = 10
- rowNum = 20
-
- tdSql.prepare()
-
- tdLog.info("===== step1 =====")
- tdSql.execute(
- "create table stb0(ts timestamp, col1 int, col2 float) tags(tgcol int)")
- for i in range(tbNum):
- tdSql.execute("create table tb%d using stb0 tags(%d)" % (i, i))
- for j in range(rowNum):
- tdSql.execute(
- "insert into tb%d values (now - %dm, %d, %d)" %
- (i, 1440 - j, j, j))
- time.sleep(0.1)
-
- tdLog.info("===== step2 =====")
- tdSql.query(
- "select count(*), count(col1), count(col2) from tb0 interval(1d)")
- tdSql.checkData(0, 1, rowNum)
- tdSql.checkData(0, 2, rowNum)
- tdSql.checkData(0, 3, rowNum)
- tdSql.query("show tables")
- tdSql.checkRows(tbNum)
- tdSql.execute(
- "create table s0 as select count(*), count(col1), count(col2) from tb0 interval(1d)")
- tdSql.query("show tables")
- tdSql.checkRows(tbNum + 1)
-
- tdLog.info("===== step3 =====")
- tdSql.waitedQuery("select * from s0", 1, 120)
- try:
- tdSql.checkData(0, 1, rowNum)
- tdSql.checkData(0, 2, rowNum)
- tdSql.checkData(0, 3, rowNum)
- except Exception as e:
- tdLog.info(repr(e))
-
- tdLog.info("===== step4 =====")
- tdSql.execute("drop table s0")
- tdSql.query("show tables")
- tdSql.checkRows(tbNum)
-
- tdLog.info("===== step5 =====")
- tdSql.error("select * from s0")
-
- tdLog.info("===== step6 =====")
- time.sleep(0.1)
- tdSql.execute(
- "create table s0 as select count(*), count(col1), count(col2) from tb0 interval(1d)")
- tdSql.query("show tables")
- tdSql.checkRows(tbNum + 1)
-
- tdLog.info("===== step7 =====")
- tdSql.waitedQuery("select * from s0", 1, 120)
- try:
- tdSql.checkData(0, 1, rowNum)
- tdSql.checkData(0, 2, rowNum)
- tdSql.checkData(0, 3, rowNum)
- except Exception as e:
- tdLog.info(repr(e))
-
- tdLog.info("===== step8 =====")
- tdSql.query(
- "select count(*), count(col1), count(col2) from stb0 interval(1d)")
- tdSql.checkData(0, 1, rowNum * tbNum)
- tdSql.checkData(0, 2, rowNum * tbNum)
- tdSql.checkData(0, 3, rowNum * tbNum)
- tdSql.query("show tables")
- tdSql.checkRows(tbNum + 1)
-
- tdSql.execute(
- "create table s1 as select count(*), count(col1), count(col2) from stb0 interval(1d)")
- tdSql.query("show tables")
- tdSql.checkRows(tbNum + 2)
-
- tdLog.info("===== step9 =====")
- tdSql.waitedQuery("select * from s1", 1, 120)
- try:
- tdSql.checkData(0, 1, rowNum * tbNum)
- tdSql.checkData(0, 2, rowNum * tbNum)
- tdSql.checkData(0, 3, rowNum * tbNum)
- except Exception as e:
- tdLog.info(repr(e))
-
- tdLog.info("===== step10 =====")
- tdSql.execute("drop table s1")
- tdSql.query("show tables")
- tdSql.checkRows(tbNum + 1)
-
- tdLog.info("===== step11 =====")
- tdSql.error("select * from s1")
-
- tdLog.info("===== step12 =====")
- tdSql.execute(
- "create table s1 as select count(*), count(col1), count(col2) from stb0 interval(1d)")
- tdSql.query("show tables")
- tdSql.checkRows(tbNum + 2)
-
- tdLog.info("===== step13 =====")
- tdSql.waitedQuery("select * from s1", 1, 120)
- try:
- tdSql.checkData(0, 1, rowNum * tbNum)
- tdSql.checkData(0, 2, rowNum * tbNum)
- tdSql.checkData(0, 3, rowNum * tbNum)
- except Exception as e:
- tdLog.info(repr(e))
-
- def stop(self):
- tdSql.close()
- tdLog.success("%s successfully executed" % __file__)
-
-
-tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/stream/stream2.py b/tests/pytest/stream/stream2.py
deleted file mode 100644
index 9b4eb8725c96f95196f251c55b0b773cd68e9ed5..0000000000000000000000000000000000000000
--- a/tests/pytest/stream/stream2.py
+++ /dev/null
@@ -1,164 +0,0 @@
-###################################################################
-# Copyright (c) 2016 by TAOS Technologies, Inc.
-# All rights reserved.
-#
-# This file is proprietary and confidential to TAOS Technologies.
-# No part of this file may be reproduced, stored, transmitted,
-# disclosed or used in any form or by any means other than as
-# expressly provided by the written permission from Jianhui Tao
-#
-###################################################################
-
-# -*- coding: utf-8 -*-
-
-import sys
-import time
-import taos
-from util.log import tdLog
-from util.cases import tdCases
-from util.sql import tdSql
-
-
-class TDTestCase:
- def init(self, conn, logSql):
- tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(), logSql)
-
- def run(self):
- tbNum = 10
- rowNum = 20
- totalNum = tbNum * rowNum
-
- tdSql.prepare()
-
- tdLog.info("===== step1 =====")
- tdSql.execute(
- "create table stb0(ts timestamp, col1 int, col2 float) tags(tgcol int)")
- for i in range(tbNum):
- tdSql.execute("create table tb%d using stb0 tags(%d)" % (i, i))
- for j in range(rowNum):
- tdSql.execute(
- "insert into tb%d values (now - %dm, %d, %d)" %
- (i, 1440 - j, j, j))
- time.sleep(0.1)
-
- tdLog.info("===== step2 =====")
- tdSql.query("select count(col1) from tb0 interval(1d)")
- tdSql.checkData(0, 1, rowNum)
- tdSql.query("show tables")
- tdSql.checkRows(tbNum)
- tdSql.execute(
- "create table s0 as select count(col1) from tb0 interval(1d)")
- tdSql.query("show tables")
- tdSql.checkRows(tbNum + 1)
-
- tdLog.info("===== step3 =====")
- tdSql.waitedQuery("select * from s0", 1, 120)
- try:
- tdSql.checkData(0, 1, rowNum)
- except Exception as e:
- tdLog.info(repr(e))
-
- tdLog.info("===== step4 =====")
- tdSql.execute("drop table s0")
- tdSql.query("show tables")
- try:
- tdSql.checkRows(tbNum)
- except Exception as e:
- tdLog.info(repr(e))
-
- tdLog.info("===== step5 =====")
- tdSql.error("select * from s0")
-
- tdLog.info("===== step6 =====")
- tdSql.execute(
- "create table s0 as select count(*), count(col1), count(col2) from tb0 interval(1d)")
- tdSql.query("show tables")
- try:
- tdSql.checkRows(tbNum + 1)
- except Exception as e:
- tdLog.info(repr(e))
-
- tdLog.info("===== step7 =====")
- tdSql.waitedQuery("select * from s0", 1, 120)
- try:
- tdSql.checkData(0, 1, rowNum)
- tdSql.checkData(0, 2, rowNum)
- tdSql.checkData(0, 3, rowNum)
- except Exception as e:
- tdLog.info(repr(e))
-
-
- time.sleep(5)
- tdSql.query("show streams")
- tdSql.checkRows(1)
- tdSql.checkData(0, 2, 's0')
-
- tdLog.info("===== step8 =====")
- tdSql.query(
- "select count(*), count(col1), count(col2) from stb0 interval(1d)")
- try:
- tdSql.checkData(0, 1, totalNum)
- tdSql.checkData(0, 2, totalNum)
- tdSql.checkData(0, 3, totalNum)
- except Exception as e:
- tdLog.info(repr(e))
- tdSql.query("show tables")
- tdSql.checkRows(tbNum + 1)
- tdSql.execute(
- "create table s1 as select count(*), count(col1), count(col2) from stb0 interval(1d)")
- tdSql.query("show tables")
- tdSql.checkRows(tbNum + 2)
-
- tdLog.info("===== step9 =====")
- tdSql.waitedQuery("select * from s1", 1, 120)
- try:
- tdSql.checkData(0, 1, totalNum)
- tdSql.checkData(0, 2, totalNum)
- tdSql.checkData(0, 3, totalNum)
- except Exception as e:
- tdLog.info(repr(e))
-
- tdLog.info("===== step10 =====")
- tdSql.execute("drop table s1")
- tdSql.query("show tables")
- try:
- tdSql.checkRows(tbNum + 1)
- except Exception as e:
- tdLog.info(repr(e))
-
- tdLog.info("===== step11 =====")
- tdSql.error("select * from s1")
-
- tdLog.info("===== step12 =====")
- tdSql.execute(
- "create table s1 as select count(col1) from stb0 interval(1d)")
- tdSql.query("show tables")
- try:
- tdSql.checkRows(tbNum + 2)
- except Exception as e:
- tdLog.info(repr(e))
-
- tdLog.info("===== step13 =====")
- tdSql.waitedQuery("select * from s1", 1, 120)
- try:
- tdSql.checkData(0, 1, totalNum)
- #tdSql.checkData(0, 2, None)
- #tdSql.checkData(0, 3, None)
- except Exception as e:
- tdLog.info(repr(e))
-
- time.sleep(5)
- tdSql.query("show streams")
- tdSql.checkRows(2)
- tdSql.checkData(0, 2, 's1')
- tdSql.checkData(1, 2, 's0')
-
-
- def stop(self):
- tdSql.close()
- tdLog.success("%s successfully executed" % __file__)
-
-
-tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/stream/stream3.py b/tests/pytest/stream/stream3.py
deleted file mode 100644
index 9a5c6c9aeca08bff1c94861255919255eef89100..0000000000000000000000000000000000000000
--- a/tests/pytest/stream/stream3.py
+++ /dev/null
@@ -1,108 +0,0 @@
-###################################################################
-# Copyright (c) 2016 by TAOS Technologies, Inc.
-# All rights reserved.
-#
-# This file is proprietary and confidential to TAOS Technologies.
-# No part of this file may be reproduced, stored, transmitted,
-# disclosed or used in any form or by any means other than as
-# expressly provided by the written permission from Jianhui Tao
-#
-###################################################################
-
-# -*- coding: utf-8 -*-
-
-import sys
-import time
-import taos
-from util.log import tdLog
-from util.cases import tdCases
-from util.sql import tdSql
-
-
-class TDTestCase:
- def init(self, conn, logSql):
- tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(), logSql)
-
- def run(self):
- ts = 1500000000000
- tbNum = 10
- rowNum = 20
-
- tdSql.prepare()
-
- tdLog.info("===== step1 =====")
- tdSql.execute(
- "create table stb0(ts timestamp, col1 binary(20), col2 nchar(20)) tags(tgcol int)")
- for i in range(tbNum):
- tdSql.execute("create table tb%d using stb0 tags(%d)" % (i, i))
- for j in range(rowNum):
- tdSql.execute(
- "insert into tb%d values (%d, 'binary%d', 'nchar%d')" %
- (i, ts + 60000 * j, j, j))
- tdSql.execute("insert into tb0 values(%d, null, null)" % (ts + 10000000))
- time.sleep(0.1)
-
- tdLog.info("===== step2 =====")
- tdSql.query(
- "select count(*), count(col1), count(col2) from stb0 interval(1d)")
- tdSql.checkData(0, 1, rowNum * tbNum + 1)
- tdSql.checkData(0, 2, rowNum * tbNum)
- tdSql.checkData(0, 3, rowNum * tbNum)
-
- tdSql.query("show tables")
- tdSql.checkRows(tbNum)
- tdSql.execute(
- "create table s0 as select count(*), count(col1), count(col2) from stb0 interval(1d)")
- tdSql.query("show tables")
- tdSql.checkRows(tbNum + 1)
-
- tdLog.info("===== step3 =====")
- tdSql.waitedQuery("select * from s0", 1, 120)
- try:
- tdSql.checkData(0, 1, rowNum * tbNum + 1)
- tdSql.checkData(0, 2, rowNum * tbNum)
- tdSql.checkData(0, 3, rowNum * tbNum)
- except Exception as e:
- tdLog.info(repr(e))
-
- tdLog.info("===== step4 =====")
- tdSql.execute("drop table s0")
- tdSql.query("show tables")
- tdSql.checkRows(tbNum)
-
- tdLog.info("===== step5 =====")
- tdSql.error("select * from s0")
-
- tdLog.info("===== step6 =====")
- time.sleep(0.1)
- tdSql.execute(
- "create table s0 as select count(*), count(col1), count(col2) from tb0 interval(1d)")
- tdSql.query("show tables")
- tdSql.checkRows(tbNum + 1)
-
- tdLog.info("===== step7 =====")
- tdSql.waitedQuery("select * from s0", 1, 120)
- try:
- tdSql.checkData(0, 1, rowNum + 1)
- tdSql.checkData(0, 2, rowNum)
- tdSql.checkData(0, 3, rowNum)
- except Exception as e:
- tdLog.info(repr(e))
-
- tdLog.info("===== step8 =====")
- tdSql.query(
- "select count(*), count(col1), count(col2) from stb0 interval(1d)")
- tdSql.checkData(0, 1, rowNum * tbNum + 1)
- tdSql.checkData(0, 2, rowNum * tbNum)
- tdSql.checkData(0, 3, rowNum * tbNum)
- tdSql.query("show tables")
- tdSql.checkRows(tbNum + 1)
-
- def stop(self):
- tdSql.close()
- tdLog.success("%s successfully executed" % __file__)
-
-
-tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/stream/sys.py b/tests/pytest/stream/sys.py
deleted file mode 100644
index c9a3fccfe68b61da722dcdb2ccab63bf3d5bcabc..0000000000000000000000000000000000000000
--- a/tests/pytest/stream/sys.py
+++ /dev/null
@@ -1,62 +0,0 @@
-###################################################################
-# Copyright (c) 2016 by TAOS Technologies, Inc.
-# All rights reserved.
-#
-# This file is proprietary and confidential to TAOS Technologies.
-# No part of this file may be reproduced, stored, transmitted,
-# disclosed or used in any form or by any means other than as
-# expressly provided by the written permission from Jianhui Tao
-#
-###################################################################
-
-# migrated from 'stream_on_sys.sim'
-# -*- coding: utf-8 -*-
-import sys
-import time
-import taos
-from util.log import tdLog
-from util.cases import tdCases
-from util.sql import tdSql
-
-
-class TDTestCase:
- updatecfgDict = {'monitor': 1}
-
- def init(self, conn, logSql):
- tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(), logSql)
-
-
- def run(self):
- time.sleep(5)
- tdSql.execute("use log")
-
- tdSql.execute("create table cpustrm as select count(*), avg(cpu_taosd), max(cpu_taosd), min(cpu_taosd), avg(cpu_system), max(cpu_cores), min(cpu_cores), last(cpu_cores) from log.dn1 interval(4s)")
- tdSql.execute("create table memstrm as select count(*), avg(mem_taosd), max(mem_taosd), min(mem_taosd), avg(mem_system), first(mem_total), last(mem_total) from log.dn1 interval(4s)")
- tdSql.execute("create table diskstrm as select count(*), avg(disk_used), last(disk_used), avg(disk_total), first(disk_total) from log.dn1 interval(4s)")
- tdSql.execute("create table bandstrm as select count(*), avg(band_speed), last(band_speed) from log.dn1 interval(4s)")
- tdSql.execute("create table reqstrm as select count(*), avg(req_http), last(req_http), avg(req_select), last(req_select), avg(req_insert), last(req_insert) from log.dn1 interval(4s)")
- tdSql.execute("create table iostrm as select count(*), avg(io_read), last(io_read), avg(io_write), last(io_write) from log.dn1 interval(4s)")
-
- sqls = [
- "select * from cpustrm",
- "select * from memstrm",
- "select * from diskstrm",
- "select * from bandstrm",
- "select * from reqstrm",
- "select * from iostrm",
- ]
- for sql in sqls:
- (rows, _) = tdSql.waitedQuery(sql, 1, 240)
- if rows < 1:
- tdLog.exit("failed: sql:%s, expect at least one row" % sql)
-
-
- def stop(self):
- tdSql.close()
- tdLog.success("%s successfully executed" % __file__)
-
-
-tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
-
diff --git a/tests/pytest/stream/table_1.py b/tests/pytest/stream/table_1.py
deleted file mode 100644
index b205491fad181a51c991c16da65baa8370174e74..0000000000000000000000000000000000000000
--- a/tests/pytest/stream/table_1.py
+++ /dev/null
@@ -1,89 +0,0 @@
-###################################################################
-# Copyright (c) 2016 by TAOS Technologies, Inc.
-# All rights reserved.
-#
-# This file is proprietary and confidential to TAOS Technologies.
-# No part of this file may be reproduced, stored, transmitted,
-# disclosed or used in any form or by any means other than as
-# expressly provided by the written permission from Jianhui Tao
-#
-###################################################################
-
-# -*- coding: utf-8 -*-
-
-import sys
-import time
-import taos
-from util.log import tdLog
-from util.cases import tdCases
-from util.sql import tdSql
-
-
-class TDTestCase:
- def init(self, conn, logSql):
- tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(), logSql)
-
- def createFuncStream(self, expr, suffix, value):
- tbname = "strm_" + suffix
- tdLog.info("create stream table %s" % tbname)
- tdSql.query("select %s from tb1 interval(1d)" % expr)
- tdSql.checkData(0, 1, value)
- tdSql.execute("create table %s as select %s from tb1 interval(1d)" % (tbname, expr))
-
- def checkStreamData(self, suffix, value):
- sql = "select * from strm_" + suffix
- tdSql.waitedQuery(sql, 1, 120)
- tdSql.checkData(0, 1, value)
-
- def run(self):
- tbNum = 10
- rowNum = 20
-
- tdSql.prepare()
-
- tdLog.info("===== step1 =====")
- tdSql.execute(
- "create table stb(ts timestamp, tbcol int, tbcol2 float) tags(tgcol int)")
- for i in range(tbNum):
- tdSql.execute("create table tb%d using stb tags(%d)" % (i, i))
- for j in range(rowNum):
- tdSql.execute(
- "insert into tb%d values (now - %dm, %d, %d)" %
- (i, 1440 - j, j, j))
- time.sleep(1)
-
- self.createFuncStream("count(*)", "c1", rowNum)
- self.createFuncStream("count(tbcol)", "c2", rowNum)
- self.createFuncStream("count(tbcol2)", "c3", rowNum)
- self.createFuncStream("avg(tbcol)", "av", 9.5)
- self.createFuncStream("sum(tbcol)", "su", 190)
- self.createFuncStream("min(tbcol)", "mi", 0)
- self.createFuncStream("max(tbcol)", "ma", 19)
- self.createFuncStream("first(tbcol)", "fi", 0)
- self.createFuncStream("last(tbcol)", "la", 19)
- self.createFuncStream("stddev(tbcol)", "st", 5.766281297335398)
- self.createFuncStream("percentile(tbcol, 1)", "pe", 0.19)
- self.createFuncStream("count(tbcol)", "as", rowNum)
-
- self.checkStreamData("c1", rowNum)
- self.checkStreamData("c2", rowNum)
- self.checkStreamData("c3", rowNum)
- self.checkStreamData("av", 9.5)
- self.checkStreamData("su", 190)
- self.checkStreamData("mi", 0)
- self.checkStreamData("ma", 19)
- self.checkStreamData("fi", 0)
- self.checkStreamData("la", 19)
- self.checkStreamData("st", 5.766281297335398)
- self.checkStreamData("pe", 0.19)
- self.checkStreamData("as", rowNum)
-
-
- def stop(self):
- tdSql.close()
- tdLog.success("%s successfully executed" % __file__)
-
-
-tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/stream/table_n.py b/tests/pytest/stream/table_n.py
deleted file mode 100644
index 371af769778bce1eb1e6cf1bac89333006c582a8..0000000000000000000000000000000000000000
--- a/tests/pytest/stream/table_n.py
+++ /dev/null
@@ -1,143 +0,0 @@
-###################################################################
-# Copyright (c) 2016 by TAOS Technologies, Inc.
-# All rights reserved.
-#
-# This file is proprietary and confidential to TAOS Technologies.
-# No part of this file may be reproduced, stored, transmitted,
-# disclosed or used in any form or by any means other than as
-# expressly provided by the written permission from Jianhui Tao
-#
-###################################################################
-
-# -*- coding: utf-8 -*-
-
-import sys
-import time
-import taos
-from util.log import tdLog
-from util.cases import tdCases
-from util.sql import tdSql
-
-
-class TDTestCase:
- def init(self, conn, logSql):
- tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(), logSql)
-
- def run(self):
- tbNum = 10
- rowNum = 20
-
- tdSql.prepare()
-
- tdLog.info("===== preparing data =====")
- tdSql.execute(
- "create table stb(ts timestamp, tbcol int, tbcol2 float) tags(tgcol int)")
- for i in range(tbNum):
- tdSql.execute("create table tb%d using stb tags(%d)" % (i, i))
- for j in range(rowNum):
- tdSql.execute(
- "insert into tb%d values (now - %dm, %d, %d)" %
- (i, 1440 - j, j, j))
- time.sleep(0.1)
-
- tdLog.info("===== step 1 =====")
- tdSql.query("select count(*), count(tbcol), count(tbcol2) from tb1 interval(1d)")
- tdSql.checkData(0, 1, rowNum)
- tdSql.checkData(0, 2, rowNum)
- tdSql.checkData(0, 3, rowNum)
-
- tdLog.info("===== step 2 =====")
- tdSql.execute("create table strm_c3 as select count(*), count(tbcol), count(tbcol2) from tb1 interval(1d)")
-
- tdLog.info("===== step 3 =====")
- tdSql.execute("create table strm_c32 as select count(*), count(tbcol) as c1, count(tbcol2) as c2, count(tbcol) as c3, count(tbcol) as c4, count(tbcol) as c5, count(tbcol) as c6, count(tbcol) as c7, count(tbcol) as c8, count(tbcol) as c9, count(tbcol) as c10, count(tbcol) as c11, count(tbcol) as c12, count(tbcol) as c13, count(tbcol) as c14, count(tbcol) as c15, count(tbcol) as c16, count(tbcol) as c17, count(tbcol) as c18, count(tbcol) as c19, count(tbcol) as c20, count(tbcol) as c21, count(tbcol) as c22, count(tbcol) as c23, count(tbcol) as c24, count(tbcol) as c25, count(tbcol) as c26, count(tbcol) as c27, count(tbcol) as c28, count(tbcol) as c29, count(tbcol) as c30 from tb1 interval(1d)")
-
- tdLog.info("===== step 4 =====")
- tdSql.query("select count(*), count(tbcol) as c1, count(tbcol2) as c2, count(tbcol) as c3, count(tbcol) as c4, count(tbcol) as c5, count(tbcol) as c6, count(tbcol) as c7, count(tbcol) as c8, count(tbcol) as c9, count(tbcol) as c10, count(tbcol) as c11, count(tbcol) as c12, count(tbcol) as c13, count(tbcol) as c14, count(tbcol) as c15, count(tbcol) as c16, count(tbcol) as c17, count(tbcol) as c18, count(tbcol) as c19, count(tbcol) as c20, count(tbcol) as c21, count(tbcol) as c22, count(tbcol) as c23, count(tbcol) as c24, count(tbcol) as c25, count(tbcol) as c26, count(tbcol) as c27, count(tbcol) as c28, count(tbcol) as c29, count(tbcol) as c30 from tb1 interval(1d)")
- tdSql.checkData(0, 1, rowNum)
- tdSql.checkData(0, 2, rowNum)
- tdSql.checkData(0, 3, rowNum)
-
- tdLog.info("===== step 5 =====")
- tdSql.execute("create table strm_c31 as select count(*), count(tbcol) as c1, count(tbcol2) as c2, count(tbcol) as c3, count(tbcol) as c4, count(tbcol) as c5, count(tbcol) as c6, count(tbcol) as c7, count(tbcol) as c8, count(tbcol) as c9, count(tbcol) as c10, count(tbcol) as c11, count(tbcol) as c12, count(tbcol) as c13, count(tbcol) as c14, count(tbcol) as c15, count(tbcol) as c16, count(tbcol) as c17, count(tbcol) as c18, count(tbcol) as c19, count(tbcol) as c20, count(tbcol) as c21, count(tbcol) as c22, count(tbcol) as c23, count(tbcol) as c24, count(tbcol) as c25, count(tbcol) as c26, count(tbcol) as c27, count(tbcol) as c28, count(tbcol) as c29, count(tbcol) as c30 from tb1 interval(1d)")
-
- tdLog.info("===== step 6 =====")
- tdSql.query("select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from tb1 interval(1d)")
- tdSql.checkData(0, 1, 9.5)
- tdSql.checkData(0, 2, 190)
- tdSql.checkData(0, 3, 0)
- tdSql.checkData(0, 4, 19)
- tdSql.checkData(0, 5, 0)
- tdSql.checkData(0, 6, 19)
- tdSql.execute("create table strm_avg as select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from tb1 interval(1d)")
-
- tdLog.info("===== step 7 =====")
- tdSql.query("select stddev(tbcol), leastsquares(tbcol, 1, 1), percentile(tbcol, 1) from tb1 interval(1d)")
- tdSql.checkData(0, 1, 5.766281297335398)
- tdSql.checkData(0, 3, 0.19)
- tdSql.execute("create table strm_ot as select stddev(tbcol), leastsquares(tbcol, 1, 1), percentile(tbcol, 1) from tb1 interval(1d)")
-
- tdLog.info("===== step 8 =====")
- tdSql.query("select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol), stddev(tbcol), percentile(tbcol, 1), count(tbcol), leastsquares(tbcol, 1, 1) from tb1 interval(1d)")
- tdSql.checkData(0, 1, 9.5)
- tdSql.checkData(0, 2, 190)
- tdSql.checkData(0, 3, 0)
- tdSql.checkData(0, 4, 19)
- tdSql.checkData(0, 5, 0)
- tdSql.checkData(0, 6, 19)
- tdSql.checkData(0, 7, 5.766281297335398)
- tdSql.checkData(0, 8, 0.19)
- tdSql.checkData(0, 9, rowNum)
- tdSql.execute("create table strm_to as select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol), stddev(tbcol), percentile(tbcol, 1), count(tbcol), leastsquares(tbcol, 1, 1) from tb1 interval(1d)")
-
- tdLog.info("===== step 9 =====")
- tdSql.query("select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol), stddev(tbcol), percentile(tbcol, 1), count(tbcol), leastsquares(tbcol, 1, 1) from tb1 where ts < now + 4m interval(1d)")
- tdSql.checkData(0, 9, rowNum)
- tdSql.execute("create table strm_wh as select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol), stddev(tbcol), percentile(tbcol, 1), count(tbcol), leastsquares(tbcol, 1, 1) from tb1 where ts < now + 4m interval(1d)")
-
- tdLog.info("===== step 10 =====")
- tdSql.waitedQuery("select * from strm_c3", 1, 120)
- tdSql.checkData(0, 1, rowNum)
- tdSql.checkData(0, 2, rowNum)
- tdSql.checkData(0, 3, rowNum)
-
- tdLog.info("===== step 11 =====")
- tdSql.waitedQuery("select * from strm_c31", 1, 30)
- for i in range(1, 10):
- tdSql.checkData(0, i, rowNum)
-
- tdLog.info("===== step 12 =====")
- tdSql.waitedQuery("select * from strm_avg", 1, 20)
- tdSql.checkData(0, 1, 9.5)
- tdSql.checkData(0, 2, 190)
- tdSql.checkData(0, 3, 0)
- tdSql.checkData(0, 4, 19)
- tdSql.checkData(0, 5, 0)
- tdSql.checkData(0, 6, 19)
-
- tdLog.info("===== step 13 =====")
- tdSql.waitedQuery("select * from strm_ot", 1, 20)
- tdSql.checkData(0, 1, 5.766281297335398)
- tdSql.checkData(0, 3, 0.19)
-
- tdLog.info("===== step 14 =====")
- tdSql.waitedQuery("select * from strm_to", 1, 20)
- tdSql.checkData(0, 1, 9.5)
- tdSql.checkData(0, 2, 190)
- tdSql.checkData(0, 3, 0)
- tdSql.checkData(0, 4, 19)
- tdSql.checkData(0, 5, 0)
- tdSql.checkData(0, 6, 19)
- tdSql.checkData(0, 7, 5.766281297335398)
- tdSql.checkData(0, 8, 0.19)
- tdSql.checkData(0, 9, rowNum)
-
-
- def stop(self):
- tdSql.close()
- tdLog.success("%s successfully executed" % __file__)
-
-
-tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/stream/test1.py b/tests/pytest/stream/test1.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3439a7bdbbf258795a15164eb63b9278549ed8a
--- /dev/null
+++ b/tests/pytest/stream/test1.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+
+import sys
+from util.log import *
+from util.cases import *
+from util.sql import *
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ def run(self):
+ tdSql.prepare()
+ tdSql.execute('drop database if exists slmfvojuxt;')
+ tdSql.execute('create database if not exists slmfvojuxt vgroups 1;')
+ tdSql.execute('use slmfvojuxt;')
+ tdSql.execute('create table if not exists downsampling_stb (ts timestamp, c1 int, c2 double, c3 varchar(100), c4 bool) tags (t1 int, t2 double, t3 varchar(100), t4 bool);')
+ tdSql.execute('create table ownsampling_ct1 using downsampling_stb tags(10, 10.1, "beijing", True);')
+ tdSql.execute('create table if not exists scalar_stb (ts timestamp, c1 int, c2 double, c3 binary(20)) tags (t1 int);')
+ tdSql.execute('create table scalar_ct1 using scalar_stb tags(10);')
+ tdSql.execute('create stream downsampling_stream into output_downsampling_stb as select _wstartts AS start, min(c1), max(c2), sum(c1) from downsampling_stb interval(10m);')
+ tdSql.execute('create stream scalar_stream into output_scalar_stb as select ts, abs(c1) a1 , abs(c2) a2 from scalar_stb;')
+ tdSql.execute('insert into scalar_ct1 values (1653471881952, 100, 100.1, "beijing");')
+ tdSql.execute('insert into scalar_ct1 values (1653471881952+1s, -50, -50.1, "tianjin");')
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/stream/test2.py b/tests/pytest/stream/test2.py
new file mode 100644
index 0000000000000000000000000000000000000000..a441174722047d7fb7819f535fe7b6c7bf55380f
--- /dev/null
+++ b/tests/pytest/stream/test2.py
@@ -0,0 +1,93 @@
+# -*- coding: utf-8 -*-
+
+import sys
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.common import tdCom
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ def run(self):
+ #for i in range(100):
+ tdSql.prepare()
+ dbname = tdCom.getLongName(10, "letters")
+ tdSql.execute('show databases')
+ tdSql.execute('drop database if exists ttxkbrzmpo')
+ tdSql.execute('create database if not exists ttxkbrzmpo vgroups 1')
+ tdSql.execute('use ttxkbrzmpo')
+ tdSql.execute('create table if not exists downsampling_stb (ts timestamp, c1 int, c2 double, c3 varchar(100), c4 bool) tags (t1 int, t2 double, t3 varchar(100), t4 bool);')
+ tdSql.execute('create table downsampling_ct1 using downsampling_stb tags(10, 10.1, "Beijing", True);')
+ tdSql.execute('create table if not exists scalar_stb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 nchar(20), c5 nchar(20)) tags (t1 int);')
+ tdSql.execute('create table scalar_ct1 using scalar_stb tags(10);')
+ tdSql.execute('create stream downsampling_stream into output_downsampling_stb as select _wstartts AS start, min(c1), max(c2), sum(c1) from downsampling_stb interval(10m);')
+ tdSql.execute('insert into downsampling_ct1 values (1653547828591, 100, 100.1, "Beijing", True);')
+ tdSql.execute('insert into downsampling_ct1 values (1653547828591+1s, -100, -100.1, "Tianjin", False);')
+ tdSql.execute('insert into downsampling_ct1 values (1653547828591+2s, 50, 50.3, "HeBei", False);')
+ tdSql.execute('select * from output_downsampling_stb;')
+ tdSql.execute('select start, `min(c1)`, `max(c2)`, `sum(c1)` from output_downsampling_stb;')
+ tdSql.execute('select _wstartts AS start, min(c1), max(c2), sum(c1) from downsampling_stb interval(10m);')
+ tdSql.execute('insert into downsampling_ct1 values (1653547828591+10m, 60, 60.3, "heilongjiang", True);')
+ tdSql.execute('insert into downsampling_ct1 values (1653547828591+11m, 70, 70.3, "JiLin", True);')
+ tdSql.execute('select * from output_downsampling_stb;')
+ tdSql.execute('select start, `min(c1)`, `max(c2)`, `sum(c1)` from output_downsampling_stb;')
+ tdSql.execute('select _wstartts AS start, min(c1), max(c2), sum(c1) from downsampling_stb interval(10m);')
+ tdSql.execute('insert into downsampling_ct1 values (1653547828591+21m, 70, 70.3, "JiLin", True);')
+ tdSql.execute('select * from output_downsampling_stb;')
+ tdSql.execute('select * from output_downsampling_stb;')
+ tdSql.execute('select start, `min(c1)`, `max(c2)`, `sum(c1)` from output_downsampling_stb;')
+ tdSql.execute('select _wstartts AS start, min(c1), max(c2), sum(c1) from downsampling_stb interval(10m);')
+ tdSql.execute('create stream abs_stream into output_abs_stb as select ts, abs(c1), abs(c2), c3 from scalar_stb;')
+ tdSql.query('describe output_abs_stb')
+ tdSql.execute('create stream acos_stream into output_acos_stb as select ts, acos(c1), acos(c2), c3 from scalar_stb;')
+ tdSql.query('describe output_acos_stb')
+ tdSql.execute('create stream asin_stream into output_asin_stb as select ts, asin(c1), asin(c2), c3 from scalar_stb;')
+ tdSql.query('describe output_asin_stb')
+ tdSql.execute('create stream atan_stream into output_atan_stb as select ts, atan(c1), atan(c2), c3 from scalar_stb;')
+ tdSql.query('describe output_atan_stb')
+ tdSql.execute('create stream ceil_stream into output_ceil_stb as select ts, ceil(c1), ceil(c2), c3 from scalar_stb;')
+ tdSql.query('describe output_ceil_stb')
+ tdSql.execute('create stream cos_stream into output_cos_stb as select ts, cos(c1), cos(c2), c3 from scalar_stb;')
+ tdSql.query('describe output_cos_stb')
+ tdSql.execute('create stream floor_stream into output_floor_stb as select ts, floor(c1), floor(c2), c3 from scalar_stb;')
+ tdSql.query('describe output_floor_stb')
+ tdSql.execute('create stream log_stream into output_log_stb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_stb;')
+ tdSql.query('describe output_log_stb')
+ tdSql.execute('create stream pow_stream into output_pow_stb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_stb;')
+ tdSql.query('describe output_pow_stb')
+ tdSql.execute('create stream round_stream into output_round_stb as select ts, round(c1), round(c2), c3 from scalar_stb;')
+ tdSql.query('describe output_round_stb')
+ tdSql.execute('create stream sin_stream into output_sin_stb as select ts, sin(c1), sin(c2), c3 from scalar_stb;')
+ tdSql.query('describe output_sin_stb')
+ tdSql.execute('create stream sqrt_stream into output_sqrt_stb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_stb;')
+ tdSql.query('describe output_sqrt_stb')
+ tdSql.execute('create stream tan_stream into output_tan_stb as select ts, tan(c1), tan(c2), c3 from scalar_stb;')
+ tdSql.query('describe output_tan_stb')
+ tdSql.execute('create stream char_length_stream into output_char_length_stb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_stb;')
+ tdSql.query('describe output_char_length_stb')
+ tdSql.execute('create stream concat_stream into output_concat_stb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_stb;')
+ tdSql.execute('create stream concat_ws_stream into output_concat_ws_stb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_stb;')
+ tdSql.execute('create stream length_stream into output_length_stb as select ts, length(c3), length(c4), length(c5) from scalar_stb;')
+ tdSql.query('describe output_length_stb')
+ tdSql.execute('create stream lower_stream into output_lower_stb as select ts, lower(c3), lower(c4), lower(c5) from scalar_stb;')
+ tdSql.query('describe output_lower_stb')
+ tdSql.execute('create stream ltrim_stream into output_ltrim_stb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_stb;')
+ tdSql.query('describe output_ltrim_stb')
+ tdSql.execute('create stream rtrim_stream into output_rtrim_stb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_stb;')
+ tdSql.query('describe output_rtrim_stb')
+ tdSql.execute('create stream substr_stream into output_substr_stb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_stb;')
+ tdSql.query('describe output_substr_stb')
+ tdSql.execute('create stream upper_stream into output_upper_stb as select ts, upper(c3), upper(c4), upper(c5) from scalar_stb;')
+ tdSql.query('describe output_upper_stb')
+ tdSql.execute('insert into scalar_ct1 values (1653560440733, 100, 100.1, "beijing", "taos", "Taos");')
+ tdSql.execute('insert into scalar_ct1 values (1653560440733+1s, -50, -50.1, "tianjin", "taosdata", "Taosdata");')
+ tdSql.execute('insert into scalar_ct1 values (1653560440733+2s, 0, Null, "hebei", "TDengine", Null);')
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/stream/test3.py b/tests/pytest/stream/test3.py
new file mode 100644
index 0000000000000000000000000000000000000000..b45521a9476961394c1cf4b2454d6fb9e2368c68
--- /dev/null
+++ b/tests/pytest/stream/test3.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+
+import sys
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.common import tdCom
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ def run(self):
+ #for i in range(100):
+ tdSql.prepare()
+ dbname = tdCom.getLongName(10, "letters")
+ tdSql.execute('create database if not exists djnhawvlgq vgroups 1')
+ tdSql.execute('use djnhawvlgq')
+ tdSql.execute('create table if not exists downsampling_stb (ts timestamp, c1 int, c2 double, c3 varchar(100), c4 bool) tags (t1 int, t2 double, t3 varchar(100), t4 bool);')
+ tdSql.execute('create table downsampling_ct1 using downsampling_stb tags(10, 10.1, "Beijing", True);')
+ tdSql.execute('create table if not exists scalar_stb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 nchar(20), c5 nchar(20)) tags (t1 int);')
+ tdSql.execute('create table scalar_ct1 using scalar_stb tags(10);')
+ tdSql.execute('create table if not exists data_filter_stb (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(100), c8 nchar(200), c9 bool, c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 binary(100), t8 nchar(200), t9 bool, t10 tinyint unsigned, t11 smallint unsigned, t12 int unsigned, t13 bigint unsigned)')
+ tdSql.execute('create table if not exists data_filter_ct1 using data_filter_stb tags (1, 2, 3, 4, 5.5, 6.6, "binary7", "nchar8", true, 11, 12, 13, 14)')
+ tdSql.execute('create stream data_filter_stream into output_data_filter_stb as select * from data_filter_stb where ts >= 1653648072973+1s and c1 = 1 or c2 > 1 and c3 != 4 or c4 <= 3 and c5 <> 0 or c6 is not Null or c7 is Null or c8 between "na" and "nchar4" and c8 not between "bi" and "binary" and c8 match "nchar[19]" and c8 nmatch "nchar[25]" or c9 in (1, 2, 3) or c10 not in (6, 7) and c8 like "nch%" and c7 not like "bina_" and c11 <= 10 or c12 is Null or c13 >= 4;')
+ tdSql.execute('insert into data_filter_ct1 values (1653648072973, 1, 1, 1, 3, 1.1, 1.1, "binary1", "nchar1", true, 1, 2, 3, 4);')
+ tdSql.execute('insert into data_filter_ct1 values (1653648072973+1s, 2, 2, 1, 3, 1.1, 1.1, "binary2", "nchar2", true, 2, 3, 4, 5);')
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/test-all.bat b/tests/pytest/test-all.bat
new file mode 100644
index 0000000000000000000000000000000000000000..4e3ece9b565f5fecce55798684d98875e1ffb7cc
--- /dev/null
+++ b/tests/pytest/test-all.bat
@@ -0,0 +1,27 @@
+@echo off
+SETLOCAL EnableDelayedExpansion
+for /F "tokens=1,2 delims=#" %%a in ('"prompt #$H#$E# & echo on & for %%b in (1) do rem"') do ( set "DEL=%%a")
+set /a a=0
+@REM echo Windows Taosd Test
+@REM for /F "usebackq tokens=*" %%i in (fulltest.bat) do (
+@REM echo Processing %%i
+@REM set /a a+=1
+@REM call %%i ARG1 -w -m localhost > result_!a!.txt 2>error_!a!.txt
+@REM if errorlevel 1 ( call :colorEcho 0c "failed" &echo. && exit 8 ) else ( call :colorEcho 0a "Success" &echo. )
+@REM )
+echo Linux Taosd Test
+for /F "usebackq tokens=*" %%i in (fulltest.bat) do (
+ echo Processing %%i
+ set /a a+=1
+ call %%i ARG1 -w 1 -m %1 > result_!a!.txt 2>error_!a!.txt
+ if errorlevel 1 ( call :colorEcho 0c "failed" &echo. && goto :end ) else ( call :colorEcho 0a "Success" &echo. )
+)
+goto :end
+
+:colorEcho
+echo off
+ "%~2"
+findstr /v /a:%1 /R "^$" "%~2" nul
+del "%~2" > nul 2>&1i
+
+:end
\ No newline at end of file
diff --git a/tests/pytest/test.py b/tests/pytest/test.py
index 97dca6be1811ee87a31661e018616f469d5fd4ca..30ab6ae3cc14e2d36f4979f03bdc99871cfcd8fa 100644
--- a/tests/pytest/test.py
+++ b/tests/pytest/test.py
@@ -18,6 +18,7 @@ import getopt
import subprocess
import time
from distutils.log import warn as printf
+import platform
from util.log import *
from util.dnodes import *
@@ -35,8 +36,11 @@ if __name__ == "__main__":
logSql = True
stop = 0
restart = False
+ windows = 0
+ if platform.system().lower() == 'windows':
+ windows = 1
opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghr', [
- 'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help'])
+ 'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart'])
for key, value in opts:
if key in ['-h', '--help']:
tdLog.printNoPrefix(
@@ -61,7 +65,7 @@ if __name__ == "__main__":
deployPath = value
if key in ['-m', '--master']:
- masterIp = value
+ masterIp = value
if key in ['-l', '--logSql']:
if (value.upper() == "TRUE"):
@@ -110,67 +114,105 @@ if __name__ == "__main__":
time.sleep(2)
tdLog.info('stop All dnodes')
-
- tdDnodes.init(deployPath)
- tdDnodes.setTestCluster(testCluster)
- tdDnodes.setValgrind(valgrind)
- tdDnodes.stopAll()
- is_test_framework = 0
- key_word = 'tdCases.addLinux'
- try:
- if key_word in open(fileName).read():
- is_test_framework = 1
- except:
- pass
- if is_test_framework:
- moduleName = fileName.replace(".py", "").replace("/", ".")
- uModule = importlib.import_module(moduleName)
- try:
- ucase = uModule.TDTestCase()
- tdDnodes.deploy(1,ucase.updatecfgDict)
- except :
- tdDnodes.deploy(1,{})
- else:
- tdDnodes.deploy(1,{})
- tdDnodes.start(1)
if masterIp == "":
host = '127.0.0.1'
else:
host = masterIp
- tdLog.info("Procedures for tdengine deployed in %s" % (host))
-
- tdCases.logSql(logSql)
-
- if testCluster:
- tdLog.info("Procedures for testing cluster")
- if fileName == "all":
- tdCases.runAllCluster()
- else:
- tdCases.runOneCluster(fileName)
- else:
+ if (windows):
+ tdCases.logSql(logSql)
tdLog.info("Procedures for testing self-deployment")
+ if masterIp == "" or masterIp == "localhost":
+ tdDnodes.init(deployPath)
+ tdDnodes.setTestCluster(testCluster)
+ tdDnodes.setValgrind(valgrind)
+ tdDnodes.stopAll()
+ is_test_framework = 0
+ key_word = 'tdCases.addWindows'
+ try:
+ if key_word in open(fileName).read():
+ is_test_framework = 1
+ except:
+ pass
+ if is_test_framework:
+ moduleName = fileName.replace(".py", "").replace(os.sep, ".")
+ uModule = importlib.import_module(moduleName)
+ try:
+ ucase = uModule.TDTestCase()
+ tdDnodes.deploy(1,ucase.updatecfgDict)
+ except :
+ tdDnodes.deploy(1,{})
+ else:
+ pass
+ tdDnodes.deploy(1,{})
+ tdDnodes.start(1)
+ else:
+ remote_conn = Connection("root@%s"%host)
+ with remote_conn.cd('/var/lib/jenkins/workspace/TDinternal/community/tests/pytest'):
+ remote_conn.run("python3 ./test.py")
+ tdDnodes.init(deployPath)
conn = taos.connect(
- host,
- config=tdDnodes.getSimCfgPath())
- if fileName == "all":
- tdCases.runAllLinux(conn)
+ host="%s" % (host),
+ config=tdDnodes.sim.getCfgDir())
+ tdCases.runOneWindows(conn, fileName)
+ tdCases.logSql(logSql)
+ else:
+ tdDnodes.init(deployPath)
+ tdDnodes.setTestCluster(testCluster)
+ tdDnodes.setValgrind(valgrind)
+ tdDnodes.stopAll()
+ is_test_framework = 0
+ key_word = 'tdCases.addLinux'
+ try:
+ if key_word in open(fileName).read():
+ is_test_framework = 1
+ except:
+ pass
+ if is_test_framework:
+ moduleName = fileName.replace(".py", "").replace("/", ".")
+ uModule = importlib.import_module(moduleName)
+ try:
+ ucase = uModule.TDTestCase()
+ tdDnodes.deploy(1,ucase.updatecfgDict)
+ except :
+ tdDnodes.deploy(1,{})
+ else:
+ tdDnodes.deploy(1,{})
+ tdDnodes.start(1)
+
+ tdLog.info("Procedures for tdengine deployed in %s" % (host))
+
+ tdCases.logSql(logSql)
+
+ if testCluster:
+ tdLog.info("Procedures for testing cluster")
+ if fileName == "all":
+ tdCases.runAllCluster()
+ else:
+ tdCases.runOneCluster(fileName)
else:
- tdCases.runOneLinux(conn, fileName)
- if restart:
- if fileName == "all":
- tdLog.info("not need to query ")
- else:
- sp = fileName.rsplit(".", 1)
- if len(sp) == 2 and sp[1] == "py":
- tdDnodes.stopAll()
- tdDnodes.start(1)
- time.sleep(1)
- conn = taos.connect( host, config=tdDnodes.getSimCfgPath())
- tdLog.info("Procedures for tdengine deployed in %s" % (host))
- tdLog.info("query test after taosd restart")
- tdCases.runOneLinux(conn, sp[0] + "_" + "restart.py")
+ tdLog.info("Procedures for testing self-deployment")
+ conn = taos.connect(
+ host,
+ config=tdDnodes.getSimCfgPath())
+ if fileName == "all":
+ tdCases.runAllLinux(conn)
else:
- tdLog.info("not need to query")
+ tdCases.runOneLinux(conn, fileName)
+ if restart:
+ if fileName == "all":
+ tdLog.info("not need to query ")
+ else:
+ sp = fileName.rsplit(".", 1)
+ if len(sp) == 2 and sp[1] == "py":
+ tdDnodes.stopAll()
+ tdDnodes.start(1)
+ time.sleep(1)
+ conn = taos.connect( host, config=tdDnodes.getSimCfgPath())
+ tdLog.info("Procedures for tdengine deployed in %s" % (host))
+ tdLog.info("query test after taosd restart")
+ tdCases.runOneLinux(conn, sp[0] + "_" + "restart.py")
+ else:
+ tdLog.info("not need to query")
conn.close()
diff --git a/tests/pytest/util/cases.py b/tests/pytest/util/cases.py
index 2fc1ac8515e47f9354483ebb590897eea96dcc57..2bfd8efdcd96979d25b58d7af50bb706d91fd91d 100644
--- a/tests/pytest/util/cases.py
+++ b/tests/pytest/util/cases.py
@@ -34,7 +34,7 @@ class TDCases:
self.clusterCases = []
def __dynamicLoadModule(self, fileName):
- moduleName = fileName.replace(".py", "").replace("/", ".")
+ moduleName = fileName.replace(".py", "").replace(os.sep, ".")
return importlib.import_module(moduleName, package='..')
def logSql(self, logSql):
@@ -101,8 +101,12 @@ class TDCases:
for tmp in self.windowsCases:
if tmp.name.find(fileName) != -1:
case = testModule.TDTestCase()
- case.init(conn)
- case.run()
+ case.init(conn, self._logSql)
+ try:
+ case.run()
+ except Exception as e:
+ tdLog.notice(repr(e))
+ tdLog.exit("%s failed" % (fileName))
case.stop()
runNum += 1
continue
diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py
index 35abc4802f9de2080a6b6a166daf833c9cf04578..7b00e6f331f6053c96ce56b5a79219b6967c6ecd 100644
--- a/tests/pytest/util/common.py
+++ b/tests/pytest/util/common.py
@@ -14,23 +14,97 @@
import random
import string
from util.sql import tdSql
-
+from util.dnodes import tdDnodes
+import requests
+import time
+import socket
class TDCom:
def init(self, conn, logSql):
tdSql.init(conn.cursor(), logSql)
- def cleanTb(self):
+ def preDefine(self):
+ header = {'Authorization': 'Basic cm9vdDp0YW9zZGF0YQ=='}
+ sql_url = "http://127.0.0.1:6041/rest/sql"
+ sqlt_url = "http://127.0.0.1:6041/rest/sqlt"
+ sqlutc_url = "http://127.0.0.1:6041/rest/sqlutc"
+ influx_url = "http://127.0.0.1:6041/influxdb/v1/write"
+ telnet_url = "http://127.0.0.1:6041/opentsdb/v1/put/telnet"
+ return header, sql_url, sqlt_url, sqlutc_url, influx_url, telnet_url
+
+ def genTcpParam(self):
+ MaxBytes = 1024*1024
+ host ='127.0.0.1'
+ port = 6046
+ return MaxBytes, host, port
+
+ def tcpClient(self, input):
+ MaxBytes = tdCom.genTcpParam()[0]
+ host = tdCom.genTcpParam()[1]
+ port = tdCom.genTcpParam()[2]
+ sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
+ sock.connect((host, port))
+ sock.send(input.encode())
+ sock.close()
+
+ def restApiPost(self, sql):
+ requests.post(self.preDefine()[1], sql.encode("utf-8"), headers = self.preDefine()[0])
+
+ def createDb(self, dbname="test", db_update_tag=0, api_type="taosc"):
+ if api_type == "taosc":
+ if db_update_tag == 0:
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname} precision 'us'")
+ else:
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname} precision 'us' update 1")
+ elif api_type == "restful":
+ if db_update_tag == 0:
+ self.restApiPost(f"drop database if exists {dbname}")
+ self.restApiPost(f"create database if not exists {dbname} precision 'us'")
+ else:
+ self.restApiPost(f"drop database if exists {dbname}")
+ self.restApiPost(f"create database if not exists {dbname} precision 'us' update 1")
+ tdSql.execute(f'use {dbname}')
+
+ def genUrl(self, url_type, dbname, precision):
+ if url_type == "influxdb":
+ if precision is None:
+ url = self.preDefine()[4] + "?" + "db=" + dbname
+ else:
+ url = self.preDefine()[4] + "?" + "db=" + dbname + "&precision=" + precision
+ elif url_type == "telnet":
+ url = self.preDefine()[5] + "/" + dbname
+ else:
+ url = self.preDefine()[1]
+ return url
+
+ def schemalessApiPost(self, sql, url_type="influxdb", dbname="test", precision=None):
+ if url_type == "influxdb":
+ url = self.genUrl(url_type, dbname, precision)
+ elif url_type == "telnet":
+ url = self.genUrl(url_type, dbname, precision)
+ res = requests.post(url, sql.encode("utf-8"), headers = self.preDefine()[0])
+ return res
+
+ def cleanTb(self, type="taosc"):
+ '''
+ type is taosc or restful
+ '''
query_sql = "show stables"
res_row_list = tdSql.query(query_sql, True)
stb_list = map(lambda x: x[0], res_row_list)
for stb in stb_list:
- tdSql.execute(f'drop table if exists {stb}')
+ if type == "taosc":
+ tdSql.execute(f'drop table if exists `{stb}`')
+ if not stb[0].isdigit():
+ tdSql.execute(f'drop table if exists {stb}')
+ elif type == "restful":
+ self.restApiPost(f"drop table if exists `{stb}`")
+ if not stb[0].isdigit():
+ self.restApiPost(f"drop table if exists {stb}")
- query_sql = "show tables"
- res_row_list = tdSql.query(query_sql, True)
- tb_list = map(lambda x: x[0], res_row_list)
- for tb in tb_list:
- tdSql.execute(f'drop table if exists {tb}')
+ def dateToTs(self, datetime_input):
+ return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f")))
def getLongName(self, len, mode = "mixed"):
"""
@@ -47,6 +121,52 @@ class TDCom:
chars = ''.join(random.choice(string.ascii_letters.lower() + string.digits) for i in range(len))
return chars
+ def restartTaosd(self, index=1, db_name="db"):
+ tdDnodes.stop(index)
+ tdDnodes.startWithoutSleep(index)
+ tdSql.execute(f"use {db_name}")
+
+ def typeof(self, variate):
+ v_type=None
+ if type(variate) is int:
+ v_type = "int"
+ elif type(variate) is str:
+ v_type = "str"
+ elif type(variate) is float:
+ v_type = "float"
+ elif type(variate) is bool:
+ v_type = "bool"
+ elif type(variate) is list:
+ v_type = "list"
+ elif type(variate) is tuple:
+ v_type = "tuple"
+ elif type(variate) is dict:
+ v_type = "dict"
+ elif type(variate) is set:
+ v_type = "set"
+ return v_type
+
+ def splitNumLetter(self, input_mix_str):
+ nums, letters = "", ""
+ for i in input_mix_str:
+ if i.isdigit():
+ nums += i
+ elif i.isspace():
+ pass
+ else:
+ letters += i
+ return nums, letters
+
+ def smlPass(self, func):
+ smlChildTableName = "no"
+ def wrapper(*args):
+ # if tdSql.getVariable("smlChildTableName")[0].upper() == "ID":
+ if smlChildTableName.upper() == "ID":
+ return func(*args)
+ else:
+ pass
+ return wrapper
+
def close(self):
self.cursor.close()
diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py
index 9190943dfd25169e9989ce0112242fd046d6e285..21d235ee5c3502ff3248681ada0a6f2c99a805ea 100644
--- a/tests/pytest/util/dnodes.py
+++ b/tests/pytest/util/dnodes.py
@@ -17,6 +17,10 @@ import os.path
import platform
import subprocess
from time import sleep
+import base64
+import json
+import copy
+from fabric2 import Connection
from util.log import *
@@ -67,17 +71,19 @@ class TDSimClient:
if os.system(cmd) != 0:
tdLog.exit(cmd)
- cmd = "mkdir -p " + self.logDir
- if os.system(cmd) != 0:
- tdLog.exit(cmd)
+ # cmd = "mkdir -p " + self.logDir
+ # if os.system(cmd) != 0:
+ # tdLog.exit(cmd)
+ os.makedirs(self.logDir)
cmd = "rm -rf " + self.cfgDir
if os.system(cmd) != 0:
tdLog.exit(cmd)
- cmd = "mkdir -p " + self.cfgDir
- if os.system(cmd) != 0:
- tdLog.exit(cmd)
+ # cmd = "mkdir -p " + self.cfgDir
+ # if os.system(cmd) != 0:
+ # tdLog.exit(cmd)
+ os.makedirs(self.cfgDir)
cmd = "touch " + self.cfgPath
if os.system(cmd) != 0:
@@ -109,6 +115,7 @@ class TDDnode:
self.deployed = 0
self.testCluster = False
self.valgrind = 0
+ self.remoteIP = ""
self.cfgDict = {
"walLevel": "2",
"fsync": "1000",
@@ -135,8 +142,9 @@ class TDDnode:
"telemetryReporting": "0"
}
- def init(self, path):
+ def init(self, path, remoteIP = ""):
self.path = path
+ self.remoteIP = remoteIP
def setTestCluster(self, value):
self.testCluster = value
@@ -160,6 +168,29 @@ class TDDnode:
def addExtraCfg(self, option, value):
self.cfgDict.update({option: value})
+ def remoteExec(self, updateCfgDict, execCmd):
+ try:
+ config = eval(self.remoteIP)
+ remote_conn = Connection(host=config["host"], port=config["port"], user=config["user"], connect_kwargs={'password':config["password"]})
+ remote_top_dir = config["path"]
+ except Exception as r:
+ remote_conn = Connection(host=self.remoteIP, port=22, user='root', connect_kwargs={'password':'123456'})
+ remote_top_dir = '~/test'
+ valgrindStr = ''
+ if (self.valgrind==1):
+ valgrindStr = '-g'
+ remoteCfgDict = copy.deepcopy(updateCfgDict)
+ if ("logDir" in remoteCfgDict):
+ del remoteCfgDict["logDir"]
+ if ("dataDir" in remoteCfgDict):
+ del remoteCfgDict["dataDir"]
+ if ("cfgDir" in remoteCfgDict):
+ del remoteCfgDict["cfgDir"]
+ remoteCfgDictStr = base64.b64encode(json.dumps(remoteCfgDict).encode()).decode()
+ execCmdStr = base64.b64encode(execCmd.encode()).decode()
+ with remote_conn.cd((remote_top_dir+sys.path[0].replace(self.path, '')).replace('\\','/')):
+ remote_conn.run("python3 ./test.py %s -d %s -e %s"%(valgrindStr,remoteCfgDictStr,execCmdStr))
+
def deploy(self, *updatecfgDict):
self.logDir = "%s/sim/dnode%d/log" % (self.path, self.index)
self.dataDir = "%s/sim/dnode%d/data" % (self.path, self.index)
@@ -179,17 +210,20 @@ class TDDnode:
if os.system(cmd) != 0:
tdLog.exit(cmd)
- cmd = "mkdir -p " + self.dataDir
- if os.system(cmd) != 0:
- tdLog.exit(cmd)
+ # cmd = "mkdir -p " + self.dataDir
+ # if os.system(cmd) != 0:
+ # tdLog.exit(cmd)
+ os.makedirs(self.dataDir)
- cmd = "mkdir -p " + self.logDir
- if os.system(cmd) != 0:
- tdLog.exit(cmd)
+ # cmd = "mkdir -p " + self.logDir
+ # if os.system(cmd) != 0:
+ # tdLog.exit(cmd)
+ os.makedirs(self.logDir)
- cmd = "mkdir -p " + self.cfgDir
- if os.system(cmd) != 0:
- tdLog.exit(cmd)
+ # cmd = "mkdir -p " + self.cfgDir
+ # if os.system(cmd) != 0:
+ # tdLog.exit(cmd)
+ os.makedirs(self.cfgDir)
cmd = "touch " + self.cfgPath
if os.system(cmd) != 0:
@@ -224,8 +258,11 @@ class TDDnode:
self.cfg(value, key)
else:
self.addExtraCfg(key, value)
- for key, value in self.cfgDict.items():
- self.cfg(key, value)
+ if (self.remoteIP == ""):
+ for key, value in self.cfgDict.items():
+ self.cfg(key, value)
+ else:
+ self.remoteExec(self.cfgDict, "tdDnodes.deploy(%d,updateCfgDict)"%self.index)
self.deployed = 1
tdLog.debug(
@@ -242,11 +279,13 @@ class TDDnode:
paths = []
for root, dirs, files in os.walk(projPath):
- if ((tool) in files):
+ if ((tool) in files or ("%s.exe"%tool) in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
paths.append(os.path.join(root, tool))
break
+ if (len(paths) == 0):
+ return ""
return paths[0]
def start(self):
@@ -261,54 +300,68 @@ class TDDnode:
tdLog.exit("dnode:%d is not deployed" % (self.index))
if self.valgrind == 0:
- cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
- binPath, self.cfgDir)
+ if platform.system().lower() == 'windows':
+ cmd = "mintty -h never -w hide %s -c %s" % (
+ binPath, self.cfgDir)
+ else:
+ cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
+ binPath, self.cfgDir)
else:
valgrindCmdline = "valgrind --log-file=\"%s/../log/valgrind.log\" --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"%self.cfgDir
- cmd = "nohup %s %s -c %s 2>&1 & " % (
- valgrindCmdline, binPath, self.cfgDir)
+ if platform.system().lower() == 'windows':
+ cmd = "mintty -h never -w hide %s %s -c %s" % (
+ valgrindCmdline, binPath, self.cfgDir)
+ else:
+ cmd = "nohup %s %s -c %s 2>&1 & " % (
+ valgrindCmdline, binPath, self.cfgDir)
print(cmd)
- if os.system(cmd) != 0:
- tdLog.exit(cmd)
- self.running = 1
- tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
- if self.valgrind == 0:
- time.sleep(0.1)
- key = 'from offline to online'
- bkey = bytes(key, encoding="utf8")
- logFile = self.logDir + "/taosdlog.0"
- i = 0
- while not os.path.exists(logFile):
- sleep(0.1)
- i += 1
- if i > 50:
- break
- popen = subprocess.Popen(
- 'tail -f ' + logFile,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- shell=True)
- pid = popen.pid
- # print('Popen.pid:' + str(pid))
- timeout = time.time() + 60 * 2
- while True:
- line = popen.stdout.readline().strip()
- if bkey in line:
- popen.kill()
- break
- if time.time() > timeout:
- tdLog.exit('wait too long for taosd start')
- tdLog.debug("the dnode:%d has been started." % (self.index))
+ if (not self.remoteIP == ""):
+ self.remoteExec(self.cfgDict, "tdDnodes.deploy(%d,updateCfgDict)\ntdDnodes.start(%d)"%(self.index, self.index))
+ self.running = 1
else:
- tdLog.debug(
- "wait 10 seconds for the dnode:%d to start." %
- (self.index))
- time.sleep(10)
-
- # time.sleep(5)
+ if os.system(cmd) != 0:
+ tdLog.exit(cmd)
+ self.running = 1
+ print("dnode:%d is running with %s " % (self.index, cmd))
+ tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
+ if self.valgrind == 0:
+ time.sleep(0.1)
+ key = 'from offline to online'
+ bkey = bytes(key, encoding="utf8")
+ logFile = self.logDir + "/taosdlog.0"
+ i = 0
+ while not os.path.exists(logFile):
+ sleep(0.1)
+ i += 1
+ if i > 50:
+ break
+ tailCmdStr = 'tail -f '
+ if platform.system().lower() == 'windows':
+ tailCmdStr = 'tail -n +0 -f '
+ popen = subprocess.Popen(
+ tailCmdStr + logFile,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ shell=True)
+ pid = popen.pid
+ # print('Popen.pid:' + str(pid))
+ timeout = time.time() + 60 * 2
+ while True:
+ line = popen.stdout.readline().strip()
+ if bkey in line:
+ popen.kill()
+ break
+ if time.time() > timeout:
+ tdLog.exit('wait too long for taosd start')
+ tdLog.debug("the dnode:%d has been started." % (self.index))
+ else:
+ tdLog.debug(
+ "wait 10 seconds for the dnode:%d to start." %
+ (self.index))
+ time.sleep(10)
def startWithoutSleep(self):
binPath = self.getPath()
@@ -332,12 +385,20 @@ class TDDnode:
print(cmd)
- if os.system(cmd) != 0:
- tdLog.exit(cmd)
+ if (self.remoteIP == ""):
+ if os.system(cmd) != 0:
+ tdLog.exit(cmd)
+ else:
+ self.remoteExec(self.cfgDict, "tdDnodes.deploy(%d,updateCfgDict)\ntdDnodes.startWithoutSleep(%d)"%(self.index, self.index))
+
self.running = 1
tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
def stop(self):
+ if (not self.remoteIP == ""):
+ self.remoteExec(self.cfgDict, "tdDnodes.stop(%d)"%self.index)
+ tdLog.info("stop dnode%d"%self.index)
+ return
if self.valgrind == 0:
toBeKilled = "taosd"
else:
@@ -354,9 +415,10 @@ class TDDnode:
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
- for port in range(6030, 6041):
- fuserCmd = "fuser -k -n tcp %d" % port
- os.system(fuserCmd)
+ if not platform.system().lower() == 'windows':
+ for port in range(6030, 6041):
+ fuserCmd = "fuser -k -n tcp %d" % port
+ os.system(fuserCmd)
if self.valgrind:
time.sleep(2)
@@ -364,6 +426,9 @@ class TDDnode:
tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))
def forcestop(self):
+ if (not self.remoteIP == ""):
+ self.remoteExec(self.cfgDict, "tdDnodes.forcestop(%d)"%self.index)
+ return
if self.valgrind == 0:
toBeKilled = "taosd"
else:
@@ -428,8 +493,11 @@ class TDDnodes:
self.dnodes.append(TDDnode(9))
self.dnodes.append(TDDnode(10))
self.simDeployed = False
+ self.testCluster = False
+ self.valgrind = 0
+ self.killValgrind = 1
- def init(self, path):
+ def init(self, path, remoteIP = ""):
psCmd = "ps -ef|grep -w taosd| grep -v grep| grep -v defunct | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
@@ -439,19 +507,20 @@ class TDDnodes:
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
- psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
- processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
- while(processID):
- killCmd = "kill -9 %s > /dev/null 2>&1" % processID
- os.system(killCmd)
- time.sleep(1)
- processID = subprocess.check_output(
- psCmd, shell=True).decode("utf-8")
+ if self.killValgrind == 1:
+ psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
+ processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
+ while(processID):
+ killCmd = "kill -9 %s > /dev/null 2>&1" % processID
+ os.system(killCmd)
+ time.sleep(1)
+ processID = subprocess.check_output(
+ psCmd, shell=True).decode("utf-8")
binPath = self.dnodes[0].getPath() + "/../../../"
- tdLog.debug("binPath %s" % (binPath))
+ # tdLog.debug("binPath %s" % (binPath))
binPath = os.path.realpath(binPath)
- tdLog.debug("binPath real path %s" % (binPath))
+ # tdLog.debug("binPath real path %s" % (binPath))
# cmd = "sudo cp %s/build/lib/libtaos.so /usr/local/lib/taos/" % (binPath)
# tdLog.debug(cmd)
@@ -474,8 +543,7 @@ class TDDnodes:
self.path = os.path.realpath(path)
for i in range(len(self.dnodes)):
- self.dnodes[i].init(self.path)
-
+ self.dnodes[i].init(self.path, remoteIP)
self.sim = TDSimClient(self.path)
def setTestCluster(self, value):
@@ -484,6 +552,9 @@ class TDDnodes:
def setValgrind(self, value):
self.valgrind = value
+ def setKillValgrind(self, value):
+ self.killValgrind = value
+
def deploy(self, index, *updatecfgDict):
self.sim.setTestCluster(self.testCluster)
@@ -557,14 +628,15 @@ class TDDnodes:
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
- psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
- processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
- while(processID):
- killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
- os.system(killCmd)
- time.sleep(1)
- processID = subprocess.check_output(
- psCmd, shell=True).decode("utf-8")
+ if self.killValgrind == 1:
+ psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
+ processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
+ while(processID):
+ killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
+ os.system(killCmd)
+ time.sleep(1)
+ processID = subprocess.check_output(
+ psCmd, shell=True).decode("utf-8")
# if os.system(cmd) != 0 :
# tdLog.exit(cmd)
diff --git a/tests/pytest/util/types.py b/tests/pytest/util/types.py
new file mode 100644
index 0000000000000000000000000000000000000000..218a4770269328a5ef7161cc56c0e0dc0c420f73
--- /dev/null
+++ b/tests/pytest/util/types.py
@@ -0,0 +1,38 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+from enum import Enum
+
+class TDSmlProtocolType(Enum):
+ '''
+ Schemaless Protocol types
+ 0 - unknown
+ 1 - InfluxDB Line Protocol
+ 2 - OpenTSDB Telnet Protocl
+ 3 - OpenTSDB JSON Protocol
+ '''
+ UNKNOWN = 0
+ LINE = 1
+ TELNET = 2
+ JSON = 3
+
+class TDSmlTimestampType(Enum):
+ NOT_CONFIGURED = 0
+ HOUR = 1
+ MINUTE = 2
+ SECOND = 3
+ MILLI_SECOND = 4
+ MICRO_SECOND = 5
+ NANO_SECOND = 6
+
+
diff --git a/tests/pytest/wal/addOldWalTest.py b/tests/pytest/wal/addOldWalTest.py
index 2f4dcd5ce807cf7bbadfa480af6ed6342058a78a..36056d1bc2d0bef786cf4a4092521867f861b93b 100644
--- a/tests/pytest/wal/addOldWalTest.py
+++ b/tests/pytest/wal/addOldWalTest.py
@@ -31,7 +31,7 @@ class TDTestCase:
def createOldDirAndAddWal(self):
oldDir = tdDnodes.getDnodesRootDir() + "dnode1/data/vnode/vnode2/wal/old"
- os.system("sudo echo 'test' >> %s/wal" % oldDir)
+ os.system("sudo echo test >> %s/wal" % oldDir)
def run(self):
diff --git a/tests/script/api/batchprepare.c b/tests/script/api/batchprepare.c
index 2ded58a979ad16e06f03ab8d4f828f1c10731df3..7dd7621d0b429caeb2e54c0215b29c4a0b396124 100644
--- a/tests/script/api/batchprepare.c
+++ b/tests/script/api/batchprepare.c
@@ -10,6 +10,9 @@
#include "../../../include/client/taos.h"
#define FUNCTION_TEST_IDX 1
+#define TIME_PRECISION_MILLI 0
+#define TIME_PRECISION_MICRO 1
+#define TIME_PRECISION_NANO 2
int32_t shortColList[] = {TSDB_DATA_TYPE_TIMESTAMP, TSDB_DATA_TYPE_INT};
int32_t fullColList[] = {TSDB_DATA_TYPE_TIMESTAMP, TSDB_DATA_TYPE_BOOL, TSDB_DATA_TYPE_TINYINT, TSDB_DATA_TYPE_UTINYINT, TSDB_DATA_TYPE_SMALLINT, TSDB_DATA_TYPE_USMALLINT, TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_UINT, TSDB_DATA_TYPE_BIGINT, TSDB_DATA_TYPE_UBIGINT, TSDB_DATA_TYPE_FLOAT, TSDB_DATA_TYPE_DOUBLE, TSDB_DATA_TYPE_BINARY, TSDB_DATA_TYPE_NCHAR};
@@ -32,6 +35,8 @@ typedef enum {
BP_BIND_COL,
} BP_BIND_TYPE;
+#define BP_BIND_TYPE_STR(t) (((t) == BP_BIND_COL) ? "column" : "tag")
+
OperInfo operInfo[] = {
{">", 2, false},
{">=", 2, false},
@@ -57,11 +62,12 @@ FuncInfo funcInfo[] = {
{"min", 1},
};
+#define BP_STARTUP_TS 1591060628000
+
char *bpStbPrefix = "st";
char *bpTbPrefix = "t";
int32_t bpDefaultStbId = 1;
-
-
+int64_t bpTs;
//char *operatorList[] = {">", ">=", "<", "<=", "=", "<>", "in", "not in"};
//char *varoperatorList[] = {">", ">=", "<", "<=", "=", "<>", "in", "not in", "like", "not like", "match", "nmatch"};
@@ -188,8 +194,10 @@ typedef struct {
bool printCreateTblSql;
bool printQuerySql;
bool printStmtSql;
+ bool printVerbose;
bool autoCreateTbl;
bool numericParam;
+ uint8_t precision;
int32_t rowNum; //row num for one table
int32_t bindColNum;
int32_t bindTagNum;
@@ -209,12 +217,15 @@ typedef struct {
int32_t caseRunNum; // total run case num
} CaseCtrl;
-#if 1
+#if 0
CaseCtrl gCaseCtrl = { // default
+ .precision = TIME_PRECISION_MICRO,
.bindNullNum = 0,
.printCreateTblSql = false,
.printQuerySql = true,
.printStmtSql = true,
+ .printVerbose = false,
+ .printRes = false,
.autoCreateTbl = false,
.numericParam = false,
.rowNum = 0,
@@ -230,7 +241,6 @@ CaseCtrl gCaseCtrl = { // default
.funcIdxListNum = 0,
.funcIdxList = NULL,
.checkParamNum = false,
- .printRes = false,
.runTimes = 0,
.caseIdx = -1,
.caseNum = -1,
@@ -240,26 +250,35 @@ CaseCtrl gCaseCtrl = { // default
#endif
-#if 0
+#if 1
CaseCtrl gCaseCtrl = {
+ .precision = TIME_PRECISION_MILLI,
.bindNullNum = 0,
- .printCreateTblSql = true,
+ .printCreateTblSql = false,
.printQuerySql = true,
.printStmtSql = true,
+ .printVerbose = false,
+ .printRes = true,
.autoCreateTbl = false,
+ .numericParam = false,
.rowNum = 0,
.bindColNum = 0,
.bindTagNum = 0,
.bindRowNum = 0,
+ .bindColTypeNum = 0,
+ .bindColTypeList = NULL,
.bindTagTypeNum = 0,
.bindTagTypeList = NULL,
+ .optrIdxListNum = 0,
+ .optrIdxList = NULL,
+ .funcIdxListNum = 0,
+ .funcIdxList = NULL,
.checkParamNum = false,
- .printRes = false,
.runTimes = 0,
- .caseIdx = 1,
- .caseNum = 1,
+ .caseIdx = -1,
+ .caseNum = -1,
.caseRunIdx = -1,
- .caseRunNum = 1,
+ .caseRunNum = -1,
};
#endif
@@ -891,7 +910,6 @@ int32_t prepareColData(BP_BIND_TYPE bType, BindData *data, int32_t bindIdx, int3
int32_t prepareInsertData(BindData *data) {
- static int64_t tsData = 1591060628000;
uint64_t allRowNum = gCurCase->rowNum * gCurCase->tblNum;
data->colNum = 0;
@@ -918,7 +936,7 @@ int32_t prepareInsertData(BindData *data) {
}
for (int32_t i = 0; i < allRowNum; ++i) {
- data->tsData[i] = tsData++;
+ data->tsData[i] = bpTs++;
data->boolData[i] = (bool)(i % 2);
data->tinyData[i] = (int8_t)i;
data->utinyData[i] = (uint8_t)(i+1);
@@ -956,7 +974,6 @@ int32_t prepareInsertData(BindData *data) {
}
int32_t prepareQueryCondData(BindData *data, int32_t tblIdx) {
- static int64_t tsData = 1591060628000;
uint64_t bindNum = gCurCase->rowNum / gCurCase->bindRowNum;
data->colNum = 0;
@@ -982,7 +999,7 @@ int32_t prepareQueryCondData(BindData *data, int32_t tblIdx) {
}
for (int32_t i = 0; i < bindNum; ++i) {
- data->tsData[i] = tsData + tblIdx*gCurCase->rowNum + rand()%gCurCase->rowNum;
+ data->tsData[i] = bpTs + tblIdx*gCurCase->rowNum + rand()%gCurCase->rowNum;
data->boolData[i] = (bool)(tblIdx*gCurCase->rowNum + rand() % gCurCase->rowNum);
data->tinyData[i] = (int8_t)(tblIdx*gCurCase->rowNum + rand() % gCurCase->rowNum);
data->utinyData[i] = (uint8_t)(tblIdx*gCurCase->rowNum + rand() % gCurCase->rowNum);
@@ -1014,7 +1031,6 @@ int32_t prepareQueryCondData(BindData *data, int32_t tblIdx) {
int32_t prepareQueryMiscData(BindData *data, int32_t tblIdx) {
- static int64_t tsData = 1591060628000;
uint64_t bindNum = gCurCase->rowNum / gCurCase->bindRowNum;
data->colNum = 0;
@@ -1040,7 +1056,7 @@ int32_t prepareQueryMiscData(BindData *data, int32_t tblIdx) {
}
for (int32_t i = 0; i < bindNum; ++i) {
- data->tsData[i] = tsData + tblIdx*gCurCase->rowNum + rand()%gCurCase->rowNum;
+ data->tsData[i] = bpTs + tblIdx*gCurCase->rowNum + rand()%gCurCase->rowNum;
data->boolData[i] = (bool)(tblIdx*gCurCase->rowNum + rand() % gCurCase->rowNum);
data->tinyData[i] = (int8_t)(tblIdx*gCurCase->rowNum + rand() % gCurCase->rowNum);
data->utinyData[i] = (uint8_t)(tblIdx*gCurCase->rowNum + rand() % gCurCase->rowNum);
@@ -1202,39 +1218,7 @@ int32_t bpAppendValueString(char *buf, int type, void *value, int32_t valueLen,
}
-int32_t bpBindParam(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind) {
- static int32_t n = 0;
-
- if (gCurCase->bindRowNum > 1) {
- if (0 == (n++%2)) {
- if (taos_stmt_bind_param_batch(stmt, bind)) {
- printf("!!!taos_stmt_bind_param_batch error:%s\n", taos_stmt_errstr(stmt));
- exit(1);
- }
- } else {
- for (int32_t i = 0; i < gCurCase->bindColNum; ++i) {
- if (taos_stmt_bind_single_param_batch(stmt, bind++, i)) {
- printf("!!!taos_stmt_bind_single_param_batch error:%s\n", taos_stmt_errstr(stmt));
- exit(1);
- }
- }
- }
- } else {
- if (0 == (n++%2)) {
- if (taos_stmt_bind_param_batch(stmt, bind)) {
- printf("!!!taos_stmt_bind_param_batch error:%s\n", taos_stmt_errstr(stmt));
- exit(1);
- }
- } else {
- if (taos_stmt_bind_param(stmt, bind)) {
- printf("!!!taos_stmt_bind_param error:%s\n", taos_stmt_errstr(stmt));
- exit(1);
- }
- }
- }
- return 0;
-}
void bpCheckIsInsert(TAOS_STMT *stmt, int32_t insert) {
int32_t isInsert = 0;
@@ -1280,15 +1264,12 @@ void bpCheckAffectedRowsOnce(TAOS_STMT *stmt, int32_t expectedNum) {
}
void bpCheckQueryResult(TAOS_STMT *stmt, TAOS *taos, char *stmtSql, TAOS_MULTI_BIND* bind) {
- TAOS_RES* res = taos_stmt_use_result(stmt);
- int32_t sqlResNum = 0;
- int32_t stmtResNum = 0;
- bpFetchRows(res, gCaseCtrl.printRes, &stmtResNum);
-
+ // query using sql
char sql[1024];
int32_t len = 0;
char* p = stmtSql;
char* s = NULL;
+ int32_t sqlResNum = 0;
for (int32_t i = 0; true; ++i, p=s+1) {
s = strchr(p, '?');
@@ -1313,6 +1294,12 @@ void bpCheckQueryResult(TAOS_STMT *stmt, TAOS *taos, char *stmtSql, TAOS_MULTI_B
}
bpExecQuery(taos, sql, gCaseCtrl.printRes, &sqlResNum);
+
+ // query using stmt
+ TAOS_RES* res = taos_stmt_use_result(stmt);
+ int32_t stmtResNum = 0;
+ bpFetchRows(res, gCaseCtrl.printRes, &stmtResNum);
+
if (sqlResNum != stmtResNum) {
printf("!!!sql res num %d mis-match stmt res num %d\n", sqlResNum, stmtResNum);
exit(1);
@@ -1321,9 +1308,165 @@ void bpCheckQueryResult(TAOS_STMT *stmt, TAOS *taos, char *stmtSql, TAOS_MULTI_B
printf("***sql res num match stmt res num %d\n", stmtResNum);
}
+void bpCheckColTagFields(TAOS_STMT *stmt, int32_t fieldNum, TAOS_FIELD_E* pFields, int32_t expecteNum, TAOS_MULTI_BIND* pBind, BP_BIND_TYPE type) {
+ int32_t code = 0;
+
+ if (fieldNum != expecteNum) {
+ printf("!!!%s field num %d mis-match expect num %d\n", BP_BIND_TYPE_STR(type), fieldNum, expecteNum);
+ exit(1);
+ }
+
+ if (type == BP_BIND_COL) {
+ if (pFields[0].precision != gCaseCtrl.precision) {
+ printf("!!!db precision %d mis-match expect %d\n", pFields[0].precision, gCaseCtrl.precision);
+ exit(1);
+ }
+ }
+
+ for (int32_t i = 0; i < fieldNum; ++i) {
+ if (pFields[i].type != pBind[i].buffer_type) {
+ printf("!!!%s %dth field type %d mis-match expect type %d\n", BP_BIND_TYPE_STR(type), i, pFields[i].type, pBind[i].buffer_type);
+ exit(1);
+ }
+
+ if (pFields[i].type == TSDB_DATA_TYPE_BINARY) {
+ if (pFields[i].bytes != (pBind[i].buffer_length + 2)) {
+ printf("!!!%s %dth field len %d mis-match expect len %d\n", BP_BIND_TYPE_STR(type), i, pFields[i].bytes, (pBind[i].buffer_length + 2));
+ exit(1);
+ }
+ } else if (pFields[i].type == TSDB_DATA_TYPE_NCHAR) {
+ if (pFields[i].bytes != (pBind[i].buffer_length * 4 + 2)) {
+ printf("!!!%s %dth field len %d mis-match expect len %d\n", BP_BIND_TYPE_STR(type), i, pFields[i].bytes, (pBind[i].buffer_length + 2));
+ exit(1);
+ }
+ } else if (pFields[i].bytes != pBind[i].buffer_length) {
+ printf("!!!%s %dth field len %d mis-match expect len %d\n", BP_BIND_TYPE_STR(type), i, pFields[i].bytes, pBind[i].buffer_length);
+ exit(1);
+ }
+ }
+
+ if (type == BP_BIND_COL) {
+ int fieldType = 0;
+ int fieldBytes = 0;
+ for (int32_t i = 0; i < fieldNum; ++i) {
+ code = taos_stmt_get_param(stmt, i, &fieldType, &fieldBytes);
+ if (code) {
+ printf("!!!taos_stmt_get_param error:%s\n", taos_stmt_errstr(stmt));
+ exit(1);
+ }
+
+ if (pFields[i].type != fieldType) {
+ printf("!!!%s %dth field type %d mis-match expect type %d\n", BP_BIND_TYPE_STR(type), i, fieldType, pFields[i].type);
+ exit(1);
+ }
+
+ if (pFields[i].bytes != fieldBytes) {
+ printf("!!!%s %dth field len %d mis-match expect len %d\n", BP_BIND_TYPE_STR(type), i, fieldBytes, pFields[i].bytes);
+ exit(1);
+ }
+ }
+ }
+
+ if (gCaseCtrl.printVerbose) {
+ printf("%s fields check passed\n", BP_BIND_TYPE_STR(type));
+ }
+}
+
+
+void bpCheckTagFields(TAOS_STMT *stmt, TAOS_MULTI_BIND* pBind) {
+ int32_t code = 0;
+ int fieldNum = 0;
+ TAOS_FIELD_E* pFields = NULL;
+ code = taos_stmt_get_tag_fields(stmt, &fieldNum, &pFields);
+ if (code != 0){
+ printf("!!!taos_stmt_get_tag_fields error:%s\n", taos_stmt_errstr(stmt));
+ exit(1);
+ }
+
+ bpCheckColTagFields(stmt, fieldNum, pFields, gCurCase->bindTagNum, pBind, BP_BIND_TAG);
+}
+
+void bpCheckColFields(TAOS_STMT *stmt, TAOS_MULTI_BIND* pBind) {
+ if (gCurCase->testType == TTYPE_QUERY) {
+ return;
+ }
+
+ int32_t code = 0;
+ int fieldNum = 0;
+ TAOS_FIELD_E* pFields = NULL;
+ code = taos_stmt_get_col_fields(stmt, &fieldNum, &pFields);
+ if (code != 0){
+ printf("!!!taos_stmt_get_col_fields error:%s\n", taos_stmt_errstr(stmt));
+ exit(1);
+ }
+
+ bpCheckColTagFields(stmt, fieldNum, pFields, gCurCase->bindColNum, pBind, BP_BIND_COL);
+}
+
+void bpShowBindParam(TAOS_MULTI_BIND *bind, int32_t num) {
+ for (int32_t i = 0; i < num; ++i) {
+ TAOS_MULTI_BIND* b = &bind[i];
+ printf("Bind %d: type[%d],buf[%p],buflen[%d],len[%],null[%d],num[%d]\n",
+ i, b->buffer_type, b->buffer, b->buffer_length, b->length ? *b->length : 0, b->is_null ? *b->is_null : 0, b->num);
+ }
+}
+
+int32_t bpBindParam(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind) {
+ static int32_t n = 0;
+
+ bpCheckColFields(stmt, bind);
+
+ if (gCurCase->bindRowNum > 1) {
+ if (0 == (n++%2)) {
+ if (taos_stmt_bind_param_batch(stmt, bind)) {
+ printf("!!!taos_stmt_bind_param_batch error:%s\n", taos_stmt_errstr(stmt));
+ bpShowBindParam(bind, gCurCase->bindColNum);
+ exit(1);
+ }
+ } else {
+ for (int32_t i = 0; i < gCurCase->bindColNum; ++i) {
+ if (taos_stmt_bind_single_param_batch(stmt, bind+i, i)) {
+ printf("!!!taos_stmt_bind_single_param_batch %d error:%s\n", taos_stmt_errstr(stmt), i);
+ bpShowBindParam(bind, gCurCase->bindColNum);
+ exit(1);
+ }
+ }
+ }
+ } else {
+ if (0 == (n++%2)) {
+ if (taos_stmt_bind_param_batch(stmt, bind)) {
+ printf("!!!taos_stmt_bind_param_batch error:%s\n", taos_stmt_errstr(stmt));
+ bpShowBindParam(bind, gCurCase->bindColNum);
+ exit(1);
+ }
+ } else {
+ if (taos_stmt_bind_param(stmt, bind)) {
+ printf("!!!taos_stmt_bind_param error:%s\n", taos_stmt_errstr(stmt));
+ bpShowBindParam(bind, gCurCase->bindColNum);
+ exit(1);
+ }
+ }
+ }
+
+ return 0;
+}
+
int32_t bpSetTableNameTags(BindData *data, int32_t tblIdx, char *tblName, TAOS_STMT *stmt) {
+ int32_t code = 0;
if (gCurCase->bindTagNum > 0) {
- return taos_stmt_set_tbname_tags(stmt, tblName, data->pTags + tblIdx * gCurCase->bindTagNum);
+ if ((rand() % 2) == 0) {
+ code = taos_stmt_set_tbname(stmt, tblName);
+ if (code != 0){
+ printf("!!!taos_stmt_set_tbname error:%s\n", taos_stmt_errstr(stmt));
+ exit(1);
+ }
+
+ bpCheckTagFields(stmt, data->pTags + tblIdx * gCurCase->bindTagNum);
+
+ return taos_stmt_set_tags(stmt, data->pTags + tblIdx * gCurCase->bindTagNum);
+ } else {
+ return taos_stmt_set_tbname_tags(stmt, tblName, data->pTags + tblIdx * gCurCase->bindTagNum);
+ }
} else {
return taos_stmt_set_tbname(stmt, tblName);
}
@@ -1755,7 +1898,7 @@ int insertAUTOTest1(TAOS_STMT *stmt, TAOS *taos) {
if (gCurCase->tblNum > 1) {
char buf[32];
sprintf(buf, "t%d", t);
- code = taos_stmt_set_tbname_tags(stmt, buf, data.pTags + t * gCurCase->bindTagNum);
+ code = bpSetTableNameTags(&data, t, buf, stmt);
if (code != 0){
printf("!!!taos_stmt_set_tbname_tags error:%s\n", taos_stmt_errstr(stmt));
exit(1);
@@ -2223,14 +2366,48 @@ void generateCreateTableSQL(char *buf, int32_t tblIdx, int32_t colNum, int32_t *
}
}
+char *bpPrecisionStr(uint8_t precision) {
+ switch (precision) {
+ case TIME_PRECISION_MILLI:
+ return "ms";
+ case TIME_PRECISION_MICRO:
+ return "us";
+ case TIME_PRECISION_NANO:
+ return "ns";
+ default:
+ return "unknwon";
+ }
+}
+
+void bpSetStartupTs() {
+ switch (gCaseCtrl.precision) {
+ case TIME_PRECISION_MILLI:
+ bpTs = BP_STARTUP_TS;
+ break;
+ case TIME_PRECISION_MICRO:
+ bpTs = BP_STARTUP_TS * 1000;
+ break;
+ case TIME_PRECISION_NANO:
+ bpTs = BP_STARTUP_TS * 1000000;
+ break;
+ default:
+ bpTs = BP_STARTUP_TS;
+ break;
+ }
+}
+
void prepare(TAOS *taos, int32_t colNum, int32_t *colList, int prepareStb) {
TAOS_RES *result;
int code;
+ char createDbSql[128] = {0};
result = taos_query(taos, "drop database demo");
taos_free_result(result);
- result = taos_query(taos, "create database demo keep 36500");
+ sprintf(createDbSql, "create database demo keep 36500 precision \"%s\"", bpPrecisionStr(gCaseCtrl.precision));
+ printf("\tCreate Database SQL:%s\n", createDbSql);
+
+ result = taos_query(taos, createDbSql);
code = taos_errno(result);
if (code != 0) {
printf("!!!failed to create database, reason:%s\n", taos_errstr(result));
@@ -2278,6 +2455,8 @@ int32_t runCase(TAOS *taos, int32_t caseIdx, int32_t caseRunIdx, bool silent) {
CaseCfg cfg = gCase[caseIdx];
CaseCfg cfgBk;
gCurCase = &cfg;
+
+ bpSetStartupTs();
if ((gCaseCtrl.bindColTypeNum || gCaseCtrl.bindColNum) && (gCurCase->colNum != gFullColNum)) {
return 1;
@@ -2413,22 +2592,28 @@ void* runCaseList(TAOS *taos) {
}
void runAll(TAOS *taos) {
-#if 1
-
- strcpy(gCaseCtrl.caseCatalog, "Normal Test");
+ strcpy(gCaseCtrl.caseCatalog, "Default Test");
printf("%s Begin\n", gCaseCtrl.caseCatalog);
runCaseList(taos);
+ strcpy(gCaseCtrl.caseCatalog, "Micro DB precision Test");
+ printf("%s Begin\n", gCaseCtrl.caseCatalog);
+ gCaseCtrl.precision = TIME_PRECISION_MICRO;
+ runCaseList(taos);
+ gCaseCtrl.precision = TIME_PRECISION_MILLI;
+ strcpy(gCaseCtrl.caseCatalog, "Nano DB precision Test");
+ printf("%s Begin\n", gCaseCtrl.caseCatalog);
+ gCaseCtrl.precision = TIME_PRECISION_NANO;
+ runCaseList(taos);
+ gCaseCtrl.precision = TIME_PRECISION_MILLI;
+
strcpy(gCaseCtrl.caseCatalog, "Auto Create Table Test");
gCaseCtrl.autoCreateTbl = true;
printf("%s Begin\n", gCaseCtrl.caseCatalog);
runCaseList(taos);
gCaseCtrl.autoCreateTbl = false;
-
-#endif
-/*
strcpy(gCaseCtrl.caseCatalog, "Null Test");
printf("%s Begin\n", gCaseCtrl.caseCatalog);
gCaseCtrl.bindNullNum = 1;
@@ -2441,6 +2626,7 @@ void runAll(TAOS *taos) {
runCaseList(taos);
gCaseCtrl.bindRowNum = 0;
+#if 0
strcpy(gCaseCtrl.caseCatalog, "Row Num Test");
printf("%s Begin\n", gCaseCtrl.caseCatalog);
gCaseCtrl.rowNum = 1000;
@@ -2448,23 +2634,21 @@ void runAll(TAOS *taos) {
runCaseList(taos);
gCaseCtrl.rowNum = 0;
gCaseCtrl.printRes = true;
-*/
strcpy(gCaseCtrl.caseCatalog, "Runtimes Test");
printf("%s Begin\n", gCaseCtrl.caseCatalog);
gCaseCtrl.runTimes = 2;
runCaseList(taos);
gCaseCtrl.runTimes = 0;
+#endif
-#if 1
strcpy(gCaseCtrl.caseCatalog, "Check Param Test");
printf("%s Begin\n", gCaseCtrl.caseCatalog);
gCaseCtrl.checkParamNum = true;
runCaseList(taos);
gCaseCtrl.checkParamNum = false;
-#endif
-/*
+#if 0
strcpy(gCaseCtrl.caseCatalog, "Bind Col Num Test");
printf("%s Begin\n", gCaseCtrl.caseCatalog);
gCaseCtrl.bindColNum = 6;
@@ -2476,7 +2660,7 @@ void runAll(TAOS *taos) {
gCaseCtrl.bindColTypeNum = tListLen(bindColTypeList);
gCaseCtrl.bindColTypeList = bindColTypeList;
runCaseList(taos);
-*/
+#endif
printf("All Test End\n");
}
diff --git a/tests/script/general/alter/cached_schema_after_alter.sim b/tests/script/general/alter/cached_schema_after_alter.sim
index 96ee4390845450d53508cc90c48a3148a0a827dd..043f360856e4b4f0533bf4dc5e4be7cea71c3325 100644
--- a/tests/script/general/alter/cached_schema_after_alter.sim
+++ b/tests/script/general/alter/cached_schema_after_alter.sim
@@ -1,9 +1,6 @@
system sh/stop_dnodes.sh
-
system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 2
system sh/exec.sh -n dnode1 -s start
-sleep 2000
sql connect
$db = csaa_db
diff --git a/tests/script/general/alter/dnode.sim b/tests/script/general/alter/dnode.sim
index 7b31218fc231cfdbb79ca97573cfc6f6f149037d..64e8a17de02c956a937aa1001ac4d5873a6bed21 100644
--- a/tests/script/general/alter/dnode.sim
+++ b/tests/script/general/alter/dnode.sim
@@ -1,10 +1,6 @@
system sh/stop_dnodes.sh
-
system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
sql connect
print ======== step1
diff --git a/tests/script/general/alter/table.sim b/tests/script/general/alter/table.sim
index 06704eeca6b3149b47ddc2ffb90aaab9df934bd8..9ca2f60bdc37f827e0832dc59399bf73732d7748 100644
--- a/tests/script/general/alter/table.sim
+++ b/tests/script/general/alter/table.sim
@@ -1,10 +1,6 @@
system sh/stop_dnodes.sh
-
system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
sql connect
print ======== step1
@@ -256,6 +252,7 @@ endi
print ======== step8
sql alter table tb add column h binary(10)
+sql select * from tb
sql describe tb
if $data00 != ts then
return -1
@@ -308,7 +305,7 @@ endi
if $data80 != h then
return -1
endi
-if $data81 != BINARY then
+if $data81 != VARCHAR then
return -1
endi
if $data82 != 10 then
@@ -375,7 +372,7 @@ endi
if $data80 != h then
return -1
endi
-if $data81 != BINARY then
+if $data81 != VARCHAR then
return -1
endi
if $data82 != 10 then
@@ -451,7 +448,7 @@ endi
if $data70 != h then
return -1
endi
-if $data71 != BINARY then
+if $data71 != VARCHAR then
return -1
endi
if $data72 != 10 then
@@ -500,7 +497,7 @@ endi
if $data60 != h then
return -1
endi
-if $data61 != BINARY then
+if $data61 != VARCHAR then
return -1
endi
if $data62 != 10 then
@@ -543,7 +540,7 @@ endi
if $data50 != h then
return -1
endi
-if $data51 != BINARY then
+if $data51 != VARCHAR then
return -1
endi
if $data52 != 10 then
@@ -580,7 +577,7 @@ endi
if $data40 != h then
return -1
endi
-if $data41 != BINARY then
+if $data41 != VARCHAR then
return -1
endi
if $data42 != 10 then
@@ -611,7 +608,7 @@ endi
if $data30 != h then
return -1
endi
-if $data31 != BINARY then
+if $data31 != VARCHAR then
return -1
endi
if $data32 != 10 then
@@ -636,7 +633,7 @@ endi
if $data20 != h then
return -1
endi
-if $data21 != BINARY then
+if $data21 != VARCHAR then
return -1
endi
if $data22 != 10 then
diff --git a/tests/script/general/alter/testSuite.sim b/tests/script/general/alter/testSuite.sim
deleted file mode 100644
index cfac68144c080593499159eec81325924e7f25e6..0000000000000000000000000000000000000000
--- a/tests/script/general/alter/testSuite.sim
+++ /dev/null
@@ -1,7 +0,0 @@
-run general/alter/cached_schema_after_alter.sim
-run general/alter/count.sim
-run general/alter/import.sim
-run general/alter/insert1.sim
-run general/alter/insert2.sim
-run general/alter/metrics.sim
-run general/alter/table.sim
\ No newline at end of file
diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt
index 45fa037458b4c415563bb45c62b7a163495b582a..bb446fb248602842b579817dbdbbcc02f0a0680f 100644
--- a/tests/script/jenkins/basic.txt
+++ b/tests/script/jenkins/basic.txt
@@ -56,6 +56,9 @@
# ---- mnode
./test.sh -f tsim/mnode/basic1.sim
+./test.sh -f tsim/mnode/basic2.sim
+./test.sh -f tsim/mnode/basic3.sim
+#./test.sh -f tsim/mnode/basic4.sim
# ---- show
./test.sh -f tsim/show/basic.sim
@@ -66,8 +69,12 @@
# ---- stream
./test.sh -f tsim/stream/basic0.sim
./test.sh -f tsim/stream/basic1.sim
+./test.sh -f tsim/stream/basic2.sim
+# ./test.sh -f tsim/stream/session0.sim
+# ./test.sh -f tsim/stream/session1.sim
# ---- transaction
+ ./test.sh -f tsim/trans/lossdata1.sim
./test.sh -f tsim/trans/create_db.sim
# ---- tmq
@@ -83,15 +90,26 @@
./test.sh -f tsim/tmq/topic.sim
# --- stable
-./test.sh -f tsim/stable/alter1.sim
./test.sh -f tsim/stable/disk.sim
./test.sh -f tsim/stable/dnode3.sim
./test.sh -f tsim/stable/metrics.sim
./test.sh -f tsim/stable/refcount.sim
-#./test.sh -f tsim/stable/show.sim
+./test.sh -f tsim/stable/show.sim
./test.sh -f tsim/stable/values.sim
./test.sh -f tsim/stable/vnode3.sim
-
+./test.sh -f tsim/stable/column_add.sim
+./test.sh -f tsim/stable/column_drop.sim
+./test.sh -f tsim/stable/column_modify.sim
+./test.sh -f tsim/stable/tag_add.sim
+./test.sh -f tsim/stable/tag_drop.sim
+./test.sh -f tsim/stable/tag_modify.sim
+./test.sh -f tsim/stable/tag_rename.sim
+./test.sh -f tsim/stable/alter_comment.sim
+./test.sh -f tsim/stable/alter_count.sim
+./test.sh -f tsim/stable/alter_insert1.sim
+./test.sh -f tsim/stable/alter_insert2.sim
+./test.sh -f tsim/stable/alter_import.sim
+./test.sh -f tsim/stable/tag_filter.sim
# --- for multi process mode
./test.sh -f tsim/user/basic1.sim -m
@@ -103,13 +121,22 @@
./test.sh -f tsim/tmq/basic3.sim -m
./test.sh -f tsim/stable/vnode3.sim -m
./test.sh -f tsim/qnode/basic1.sim -m
-./test.sh -f tsim/mnode/basic1.sim -m
+#./test.sh -f tsim/mnode/basic1.sim -m
# --- sma
-./test.sh -f tsim/sma/tsmaCreateInsertData.sim
+#./test.sh -f tsim/sma/tsmaCreateInsertData.sim
./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim
# --- valgrind
./test.sh -f tsim/valgrind/checkError.sim -v
+# --- sync
+./test.sh -f tsim/sync/3Replica1VgElect.sim
+./test.sh -f tsim/sync/3Replica5VgElect.sim
+./test.sh -f tsim/sync/oneReplica1VgElect.sim
+./test.sh -f tsim/sync/oneReplica5VgElect.sim
+
+# --- catalog
+./test.sh -f tsim/catalog/alterInCurrent.sim
+
#======================b1-end===============
diff --git a/tests/script/sh/deploy.sh b/tests/script/sh/deploy.sh
index da295f640e01cbf5cab4919aafc6cf56f1a268fc..5edc0a4d3e858d48e11eb3eea8d2fd48244b08ee 100755
--- a/tests/script/sh/deploy.sh
+++ b/tests/script/sh/deploy.sh
@@ -136,7 +136,7 @@ echo "qDebugFlag 143" >> $TAOS_CFG
echo "rpcDebugFlag 143" >> $TAOS_CFG
echo "tmrDebugFlag 131" >> $TAOS_CFG
echo "uDebugFlag 143" >> $TAOS_CFG
-echo "sDebugFlag 135" >> $TAOS_CFG
+echo "sDebugFlag 143" >> $TAOS_CFG
echo "wDebugFlag 143" >> $TAOS_CFG
echo "numOfLogLines 20000000" >> $TAOS_CFG
echo "statusInterval 1" >> $TAOS_CFG
diff --git a/tests/script/tsim/bnode/basic1.sim b/tests/script/tsim/bnode/basic1.sim
index b1db6efc72afce083d9594987ccee3d10ab83ef4..80608453b8cf1243f27583a719f315462a4412d4 100644
--- a/tests/script/tsim/bnode/basic1.sim
+++ b/tests/script/tsim/bnode/basic1.sim
@@ -24,7 +24,7 @@ if $data00 != 1 then
return -1
endi
-if $data02 != LEADER then
+if $data02 != leader then
return -1
endi
@@ -71,7 +71,7 @@ if $data00 != 1 then
return -1
endi
-if $data02 != LEADER then
+if $data02 != leader then
return -1
endi
diff --git a/tests/script/tsim/catalog/alterInCurrent.sim b/tests/script/tsim/catalog/alterInCurrent.sim
new file mode 100644
index 0000000000000000000000000000000000000000..3cb337bbe1930104a21d3d31bf4d5d34a2515352
--- /dev/null
+++ b/tests/script/tsim/catalog/alterInCurrent.sim
@@ -0,0 +1,70 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+
+print ========= start dnode1 as LEADER
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+print ======== drop column in normal table
+sql drop database if exists db1;
+sql create database db1;
+sql use db1;
+sql create table t1 (ts timestamp, f1 int, f2 int);
+sql insert into t1 values (1591060628000, 1, 2);
+sql alter table t1 drop column f2;
+sql insert into t1 values (1591060628001, 2);
+
+print ======== add column in normal table
+sql drop database db1;
+sql create database db1;
+sql use db1;
+sql create table t1 (ts timestamp, f1 int);
+sql insert into t1 values (1591060628000, 1);
+sql alter table t1 add column f2 int;
+sql insert into t1 values (1591060628001, 2, 2);
+
+
+print ======== drop column in super table
+sql drop database db1;
+sql create database db1;
+sql use db1;
+sql create stable st1 (ts timestamp, f1 int, f2 int) tags (t1 int);
+sql create table t1 using st1 tags(1);
+sql insert into t1 values (1591060628000, 1, 2);
+sql alter table st1 drop column f2;
+sql insert into t1 values (1591060628001, 2);
+
+
+print ======== add column in super table
+sql drop database db1;
+sql create database db1;
+sql use db1;
+sql create stable st1 (ts timestamp, f1 int) tags (t1 int);
+sql create table t1 using st1 tags(1);
+sql insert into t1 values (1591060628000, 1);
+sql alter table st1 add column f2 int;
+sql insert into t1 values (1591060628001, 2, 2);
+
+
+print ======== add tag in super table
+sql drop database db1;
+sql create database db1;
+sql use db1;
+sql create stable st1 (ts timestamp, f1 int) tags (t1 int);
+sql create table t1 using st1 tags(1);
+sql insert into t1 values (1591060628000, 1);
+sql alter table st1 add tag t2 int;
+sql create table t2 using st1 tags(2, 2);
+
+
+print ======== drop tag in super table
+sql drop database db1;
+sql create database db1;
+sql use db1;
+sql create stable st1 (ts timestamp, f1 int) tags (t1 int, t2 int);
+sql create table t1 using st1 tags(1, 1);
+sql insert into t1 values (1591060628000, 1);
+sql alter table st1 drop tag t2;
+sql create table t2 using st1 tags(2);
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/db/alter_option.sim b/tests/script/tsim/db/alter_option.sim
index aeb04293f2d5df29a07e629d32df3d96cb5d16b1..f3adb4535ec0a2b6dae2de6a277ab3a913bb711a 100644
--- a/tests/script/tsim/db/alter_option.sim
+++ b/tests/script/tsim/db/alter_option.sim
@@ -131,43 +131,43 @@ endi
sleep 3000
#sql show db.vgroups
-#if $data[0][4] == LEADER then
-# if $data[0][6] != FOLLOWER then
+#if $data[0][4] == leader then
+# if $data[0][6] != follower then
# return -1
# endi
-# if $data[0][8] != FOLLOWER then
+# if $data[0][8] != follower then
# return -1
# endi
#endi
-#if $data[0][6] == LEADER then
-# if $data[0][4] != FOLLOWER then
+#if $data[0][6] == leader then
+# if $data[0][4] != follower then
# return -1
# endi
-# if $data[0][8] != FOLLOWER then
+# if $data[0][8] != follower then
# return -1
# endi
#endi
-#if $data[0][8] == LEADER then
-# if $data[0][4] != FOLLOWER then
+#if $data[0][8] == leader then
+# if $data[0][4] != follower then
# return -1
# endi
-# if $data[0][6] != FOLLOWER then
+# if $data[0][6] != follower then
# return -1
# endi
#endi
#
-#if $data[0][4] != LEADER then
-# if $data[0][4] != FOLLOWER then
+#if $data[0][4] != leader then
+# if $data[0][4] != follower then
# return -1
# endi
#endi
-#if $data[0][6] != LEADER then
-# if $data[0][6] != FOLLOWER then
+#if $data[0][6] != leader then
+# if $data[0][6] != follower then
# return -1
# endi
#endi
-#if $data[0][8] != LEADER then
-# if $data[0][8] != FOLLOWER then
+#if $data[0][8] != leader then
+# if $data[0][8] != follower then
# return -1
# endi
#endi
diff --git a/tests/script/tsim/db/alter_replica_13.sim b/tests/script/tsim/db/alter_replica_13.sim
new file mode 100644
index 0000000000000000000000000000000000000000..e20bf6937d1f3f64656848674fbce2be4c369717
--- /dev/null
+++ b/tests/script/tsim/db/alter_replica_13.sim
@@ -0,0 +1,141 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+sql connect
+
+print =============== step1: create dnodes
+sql create dnode $hostname port 7200
+
+$x = 0
+step1:
+ $ = $x + 1
+ sleep 1000
+ if $x == 10 then
+ print ====> dnode not ready!
+ return -1
+ endi
+sql show dnodes
+print ===> $data00 $data01 $data02 $data03 $data04 $data05
+print ===> $data10 $data11 $data12 $data13 $data14 $data15
+if $rows != 2 then
+ return -1
+endi
+if $data(1)[4] != ready then
+ goto step1
+endi
+if $data(2)[4] != ready then
+ goto step1
+endi
+
+print =============== step2: create database
+sql create database db vgroups 1
+sql show databases
+if $rows != 3 then
+ return -1
+endi
+if $data(db)[4] != 1 then
+ return -1
+endi
+
+sql show dnodes
+if $data(2)[2] != 1 then
+ return -1
+endi
+
+# vnodes
+sql show dnodes
+if $data(2)[2] != 1 then
+ return -1
+endi
+
+# v1_dnode
+sql show db.vgroups
+if $data(2)[3] != 2 then
+ return -1
+endi
+
+sql_error alter database db replica 3
+sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 binary(16)) comment "abd"
+sql create table db.ctb using db.stb tags(101, "102")
+sql insert into db.ctb values(now, 1, "2")
+sql select * from db.stb
+if $rows != 1 then
+ return -1
+endi
+
+print =============== step3: create dnodes
+sql create dnode $hostname port 7300
+sql create dnode $hostname port 7400
+
+$x = 0
+step3:
+ $x = $x + 1
+ sleep 1000
+ if $x == 10 then
+ print ====> dnode not ready!
+ return -1
+ endi
+sql show dnodes
+print ===> rows: $rows
+print ===> $data00 $data01 $data02 $data03 $data04 $data05
+print ===> $data10 $data11 $data12 $data13 $data14 $data15
+print ===> $data20 $data21 $data22 $data23 $data24 $data25
+print ===> $data30 $data31 $data32 $data33 $data24 $data35
+if $rows != 4 then
+ return -1
+endi
+if $data(1)[4] != ready then
+ goto step3
+endi
+if $data(2)[4] != ready then
+ goto step3
+endi
+if $data(3)[4] != ready then
+ goto step3
+endi
+if $data(4)[4] != ready then
+ goto step3
+endi
+
+print ============= step4: alter database
+sql alter database db replica 3
+
+$x = 0
+step4:
+ $x = $x + 1
+ sleep 1000
+ if $x == 10 then
+ print ====> dnode not ready!
+ return -1
+ endi
+sql show db.vgroups
+print ===> rows: $rows
+print ===> $data00 $data01 $data02 $data03 $data04 $data05
+if $data[0][4] != leader then
+ goto step4
+endi
+if $data[0][6] != follower then
+ goto step4
+endi
+if $data[0][8] != follower then
+ goto step4
+endi
+
+print ============= step5: stop dnode 2
+return
+
+sql select * from db.stb
+if $rows != 1 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
+system sh/exec.sh -n dnode3 -s stop -x SIGINT
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/tsim/dnode/basic1.sim b/tests/script/tsim/dnode/basic1.sim
index d49dba60f3940094245c0a9f82a912d3a97155c4..a5b5427e036e1f74a2287a2d4995c5936fd149f5 100644
--- a/tests/script/tsim/dnode/basic1.sim
+++ b/tests/script/tsim/dnode/basic1.sim
@@ -7,6 +7,7 @@ sql connect
print =============== show dnodes
sql show dnodes;
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
if $rows != 1 then
return -1
endi
@@ -15,12 +16,9 @@ if $data00 != 1 then
return -1
endi
-# check 'vnodes' feild ?
-#if $data02 != 0 then
-# return -1
-#endi
sql show mnodes;
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
if $rows != 1 then
return -1
endi
@@ -29,7 +27,7 @@ if $data00 != 1 then
return -1
endi
-if $data02 != LEADER then
+if $data02 != leader then
return -1
endi
@@ -76,7 +74,7 @@ if $data00 != 1 then
return -1
endi
-if $data02 != LEADER then
+if $data02 != leader then
return -1
endi
diff --git a/tests/script/tsim/insert/update0.sim b/tests/script/tsim/insert/update0.sim
index 3cb5e4008e3a57e3178721b7e3f5458ef07be52b..0ba3e98c913e1c37c50c351bed7d7385a1cad0d3 100644
--- a/tests/script/tsim/insert/update0.sim
+++ b/tests/script/tsim/insert/update0.sim
@@ -9,7 +9,7 @@ sql create database d0 keep 365000d,365000d,365000d
sql use d0
print =============== create super table and register rsma
-sql create table if not exists stb (ts timestamp, c1 int) tags (city binary(20),district binary(20)) rollup(min) file_factor 0.1 delay 2;
+sql create table if not exists stb (ts timestamp, c1 int) tags (city binary(20),district binary(20)) rollup(min) file_factor 0.1;
sql show stables
if $rows != 1 then
diff --git a/tests/script/tsim/mnode/basic1.sim b/tests/script/tsim/mnode/basic1.sim
index 235889ece6da4aa2713d5dac2cc306f52cd694cd..d93d4ca53fc2b6c56e0a8eccf7d3ac3ea74ec0ee 100644
--- a/tests/script/tsim/mnode/basic1.sim
+++ b/tests/script/tsim/mnode/basic1.sim
@@ -6,15 +6,6 @@ system sh/exec.sh -n dnode2 -s start
sql connect
print =============== show dnodes
-sql show dnodes;
-if $rows != 1 then
- return -1
-endi
-
-if $data00 != 1 then
- return -1
-endi
-
sql show mnodes;
if $rows != 1 then
return -1
@@ -24,94 +15,131 @@ if $data00 != 1 then
return -1
endi
-if $data02 != LEADER then
+if $data02 != leader then
return -1
endi
print =============== create dnodes
sql create dnode $hostname port 7200
-sleep 2000
+$x = 0
+step1:
+ $x = $x + 1
+ sleep 500
+ if $x == 20 then
+ return -1
+ endi
+sql show dnodes -x step1
+if $data(1)[4] != ready then
+ goto step1
+endi
+if $data(2)[4] != ready then
+ goto step1
+endi
+
+sql_error create mnode on dnode 1
+sql_error drop mnode on dnode 1
+
+print =============== create mnode 2
+sql create mnode on dnode 2
+
+$x = 0
+step2:
+ $x = $x + 1
+ sleep 1000
+ if $x == 20 then
+ return -1
+ endi
+sql show mnodes
+print $data(1)[0] $data(1)[1] $data(1)[2]
+print $data(2)[0] $data(2)[1] $data(2)[2]
-sql show dnodes;
if $rows != 2 then
return -1
endi
-
-if $data00 != 1 then
+if $data(1)[0] != 1 then
return -1
endi
-
-if $data10 != 2 then
+if $data(1)[2] != leader then
return -1
endi
-
-print $data02
-if $data02 != 0 then
+if $data(2)[0] != 2 then
return -1
endi
-
-if $data12 != 0 then
- return -1
+if $data(2)[2] != follower then
+ goto step2
endi
-if $data04 != ready then
+sleep 2000
+print ============ drop mnode 2
+sql drop mnode on dnode 2
+sql show mnodes
+if $rows != 1 then
return -1
endi
+sql_error drop mnode on dnode 2
-if $data14 != ready then
- return -1
-endi
+$x = 0
+step2:
+ $x = $x + 1
+ sleep 1000
+ if $x == 20 then
+ return -1
+ endi
+sql show mnodes
+print $data(1)[0] $data(1)[1] $data(1)[2]
+print $data(2)[0] $data(2)[1] $data(2)[2]
-sql show mnodes;
if $rows != 1 then
return -1
endi
-
-if $data00 != 1 then
+if $data(1)[0] != 1 then
return -1
endi
-
-if $data02 != LEADER then
+if $data(1)[2] != leader then
return -1
endi
+if $data(2)[0] != null then
+ goto step2
+endi
+if $data(2)[2] != null then
+ goto step2
+endi
-print =============== create drop mnode 1
-sql_error create mnode on dnode 1
-sql_error drop mnode on dnode 1
+sleep 2000
-print =============== create drop mnode 2
+print =============== create mnodes
sql create mnode on dnode 2
sql show mnodes
if $rows != 2 then
return -1
endi
-sql_error create mnode on dnode 2
-sql drop mnode on dnode 2
+$x = 0
+step3:
+ $x = $x + 1
+ sleep 1000
+ if $x == 20 then
+ return -1
+ endi
sql show mnodes
-if $rows != 1 then
- return -1
-endi
-sql_error drop mnode on dnode 2
+print $data(1)[0] $data(1)[1] $data(1)[2]
+print $data(2)[0] $data(2)[1] $data(2)[2]
-print =============== create drop mnodes
-sql create mnode on dnode 2
-sql show mnodes
if $rows != 2 then
return -1
endi
-
-print =============== restart
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
-system sh/exec.sh -n dnode1 -s start
-system sh/exec.sh -n dnode2 -s start
-
-sleep 2000
-sql show mnodes
-if $rows != 2 then
+if $data(1)[0] != 1 then
+ return -1
+endi
+if $data(1)[2] != leader then
return -1
endi
+if $data(2)[0] != 2 then
+ return -1
+endi
+if $data(2)[2] != follower then
+ goto step3
+endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
diff --git a/tests/script/tsim/mnode/basic2.sim b/tests/script/tsim/mnode/basic2.sim
new file mode 100644
index 0000000000000000000000000000000000000000..78558263d619ee3e9cef2e03c51790823c95b6a9
--- /dev/null
+++ b/tests/script/tsim/mnode/basic2.sim
@@ -0,0 +1,134 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+sql connect
+
+print =============== show dnodes
+sql show mnodes;
+if $rows != 1 then
+ return -1
+endi
+
+if $data00 != 1 then
+ return -1
+endi
+
+if $data02 != leader then
+ return -1
+endi
+
+print =============== create dnodes
+sql create dnode $hostname port 7200
+$x = 0
+step1:
+ $x = $x + 1
+ sleep 500
+ if $x == 20 then
+ return -1
+ endi
+sql show dnodes -x step1
+if $data(1)[4] != ready then
+ goto step1
+endi
+if $data(2)[4] != ready then
+ goto step1
+endi
+
+print =============== create mnode 2
+sql create mnode on dnode 2
+
+$x = 0
+step2:
+ $x = $x + 1
+ sleep 1000
+ if $x == 20 then
+ return -1
+ endi
+sql show mnodes
+print $data(1)[0] $data(1)[1] $data(1)[2]
+print $data(2)[0] $data(2)[1] $data(2)[2]
+
+if $rows != 2 then
+ return -1
+endi
+if $data(1)[0] != 1 then
+ return -1
+endi
+if $data(1)[2] != leader then
+ return -1
+endi
+if $data(2)[0] != 2 then
+ return -1
+endi
+if $data(2)[2] != follower then
+ goto step2
+endi
+
+print =============== create user
+sql create user user1 PASS 'user1'
+sql show users
+if $rows != 2 then
+ return -1
+endi
+
+sql create database db
+sql show databases
+if $rows != 3 then
+ return -1
+endi
+
+sleep 5000
+
+print =============== restart
+system sh/exec.sh -n dnode1 -s stop
+system sh/exec.sh -n dnode2 -s stop
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+
+sql connect
+sql show mnodes
+if $rows != 2 then
+ return -1
+endi
+
+sql show users
+if $rows != 2 then
+ return -1
+endi
+
+sql show databases
+if $rows != 3 then
+ return -1
+endi
+
+$x = 0
+step3:
+ $x = $x + 1
+ sleep 500
+ if $x == 20 then
+ return -1
+ endi
+sql show dnodes -x step3
+if $data(1)[4] != ready then
+ goto step3
+endi
+if $data(2)[4] != ready then
+ goto step3
+endi
+
+print =============== insert data
+sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 float, t3 binary(16)) comment "abd"
+sql create table db.ctb using db.stb tags(101, 102, "103")
+sql insert into db.ctb values(now, 1, "2")
+
+sql select * from db.ctb
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+
+if $rows != 1 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop
+system sh/exec.sh -n dnode2 -s stop
\ No newline at end of file
diff --git a/tests/script/tsim/mnode/basic3.sim b/tests/script/tsim/mnode/basic3.sim
new file mode 100644
index 0000000000000000000000000000000000000000..bc70cd7a85522230a54359b8a4144eb4ce7a4eed
--- /dev/null
+++ b/tests/script/tsim/mnode/basic3.sim
@@ -0,0 +1,150 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+system sh/cfg.sh -n dnode1 -c transPullupInterval -v 1
+system sh/cfg.sh -n dnode2 -c transPullupInterval -v 1
+system sh/cfg.sh -n dnode3 -c transPullupInterval -v 1
+system sh/cfg.sh -n dnode4 -c transPullupInterval -v 1
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+sql connect
+
+print =============== step1: create dnodes
+sql create dnode $hostname port 7200
+sql create dnode $hostname port 7300
+sql create dnode $hostname port 7400
+
+$x = 0
+step1:
+ $x = $x + 1
+ sleep 1000
+ if $x == 10 then
+ return -1
+ endi
+sql show dnodes -x step1
+if $data(1)[4] != ready then
+ goto step1
+endi
+if $data(2)[4] != ready then
+ goto step1
+endi
+if $data(3)[4] != ready then
+ goto step1
+endi
+
+print =============== step2: create mnode 2
+sql create mnode on dnode 2
+sql create mnode on dnode 3
+sql_error create mnode on dnode 4
+
+$x = 0
+step2:
+ $x = $x + 1
+ sleep 1000
+ if $x == 10 then
+ return -1
+ endi
+sql show mnodes -x step2
+if $data(1)[2] != leader then
+ goto step2
+endi
+if $data(2)[2] != follower then
+ goto step2
+endi
+if $data(3)[2] != follower then
+ goto step2
+endi
+
+print =============== step3: create user
+sql create user user1 PASS 'user1'
+sql show users
+if $rows != 2 then
+ return -1
+endi
+
+# wait mnode2 mnode3 recv data finish
+sleep 10000
+
+print =============== step4: stop dnode1
+system sh/exec.sh -n dnode1 -s stop
+
+$x = 0
+step4:
+ $x = $x + 1
+ sleep 1000
+ if $x == 10 then
+ return -1
+ endi
+sql show mnodes -x step4
+print $data(1)[0] $data(1)[1] $data(1)[2]
+print $data(2)[0] $data(2)[1] $data(2)[2]
+print $data(3)[0] $data(3)[1] $data(3)[2]
+
+sql show users
+if $rows != 2 then
+ return -1
+endi
+
+sleep 1000
+sql show dnodes
+if $data(2)[4] != ready then
+ return -1
+endi
+if $data(3)[4] != ready then
+ return -1
+endi
+
+print =============== step5: stop dnode1
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s stop
+
+$x = 0
+step5:
+ $x = $x + 1
+ sleep 1000
+ if $x == 10 then
+ return -1
+ endi
+sql show mnodes -x step5
+print $data(1)[0] $data(1)[1] $data(1)[2]
+print $data(2)[0] $data(2)[1] $data(2)[2]
+print $data(3)[0] $data(3)[1] $data(3)[2]
+
+if $data(2)[2] != offline then
+ goto step5
+endi
+
+sql show users
+if $rows != 2 then
+ return -1
+endi
+
+print =============== step6: stop dnode1
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s stop
+
+$x = 0
+step6:
+ $x = $x + 1
+ sleep 1000
+ if $x == 10 then
+ return -1
+ endi
+sql show mnodes -x step6
+print $data(1)[0] $data(1)[1] $data(1)[2]
+print $data(2)[0] $data(2)[1] $data(2)[2]
+print $data(3)[0] $data(3)[1] $data(3)[2]
+
+sql show users
+if $rows != 2 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop
+system sh/exec.sh -n dnode2 -s stop
+system sh/exec.sh -n dnode3 -s stop
+system sh/exec.sh -n dnode4 -s stop
\ No newline at end of file
diff --git a/tests/script/tsim/mnode/basic4.sim b/tests/script/tsim/mnode/basic4.sim
new file mode 100644
index 0000000000000000000000000000000000000000..88deb5af898fde58d94f5129fb4e2a030795f29b
--- /dev/null
+++ b/tests/script/tsim/mnode/basic4.sim
@@ -0,0 +1,194 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+sql connect
+
+print =============== step1: create dnodes
+sql create dnode $hostname port 7200
+sql create dnode $hostname port 7300
+
+$x = 0
+step1:
+ $x = $x + 1
+ sleep 1000
+ if $x == 5 then
+ return -1
+ endi
+sql show dnodes -x step1
+if $data(1)[4] != ready then
+ goto step1
+endi
+if $data(2)[4] != ready then
+ goto step1
+endi
+
+print =============== step2: create mnode 2
+sql create mnode on dnode 2
+sql_error create mnode on dnode 3
+
+system sh/exec.sh -n dnode3 -s start
+
+$x = 0
+step2:
+ $x = $x + 1
+ sleep 1000
+ if $x == 5 then
+ return -1
+ endi
+sql show dnodes -x step2
+if $data(1)[4] != ready then
+ goto step2
+endi
+if $data(2)[4] != ready then
+ goto step2
+endi
+
+system sh/exec.sh -n dnode3 -s stop
+sql_error create mnode on dnode 3
+
+print =============== step3: show mnodes
+
+$x = 0
+step3:
+ $x = $x + 1
+ sleep 1000
+ if $x == 10 then
+ return -1
+ endi
+sql show mnodes -x step3
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4]
+print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4]
+
+if $data(1)[2] != leader then
+ goto step3
+endi
+if $data(2)[2] != follower then
+ goto step3
+endi
+if $data(3)[2] != offline then
+ goto step3
+endi
+if $data(1)[3] != ready then
+ goto step3
+endi
+if $data(2)[3] != ready then
+ goto step3
+endi
+if $data(3)[3] != creating then
+ goto step3
+endi
+
+print =============== step4: start dnode3
+system sh/exec.sh -n dnode3 -s start
+
+$x = 0
+step4:
+ $x = $x + 1
+ sleep 1000
+ if $x == 10 then
+ return -1
+ endi
+sql show mnodes -x step4
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4]
+print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4]
+
+if $data(1)[2] != leader then
+ goto step4
+endi
+if $data(2)[2] != follower then
+ goto step4
+endi
+if $data(3)[2] != follower then
+ goto step4
+endi
+if $data(1)[3] != ready then
+ goto step4
+endi
+if $data(2)[3] != ready then
+ goto step4
+endi
+if $data(3)[3] != ready then
+ goto step4
+endi
+
+print =============== step5: drop mnode 3 and stop dnode3
+system sh/exec.sh -n dnode3 -s stop
+sql_error drop mnode on dnode 3
+
+$x = 0
+step5:
+ $x = $x + 1
+ sleep 1000
+ if $x == 10 then
+ return -1
+ endi
+sql show mnodes -x step5
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4]
+print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4]
+
+if $data(1)[2] != leader then
+ goto step5
+endi
+if $data(2)[2] != follower then
+ goto step5
+endi
+if $data(3)[2] != offline then
+ goto step5
+endi
+if $data(1)[3] != ready then
+ goto step5
+endi
+if $data(2)[3] != ready then
+ goto step5
+endi
+if $data(3)[3] != dropping then
+ goto step5
+endi
+
+print =============== step6: start dnode3
+system sh/exec.sh -n dnode3 -s start
+
+$x = 0
+step6:
+ $x = $x + 1
+ sleep 1000
+ if $x == 10 then
+ return -1
+ endi
+sql show mnodes -x step6
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4]
+print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4]
+
+if $rows != 2 then
+ goto step6
+endi
+if $data(1)[2] != leader then
+ goto step6
+endi
+if $data(2)[2] != follower then
+ goto step6
+endi
+if $data(3)[2] != null then
+ goto step6
+endi
+if $data(1)[3] != ready then
+ goto step6
+endi
+if $data(2)[3] != ready then
+ goto step6
+endi
+if $data(3)[3] != null then
+ goto step6
+endi
+
+system sh/exec.sh -n dnode1 -s stop
+system sh/exec.sh -n dnode2 -s stop
+system sh/exec.sh -n dnode3 -s stop
+system sh/exec.sh -n dnode4 -s stop
diff --git a/tests/script/tsim/qnode/basic1.sim b/tests/script/tsim/qnode/basic1.sim
index 2351403909e9f641e2ada2789561a095a0e915d4..7108fcaf59ec420a8657dd8e061e5261ec15ce3c 100644
--- a/tests/script/tsim/qnode/basic1.sim
+++ b/tests/script/tsim/qnode/basic1.sim
@@ -24,7 +24,7 @@ if $data00 != 1 then
return -1
endi
-if $data02 != LEADER then
+if $data02 != leader then
return -1
endi
@@ -71,7 +71,7 @@ if $data00 != 1 then
return -1
endi
-if $data02 != LEADER then
+if $data02 != leader then
return -1
endi
diff --git a/tests/script/tsim/query/explain.sim b/tests/script/tsim/query/explain.sim
index 21162a99b0928040ae115b13c117864a170ef4e9..2b0d52d25327833221ffe53953f904d74ed1784a 100644
--- a/tests/script/tsim/query/explain.sim
+++ b/tests/script/tsim/query/explain.sim
@@ -1,7 +1,7 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
-print ========= start dnode1 as LEADER
+print ========= start dnode1 as leader
system sh/exec.sh -n dnode1 -s start
sql connect
diff --git a/tests/script/tsim/query/scalarNull.sim b/tests/script/tsim/query/scalarNull.sim
index c7e7aa9a349a98623033c40cebfa9c499ee0fe2a..b08ac1d3d9abe157915ec25c438d82e2774ced04 100644
--- a/tests/script/tsim/query/scalarNull.sim
+++ b/tests/script/tsim/query/scalarNull.sim
@@ -4,7 +4,7 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 2
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
-print ========= start dnode1 as LEADER
+print ========= start dnode1 as leader
system sh/exec.sh -n dnode1 -s start
sleep 2000
sql connect
diff --git a/tests/script/tsim/query/udf.sim b/tests/script/tsim/query/udf.sim
index 24ddcc1b75c70865f334f553a6b0f1ee176d62ca..93cae4e3912cab0b5c36e60d28743f0c10f1e45a 100644
--- a/tests/script/tsim/query/udf.sim
+++ b/tests/script/tsim/query/udf.sim
@@ -5,7 +5,7 @@ system sh/cfg.sh -n dnode1 -c wallevel -v 2
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode1 -c udf -v 1
-print ========= start dnode1 as LEADER
+print ========= start dnode1 as leader
system sh/exec.sh -n dnode1 -s start
sleep 1000
sql connect
diff --git a/tests/script/tsim/show/basic.sim b/tests/script/tsim/show/basic.sim
index e171d1abb9e3249c5b93ae01899b596e6238a4c4..95201bc48e0db1b57471039c80169ccdf4e30094 100644
--- a/tests/script/tsim/show/basic.sim
+++ b/tests/script/tsim/show/basic.sim
@@ -25,7 +25,7 @@ sql connect
# select */column from information_schema.xxxx; xxxx include:
# dnodes, mnodes, modules, qnodes,
-# user_databases, user_functions, user_indexes, user_stables, user_streams,
+# user_databases, user_functions, user_indexes, user_stables, streams,
# user_tables, user_table_distributed, user_users, vgroups,
print =============== add dnode2 into cluster
@@ -96,7 +96,7 @@ sql select * from information_schema.user_stables
if $rows != 1 then
return -1
endi
-#sql select * from information_schema.user_streams
+#sql select * from information_schema.`streams`
sql select * from information_schema.user_tables
if $rows != 28 then
return -1
@@ -194,7 +194,7 @@ sql select * from information_schema.user_stables
if $rows != 1 then
return -1
endi
-#sql select * from information_schema.user_streams
+#sql select * from performance_schema.`streams`
sql select * from information_schema.user_tables
if $rows != 28 then
return -1
diff --git a/tests/script/tsim/sma/rsmaCreateInsertQuery.sim b/tests/script/tsim/sma/rsmaCreateInsertQuery.sim
index 38ae0dc0a298d7743f3eb1466357ff0bbb621d06..f929dda18cb1b287c3ffe05487464624ff0eebc5 100644
--- a/tests/script/tsim/sma/rsmaCreateInsertQuery.sim
+++ b/tests/script/tsim/sma/rsmaCreateInsertQuery.sim
@@ -9,7 +9,7 @@ sql create database d0 retentions 15s:7d,1m:21d,15m:365d;
sql use d0
print =============== create super table and register rsma
-sql create table if not exists stb (ts timestamp, c1 int) tags (city binary(20),district binary(20)) rollup(min) file_factor 0.1 delay 2;
+sql create table if not exists stb (ts timestamp, c1 int) tags (city binary(20),district binary(20)) rollup(min) file_factor 0.1;
sql show stables
if $rows != 1 then
@@ -37,6 +37,15 @@ if $rows > 2 then
print retention level 2 file rows $rows > 2
return -1
endi
+
+
+if $data01 != 1 then
+ if $data01 != 10 then
+ print retention level 2 file result $data01 != 1 or 10
+ return -1
+ endi
+endi
+
print =============== select * from retention level 1 from memory
sql select * from ct1 where ts > now-8d;
print $data00 $data01
@@ -44,15 +53,30 @@ if $rows > 2 then
print retention level 1 file rows $rows > 2
return -1
endi
+
+if $data01 != 1 then
+ if $data01 != 10 then
+ print retention level 1 file result $data01 != 1 or 10
+ return -1
+ endi
+endi
+
print =============== select * from retention level 0 from memory
sql select * from ct1 where ts > now-3d;
print $data00 $data01
print $data10 $data11
print $data20 $data21
+
if $rows < 1 then
print retention level 0 file rows $rows < 1
return -1
endi
+
+if $data01 != 10 then
+ print retention level 0 file result $data01 != 10
+ return -1
+endi
+
#===================================================================
@@ -68,6 +92,13 @@ if $rows > 2 then
return -1
endi
+if $data01 != 1 then
+ if $data01 != 10 then
+ print retention level 2 file result $data01 != 1 or 10
+ return -1
+ endi
+endi
+
print =============== select * from retention level 1 from file
sql select * from ct1 where ts > now-8d;
print $data00 $data01
@@ -76,6 +107,13 @@ if $rows > 2 then
return -1
endi
+if $data01 != 1 then
+ if $data01 != 10 then
+ print retention level 1 file result $data01 != 1 or 10
+ return -1
+ endi
+endi
+
print =============== select * from retention level 0 from file
sql select * from ct1 where ts > now-3d;
print $data00 $data01
@@ -86,4 +124,9 @@ if $rows < 1 then
return -1
endi
+if $data01 != 10 then
+ print retention level 0 file result $data01 != 10
+ return -1
+endi
+
system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/tsim/sma/tsmaCreateInsertData.sim b/tests/script/tsim/sma/tsmaCreateInsertData.sim
index b7a127e1b0d67f9af620919740dae87e649c82cd..0202c53800260b4974cabe10ff4cbd9f180fd590 100644
--- a/tests/script/tsim/sma/tsmaCreateInsertData.sim
+++ b/tests/script/tsim/sma/tsmaCreateInsertData.sim
@@ -5,7 +5,7 @@ sleep 50
sql connect
print =============== create database
-sql create database d1
+sql create database d1 vgroups 1
sql use d1
print =============== create super table, include column type for count/sum/min/max/first
@@ -37,5 +37,12 @@ print =============== trigger stream to execute sma aggr task and insert sma dat
sql insert into ct1 values(now+5s, 20, 20.0, 30.0)
#===================================================================
+print =============== select * from ct1 from memory
+sql select * from ct1;
+print $data00 $data01
+if $rows != 5 then
+ print rows $rows != 5
+ return -1
+endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/snode/basic1.sim b/tests/script/tsim/snode/basic1.sim
index 660951c591bb9048b592e7be60492925b13b600d..a9d4867354e70a867d23e65ed03dda47b0b2524c 100644
--- a/tests/script/tsim/snode/basic1.sim
+++ b/tests/script/tsim/snode/basic1.sim
@@ -24,7 +24,7 @@ if $data00 != 1 then
return -1
endi
-if $data02 != LEADER then
+if $data02 != leader then
return -1
endi
@@ -71,7 +71,7 @@ if $data00 != 1 then
return -1
endi
-if $data02 != LEADER then
+if $data02 != leader then
return -1
endi
diff --git a/tests/script/tsim/stable/alter1.sim b/tests/script/tsim/stable/alter_comment.sim
similarity index 99%
rename from tests/script/tsim/stable/alter1.sim
rename to tests/script/tsim/stable/alter_comment.sim
index 1205f50f6ea144de6f5fae06ef7569a60b47e0cb..cfcbb9a1daa046c894bbfe47f4684ded5faf79a6 100644
--- a/tests/script/tsim/stable/alter1.sim
+++ b/tests/script/tsim/stable/alter_comment.sim
@@ -166,4 +166,5 @@ if $data[0][6] != abcde then
return -1
endi
+return
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/alter/count.sim b/tests/script/tsim/stable/alter_count.sim
similarity index 96%
rename from tests/script/general/alter/count.sim
rename to tests/script/tsim/stable/alter_count.sim
index fc936668b8ea08f9cd08874ad98668a4d8904315..e5af9a5735e6f7f9844d055be8d4c2892d6b2ed7 100644
--- a/tests/script/general/alter/count.sim
+++ b/tests/script/tsim/stable/alter_count.sim
@@ -1,13 +1,8 @@
system sh/stop_dnodes.sh
-
system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 2
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
-system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4
print ========= start dnode1 as master
system sh/exec.sh -n dnode1 -s start
-sleep 2000
sql connect
print ======== step1
@@ -141,10 +136,13 @@ endi
print ============= step10
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 3000
system sh/exec.sh -n dnode1 -s start
-sleep 3000
+sql connect
+
+sql select count(a), count(b), count(c), count(d), count(e), count(f), count(g), count(h) from d1.tb;
+sql select count(a), count(b), count(c), count(d), count(e), count(f), count(g), count(h) from d1.tb;
+sql use d1
sql select count(a), count(b), count(c), count(d), count(e), count(f), count(g), count(h) from tb
if $data00 != 24 then
return -1
diff --git a/tests/script/general/alter/import.sim b/tests/script/tsim/stable/alter_import.sim
similarity index 73%
rename from tests/script/general/alter/import.sim
rename to tests/script/tsim/stable/alter_import.sim
index aef0a258b24563e915cd8aa3dd42f6623a29170a..cdd7b60e14fc5e8f46f3413e9037a95f534718e1 100644
--- a/tests/script/general/alter/import.sim
+++ b/tests/script/tsim/stable/alter_import.sim
@@ -1,13 +1,8 @@
system sh/stop_dnodes.sh
-
system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 2
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
-system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4
print ========= start dnode1 as master
system sh/exec.sh -n dnode1 -s start
-sleep 2000
sql connect
print ======== step1
@@ -34,14 +29,14 @@ if $data00 != 3 then
endi
print ========= step3
-sql import into tb values(now-23d, -23, 0)
-sql import into tb values(now-21d, -21, 0)
+sql insert into tb values(now-23d, -23, 0)
+sql insert into tb values(now-21d, -21, 0)
sql select count(b) from tb
if $data00 != 5 then
return -1
endi
-sql import into tb values(now-29d, -29, 0)
+sql insert into tb values(now-29d, -29, 0)
sql select count(b) from tb
if $data00 != 6 then
return -1
diff --git a/tests/script/general/alter/insert1.sim b/tests/script/tsim/stable/alter_insert1.sim
similarity index 99%
rename from tests/script/general/alter/insert1.sim
rename to tests/script/tsim/stable/alter_insert1.sim
index 12ab09beb989dd963a9e8c9c3ff5926e78d8b0ac..82781f2fe5cadf0488c5107e9e54b06364629680 100644
--- a/tests/script/general/alter/insert1.sim
+++ b/tests/script/tsim/stable/alter_insert1.sim
@@ -1,10 +1,6 @@
system sh/stop_dnodes.sh
-
system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 2
system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
sql connect
print ======== step1
diff --git a/tests/script/general/alter/insert2.sim b/tests/script/tsim/stable/alter_insert2.sim
similarity index 98%
rename from tests/script/general/alter/insert2.sim
rename to tests/script/tsim/stable/alter_insert2.sim
index dcd9f500304f906ddddb33bd1a04c5943c232d49..a30175f3980cc117ec052ebb13a2e0b31b2cb316 100644
--- a/tests/script/general/alter/insert2.sim
+++ b/tests/script/tsim/stable/alter_insert2.sim
@@ -1,10 +1,6 @@
system sh/stop_dnodes.sh
-
system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 2
system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
sql connect
print ======== step1
diff --git a/tests/script/general/alter/metrics.sim b/tests/script/tsim/stable/alter_metrics.sim
similarity index 96%
rename from tests/script/general/alter/metrics.sim
rename to tests/script/tsim/stable/alter_metrics.sim
index fd0b210cd1b452b2a35ebcd9f74aec98c3817b03..f33246dfe2d14c092cb9483ce31c0788da9e5397 100644
--- a/tests/script/general/alter/metrics.sim
+++ b/tests/script/tsim/stable/alter_metrics.sim
@@ -1,10 +1,6 @@
system sh/stop_dnodes.sh
-
system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
sql connect
print ======== step1
@@ -351,7 +347,7 @@ endi
if $data80 != h then
return -1
endi
-if $data81 != BINARY then
+if $data81 != VARCHAR then
return -1
endi
if $data82 != 10 then
@@ -367,9 +363,8 @@ endi
print ======== step9
print ======== step10
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 3000
system sh/exec.sh -n dnode1 -s start
-sleep 3000
+sql connect
sql use d2
sql describe tb
@@ -424,7 +419,7 @@ endi
if $data80 != h then
return -1
endi
-if $data81 != BINARY then
+if $data81 != VARCHAR then
return -1
endi
if $data82 != 10 then
@@ -506,7 +501,7 @@ endi
if $data70 != h then
return -1
endi
-if $data71 != BINARY then
+if $data71 != VARCHAR then
return -1
endi
if $data72 != 10 then
@@ -561,7 +556,7 @@ endi
if $data60 != h then
return -1
endi
-if $data61 != BINARY then
+if $data61 != VARCHAR then
return -1
endi
if $data62 != 10 then
@@ -610,7 +605,7 @@ endi
if $data50 != h then
return -1
endi
-if $data51 != BINARY then
+if $data51 != VARCHAR then
return -1
endi
if $data52 != 10 then
@@ -653,7 +648,7 @@ endi
if $data40 != h then
return -1
endi
-if $data41 != BINARY then
+if $data41 != VARCHAR then
return -1
endi
if $data42 != 10 then
@@ -690,7 +685,7 @@ endi
if $data30 != h then
return -1
endi
-if $data31 != BINARY then
+if $data31 != VARCHAR then
return -1
endi
if $data32 != 10 then
@@ -721,7 +716,7 @@ endi
if $data20 != h then
return -1
endi
-if $data21 != BINARY then
+if $data21 != VARCHAR then
return -1
endi
if $data22 != 10 then
@@ -762,7 +757,7 @@ endi
print ======= over
sql drop database d2
sql show databases
-if $rows != 0 then
+if $rows != 2 then
return -1
endi
diff --git a/tests/script/tsim/stable/column_add.sim b/tests/script/tsim/stable/column_add.sim
new file mode 100644
index 0000000000000000000000000000000000000000..db592e6c69ee2fc3111b19b2502d67960ee943cf
--- /dev/null
+++ b/tests/script/tsim/stable/column_add.sim
@@ -0,0 +1,302 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+print ========== prepare stb and ctb
+sql create database db vgroups 1
+sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 float, t3 binary(16)) comment "abd"
+sql create table db.ctb using db.stb tags(101, 102, "103")
+sql insert into db.ctb values(now, 1, "2")
+
+sql show db.stables
+if $rows != 1 then
+ return -1
+endi
+if $data[0][0] != stb then
+ return -1
+endi
+if $data[0][1] != db then
+ return -1
+endi
+if $data[0][3] != 3 then
+ return -1
+endi
+if $data[0][4] != 3 then
+ return -1
+endi
+if $data[0][6] != abd then
+ return -1
+endi
+
+sql show db.tables
+if $rows != 1 then
+ return -1
+endi
+if $data[0][0] != ctb then
+ return -1
+endi
+if $data[0][1] != db then
+ return -1
+endi
+if $data[0][3] != 3 then
+ return -1
+endi
+if $data[0][4] != stb then
+ return -1
+endi
+if $data[0][6] != 2 then
+ return -1
+endi
+if $data[0][9] != CHILD_TABLE then
+ return -1
+endi
+
+sql select * from db.stb
+if $rows != 1 then
+ return -1
+endi
+if $data[0][1] != 1 then
+ return -1
+endi
+if $data[0][2] != 2 then
+ return -1
+endi
+if $data[0][3] != 101 then
+ return -1
+endi
+
+sql_error alter table db.stb add column ts int
+sql_error alter table db.stb add column t1 int
+sql_error alter table db.stb add column t2 int
+sql_error alter table db.stb add column t3 int
+sql_error alter table db.stb add column c1 int
+
+print ========== step1 add column c3
+sql alter table db.stb add column c3 int
+sql show db.stables
+if $data[0][3] != 4 then
+ return -1
+endi
+
+sql show db.tables
+if $data[0][3] != 4 then
+ return -1
+endi
+
+sql select * from db.stb
+sql select * from db.stb
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+if $rows != 1 then
+ return -1
+endi
+if $data[0][1] != 1 then
+ return -1
+endi
+if $data[0][2] != 2 then
+ return -1
+endi
+if $data[0][3] != NULL then
+ return -1
+endi
+if $data[0][4] != 101 then
+ return -1
+endi
+
+sql insert into db.ctb values(now+1s, 1, 2, 3)
+sql select * from db.stb
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+
+if $rows != 2 then
+ return -1
+endi
+if $data[0][1] != 1 then
+ return -1
+endi
+if $data[0][2] != 2 then
+ return -1
+endi
+if $data[0][3] != NULL then
+ return -1
+endi
+if $data[0][4] != 101 then
+ return -1
+endi
+if $data[1][1] != 1 then
+ return -1
+endi
+if $data[1][2] != 2 then
+ return -1
+endi
+if $data[1][3] != 3 then
+ return -1
+endi
+if $data[1][4] != 101 then
+ return -1
+endi
+
+print ========== step2 add column c4
+sql alter table db.stb add column c4 bigint
+sql select * from db.stb
+sql insert into db.ctb values(now+2s, 1, 2, 3, 4)
+sql select * from db.stb
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
+
+if $rows != 3 then
+ return -1
+endi
+if $data[0][1] != 1 then
+ return -1
+endi
+if $data[0][2] != 2 then
+ return -1
+endi
+if $data[0][3] != NULL then
+ return -1
+endi
+if $data[0][4] != NULL then
+ return -1
+endi
+if $data[0][5] != 101 then
+ return -1
+endi
+if $data[1][1] != 1 then
+ return -1
+endi
+if $data[1][2] != 2 then
+ return -1
+endi
+if $data[1][3] != 3 then
+ return -1
+endi
+if $data[1][4] != NULL then
+ return -1
+endi
+if $data[1][5] != 101 then
+ return -1
+endi
+if $data[2][1] != 1 then
+ return -1
+endi
+if $data[2][2] != 2 then
+ return -1
+endi
+if $data[2][3] != 3 then
+ return -1
+endi
+if $data[2][4] != 4 then
+ return -1
+endi
+if $data[2][5] != 101 then
+ return -1
+endi
+
+print ========== step3 add column c5
+sql alter table db.stb add column c5 int
+sql insert into db.ctb values(now+3s, 1, 2, 3, 4, 5)
+sql select * from db.stb
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
+
+if $rows != 4 then
+ return -1
+endi
+if $data[2][1] != 1 then
+ return -1
+endi
+if $data[2][2] != 2 then
+ return -1
+endi
+if $data[2][3] != 3 then
+ return -1
+endi
+if $data[2][4] != 4 then
+ return -1
+endi
+if $data[2][5] != NULL then
+ return -1
+endi
+if $data[2][6] != 101 then
+ return -1
+endi
+if $data[3][1] != 1 then
+ return -1
+endi
+if $data[3][2] != 2 then
+ return -1
+endi
+if $data[3][3] != 3 then
+ return -1
+endi
+if $data[3][4] != 4 then
+ return -1
+endi
+if $data[3][5] != 5 then
+ return -1
+endi
+if $data[3][6] != 101 then
+ return -1
+endi
+
+print ========== step4 add column c6
+sql alter table db.stb add column c6 int
+sql insert into db.ctb values(now+4s, 1, 2, 3, 4, 5, 6)
+sql select * from db.stb
+
+if $rows != 5 then
+ return -1
+endi
+if $data[3][1] != 1 then
+ return -1
+endi
+if $data[3][2] != 2 then
+ return -1
+endi
+if $data[3][3] != 3 then
+ return -1
+endi
+if $data[3][4] != 4 then
+ return -1
+endi
+if $data[3][5] != 5 then
+ return -1
+endi
+if $data[3][6] != NULL then
+ return -1
+endi
+if $data[3][7] != 101 then
+ return -1
+endi
+if $data[4][1] != 1 then
+ return -1
+endi
+if $data[4][2] != 2 then
+ return -1
+endi
+if $data[4][3] != 3 then
+ return -1
+endi
+if $data[4][4] != 4 then
+ return -1
+endi
+if $data[4][5] != 5 then
+ return -1
+endi
+if $data[4][6] != 6 then
+ return -1
+endi
+if $data[4][7] != 101 then
+ return -1
+endi
+
+print ========== step5 describe
+sql describe db.ctb
+if $rows != 10 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/tsim/stable/column_drop.sim b/tests/script/tsim/stable/column_drop.sim
new file mode 100644
index 0000000000000000000000000000000000000000..3401465103762d523b8cb5f15585f9924db4abfa
--- /dev/null
+++ b/tests/script/tsim/stable/column_drop.sim
@@ -0,0 +1,210 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+print ========== prepare stb and ctb
+sql create database db vgroups 1
+sql create table db.stb (ts timestamp, c1 int, c2 binary(4), c3 int, c4 bigint, c5 int, c6 int) tags(t1 int, t2 float, t3 binary(16)) comment "abd"
+sql create table db.ctb using db.stb tags(101, 102, "103")
+sql insert into db.ctb values(now, 1, "2", 3, 4, 5, 6)
+
+sql show db.stables
+if $rows != 1 then
+ return -1
+endi
+if $data[0][0] != stb then
+ return -1
+endi
+if $data[0][1] != db then
+ return -1
+endi
+if $data[0][3] != 7 then
+ return -1
+endi
+if $data[0][4] != 3 then
+ return -1
+endi
+if $data[0][6] != abd then
+ return -1
+endi
+
+sql show db.tables
+if $rows != 1 then
+ return -1
+endi
+if $data[0][0] != ctb then
+ return -1
+endi
+if $data[0][1] != db then
+ return -1
+endi
+if $data[0][3] != 7 then
+ return -1
+endi
+if $data[0][4] != stb then
+ return -1
+endi
+if $data[0][6] != 2 then
+ return -1
+endi
+if $data[0][9] != CHILD_TABLE then
+ return -1
+endi
+
+sql select * from db.stb
+if $rows != 1 then
+ return -1
+endi
+if $data[0][1] != 1 then
+ return -1
+endi
+if $data[0][2] != 2 then
+ return -1
+endi
+if $data[0][3] != 3 then
+ return -1
+endi
+if $data[0][4] != 4 then
+ return -1
+endi
+if $data[0][5] != 5 then
+ return -1
+endi
+if $data[0][6] != 6 then
+ return -1
+endi
+if $data[0][7] != 101 then
+ return -1
+endi
+
+sql_error alter table db.stb drop column ts
+sql_error alter table db.stb drop column t1
+sql_error alter table db.stb drop column t2
+sql_error alter table db.stb drop column t3
+sql_error alter table db.stb drop column c9
+
+print ========== step1 drop column c6
+sql alter table db.stb drop column c6
+sql show db.stables
+if $data[0][3] != 6 then
+ return -1
+endi
+
+sql show db.tables
+if $data[0][3] != 6 then
+ return -1
+endi
+
+sql select * from db.stb
+sql select * from db.stb
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+if $rows != 1 then
+ return -1
+endi
+if $data[0][1] != 1 then
+ return -1
+endi
+if $data[0][2] != 2 then
+ return -1
+endi
+if $data[0][3] != 3 then
+ return -1
+endi
+if $data[0][4] != 4 then
+ return -1
+endi
+if $data[0][5] != 5 then
+ return -1
+endi
+if $data[0][6] != 101 then
+ return -1
+endi
+
+sql insert into db.ctb values(now+1s, 1, 2, 3, 4, 5)
+sql select * from db.stb
+if $rows != 2 then
+ return -1
+endi
+
+print ========== step2 drop column c5
+sql alter table db.stb drop column c5
+sql_error insert into db.ctb values(now+2s, 1, 2, 3, 4, 5)
+sql insert into db.ctb values(now+2s, 1, 2, 3, 4)
+sql insert into db.ctb values(now+3s, 1, 2, 3, 4)
+sql_error insert into db.ctb values(now+2s, 1, 2, 3, 4, 5)
+
+sql select * from db.stb
+if $rows != 4 then
+ return -1
+endi
+
+print ========== step3 drop column c4
+sql alter table db.stb drop column c4
+sql select * from db.stb
+sql_error insert into db.ctb values(now+2s, 1, 2, 3, 4, 5)
+sql_error insert into db.ctb values(now+2s, 1, 2, 3, 4)
+sql insert into db.ctb values(now+3s, 1, 2, 3)
+
+sql select * from db.stb
+if $rows != 5 then
+ return -1
+endi
+
+print ========== step4 add column c4
+sql alter table db.stb add column c4 binary(13)
+sql insert into db.ctb values(now+4s, 1, 2, 3, '4')
+sql select * from db.stb
+if $rows != 6 then
+ return -1
+endi
+if $data[1][4] != NULL then
+ return -1
+endi
+if $data[2][4] != NULL then
+ return -1
+endi
+if $data[3][4] != NULL then
+ return -1
+endi
+if $data[5][4] != 4 then
+ return -1
+endi
+
+print ========== step5 describe
+sql describe db.ctb
+if $rows != 8 then
+ return -1
+endi
+if $data[0][0] != ts then
+ return -1
+endi
+if $data[1][0] != c1 then
+ return -1
+endi
+if $data[2][0] != c2 then
+ return -1
+endi
+if $data[3][0] != c3 then
+ return -1
+endi
+if $data[4][0] != c4 then
+ return -1
+endi
+if $data[4][1] != VARCHAR then
+ return -1
+endi
+if $data[4][2] != 13 then
+ return -1
+endi
+if $data[5][0] != t1 then
+ return -1
+endi
+if $data[6][0] != t2 then
+ return -1
+endi
+if $data[7][0] != t3 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/column_modify.sim b/tests/script/tsim/stable/column_modify.sim
new file mode 100644
index 0000000000000000000000000000000000000000..e2752ccf951cef30587aa1f604f92cbbaa265b85
--- /dev/null
+++ b/tests/script/tsim/stable/column_modify.sim
@@ -0,0 +1,109 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+print ========== prepare stb and ctb
+sql create database db vgroups 1
+sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 float, t3 binary(16)) comment "abd"
+sql create table db.ctb using db.stb tags(101, 102, "103")
+sql insert into db.ctb values(now, 1, "1234")
+
+sql_error alter table db.stb MODIFY column c2 binary(3)
+sql_error alter table db.stb MODIFY column c2 int
+sql_error alter table db.stb MODIFY column c1 int
+sql_error alter table db.stb MODIFY column ts int
+sql_error insert into db.ctb values(now, 1, "12345")
+
+print ========== step1 modify column
+sql alter table db.stb MODIFY column c2 binary(5)
+sql insert into db.ctb values(now, 1, "12345")
+
+sql select * from db.stb
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+
+if $rows != 2 then
+ return -1
+endi
+if $data[0][1] != 1 then
+ return -1
+endi
+if $data[0][2] != 1234 then
+ return -1
+endi
+if $data[0][3] != 101 then
+ return -1
+endi
+if $data[1][1] != 1 then
+ return -1
+endi
+if $data[1][2] != 12345 then
+ return -1
+endi
+if $data[1][3] != 101 then
+ return -1
+endi
+
+print ========== step2 describe
+sql describe db.ctb
+if $rows != 6 then
+ return -1
+endi
+if $data[0][0] != ts then
+ return -1
+endi
+if $data[1][0] != c1 then
+ return -1
+endi
+if $data[2][0] != c2 then
+ return -1
+endi
+if $data[2][1] != VARCHAR then
+ return -1
+endi
+if $data[2][2] != 5 then
+ return -1
+endi
+if $data[3][0] != t1 then
+ return -1
+endi
+if $data[4][0] != t2 then
+ return -1
+endi
+if $data[5][0] != t3 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/exec.sh -n dnode1 -s start
+
+sql connect
+
+sql select * from db.stb
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+
+if $rows != 2 then
+ return -1
+endi
+if $data[0][1] != 1 then
+ return -1
+endi
+if $data[0][2] != 1234 then
+ return -1
+endi
+if $data[0][3] != 101 then
+ return -1
+endi
+if $data[1][1] != 1 then
+ return -1
+endi
+if $data[1][2] != 12345 then
+ return -1
+endi
+if $data[1][3] != 101 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/tsim/stable/disk.sim b/tests/script/tsim/stable/disk.sim
index c1ced6ae1076b3b1cc5e8a79f31188c076a93f59..ff734b4234263ca71253dee97eaa0158fe5221c4 100644
--- a/tests/script/tsim/stable/disk.sim
+++ b/tests/script/tsim/stable/disk.sim
@@ -1,17 +1,9 @@
system sh/stop_dnodes.sh
-
-
system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
sql connect
print ======================== dnode1 start
-
$dbPrefix = d_db
$tbPrefix = d_tb
$mtPrefix = d_mt
@@ -57,11 +49,9 @@ if $data00 != $totalNum then
return -1
endi
-sleep 1000
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 3000
system sh/exec.sh -n dnode1 -s start
-sleep 6000
+sql connect
sql use $db
sql show vgroups
diff --git a/tests/script/tsim/stable/dnode3.sim b/tests/script/tsim/stable/dnode3.sim
index 706c4aa499ce3cebaedcbb71c24a9473a9069c9a..03e8df26b7543e61f0e8e52a1fd5bd8ab9de5e0f 100644
--- a/tests/script/tsim/stable/dnode3.sim
+++ b/tests/script/tsim/stable/dnode3.sim
@@ -1,19 +1,9 @@
system sh/stop_dnodes.sh
-
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-system sh/cfg.sh -n dnode3 -c walLevel -v 1
-system sh/cfg.sh -n dnode4 -c walLevel -v 1
-# system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
-# system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
-# system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
-# system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
system sh/exec.sh -n dnode1 -s start
-
sql connect
sql create dnode $hostname PORT 7200
diff --git a/tests/script/tsim/stable/metrics.sim b/tests/script/tsim/stable/metrics.sim
index e68d95511cfd3c4ea556e34ffed5111f05064405..c652670d7f4e904461adf33af8f1d10fc9e9e319 100644
--- a/tests/script/tsim/stable/metrics.sim
+++ b/tests/script/tsim/stable/metrics.sim
@@ -1,10 +1,6 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/exec.sh -n dnode1 -s start
-
-sleep 1000
sql connect
$dbPrefix = m_me_db
@@ -97,9 +93,6 @@ $i = 2
$tb = $tbPrefix . $i
sql insert into $tb values (now + 1m , 1 )
-print sleep 2000
-sleep 2000
-
print =============== step6
# sql select * from $mt
diff --git a/tests/script/tsim/stable/refcount.sim b/tests/script/tsim/stable/refcount.sim
index fffa6f75a4adfe2b52b1a7d1b587f6bf7a182ba4..d77c8e08900c1b0eeeee95bbfc4c6a4540558e6b 100644
--- a/tests/script/tsim/stable/refcount.sim
+++ b/tests/script/tsim/stable/refcount.sim
@@ -1,11 +1,6 @@
system sh/stop_dnodes.sh
-
system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
sql connect
print =============== step1
diff --git a/tests/script/tsim/stable/show.sim b/tests/script/tsim/stable/show.sim
index 823aefe9d86954dc8a3af85359ec02a475182aae..d3ab75adf5ac08dbd4c2a8a0870cfe4fbfd62a4d 100644
--- a/tests/script/tsim/stable/show.sim
+++ b/tests/script/tsim/stable/show.sim
@@ -1,14 +1,9 @@
system sh/stop_dnodes.sh
-
system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
sql connect
print ======================== create stable
-
sql create database d1
sql use d1
diff --git a/tests/script/tsim/stable/add_column.sim b/tests/script/tsim/stable/tag_add.sim
similarity index 52%
rename from tests/script/tsim/stable/add_column.sim
rename to tests/script/tsim/stable/tag_add.sim
index 0b2df509f9f4c32f60fd073076517911d1f84f3e..a7615df14c3fc51851feb19937c51cbead7c8ea2 100644
--- a/tests/script/tsim/stable/add_column.sim
+++ b/tests/script/tsim/stable/tag_add.sim
@@ -5,8 +5,8 @@ sql connect
print ========== prepare stb and ctb
sql create database db vgroups 1
-sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 float, t3 binary(16)) comment "abd"
-sql create table db.ctb using db.stb tags(101, 102, "103")
+sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 binary(16)) comment "abd"
+sql create table db.ctb using db.stb tags(101, "102")
sql insert into db.ctb values(now, 1, "2")
sql show db.stables
@@ -22,7 +22,7 @@ endi
if $data[0][3] != 3 then
return -1
endi
-if $data[0][4] != 3 then
+if $data[0][4] != 2 then
return -1
endi
if $data[0][6] != abd then
@@ -65,20 +65,43 @@ endi
if $data[0][3] != 101 then
return -1
endi
+if $data[0][4] != 102 then
+ return -1
+endi
+
+sql_error alter table db.stb add tag ts int
+sql_error alter table db.stb add tag t1 int
+sql_error alter table db.stb add tag t2 int
+sql_error alter table db.stb add tag c1 int
+sql_error alter table db.stb add tag c2 int
+
+print ========== step1 add tag t3
+sql alter table db.stb add tag t3 int
-print ========== add column c3
-sql alter table db.stb add column c3 int
sql show db.stables
-if $data[0][3] != 4 then
+if $data[0][3] != 3 then
return -1
endi
sql show db.tables
-if $data[0][3] != 4 then
+if $data[0][3] != 3 then
+ return -1
+endi
+
+sql describe db.ctb
+if $rows != 6 then
+ return -1
+endi
+if $data[5][0] != t3 then
+ return -1
+endi
+if $data[5][1] != INT then
+ return -1
+endi
+if $data[5][2] != 4 then
return -1
endi
-sql select * from db.stb
sql select * from db.stb
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
if $rows != 1 then
@@ -90,19 +113,25 @@ endi
if $data[0][2] != 2 then
return -1
endi
-if $data[0][3] != NULL then
+if $data[0][3] != 101 then
+ return -1
+endi
+if $data[0][4] != 102 then
return -1
endi
-if $data[0][4] != 101 then
+if $data[0][5] != NULL then
return -1
endi
-sql insert into db.ctb values(now+1s, 1, 2, 3)
+print ========== step2 add tag t4
+sql alter table db.stb add tag t4 bigint
+sql select * from db.stb
sql select * from db.stb
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
-if $rows != 2 then
+if $rows != 1 then
return -1
endi
if $data[0][1] != 1 then
@@ -111,31 +140,54 @@ endi
if $data[0][2] != 2 then
return -1
endi
-if $data[0][3] != NULL then
+if $data[0][3] != 101 then
+ return -1
+endi
+if $data[0][4] != 102 then
return -1
endi
-if $data[0][4] != 101 then
+if $data[0][5] != NULL then
return -1
endi
-if $data[1][1] != 1 then
+if $data[0][6] != NULL then
return -1
endi
-if $data[2][2] != 2 then
+
+sql_error create table db.ctb2 using db.stb tags(101, "102")
+sql create table db.ctb2 using db.stb tags(101, "102", 103, 104)
+sql insert into db.ctb2 values(now, 1, "2")
+
+sql select * from db.stb where tbname = 'ctb2';
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
+
+if $rows != 1 then
+ return -1
+endi
+if $data[0][1] != 1 then
+ return -1
+endi
+if $data[0][2] != 2 then
+ return -1
+endi
+if $data[0][3] != 101 then
return -1
endi
-if $data[1][3] != 3 then
+if $data[0][4] != 102 then
return -1
endi
-if $data[1][4] != 101 then
+if $data[0][5] != 103 then
+ return -1
+endi
+if $data[0][6] != 104 then
return -1
endi
-print ========== add column c4
-sql alter table db.stb add column c4 bigint
-sql insert into db.ctb values(now+2s, 1, 2, 3, 4)
-sql select * from db.stb
-sql select * from db.stb
-print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
-print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
-print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print ========== step3 describe
+sql describe db.ctb
+if $rows != 7 then
+ return -1
+endi
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/tsim/stable/tag_drop.sim b/tests/script/tsim/stable/tag_drop.sim
new file mode 100644
index 0000000000000000000000000000000000000000..50907be23efb005071820c8f1baa4ca58b0b727b
--- /dev/null
+++ b/tests/script/tsim/stable/tag_drop.sim
@@ -0,0 +1,337 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+print ========== prepare stb and ctb
+sql create database db vgroups 1
+sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 binary(16)) comment "abd"
+sql create table db.ctb using db.stb tags(101, "102")
+sql insert into db.ctb values(now, 1, "2")
+
+sql show db.stables
+if $rows != 1 then
+ return -1
+endi
+if $data[0][0] != stb then
+ return -1
+endi
+if $data[0][1] != db then
+ return -1
+endi
+if $data[0][3] != 3 then
+ return -1
+endi
+if $data[0][4] != 2 then
+ return -1
+endi
+if $data[0][6] != abd then
+ return -1
+endi
+
+sql show db.tables
+if $rows != 1 then
+ return -1
+endi
+if $data[0][0] != ctb then
+ return -1
+endi
+if $data[0][1] != db then
+ return -1
+endi
+if $data[0][3] != 3 then
+ return -1
+endi
+if $data[0][4] != stb then
+ return -1
+endi
+if $data[0][6] != 2 then
+ return -1
+endi
+if $data[0][9] != CHILD_TABLE then
+ return -1
+endi
+
+sql select * from db.stb
+if $rows != 1 then
+ return -1
+endi
+if $data[0][1] != 1 then
+ return -1
+endi
+if $data[0][2] != 2 then
+ return -1
+endi
+if $data[0][3] != 101 then
+ return -1
+endi
+if $data[0][4] != 102 then
+ return -1
+endi
+
+sql_error alter table db.stb drop tag ts int
+sql_error alter table db.stb drop tag t3 int
+sql_error alter table db.stb drop tag t4 int
+sql_error alter table db.stb drop tag c1 int
+sql_error alter table db.stb drop tag c2 int
+
+print ========== step1 drop tag t2
+sql alter table db.stb drop tag t2
+
+sql show db.stables
+if $data[0][4] != 1 then
+ return -1
+endi
+
+sql describe db.ctb
+if $rows != 4 then
+ return -1
+endi
+if $data[4][0] != null then
+ return -1
+endi
+
+sql select * from db.stb
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+if $rows != 1 then
+ return -1
+endi
+if $data[0][1] != 1 then
+ return -1
+endi
+if $data[0][2] != 2 then
+ return -1
+endi
+if $data[0][3] != 101 then
+ return -1
+endi
+if $data[0][4] != null then
+ return -1
+endi
+
+print ========== step2 add tag t3
+sql alter table db.stb add tag t3 int
+
+sql show db.stables
+if $data[0][4] != 2 then
+ return -1
+endi
+
+sql describe db.ctb
+if $rows != 5 then
+ return -1
+endi
+if $data[4][0] != t3 then
+ return -1
+endi
+if $data[4][1] != INT then
+ return -1
+endi
+if $data[4][2] != 4 then
+ return -1
+endi
+
+sql select * from db.stb
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+if $rows != 1 then
+ return -1
+endi
+if $data[0][1] != 1 then
+ return -1
+endi
+if $data[0][2] != 2 then
+ return -1
+endi
+if $data[0][3] != 101 then
+ return -1
+endi
+if $data[0][4] != NULL then
+ return -1
+endi
+
+print ========== step3 add tag t4
+sql alter table db.stb add tag t4 bigint
+sql select * from db.stb
+sql select * from db.stb
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
+
+if $rows != 1 then
+ return -1
+endi
+if $data[0][1] != 1 then
+ return -1
+endi
+if $data[0][2] != 2 then
+ return -1
+endi
+if $data[0][3] != 101 then
+ return -1
+endi
+if $data[0][4] != NULL then
+ return -1
+endi
+if $data[0][5] != NULL then
+ return -1
+endi
+if $data[0][6] != null then
+ return -1
+endi
+
+sql_error create table db.ctb2 using db.stb tags(101, "102")
+sql create table db.ctb2 using db.stb tags(201, 202, 203)
+sql insert into db.ctb2 values(now, 1, "2")
+
+sql select * from db.stb where tbname = 'ctb2';
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
+
+if $rows != 1 then
+ return -1
+endi
+if $data[0][1] != 1 then
+ return -1
+endi
+if $data[0][2] != 2 then
+ return -1
+endi
+if $data[0][3] != 201 then
+ return -1
+endi
+if $data[0][4] != 202 then
+ return -1
+endi
+if $data[0][5] != 203 then
+ return -1
+endi
+
+print ========== step4 describe
+sql describe db.ctb
+if $rows != 6 then
+ return -1
+endi
+
+print ========== step5 add tag2
+sql alter table db.stb add tag t2 bigint
+sql select * from db.stb where tbname = 'ctb2';
+sql select * from db.stb where tbname = 'ctb2';
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
+
+if $rows != 1 then
+ return -1
+endi
+if $data[0][1] != 1 then
+ return -1
+endi
+if $data[0][2] != 2 then
+ return -1
+endi
+if $data[0][3] != 201 then
+ return -1
+endi
+if $data[0][4] != 202 then
+ return -1
+endi
+if $data[0][5] != 203 then
+ return -1
+endi
+if $data[0][6] != NULL then
+ return -1
+endi
+
+sql_error create table db.ctb2 using db.stb tags(101, "102")
+sql_error create table db.ctb2 using db.stb tags(201, 202, 203)
+sql create table db.ctb3 using db.stb tags(301, 302, 303, 304)
+sql insert into db.ctb3 values(now, 1, "2")
+
+sql select * from db.stb where tbname = 'ctb3';
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
+
+if $rows != 1 then
+ return -1
+endi
+if $data[0][1] != 1 then
+ return -1
+endi
+if $data[0][2] != 2 then
+ return -1
+endi
+if $data[0][3] != 301 then
+ return -1
+endi
+if $data[0][4] != 302 then
+ return -1
+endi
+if $data[0][5] != 303 then
+ return -1
+endi
+if $data[0][6] != 304 then
+ return -1
+endi
+
+print ========== step6 describe
+sql describe db.ctb
+if $rows != 7 then
+ return -1
+endi
+
+if $data[3][0] != t1 then
+ return -1
+endi
+if $data[4][0] != t3 then
+ return -1
+endi
+if $data[5][0] != t4 then
+ return -1
+endi
+if $data[6][0] != t2 then
+ return -1
+endi
+if $data[6][1] != BIGINT then
+ return -1
+endi
+
+print ========== step7 drop tag t1
+sql alter table db.stb drop tag t1
+
+sql show db.stables
+if $data[0][4] != 3 then
+ return -1
+endi
+
+sql describe db.ctb
+if $rows != 6 then
+ return -1
+endi
+
+sql select * from db.stb where tbname = 'ctb3';
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
+
+if $rows != 1 then
+ return -1
+endi
+if $data[0][1] != 1 then
+ return -1
+endi
+if $data[0][2] != 2 then
+ return -1
+endi
+if $data[0][3] != 302 then
+ return -1
+endi
+if $data[0][4] != 303 then
+ return -1
+endi
+if $data[0][5] != 304 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/tsim/stable/tag_filter.sim b/tests/script/tsim/stable/tag_filter.sim
new file mode 100644
index 0000000000000000000000000000000000000000..c8edfb1ee3862046875c6f432be8602b43120a9a
--- /dev/null
+++ b/tests/script/tsim/stable/tag_filter.sim
@@ -0,0 +1,59 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+print ========== prepare stb and ctb
+sql create database db vgroups 1
+sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 binary(16)) comment "abd"
+
+
+sql create table db.ctb1 using db.stb tags(1, "102")
+sql insert into db.ctb1 values(now, 1, "2")
+
+sql create table db.ctb2 using db.stb tags(2, "102")
+sql insert into db.ctb2 values(now, 2, "2")
+
+sql create table db.ctb3 using db.stb tags(3, "102")
+sql insert into db.ctb3 values(now, 3, "2")
+
+sql create table db.ctb4 using db.stb tags(4, "102")
+sql insert into db.ctb4 values(now, 4, "2")
+
+sql create table db.ctb5 using db.stb tags(5, "102")
+sql insert into db.ctb5 values(now, 5, "2")
+
+sql create table db.ctb6 using db.stb tags(6, "102")
+sql insert into db.ctb6 values(now, 6, "2")
+
+sql select * from db.stb where t1 = 1
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.stb where t1 < 1
+if $rows != 0 then
+ return -=1
+endi
+
+sql select * from db.stb where t1 < 2
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.stb where t1 <= 2
+if $rows != 2 then
+ return -1
+endi
+
+sql select * from db.stb where t1 >= 1
+if $rows != 6 then
+ return -1
+endi
+
+sql select * from db.stb where t1 > 1
+if $rows != 5 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/tag_modify.sim b/tests/script/tsim/stable/tag_modify.sim
new file mode 100644
index 0000000000000000000000000000000000000000..62e4c7b28255ee085250cb4fc43612116fc50be0
--- /dev/null
+++ b/tests/script/tsim/stable/tag_modify.sim
@@ -0,0 +1,123 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+print ========== prepare stb and ctb
+sql create database db vgroups 1
+sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 binary(4)) comment "abd"
+
+sql_error alter table db.stb MODIFY tag c2 binary(3)
+sql_error alter table db.stb MODIFY tag c2 int
+sql_error alter table db.stb MODIFY tag c1 int
+sql_error alter table db.stb MODIFY tag ts int
+sql_error alter table db.stb MODIFY tag t2 binary(3)
+sql_error alter table db.stb MODIFY tag t2 int
+sql_error alter table db.stb MODIFY tag t1 int
+sql create table db.ctb using db.stb tags(101, "12345")
+sql insert into db.ctb values(now, 1, "1234")
+
+sql select * from db.stb
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+
+if $rows != 1 then
+ return -1
+endi
+if $data[0][1] != 1 then
+ return -1
+endi
+if $data[0][2] != 1234 then
+ return -1
+endi
+if $data[0][3] != 101 then
+ return -1
+endi
+if $data[0][4] != 1234 then
+ return -1
+endi
+
+print ========== step1 modify tag
+sql alter table db.stb MODIFY tag t2 binary(5)
+sql select * from db.stb
+
+sql create table db.ctb2 using db.stb tags(101, "12345")
+sql insert into db.ctb2 values(now, 1, "1234")
+
+sql select * from db.stb where tbname = 'ctb2';
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+
+if $rows != 1 then
+ return -1
+endi
+if $data[0][1] != 1 then
+ return -1
+endi
+if $data[0][2] != 1234 then
+ return -1
+endi
+if $data[0][3] != 101 then
+ return -1
+endi
+if $data[0][4] != 12345 then
+ return -1
+endi
+
+print ========== step2 describe
+sql describe db.ctb2
+if $rows != 5 then
+ return -1
+endi
+if $data[0][0] != ts then
+ return -1
+endi
+if $data[1][0] != c1 then
+ return -1
+endi
+if $data[2][0] != c2 then
+ return -1
+endi
+if $data[3][0] != t1 then
+ return -1
+endi
+if $data[4][0] != t2 then
+ return -1
+endi
+if $data[4][1] != VARCHAR then
+ return -1
+endi
+if $data[4][2] != 5 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/exec.sh -n dnode1 -s start
+
+sql connect
+sql describe db.ctb2
+if $rows != 5 then
+ return -1
+endi
+if $data[0][0] != ts then
+ return -1
+endi
+if $data[1][0] != c1 then
+ return -1
+endi
+if $data[2][0] != c2 then
+ return -1
+endi
+if $data[3][0] != t1 then
+ return -1
+endi
+if $data[4][0] != t2 then
+ return -1
+endi
+if $data[4][1] != VARCHAR then
+ return -1
+endi
+if $data[4][2] != 5 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/tsim/stable/tag_rename.sim b/tests/script/tsim/stable/tag_rename.sim
new file mode 100644
index 0000000000000000000000000000000000000000..2f67a3ab2c51d8c8499219ea8779b23797d9d0af
--- /dev/null
+++ b/tests/script/tsim/stable/tag_rename.sim
@@ -0,0 +1,120 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+print ========== prepare stb and ctb
+sql create database db vgroups 1
+sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 binary(4)) comment "abd"
+
+sql_error alter table db.stb rename tag c2 c3
+sql_error alter table db.stb rename tag c2 c3
+sql_error alter table db.stb rename tag c1 c3
+sql_error alter table db.stb rename tag ts c3
+sql_error alter table db.stb rename tag t2 t1
+sql_error alter table db.stb rename tag t2 t2
+sql_error alter table db.stb rename tag t1 t2
+sql create table db.ctb using db.stb tags(101, "12345")
+sql insert into db.ctb values(now, 1, "1234")
+
+sql select * from db.stb
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+
+if $rows != 1 then
+ return -1
+endi
+if $data[0][1] != 1 then
+ return -1
+endi
+if $data[0][2] != 1234 then
+ return -1
+endi
+if $data[0][3] != 101 then
+ return -1
+endi
+if $data[0][4] != 1234 then
+ return -1
+endi
+
+print ========== step1 rename tag
+sql alter table db.stb rename tag t1 t3
+sql select * from db.stb
+sql select * from db.stb
+
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+
+if $rows != 1 then
+ return -1
+endi
+if $data[0][1] != 1 then
+ return -1
+endi
+if $data[0][2] != 1234 then
+ return -1
+endi
+if $data[0][3] != 101 then
+ return -1
+endi
+if $data[0][4] != 1234 then
+ return -1
+endi
+
+print ========== step2 describe
+sql describe db.ctb
+if $rows != 5 then
+ return -1
+endi
+if $data[0][0] != ts then
+ return -1
+endi
+if $data[1][0] != c1 then
+ return -1
+endi
+if $data[2][0] != c2 then
+ return -1
+endi
+if $data[3][0] != t3 then
+ return -1
+endi
+if $data[4][0] != t2 then
+ return -1
+endi
+if $data[4][1] != VARCHAR then
+ return -1
+endi
+if $data[4][2] != 4 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/exec.sh -n dnode1 -s start
+
+sql connect
+sql describe db.ctb
+if $rows != 5 then
+ return -1
+endi
+if $data[0][0] != ts then
+ return -1
+endi
+if $data[1][0] != c1 then
+ return -1
+endi
+if $data[2][0] != c2 then
+ return -1
+endi
+if $data[3][0] != t3 then
+ return -1
+endi
+if $data[4][0] != t2 then
+ return -1
+endi
+if $data[4][1] != VARCHAR then
+ return -1
+endi
+if $data[4][2] != 4 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/tsim/stable/values.sim b/tests/script/tsim/stable/values.sim
index e5e3118e12634f41b0d124d3ba379b8f93df442f..88eca28a12c6a48c5c39178f194e8836864e71d8 100644
--- a/tests/script/tsim/stable/values.sim
+++ b/tests/script/tsim/stable/values.sim
@@ -1,16 +1,9 @@
system sh/stop_dnodes.sh
-
-
system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
sql connect
print ======================== dnode1 start
-
sql create database vdb0
sql create table vdb0.mt (ts timestamp, tbcol int) TAGS(tgcol int)
diff --git a/tests/script/tsim/stable/vnode3.sim b/tests/script/tsim/stable/vnode3.sim
index 97a8203883cc5f427ccc355cf5898b1e3ebe6cd2..186d0f5eea254aeb451f48c3cbf7d0d094723c09 100644
--- a/tests/script/tsim/stable/vnode3.sim
+++ b/tests/script/tsim/stable/vnode3.sim
@@ -1,16 +1,9 @@
system sh/stop_dnodes.sh
-
system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
sql connect
print ======================== dnode1 start
-
$dbPrefix = v3_db
$tbPrefix = v3_tb
$mtPrefix = v3_mt
diff --git a/tests/script/tsim/stream/basic0.sim b/tests/script/tsim/stream/basic0.sim
index 9edad991dc0ac5c5c960be026c1fd17073d17881..29775a5ef1d1daf90122f053da6c153bac843341 100644
--- a/tests/script/tsim/stream/basic0.sim
+++ b/tests/script/tsim/stream/basic0.sim
@@ -63,7 +63,8 @@ if $data02 != 234 then
return -1
endi
-if $data03 != 234 then
+if $data03 != 234 then
+ print expect 234, actual $data03
return -1
endi
diff --git a/tests/script/tsim/stream/session0.sim b/tests/script/tsim/stream/session0.sim
new file mode 100644
index 0000000000000000000000000000000000000000..41a8b3371002848dd6909ab1c681bde0628e6324
--- /dev/null
+++ b/tests/script/tsim/stream/session0.sim
@@ -0,0 +1,162 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sleep 50
+sql connect
+
+print =============== create database
+sql create database test vgroups 1
+sql show databases
+if $rows != 3 then
+ return -1
+endi
+
+print $data00 $data01 $data02
+
+sql use test
+
+
+sql create table t1(ts timestamp, a int, b int , c int, d double,id int);
+sql create stream streams2 trigger at_once into streamt as select _wstartts, count(*) c1, sum(a), max(a), min(d), stddev(a), last(a), first(d), max(id) s from t1 session(ts,10s);
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL,1);
+sql insert into t1 values(1648791223001,10,2,3,1.1,2);
+sql insert into t1 values(1648791233002,3,2,3,2.1,3);
+sql insert into t1 values(1648791243003,NULL,NULL,NULL,NULL,4);
+sql insert into t1 values(1648791213002,NULL,NULL,NULL,NULL,5) (1648791233012,NULL,NULL,NULL,NULL,6);
+sleep 300
+sql select * from streamt order by s desc;
+
+# row 0
+if $data01 != 3 then
+ print ======$data01
+ return -1
+endi
+
+if $data02 != 3 then
+ print ======$data02
+ return -1
+endi
+
+if $data03 != 3 then
+ print ======$data03
+ return -1
+endi
+
+if $data04 != 2.100000000 then
+ print ======$data04
+ return -1
+endi
+
+if $data05 != 0.000000000 then
+ print ======$data05
+ return -1
+endi
+
+if $data06 != 3 then
+ print ======$data05
+ return -1
+endi
+
+if $data07 != 2.100000000 then
+ print ======$data05
+ return -1
+endi
+
+if $data08 != 6 then
+ print ======$data05
+ return -1
+endi
+
+# row 1
+
+if $data11 != 3 then
+ print ======$data01
+ return -1
+endi
+
+if $data12 != 10 then
+ print ======$data02
+ return -1
+endi
+
+if $data13 != 10 then
+ print ======$data03
+ return -1
+endi
+
+if $data14 != 1.100000000 then
+ print ======$data04
+ return -1
+endi
+
+if $data15 != 0.000000000 then
+ print ======$data05
+ return -1
+endi
+
+if $data16 != 10 then
+ print ======$data05
+ return -1
+endi
+
+if $data17 != 1.100000000 then
+ print ======$data05
+ return -1
+endi
+
+if $data18 != 5 then
+ print ======$data05
+ return -1
+endi
+
+sql insert into t1 values(1648791213000,1,2,3,1.0,7);
+sql insert into t1 values(1648791223001,2,2,3,1.1,8);
+sql insert into t1 values(1648791233002,3,2,3,2.1,9);
+sql insert into t1 values(1648791243003,4,2,3,3.1,10);
+sql insert into t1 values(1648791213002,4,2,3,4.1,11) ;
+sql insert into t1 values(1648791213002,4,2,3,4.1,12) (1648791223009,4,2,3,4.1,13);
+sleep 300
+sql select * from streamt order by s desc ;
+
+# row 0
+if $data01 != 7 then
+ print ======$data01
+ return -1
+endi
+
+if $data02 != 9 then
+ print ======$data02
+ return -1
+endi
+
+if $data03 != 4 then
+ print ======$data03
+ return -1
+endi
+
+if $data04 != 1.100000000 then
+ print ======$data04
+ return -1
+endi
+
+if $data05 != 0.816496581 then
+ print ======$data05
+ return -1
+endi
+
+if $data06 != 3 then
+ print ======$data05
+ return -1
+endi
+
+if $data07 != 1.100000000 then
+ print ======$data05
+ return -1
+endi
+
+if $data08 != 13 then
+ print ======$data05
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stream/session1.sim b/tests/script/tsim/stream/session1.sim
new file mode 100644
index 0000000000000000000000000000000000000000..fb31818f98138948ca91758e14de85146b9940d5
--- /dev/null
+++ b/tests/script/tsim/stream/session1.sim
@@ -0,0 +1,190 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sleep 50
+sql connect
+
+print =============== create database
+sql create database test vgroups 1
+sql show databases
+if $rows != 3 then
+ return -1
+endi
+
+print $data00 $data01 $data02
+
+sql use test
+
+
+sql create table t1(ts timestamp, a int, b int , c int, d double,id int);
+sql create stream streams2 trigger at_once into streamt as select _wstartts, count(*) c1, sum(a), min(b), max(id) s from t1 session(ts,10s);
+sql insert into t1 values(1648791210000,1,1,1,1.1,1);
+sql insert into t1 values(1648791220000,2,2,2,2.1,2);
+sql insert into t1 values(1648791230000,3,3,3,3.1,3);
+sql insert into t1 values(1648791240000,4,4,4,4.1,4);
+sleep 300
+sql select * from streamt order by s desc;
+
+# row 0
+if $data01 != 4 then
+ print ======$data01
+ return -1
+endi
+
+if $data02 != 10 then
+ print ======$data02
+ return -1
+endi
+
+if $data03 != 1 then
+ print ======$data03
+ return -1
+endi
+
+if $data04 != 4 then
+ print ======$data04
+ return -1
+endi
+
+sql insert into t1 values(1648791250005,5,5,5,5.1,5);
+sql insert into t1 values(1648791260006,6,6,6,6.1,6);
+sql insert into t1 values(1648791270007,7,7,7,7.1,7);
+sql insert into t1 values(1648791240005,5,5,5,5.1,8) (1648791250006,6,6,6,6.1,9);
+sleep 300
+sql select * from streamt order by s desc;
+
+# row 0
+if $data01 != 8 then
+ print ======$data01
+ return -1
+endi
+
+if $data02 != 32 then
+ print ======$data02
+ return -1
+endi
+
+if $data03 != 1 then
+ print ======$data03
+ return -1
+endi
+
+if $data04 != 9 then
+ print ======$data04
+ return -1
+endi
+
+# row 1
+if $data11 != 1 then
+ print ======$data11
+ return -1
+endi
+
+if $data12 != 7 then
+ print ======$data12
+ return -1
+endi
+
+if $data13 != 7 then
+ print ======$data13
+ return -1
+endi
+
+if $data14 != 7 then
+ print ======$data14
+ return -1
+endi
+
+sql insert into t1 values(1648791280008,7,7,7,7.1,10) (1648791300009,8,8,8,8.1,11);
+sql insert into t1 values(1648791260007,7,7,7,7.1,12) (1648791290008,7,7,7,7.1,13) (1648791290009,8,8,8,8.1,14);
+sql insert into t1 values(1648791500000,7,7,7,7.1,15) (1648791520000,8,8,8,8.1,16) (1648791540000,8,8,8,8.1,17);
+sql insert into t1 values(1648791530000,8,8,8,8.1,18);
+sql insert into t1 values(1648791220000,10,10,10,10.1,19) (1648791290008,2,2,2,2.1,20) (1648791540000,17,17,17,17.1,21) (1648791500001,22,22,22,22.1,22);
+sleep 300
+sql select * from streamt order by s desc;
+
+# row 0
+if $data01 != 2 then
+ print ======$data01
+ return -1
+endi
+
+if $data02 != 29 then
+ print ======$data02
+ return -1
+endi
+
+if $data03 != 7 then
+ print ======$data03
+ return -1
+endi
+
+if $data04 != 22 then
+ print ======$data04
+ return -1
+endi
+
+# row 1
+if $data11 != 3 then
+ print ======$data11
+ return -1
+endi
+
+if $data12 != 33 then
+ print ======$data12
+ return -1
+endi
+
+if $data13 != 8 then
+ print ======$data13
+ return -1
+endi
+
+if $data14 != 21 then
+ print ======$data14
+ return -1
+endi
+
+# row 2
+if $data21 != 4 then
+ print ======$data21
+ return -1
+endi
+
+if $data22 != 25 then
+ print ======$data22
+ return -1
+endi
+
+if $data23 != 2 then
+ print ======$data23
+ return -1
+endi
+
+if $data24 != 20 then
+ print ======$data24
+ return -1
+endi
+
+# row 3
+if $data31 != 10 then
+ print ======$data31
+ return -1
+endi
+
+if $data32 != 54 then
+ print ======$data32
+ return -1
+endi
+
+if $data33 != 1 then
+ print ======$data33
+ return -1
+endi
+
+if $data34 != 19 then
+ print ======$data34
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stream/triggerInterval0.sim b/tests/script/tsim/stream/triggerInterval0.sim
new file mode 100644
index 0000000000000000000000000000000000000000..756f591f3ff8a58586cc77ba5a95acc1f31d46b0
--- /dev/null
+++ b/tests/script/tsim/stream/triggerInterval0.sim
@@ -0,0 +1,97 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sleep 50
+sql connect
+
+print =============== create database
+sql create database test vgroups 1
+sql show databases
+if $rows != 3 then
+ return -1
+endi
+
+print $data00 $data01 $data02
+
+sql use test
+sql create table t1(ts timestamp, a int, b int , c int, d double);
+sql create stream streams1 trigger window_close into streamt as select _wstartts, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from t1 interval(10s);
+
+sql insert into t1 values(1648791213001,1,2,3,1.0);
+sleep 300
+sql select * from streamt;
+if $rows != 0 then
+ print ======$rows
+ return -1
+endi
+
+sql insert into t1 values(1648791223001,2,2,3,1.1);
+sql insert into t1 values(1648791223002,2,2,3,1.1);
+sql insert into t1 values(1648791223003,2,2,3,1.1);
+sql insert into t1 values(1648791223001,2,2,3,1.1);
+sleep 300
+sql select * from streamt;
+if $rows != 1 then
+ print ======$rows
+ return -1
+endi
+
+if $data01 != 1 then
+ print ======$data01
+ return -1
+endi
+
+sql insert into t1 values(1648791233001,2,2,3,1.1);
+sleep 300
+sql select * from streamt;
+if $rows != 2 then
+ print ======$rows
+ return -1
+endi
+if $data01 != 1 then
+ print ======$data01
+ return -1
+endi
+if $data11 != 3 then
+ print ======$data11
+ return -1
+endi
+
+sql insert into t1 values(1648791223004,2,2,3,1.1);
+sql insert into t1 values(1648791223004,2,2,3,1.1);
+sql insert into t1 values(1648791223005,2,2,3,1.1);
+sleep 300
+sql select * from streamt;
+if $rows != 2 then
+ print ======$rows
+ return -1
+endi
+if $data01 != 1 then
+ print ======$data01
+ return -1
+endi
+if $data11 != 5 then
+ print ======$data11
+ return -1
+endi
+
+
+sql insert into t1 values(1648791233002,3,2,3,2.1);
+sql insert into t1 values(1648791213002,4,2,3,3.1)
+sql insert into t1 values(1648791213002,4,2,3,4.1);
+sleep 300
+sql select * from streamt;
+if $rows != 2 then
+ print ======$rows
+ return -1
+endi
+if $data01 != 2 then
+ print ======$data01
+ return -1
+endi
+if $data11 != 5 then
+ print ======$data11
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/tsim/stream/triggerSession0.sim b/tests/script/tsim/stream/triggerSession0.sim
new file mode 100644
index 0000000000000000000000000000000000000000..fb0666fdcfe847dd25a3e4eb3b66acd16ed09f63
--- /dev/null
+++ b/tests/script/tsim/stream/triggerSession0.sim
@@ -0,0 +1,105 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sleep 50
+sql connect
+
+print =============== create database
+sql create database test vgroups 1
+sql show databases
+if $rows != 3 then
+ return -1
+endi
+
+print $data00 $data01 $data02
+
+sql use test
+sql create table t2(ts timestamp, a int, b int , c int, d double);
+sql create stream streams2 trigger window_close into streamt2 as select _wstartts, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from t2 session(ts, 10s);
+
+sql insert into t2 values(1648791213000,1,2,3,1.0);
+sql insert into t2 values(1648791222999,1,2,3,1.0);
+sql insert into t2 values(1648791223000,1,2,3,1.0);
+sql insert into t2 values(1648791223001,1,2,3,1.0);
+sql insert into t2 values(1648791233001,1,2,3,1.0);
+sleep 300
+sql select * from streamt2;
+if $rows != 0 then
+ print ======$rows
+ return -1
+endi
+
+sql insert into t2 values(1648791243002,1,2,3,1.0);
+sleep 300
+sql select * from streamt2;
+if $rows != 1 then
+ print ======$rows
+ return -1
+endi
+
+if $data01 != 5 then
+ print ======$data01
+ return -1
+endi
+
+sql insert into t2 values(1648791223001,1,2,3,1.0) (1648791223002,1,2,3,1.0) (1648791222999,1,2,3,1.0);
+sleep 300
+sql select * from streamt2;
+if $rows != 1 then
+ print ======$rows
+ return -1
+endi
+
+if $data01 != 6 then
+ print ======$data01
+ return -1
+endi
+
+sql insert into t2 values(1648791233002,1,2,3,1.0);
+sleep 300
+sql select * from streamt2;
+if $rows != 1 then
+ print ======$rows
+ return -1
+endi
+
+if $data01 != 6 then
+ print ======$data01
+ return -1
+endi
+
+sql insert into t2 values(1648791253003,1,2,3,1.0);
+sleep 300
+sql select * from streamt2;
+if $rows != 1 then
+ print ======$rows
+ return -1
+endi
+
+if $data01 != 8 then
+ print ======$data01
+ return -1
+endi
+
+sql insert into t2 values(1648791243003,1,2,3,1.0) (1648791243002,1,2,3,1.0) (1648791270004,1,2,3,1.0) (1648791280005,1,2,3,1.0) (1648791290006,1,2,3,1.0);
+sleep 500
+sql select * from streamt2;
+if $rows != 3 then
+ print ======$rows
+ return -1
+endi
+
+if $data01 != 10 then
+ print ======$data01
+ return -1
+endi
+if $data11 != 1 then
+ print ======$data11
+ return -1
+endi
+if $data21 != 1 then
+ print ======$data21
+ return -1
+endi
+
+#system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/tsim/sync/3Replica1VgElect.sim b/tests/script/tsim/sync/3Replica1VgElect.sim
new file mode 100644
index 0000000000000000000000000000000000000000..e531fa82ad3d78d45447b583834c5b8842c171d1
--- /dev/null
+++ b/tests/script/tsim/sync/3Replica1VgElect.sim
@@ -0,0 +1,478 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c supportVnodes -v 0
+
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+
+$loop_cnt = 0
+check_dnode_ready:
+ $loop_cnt = $loop_cnt + 1
+ sleep 200
+ if $loop_cnt == 10 then
+ print ====> dnode not ready!
+ return -1
+ endi
+sql show dnodes
+print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
+print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6]
+if $data[0][0] != 1 then
+ return -1
+endi
+if $data[0][4] != ready then
+ goto check_dnode_ready
+endi
+
+sql connect
+sql create dnode $hostname port 7200
+sql create dnode $hostname port 7300
+sql create dnode $hostname port 7400
+
+$loop_cnt = 0
+check_dnode_ready_1:
+$loop_cnt = $loop_cnt + 1
+sleep 200
+if $loop_cnt == 10 then
+ print ====> dnodes not ready!
+ return -1
+endi
+sql show dnodes
+print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
+print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6]
+if $data[0][4] != ready then
+ goto check_dnode_ready_1
+endi
+if $data[1][4] != ready then
+ goto check_dnode_ready_1
+endi
+if $data[2][4] != ready then
+ goto check_dnode_ready_1
+endi
+if $data[3][4] != ready then
+ goto check_dnode_ready_1
+endi
+
+$replica = 3
+$vgroups = 1
+
+print ============= create database
+sql create database db replica $replica vgroups $vgroups
+
+$loop_cnt = 0
+check_db_ready:
+$loop_cnt = $loop_cnt + 1
+sleep 200
+if $loop_cnt == 100 then
+ print ====> db not ready!
+ return -1
+endi
+sql show databases
+print ===> rows: $rows
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] $data[2][7] $data[2][8] $data[2][9] $data[2][6] $data[2][11] $data[2][12] $data[2][13] $data[2][14] $data[2][15] $data[2][16] $data[2][17] $data[2][18] $data[2][19]
+if $rows != 3 then
+ return -1
+endi
+if $data[2][19] != ready then
+ goto check_db_ready
+endi
+
+sql use db
+
+$loop_cnt = 0
+check_vg_ready:
+$loop_cnt = $loop_cnt + 1
+sleep 200
+if $loop_cnt == 300 then
+ print ====> vgroups not ready!
+ return -1
+endi
+
+sql show vgroups
+print ===> rows: $rows
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11]
+
+if $rows != $vgroups then
+ return -1
+endi
+
+if $data[0][4] == leader then
+ if $data[0][6] == follower then
+ if $data[0][8] == follower then
+ print ---- vgroup $data[0][0] leader locate on dnode $data[0][3]
+ endi
+ endi
+elif $data[0][6] == leader then
+ if $data[0][4] == follower then
+ if $data[0][8] == follower then
+ print ---- vgroup $data[0][0] leader locate on dnode $data[0][5]
+ endi
+ endi
+elif $data[0][8] == leader then
+ if $data[0][4] == follower then
+ if $data[0][6] == follower then
+ print ---- vgroup $data[0][0] leader locate on dnode $data[0][7]
+ endi
+ endi
+else
+ goto check_vg_ready
+endi
+
+
+vg_ready:
+print ====> create stable/child table
+sql create table stb (ts timestamp, c1 int, c2 float, c3 binary(10)) tags (t1 int)
+
+sql show stables
+if $rows != 1 then
+ return -1
+endi
+
+$ctbPrefix = ctb
+$ntbPrefix = ntb
+$tbNum = 10
+$i = 0
+while $i < $tbNum
+ $ctb = $ctbPrefix . $i
+ sql create table $ctb using stb tags( $i )
+ $ntb = $ntbPrefix . $i
+ sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(10))
+ $i = $i + 1
+endw
+
+$totalTblNum = $tbNum * 2
+sleep 1000
+sql show tables
+print ====> expect $totalTblNum and infinsert $rows in fact
+if $rows != $totalTblNum then
+ return -1
+endi
+
+start_switch_leader:
+
+$switch_loop_cnt = 0
+sql show vgroups
+$dnodeId = $data[0][3]
+$dnodeId = dnode . $dnodeId
+
+switch_leader_to_offine_loop:
+
+print $dnodeId
+print ====> stop $dnodeId
+system sh/exec.sh -n $dnodeId -s stop -x SIGINT
+
+
+$loop_cnt = 0
+$loop_cnt = $loop_cnt + 1
+sleep 201
+if $loop_cnt == 300 then
+ print ====> vgroups switch fail!!!
+ return -1
+endi
+sql show vgroups
+print ===> rows: $rows
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11]
+
+if $rows != $vgroups then
+ return -1
+endi
+
+
+vg_offline_1:
+
+print ====> start $dnodeId
+system sh/exec.sh -n $dnodeId -s start
+
+$switch_loop_cnt = $switch_loop_cnt + 1
+print $switch_loop_cnt
+
+if $switch_loop_cnt == 1 then
+ sql show vgroups
+ $dnodeId = $data[1][3]
+ $dnodeId = dnode . $dnodeId
+ goto switch_leader_to_offine_loop
+elif $switch_loop_cnt == 2 then
+ sql show vgroups
+ $dnodeId = $data[2][3]
+ $dnodeId = dnode . $dnodeId
+ goto switch_leader_to_offine_loop
+elif $switch_loop_cnt == 3 then
+ sql show vgroups
+ $dnodeId = $data[3][3]
+ $dnodeId = dnode . $dnodeId
+ goto switch_leader_to_offine_loop
+elif $switch_loop_cnt == 4 then
+ sql show vgroups
+ $dnodeId = $data[4][3]
+ $dnodeId = dnode . $dnodeId
+ goto switch_leader_to_offine_loop
+else
+ goto stop_leader_to_offine_loop
+endi
+
+stop_leader_to_offine_loop:
+
+$loop_cnt = 0
+check_vg_ready1:
+$loop_cnt = $loop_cnt + 1
+print $loop_cnt
+sleep 202
+if $loop_cnt == 300 then
+ print ====> vgroups not ready!
+ return -1
+endi
+
+sql show vgroups
+print ===> rows: $rows
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11]
+
+if $rows != $vgroups then
+ return -1
+endi
+
+if $data[0][4] == leader then
+ if $data[0][6] == follower then
+ if $data[0][8] == follower then
+ print ---- vgroup $data[0][0] leader locate on dnode $data[0][3]
+ endi
+ endi
+elif $data[0][6] == leader then
+ if $data[0][4] == follower then
+ if $data[0][8] == follower then
+ print ---- vgroup $data[0][0] leader locate on dnode $data[0][5]
+ endi
+ endi
+elif $data[0][8] == leader then
+ if $data[0][4] == follower then
+ if $data[0][6] == follower then
+ print ---- vgroup $data[0][0] leader locate on dnode $data[0][7]
+ endi
+ endi
+else
+ goto check_vg_ready1
+endi
+
+
+print ====> final test: create stable/child table
+sql create table stb1 (ts timestamp, c1 int, c2 float, c3 binary(10)) tags (t1 int)
+
+
+sql show stables
+if $rows != 2 then
+ return -1
+endi
+
+$ctbPrefix = ctb1
+$ntbPrefix = ntb1
+$tbNum = 10
+$i = 0
+while $i < $tbNum
+ $ctb = $ctbPrefix . $i
+ sql create table $ctb using stb1 tags( $i )
+ $ntb = $ntbPrefix . $i
+ sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(10))
+ $i = $i + 1
+endw
+
+sleep 1000
+sql show stables
+if $rows != 2 then
+ return -1
+endi
+
+sql show tables
+if $rows != 40 then
+ return -1
+endi
+
+
+
+system sh/deploy.sh -n dnode5 -i 5
+system sh/exec.sh -n dnode5 -s start
+
+sql connect
+sql create dnode $hostname port 7500
+
+$loop_cnt = 0
+check_dnode_ready3:
+ $loop_cnt = $loop_cnt + 1
+ sleep 200
+ if $loop_cnt == 100 then
+ print ====> dnode not ready!
+ return -1
+ endi
+
+sql show dnodes
+print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
+print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6]
+print ===> $rows $data[4][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6]
+
+if $rows != 5 then
+ return -1
+endi
+
+if $data[4][4] != ready then
+ goto check_dnode_ready3
+endi
+
+
+
+# restart clusters
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
+system sh/exec.sh -n dnode3 -s stop -x SIGINT
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
+system sh/exec.sh -n dnode5 -s stop -x SIGINT
+
+
+
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+system sh/exec.sh -n dnode5 -s start
+
+
+$loop_cnt = 0
+check_dnode_ready_2:
+ $loop_cnt = $loop_cnt + 1
+ sleep 200
+ if $loop_cnt == 10 then
+ print ====> dnode not ready!
+ return -1
+ endi
+sql show dnodes
+print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
+print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6]
+if $data[0][0] != 1 then
+ return -1
+endi
+
+if $data[0][4] != ready then
+ goto check_dnode_ready_2
+endi
+if $data[1][4] != ready then
+ goto check_dnode_ready_2
+endi
+if $data[2][4] != ready then
+ goto check_dnode_ready_2
+endi
+if $data[3][4] != ready then
+ goto check_dnode_ready_2
+endi
+
+sql use db;
+$ctbPrefix = ctb2
+$ntbPrefix = ntb2
+$tbNum = 10
+$i = 0
+while $i < $tbNum
+ $ctb = $ctbPrefix . $i
+ sql create table $ctb using stb1 tags( $i )
+ $ntb = $ntbPrefix . $i
+ sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(10))
+ $i = $i + 1
+endw
+
+sleep 1000
+sql use db
+sql show stables
+if $rows != 2 then
+ return -1
+endi
+
+sql show tables
+print $rows
+if $rows != 60 then
+ return -1
+endi
+
+
+
+$replica = 3
+$vgroups = 5
+
+print ============= create database
+sql create database db1 replica $replica vgroups $vgroups
+
+$loop_cnt = 0
+check_db_ready1:
+$loop_cnt = $loop_cnt + 1
+sleep 200
+if $loop_cnt == 100 then
+ print ====> db not ready!
+ return -1
+endi
+sql show databases
+print ===> rows: $rows
+print $data(db1)[0] $data(db1)[1] $data(db1)[2] $data(db1)[3] $data(db1)[4] $data(db1)[5] $data(db1)[6] $data(db1)[7] $data(db1)[8] $data(db1)[9] $data(db1)[6] $data(db1)[11] $data(db1)[12] $data(db1)[13] $data(db1)[14] $data(db1)[15] $data(db1)[16] $data(db1)[17] $data(db1)[18] $data(db1)[19]
+if $rows != 4 then
+ return -1
+endi
+if $data(db1)[19] != ready then
+ goto check_db_ready1
+endi
+
+
+sql use db1
+
+$loop_cnt = 0
+check_vg_ready3:
+$loop_cnt = $loop_cnt + 1
+print $loop_cnt
+sleep 202
+if $loop_cnt == 300 then
+ print ====> vgroups not ready!
+ return -1
+endi
+
+sql show vgroups
+print ===> rows: $rows
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11]
+if $rows != $vgroups then
+ return -1
+endi
+
+if $data[0][4] == leader then
+ if $data[0][6] == follower then
+ if $data[0][8] == follower then
+ print ---- vgroup $data[0][0] leader locate on dnode $data[0][3]
+ endi
+ endi
+elif $data[0][6] == leader then
+ if $data[0][4] == follower then
+ if $data[0][8] == follower then
+ print ---- vgroup $data[0][0] leader locate on dnode $data[0][5]
+ endi
+ endi
+elif $data[0][8] == leader then
+ if $data[0][4] == follower then
+ if $data[0][6] == follower then
+ print ---- vgroup $data[0][0] leader locate on dnode $data[0][7]
+ endi
+ endi
+else
+ goto check_vg_ready3
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
+system sh/exec.sh -n dnode3 -s stop -x SIGINT
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
+
+
+
diff --git a/tests/script/tsim/sync/3Replica5VgElect.sim b/tests/script/tsim/sync/3Replica5VgElect.sim
new file mode 100644
index 0000000000000000000000000000000000000000..2e05e848be43671da4e981242eff6d03721e36b9
--- /dev/null
+++ b/tests/script/tsim/sync/3Replica5VgElect.sim
@@ -0,0 +1,755 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c supportVnodes -v 0
+
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+
+$loop_cnt = 0
+check_dnode_ready:
+ $loop_cnt = $loop_cnt + 1
+ sleep 200
+ if $loop_cnt == 10 then
+ print ====> dnode not ready!
+ return -1
+ endi
+sql show dnodes
+print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
+print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6]
+if $data[0][0] != 1 then
+ return -1
+endi
+if $data[0][4] != ready then
+ goto check_dnode_ready
+endi
+
+sql connect
+sql create dnode $hostname port 7200
+sql create dnode $hostname port 7300
+sql create dnode $hostname port 7400
+
+$loop_cnt = 0
+check_dnode_ready_1:
+$loop_cnt = $loop_cnt + 1
+sleep 200
+if $loop_cnt == 10 then
+ print ====> dnodes not ready!
+ return -1
+endi
+sql show dnodes
+print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
+print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6]
+if $data[0][4] != ready then
+ goto check_dnode_ready_1
+endi
+if $data[1][4] != ready then
+ goto check_dnode_ready_1
+endi
+if $data[2][4] != ready then
+ goto check_dnode_ready_1
+endi
+if $data[3][4] != ready then
+ goto check_dnode_ready_1
+endi
+
+$replica = 3
+$vgroups = 5
+
+print ============= create database
+sql create database db replica $replica vgroups $vgroups
+
+$loop_cnt = 0
+check_db_ready:
+$loop_cnt = $loop_cnt + 1
+sleep 200
+if $loop_cnt == 100 then
+ print ====> db not ready!
+ return -1
+endi
+sql show databases
+print ===> rows: $rows
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] $data[2][7] $data[2][8] $data[2][9] $data[2][6] $data[2][11] $data[2][12] $data[2][13] $data[2][14] $data[2][15] $data[2][16] $data[2][17] $data[2][18] $data[2][19]
+if $rows != 3 then
+ return -1
+endi
+if $data[2][19] != ready then
+ goto check_db_ready
+endi
+
+sql use db
+
+$loop_cnt = 0
+check_vg_ready:
+$loop_cnt = $loop_cnt + 1
+sleep 200
+if $loop_cnt == 300 then
+ print ====> vgroups not ready!
+ return -1
+endi
+
+sql show vgroups
+print ===> rows: $rows
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11]
+print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] $data[1][7] $data[1][8] $data[1][9] $data[1][10] $data[1][11]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] $data[2][7] $data[2][8] $data[2][9] $data[2][10] $data[2][11]
+print $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] $data[3][7] $data[3][8] $data[3][9] $data[3][10] $data[3][11]
+print $data[4][0] $data[4][1] $data[4][2] $data[4][3] $data[4][4] $data[4][5] $data[4][6] $data[4][7] $data[4][8] $data[4][9] $data[4][10] $data[4][11]
+if $rows != $vgroups then
+ return -1
+endi
+
+if $data[0][4] == leader then
+ if $data[0][6] == follower then
+ if $data[0][8] == follower then
+ print ---- vgroup $data[0][0] leader locate on dnode $data[0][3]
+ endi
+ endi
+elif $data[0][6] == leader then
+ if $data[0][4] == follower then
+ if $data[0][8] == follower then
+ print ---- vgroup $data[0][0] leader locate on dnode $data[0][5]
+ endi
+ endi
+elif $data[0][8] == leader then
+ if $data[0][4] == follower then
+ if $data[0][6] == follower then
+ print ---- vgroup $data[0][0] leader locate on dnode $data[0][7]
+ endi
+ endi
+else
+ goto check_vg_ready
+endi
+
+if $data[1][4] == leader then
+ if $data[1][6] == follower then
+ if $data[1][8] == follower then
+ print ---- vgroup $data[1][0] leader locate on dnode $data[1][3]
+ endi
+ endi
+elif $data[1][6] == leader then
+ if $data[1][4] == follower then
+ if $data[1][8] == follower then
+ print ---- vgroup $data[1][0] leader locate on dnode $data[1][5]
+ endi
+ endi
+elif $data[1][8] == leader then
+ if $data[1][4] == follower then
+ if $data[1][6] == follower then
+ print ---- vgroup $data[1][0] leader locate on dnode $data[1][7]
+ endi
+ endi
+else
+ goto check_vg_ready
+endi
+
+if $data[2][4] == leader then
+ if $data[2][6] == follower then
+ if $data[2][8] == follower then
+ print ---- vgroup $data[2][0] leader locate on dnode $data[2][3]
+ endi
+ endi
+elif $data[2][6] == leader then
+ if $data[2][4] == follower then
+ if $data[2][8] == follower then
+ print ---- vgroup $data[2][0] leader locate on dnode $data[2][5]
+ endi
+ endi
+elif $data[2][8] == leader then
+ if $data[2][4] == follower then
+ if $data[2][6] == follower then
+ print ---- vgroup $data[2][0] leader locate on dnode $data[2][7]
+ endi
+ endi
+else
+ goto check_vg_ready
+endi
+
+if $data[3][4] == leader then
+ if $data[3][6] == follower then
+ if $data[3][8] == follower then
+ print ---- vgroup $data[3][0] leader locate on dnode $data[3][3]
+ endi
+ endi
+elif $data[3][6] == leader then
+ if $data[3][4] == follower then
+ if $data[3][8] == follower then
+ print ---- vgroup $data[3][0] leader locate on dnode $data[3][5]
+ endi
+ endi
+elif $data[3][8] == leader then
+ if $data[3][4] == follower then
+ if $data[3][6] == follower then
+ print ---- vgroup $data[3][0] leader locate on dnode $data[3][7]
+ endi
+ endi
+else
+ goto check_vg_ready
+endi
+
+if $data[4][4] == leader then
+ if $data[4][6] == follower then
+ if $data[4][8] == follower then
+ print ---- vgroup $data[4][0] leader locate on dnode $data[4][3]
+ endi
+ endi
+elif $data[4][6] == leader then
+ if $data[4][4] == follower then
+ if $data[4][8] == follower then
+ print ---- vgroup $data[4][0] leader locate on dnode $data[4][5]
+ endi
+ endi
+elif $data[4][8] == leader then
+ if $data[4][4] == follower then
+ if $data[4][6] == follower then
+ print ---- vgroup $data[4][0] leader locate on dnode $data[4][7]
+ endi
+ endi
+else
+ goto check_vg_ready
+endi
+
+vg_ready:
+print ====> create stable/child table
+sql create table stb (ts timestamp, c1 int, c2 float, c3 binary(10)) tags (t1 int)
+
+sql show stables
+if $rows != 1 then
+ return -1
+endi
+
+$ctbPrefix = ctb
+$ntbPrefix = ntb
+$tbNum = 10
+$i = 0
+while $i < $tbNum
+ $ctb = $ctbPrefix . $i
+ sql create table $ctb using stb tags( $i )
+ $ntb = $ntbPrefix . $i
+ sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(10))
+ $i = $i + 1
+endw
+
+$totalTblNum = $tbNum * 2
+sleep 1000
+sql show tables
+print ====> expect $totalTblNum and infinsert $rows in fact
+if $rows != $totalTblNum then
+ return -1
+endi
+
+start_switch_leader:
+
+$switch_loop_cnt = 0
+sql show vgroups
+$dnodeId = $data[0][3]
+$dnodeId = dnode . $dnodeId
+
+switch_leader_to_offine_loop:
+
+print $dnodeId
+print ====> stop $dnodeId
+system sh/exec.sh -n $dnodeId -s stop -x SIGINT
+
+
+$loop_cnt = 0
+$loop_cnt = $loop_cnt + 1
+sleep 201
+if $loop_cnt == 300 then
+ print ====> vgroups switch fail!!!
+ return -1
+endi
+sql show vgroups
+print ===> rows: $rows
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11]
+print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] $data[1][7] $data[1][8] $data[1][9] $data[1][10] $data[1][11]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] $data[2][7] $data[2][8] $data[2][9] $data[2][10] $data[2][11]
+print $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] $data[3][7] $data[3][8] $data[3][9] $data[3][10] $data[3][11]
+print $data[4][0] $data[4][1] $data[4][2] $data[4][3] $data[4][4] $data[4][5] $data[4][6] $data[4][7] $data[4][8] $data[4][9] $data[4][10] $data[4][11]
+if $rows != $vgroups then
+ return -1
+endi
+
+
+vg_offline_1:
+
+print ====> start $dnodeId
+system sh/exec.sh -n $dnodeId -s start
+
+$switch_loop_cnt = $switch_loop_cnt + 1
+print $switch_loop_cnt
+
+if $switch_loop_cnt == 1 then
+ sql show vgroups
+ $dnodeId = $data[1][3]
+ $dnodeId = dnode . $dnodeId
+ goto switch_leader_to_offine_loop
+elif $switch_loop_cnt == 2 then
+ sql show vgroups
+ $dnodeId = $data[2][3]
+ $dnodeId = dnode . $dnodeId
+ goto switch_leader_to_offine_loop
+elif $switch_loop_cnt == 3 then
+ sql show vgroups
+ $dnodeId = $data[3][3]
+ $dnodeId = dnode . $dnodeId
+ goto switch_leader_to_offine_loop
+elif $switch_loop_cnt == 4 then
+ sql show vgroups
+ $dnodeId = $data[4][3]
+ $dnodeId = dnode . $dnodeId
+ goto switch_leader_to_offine_loop
+else
+ goto stop_leader_to_offine_loop
+endi
+
+stop_leader_to_offine_loop:
+
+$loop_cnt = 0
+check_vg_ready1:
+$loop_cnt = $loop_cnt + 1
+print $loop_cnt
+sleep 202
+if $loop_cnt == 300 then
+ print ====> vgroups not ready!
+ return -1
+endi
+
+sql show vgroups
+print ===> rows: $rows
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11]
+print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] $data[1][7] $data[1][8] $data[1][9] $data[1][10] $data[1][11]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] $data[2][7] $data[2][8] $data[2][9] $data[2][10] $data[2][11]
+print $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] $data[3][7] $data[3][8] $data[3][9] $data[3][10] $data[3][11]
+print $data[4][0] $data[4][1] $data[4][2] $data[4][3] $data[4][4] $data[4][5] $data[4][6] $data[4][7] $data[4][8] $data[4][9] $data[4][10] $data[4][11]
+if $rows != $vgroups then
+ return -1
+endi
+
+if $data[0][4] == leader then
+ if $data[0][6] == follower then
+ if $data[0][8] == follower then
+ print ---- vgroup $data[0][0] leader locate on dnode $data[0][3]
+ endi
+ endi
+elif $data[0][6] == leader then
+ if $data[0][4] == follower then
+ if $data[0][8] == follower then
+ print ---- vgroup $data[0][0] leader locate on dnode $data[0][5]
+ endi
+ endi
+elif $data[0][8] == leader then
+ if $data[0][4] == follower then
+ if $data[0][6] == follower then
+ print ---- vgroup $data[0][0] leader locate on dnode $data[0][7]
+ endi
+ endi
+else
+ goto check_vg_ready1
+endi
+
+if $data[1][4] == leader then
+ if $data[1][6] == follower then
+ if $data[1][8] == follower then
+ print ---- vgroup $data[1][0] leader locate on dnode $data[1][3]
+ endi
+ endi
+elif $data[1][6] == leader then
+ if $data[1][4] == follower then
+ if $data[1][8] == follower then
+ print ---- vgroup $data[1][0] leader locate on dnode $data[1][5]
+ endi
+ endi
+elif $data[1][8] == leader then
+ if $data[1][4] == follower then
+ if $data[1][6] == follower then
+ print ---- vgroup $data[1][0] leader locate on dnode $data[1][7]
+ endi
+ endi
+else
+ goto check_vg_ready1
+endi
+
+if $data[2][4] == leader then
+ if $data[2][6] == follower then
+ if $data[2][8] == follower then
+ print ---- vgroup $data[2][0] leader locate on dnode $data[2][3]
+ endi
+ endi
+elif $data[2][6] == leader then
+ if $data[2][4] == follower then
+ if $data[2][8] == follower then
+ print ---- vgroup $data[2][0] leader locate on dnode $data[2][5]
+ endi
+ endi
+elif $data[2][8] == leader then
+ if $data[2][4] == follower then
+ if $data[2][6] == follower then
+ print ---- vgroup $data[2][0] leader locate on dnode $data[2][7]
+ endi
+ endi
+else
+ goto check_vg_ready1
+endi
+
+if $data[3][4] == leader then
+ if $data[3][6] == follower then
+ if $data[3][8] == follower then
+ print ---- vgroup $data[3][0] leader locate on dnode $data[3][3]
+ endi
+ endi
+elif $data[3][6] == leader then
+ if $data[3][4] == follower then
+ if $data[3][8] == follower then
+ print ---- vgroup $data[3][0] leader locate on dnode $data[3][5]
+ endi
+ endi
+elif $data[3][8] == leader then
+ if $data[3][4] == follower then
+ if $data[3][6] == follower then
+ print ---- vgroup $data[3][0] leader locate on dnode $data[3][7]
+ endi
+ endi
+else
+ goto check_vg_ready1
+endi
+
+if $data[4][4] == leader then
+ if $data[4][6] == follower then
+ if $data[4][8] == follower then
+ print ---- vgroup $data[4][0] leader locate on dnode $data[4][3]
+ endi
+ endi
+elif $data[4][6] == leader then
+ if $data[4][4] == follower then
+ if $data[4][8] == follower then
+ print ---- vgroup $data[4][0] leader locate on dnode $data[4][5]
+ endi
+ endi
+elif $data[4][8] == leader then
+ if $data[4][4] == follower then
+ if $data[4][6] == follower then
+ print ---- vgroup $data[4][0] leader locate on dnode $data[4][7]
+ endi
+ endi
+else
+ goto check_vg_ready1
+endi
+
+
+print ====> final test: create stable/child table
+sql create table stb1 (ts timestamp, c1 int, c2 float, c3 binary(10)) tags (t1 int)
+
+
+sql show stables
+if $rows != 2 then
+ return -1
+endi
+
+$ctbPrefix = ctb1
+$ntbPrefix = ntb1
+$tbNum = 10
+$i = 0
+while $i < $tbNum
+ $ctb = $ctbPrefix . $i
+ sql create table $ctb using stb1 tags( $i )
+ $ntb = $ntbPrefix . $i
+ sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(10))
+ $i = $i + 1
+endw
+
+sleep 1000
+sql show stables
+if $rows != 2 then
+ return -1
+endi
+
+sql show tables
+if $rows != 40 then
+ return -1
+endi
+
+
+
+system sh/deploy.sh -n dnode5 -i 5
+system sh/exec.sh -n dnode5 -s start
+
+sql connect
+sql create dnode $hostname port 7500
+
+$loop_cnt = 0
+check_dnode_ready3:
+ $loop_cnt = $loop_cnt + 1
+ sleep 200
+ if $loop_cnt == 100 then
+ print ====> dnode not ready!
+ return -1
+ endi
+
+sql show dnodes
+print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
+print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6]
+print ===> $rows $data[4][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6]
+
+if $rows != 5 then
+ return -1
+endi
+
+if $data[4][4] != ready then
+ goto check_dnode_ready3
+endi
+
+
+
+# restart clusters
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
+system sh/exec.sh -n dnode3 -s stop -x SIGINT
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
+system sh/exec.sh -n dnode5 -s stop -x SIGINT
+
+
+
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+system sh/exec.sh -n dnode5 -s start
+
+
+$loop_cnt = 0
+check_dnode_ready_2:
+ $loop_cnt = $loop_cnt + 1
+ sleep 200
+ if $loop_cnt == 10 then
+ print ====> dnode not ready!
+ return -1
+ endi
+sql show dnodes
+print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
+print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6]
+if $data[0][0] != 1 then
+ return -1
+endi
+
+if $data[0][4] != ready then
+ goto check_dnode_ready_2
+endi
+if $data[1][4] != ready then
+ goto check_dnode_ready_2
+endi
+if $data[2][4] != ready then
+ goto check_dnode_ready_2
+endi
+if $data[3][4] != ready then
+ goto check_dnode_ready_2
+endi
+
+sql use db;
+$ctbPrefix = ctb2
+$ntbPrefix = ntb2
+$tbNum = 10
+$i = 0
+while $i < $tbNum
+ $ctb = $ctbPrefix . $i
+ sql create table $ctb using stb1 tags( $i )
+ $ntb = $ntbPrefix . $i
+ sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(10))
+ $i = $i + 1
+endw
+
+sleep 1000
+sql use db
+sql show stables
+if $rows != 2 then
+ return -1
+endi
+
+sql show tables
+print $rows
+if $rows != 60 then
+ return -1
+endi
+
+
+
+$replica = 3
+$vgroups = 5
+
+print ============= create database
+sql create database db1 replica $replica vgroups $vgroups
+
+$loop_cnt = 0
+check_db_ready1:
+$loop_cnt = $loop_cnt + 1
+sleep 200
+if $loop_cnt == 100 then
+ print ====> db not ready!
+ return -1
+endi
+sql show databases
+print ===> rows: $rows
+print $data(db1)[0] $data(db1)[1] $data(db1)[2] $data(db1)[3] $data(db1)[4] $data(db1)[5] $data(db1)[6] $data(db1)[7] $data(db1)[8] $data(db1)[9] $data(db1)[6] $data(db1)[11] $data(db1)[12] $data(db1)[13] $data(db1)[14] $data(db1)[15] $data(db1)[16] $data(db1)[17] $data(db1)[18] $data(db1)[19]
+if $rows != 4 then
+ return -1
+endi
+if $data(db1)[19] != ready then
+ goto check_db_ready1
+endi
+
+
+sql use db1
+
+$loop_cnt = 0
+check_vg_ready3:
+$loop_cnt = $loop_cnt + 1
+print $loop_cnt
+sleep 202
+if $loop_cnt == 300 then
+ print ====> vgroups not ready!
+ return -1
+endi
+
+sql show vgroups
+print ===> rows: $rows
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11]
+print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] $data[1][7] $data[1][8] $data[1][9] $data[1][10] $data[1][11]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] $data[2][7] $data[2][8] $data[2][9] $data[2][10] $data[2][11]
+print $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] $data[3][7] $data[3][8] $data[3][9] $data[3][10] $data[3][11]
+print $data[4][0] $data[4][1] $data[4][2] $data[4][3] $data[4][4] $data[4][5] $data[4][6] $data[4][7] $data[4][8] $data[4][9] $data[4][10] $data[4][11]
+if $rows != $vgroups then
+ return -1
+endi
+
+if $data[0][4] == leader then
+ if $data[0][6] == follower then
+ if $data[0][8] == follower then
+ print ---- vgroup $data[0][0] leader locate on dnode $data[0][3]
+ endi
+ endi
+elif $data[0][6] == leader then
+ if $data[0][4] == follower then
+ if $data[0][8] == follower then
+ print ---- vgroup $data[0][0] leader locate on dnode $data[0][5]
+ endi
+ endi
+elif $data[0][8] == leader then
+ if $data[0][4] == follower then
+ if $data[0][6] == follower then
+ print ---- vgroup $data[0][0] leader locate on dnode $data[0][7]
+ endi
+ endi
+else
+ goto check_vg_ready3
+endi
+
+if $data[1][4] == leader then
+ if $data[1][6] == follower then
+ if $data[1][8] == follower then
+ print ---- vgroup $data[1][0] leader locate on dnode $data[1][3]
+ endi
+ endi
+elif $data[1][6] == leader then
+ if $data[1][4] == follower then
+ if $data[1][8] == follower then
+ print ---- vgroup $data[1][0] leader locate on dnode $data[1][5]
+ endi
+ endi
+elif $data[1][8] == leader then
+ if $data[1][4] == follower then
+ if $data[1][6] == follower then
+ print ---- vgroup $data[1][0] leader locate on dnode $data[1][7]
+ endi
+ endi
+else
+ goto check_vg_ready3
+endi
+
+if $data[2][4] == leader then
+ if $data[2][6] == follower then
+ if $data[2][8] == follower then
+ print ---- vgroup $data[2][0] leader locate on dnode $data[2][3]
+ endi
+ endi
+elif $data[2][6] == leader then
+ if $data[2][4] == follower then
+ if $data[2][8] == follower then
+ print ---- vgroup $data[2][0] leader locate on dnode $data[2][5]
+ endi
+ endi
+elif $data[2][8] == leader then
+ if $data[2][4] == follower then
+ if $data[2][6] == follower then
+ print ---- vgroup $data[2][0] leader locate on dnode $data[2][7]
+ endi
+ endi
+else
+ goto check_vg_ready3
+endi
+
+if $data[3][4] == leader then
+ if $data[3][6] == follower then
+ if $data[3][8] == follower then
+ print ---- vgroup $data[3][0] leader locate on dnode $data[3][3]
+ endi
+ endi
+elif $data[3][6] == leader then
+ if $data[3][4] == follower then
+ if $data[3][8] == follower then
+ print ---- vgroup $data[3][0] leader locate on dnode $data[3][5]
+ endi
+ endi
+elif $data[3][8] == leader then
+ if $data[3][4] == follower then
+ if $data[3][6] == follower then
+ print ---- vgroup $data[3][0] leader locate on dnode $data[3][7]
+ endi
+ endi
+else
+ goto check_vg_ready3
+endi
+
+if $data[4][4] == leader then
+ if $data[4][6] == follower then
+ if $data[4][8] == follower then
+ print ---- vgroup $data[4][0] leader locate on dnode $data[4][3]
+ endi
+ endi
+elif $data[4][6] == leader then
+ if $data[4][4] == follower then
+ if $data[4][8] == follower then
+ print ---- vgroup $data[4][0] leader locate on dnode $data[4][5]
+ endi
+ endi
+elif $data[4][8] == leader then
+ if $data[4][4] == follower then
+ if $data[4][6] == follower then
+ print ---- vgroup $data[4][0] leader locate on dnode $data[4][7]
+ endi
+ endi
+else
+ goto check_vg_ready3
+endi
+
+# sql drop dnode 5
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
+system sh/exec.sh -n dnode3 -s stop -x SIGINT
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
+
+
diff --git a/tests/script/tsim/sync/insertDataByRunBack.sim b/tests/script/tsim/sync/insertDataByRunBack.sim
index c86cd3844bd3258b5cac4f7b4bbe5dd1c3e0dec2..00f0643b61c3066de4d3bda25f60c54a9cf22084 100644
--- a/tests/script/tsim/sync/insertDataByRunBack.sim
+++ b/tests/script/tsim/sync/insertDataByRunBack.sim
@@ -20,6 +20,8 @@ print $data[1][0] $data[1][1] $data[1][2] $data[1][3]
if $rows == 2 then
if $data[1][1] == stop then
goto end_insert
+ elif $data[0][1] == stop then
+ goto end_insert
endi
endi
@@ -47,6 +49,9 @@ endw
if $loop_cnt == 0 then
print ====> notify main to working for insert data
sql insert into interaction values (now, 'working', 0, 0);
+ sql select * from interaction
+ print $data[0][0] $data[0][1] $data[0][2] $data[0][3]
+ print $data[1][0] $data[1][1] $data[1][2] $data[1][3]
endi
$loop_cnt = $loop_cnt + 1
goto loop_insert
diff --git a/tests/script/tsim/sync/oneReplica1VgElect.sim b/tests/script/tsim/sync/oneReplica1VgElect.sim
index bb9b3f449640818d888137721350b0cea90eebae..cf8912e654c04314e96a5fb4a718a3569ddea5f5 100644
--- a/tests/script/tsim/sync/oneReplica1VgElect.sim
+++ b/tests/script/tsim/sync/oneReplica1VgElect.sim
@@ -31,7 +31,7 @@ if $data[0][4] != ready then
goto check_dnode_ready
endi
-#sql connect
+sql connect
sql create dnode $hostname port 7200
sql create dnode $hostname port 7300
sql create dnode $hostname port 7400
@@ -66,144 +66,99 @@ $vgroups = 1
$replica = 1
print ============= create database
-sql create database db replica $replica vgroups $vgroups
+sql create database db1 replica $replica vgroups $vgroups
$loop_cnt = 0
check_db_ready:
$loop_cnt = $loop_cnt + 1
sleep 200
-if $loop_cnt == 10 then
- print ====> db not ready!
+if $loop_cnt == 100 then
+ print ====> db1 not ready!
return -1
endi
sql show databases
print ===> rows: $rows
-print $data(db)[0] $data(db)[1] $data(db)[2] $data(db)[3] $data(db)[4] $data(db)[5] $data(db)[6] $data(db)[7] $data(db)[8] $data(db)[9] $data(db)[10] $data(db)[11] $data(db)[12]
+print $data(db1)[0] $data(db)[1] $data(db)[2] $data(db)[3] $data(db)[4] $data(db)[5] $data(db)[6] $data(db)[7] $data(db)[8] $data(db)[9] $data(db)[10] $data(db)[11] $data(db)[12]
print $data(db)[13] $data(db)[14] $data(db)[15] $data(db)[16] $data(db)[17] $data(db)[18] $data(db)[19] $data(db)[20]
if $rows != 3 then
return -1
endi
-if $data(db)[19] != ready then
+if $data(db1)[19] != ready then
goto check_db_ready
endi
-sql use db
+sql use db1
$loop_cnt = 0
check_vg_ready:
$loop_cnt = $loop_cnt + 1
sleep 200
-if $loop_cnt == 10 then
+if $loop_cnt == 300 then
print ====> vgroups not ready!
return -1
endi
sql show vgroups
print ===> rows: $rows
-print $data(2)[0] $data(2)[1] $data(2)[2] $data(2)[3] $data(2)[4] $data(2)[5] $data(2)[6] $data(2)[7] $data(2)[8] $data(2)[9] $data(2)[10] $data(2)[11] $data(2)[12] $data(2)[13]
-print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[10][6] $data[0][11] $data[0][12] $data[0][13]
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][6] $data[0][11] $data[0][12] $data[0][13]
if $rows != $vgroups then
return -1
endi
-if $data[0][4] == LEADER then
- if $data[0][6] != NULL then
- goto check_vg_ready
- endi
- if $data[0][8] != NULL then
- goto check_vg_ready
- endi
+if $data[0][4] == leader then
print ---- vgroup $data[0][0] leader locate on dnode $data[0][3]
- goto vg_ready
-endi
-if $data[0][6] == LEADER then
- if $data[0][4] != NULL then
- goto check_vg_ready
- endi
- if $data[0][8] != NULL then
- goto check_vg_ready
- endi
+ goto vg_ready
+elif $data[0][6] == leader then
print ---- vgroup $data[0][0] leader locate on dnode $data[0][5]
- goto vg_ready
-endi
-if $data[0][8] == LEADER then
- if $data[0][4] != NULL then
- goto check_vg_ready
- endi
- if $data[0][6] != NULL then
- goto check_vg_ready
- endi
+ goto vg_ready
+elif $data[0][8] == leader then
print ---- vgroup $data[0][0] leader locate on dnode $data[0][7]
- goto vg_ready
+ goto vg_ready
+else
+ goto check_vg_ready
endi
-vg_ready:
-print ====> create stable/child table, insert data, and select
-sql create table if not exists stb (ts timestamp, c1 int, c2 float, c3 binary(10)) tags (t1 int)
+vg_ready:
+print ====> create stable/child table
+sql create table stb (ts timestamp, c1 int, c2 float, c3 binary(10)) tags (t1 int)
sql show stables
if $rows != 1 then
return -1
endi
+
$ctbPrefix = ctb
$ntbPrefix = ntb
$tbNum = 10
-$rowNum = 10
-$tstart = 1640966400000 # 2022-01-01 00:00:00.000
-
$i = 0
while $i < $tbNum
$ctb = $ctbPrefix . $i
sql create table $ctb using stb tags( $i )
$ntb = $ntbPrefix . $i
sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(10))
-
- $x = 0
- while $x < $rowNum
- $binary = ' . binary
- $binary = $binary . $i
- $binary = $binary . '
-
- sql insert into $ctb values ($tstart , $i , $x , $binary )
- sql insert into $ntb values ($tstart , 999 , 999 , 'binary-ntb' )
- $tstart = $tstart + 1
- $x = $x + 1
- endw
-
- print ====> insert rows: $rowNum into $ctb and $ntb
-
$i = $i + 1
- $tstart = 1640966400000
endw
$totalTblNum = $tbNum * 2
+sleep 1000
sql show tables
+print ====> expect $totalTblNum and infinsert $rows in fact
if $rows != $totalTblNum then
return -1
endi
-sql select count(*) from ntb0
-print rows: $rows
-print $data[0][0] $data[0][1]
-if $data[0][0] != $rowNum then
- return -1
-endi
+start_switch_leader:
-$totalRowsOfStb = $rowNum * $tbNum
-sql select count(*) from stb
-print rows: $rows
-print $data[0][0] $data[0][1]
-if $data[0][0] != $totalRowsOfStb then
- return -1
-endi
+$switch_loop_cnt = 0
+switch_leader_to_offine_loop:
print ====> finde vnode of leader, and stop the dnode where the vnode is located, and query stb/ntb count(*)
sql show vgroups
-print $data(2)[0] $data(2)[1] $data(2)[2] $data(2)[3] $data(2)[4] $data(2)[5] $data(2)[6] $data(2)[7] $data(2)[8] $data(2)[9] $data(2)[10] $data(2)[11] $data(2)[12] $data(2)[13]
-if $data[0][4] == LEADER then
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][6] $data[0][11] $data[0][12] $data[0][13]
+if $data[0][4] == leader then
$dnodeId = $data[0][3]
-elif $data[0][6] == LEADER then
+elif $data[0][6] == leader then
$dnodeId = $data[0][5]
-elif $data[0][8] == LEADER then
+elif $data[0][8] == leader then
$dnodeId = $data[0][7]
else
print ====> no leader vnode!!!
@@ -213,148 +168,78 @@ endi
$dnodeId = dnode . $dnodeId
print ====> stop $dnodeId
system sh/exec.sh -n $dnodeId -s stop -x SIGINT
+#print ====> start $dnodeId
+#system sh/exec.sh -n $dnodeId -s start
$loop_cnt = 0
check_vg_ready_2:
$loop_cnt = $loop_cnt + 1
sleep 200
-if $loop_cnt == 10 then
+if $loop_cnt == 300 then
print ====> vgroups switch fail!!!
return -1
endi
sql show vgroups
print ===> rows: $rows
-print $data(2)[0] $data(2)[1] $data(2)[2] $data(2)[3] $data(2)[4] $data(2)[5] $data(2)[6] $data(2)[7] $data(2)[8] $data(2)[9] $data(2)[10] $data(2)[11] $data(2)[12] $data(2)[13]
-print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[10][6] $data[0][11] $data[0][12] $data[0][13]
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][6] $data[0][11] $data[0][12] $data[0][13]
if $rows != $vgroups then
return -1
endi
-if $data[0][4] == LEADER then
- if $data[0][6] != NULL then
- goto check_vg_ready_2
- endi
- if $data[0][8] != NULL then
- goto check_vg_ready_2
- endi
- print ---- vgroup $data[0][0] leader switch to dnode $data[0][3]
- goto vg_ready_2
-endi
-if $data[0][6] == LEADER then
- if $data[0][4] != NULL then
- goto check_vg_ready_2
- endi
- if $data[0][8] != NULL then
- goto check_vg_ready_2
- endi
- print ---- vgroup $data[0][0] leader switch to dnode $data[0][5]
- goto vg_ready_2
-endi
-if $data[0][8] == LEADER then
- if $data[0][4] != NULL then
- goto check_vg_ready_2
- endi
- if $data[0][6] != NULL then
- goto check_vg_ready_2
- endi
- print ---- vgroup $data[0][0] leader switch to dnode $data[0][7]
- goto vg_ready_2
-endi
-vg_ready_2:
-sql select count(*) from ntb0
-print rows: $rows
-print $data[0][0] $data[0][1]
-if $data[0][0] != $rowNum then
- return -1
-endi
-
-sql select count(*) from ctb0
-print rows: $rows
-print $data[0][0] $data[0][1]
-if $data[0][0] != $rowNum then
- return -1
-endi
+if $data[0][4] == offline then
+ print ---- vgroup $dnodeId leader switch to offline
+ goto vg_offline_1
+elif $data[0][6] == offline then
+ print ---- vgroup $dnodeId leader switch to offline
+ goto vg_offline_1
+elif $data[0][8] == offline then
+ print ---- vgroup $dnodeId leader switch to offline
+ goto vg_offline_1
+else
+ goto check_vg_ready_2
+endi
-sql select count(*) from stb
-print rows: $rows
-print $data[0][0] $data[0][1]
-if $data[0][0] != $totalRowsOfStb then
- return -1
-endi
+vg_offline_1:
-print ====> stop and start all dnode(not include the dnode where mnode is located), then query
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
-system sh/exec.sh -n dnode3 -s stop -x SIGINT
-system sh/exec.sh -n dnode4 -s stop -x SIGINT
-system sh/exec.sh -n dnode4 -s start
-system sh/exec.sh -n dnode3 -s start
-system sh/exec.sh -n dnode2 -s start
+print ====> start $dnodeId
+system sh/exec.sh -n $dnodeId -s start
-$loop_cnt = 0
-check_vg_ready_1:
-$loop_cnt = $loop_cnt + 1
+$loop_cnt1= 0
+check_vg1_ready:
+$loop_cnt1 = $loop_cnt1 + 1
sleep 200
-if $loop_cnt == 10 then
- print ====> after restart dnode, vgroups not ready!
+if $loop_cnt1 == 300 then
+ print ====> vgroups not ready!
return -1
endi
sql show vgroups
print ===> rows: $rows
-print $data(2)[0] $data(2)[1] $data(2)[2] $data(2)[3] $data(2)[4] $data(2)[5] $data(2)[6] $data(2)[7] $data(2)[8] $data(2)[9] $data(2)[10] $data(2)[11] $data(2)[12] $data(2)[13]
-print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[10][6] $data[0][11] $data[0][12] $data[0][13]
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][6] $data[0][11] $data[0][12] $data[0][13]
if $rows != $vgroups then
return -1
endi
-if $data[0][4] == LEADER then
- if $data[0][6] != NULL then
- goto check_vg_ready_1
- endi
- if $data[0][8] != NULL then
- goto check_vg_ready_1
- endi
- goto vg_ready_1
-endi
-if $data[0][6] == LEADER then
- if $data[0][4] != NULL then
- goto check_vg_ready_1
- endi
- if $data[0][8] != NULL then
- goto check_vg_ready_1
- endi
- goto vg_ready_1
-endi
-if $data[0][8] == LEADER then
- if $data[0][4] != NULL then
- goto check_vg_ready_1
- endi
- if $data[0][6] != NULL then
- goto check_vg_ready_1
- endi
- goto vg_ready_1
+if $data[0][4] == leader then
+ print ---- vgroup $data[0][0] leader locate on dnode $data[0][3]
+ goto countinu_loop
+elif $data[0][6] == leader then
+ print ---- vgroup $data[0][0] leader locate on dnode $data[0][5]
+ goto countinu_loop
+elif $data[0][8] == leader then
+ print ---- vgroup $data[0][0] leader locate on dnode $data[0][7]
+ goto countinu_loop
+else
+ goto check_vg1_ready
endi
-vg_ready_1:
-print ====> after restart dnode2/dnode3/dnode4, query stb/ntb count(*)
-sql select count(*) from ntb0
-print rows: $rows
-print $data[0][0] $data[0][1]
-if $data[0][0] != $rowNum then
- return -1
-endi
+countinu_loop:
-sql select count(*) from ctb0
-print rows: $rows
-print $data[0][0] $data[0][1]
-if $data[0][0] != $rowNum then
- return -1
+$switch_loop_cnt = $switch_loop_cnt + 1
+print $switch_loop_cnt
+if $switch_loop_cnt < 4 then
+ goto switch_leader_to_offine_loop
endi
-sql select count(*) from stb
-print rows: $rows
-print $data[0][0] $data[0][1]
-if $data[0][0] != $totalRowsOfStb then
- return -1
-endi
+stop_leader_to_offine_loop:
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
diff --git a/tests/script/tsim/sync/oneReplica1VgElectWithInsert.sim b/tests/script/tsim/sync/oneReplica1VgElectWithInsert.sim
index 7ceeb2806b320014c2b35ea5c640063e44793063..06a67b3c1bfdf183f919fb7ac9c861055f566f42 100644
--- a/tests/script/tsim/sync/oneReplica1VgElectWithInsert.sim
+++ b/tests/script/tsim/sync/oneReplica1VgElectWithInsert.sim
@@ -104,7 +104,7 @@ print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $d
if $rows != $vgroups then
return -1
endi
-if $data[0][4] == LEADER then
+if $data[0][4] == leader then
if $data[0][6] != NULL then
goto check_vg_ready
endi
@@ -114,7 +114,7 @@ if $data[0][4] == LEADER then
print ---- vgroup $data[0][0] leader locate on dnode $data[0][3]
goto vg_ready
endi
-if $data[0][6] == LEADER then
+if $data[0][6] == leader then
if $data[0][4] != NULL then
goto check_vg_ready
endi
@@ -124,7 +124,7 @@ if $data[0][6] == LEADER then
print ---- vgroup $data[0][0] leader locate on dnode $data[0][5]
goto vg_ready
endi
-if $data[0][8] == LEADER then
+if $data[0][8] == leader then
if $data[0][4] != NULL then
goto check_vg_ready
endi
@@ -208,11 +208,11 @@ switch_leader_loop:
print ====> finde vnode of leader, and stop the dnode where the vnode is located, and query stb/ntb count(*)
sql show vgroups
print $data(2)[0] $data(2)[1] $data(2)[2] $data(2)[3] $data(2)[4] $data(2)[5] $data(2)[6] $data(2)[7] $data(2)[8] $data(2)[9] $data(2)[10] $data(2)[11] $data(2)[12] $data(2)[13]
-if $data[0][4] == LEADER then
+if $data[0][4] == leader then
$dnodeId = $data[0][3]
-elif $data[0][6] == LEADER then
+elif $data[0][6] == leader then
$dnodeId = $data[0][5]
-elif $data[0][8] == LEADER then
+elif $data[0][8] == leader then
$dnodeId = $data[0][7]
else
print ====> no leader vnode!!!
@@ -238,7 +238,7 @@ print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $d
if $rows != $vgroups then
return -1
endi
-if $data[0][4] == LEADER then
+if $data[0][4] == leader then
if $data[0][6] != NULL then
goto check_vg_ready_2
endi
@@ -248,7 +248,7 @@ if $data[0][4] == LEADER then
print ---- vgroup $data[0][0] leader switch to dnode $data[0][3]
goto vg_ready_2
endi
-if $data[0][6] == LEADER then
+if $data[0][6] == leader then
if $data[0][4] != NULL then
goto check_vg_ready_2
endi
@@ -258,7 +258,7 @@ if $data[0][6] == LEADER then
print ---- vgroup $data[0][0] leader switch to dnode $data[0][5]
goto vg_ready_2
endi
-if $data[0][8] == LEADER then
+if $data[0][8] == leader then
if $data[0][4] != NULL then
goto check_vg_ready_2
endi
@@ -343,7 +343,7 @@ print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $d
if $rows != $vgroups then
return -1
endi
-if $data[0][4] == LEADER then
+if $data[0][4] == leader then
if $data[0][6] != NULL then
goto check_vg_ready_1
endi
@@ -352,7 +352,7 @@ if $data[0][4] == LEADER then
endi
goto vg_ready_1
endi
-if $data[0][6] == LEADER then
+if $data[0][6] == leader then
if $data[0][4] != NULL then
goto check_vg_ready_1
endi
@@ -361,7 +361,7 @@ if $data[0][6] == LEADER then
endi
goto vg_ready_1
endi
-if $data[0][8] == LEADER then
+if $data[0][8] == leader then
if $data[0][4] != NULL then
goto check_vg_ready_1
endi
diff --git a/tests/script/tsim/sync/oneReplica5VgElect.sim b/tests/script/tsim/sync/oneReplica5VgElect.sim
new file mode 100644
index 0000000000000000000000000000000000000000..5af48c7491208c8f3a440665fa7bb6919c373a46
--- /dev/null
+++ b/tests/script/tsim/sync/oneReplica5VgElect.sim
@@ -0,0 +1,417 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c supportVnodes -v 0
+
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+
+$loop_cnt = 0
+check_dnode_ready:
+ $loop_cnt = $loop_cnt + 1
+ sleep 200
+ if $loop_cnt == 10 then
+ print ====> dnode not ready!
+ return -1
+ endi
+sql show dnodes
+print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
+print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6]
+if $data[0][0] != 1 then
+ return -1
+endi
+if $data[0][4] != ready then
+ goto check_dnode_ready
+endi
+
+sql connect
+sql create dnode $hostname port 7200
+sql create dnode $hostname port 7300
+sql create dnode $hostname port 7400
+
+$loop_cnt = 0
+check_dnode_ready_1:
+$loop_cnt = $loop_cnt + 1
+sleep 200
+if $loop_cnt == 10 then
+ print ====> dnodes not ready!
+ return -1
+endi
+sql show dnodes
+print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
+print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6]
+if $data[0][4] != ready then
+ goto check_dnode_ready_1
+endi
+if $data[1][4] != ready then
+ goto check_dnode_ready_1
+endi
+if $data[2][4] != ready then
+ goto check_dnode_ready_1
+endi
+if $data[3][4] != ready then
+ goto check_dnode_ready_1
+endi
+
+$replica = 1
+$vgroups = 5
+
+print ============= create database
+sql create database db1 replica $replica vgroups $vgroups
+
+$loop_cnt = 0
+check_db_ready:
+$loop_cnt = $loop_cnt + 1
+sleep 200
+if $loop_cnt == 100 then
+ print ====> db1 not ready!
+ return -1
+endi
+sql show databases
+print ===> rows: $rows
+print $data(db1)[0] $data(db)[1] $data(db)[2] $data(db)[3] $data(db)[4] $data(db)[5] $data(db)[6] $data(db)[7] $data(db)[8] $data(db)[9] $data(db)[10] $data(db)[11] $data(db)[12]
+if $rows != 3 then
+ return -1
+endi
+if $data(db1)[19] != ready then
+ goto check_db_ready
+endi
+
+sql use db1
+
+$loop_cnt = 0
+check_vg_ready:
+$loop_cnt = $loop_cnt + 1
+sleep 200
+if $loop_cnt == 300 then
+ print ====> vgroups not ready!
+ return -1
+endi
+
+sql show vgroups
+print ===> rows: $rows
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][6] $data[0][11] $data[0][12] $data[0][13]
+if $rows != $vgroups then
+ return -1
+endi
+
+if $data[0][4] == leader then
+ print ---- vgroup $data[0][0] leader locate on dnode $data[0][3]
+elif $data[0][6] == leader then
+ print ---- vgroup $data[0][0] leader locate on dnode $data[0][5]
+elif $data[0][8] == leader then
+ print ---- vgroup $data[0][0] leader locate on dnode $data[0][7]
+else
+ goto check_vg_ready
+endi
+
+if $data[1][4] == leader then
+ print ---- vgroup $data[1][0] leader locate on dnode $data[0][3]
+elif $data[1][6] == leader then
+ print ---- vgroup $data[1][0] leader locate on dnode $data[0][5]
+elif $data[1][8] == leader then
+ print ---- vgroup $data[1][0] leader locate on dnode $data[0][7]
+else
+ goto check_vg_ready
+endi
+
+if $data[2][4] == leader then
+ print ---- vgroup $data[2][0] leader locate on dnode $data[0][3]
+elif $data[2][6] == leader then
+ print ---- vgroup $data[2][0] leader locate on dnode $data[0][5]
+elif $data[2][8] == leader then
+ print ---- vgroup $data[2][0] leader locate on dnode $data[0][7]
+else
+ goto check_vg_ready
+endi
+
+if $data[3][4] == leader then
+ print ---- vgroup $data[3][0] leader locate on dnode $data[0][3]
+elif $data[3][6] == leader then
+ print ---- vgroup $data[3][0] leader locate on dnode $data[0][5]
+elif $data[3][8] == leader then
+ print ---- vgroup $data[3][0] leader locate on dnode $data[0][7]
+else
+ goto check_vg_ready
+endi
+
+if $data[4][4] == leader then
+ print ---- vgroup $data[4][0] leader locate on dnode $data[0][3]
+elif $data[4][6] == leader then
+ print ---- vgroup $data[4][0] leader locate on dnode $data[0][5]
+elif $data[4][8] == leader then
+ print ---- vgroup $data[4][0] leader locate on dnode $data[0][7]
+else
+ goto check_vg_ready
+endi
+
+vg_ready:
+print ====> create stable/child table
+sql create table stb (ts timestamp, c1 int, c2 float, c3 binary(10)) tags (t1 int)
+
+sql show stables
+if $rows != 1 then
+ return -1
+endi
+
+$ctbPrefix = ctb
+$ntbPrefix = ntb
+$tbNum = 10
+$i = 0
+while $i < $tbNum
+ $ctb = $ctbPrefix . $i
+ sql create table $ctb using stb tags( $i )
+ $ntb = $ntbPrefix . $i
+ sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(10))
+ $i = $i + 1
+endw
+
+$totalTblNum = $tbNum * 2
+sleep 1000
+sql show tables
+print ====> expect $totalTblNum and infinsert $rows in fact
+if $rows != $totalTblNum then
+ return -1
+endi
+
+start_switch_leader:
+
+$switch_loop_cnt = 0
+sql show vgroups
+$dnodeId = $data[0][3]
+$dnodeId = dnode . $dnodeId
+
+switch_leader_to_offine_loop:
+
+print $dnodeId
+print ====> stop $dnodeId
+system sh/exec.sh -n $dnodeId -s stop -x SIGINT
+
+
+$loop_cnt = 0
+check_vg_ready_2:
+$loop_cnt = $loop_cnt + 1
+sleep 201
+if $loop_cnt == 300 then
+ print ====> vgroups switch fail!!!
+ return -1
+endi
+sql show vgroups
+print ===> rows: $rows
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][6] $data[0][11] $data[0][12] $data[0][13]
+print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] $data[1][7] $data[1][8] $data[1][9] $data[1][6] $data[1][11] $data[1][12] $data[1][13]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] $data[2][7] $data[2][8] $data[2][9] $data[2][6] $data[2][11] $data[2][12] $data[2][13]
+print $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] $data[3][7] $data[3][8] $data[3][9] $data[3][6] $data[3][11] $data[3][12] $data[3][13]
+print $data[4][0] $data[4][1] $data[4][2] $data[4][3] $data[4][4] $data[4][5] $data[4][6] $data[4][7] $data[4][8] $data[4][9] $data[4][6] $data[4][11] $data[4][12] $data[4][13]
+if $rows != $vgroups then
+ return -1
+endi
+
+
+vg_offline_1:
+
+print ====> start $dnodeId
+system sh/exec.sh -n $dnodeId -s start
+
+$switch_loop_cnt = $switch_loop_cnt + 1
+print $switch_loop_cnt
+
+if $switch_loop_cnt == 1 then
+ sql show vgroups
+ $dnodeId = $data[1][3]
+ $dnodeId = dnode . $dnodeId
+ goto switch_leader_to_offine_loop
+elif $switch_loop_cnt == 2 then
+ sql show vgroups
+ $dnodeId = $data[2][3]
+ $dnodeId = dnode . $dnodeId
+ goto switch_leader_to_offine_loop
+elif $switch_loop_cnt == 3 then
+ sql show vgroups
+ $dnodeId = $data[3][3]
+ $dnodeId = dnode . $dnodeId
+ goto switch_leader_to_offine_loop
+elif $switch_loop_cnt == 4 then
+ sql show vgroups
+ $dnodeId = $data[4][3]
+ $dnodeId = dnode . $dnodeId
+ goto switch_leader_to_offine_loop
+else
+ goto stop_leader_to_offine_loop
+endi
+
+stop_leader_to_offine_loop:
+
+$loop_cnt = 0
+check_vg_ready1:
+$loop_cnt = $loop_cnt + 1
+print $loop_cnt
+sleep 202
+if $loop_cnt == 300 then
+ print ====> vgroups not ready!
+ return -1
+endi
+
+sql show vgroups
+print ===> rows: $rows
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][6] $data[0][11] $data[0][12] $data[0][13]
+print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] $data[1][7] $data[1][8] $data[1][9] $data[1][6] $data[1][11] $data[1][12] $data[1][13]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] $data[2][7] $data[2][8] $data[2][9] $data[2][6] $data[2][11] $data[2][12] $data[2][13]
+print $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] $data[3][7] $data[3][8] $data[3][9] $data[3][6] $data[3][11] $data[3][12] $data[3][13]
+print $data[4][0] $data[4][1] $data[4][2] $data[4][3] $data[4][4] $data[4][5] $data[4][6] $data[4][7] $data[4][8] $data[4][9] $data[4][6] $data[4][11] $data[4][12] $data[4][13]
+if $rows != $vgroups then
+ return -1
+endi
+
+if $data[0][4] == leader then
+ print ---- vgroup $data[0][0] leader locate on dnode $data[0][3]
+elif $data[0][6] == leader then
+ print ---- vgroup $data[0][0] leader locate on dnode $data[0][5]
+elif $data[0][8] == leader then
+ print ---- vgroup $data[0][0] leader locate on dnode $data[0][7]
+else
+ goto check_vg_ready1
+endi
+
+if $data[1][4] == leader then
+ print ---- vgroup $data[1][0] leader locate on dnode $data[1][3]
+elif $data[1][6] == leader then
+ print ---- vgroup $data[1][0] leader locate on dnode $data[1][5]
+elif $data[1][8] == leader then
+ print ---- vgroup $data[1][0] leader locate on dnode $data[1][7]
+else
+ goto check_vg_ready1
+endi
+
+if $data[2][4] == leader then
+ print ---- vgroup $data[2][0] leader locate on dnode $data[2][3]
+elif $data[2][6] == leader then
+ print ---- vgroup $data[2][0] leader locate on dnode $data[2][5]
+elif $data[2][8] == leader then
+ print ---- vgroup $data[2][0] leader locate on dnode $data[2][7]
+else
+ goto check_vg_ready
+endi
+
+if $data[3][4] == leader then
+ print ---- vgroup $data[3][0] leader locate on dnode $data[3][3]
+elif $data[3][6] == leader then
+ print ---- vgroup $data[3][0] leader locate on dnode $data[3][5]
+elif $data[3][8] == leader then
+ print ---- vgroup $data[3][0] leader locate on dnode $data[3][7]
+else
+ goto check_vg_ready1
+endi
+
+if $data[4][4] == leader then
+ print ---- vgroup $data[4][0] leader locate on dnode $data[4][3]
+elif $data[4][6] == leader then
+ print ---- vgroup $data[4][0] leader locate on dnode $data[4][5]
+elif $data[4][8] == leader then
+ print ---- vgroup $data[4][0] leader locate on dnode $data[4][7]
+else
+ goto check_vg_ready1
+endi
+
+
+print ====> final test: create stable/child table
+sql create table stb1 (ts timestamp, c1 int, c2 float, c3 binary(10)) tags (t1 int)
+
+
+sql show stables
+if $rows != 2 then
+ return -1
+endi
+
+$ctbPrefix = ctb1
+$ntbPrefix = ntb1
+$tbNum = 10
+$i = 0
+while $i < $tbNum
+ $ctb = $ctbPrefix . $i
+ sql create table $ctb using stb1 tags( $i )
+ $ntb = $ntbPrefix . $i
+ sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(10))
+ $i = $i + 1
+endw
+
+
+sql show stables
+if $rows != 2 then
+ return -1
+endi
+
+sql show tables
+if $rows != 40 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
+system sh/exec.sh -n dnode3 -s stop -x SIGINT
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
+
+
+
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+
+
+
+$loop_cnt = 0
+check_dnode_ready_2:
+ $loop_cnt = $loop_cnt + 1
+ sleep 200
+ if $loop_cnt == 10 then
+ print ====> dnode not ready!
+ return -1
+ endi
+sql show dnodes
+print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
+print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6]
+if $data[0][0] != 1 then
+ return -1
+endi
+
+if $data[0][4] != ready then
+ goto check_dnode_ready_2
+endi
+if $data[1][4] != ready then
+ goto check_dnode_ready_2
+endi
+if $data[2][4] != ready then
+ goto check_dnode_ready_2
+endi
+if $data[3][4] != ready then
+ goto check_dnode_ready_2
+endi
+
+sql use db1
+sql show stables
+if $rows != 2 then
+ return -1
+endi
+
+sql show tables
+if $rows != 40 then
+ return -1
+endi
+
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
+system sh/exec.sh -n dnode3 -s stop -x SIGINT
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
diff --git a/tests/script/tsim/sync/threeReplica1VgElect.sim b/tests/script/tsim/sync/threeReplica1VgElect.sim
index 1496d7c778b475895479eb3661fae7ad86a121d3..c3e9c13793466ecdd57890d7e48a71f5b04ca190 100644
--- a/tests/script/tsim/sync/threeReplica1VgElect.sim
+++ b/tests/script/tsim/sync/threeReplica1VgElect.sim
@@ -104,7 +104,7 @@ print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $d
if $rows != $vgroups then
return -1
endi
-if $data[0][4] == LEADER then
+if $data[0][4] == leader then
if $data[0][6] != FLLOWER then
goto check_vg_ready
endi
@@ -114,7 +114,7 @@ if $data[0][4] == LEADER then
print ---- vgroup $data[0][0] leader locate on dnode $data[0][3]
goto vg_ready
endi
-if $data[0][6] == LEADER then
+if $data[0][6] == leader then
if $data[0][4] != FLLOWER then
goto check_vg_ready
endi
@@ -124,7 +124,7 @@ if $data[0][6] == LEADER then
print ---- vgroup $data[0][0] leader locate on dnode $data[0][5]
goto vg_ready
endi
-if $data[0][8] == LEADER then
+if $data[0][8] == leader then
if $data[0][4] != FLLOWER then
goto check_vg_ready
endi
@@ -199,11 +199,11 @@ endi
print ====> finde vnode of leader, and stop the dnode where the vnode is located, and query stb/ntb count(*)
sql show vgroups
print $data(2)[0] $data(2)[1] $data(2)[2] $data(2)[3] $data(2)[4] $data(2)[5] $data(2)[6] $data(2)[7] $data(2)[8] $data(2)[9] $data(2)[10] $data(2)[11] $data(2)[12] $data(2)[13]
-if $data[0][4] == LEADER then
+if $data[0][4] == leader then
$dnodeId = $data[0][3]
-elif $data[0][6] == LEADER then
+elif $data[0][6] == leader then
$dnodeId = $data[0][5]
-elif $data[0][8] == LEADER then
+elif $data[0][8] == leader then
$dnodeId = $data[0][7]
else
print ====> no leader vnode!!!
@@ -216,11 +216,11 @@ system sh/exec.sh -n $dnodeId -s stop -x SIGINT
sql show vgroups
print $data(2)[0] $data(2)[1] $data(2)[2] $data(2)[3] $data(2)[4] $data(2)[5] $data(2)[6] $data(2)[7] $data(2)[8] $data(2)[9] $data(2)[10] $data(2)[11] $data(2)[12] $data(2)[13]
-if $data[0][4] == LEADER then
+if $data[0][4] == leader then
print ---- vgroup $data[0][0] leader switch to dnode $data[0][3]
-elif $data[0][6] == LEADER then
+elif $data[0][6] == leader then
print ---- vgroup $data[0][0] leader switch to dnode $data[0][5]
-elif $data[0][8] == LEADER then
+elif $data[0][8] == leader then
print ---- vgroup $data[0][0] leader switch to dnode $data[0][7]
else
print ====> no leader vnode!!!
@@ -264,7 +264,7 @@ print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $d
if $rows != $vgroups then
return -1
endi
-if $data[0][4] == LEADER then
+if $data[0][4] == leader then
if $data[0][6] != FLLOWER then
goto check_vg_ready_1
endi
@@ -273,7 +273,7 @@ if $data[0][4] == LEADER then
endi
goto vg_ready_1
endi
-if $data[0][6] == LEADER then
+if $data[0][6] == leader then
if $data[0][4] != FLLOWER then
goto check_vg_ready_1
endi
@@ -282,7 +282,7 @@ if $data[0][6] == LEADER then
endi
goto vg_ready_1
endi
-if $data[0][8] == LEADER then
+if $data[0][8] == leader then
if $data[0][4] != FLLOWER then
goto check_vg_ready_1
endi
@@ -325,27 +325,27 @@ system sh/exec.sh -n $dnodeId -s stop -x SIGINT
check_vg_ready_3:
sql show vgroups
print $data(2)[0] $data(2)[1] $data(2)[2] $data(2)[3] $data(2)[4] $data(2)[5] $data(2)[6] $data(2)[7] $data(2)[8] $data(2)[9] $data(2)[10] $data(2)[11] $data(2)[12] $data(2)[13]
-if $data[0][4] == LEADER then
- if $data[0][6] == LEADER then
+if $data[0][4] == leader then
+ if $data[0][6] == leader then
goto check_vg_ready_3
endi
- if $data[0][8] == LEADER then
+ if $data[0][8] == leader then
goto check_vg_ready_3
endi
print ---- vgroup $data[0][0] leader locating dnode $data[0][5]
-elif $data[0][6] == LEADER then
- if $data[0][4] == LEADER then
+elif $data[0][6] == leader then
+ if $data[0][4] == leader then
goto check_vg_ready_3
endi
- if $data[0][8] == LEADER then
+ if $data[0][8] == leader then
goto check_vg_ready_3
endi
print ---- vgroup $data[0][0] leader locating dnode $data[0][7]
-elif $data[0][8] == LEADER then
- if $data[0][4] == LEADER then
+elif $data[0][8] == leader then
+ if $data[0][4] == leader then
goto check_vg_ready_3
endi
- if $data[0][6] == LEADER then
+ if $data[0][6] == leader then
goto check_vg_ready_3
endi
print ---- vgroup $data[0][0] leader locating dnode $data[0][9]
diff --git a/tests/script/tsim/sync/threeReplica1VgElectWihtInsert.sim b/tests/script/tsim/sync/threeReplica1VgElectWihtInsert.sim
index f568008a820c880628af0128bb848297d63d5ffe..3c21dff1b65a9737d96f9e0d9ae1b8c173fa3250 100644
--- a/tests/script/tsim/sync/threeReplica1VgElectWihtInsert.sim
+++ b/tests/script/tsim/sync/threeReplica1VgElectWihtInsert.sim
@@ -103,29 +103,29 @@ print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $d
if $rows != $vgroups then
return -1
endi
-if $data[0][4] == LEADER then
- if $data[0][6] != FOLLOWER then
+if $data[0][4] == leader then
+ if $data[0][6] != follower then
goto check_vg_ready
endi
- if $data[0][8] != FOLLOWER then
+ if $data[0][8] != follower then
goto check_vg_ready
endi
print ---- vgroup $data[0][0] leader locate on dnode $data[0][3]
goto vg_ready
-elif $data[0][6] == LEADER then
- if $data[0][4] != FOLLOWER then
+elif $data[0][6] == leader then
+ if $data[0][4] != follower then
goto check_vg_ready
endi
- if $data[0][8] != FOLLOWER then
+ if $data[0][8] != follower then
goto check_vg_ready
endi
print ---- vgroup $data[0][0] leader locate on dnode $data[0][5]
goto vg_ready
-elif $data[0][8] == LEADER then
- if $data[0][4] != FOLLOWER then
+elif $data[0][8] == leader then
+ if $data[0][4] != follower then
goto check_vg_ready
endi
- if $data[0][6] != FOLLOWER then
+ if $data[0][6] != follower then
goto check_vg_ready
endi
print ---- vgroup $data[0][0] leader locate on dnode $data[0][7]
@@ -155,28 +155,13 @@ while $i < $tbNum
sql create table $ctb using stb tags( $i )
$ntb = $ntbPrefix . $i
sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(10))
-
-# $x = 0
-# while $x < $rowNum
-# $binary = ' . binary
-# $binary = $binary . $i
-# $binary = $binary . '
-#
-# sql insert into $ctb values ($tstart , $i , $x , $binary )
-# sql insert into $ntb values ($tstart , 999 , 999 , 'binary-ntb' )
-# $tstart = $tstart + 1
-# $x = $x + 1
-# endw
-
-# print ====> insert rows: $rowNum into $ctb and $ntb
-
$i = $i + 1
-# $tstart = 1640966400000
endw
$totalTblNum = $tbNum * 2
-print ====>totalTblNum:$totalTblNum
+sleep 1000
sql show tables
+print ====> expect $totalTblNum and infinsert $rows in fact
if $rows != $totalTblNum then
return -1
endi
@@ -208,11 +193,11 @@ switch_leader_loop:
print ====> finde vnode of leader, and stop the dnode where the vnode is located, and query stb/ntb count(*)
sql show vgroups
print $data(2)[0] $data(2)[1] $data(2)[2] $data(2)[3] $data(2)[4] $data(2)[5] $data(2)[6] $data(2)[7] $data(2)[8] $data(2)[9] $data(2)[10] $data(2)[11] $data(2)[12] $data(2)[13]
-if $data[0][4] == LEADER then
+if $data[0][4] == leader then
$dnodeId = $data[0][3]
-elif $data[0][6] == LEADER then
+elif $data[0][6] == leader then
$dnodeId = $data[0][5]
-elif $data[0][8] == LEADER then
+elif $data[0][8] == leader then
$dnodeId = $data[0][7]
else
print ====> no leader vnode!!!
@@ -222,6 +207,9 @@ endi
$dnodeId = dnode . $dnodeId
print ====> stop $dnodeId
system sh/exec.sh -n $dnodeId -s stop -x SIGINT
+sleep 1000
+print ====> start $dnodeId
+system sh/exec.sh -n $dnodeId -s start
$loop_cnt = 0
check_vg_ready_2:
@@ -238,32 +226,32 @@ print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $d
if $rows != $vgroups then
return -1
endi
-if $data[0][4] == LEADER then
- if $data[0][6] != FOLLOWER then
+if $data[0][4] == leader then
+ if $data[0][6] != follower then
goto check_vg_ready_2
endi
- if $data[0][8] != FOLLOWER then
+ if $data[0][8] != follower then
goto check_vg_ready_2
endi
- print ---- vgroup $data[0][0] leader switch to dnode $data[0][3]
+ print ---- vgroup $dnodeId leader switch to dnode $data[0][3]
goto vg_ready_2
-elif $data[0][6] == LEADER then
- if $data[0][4] != FOLLOWER then
+elif $data[0][6] == leader then
+ if $data[0][4] != follower then
goto check_vg_ready_2
endi
- if $data[0][8] != FOLLOWER then
+ if $data[0][8] != follower then
goto check_vg_ready_2
endi
- print ---- vgroup $data[0][0] leader switch to dnode $data[0][5]
+ print ---- vgroup $dnodeId leader switch to dnode $data[0][5]
goto vg_ready_2
-elif $data[0][8] == LEADER then
- if $data[0][4] != FOLLOWER then
+elif $data[0][8] == leader then
+ if $data[0][4] != follower then
goto check_vg_ready_2
endi
- if $data[0][6] != FOLLOWER then
+ if $data[0][6] != follower then
goto check_vg_ready_2
endi
- print ---- vgroup $data[0][0] leader switch to dnode $data[0][7]
+ print ---- vgroup $dnodeId leader switch to dnode $data[0][7]
goto vg_ready_2
else
goto check_vg_ready_2
@@ -272,8 +260,6 @@ vg_ready_2:
$switch_loop_cnt = $switch_loop_cnt + 1
if $switch_loop_cnt < 3 then
- print ====> start $dnodeId
- system sh/exec.sh -n $dnodeId -s start
goto switch_leader_loop
endi
@@ -343,29 +329,29 @@ print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $d
if $rows != $vgroups then
return -1
endi
-if $data[0][4] == LEADER then
- if $data[0][6] != FOLLOWER then
+if $data[0][4] == leader then
+ if $data[0][6] != follower then
goto check_vg_ready_1
endi
- if $data[0][8] != FOLLOWER then
+ if $data[0][8] != follower then
goto check_vg_ready_1
endi
goto vg_ready_1
endi
-if $data[0][6] == LEADER then
- if $data[0][4] != FOLLOWER then
+if $data[0][6] == leader then
+ if $data[0][4] != follower then
goto check_vg_ready_1
endi
- if $data[0][8] != FOLLOWER then
+ if $data[0][8] != follower then
goto check_vg_ready_1
endi
goto vg_ready_1
endi
-if $data[0][8] == LEADER then
- if $data[0][4] != FOLLOWER then
+if $data[0][8] == leader then
+ if $data[0][4] != follower then
goto check_vg_ready_1
endi
- if $data[0][6] != FOLLOWER then
+ if $data[0][6] != follower then
goto check_vg_ready_1
endi
goto vg_ready_1
@@ -404,27 +390,27 @@ system sh/exec.sh -n $dnodeId -s stop -x SIGINT
check_vg_ready_3:
sql show vgroups
print $data(2)[0] $data(2)[1] $data(2)[2] $data(2)[3] $data(2)[4] $data(2)[5] $data(2)[6] $data(2)[7] $data(2)[8] $data(2)[9] $data(2)[10] $data(2)[11] $data(2)[12] $data(2)[13]
-if $data[0][4] == LEADER then
- if $data[0][6] == LEADER then
+if $data[0][4] == leader then
+ if $data[0][6] == leader then
goto check_vg_ready_3
endi
- if $data[0][8] == LEADER then
+ if $data[0][8] == leader then
goto check_vg_ready_3
endi
print ---- vgroup $data[0][0] leader locating dnode $data[0][5]
-elif $data[0][6] == LEADER then
- if $data[0][4] == LEADER then
+elif $data[0][6] == leader then
+ if $data[0][4] == leader then
goto check_vg_ready_3
endi
- if $data[0][8] == LEADER then
+ if $data[0][8] == leader then
goto check_vg_ready_3
endi
print ---- vgroup $data[0][0] leader locating dnode $data[0][7]
-elif $data[0][8] == LEADER then
- if $data[0][4] == LEADER then
+elif $data[0][8] == leader then
+ if $data[0][4] == leader then
goto check_vg_ready_3
endi
- if $data[0][6] == LEADER then
+ if $data[0][6] == leader then
goto check_vg_ready_3
endi
print ---- vgroup $data[0][0] leader locating dnode $data[0][9]
diff --git a/tests/script/tsim/testsuit.sim b/tests/script/tsim/testsuit.sim
index e32abe4b7ff8850f9818113bed5f006c2182392e..0b1f0df04e9db6af2547cc1da49873082b2682b3 100644
--- a/tests/script/tsim/testsuit.sim
+++ b/tests/script/tsim/testsuit.sim
@@ -77,3 +77,4 @@ run sma/tsmaCreateInsertData.sim
run sma/rsmaCreateInsertQuery.sim
run valgrind/checkError.sim
run bnode/basic1.sim
+
diff --git a/tests/script/tsim/trans/create_db.sim b/tests/script/tsim/trans/create_db.sim
index 0db5add88aeb6ea217cfe932ab3600398d3dd886..e13014f9c02370f65cf1e1700b84efdc4bcdcce2 100644
--- a/tests/script/tsim/trans/create_db.sim
+++ b/tests/script/tsim/trans/create_db.sim
@@ -26,7 +26,7 @@ if $data00 != 1 then
return -1
endi
-if $data02 != LEADER then
+if $data02 != leader then
return -1
endi
@@ -64,7 +64,7 @@ if $rows != 1 then
return -1
endi
-if $data[0][0] != 2 then
+if $data[0][0] != 7 then
return -1
endi
@@ -76,14 +76,6 @@ if $data[0][3] != d1 then
return -1
endi
-if $data[0][4] != create-db then
- return -1
-endi
-
-if $data[0][7] != @Unable to establish connection@ then
- return -1
-endi
-
sql_error create database d1 vgroups 2;
print =============== start dnode2
@@ -114,7 +106,7 @@ if $rows != 1 then
return -1
endi
-if $data[0][0] != 4 then
+if $data[0][0] != 9 then
return -1
endi
@@ -125,19 +117,11 @@ endi
if $data[0][3] != d2 then
return -1
endi
-
-if $data[0][4] != create-db then
- return -1
-endi
-
-if $data[0][7] != @Unable to establish connection@ then
- return -1
-endi
-
+return
sql_error create database d2 vgroups 2;
print =============== kill transaction
-sql kill transaction 4;
+sql kill transaction 9;
sleep 2000
sql show transactions
diff --git a/tests/script/tsim/trans/lossdata1.sim b/tests/script/tsim/trans/lossdata1.sim
new file mode 100644
index 0000000000000000000000000000000000000000..44785934e54e9fadbaa1b65bab7ef37808b18a69
--- /dev/null
+++ b/tests/script/tsim/trans/lossdata1.sim
@@ -0,0 +1,33 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c transPullupInterval -v 1
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+print ======= backup sdbdata
+system sh/exec.sh -n dnode1 -s stop
+system cp ../../../../sim/dnode1/data/mnode/data/sdb.data ../../../../sim/dnode1/data/mnode/data/sdb.data.bak1
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+print =============== create user1
+sql create user user1 PASS 'user1'
+sql create user user2 PASS 'user2'
+sql show users
+if $rows != 3 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop
+
+print ======= restore backup data
+system cp ../../../../sim/dnode1/data/mnode/data/sdb.data.bak1 ../../../../sim/dnode1/data/mnode/data/sdb.data
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+sql show users
+if $rows != 3 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop
\ No newline at end of file
diff --git a/tests/script/tsim/valgrind/checkError.sim b/tests/script/tsim/valgrind/checkError.sim
index 97d16dba9663a77fdf96fe1741d045765a306d42..5790437a671e61dedb90b3384de08b145f2a4cac 100644
--- a/tests/script/tsim/valgrind/checkError.sim
+++ b/tests/script/tsim/valgrind/checkError.sim
@@ -71,7 +71,7 @@ print ====> start to check if there are ERRORS in vagrind log file for each dnod
# -n : dnode[x] be check
system_content sh/checkValgrind.sh -n dnode1
print cmd return result----> [ $system_content ]
-if $system_content <= 1 then
+if $system_content <= 3 then
return 0
endi
diff --git a/tests/script/unique/account/account_create.sim b/tests/script/unique/account/account_create.sim
deleted file mode 100644
index e36de29e7c5835ddc78a9f3eab4b2b4d34634c42..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/account_create.sim
+++ /dev/null
@@ -1,80 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 0
-system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
-sql connect
-
-print ============================ dnode1 start
-
-$i = 0
-$dbPrefix = acdb
-$tbPrefix = actb
-$db = $dbPrefix . $i
-$tb = $tbPrefix . $i
-$accountPrefix = acac
-
-print =============== step1-4
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-sql show users
-if $rows != 3 then
- return -1
-endi
-
-$i = 0
-$acc = $accountPrefix . $i
-sql_error create account $acc PASS pass123
-sql create account $acc PASS 'pass123'
-#sql create account $acc PASS 'pass123' -x step1
-# return -1
-#step1:
-sql create user $acc PASS 'pass123' -x step2
- return -1
-step2:
-
-sql show accounts
-if $rows != 2 then
- return -1
-endi
-
-sql show users
-if $rows != 3 then
- return -1
-endi
-
-print =============== step5-6
-sql drop account $acc
-sql drop account $acc -x step5
- return -1
-step5:
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-sql show users
-if $rows != 3 then
- return -1
-endi
-
-print =============== step7
-sql create account $acc PASS 'pass123'
-#sql create account $acc PASS 'pass123' -x step7
-# return -1
-#step7:
-
-sql show accounts
-if $rows != 2 then
- return -1
-endi
-
-sql drop account $acc
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/account/account_delete.sim b/tests/script/unique/account/account_delete.sim
deleted file mode 100644
index d99a8b559dc6e04e4d6996e042d915671781d699..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/account_delete.sim
+++ /dev/null
@@ -1,99 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 0
-system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
-sql connect
-
-print ============= step1
-sql create account oroot pass 'taosdata'
-sql close
-sql connect oroot
-sleep 2000
-
-print ============= step2
-sql create user read pass 'taosdata'
-sql create user write pass 'taosdata'
-
-sql create database d1
-sql create database d2
-sql create table d1.t1 (ts timestamp, i int)
-sql create table d2.t2 (ts timestamp, i int)
-sql insert into d1.t1 values(now, 1)
-sql insert into d2.t2 values(now, 1)
-sql insert into d2.t2 values(now+1s, 2)
-
-sql show databases
-if $rows != 2 then
- return -1
-endi
-sql show users
-if $rows != 4 then
- return -1
-endi
-sql select * from d1.t1
-if $rows != 1 then
- return -1
-endi
-sql select * from d2.t2
-if $rows != 2 then
- return -1
-endi
-
-print ============= step3
-sql close
-sql connect
-sleep 2000
-
-sql show databases
-if $rows != 0 then
- return -1
-endi
-sql show dnodes
-print $data00 $data01 $data02 $data03
-if $data02 != 2 then
- return -1
-endi
-sql drop account oroot
-
-print ============= step4
-$x = 0
-show4:
- $x = $x + 1
- sleep 2000
- if $x == 10 then
- return -1
- endi
-
-sql show dnodes
-if $data02 != 0 then
- goto show4
-endi
-
-print ============= step5
-sql create account oroot pass 'taosdata'
-
-sql close
-sql connect oroot
-sleep 2000
-
-sql show databases
-if $rows != 0 then
- return -1
-endi
-sql show users
-if $rows != 2 then
- return -1
-endi
-
-sql close
-sql connect
-sleep 2000
-sql drop account oroot
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/account/account_len.sim b/tests/script/unique/account/account_len.sim
deleted file mode 100644
index f8379bdf954bdde122e68585b973f4957ef15739..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/account_len.sim
+++ /dev/null
@@ -1,92 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 0
-system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
-sql connect
-
-$i = 0
-$dbPrefix = aldb
-$tbPrefix = altb
-$db = $dbPrefix . $i
-$tb = $tbPrefix . $i
-
-print =============== step1
-sql drop account ac -x step0
- return -1
-step0:
-
-sql create account PASS 123 -x step1
- return -1
-step1:
-
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-
-print =============== step2
-sql drop account a -x step2
-step2:
-sql create account a PASS '123'
-sql show accounts
-if $rows != 2 then
- return -1
-endi
-
-sql drop account a
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-
-print =============== step3
-sql drop account abc01234567890123456789 -x step3
-step3:
-sql create account abc01234567890123456789 PASS '123'
-sql show accounts
-if $rows != 2 then
- return -1
-endi
-
-sql drop account abc01234567890123456789
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-
-print =============== step4
-sql create account abcd01234567890123456789012345689012345 PASS '123' -x step4
- return -1
-step4:
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-
-print =============== step5
-sql drop account 123 -x step5
-step5:
-sql create account 123 pass '123' -x step51
- return -1
-step51:
-
-sql create account a123 PASS '123'
-sql show accounts
-if $rows != 2 then
- return -1
-endi
-
-sql drop account a123
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-
-sql show users
-if $rows != 3 then
- return -1
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/account/authority.sim b/tests/script/unique/account/authority.sim
deleted file mode 100644
index 8f2408de1429a8ea34add79e335f6bf7f42ca2b0..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/authority.sim
+++ /dev/null
@@ -1,346 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 0
-system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
-sql connect
-
-print ============= step1
-
-sql create user read pass 'taosdata'
-sql create user write pass 'taosdata'
-sql create user manage pass 'taosdata'
-
-sql create user a PASS 'ade' privilege -x step11
- return -1
-step11:
-
-sql create user a PASS 'ade' privilege a -x step12
- return -1
-step12:
-
-sql create user a PASS 'ade' privilege read -x step13
- return -1
-step13:
-
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-sql show users
-if $rows != 6 then
- return -1
-endi
-
-sql alter user read privilege read
-sql alter user write privilege write
-sql_error alter user manage privilege super
-
-print ============= step2
-sql close
-sql connect write
-sleep 2000
-
-sql create database d1
-sql create database d2
-sql create table d1.t1 (ts timestamp, i int)
-sql create table d2.t2 (ts timestamp, i int)
-sql insert into d1.t1 values(now, 1)
-sql insert into d2.t2 values(now, 1)
-sql insert into d2.t2 values(now+1s, 2)
-
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-sql show users
-if $rows != 6 then
- return -1
-endi
-sql show databases
-if $rows != 2 then
- return -1
-endi
-sql select * from d1.t1
-if $rows != 1 then
- return -1
-endi
-sql select * from d2.t2
-if $rows != 2 then
- return -1
-endi
-
-sql create account t1 pass 'taosdata' -x step21
- return -1
-step21:
-
-sql create user t1 pass 'taosdata' -x step22
- return -1
-step22:
-
-sql alter user read pass 'taosdata' -x step23
- return -1
-step23:
-
-sql create dnode $hostname2 -x step24
- return -1
-step24:
-
-sql drop dnode $hostname2 -x step25
- return -1
-step25:
-
-sql create mnode 192.168.0.2 -x step26
- return -1
-step26:
-
-sql drop mnode 192.168.0.2 -x step27
- return -1
-step27:
-
-sql drop account root -x step28
- return -1
-step28:
-
-sql alter user write pass 'taosdata'
-
-print ============= step3
-sql close
-sql connect read
-sleep 2000
-
-sql create database d3 -x step31
- return -1
-step31:
-
-sql create table d1.t3 (ts timestamp, i int) -x step32
- return -1
-step32:
-
-#sql insert into d1.t1 values(now, 2) -x step33
-# return -1
-#step33:
-
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-sql show users
-if $rows != 6 then
- return -1
-endi
-sql show databases
-if $rows != 2 then
- return -1
-endi
-sql select * from d1.t1
-if $rows != 1 then
- return -1
-endi
-
-sql select * from d2.t2
-if $rows != 2 then
- return -1
-endi
-
-sql sql create account t1 pass 'taosdata' -x step34
- return -1
-step34:
-
-sql sql create user t1 pass 'taosdata' -x step35
- return -1
-step35:
-
-print ============= step4
-sql close
-sql connect manage
-sleep 2000
-
-sql create database d3
-sql create database d4
-sql create table d3.t3 (ts timestamp, i int)
-sql create table d4.t4 (ts timestamp, i int)
-
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-sql show users
-if $rows != 6 then
- return -1
-endi
-sql show databases
-if $rows != 4 then
- return -1
-endi
-sql select * from d1.t1
-if $rows != 1 then
- return -1
-endi
-sql select * from d2.t2
-if $rows != 2 then
- return -1
-endi
-
-sql create account other pass 'taosdata' -x step41
- return -1
-step41:
-
-sql close
-sql connect
-sleep 2000
-sql create account other pass 'taosdata'
-
-print ============= step5
-sql close
-sql connect other
-sleep 2000
-sql create user read pass 'taosdata' -x step51
- return -1
-step51:
-sql create other write pass 'taosdata' -x step52
- return -1
-step52:
-
-sql create user oread pass 'taosdata'
-sql create user owrite pass 'taosdata'
-sql create user omanage pass 'taosdata'
-
-sql show users
-print show users $rows
-if $rows != 5 then
- return -1
-endi
-
-sql alter user oread privilege read
-sql alter user owrite privilege write
-sql alter user oroot privilege super -x step53
- return -1
-step53:
-sql alter user read privilege read -x step54
- return -1
-step54:
-
-print ============= step6
-sql close
-sql connect owrite
-sleep 2000
-sql reset query cache
-sleep 1000
-sql create database d1
-sql create database d3
-sql create table d1.t1 (ts timestamp, i int)
-sql create table d3.t3 (ts timestamp, i int)
-sql insert into d1.t1 values(now, 11)
-sql insert into d3.t3 values(now, 11)
-sql insert into d3.t3 values(now+1s, 12)
-
-sql show databases
-if $rows != 2 then
- return -1
-endi
-sql select * from d1.t1
-if $rows != 1 then
- return -1
-endi
-sql select * from d2.t2 -x step6
- return -1
-step6:
-sql select * from d3.t3
-if $rows != 2 then
- return -1
-endi
-
-sql sql create account t1 pass 'taosdata' -x step61
- return -1
-step61:
-
-sql sql create user t1 pass 'taosdata' -x step62
- return -1
-step62:
-
-print ============= step7
-sql close
-sql connect oread
-sleep 2000
-
-sql create database d7 -x step71
- return -1
-step71:
-
-sql show databases
-if $rows != 2 then
- return -1
-endi
-sql select * from d1.t1
-if $rows != 1 then
- return -1
-endi
-sql select * from d2.t2 -x step72
- return -1
-step72:
-sql select * from d3.t3
-if $rows != 2 then
- return -1
-endi
-
-sql sql create account t1 pass 'taosdata' -x step73
- return -1
-step73:
-
-sql sql create user t1 pass 'taosdata' -x step74
- return -1
-step74:
-
-print ============= step8
-sql close
-sql connect omanage
-sleep 2000
-
-sql create account t1 pass 'taosdata' -x step81
- return -1
-step81:
-
-sql create database d4
-sql create table d4.t4 (ts timestamp, i int)
-
-sql show databases
-if $rows != 3 then
- return -1
-endi
-sql select * from d1.t1
-if $rows != 1 then
- return -1
-endi
-sql select * from d2.t2 -x step82
- return -1
-step82:
-sql select * from d3.t3
-if $rows != 2 then
- return -1
-endi
-
-print ============= step9
-sql close
-sql connect
-sleep 2000
-sql show databases
-if $rows != 4 then
- return -1
-endi
-
-sql drop account other
-sql drop user read
-sql drop user manage
-sql drop user write
-
-sql close
-sql connect
-sleep 2000
-sql drop database d1
-sql drop database d2
-sql drop database d3
-sql drop database d4
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/account/basic.sim b/tests/script/unique/account/basic.sim
deleted file mode 100644
index 00e706a4482d9fa57ed2f97a9995ce84d3667fa1..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/basic.sim
+++ /dev/null
@@ -1,46 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/exec.sh -n dnode1 -s start
-sleep 2000
-sql connect
-
-print =============== show accounts
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-
-print $data00 $data01 $data02
-
-print =============== create account1
-sql create account account1 PASS 'account1'
-sql show accounts
-if $rows != 2 then
- return -1
-endi
-
-print $data00 $data01 $data02
-print $data10 $data11 $data22
-
-print =============== create account2
-sql create account account2 PASS 'account2'
-sql show accounts
-if $rows != 3 then
- return -1
-endi
-
-print $data00 $data01 $data02
-print $data10 $data11 $data22
-print $data20 $data11 $data22
-
-print =============== drop account1
-sql drop account account1
-sql show accounts
-if $rows != 2 then
- return -1
-endi
-
-print $data00 $data01 $data02
-print $data10 $data11 $data22
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/account/paras.sim b/tests/script/unique/account/paras.sim
deleted file mode 100644
index 102f5b6a381e5100b35a4f0125b1318bcb8b1d76..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/paras.sim
+++ /dev/null
@@ -1,114 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/exec.sh -n dnode1 -s start
-sleep 2000
-sql connect
-
-print =============== show accounts
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-
-print $data00 $data01 $data02 $data03 $data04 $data05 $data06
-if $data00 != root then
- return -1
-endi
-if $data02 != 3/128 then
- return -1
-endi
-if $data03 != 0/128 then
- return -1
-endi
-if $data04 != 0/2147483647 then
- return -1
-endi
-if $data05 != 0/1000 then
- return -1
-endi
-if $data06 != 0.000/unlimited then
- return -1
-endi
-
-print =============== create account
-sql create account hou pass "hou" tseries 80000 storage 10737418240 streams 10 qtime 3600 dbs 3 users 3 conns 10
-sql show accounts
-if $rows != 2 then
- return -1
-endi
-
-print $data10 $data11 $data12 $data13 $data14 $data15 $data16
-if $data10 != hou then
- return -1
-endi
-if $data12 != 2/3 then
- return -1
-endi
-if $data13 != 0/3 then
- return -1
-endi
-if $data14 != 0/80000 then
- return -1
-endi
-if $data15 != 0/10 then
- return -1
-endi
-if $data16 != 0.000/10.000 then
- return -1
-endi
-
-print =============== alter account
-sql alter account hou pass "hou" tseries 8000 streams 10 dbs 5 users 5
-sql show accounts
-if $rows != 2 then
- return -1
-endi
-
-print $data10 $data11 $data12 $data13 $data14 $data15 $data16
-if $data10 != hou then
- return -1
-endi
-if $data12 != 2/5 then
- return -1
-endi
-if $data13 != 0/5 then
- return -1
-endi
-if $data14 != 0/8000 then
- return -1
-endi
-if $data15 != 0/10 then
- return -1
-endi
-if $data16 != 0.000/10.000 then
- return -1
-endi
-
-print =============== alter account
-sql create account hou pass "hou" tseries 8000 streams 10 dbs 5 users 6
-sql show accounts
-if $rows != 2 then
- return -1
-endi
-
-print $data10 $data11 $data12 $data13 $data14 $data15 $data16
-if $data10 != hou then
- return -1
-endi
-if $data12 != 2/6 then
- return -1
-endi
-if $data13 != 0/5 then
- return -1
-endi
-if $data14 != 0/8000 then
- return -1
-endi
-if $data15 != 0/10 then
- return -1
-endi
-if $data16 != 0.000/10.000 then
- return -1
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/account/pass_alter.sim b/tests/script/unique/account/pass_alter.sim
deleted file mode 100644
index 8b857b014a292d53536c5acf2a00daa15be11239..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/pass_alter.sim
+++ /dev/null
@@ -1,116 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 0
-system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
-sql connect
-
-print ============= step1
-sql create user read pass 'taosdata1'
-sql create user write pass 'taosdata1'
-
-sql alter user read pass 'taosdata'
-sql alter user write pass 'taosdata'
-
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-sql show users
-if $rows != 5 then
- return -1
-endi
-
-print ============= step2
-sql close
-sql connect read
-sleep 2000
-sql alter user read pass 'taosdata'
-sql alter user write pass 'taosdata1' -x step2
- return -1
-step2:
-
-
-print ============= step3
-sql close
-sql connect write
-sleep 2000
-sql alter user write pass 'taosdata'
-sql alter user read pass 'taosdata' -x step3
- return -1
-step3:
-
-print ============= step4
-sql close
-sleep 1000
-sql connect
-sleep 2000
-sql create account oroot pass 'taosdata'
-sql show accounts
-if $rows != 2 then
- return -1
-endi
-sql show users
-if $rows != 5 then
- return -1
-endi
-
-print ============= step5
-sql close
-sql connect oroot
-sleep 2000
-
-sql create user oread pass 'taosdata1'
-sql create user owrite pass 'taosdata1'
-sql alter user oread pass 'taosdata'
-sql alter user owrite pass 'taosdata'
-
-sql create user read pass 'taosdata1' -x step51
- return -1
-step51:
-sql alter user read pass 'taosdata1' -x step52
- return -1
-step52:
-
-sql show accounts -x step53
- return -1
-step53:
-sql show users
-print show users $rows
-if $rows != 4 then
- return -1
-endi
-
-print ============= step6
-sql close
-sql connect oread
-sleep 2000
-sql alter user oread pass 'taosdata'
-sql alter user owrite pass 'taosdata1' -x step6
- return -1
-step6:
-
-
-print ============= step7
-sql close
-sql connect owrite
-sleep 2000
-sql alter user owrite pass 'taosdata'
-sql alter user oread pass 'taosdata' -x step7
- return -1
-step7:
-
-print ============= step8
-sql close
-sql connect
-sleep 2000
-sql alter user oread pass 'taosdata'
-sql alter user owrite pass 'taosdata'
-sql alter user oroot pass 'taosdata'
-
-sql drop account oroot
-sql drop user read
-sql drop user write
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/account/pass_len.sim b/tests/script/unique/account/pass_len.sim
deleted file mode 100644
index f4ceb76f7b8b41873217bd11ae2c3d385386b0e9..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/pass_len.sim
+++ /dev/null
@@ -1,81 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 0
-system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
-sql connect
-
-$i = 0
-$dbPrefix = apdb
-$tbPrefix = aptb
-$db = $dbPrefix . $i
-$tb = $tbPrefix . $i
-$userPrefix = apusr
-
-print =============== step1
-$i = 0
-$user = $userPrefix . $i
-
-sql drop user $user -x step11
- return -1
-step11:
-
-sql create user $user PASS -x step12
- return -1
-step12:
-
-sql create user $user PASS 'taosdata'
-
-sql show users
-if $rows != 4 then
- return -1
-endi
-
-print =============== step2
-$i = 1
-$user = $userPrefix . $i
-sql drop user $user -x step2
-step2:
-sql create user $user PASS '1'
-sql show users
-if $rows != 5 then
- return -1
-endi
-
-print =============== step3
-$i = 2
-$user = $userPrefix . $i
-sql drop user $user -x step3
-step3:
-
-sql create user $user PASS 'abc0123456789'
-sql show users
-if $rows != 6 then
- return -1
-endi
-
-print =============== step4
-$i = 3
-$user = $userPrefix . $i
-sql create user $user PASS 'abcd012345678901234567891234567890' -x step4
- return -1
-step4:
-sql show users
-if $rows != 6 then
- return -1
-endi
-
-$i = 0
-while $i < 3
- $user = $userPrefix . $i
- sql drop user $user
- $i = $i + 1
-endw
-
-sql show users
-if $rows != 3 then
- return -1
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/account/testSuite.sim b/tests/script/unique/account/testSuite.sim
deleted file mode 100644
index 9d4141cfe0c086f9a8863fffb00a9cb0f410e265..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/testSuite.sim
+++ /dev/null
@@ -1,11 +0,0 @@
-run unique/account/account_create.sim
-run unique/account/account_delete.sim
-run unique/account/account_len.sim
-run unique/account/authority.sim
-run unique/account/basic.sim
-run unique/account/paras.sim
-run unique/account/pass_alter.sim
-run unique/account/pass_len.sim
-run unique/account/usage.sim
-run unique/account/user_create.sim
-run unique/account/user_len.sim
diff --git a/tests/script/unique/account/usage.sim b/tests/script/unique/account/usage.sim
deleted file mode 100644
index 3b9c20b159a6237f469fc1e48b5b3a3f4ca5f7b8..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/usage.sim
+++ /dev/null
@@ -1,154 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/exec.sh -n dnode1 -s start
-#system sh/exec.sh -n monitor -s 1
-system sh/exec.sh -n monitorInterval -s 1
-sleep 2000
-sql connect
-
-print =============== show accounts
-
-print =============== create account
-sql alter account root pass "taosdata" tseries 8000 streams 10 dbs 5 users 5
-sql show accounts
-print $data00 $data01 $data02 $data03 $data04 $data05 $data06
-if $data00 != root then
- return -1
-endi
-if $data02 != 3/5 then
- return -1
-endi
-if $data03 != 0/5 then
- return -1
-endi
-if $data04 != 0/8000 then
- return -1
-endi
-if $data05 != 0/10 then
- return -1
-endi
-if $data06 != 0.000/unlimited then
- return -1
-endi
-
-print =============== check usage account
-sql create database d1 wal 2
-sql create database d2 wal 2
-sql create database d3 wal 2
-sql create database d4 wal 2
-sql create database d5 wal 2
-
-sql create table d1.t1 (ts timestamp, i int);
-sql create user u1 pass "u1"
-
-sql show accounts
-print $data10 $data11 $data12 $data13 $data14 $data15 $data16
-if $data00 != root then
- return -1
-endi
-if $data02 != 4/5 then
- return -1
-endi
-if $data03 != 5/5 then
- return -1
-endi
-if $data04 != 1/8000 then
- return -1
-endi
-if $data05 != 0/10 then
- return -1
-endi
-if $data06 != 0.000/unlimited then
- return -1
-endi
-
-print =============== step2
-sql alter account root pass "taosdata" tseries 10 storage 1073741824 streams 10 dbs 5 users 5
-sql show accounts
-print $data00 $data01 $data02 $data03 $data04 $data05 $data06
-if $data00 != root then
- return -1
-endi
-if $data02 != 4/5 then
- return -1
-endi
-if $data03 != 5/5 then
- return -1
-endi
-if $data04 != 1/10 then
- return -1
-endi
-if $data05 != 0/10 then
- return -1
-endi
-if $data06 != 0.000/1.000 then
- return -1
-endi
-
-print =============== step3
-sql alter account root pass "taosdata" tseries 10 storage 16 streams 10 dbs 5 users 5
-sql show accounts
-print $data00 $data01 $data02 $data03 $data04 $data05 $data06
-if $data00 != root then
- return -1
-endi
-if $data02 != 4/5 then
- return -1
-endi
-if $data03 != 5/5 then
- return -1
-endi
-if $data04 != 1/10 then
- return -1
-endi
-if $data05 != 0/10 then
- return -1
-endi
-if $data06 != 0.000/0.000 then
- return -1
-endi
-
-print =============== step4
-sql insert into d1.t1 values(now + 1s, 1)
-sql insert into d1.t1 values(now + 2s, 2)
-
-sleep 10000
-print no write auth
-sql_error insert into d1.t1 values(now + 3s, 2)
-sql_error insert into d1.t1 values(now + 4s, 2)
-
-sql alter account root pass "taosdata" tseries 10 storage 36 streams 10 dbs 5 users 5
-sleep 10000
-print has write auth
-sql insert into d1.t1 values(now + 5s, 1)
-sql insert into d1.t1 values(now + 6s, 2)
-
-# no write auth
-sleep 10000
-print no write auth
-sql_error insert into d1.t1 values(now + 7s, 2)
-sql_error insert into d1.t1 values(now + 8s, 2)
-
-print =============== step5
-sql alter account root pass "taosdata" tseries 10 storage 3600 streams 10 dbs 5 users 5 state all
-sleep 10000
-
-sql insert into d1.t1 values(now + 11s, 1)
-sql insert into d1.t1 values(now + 12s, 2)
-
-sql alter account root pass "taosdata" tseries 10 storage 3600 streams 10 dbs 5 users 5 state no
-sleep 10000
-print no write auth
-sql_error insert into d1.t1 values(now + 13s, 2)
-sql_error insert into d1.t1 values(now + 14s, 2)
-
-sql alter account root pass "taosdata" tseries 10 storage 3600 streams 10 dbs 5 users 5 state all
-sleep 10000
-print has write auth
-sql insert into d1.t1 values(now + 15s, 1)
-sql insert into d1.t1 values(now + 16s, 2)
-
-print =============== check grant
-sql_error create database d6
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/account/user_create.sim b/tests/script/unique/account/user_create.sim
deleted file mode 100644
index e54a380f0dbef8107de452354ea01bc58262d548..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/user_create.sim
+++ /dev/null
@@ -1,84 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 0
-system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
-sql connect
-
-print =============== step1
-sql show users
-if $rows != 3 then
- return -1
-endi
-
-sql create user read PASS 'pass123'
-sql create user read PASS 'pass123' -x step1
- return -1
-step1:
-
-sql show users
-if $rows != 4 then
- return -1
-endi
-
-sql alter user read PASS 'taosdata'
-
-print =============== step2
-sql close
-sql connect read
-sleep 2000
-
-sql alter user read PASS 'taosdata'
-
-print =============== step3
-sql drop user read -x step31
- return -1
-step31:
-sql drop user _root -x step32
- return -1
-step32:
-sql drop user monitor -x step33
- return -1
-step33:
-
-print =============== step4
-sql close
-sql connect
-sleep 2000
-
-sql alter user read privilege read
-sql show users
-print $data1_read
-if $data1_read != readable then
- return -1
-endi
-
-sql_error alter user read privilege super
-sql show users
-print $data1_read
-if $data1_read != readable then
- return -1
-endi
-
-sql alter user read privilege write
-sql show users
-if $data1_read != writable then
- return -1
-endi
-
-sql alter user read privilege 1 -x step43
- return -1
-step43:
-
-sql drop user _root -x step41
- return -1
-step41:
-
-sql drop user monitor -x step42
- return -1
-step42:
-
-sql drop user read
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/account/user_len.sim b/tests/script/unique/account/user_len.sim
deleted file mode 100644
index b8d448f0ffc9e43cbc0f0a5a849bda215e72e790..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/user_len.sim
+++ /dev/null
@@ -1,94 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 0
-system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
-sql connect
-
-$i = 0
-$dbPrefix = lm_us_db
-$tbPrefix = lm_us_tb
-$db = $dbPrefix . $i
-$tb = $tbPrefix . $i
-
-print =============== step1
-sql drop user ac -x step0
- return -1
-step0:
-
-sql create user PASS '123' -x step1
- return -1
-step1:
-
-sql show users
-if $rows != 3 then
- return -1
-endi
-
-print =============== step2
-sql drop user a -x step2
-step2:
-sleep 1000
-sql create user a PASS '123'
-sql show users
-if $rows != 4 then
- return -1
-endi
-
-sql drop user a
-sql show users
-if $rows != 3 then
- return -1
-endi
-
-print =============== step3
-sql drop user abc01234567890123456789 -x step3
-step3:
-
-sql create user abc01234567890123456789 PASS '123'
-sql show users
-if $rows != 4 then
- return -1
-endi
-
-sql drop user abc01234567890123456789
-sql show users
-if $rows != 3 then
- return -1
-endi
-
-print =============== step4
-sql create user abcd0123456789012345678901234567890111 PASS '123' -x step4
- return -1
-step4:
-sql show users
-if $rows != 3 then
- return -1
-endi
-
-print =============== step5
-sql drop user 123 -x step5
-step5:
-sql create user 123 PASS '123' -x step61
- return -1
-step61:
-
-sql create user a123 PASS '123'
-sql show users
-if $rows != 4 then
- return -1
-endi
-
-sql drop user a123
-sql show users
-if $rows != 3 then
- return -1
-endi
-
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/http/admin.sim b/tests/script/unique/http/admin.sim
deleted file mode 100644
index ae206744c4e93ab7cebd5f4db7d8d4b84ad5ebbb..0000000000000000000000000000000000000000
--- a/tests/script/unique/http/admin.sim
+++ /dev/null
@@ -1,192 +0,0 @@
-system sh/stop_dnodes.sh
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c http -v 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 0
-#system sh/cfg.sh -n dnode1 -c adminRowLimit -v 10
-system sh/cfg.sh -n dnode1 -c httpDebugFlag -v 135
-system sh/exec.sh -n dnode1 -s start
-
-sql connect
-sleep 2000
-
-print ============================ dnode1 start
-
-print =============== step0 - prepare data
-sql create database d1
-sql use d1
-
-sql create table table_admin (ts timestamp, i int)
-
-sql insert into table_admin values('2017-12-25 21:28:41.022', 1)
-sql insert into table_admin values('2017-12-25 21:28:42.022', 2)
-sql insert into table_admin values('2017-12-25 21:28:43.022', 3)
-sql insert into table_admin values('2017-12-25 21:28:44.022', 4)
-sql insert into table_admin values('2017-12-25 21:28:45.022', 5)
-sql insert into table_admin values('2017-12-25 21:28:46.022', 6)
-sql insert into table_admin values('2017-12-25 21:28:47.022', 7)
-sql insert into table_admin values('2017-12-25 21:28:48.022', 8)
-sql insert into table_admin values('2017-12-25 21:28:49.022', 9)
-sql insert into table_admin values('2017-12-25 21:28:50.022', 10)
-
-print =============== step1 - login
-
-system_content curl 127.0.0.1:7111/admin/
-print 1-> $system_content
-if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
- print actual: $system_content
- return -1
-endi
-
-system_content curl 127.0.0.1:7111/admin/xx
-print 2-> $system_content
-if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
- return -1
-endi
-
-system_content curl 127.0.0.1:7111/admin/login
-print 3-> $system_content
-if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
- return -1
-endi
-
-system_content curl 127.0.0.1:7111/admin/login/root
-print 4-> $system_content
-if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
- return -1
-endi
-
-system_content curl 127.0.0.1:7111/admin/login/root/123
-print 5-> $system_content
-if $system_content != @{"status":"error","code":3,"desc":"Authentication failure"}@ then
- return -1
-endi
-
-system_content curl 127.0.0.1:7111/admin/login/root/123/1/1/3
-print 6-> $system_content
-if $system_content != @{"status":"error","code":3,"desc":"Authentication failure"}@ then
- return -1
-endi
-
-system_content curl -H 'Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.' -d 'show databases' 127.0.0.1:7111/admin/login/root/1
-print 7-> $system_content
-if $system_content != @{"status":"error","code":4387,"desc":"invalid format of Authorization"}@ then
- return -1
-endi
-
-system_content curl -H 'Authorization: Taosd eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' 127.0.0.1:7111/admin/login/root/1
-print 8-> $system_content
-if $system_content != @{"status":"error","code":4387,"desc":"invalid format of Authorization"}@ then
- return -1
-endi
-
-sleep 2000
-system_content curl 127.0.0.1:7111/admin/login/root/taosdata
-print 9 -----> $system_content
-
-if $system_content != {"status":"succ","code":0,"desc":"/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"} then
- return -1
-endi
-
-#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:7111/admin/login/root/1
-#print 10-> $system_content
-#if $system_content != @{"status":"error","code":29,"desc":"failed to connect to server"}@ then
-# return -1
-#endi
-
-print =============== step2 - logout
-
-system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/admin/logout
-print 10 -----> $system_content
-
-if $system_content != @{"status":"succ","code":0,"desc":"logout success"}@ then
- return -1
-endi
-
-system_content curl 127.0.0.1:7111/admin/logout
-print 11 -----> $system_content
-
-if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
- return -1
-endi
-
-print =============== step3 - info
-
-system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/admin/info
-print curl 127.0.0.1:7111/admin/info -----> $system_content
-if $system_content != {"status":"succ","data":[{"dbs":1,"tables":1,"users":3,"mnodes":1,"dnodes":1}]} then
- return -1
-endi
-
-print =============== step4 - meta
-
-system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show mnodes' 127.0.0.1:7111/admin/meta
-print curl 127.0.0.1:7111/admin/meta -----> $system_content
-#if $system_content != @{"status":"succ","head":["column type","column name","column bytes"],"data":[["binary","IP",16],["timestamp","created time",8],["binary","status",10],["binary","role",10],["binary","public ip",16]],"rows":5}@ then
-# return -1
-#endi
-
-print =============== step5 - query data
-
-system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/sql
-print curl 127.0.0.1:7111/admin/all -----> $system_content
-if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10]],"rows":10}@ then
- return -1
-endi
-
-system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/sql
-print curl 127.0.0.1:7111/admin/sql -----> $system_content
-if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10]],"rows":10}@ then
- return -1
-endi
-
-print =============== step6 - insert data
-system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.table_admin values('2017-12-25 21:28:51.022', 11)" 127.0.0.1:7111/admin/sql
-print curl 127.0.0.1:7111/admin/sql -----> $system_content
-if $system_content != @{"status":"succ","head":["affect_rows"],"data":[[1]],"rows":1}@ then
- return -1
-endi
-
-system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/all
-print curl 127.0.0.1:7111/admin/all -----> $system_content
-if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11}@ then
- print actual: $system_content
- print expect =======> {"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11}
- return -1
-endi
-
-#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/sql
-#print curl 127.0.0.1:7111/admin/sql -----> $system_content
-#if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:51.022",11],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:42.022",2]],"rows":10}@ then
-# return -1
-#endi
-
-system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/admin/info
-print curl 127.0.0.1:7111/admin/info -----> $system_content
-if $system_content != {"status":"succ","data":[{"dbs":1,"tables":1,"users":3,"mnodes":1,"dnodes":1}]} then
- return -1
-endi
-
-print =============== step7 - use dbs
-
-system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'use d1;' 127.0.0.1:7111/admin/all
-print 23-> $system_content
-if $system_content != @{"status":"error","code":4360,"desc":"no need to execute use db cmd"}@ then
- return -1
-endi
-
-print =============== step8 - monitor dbs
-#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show dnodes;show mnodes;' 127.0.0.1:7111/admin/sqls
-#print 24-> $system_content
-#if $system_content != @[{"status":"succ","head":["IP","created time","open vnodes","free vnodes","status","balance state"],"data":[["127.0.0.1","2018-09-04 #11:16:13.985",1,3,"ready","balanced"]],"rows":1},{"status":"succ","head":["IP","created time","status","role"],"data":[["127.0.0.1","2018-09-04 11:16:13.371","serving","master"]],"rows":1}]@ then
-# return -1
-# endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
-system sh/exec.sh -n dnode3 -s stop -x SIGINT
-system sh/exec.sh -n dnode4 -s stop -x SIGINT
-system sh/exec.sh -n dnode5 -s stop -x SIGINT
-system sh/exec.sh -n dnode6 -s stop -x SIGINT
-system sh/exec.sh -n dnode7 -s stop -x SIGINT
-system sh/exec.sh -n dnode8 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/http/opentsdb.sim b/tests/script/unique/http/opentsdb.sim
deleted file mode 100644
index 7d1e6b03d4547a6b0b2a6a7857000a8a6518a002..0000000000000000000000000000000000000000
--- a/tests/script/unique/http/opentsdb.sim
+++ /dev/null
@@ -1,247 +0,0 @@
-system sh/stop_dnodes.sh
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c http -v 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 0
-system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
-sql connect
-
-print ============================ dnode1 start
-
-print =============== step1 - parse
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/
-print $system_content
-if $system_content != @{"status":"error","code":4496,"desc":"database name can not be null"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db123456789012345678901234567890db
-print $system_content
-if $system_content != @{"status":"error","code":4497,"desc":"database name too long"}@ then
- return -1
-endi
-
-system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/
-print $system_content
-if $system_content != @{"status":"error","code":4496,"desc":"database name can not be null"}@ then
- return -1
-endi
-
-system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put2
-print $system_content
-if $system_content != @{"status":"error","code":4354,"desc":"invalid url format"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"status":"error","code":4499,"desc":"metrics size is 0"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"status":"error","code":4498,"desc":"invalid opentsdb json fromat"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '{}' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"status":"error","code":4499,"desc":"metrics size is 0"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-
-if $system_content != @{"status":"error","code":4501,"desc":"metric name not find"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{"metric": 1,"timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"status":"error","code":4502,"desc":"metric name type should be string"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{"metric": "","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"status":"error","code":4503,"desc":"metric name length is 0"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{"metric": "ab1234567890123456789012345678ab1234567890123456789012345678","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"errors":[{"datapoint":{"metric":"ab1234567890123456789012345678ab1234567890123456789012345678","stable":"ab1234567890123456789012345678ab1234567890123456789012345678_d_bbb","table":"ab1234567890123456789012345678ab1234567890123456789012345678_d_bbb_lga_1_web01","timestamp":1346846400,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"status":"error","code":1547,"desc":"Timestamp data out of range"}}],"failed":1,"success":0,"affected_rows":0}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"status":"error","code":4505,"desc":"timestamp not find"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": "2","value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"status":"error","code":4506,"desc":"timestamp type should be integer"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": -1,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"status":"error","code":4507,"desc":"timestamp value smaller than 0"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"status":"error","code":4517,"desc":"value not find"}@ then
- return -1
-endi
-
-#######
-
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"status":"error","code":4508,"desc":"tags not find"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {}}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"status":"error","code":4509,"desc":"tags size is 0"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": 0}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"status":"error","code":4509,"desc":"tags size is 0"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","group1": "1","group1": "1","group1": "1","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbbbbbb","table":"sys_cpu_d_bbbbbbb_lga_1_1_1_1_1_web01","timestamp":1346846400,"value":18.000000,"tags":{"dc":"lga","group1":"1","group1":"1","group1":"1","group1":"1","group1":"1","host":"web01"},"status":"error","code":866,"desc":"failed to create table"}}],"failed":1,"success":0,"affected_rows":0}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"": "web01"}}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"status":"error","code":4512,"desc":"tag name is null"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host01123456789001123456789001123456789001123456789001123456789001123456789": "01"}}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"status":"error","code":4513,"desc":"tag name length too long"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web011234567890011234567890011234567890011234567890011234567890011234567890011234567890011234567890"}}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"status":"error","code":4516,"desc":"tag value can not more than 64"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": ""}}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"status":"error","code":4515,"desc":"tag value is null"}@ then
- return -1
-endi
-
-sleep 2000
-
-print =============== step2 - insert single data
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web01","timestamp":1346846400000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"affected_rows":1,"status":"succ"}}],"failed":0,"success":1,"affected_rows":1}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web01","timestamp":1346846400000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"affected_rows":1,"status":"succ"}}],"failed":0,"success":1,"affected_rows":1}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d 'select * from db.sys_cpu_d_bbb_lga_1_web01' 127.0.0.1:7111/rest/sql/
-print $system_content
-if $system_content != @{"status":"succ","head":["ts","value"],"column_meta":[["ts",9,8],["value",7,8]],"data":[["2012-09-05 20:00:00.000",18.000000000]],"rows":1}@ then
- return -1
-endi
-
-print =============== step3 - multi-query data
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846405000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846402000,"value": 18,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
-
-print $system_content
-
-if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web01","timestamp":1346846405000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"affected_rows":1,"status":"succ"}},{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web02","timestamp":1346846402000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web02"},"affected_rows":1,"status":"succ"}}],"failed":0,"success":2,"affected_rows":2}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d 'select * from db.sys_cpu_d_bbb_lga_1_web01' 127.0.0.1:7111/rest/sql/
-
-print $system_content
-
-if $system_content != @{"status":"succ","head":["ts","value"],"column_meta":[["ts",9,8],["value",7,8]],"data":[["2012-09-05 20:00:00.000",18.000000000],["2012-09-05 20:00:05.000",18.000000000]],"rows":2}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d 'select count(*) from db.sys_cpu_d_bbb' 127.0.0.1:7111/rest/sql/
-
-print $system_content
-
-if $system_content != @{"status":"succ","head":["count(*)"],"column_meta":[["count(*)",5,8]],"data":[[3]],"rows":1}@ then
- return -1
-endi
-
-print =============== step4 - summary-put data
-system_content curl -u root:taosdata -d '[{"metric": "sys_mem","timestamp": 1346846400000,"value": 8,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_mem","timestamp": 1346846405000,"value": 9,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put?details=false
-
-print $system_content
-
-if $system_content != @{"failed":0,"success":2}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d 'select * from db.sys_mem_d_bbb_lga_1_web01' 127.0.0.1:7111/rest/sql/
-
-print $system_content
-
-if $system_content != @{"status":"succ","head":["ts","value"],"column_meta":[["ts",9,8],["value",7,8]],"data":[["2012-09-05 20:00:00.000",8.000000000],["2012-09-05 20:00:05.000",9.000000000]],"rows":2}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d 'select count(*) from db.sys_mem_d_bbb' 127.0.0.1:7111/rest/sql/
-
-print $system_content
-
-if $system_content != @{"status":"succ","head":["count(*)"],"column_meta":[["count(*)",5,8]],"data":[[2]],"rows":1}@ then
- return -1
-endi
-
-print =============== step5 - prepare data
-
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846402000,"value": 19,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846402,"value": 19,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846403000,"value": 20,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846403,"value": 20,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846404000,"value": 21,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846404,"value": 21,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846405000,"value": 22,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846405,"value": 22,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846406000,"value": 23,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846406,"value": 23,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
-
-system_content curl -u root:taosdata -d 'select count(*) from db.sys_cpu_d_bbb' 127.0.0.1:7111/rest/sql/
-print $system_content
-if $system_content != @{"status":"succ","head":["count(*)"],"column_meta":[["count(*)",5,8]],"data":[[7]],"rows":1}@ then
- return -1
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
-system sh/exec.sh -n dnode3 -s stop -x SIGINT
-system sh/exec.sh -n dnode4 -s stop -x SIGINT
-system sh/exec.sh -n dnode5 -s stop -x SIGINT
-system sh/exec.sh -n dnode6 -s stop -x SIGINT
-system sh/exec.sh -n dnode7 -s stop -x SIGINT
-system sh/exec.sh -n dnode8 -s stop -x SIGINT
diff --git a/tests/script/unique/http/testSuite.sim b/tests/script/unique/http/testSuite.sim
deleted file mode 100644
index 3a9753e744b84bfea28e40e8b3554cb82d2ebb40..0000000000000000000000000000000000000000
--- a/tests/script/unique/http/testSuite.sim
+++ /dev/null
@@ -1,2 +0,0 @@
-run unique/http/admin.sim
-run general/http/opentsdb.sim
\ No newline at end of file
diff --git a/tests/script/unique/mnode/mgmt20.sim b/tests/script/unique/mnode/mgmt20.sim
deleted file mode 100644
index 8945cffab226ab5dc379057d55e562f5c3ed9cfa..0000000000000000000000000000000000000000
--- a/tests/script/unique/mnode/mgmt20.sim
+++ /dev/null
@@ -1,88 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
-
-system sh/cfg.sh -n dnode1 -c monitor -v 1
-system sh/cfg.sh -n dnode2 -c monitor -v 1
-
-print ============== step1
-system sh/exec.sh -n dnode1 -s start
-system sh/exec.sh -n dnode2 -s start
-sql connect
-
-print ============== step2
-sql create dnode $hostname2
-
-$x = 0
-show2:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-if $data2_1 != master then
- goto show2
-endi
-if $data2_2 != slave then
- goto show2
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
-
-print ============== step3
-system sh/exec.sh -n dnode2 -s start
-sleep 10000
-
-system sh/exec.sh -n dnode1 -s start
-sql connect
-
-print =============== step4
-sql select * from log.dn1
-$d1_first = $rows
-sql select * from log.dn2
-$d2_first = $rows
-
-$x = 0
-show4:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-if $data2_1 != master then
- goto show4
-endi
-if $data2_2 != slave then
- goto show4
-endi
-
-sleep 2000
-sql select * from log.dn1
-$d1_second = $rows
-sql select * from log.dn2
-$d2_second = $rows
-
-print dnode1 $d1_first $d1_second
-print dnode2 $d2_first $d2_second
-if $d1_first >= $d1_second then
- return -1
-endi
-
-if $d2_first >= $d2_second then
- return -1
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/mnode/mgmt21.sim b/tests/script/unique/mnode/mgmt21.sim
deleted file mode 100644
index 8409383309dbde5500b9719cd64fd74ca5e384b2..0000000000000000000000000000000000000000
--- a/tests/script/unique/mnode/mgmt21.sim
+++ /dev/null
@@ -1,44 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
-
-print ============== step1
-system sh/exec.sh -n dnode2 -s start
-sleep 10000
-
-system sh/exec.sh -n dnode1 -s start
-sql connect
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-if $data2_1 != master then
- return -1
-endi
-
-print ============== step2
-sql create dnode $hostname2
-
-$x = 0
-show2:
- $x = $x + 1
- sleep 2000
- if $x == 5 then
- return -1
- endi
-
-sql show mnodes -x show2
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-if $data2_1 != master then
- goto show2
-endi
-if $data2_2 != slave then
- goto show2
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/mnode/mgmt22.sim b/tests/script/unique/mnode/mgmt22.sim
deleted file mode 100644
index 399805312ba905d55bceffe011cfe074c831684e..0000000000000000000000000000000000000000
--- a/tests/script/unique/mnode/mgmt22.sim
+++ /dev/null
@@ -1,114 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/deploy.sh -n dnode3 -i 3
-
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2
-
-print ============== step1
-system sh/exec.sh -n dnode1 -s start
-sql connect
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-if $data2_1 != master then
- return -1
-endi
-
-print ============== step2
-system sh/exec.sh -n dnode2 -s start
-sql create dnode $hostname2
-
-$x = 0
-show2:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-if $data2_1 != master then
- goto show2
-endi
-if $data2_2 != slave then
- goto show2
-endi
-
-print ============== step3
-sql_error drop dnode $hostname1 -x error1
-print should not drop master
-
-print ============== step4
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 3000
-sql_error show mnodes
-print error of no master
-
-print ============== step5
-sql_error drop dnode $hostname1
-print error of no master
-
-print ============== step6
-system sh/exec.sh -n dnode1 -s start
-sleep 2000
-sql close
-sql connect
-
-$x = 0
-show6:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-
-sql show mnodes -x show6
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-if $data2_1 != master then
- goto show6
-endi
-if $data2_2 != slave then
- goto show6
-endi
-
-print ============== step7
-system sh/exec.sh -n dnode3 -s start
-sql create dnode $hostname3
-
-$x = 0
-show7:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-print dnode3 ==> $data2_3
-if $data2_1 != master then
- goto show7
-endi
-if $data2_2 != slave then
- goto show7
-endi
-if $data3_3 != null then
- goto show7
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
-system sh/exec.sh -n dnode3 -s stop -x SIGINT
-system sh/exec.sh -n dnode4 -s stop -x SIGINT
-system sh/exec.sh -n dnode5 -s stop -x SIGINT
-system sh/exec.sh -n dnode6 -s stop -x SIGINT
-system sh/exec.sh -n dnode7 -s stop -x SIGINT
-system sh/exec.sh -n dnode8 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/mnode/mgmt23.sim b/tests/script/unique/mnode/mgmt23.sim
deleted file mode 100644
index 19c7b4ba762d4bf5a73c10c1afa39e927c7a1c91..0000000000000000000000000000000000000000
--- a/tests/script/unique/mnode/mgmt23.sim
+++ /dev/null
@@ -1,141 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/deploy.sh -n dnode3 -i 3
-
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2
-
-print ============== step1
-system sh/exec.sh -n dnode1 -s start
-sql connect
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-if $data2_1 != master then
- return -1
-endi
-
-print ============== step2
-system sh/exec.sh -n dnode2 -s start
-sql create dnode $hostname2
-
-$x = 0
-show2:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-if $data2_1 != master then
- goto show2
-endi
-if $data2_2 != slave then
- goto show2
-endi
-
-print ============== step3
-system sh/exec.sh -n dnode3 -s start
-sql create dnode $hostname3
-sleep 8000
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != master then
- return -1
-endi
-if $dnode2Role != slave then
- return -1
-endi
-if $dnode3Role != null then
- return -1
-endi
-
-print ============== step4
-sql drop dnode $hostname2
-
-$x = 0
-step4:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != master then
- goto step4
-endi
-if $dnode2Role != null then
- goto step4
-endi
-if $dnode3Role != slave then
- goto step4
-endi
-
-system sh/exec.sh -n dnode2 -s stop
-
-print ============== step5
-sleep 2000
-sql create dnode $hostname2
-system sh/deploy.sh -n dnode2 -i 2
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
-system sh/exec.sh -n dnode2 -s start
-
-$x = 0
-step5:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != master then
- goto step5
-endi
-if $dnode2Role != null then
- goto step5
-endi
-if $dnode3Role != slave then
- goto step5
-endi
-
-print ============== step6
-system sh/exec.sh -n dnode1 -s stop
-sql_error show mnodes
-
-print ============== step7
-sql_error drop dnode $hostname1
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
-system sh/exec.sh -n dnode3 -s stop -x SIGINT
-system sh/exec.sh -n dnode4 -s stop -x SIGINT
-system sh/exec.sh -n dnode5 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/mnode/mgmt24.sim b/tests/script/unique/mnode/mgmt24.sim
deleted file mode 100644
index a7bcc59ac0bfa6163d1e2fddfd3a817b102bfa3c..0000000000000000000000000000000000000000
--- a/tests/script/unique/mnode/mgmt24.sim
+++ /dev/null
@@ -1,84 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/deploy.sh -n dnode3 -i 3
-
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2
-
-print ============== step1
-system sh/exec.sh -n dnode1 -s start
-sql connect
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-if $data2_1 != master then
- return -1
-endi
-
-print ============== step2
-system sh/exec.sh -n dnode2 -s start
-sql create dnode $hostname2
-
-$x = 0
-show2:
- $x = $x + 1
- sleep 2000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-if $data2_1 != master then
- goto show2
-endi
-if $data2_2 != slave then
- goto show2
-endi
-
-print ============== step3
-system sh/exec.sh -n dnode1 -s stop
-sleep 2000
-sql_error show mnodes
-
-print ============== step4
-sql_error drop dnode $hostname1
-
-print ============== step5
-system sh/exec.sh -n dnode1 -s start
-sql_error create dnode $hostname1
-
-sql close
-sql connect
-
-$x = 0
-step5:
- $x = $x + 1
- sleep 2000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes -x step5
-
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-if $data2_1 != master then
- goto step5
-endi
-if $data2_2 != slave then
- goto step5
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
-system sh/exec.sh -n dnode3 -s stop -x SIGINT
-system sh/exec.sh -n dnode4 -s stop -x SIGINT
-system sh/exec.sh -n dnode5 -s stop -x SIGINT
-system sh/exec.sh -n dnode6 -s stop -x SIGINT
-system sh/exec.sh -n dnode7 -s stop -x SIGINT
-system sh/exec.sh -n dnode8 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/mnode/mgmt25.sim b/tests/script/unique/mnode/mgmt25.sim
deleted file mode 100644
index 9cca9c844806b138faf52186ffc3184d4876a1d6..0000000000000000000000000000000000000000
--- a/tests/script/unique/mnode/mgmt25.sim
+++ /dev/null
@@ -1,95 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/deploy.sh -n dnode3 -i 3
-
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2
-
-print ============== step1
-system sh/exec.sh -n dnode1 -s start
-sql connect
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-if $data2_1 != master then
- return -1
-endi
-
-print ============== step2
-system sh/exec.sh -n dnode2 -s start
-sql create dnode $hostname2
-
-$x = 0
-show2:
- $x = $x + 1
- sleep 2000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-if $data2_1 != master then
- goto show2
-endi
-if $data2_2 != slave then
- goto show2
-endi
-
-print ============== step3
-system sh/exec.sh -n dnode3 -s start
-sql create dnode $hostname3
-sleep 6000
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != master then
- return -1
-endi
-if $dnode2Role != slave then
- return -1
-endi
-if $dnode3Role != null then
- return -1
-endi
-
-print ============== step4
-sql drop dnode $hostname2
-sleep 6000
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != master then
- return -1
-endi
-if $dnode2Role != null then
- return -1
-endi
-if $dnode3Role != slave then
- return -1
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
-system sh/exec.sh -n dnode3 -s stop -x SIGINT
-system sh/exec.sh -n dnode4 -s stop -x SIGINT
-system sh/exec.sh -n dnode5 -s stop -x SIGINT
-system sh/exec.sh -n dnode6 -s stop -x SIGINT
-system sh/exec.sh -n dnode7 -s stop -x SIGINT
-system sh/exec.sh -n dnode8 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/mnode/mgmt26.sim b/tests/script/unique/mnode/mgmt26.sim
deleted file mode 100644
index 2816845052e835cf11e0ec7d4ddc71cbdee0ada1..0000000000000000000000000000000000000000
--- a/tests/script/unique/mnode/mgmt26.sim
+++ /dev/null
@@ -1,123 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/deploy.sh -n dnode3 -i 3
-
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2
-
-print ============== step1
-system sh/exec.sh -n dnode1 -s start
-sql connect
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-if $data2_1 != master then
- return -1
-endi
-
-print ============== step2
-system sh/exec.sh -n dnode2 -s start
-sql create dnode $hostname2
-
-$x = 0
-show2:
- $x = $x + 1
- sleep 2000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-if $data2_1 != master then
- goto show2
-endi
-if $data2_2 != slave then
- goto show2
-endi
-
-print ============== step3
-system sh/exec.sh -n dnode3 -s start
-sql create dnode $hostname3
-sleep 6000
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != master then
- return -1
-endi
-if $dnode2Role != slave then
- return -1
-endi
-if $dnode3Role != null then
- return -1
-endi
-
-
-print ============== step4
-sql drop dnode $hostname2
-sleep 6000
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != master then
- return -1
-endi
-if $dnode2Role != null then
- return -1
-endi
-if $dnode3Role != slave then
- return -1
-endi
-
-print ============== step5
-system sh/exec.sh -n dnode2 -s stop
-system sh/deploy.sh -n dnode2 -i 2
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
-sleep 3000
-system sh/exec.sh -n dnode2 -s start
-sql create dnode $hostname2
-sleep 6000
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != master then
- return -1
-endi
-if $dnode2Role != null then
- return -1
-endi
-if $dnode3Role != slave then
- return -1
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
-system sh/exec.sh -n dnode3 -s stop -x SIGINT
-system sh/exec.sh -n dnode4 -s stop -x SIGINT
-system sh/exec.sh -n dnode5 -s stop -x SIGINT
-system sh/exec.sh -n dnode6 -s stop -x SIGINT
-system sh/exec.sh -n dnode7 -s stop -x SIGINT
-system sh/exec.sh -n dnode8 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/mnode/mgmt30.sim b/tests/script/unique/mnode/mgmt30.sim
deleted file mode 100644
index d0858c0d6cdffa1cb1cd7f2ba570ae0521f412d5..0000000000000000000000000000000000000000
--- a/tests/script/unique/mnode/mgmt30.sim
+++ /dev/null
@@ -1,68 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/deploy.sh -n dnode3 -i 3
-
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
-system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
-
-system sh/cfg.sh -n dnode1 -c balanceInterval -v 3000
-system sh/cfg.sh -n dnode2 -c balanceInterval -v 3000
-system sh/cfg.sh -n dnode3 -c balanceInterval -v 3000
-
-print ============== step1
-system sh/exec.sh -n dnode1 -s start
-sql connect
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-print dnode3 ==> $data3_3
-if $data2_1 != master then
- return -1
-endi
-if $data3_2 != null then
- return -1
-endi
-if $data3_3 != null then
- return -1
-endi
-
-print ============== step2
-system sh/exec.sh -n dnode2 -s start
-system sh/exec.sh -n dnode3 -s start
-sleep 3000
-
-sql create dnode $hostname2
-sql create dnode $hostname3
-
-$x = 0
-step2:
- $x = $x + 1
- sleep 1000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != master then
- goto step2
-endi
-if $dnode2Role != slave then
- goto step2
-endi
-if $dnode3Role != slave then
- goto step2
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
-system sh/exec.sh -n dnode3 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/mnode/mgmt33.sim b/tests/script/unique/mnode/mgmt33.sim
deleted file mode 100644
index ce7cdce35d8c0463564f46d26a0711d39340c8bf..0000000000000000000000000000000000000000
--- a/tests/script/unique/mnode/mgmt33.sim
+++ /dev/null
@@ -1,214 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/deploy.sh -n dnode3 -i 3
-
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
-system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
-
-print ============== step1
-system sh/exec.sh -n dnode1 -s start
-sql connect
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-print dnode3 ==> $data3_3
-if $data2_1 != master then
- return -1
-endi
-if $data3_2 != null then
- return -1
-endi
-if $data3_3 != null then
- return -1
-endi
-
-print ============== step2
-system sh/exec.sh -n dnode2 -s start
-sql create dnode $hostname2
-
-$x = 0
-step2:
- $x = $x + 1
- sleep 1000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != master then
- goto step2
-endi
-if $dnode2Role != slave then
- goto step2
-endi
-if $dnode3Role != null then
- goto step2
-endi
-
-print ============== step3
-system sh/exec.sh -n dnode3 -s start
-sql create dnode $hostname3
-
-$x = 0
-step3:
- $x = $x + 1
- sleep 1000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != master then
- goto step3
-endi
-if $dnode2Role != slave then
- goto step3
-endi
-if $dnode3Role != slave then
- goto step3
-endi
-
-print ============== step4
-sql drop dnode $hostname2
-
-$x = 0
-step4:
- $x = $x + 1
- sleep 1000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != master then
- goto step4
-endi
-if $dnode2Role != null then
- goto step4
-endi
-if $dnode3Role != slave then
- goto step4
-endi
-
-system sh/exec.sh -n dnode2 -s stop
-system sh/deploy.sh -n dnode2 -i 2
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
-system sh/exec.sh -n dnode2 -s start
-
-print ============== step5
-sql create dnode $hostname2
-
-$x = 0
-step5:
- $x = $x + 1
- sleep 1000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_4
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != master then
- goto step5
-endi
-if $dnode2Role != slave then
- goto step5
-endi
-if $dnode3Role != slave then
- goto step5
-endi
-
-print ============== step6
-system sh/exec.sh -n dnode1 -s stop
-$x = 0
-step6:
- $x = $x + 1
- sleep 1000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes -x step6
-$dnode1Role = $data2_1
-$dnode2Role = $data2_4
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != offline then
- goto step6
-endi
-#if $dnode2Role != master then
-# return -1
-#endi
-#if $dnode3Role != slave then
-# return -1
-#endi
-
-print ============== step7
-sql drop dnode $hostname1
-$x = 0
-step7:
- $x = $x + 1
- sleep 1000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes -x step7
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != null then
- goto step7
-endi
-#if $dnode2Role != master then
-# return -1
-#endi
-#if $dnode3Role != slave then
-# return -1
-#endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
-system sh/exec.sh -n dnode3 -s stop -x SIGINT
-system sh/exec.sh -n dnode4 -s stop -x SIGINT
-system sh/exec.sh -n dnode5 -s stop -x SIGINT
-system sh/exec.sh -n dnode6 -s stop -x SIGINT
-system sh/exec.sh -n dnode7 -s stop -x SIGINT
-system sh/exec.sh -n dnode8 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/mnode/mgmt34.sim b/tests/script/unique/mnode/mgmt34.sim
deleted file mode 100644
index d8a46b0955f59273279bbbc5c89c07c05db672d7..0000000000000000000000000000000000000000
--- a/tests/script/unique/mnode/mgmt34.sim
+++ /dev/null
@@ -1,269 +0,0 @@
-system sh/stop_dnodes.sh
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/deploy.sh -n dnode3 -i 3
-system sh/deploy.sh -n dnode4 -i 4
-
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
-system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
-system sh/cfg.sh -n dnode4 -c numOfMnodes -v 3
-
-print ============== step1
-system sh/exec.sh -n dnode1 -s start
-sql connect
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-print dnode3 ==> $data3_3
-if $data2_1 != master then
- return -1
-endi
-if $data3_2 != null then
- return -1
-endi
-if $data3_3 != null then
- return -1
-endi
-
-print ============== step2
-system sh/exec.sh -n dnode2 -s start
-sql create dnode $hostname2
-$x = 0
-step2:
- $x = $x + 1
- sleep 1000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-$dnode4Role = $data2_4
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-print dnode4 ==> $dnode4Role
-
-if $dnode1Role != master then
- goto step2
-endi
-if $dnode2Role != slave then
- goto step2
-endi
-if $dnode3Role != null then
- goto step2
-endi
-if $dnode4Role != null then
- goto step2
-endi
-
-print ============== step3
-system sh/exec.sh -n dnode3 -s start
-sql create dnode $hostname3
-
-$x = 0
-step3:
- $x = $x + 1
- sleep 1000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-$dnode4Role = $data2_4
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-print dnode4 ==> $dnode4Role
-
-if $dnode1Role != master then
- goto step3
-endi
-if $dnode2Role != slave then
- goto step3
-endi
-if $dnode3Role != slave then
- goto step3
-endi
-if $dnode4Role != null then
- goto step3
-endi
-
-
-print ============== step4
-system sh/exec.sh -n dnode4 -s start
-sql create dnode $hostname4
-$x = 0
-step4:
- $x = $x + 1
- sleep 1000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-$dnode4Role = $data2_4
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-print dnode4 ==> $dnode4Role
-
-if $dnode1Role != master then
- goto step4
-endi
-if $dnode2Role != slave then
- goto step4
-endi
-if $dnode3Role != slave then
- goto step4
-endi
-if $dnode4Role != null then
- goto step4
-endi
-
-print ============== step5
-sql drop dnode $hostname2
-$x = 0
-step5:
- $x = $x + 1
- sleep 1000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-$dnode4Role = $data2_4
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-print dnode4 ==> $dnode4Role
-
-if $dnode1Role != master then
- goto step5
-endi
-if $dnode2Role != null then
- goto step5
-endi
-if $dnode3Role != slave then
- goto step5
-endi
-if $dnode4Role != slave then
- goto step5
-endi
-
-system sh/exec.sh -n dnode2 -s stop
-system sh/deploy.sh -n dnode2 -i 2
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
-system sh/exec.sh -n dnode2 -s start
-
-print ============== step6
-sql create dnode $hostname2
-$x = 0
-step6:
- $x = $x + 1
- sleep 1000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-$dnode4Role = $data2_4
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-print dnode4 ==> $dnode4Role
-
-if $dnode1Role != master then
- goto step6
-endi
-if $dnode2Role != null then
- goto step6
-endi
-if $dnode3Role != slave then
- goto step6
-endi
-if $dnode4Role != slave then
- goto step6
-endi
-
-print ============== step7
-system sh/exec.sh -n dnode1 -s stop
-$x = 0
-step7:
- $x = $x + 1
- sleep 1000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes -x step7
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-$dnode4Role = $data2_4
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-print dnode4 ==> $dnode4Role
-
-if $dnode1Role != offline then
- goto step7
-endi
-
-print ============== step8
-sql drop dnode $hostname1
-step8:
- $x = $x + 1
- sleep 1000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes -x step8
-$dnode1Role = $data2_1
-$dnode2Role = $data2_5
-$dnode3Role = $data2_3
-$dnode4Role = $data2_4
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-print dnode4 ==> $dnode4Role
-
-if $dnode1Role != null then
- goto step8
-endi
-if $dnode2Role != slave then
- goto step8
-endi
-#if $dnode3Role != master then
-# return -1
-#endi
-#if $dnode4Role != slave then
-# return -1
-#endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
-system sh/exec.sh -n dnode3 -s stop -x SIGINT
-system sh/exec.sh -n dnode4 -s stop -x SIGINT
-system sh/exec.sh -n dnode5 -s stop -x SIGINT
-system sh/exec.sh -n dnode6 -s stop -x SIGINT
-system sh/exec.sh -n dnode7 -s stop -x SIGINT
-system sh/exec.sh -n dnode8 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/mnode/mgmtr2.sim b/tests/script/unique/mnode/mgmtr2.sim
deleted file mode 100644
index 5afb41905846bff3ce9894e928245a7d34078354..0000000000000000000000000000000000000000
--- a/tests/script/unique/mnode/mgmtr2.sim
+++ /dev/null
@@ -1,87 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/deploy.sh -n dnode3 -i 3
-
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2
-
-print ============== step1
-system sh/exec.sh -n dnode1 -s start
-sleep 2000
-sql connect
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != master then
- return -1
-endi
-if $dnode2Role != null then
- return -1
-endi
-if $dnode3Role != null then
- return -1
-endi
-
-print ============== step2
-sql create dnode $hostname2
-sql create dnode $hostname3
-
-print ============== step3
-print ========= start dnode2 and dnode3
-
-system sh/exec.sh -n dnode2 -s start
-sleep 1000
-system sh/exec.sh -n dnode3 -s start
-
-sleep 8000
-system sh/exec.sh -n dnode2 -s stop
-system sh/exec.sh -n dnode3 -s stop
-sleep 4000
-system sh/exec.sh -n dnode2 -s start
-system sh/exec.sh -n dnode3 -s start
-sleep 4000
-system sh/exec.sh -n dnode2 -s stop
-system sh/exec.sh -n dnode3 -s stop
-sleep 4000
-system sh/exec.sh -n dnode2 -s start
-system sh/exec.sh -n dnode3 -s start
-
-print ============== step4
-$x = 0
-step4:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-
-sql show mnodes
-
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != master then
- goto step4
-endi
-if $dnode2Role != slave then
- goto step4
-endi
-if $dnode3Role != null then
- goto step4
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
-system sh/exec.sh -n dnode3 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/mnode/testSuite.sim b/tests/script/unique/mnode/testSuite.sim
deleted file mode 100644
index b9adbe06a282548d56d7e7feb8a36f99198d8c0d..0000000000000000000000000000000000000000
--- a/tests/script/unique/mnode/testSuite.sim
+++ /dev/null
@@ -1,9 +0,0 @@
-run unique/mnode/mgmt21.sim
-run unique/mnode/mgmt22.sim
-run unique/mnode/mgmt23.sim
-run unique/mnode/mgmt24.sim
-run unique/mnode/mgmt25.sim
-run unique/mnode/mgmt26.sim
-run unique/mnode/mgmt33.sim
-run unique/mnode/mgmt34.sim
-run unique/mnode/mgmtr2.sim
diff --git a/tests/script/unique/stream/metrics_balance.sim b/tests/script/unique/stream/metrics_balance.sim
deleted file mode 100644
index ff48c2236709635c8d1a790104b0185144a96866..0000000000000000000000000000000000000000
--- a/tests/script/unique/stream/metrics_balance.sim
+++ /dev/null
@@ -1,312 +0,0 @@
-system sh/stop_dnodes.sh
-
-
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/cfg.sh -n dnode1 -c statusInterval -v 1
-system sh/cfg.sh -n dnode2 -c statusInterval -v 1
-system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
-system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 0
-system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 0
-system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
-
-$dbPrefix = db
-$tbPrefix = tb
-$mtPrefix = mt
-$stPrefix = st
-$tbNum = 3
-$rowNum = 200
-
-print ========= start dnode1
-system sh/exec.sh -n dnode1 -s start
-sql connect
-
-print ============== step1
-$db = $dbPrefix
-sql create database $db
-sql use $db
-
-$i = 0
-$st = $stPrefix . $i
-$mt = $mtPrefix . $i
-$tbNum = 3
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
- if $i == 0 then
- sleep 2000
- endi
-
- $x = 0
- $y = 0
- while $y < $rowNum
- $ms = $x . s
- sql insert into $tb values (now + $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s)
-
-$st = $stPrefix . $i
-$mt = $mtPrefix . $i
-$tbNum = 6
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
- if $i == 0 then
- sleep 2000
- endi
-
- $x = 0
- $y = 0
- while $y < $rowNum
- $ms = $x . s
- sql insert into $tb values (now + $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s)
-
-$st = $stPrefix . $i
-$mt = $mtPrefix . $i
-$tbNum = 9
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
- if $i == 0 then
- sleep 2000
- endi
-
- $x = 0
- $y = 0
- while $y < $rowNum
- $ms = $x . s
- sql insert into $tb values (now + $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s)
-
-$st = $stPrefix . $i
-$mt = $mtPrefix . $i
-$tbNum = 12
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
- if $i == 0 then
- sleep 2000
- endi
-
- $x = 0
- $y = 0
- while $y < $rowNum
- $ms = $x . s
- sql insert into $tb values (now + $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s)
-
-
-print =============== step2
-
-sql show tables
-if $rows != 16 then
- return -1
-endi
-
-print =============== step3
-print sleep 22 seconds
-sleep 22000
-
-$i = 0
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-$r0 = $rows
-print $st ==> $r0 $data00 $data01 $data10 $data11
-
-$i = 3
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-$r3 = $rows
-print $st ==> $r3 $data00 $data01 $data10 $data11
-
-$i = 6
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-$r6 = $rows
-print $st ==> $r6 $data00 $data01 $data10 $data11
-
-$i = 9
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-$r9 = $rows
-print $st ==> $r9 $data00 $data01 $data10 $data11
-
-print rows0=>$r0 rows3=>$r3 rows6=>$r6 rows9=>$r9
-
-$x = 0
-show1:
- $x = $x + 1
- sleep 2000
- if $x == 20 then
- return -1
- endi
-sql show dnodes -x show1
-$dnode1Vnodes = $data3_192.168.0.1
-print dnode1 $dnode1Vnodes
-$dnode2Vnodes = $data3_192.168.0.2
-print dnode2 $dnode2Vnodes
-
-if $dnode1Vnodes != 0 then
- goto show1
-endi
-if $dnode2Vnodes != NULL then
- goto show1
-endi
-
-print =============== step4 start dnode2
-sql create dnode $hostname2
-system sh/exec.sh -n dnode2 -s start
-sleep 8000
-
-$x = 0
-show2:
- $x = $x + 1
- sleep 2000
- if $x == 20 then
- return -1
- endi
-sql show dnodes -x show2
-$dnode1Vnodes = $data3_192.168.0.1
-print dnode1 $dnode1Vnodes
-$dnode2Vnodes = $data3_192.168.0.2
-print dnode2 $dnode2Vnodes
-
-if $dnode1Vnodes != 2 then
- goto show2
-endi
-if $dnode2Vnodes != 2 then
- goto show2
-endi
-
-print rows0=>$r0 rows3=>$r3 rows6=>$r6 rows9=>$r9
-print =============== step5
-print sleep 22 seconds
-sleep 22000
-
-print =============== step6
-$i = 0
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $tb
-if $rows != $rowNum then
- return -1
-endi
-
-$i = 3
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $tb
-if $rows != $rowNum then
- return -1
-endi
-
-$i = 6
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $tb
-if $rows != $rowNum then
- return -1
-endi
-
-$i = 9
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $tb
-if $rows != $rowNum then
- return -1
-endi
-
-print rows0=>$r0 rows3=>$r3 rows6=>$r6 rows9=>$r9
-print =============== step7
-$i = 0
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-print $st ==> $r0 $rows , $data00 $data01 $data10 $data11
-if $rows == 0 then
- return -1
-endi
-if $rows <= $r0 then
- return -1
-endi
-
-$i = 3
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-print $st ==> $r3 $rows , $data00 $data01 $data10 $data11
-if $rows == 0 then
- return -1
-endi
-if $rows <= $r3 then
- return -1
-endi
-
-
-$i = 6
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-print $st ==> $r6 $rows , $data00 $data01 $data10 $data11
-if $rows == 0 then
- return -1
-endi
-if $rows <= $r6 then
- return -1
-endi
-
-$i = 9
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-print $st ==> $r0 $rows , $data00 $data01 $data10 $data11
-if $rows == 0 then
- return -1
-endi
-if $rows <= $r9 then
- return -1
-endi
-
-print =============== clear
-system sh/exec.sh -n dnode1 -s stop
-system sh/exec.sh -n dnode2 -s stop
-
diff --git a/tests/script/unique/stream/metrics_replica1_dnode2.sim b/tests/script/unique/stream/metrics_replica1_dnode2.sim
deleted file mode 100644
index 20c37cefc39f8fa6393d49934adb046f409fca25..0000000000000000000000000000000000000000
--- a/tests/script/unique/stream/metrics_replica1_dnode2.sim
+++ /dev/null
@@ -1,260 +0,0 @@
-system sh/stop_dnodes.sh
-
-
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
-system sh/exec.sh -n dnode1 -s start
-
-sql connect
-
-sql create dnode $hostname2
-system sh/exec.sh -n dnode2 -s start
-$x = 0
-createDnode:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-sql show dnodes;
-if $data4_192.168.0.2 == offline then
- goto createDnode
-endi
-print ======================== dnode1 start
-
-$dbPrefix = m1d_db
-$tbPrefix = m1d_tb
-$mtPrefix = m1d_mt
-$stPrefix = m1d_st
-$tbNum = 10
-$rowNum = 20
-$totalNum = 200
-
-print =============== step1
-$i = 0
-$db = $dbPrefix . $i
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-
-sql drop databae $db -x step1
-step1:
-sql create database $db
-sql use $db
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
-
- $x = -1440
- $y = 0
- while $y < $rowNum
- $ms = $x . m
- sql insert into $tb values (now $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-sleep 100
-
-print =============== step2 c1
-
-sql select count(*) from $mt interval(1d)
-print select count(*) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c1
-sql create table $st as select count(*) from $mt interval(1d)
-
-print =============== step3 c2
-sql select count(tbcol) from $mt interval(1d)
-print select count(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql create table $st as select count(tbcol) from $mt interval(1d)
-
-print =============== step4 c3
-sql select count(tbcol2) from $mt interval(1d)
-print select count(tbcol2) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql create table $st as select count(tbcol2) from $mt interval(1d)
-
-print =============== step5 avg
-sql select avg(tbcol) from $mt interval(1d)
-print select avg(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . av
-sql create table $st as select avg(tbcol) from $mt interval(1d)
-
-print =============== step6 su
-sql select sum(tbcol) from $mt interval(1d)
-print select sum(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 1900 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql create table $st as select sum(tbcol) from $mt interval(1d)
-
-print =============== step7 mi
-sql select min(tbcol) from $mt interval(1d)
-print select min(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql create table $st as select min(tbcol) from $mt interval(1d)
-
-print =============== step8 ma
-sql select max(tbcol) from $mt interval(1d)
-print select max(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql create table $st as select max(tbcol) from $mt interval(1d)
-
-print =============== step9 fi
-sql select first(tbcol) from $mt interval(1d)
-print select first(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql create table $st as select first(tbcol) from $mt interval(1d)
-
-print =============== step10 la
-sql select last(tbcol) from $mt interval(1d)
-print select last(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql create table $st as select last(tbcol) from $mt interval(1d)
-
-print =============== step11 wh
-sql select count(tbcol) from $mt where ts < now + 4m interval(1d)
-print select count(tbcol) from $mt where ts < now + 4m interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql create table $st as select count(tbcol) from $mt where ts < now + 4m interval(1d)
-
-print =============== step12 as
-sql select count(tbcol) from $mt interval(1d)
-print select count(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . as
-sql create table $st as select count(tbcol) as c from $mt interval(1d)
-
-print =============== step13
-print sleep 22 seconds
-sleep 32000
-
-print =============== step14
-$st = $stPrefix . c1
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . av
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 1900 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql select * from $st
-#print ===> select * from $st ===> $data00 $data01
-#if $data01 != 200 then
-# return -1
-#endi
-
-$st = $stPrefix . as
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
diff --git a/tests/script/unique/stream/metrics_replica2_dnode2.sim b/tests/script/unique/stream/metrics_replica2_dnode2.sim
deleted file mode 100644
index aa8c1871017982cecc695abc8f64d732a8a7fc4e..0000000000000000000000000000000000000000
--- a/tests/script/unique/stream/metrics_replica2_dnode2.sim
+++ /dev/null
@@ -1,260 +0,0 @@
-system sh/stop_dnodes.sh
-
-
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-system sh/exec.sh -n dnode1 -s start
-
-sql connect
-
-sql create dnode $hostname2
-system sh/exec.sh -n dnode2 -s start
-$x = 0
-createDnode:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-sql show dnodes;
-if $data4_192.168.0.2 == offline then
- goto createDnode
-endi
-
-
-print ======================== dnode1 start
-
-$dbPrefix = m2d_db
-$tbPrefix = m2d_tb
-$mtPrefix = m2d_mt
-$stPrefix = m2d_st
-$tbNum = 10
-$rowNum = 20
-$totalNum = 200
-
-print =============== step1
-$i = 0
-$db = $dbPrefix . $i
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-
-sql drop databae $db -x step1
-step1:
-sql create database $db replica 2
-sql use $db
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
-
- $x = -1440
- $y = 0
- while $y < $rowNum
- $ms = $x . m
- sql insert into $tb values (now $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-sleep 100
-
-print =============== step2 c1
-
-sql select count(*) from $mt interval(1d)
-print select count(*) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c1
-sql create table $st as select count(*) from $mt interval(1d)
-
-print =============== step3 c2
-sql select count(tbcol) from $mt interval(1d)
-print select count(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql create table $st as select count(tbcol) from $mt interval(1d)
-
-print =============== step4 c3
-sql select count(tbcol2) from $mt interval(1d)
-print select count(tbcol2) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql create table $st as select count(tbcol2) from $mt interval(1d)
-
-print =============== step5 avg
-sql select avg(tbcol) from $mt interval(1d)
-print select avg(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . av
-sql create table $st as select avg(tbcol) from $mt interval(1d)
-
-print =============== step6 su
-sql select sum(tbcol) from $mt interval(1d)
-print select sum(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 1900 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql create table $st as select sum(tbcol) from $mt interval(1d)
-
-print =============== step7 mi
-sql select min(tbcol) from $mt interval(1d)
-print select min(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql create table $st as select min(tbcol) from $mt interval(1d)
-
-print =============== step8 ma
-sql select max(tbcol) from $mt interval(1d)
-print select max(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql create table $st as select max(tbcol) from $mt interval(1d)
-
-print =============== step9 fi
-sql select first(tbcol) from $mt interval(1d)
-print select first(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql create table $st as select first(tbcol) from $mt interval(1d)
-
-print =============== step10 la
-sql select last(tbcol) from $mt interval(1d)
-print select last(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql create table $st as select last(tbcol) from $mt interval(1d)
-
-print =============== step11 wh
-sql select count(tbcol) from $mt where ts < now + 4m interval(1d)
-print select count(tbcol) from $mt where ts < now + 4m interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql create table $st as select count(tbcol) from $mt where ts < now + 4m interval(1d)
-
-print =============== step12 as
-sql select count(tbcol) from $mt interval(1d)
-print select count(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . as
-sql create table $st as select count(tbcol) as c from $mt interval(1d)
-
-print =============== step13
-print sleep 22 seconds
-sleep 22000
-
-print =============== step14
-$st = $stPrefix . c1
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . av
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 1900 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql select * from $st
-#print ===> select * from $st ===> $data00 $data01
-#if $data01 != 200 then
-# return -1
-#endi
-
-$st = $stPrefix . as
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
diff --git a/tests/script/unique/stream/metrics_replica2_dnode2_vnoden.sim b/tests/script/unique/stream/metrics_replica2_dnode2_vnoden.sim
deleted file mode 100644
index be2fcefe66ed6ca2e24a44cd22fa072201137b89..0000000000000000000000000000000000000000
--- a/tests/script/unique/stream/metrics_replica2_dnode2_vnoden.sim
+++ /dev/null
@@ -1,261 +0,0 @@
-system sh/stop_dnodes.sh
-
-
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
-system sh/exec.sh -n dnode1 -s start
-
-sql connect
-
-sql create dnode $hostname2
-system sh/exec.sh -n dnode2 -s start
-$x = 0
-createDnode:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-sql show dnodes;
-if $data4_192.168.0.2 == offline then
- goto createDnode
-endi
-
-print ======================== dnode1 start
-
-$dbPrefix = m2dv_db
-$tbPrefix = m2dv_tb
-$mtPrefix = m2dv_mt
-$stPrefix = m2dv_st
-$tbNum = 10
-$rowNum = 20
-$totalNum = 200
-
-print =============== step1
-$i = 0
-$db = $dbPrefix . $i
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-
-sql drop databae $db -x step1
-step1:
-sql create database $db replica 2
-sql use $db
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
-
- $x = -1440
- $y = 0
- while $y < $rowNum
- $ms = $x . m
- sql insert into $tb values (now $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-sleep 100
-
-print =============== step2 c1
-
-sql select count(*) from $mt interval(1d)
-print select count(*) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c1
-sql create table $st as select count(*) from $mt interval(1d)
-
-print =============== step3 c2
-sql select count(tbcol) from $mt interval(1d)
-print select count(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql create table $st as select count(tbcol) from $mt interval(1d)
-
-print =============== step4 c3
-sql select count(tbcol2) from $mt interval(1d)
-print select count(tbcol2) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql create table $st as select count(tbcol2) from $mt interval(1d)
-
-print =============== step5 avg
-sql select avg(tbcol) from $mt interval(1d)
-print select avg(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . av
-sql create table $st as select avg(tbcol) from $mt interval(1d)
-
-print =============== step6 su
-sql select sum(tbcol) from $mt interval(1d)
-print select sum(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 1900 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql create table $st as select sum(tbcol) from $mt interval(1d)
-
-print =============== step7 mi
-sql select min(tbcol) from $mt interval(1d)
-print select min(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql create table $st as select min(tbcol) from $mt interval(1d)
-
-print =============== step8 ma
-sql select max(tbcol) from $mt interval(1d)
-print select max(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql create table $st as select max(tbcol) from $mt interval(1d)
-
-print =============== step9 fi
-sql select first(tbcol) from $mt interval(1d)
-print select first(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql create table $st as select first(tbcol) from $mt interval(1d)
-
-print =============== step10 la
-sql select last(tbcol) from $mt interval(1d)
-print select last(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql create table $st as select last(tbcol) from $mt interval(1d)
-
-print =============== step11 wh
-sql select count(tbcol) from $mt where ts < now + 4m interval(1d)
-print select count(tbcol) from $mt where ts < now + 4m interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql create table $st as select count(tbcol) from $mt where ts < now + 4m interval(1d)
-
-print =============== step12 as
-sql select count(tbcol) from $mt interval(1d)
-print select count(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . as
-sql create table $st as select count(tbcol) as c from $mt interval(1d)
-
-print =============== step13
-print sleep 22 seconds
-sleep 22000
-
-print =============== step14
-$st = $stPrefix . c1
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . av
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 1900 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql select * from $st
-#print ===> select * from $st ===> $data00 $data01
-#if $data01 != 200 then
-# return -1
-#endi
-
-$st = $stPrefix . as
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
diff --git a/tests/script/unique/stream/metrics_replica2_dnode3.sim b/tests/script/unique/stream/metrics_replica2_dnode3.sim
deleted file mode 100644
index f7b17610c380d9f90a2cefd4af86ea766facdffa..0000000000000000000000000000000000000000
--- a/tests/script/unique/stream/metrics_replica2_dnode3.sim
+++ /dev/null
@@ -1,270 +0,0 @@
-system sh/stop_dnodes.sh
-
-
-
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/deploy.sh -n dnode3 -i 3
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-system sh/cfg.sh -n dnode3 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
-system sh/exec.sh -n dnode1 -s start
-
-sql connect
-
-sql create dnode $hostname2
-sql create dnode $hostname3
-system sh/exec.sh -n dnode2 -s start
-system sh/exec.sh -n dnode3 -s start
-$x = 0
-createDnode:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-sql show dnodes;
-if $data4_192.168.0.2 == offline then
- goto createDnode
-endi
-if $data4_192.168.0.3 == offline then
- goto createDnode
-endi
-
-print ======================== dnode1 start
-
-$dbPrefix = m2d3_db
-$tbPrefix = m2d3_tb
-$mtPrefix = m2d3_mt
-$stPrefix = m2d3_st
-$tbNum = 10
-$rowNum = 20
-$totalNum = 200
-
-print =============== step1
-$i = 0
-$db = $dbPrefix . $i
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-
-sql drop databae $db -x step1
-step1:
-sql create database $db replica 2
-sql use $db
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
-
- $x = -1440
- $y = 0
- while $y < $rowNum
- $ms = $x . m
- sql insert into $tb values (now $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-sleep 100
-
-print =============== step2 c1
-
-sql select count(*) from $mt interval(1d)
-print select count(*) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c1
-sql create table $st as select count(*) from $mt interval(1d)
-
-print =============== step3 c2
-sql select count(tbcol) from $mt interval(1d)
-print select count(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql create table $st as select count(tbcol) from $mt interval(1d)
-
-print =============== step4 c3
-sql select count(tbcol2) from $mt interval(1d)
-print select count(tbcol2) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql create table $st as select count(tbcol2) from $mt interval(1d)
-
-print =============== step5 avg
-sql select avg(tbcol) from $mt interval(1d)
-print select avg(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . av
-sql create table $st as select avg(tbcol) from $mt interval(1d)
-
-print =============== step6 su
-sql select sum(tbcol) from $mt interval(1d)
-print select sum(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 1900 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql create table $st as select sum(tbcol) from $mt interval(1d)
-
-print =============== step7 mi
-sql select min(tbcol) from $mt interval(1d)
-print select min(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql create table $st as select min(tbcol) from $mt interval(1d)
-
-print =============== step8 ma
-sql select max(tbcol) from $mt interval(1d)
-print select max(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql create table $st as select max(tbcol) from $mt interval(1d)
-
-print =============== step9 fi
-sql select first(tbcol) from $mt interval(1d)
-print select first(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql create table $st as select first(tbcol) from $mt interval(1d)
-
-print =============== step10 la
-sql select last(tbcol) from $mt interval(1d)
-print select last(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql create table $st as select last(tbcol) from $mt interval(1d)
-
-print =============== step11 wh
-sql select count(tbcol) from $mt where ts < now + 4m interval(1d)
-print select count(tbcol) from $mt where ts < now + 4m interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql create table $st as select count(tbcol) from $mt where ts < now + 4m interval(1d)
-
-print =============== step12 as
-sql select count(tbcol) from $mt interval(1d)
-print select count(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . as
-sql create table $st as select count(tbcol) as c from $mt interval(1d)
-
-print =============== step13
-print sleep 22 seconds
-sleep 22000
-
-print =============== step14
-$st = $stPrefix . c1
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . av
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 1900 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql select * from $st
-#print ===> select * from $st ===> $data00 $data01
-#if $data01 != 200 then
-# return -1
-#endi
-
-$st = $stPrefix . as
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
diff --git a/tests/script/unique/stream/metrics_replica3_dnode4.sim b/tests/script/unique/stream/metrics_replica3_dnode4.sim
deleted file mode 100644
index 402712800313ff5b96f970d12ffe007f77bc26f7..0000000000000000000000000000000000000000
--- a/tests/script/unique/stream/metrics_replica3_dnode4.sim
+++ /dev/null
@@ -1,280 +0,0 @@
-system sh/stop_dnodes.sh
-
-
-
-
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/deploy.sh -n dnode3 -i 3
-system sh/deploy.sh -n dnode4 -i 4
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-system sh/cfg.sh -n dnode3 -c walLevel -v 1
-system sh/cfg.sh -n dnode4 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
-system sh/exec.sh -n dnode1 -s start
-
-sql connect
-
-sql create dnode $hostname2
-sql create dnode $hostname3
-sql create dnode $hostname4
-system sh/exec.sh -n dnode2 -s start
-system sh/exec.sh -n dnode3 -s start
-system sh/exec.sh -n dnode4 -s start
-
-$x = 0
-createDnode:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-sql show dnodes;
-if $data4_192.168.0.2 == offline then
- goto createDnode
-endi
-if $data4_192.168.0.3 == offline then
- goto createDnode
-endi
-if $data4_192.168.0.4 == offline then
- goto createDnode
-endi
-
-print ======================== dnode1 start
-
-$dbPrefix = m2d3_db
-$tbPrefix = m2d3_tb
-$mtPrefix = m2d3_mt
-$stPrefix = m2d3_st
-$tbNum = 10
-$rowNum = 20
-$totalNum = 200
-
-print =============== step1
-$i = 0
-$db = $dbPrefix . $i
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-
-sql drop databae $db -x step1
-step1:
-sql create database $db replica 2
-sql use $db
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
-
- $x = -1440
- $y = 0
- while $y < $rowNum
- $ms = $x . m
- sql insert into $tb values (now $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-sleep 100
-
-print =============== step2 c1
-
-sql select count(*) from $mt interval(1d)
-print select count(*) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c1
-sql create table $st as select count(*) from $mt interval(1d)
-
-print =============== step3 c2
-sql select count(tbcol) from $mt interval(1d)
-print select count(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql create table $st as select count(tbcol) from $mt interval(1d)
-
-print =============== step4 c3
-sql select count(tbcol2) from $mt interval(1d)
-print select count(tbcol2) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql create table $st as select count(tbcol2) from $mt interval(1d)
-
-print =============== step5 avg
-sql select avg(tbcol) from $mt interval(1d)
-print select avg(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . av
-sql create table $st as select avg(tbcol) from $mt interval(1d)
-
-print =============== step6 su
-sql select sum(tbcol) from $mt interval(1d)
-print select sum(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 1900 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql create table $st as select sum(tbcol) from $mt interval(1d)
-
-print =============== step7 mi
-sql select min(tbcol) from $mt interval(1d)
-print select min(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql create table $st as select min(tbcol) from $mt interval(1d)
-
-print =============== step8 ma
-sql select max(tbcol) from $mt interval(1d)
-print select max(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql create table $st as select max(tbcol) from $mt interval(1d)
-
-print =============== step9 fi
-sql select first(tbcol) from $mt interval(1d)
-print select first(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql create table $st as select first(tbcol) from $mt interval(1d)
-
-print =============== step10 la
-sql select last(tbcol) from $mt interval(1d)
-print select last(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql create table $st as select last(tbcol) from $mt interval(1d)
-
-print =============== step11 wh
-sql select count(tbcol) from $mt where ts < now + 4m interval(1d)
-print select count(tbcol) from $mt where ts < now + 4m interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql create table $st as select count(tbcol) from $mt where ts < now + 4m interval(1d)
-
-print =============== step12 as
-sql select count(tbcol) from $mt interval(1d)
-print select count(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . as
-sql create table $st as select count(tbcol) as c from $mt interval(1d)
-
-print =============== step13
-print sleep 22 seconds
-sleep 22000
-
-print =============== step14
-$st = $stPrefix . c1
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . av
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 1900 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql select * from $st
-#print ===> select * from $st ===> $data00 $data01
-#if $data01 != 200 then
-# return -1
-#endi
-
-$st = $stPrefix . as
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
diff --git a/tests/script/unique/stream/metrics_vnode_stop.sim b/tests/script/unique/stream/metrics_vnode_stop.sim
deleted file mode 100644
index cd84cb3cdf5f8096f4986a222cc371db3900f765..0000000000000000000000000000000000000000
--- a/tests/script/unique/stream/metrics_vnode_stop.sim
+++ /dev/null
@@ -1,188 +0,0 @@
-system sh/stop_dnodes.sh
-
-
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
-system sh/exec.sh -n dnode1 -s start
-
-sql connect
-
-sql create dnode $hostname2
-system sh/exec.sh -n dnode2 -s start
-$x = 0
-createDnode:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-sql show dnodes;
-if $data4_192.168.0.2 == offline then
- goto createDnode
-endi
-print ======================== dnode start
-
-$dbPrefix = db
-$tbPrefix = tb
-$mtPrefix = mt
-$stPrefix = st
-$tbNum = 10
-$rowNum = 20
-$totalNum = 200
-
-print =============== step1
-$i = 0
-$db = $dbPrefix . $i
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-
-sql create database $db replica 2
-sql use $db
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
-
- $x = -1440
- $y = 0
- while $y < $rowNum
- $ms = $x . m
- sql insert into $tb values (now $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-sleep 100
-
-print =============== step2 c1
-$i = 1
-$tb = $tbPrefix . $i
-
-sql select count(*) from $mt interval(1d)
-print select count(*) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c1
-sql create table $st as select count(*) from $mt interval(1d)
-
-print =============== step3
-system sh/exec.sh -n dnode2 -s stop
-
-print =============== step4
-print sleep 22 seconds
-sleep 22000
-
-print =============== step5
-$st = $stPrefix . c1
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-print ============= step6
-
-sql close
-system sh/exec.sh -n dnode1 -s stop
-system sh/exec.sh -n dnode2 -s stop
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-system sh/exec.sh -n dnode2 -s start
-sleep 2000
-
-$x = 0
-connectTbase2:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-sql connect -x connectTbase2
-sleep 2000
-
-sql create dnode $hostname1
-system sh/exec.sh -n dnode1 -s start
-sleep 2000
-print ======================== dnode start
-
-$dbPrefix = db
-$tbPrefix = tb
-$mtPrefix = mt
-$stPrefix = st
-$tbNum = 10
-$rowNum = 20
-$totalNum = 200
-
-print =============== step7
-$i = 0
-$db = $dbPrefix . $i
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-
-sql create database $db replica 2
-sql use $db
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
-
- $x = -1440
- $y = 0
- while $y < $rowNum
- $ms = $x . m
- sql insert into $tb values (now $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-sleep 100
-
-print =============== step8 c1
-$i = 1
-$tb = $tbPrefix . $i
-
-sql select count(*) from $mt interval(1d)
-print select count(*) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c1
-sql create table $st as select count(*) from $mt interval(1d)
-
-print =============== step9
-system sh/exec.sh -n dnode1 -s stop
-
-print =============== step10
-print sleep 22 seconds
-sleep 22000
-
-print =============== step11
-$st = $stPrefix . c1
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-
-
diff --git a/tests/script/unique/stream/table_balance.sim b/tests/script/unique/stream/table_balance.sim
deleted file mode 100644
index 45e054e2efdfbd7f3d01e3a860c5ac227f3327fc..0000000000000000000000000000000000000000
--- a/tests/script/unique/stream/table_balance.sim
+++ /dev/null
@@ -1,238 +0,0 @@
-system sh/stop_dnodes.sh
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/cfg.sh -n dnode1 -c statusInterval -v 1
-system sh/cfg.sh -n dnode2 -c statusInterval -v 1
-system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
-system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 0
-system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 0
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
-
-$dbPrefix = tb_db
-$tbPrefix = tb_tb
-$mtPrefix = tb_mt
-$stPrefix = tb_st
-$tbNum = 10
-$rowNum = 200
-$totalNum = 200
-
-print ========= start dnode1
-system sh/exec.sh -n dnode1 -s start
-
-sql connect
-
-print ============== step1
-$i = 0
-$db = $dbPrefix
-$mt = $mtPrefix
-$st = $stPrefix . $i
-
-sql create database $db
-sql use $db
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
- if $i == 0 then
- sleep 2000
- endi
-
- $x = 0
- $y = 0
- while $y < $rowNum
- $ms = $x . s
- sql insert into $tb values (now + $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-sleep 100
-
-print =============== step2
-
-$i = 1
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s)
-
-$i = 5
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s)
-
-$i = 8
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s)
-
-sql show tables
-if $rows != 13 then
- return -1
-endi
-
-print =============== step3
-print sleep 22 seconds
-sleep 22000
-
-$i = 1
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-$r1 = $rows
-print $st ==> $r1 $data00 $data01 $data10 $data11
-
-$i = 5
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-$r5 = $rows
-print $st ==> $r5 $data00 $data01 $data10 $data11
-
-$i = 8
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-$r8 = $rows
-print $st ==> $r8 $data00 $data01 $data10 $data11
-
-print rows1=>$r1 rows5=>$r5 rows8=>$r8
-
-$x = 0
-show1:
- $x = $x + 1
- sleep 2000
- if $x == 20 then
- return -1
- endi
-sql show dnodes -x show1
-$dnode1Vnodes = $data3_192.168.0.1
-print dnode1 $dnode1Vnodes
-$dnode2Vnodes = $data3_192.168.0.2
-print dnode2 $dnode2Vnodes
-
-if $dnode1Vnodes != 0 then
- goto show1
-endi
-if $dnode2Vnodes != NULL then
- goto show1
-endi
-
-print =============== step4 start dnode2
-sql create dnode $hostname2
-system sh/exec.sh -n dnode2 -s start
-sleep 8000
-
-$x = 0
-show2:
- $x = $x + 1
- sleep 2000
- if $x == 20 then
- return -1
- endi
-sql show dnodes -x show2
-$dnode1Vnodes = $data3_192.168.0.1
-print dnode1 $dnode1Vnodes
-$dnode2Vnodes = $data3_192.168.0.2
-print dnode2 $dnode2Vnodes
-
-if $dnode1Vnodes != 2 then
- goto show2
-endi
-if $dnode2Vnodes != 2 then
- goto show2
-endi
-
-print rows1=>$r1 rows5=>$r5 rows8=>$r8
-print =============== step5
-print sleep 22 seconds
-sleep 22000
-
-print =============== step6
-$i = 1
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $tb
-if $rows != $rowNum then
- return -1
-endi
-
-$i = 5
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $tb
-if $rows != $rowNum then
- return -1
-endi
-
-$i = 8
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $tb
-if $rows != $rowNum then
- return -1
-endi
-
-print rows1=>$r1 rows5=>$r5 rows8=>$r8
-print =============== step7
-$i = 1
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-print $st ==> $r1 $rows , $data00 $data01 $data10 $data11
-if $rows == 0 then
- return -1
-endi
-if $rows <= $r1 then
- return -1
-endi
-
-$i = 5
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-print $st ==> $r5 $rows , $data00 $data01 $data10 $data11
-if $rows == 0 then
- return -1
-endi
-if $rows <= $r5 then
- return -1
-endi
-
-
-$i = 8
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-print $st ==> $r8 $rows , $data00 $data01 $data10 $data11
-if $rows == 0 then
- return -1
-endi
-if $rows <= $r8 then
- return -1
-endi
-
-
-if $r1 != $r5 then
- return -1
-endi
-
-if $r8 != $r5 then
- return -1
-endi
-
-print =============== clear
-system sh/exec.sh -n dnode1 -s stop
-system sh/exec.sh -n dnode2 -s stop
-
diff --git a/tests/script/unique/stream/table_move.sim b/tests/script/unique/stream/table_move.sim
deleted file mode 100644
index 964a0c025363fd650e8051312a812fffbddaea7d..0000000000000000000000000000000000000000
--- a/tests/script/unique/stream/table_move.sim
+++ /dev/null
@@ -1,269 +0,0 @@
-system sh/stop_dnodes.sh
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/deploy.sh -n dnode3 -i 3
-system sh/deploy.sh -n dnode4 -i 4
-
-system sh/cfg.sh -n dnode1 -c statusInterval -v 1
-system sh/cfg.sh -n dnode2 -c statusInterval -v 1
-system sh/cfg.sh -n dnode3 -c statusInterval -v 1
-system sh/cfg.sh -n dnode4 -c statusInterval -v 1
-
-system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
-system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
-system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
-system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
-
-system sh/cfg.sh -n dnode1 -c wallevel -v 1
-system sh/cfg.sh -n dnode2 -c wallevel -v 1
-system sh/cfg.sh -n dnode3 -c wallevel -v 1
-system sh/cfg.sh -n dnode4 -c wallevel -v 1
-
-system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 0
-system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 0
-system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 0
-system sh/cfg.sh -n dnode4 -c mnodeEqualVnodeNum -v 0
-
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
-
-system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
-system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 4
-system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 4
-
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
-system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
-system sh/cfg.sh -n dnode4 -c numOfMnodes -v 3
-
-system sh/cfg.sh -n dnode1 -c maxVnodeConnections -v 1000
-system sh/cfg.sh -n dnode1 -c maxMeterConnections -v 1000
-system sh/cfg.sh -n dnode1 -c maxShellConns -v 1000
-system sh/cfg.sh -n dnode1 -c maxMgmtConnections -v 1000
-
-system sh/cfg.sh -n dnode2 -c maxVnodeConnections -v 1000
-system sh/cfg.sh -n dnode2 -c maxMeterConnections -v 1000
-system sh/cfg.sh -n dnode2 -c maxShellConns -v 1000
-system sh/cfg.sh -n dnode2 -c maxMgmtConnections -v 1000
-
-$dbPrefix = db
-$tbPrefix = tb
-$mtPrefix = mt
-$stPrefix = st
-$tbNum = 5
-$rowNum = 20
-$totalNum = 200
-
-print ============== step1
-print ========= start dnode1
-system sh/exec.sh -n dnode1 -s start
-
-sql connect
-sleep 2000
-
-$i = 0
-$db = $dbPrefix . $i
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-
-sql create database $db
-sql use $db
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
-
- $x = -20
- $y = 0
- while $y < $rowNum
- $ms = $x . s
- sql insert into $tb values (now $ms , $x , $x )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-sleep 100
-
-print =============== step2
-$i = 0
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-
-sql select count(*), count(tbcol), count(tbcol2) from $tb interval(1d)
-print select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) ===> $data00 $data01 $data02, $data03
-if $data01 != $rowNum then
- return -1
-endi
-if $data02 != $rowNum then
- return -1
-endi
-if $data03 != $rowNum then
- return -1
-endi
-
-sql show tables
-if $rows != 5 then
- return -1
-endi
-
-sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s)
-
-sql show tables
-if $rows != 6 then
- return -1
-endi
-
-print =============== step3
-print sleep 22 seconds
-sleep 22000
-
-sql select * from $tb
-if $rows != 20 then
- return -1
-endi
-
-sql select * from $mt
-if $rows != 100 then
- return -1
-endi
-
-sql select * from $st
-print select * from $st => $data01
-if $rows == 0 then
- return -1
-endi
-
-$x = 0
-show1:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-sql show dnodes -x show1
-$dnode1Vnodes = $data3_192.168.0.1
-print dnode1 $dnode1Vnodes
-$dnode2Vnodes = $data3_192.168.0.2
-print dnode2 $dnode2Vnodes
-
-if $dnode1Vnodes != 6 then
- goto show1
-endi
-if $dnode2Vnodes != NULL then
- goto show1
-endi
-
-print =============== step4 start dnode2
-sql create dnode $hostname2
-system sh/exec.sh -n dnode2 -s start
-sleep 8000
-
-$x = 0
-show2:
- $x = $x + 1
- sleep 2000
- if $x == 20 then
- return -1
- endi
-sql show dnodes -x show2
-$dnode1Vnodes = $data3_192.168.0.1
-print dnode1 $dnode1Vnodes
-$dnode2Vnodes = $data3_192.168.0.2
-print dnode2 $dnode2Vnodes
-
-if $dnode1Vnodes != 7 then
- goto show2
-endi
-if $dnode2Vnodes != 7 then
- goto show2
-endi
-
-print =============== step5 drop dnode1
-system sh/exec.sh -n dnode1 -s stop
-print stop dnode1 and sleep 10000
-sleep 10000
-
-sql drop dnode $hostname1
-print drop dnode1 and sleep 9000
-sleep 9000
-
-$x = 0
-show6:
- $x = $x + 1
- sleep 2000
- if $x == 20 then
- return -1
- endi
-sql show dnodes -x show6
-$dnode1Vnodes = $data3_192.168.0.1
-print dnode1 $dnode1Vnodes
-$dnode2Vnodes = $data3_192.168.0.2
-print dnode2 $dnode2Vnodes
-
-if $dnode1Vnodes != NULL then
- goto show6
-endi
-if $dnode2Vnodes != 6 then
- goto show6
-endi
-
-print =============== step6
-
-print select * from $tb
-sql select * from $tb
-if $rows != 20 then
- return -1
-endi
-
-print select * from $mt
-sql select * from $mt
-if $rows != 80 then
- return -1
-endi
-
-
-print =============== step7
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
-
- $x = 0
- $y = 0
- while $y < $rowNum
- $ms = $x . s
- sql insert into $tb values (now + $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-$i = 0
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-
-print =============== step8
-print sleep 22 seconds
-sleep 22000
-
-print select * from $st
-sql select * from $st
-if $rows == 0 then
- return -1
-endi
-
-
-print =============== clear
-system sh/exec.sh -n dnode1 -s stop
-system sh/exec.sh -n dnode2 -s stop
-
diff --git a/tests/script/unique/stream/table_replica1_dnode2.sim b/tests/script/unique/stream/table_replica1_dnode2.sim
deleted file mode 100644
index ccc6026e9c92975ccdd4fd12366a11f50a818d3f..0000000000000000000000000000000000000000
--- a/tests/script/unique/stream/table_replica1_dnode2.sim
+++ /dev/null
@@ -1,137 +0,0 @@
-system sh/stop_dnodes.sh
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
-system sh/exec.sh -n dnode1 -s start
-
-sql connect
-
-sql create dnode $hostname2
-system sh/exec.sh -n dnode2 -s start
-$x = 0
-createDnode:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-sql show dnodes;
-if $data4_192.168.0.2 == offline then
- goto createDnode
-endi
-print ======================== dnode1 start
-
-$dbPrefix = t1d_db
-$tbPrefix = t1d_tb
-$mtPrefix = t1d_mt
-$stPrefix = t1d_st
-$tbNum = 10
-$rowNum = 20
-$totalNum = 200
-
-print =============== step1
-$i = 0
-$db = $dbPrefix . $i
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-
-sql drop databae $db -x step1
-step1:
-sql create database $db
-sql use $db
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
-
- $x = -1440
- $y = 0
- while $y < $rowNum
- $ms = $x . m
- sql insert into $tb values (now $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-sleep 100
-
-print =============== step2
-
-$i = 1
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select count(*), count(tbcol), count(tbcol2) from $tb interval(1d)
-print select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) --> $data00 $data01 $data02 $data03
-sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(1d)
-
-$i = 5
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select count(*), count(tbcol), count(tbcol2) from $tb interval(1d)
-print select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) --> $data00 $data01 $data02 $data03
-sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(1d)
-
-$i = 8
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select count(*), count(tbcol), count(tbcol2) from $tb interval(1d)
-print select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) --> $data00 $data01 $data02 $data03
-sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(1d)
-
-sql show tables
-if $rows != 13 then
- return -1
-endi
-
-print =============== step3
-print sleep 22 seconds
-sleep 22000
-
-
-print =============== step4
-$i = 1
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-print $st ==> $rows $data00 $data01 $data10 $data11
-$rows1 = $rows
-if $data01 != 20 then
- return -1
-endi
-
-$i = 5
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-print $st => $rows $data00 $data01 $data10 $data11
-$rows5 = $rows
-if $data01 != 20 then
- return -1
-endi
-
-$i = 8
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-print $st ==> $rows $data00 $data01 $data10 $data11
-$rows8 = $rows
-if $data01 != 20 then
- return -1
-endi
-
-if $rows8 != $rows5 then
- return -1
-endi
-
-if $rows8 != $rows1 then
- return -1
-endi
\ No newline at end of file
diff --git a/tests/script/unique/stream/table_replica2_dnode2.sim b/tests/script/unique/stream/table_replica2_dnode2.sim
deleted file mode 100644
index 947fa0d2f9093c802a9c99c74edddeffca102d38..0000000000000000000000000000000000000000
--- a/tests/script/unique/stream/table_replica2_dnode2.sim
+++ /dev/null
@@ -1,312 +0,0 @@
-system sh/stop_dnodes.sh
-
-
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-system sh/exec.sh -n dnode1 -s start
-
-sql connect
-
-sql create dnode $hostname2
-system sh/exec.sh -n dnode2 -s start
-$x = 0
-createDnode:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-sql show dnodes;
-if $data4_192.168.0.2 == offline then
- goto createDnode
-endi
-print ======================== dnode1 start
-
-$dbPrefix = t2d_db
-$tbPrefix = t2d_tb
-$mtPrefix = t2d_mt
-$stPrefix = t2d_st
-$tbNum = 10
-$rowNum = 20
-$totalNum = 200
-
-print =============== step1
-$i = 0
-$db = $dbPrefix . $i
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-
-sql drop database $db -x step1
-step1:
-sql create database $db replica 2
-sql use $db
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
-
- $x = -1440
- $y = 0
- while $y < $rowNum
- $ms = $x . m
- sql insert into $tb values (now $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-sleep 100
-
-print =============== step2 c1
-$i = 1
-$tb = $tbPrefix . $i
-
-sql select count(*) from $tb interval(1d)
-print select count(*) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c1
-sql create table $st as select count(*) from $tb interval(1d)
-
-print =============== step3 c2
-sql select count(tbcol) from $tb interval(1d)
-print select count(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql create table $st as select count(tbcol) from $tb interval(1d)
-
-print =============== step4 c3
-sql select count(tbcol2) from $tb interval(1d)
-print select count(tbcol2) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql create table $st as select count(tbcol2) from $tb interval(1d)
-
-print =============== step5 avg
-sql select avg(tbcol) from $tb interval(1d)
-print select avg(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . av
-sql create table $st as select avg(tbcol) from $tb interval(1d)
-
-print =============== step6 su
-sql select sum(tbcol) from $tb interval(1d)
-print select sum(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 190 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql create table $st as select sum(tbcol) from $tb interval(1d)
-
-print =============== step7 mi
-sql select min(tbcol) from $tb interval(1d)
-print select min(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql create table $st as select min(tbcol) from $tb interval(1d)
-
-print =============== step8 ma
-sql select max(tbcol) from $tb interval(1d)
-print select max(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql create table $st as select max(tbcol) from $tb interval(1d)
-
-print =============== step9 fi
-sql select first(tbcol) from $tb interval(1d)
-print select first(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql create table $st as select first(tbcol) from $tb interval(1d)
-
-print =============== step10 la
-sql select last(tbcol) from $tb interval(1d)
-print select last(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql create table $st as select last(tbcol) from $tb interval(1d)
-
-print =============== step11 st
-sql select stddev(tbcol) from $tb interval(1d)
-print select stddev(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 5.766281297 then
- return -1
-endi
-
-$st = $stPrefix . std
-sql create table $st as select stddev(tbcol) from $tb interval(1d)
-
-print =============== step12 le
-sql select leastsquares(tbcol, 1, 1) from $tb interval(1d)
-print select leastsquares(tbcol, 1, 1) from $tb interval(1d) ===> $data00 $data01
-#if $data01 != @(0.000017, -25362055.126740)@ then
-# return -1
-#endi
-
-$st = $stPrefix . le
-sql create table $st as select leastsquares(tbcol, 1, 1) from $tb interval(1d)
-
-print =============== step13 pe
-
-sql select percentile(tbcol, 1) from $tb interval(1d)
-print select percentile(tbcol, 1) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 0.190000000 then
- return -1
-endi
-
-$st = $stPrefix . pe
-sql create table $st as select percentile(tbcol, 1) from $tb interval(1d)
-
-print =============== step14 wh
-sql select count(tbcol) from $tb where ts < now + 4m interval(1d)
-print select count(tbcol) from $tb where ts < now + 4m interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql create table $st as select count(tbcol) from $tb where ts < now + 4m interval(1d)
-
-print =============== step15 as
-sql select count(tbcol) from $tb interval(1d)
-print select count(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . as
-sql create table $st as select count(tbcol) as c from $tb interval(1d)
-
-print =============== step16
-print sleep 22 seconds
-sleep 22000
-
-print =============== step17
-$st = $stPrefix . c1
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . av
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 190 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . std
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 5.766281297 then
- return -1
-endi
-
-$st = $stPrefix . le
-sql select * from $st
-#print ===> select * from $st ===> $data00 $data01
-#if $data01 != @(0.000017, -25270086.331047)@ then
-# return -1
-#endi
-
-$st = $stPrefix . pe
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0.190000000 then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql select * from $st
-#print ===> select * from $st ===> $data00 $data01
-#if $data01 != $rowNum then
-# return -1
-#endi
-
-$st = $stPrefix . as
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
diff --git a/tests/script/unique/stream/table_replica2_dnode2_vnoden.sim b/tests/script/unique/stream/table_replica2_dnode2_vnoden.sim
deleted file mode 100644
index 75300362393eaa543740307d4d11f9a4eabbbc50..0000000000000000000000000000000000000000
--- a/tests/script/unique/stream/table_replica2_dnode2_vnoden.sim
+++ /dev/null
@@ -1,314 +0,0 @@
-system sh/stop_dnodes.sh
-
-
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
-system sh/exec.sh -n dnode1 -s start
-
-sql connect
-
-sql create dnode $hostname2
-system sh/exec.sh -n dnode2 -s start
-$x = 0
-createDnode:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-sql show dnodes;
-if $data4_192.168.0.2 == offline then
- goto createDnode
-endi
-print ======================== dnode1 start
-
-$dbPrefix = t2dv_db
-$tbPrefix = t2dv_tb
-$mtPrefix = t2dv_mt
-$stPrefix = t2dv_st
-$tbNum = 10
-$rowNum = 20
-$totalNum = 200
-
-print =============== step1
-$i = 0
-$db = $dbPrefix . $i
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-
-sql drop databae $db -x step1
-step1:
-sql create database $db replica 2
-sql use $db
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
-
- $x = -1440
- $y = 0
- while $y < $rowNum
- $ms = $x . m
- sql insert into $tb values (now $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-sleep 100
-
-print =============== step2 c1
-$i = 1
-$tb = $tbPrefix . $i
-
-sql select count(*) from $tb interval(1d)
-print select count(*) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c1
-sql create table $st as select count(*) from $tb interval(1d)
-
-print =============== step3 c2
-sql select count(tbcol) from $tb interval(1d)
-print select count(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql create table $st as select count(tbcol) from $tb interval(1d)
-
-print =============== step4 c3
-sql select count(tbcol2) from $tb interval(1d)
-print select count(tbcol2) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql create table $st as select count(tbcol2) from $tb interval(1d)
-
-print =============== step5 avg
-sql select avg(tbcol) from $tb interval(1d)
-print select avg(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . av
-sql create table $st as select avg(tbcol) from $tb interval(1d)
-
-print =============== step6 su
-sql select sum(tbcol) from $tb interval(1d)
-print select sum(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 190 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql create table $st as select sum(tbcol) from $tb interval(1d)
-
-print =============== step7 mi
-sql select min(tbcol) from $tb interval(1d)
-print select min(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql create table $st as select min(tbcol) from $tb interval(1d)
-
-print =============== step8 ma
-sql select max(tbcol) from $tb interval(1d)
-print select max(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql create table $st as select max(tbcol) from $tb interval(1d)
-
-print =============== step9 fi
-sql select first(tbcol) from $tb interval(1d)
-print select first(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql create table $st as select first(tbcol) from $tb interval(1d)
-
-print =============== step10 la
-sql select last(tbcol) from $tb interval(1d)
-print select last(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql create table $st as select last(tbcol) from $tb interval(1d)
-
-print =============== step11 st
-sql select stddev(tbcol) from $tb interval(1d)
-print select stddev(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 5.766281297 then
- return -1
-endi
-
-$st = $stPrefix . std
-sql create table $st as select stddev(tbcol) from $tb interval(1d)
-
-print =============== step12 le
-sql select leastsquares(tbcol, 1, 1) from $tb interval(1d)
-print select leastsquares(tbcol, 1, 1) from $tb interval(1d) ===> $data00 $data01
-#if $data01 != @(0.000017, -25362055.126740)@ then
-# return -1
-#endi
-
-$st = $stPrefix . le
-sql create table $st as select leastsquares(tbcol, 1, 1) from $tb interval(1d)
-
-print =============== step13 pe
-
-sql select percentile(tbcol, 1) from $tb interval(1d)
-print select percentile(tbcol, 1) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 0.190000000 then
- return -1
-endi
-
-$st = $stPrefix . pe
-sql create table $st as select percentile(tbcol, 1) from $tb interval(1d)
-
-print =============== step14 wh
-sql select count(tbcol) from $tb where ts < now + 4m interval(1d)
-print select count(tbcol) from $tb where ts < now + 4m interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql create table $st as select count(tbcol) from $tb where ts < now + 4m interval(1d)
-
-print =============== step15 as
-sql select count(tbcol) from $tb interval(1d)
-print select count(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . as
-sql create table $st as select count(tbcol) as c from $tb interval(1d)
-
-print =============== step16
-print sleep 22 seconds
-sleep 22000
-
-print =============== step17
-$st = $stPrefix . c1
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . av
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 190 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . std
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 5.766281297 then
- return -1
-endi
-
-$st = $stPrefix . le
-sql select * from $st
-#print ===> select * from $st ===> $data00 $data01
-#if $data01 != @(0.000017, -25270086.331047)@ then
-# return -1
-#endi
-
-$st = $stPrefix . pe
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0.190000000 then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql select * from $st
-#print ===> select * from $st ===> $data00 $data01
-#if $data01 != $rowNum then
-# return -1
-#endi
-
-$st = $stPrefix . as
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
diff --git a/tests/script/unique/stream/table_replica2_dnode3.sim b/tests/script/unique/stream/table_replica2_dnode3.sim
deleted file mode 100644
index 49eb3563b3964f05f31d72a8fd1ff12f2b5b3a03..0000000000000000000000000000000000000000
--- a/tests/script/unique/stream/table_replica2_dnode3.sim
+++ /dev/null
@@ -1,325 +0,0 @@
-system sh/stop_dnodes.sh
-
-
-
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/deploy.sh -n dnode3 -i 3
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-system sh/cfg.sh -n dnode3 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
-system sh/exec.sh -n dnode1 -s start
-
-
-sql connect
-
-sql create dnode $hostname2
-sql create dnode $hostname3
-system sh/exec.sh -n dnode2 -s start
-system sh/exec.sh -n dnode3 -s start
-$x = 0
-createDnode:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-sql show dnodes;
-if $data4_192.168.0.2 == offline then
- goto createDnode
-endi
-if $data4_192.168.0.3 == offline then
- goto createDnode
-endi
-
-print ======================== dnode1 start
-
-$dbPrefix = t2d3_db
-$tbPrefix = t2d3_tb
-$mtPrefix = t2d3_mt
-$stPrefix = t2d3_st
-$tbNum = 10
-$rowNum = 20
-$totalNum = 200
-
-print =============== step1
-$i = 0
-$db = $dbPrefix . $i
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-
-sql drop databae $db -x step1
-step1:
-sql create database $db replica 2
-sql use $db
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
-
- $x = -1440
- $y = 0
- while $y < $rowNum
- $ms = $x . m
- sql insert into $tb values (now $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-sleep 100
-
-print =============== step2 c1
-$i = 1
-$tb = $tbPrefix . $i
-
-sql select count(*) from $tb interval(1d)
-print select count(*) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c1
-sql create table $st as select count(*) from $tb interval(1d)
-
-print =============== step3 c2
-sql select count(tbcol) from $tb interval(1d)
-print select count(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql create table $st as select count(tbcol) from $tb interval(1d)
-
-print =============== step4 c3
-sql select count(tbcol2) from $tb interval(1d)
-print select count(tbcol2) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql create table $st as select count(tbcol2) from $tb interval(1d)
-
-print =============== step5 avg
-sql select avg(tbcol) from $tb interval(1d)
-print select avg(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . av
-sql create table $st as select avg(tbcol) from $tb interval(1d)
-
-print =============== step6 su
-sql select sum(tbcol) from $tb interval(1d)
-print select sum(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 190 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql create table $st as select sum(tbcol) from $tb interval(1d)
-
-print =============== step7 mi
-sql select min(tbcol) from $tb interval(1d)
-print select min(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql create table $st as select min(tbcol) from $tb interval(1d)
-
-print =============== step8 ma
-sql select max(tbcol) from $tb interval(1d)
-print select max(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql create table $st as select max(tbcol) from $tb interval(1d)
-
-print =============== step9 fi
-sql select first(tbcol) from $tb interval(1d)
-print select first(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql create table $st as select first(tbcol) from $tb interval(1d)
-
-print =============== step10 la
-sql select last(tbcol) from $tb interval(1d)
-print select last(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql create table $st as select last(tbcol) from $tb interval(1d)
-
-print =============== step11 st
-sql select stddev(tbcol) from $tb interval(1d)
-print select stddev(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 5.766281297 then
- return -1
-endi
-
-$st = $stPrefix . std
-sql create table $st as select stddev(tbcol) from $tb interval(1d)
-
-print =============== step12 le
-sql select leastsquares(tbcol, 1, 1) from $tb interval(1d)
-print select leastsquares(tbcol, 1, 1) from $tb interval(1d) ===> $data00 $data01
-#if $data01 != @(0.000017, -25362055.126740)@ then
-# return -1
-#endi
-
-$st = $stPrefix . le
-sql create table $st as select leastsquares(tbcol, 1, 1) from $tb interval(1d)
-
-print =============== step13 pe
-
-sql select percentile(tbcol, 1) from $tb interval(1d)
-print select percentile(tbcol, 1) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 0.190000000 then
- return -1
-endi
-
-$st = $stPrefix . pe
-sql create table $st as select percentile(tbcol, 1) from $tb interval(1d)
-
-print =============== step14 wh
-sql select count(tbcol) from $tb where ts < now + 4m interval(1d)
-print select count(tbcol) from $tb where ts < now + 4m interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql create table $st as select count(tbcol) from $tb where ts < now + 4m interval(1d)
-
-print =============== step15 as
-sql select count(tbcol) from $tb interval(1d)
-print select count(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . as
-sql create table $st as select count(tbcol) as c from $tb interval(1d)
-
-print =============== step16
-print sleep 22 seconds
-sleep 22000
-
-print =============== step17
-$st = $stPrefix . c1
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . av
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 190 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . std
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 5.766281297 then
- return -1
-endi
-
-$st = $stPrefix . le
-sql select * from $st
-#print ===> select * from $st ===> $data00 $data01
-#if $data01 != @(0.000017, -25270086.331047)@ then
-# return -1
-#endi
-
-$st = $stPrefix . pe
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0.190000000 then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql select * from $st
-#print ===> select * from $st ===> $data00 $data01
-#if $data01 != $rowNum then
-# return -1
-#endi
-
-$st = $stPrefix . as
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
diff --git a/tests/script/unique/stream/table_replica3_dnode4.sim b/tests/script/unique/stream/table_replica3_dnode4.sim
deleted file mode 100644
index 2cc443c72fc656b87ca8c1d330381ed5078cd755..0000000000000000000000000000000000000000
--- a/tests/script/unique/stream/table_replica3_dnode4.sim
+++ /dev/null
@@ -1,333 +0,0 @@
-system sh/stop_dnodes.sh
-
-
-
-
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/deploy.sh -n dnode3 -i 3
-system sh/deploy.sh -n dnode4 -i 4
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-system sh/cfg.sh -n dnode3 -c walLevel -v 1
-system sh/cfg.sh -n dnode4 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
-system sh/exec.sh -n dnode1 -s start
-
-sql connect
-
-sql create dnode $hostname2
-sql create dnode $hostname3
-sql create dnode $hostname4
-system sh/exec.sh -n dnode2 -s start
-system sh/exec.sh -n dnode3 -s start
-system sh/exec.sh -n dnode4 -s start
-$x = 0
-createDnode:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-sql show dnodes;
-if $data4_192.168.0.2 == offline then
- goto createDnode
-endi
-if $data4_192.168.0.3 == offline then
- goto createDnode
-endi
-if $data4_192.168.0.4 == offline then
- goto createDnode
-endi
-
-print ======================== dnode1 start
-
-$dbPrefix = t3d_db
-$tbPrefix = t3d_tb
-$mtPrefix = t3d_mt
-$stPrefix = t3d_st
-$tbNum = 10
-$rowNum = 20
-$totalNum = 200
-
-print =============== step1
-$i = 0
-$db = $dbPrefix . $i
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-
-sql drop databae $db -x step1
-step1:
-sql create database $db replica 3
-sql use $db
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
-
- $x = -1440
- $y = 0
- while $y < $rowNum
- $ms = $x . m
- sql insert into $tb values (now $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-sleep 100
-
-print =============== step2 c1
-$i = 1
-$tb = $tbPrefix . $i
-
-sql select count(*) from $tb interval(1d)
-print select count(*) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c1
-sql create table $st as select count(*) from $tb interval(1d)
-
-print =============== step3 c2
-sql select count(tbcol) from $tb interval(1d)
-print select count(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql create table $st as select count(tbcol) from $tb interval(1d)
-
-print =============== step4 c3
-sql select count(tbcol2) from $tb interval(1d)
-print select count(tbcol2) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql create table $st as select count(tbcol2) from $tb interval(1d)
-
-print =============== step5 avg
-sql select avg(tbcol) from $tb interval(1d)
-print select avg(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . av
-sql create table $st as select avg(tbcol) from $tb interval(1d)
-
-print =============== step6 su
-sql select sum(tbcol) from $tb interval(1d)
-print select sum(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 190 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql create table $st as select sum(tbcol) from $tb interval(1d)
-
-print =============== step7 mi
-sql select min(tbcol) from $tb interval(1d)
-print select min(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql create table $st as select min(tbcol) from $tb interval(1d)
-
-print =============== step8 ma
-sql select max(tbcol) from $tb interval(1d)
-print select max(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql create table $st as select max(tbcol) from $tb interval(1d)
-
-print =============== step9 fi
-sql select first(tbcol) from $tb interval(1d)
-print select first(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql create table $st as select first(tbcol) from $tb interval(1d)
-
-print =============== step10 la
-sql select last(tbcol) from $tb interval(1d)
-print select last(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql create table $st as select last(tbcol) from $tb interval(1d)
-
-print =============== step11 st
-sql select stddev(tbcol) from $tb interval(1d)
-print select stddev(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 5.766281297 then
- return -1
-endi
-
-$st = $stPrefix . std
-sql create table $st as select stddev(tbcol) from $tb interval(1d)
-
-print =============== step12 le
-sql select leastsquares(tbcol, 1, 1) from $tb interval(1d)
-print select leastsquares(tbcol, 1, 1) from $tb interval(1d) ===> $data00 $data01
-#if $data01 != @(0.000017, -25362055.126740)@ then
-# return -1
-#endi
-
-$st = $stPrefix . le
-sql create table $st as select leastsquares(tbcol, 1, 1) from $tb interval(1d)
-
-print =============== step13 pe
-
-sql select percentile(tbcol, 1) from $tb interval(1d)
-print select percentile(tbcol, 1) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 0.190000000 then
- return -1
-endi
-
-$st = $stPrefix . pe
-sql create table $st as select percentile(tbcol, 1) from $tb interval(1d)
-
-print =============== step14 wh
-sql select count(tbcol) from $tb where ts < now + 4m interval(1d)
-print select count(tbcol) from $tb where ts < now + 4m interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql create table $st as select count(tbcol) from $tb where ts < now + 4m interval(1d)
-
-print =============== step15 as
-sql select count(tbcol) from $tb interval(1d)
-print select count(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . as
-sql create table $st as select count(tbcol) as c from $tb interval(1d)
-
-print =============== step16
-print sleep 22 seconds
-sleep 22000
-
-print =============== step17
-$st = $stPrefix . c1
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . av
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 190 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . std
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 5.766281297 then
- return -1
-endi
-
-$st = $stPrefix . le
-sql select * from $st
-#print ===> select * from $st ===> $data00 $data01
-#if $data01 != @(0.000017, -25270086.331047)@ then
-# return -1
-#endi
-
-$st = $stPrefix . pe
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0.190000000 then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql select * from $st
-#print ===> select * from $st ===> $data00 $data01
-#if $data01 != $rowNum then
-# return -1
-#endi
-
-$st = $stPrefix . as
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
diff --git a/tests/script/unique/stream/table_vnode_stop.sim b/tests/script/unique/stream/table_vnode_stop.sim
deleted file mode 100644
index 625de32a8d7a1e5336dd10f313565bdbc0daf0fc..0000000000000000000000000000000000000000
--- a/tests/script/unique/stream/table_vnode_stop.sim
+++ /dev/null
@@ -1,189 +0,0 @@
-system sh/stop_dnodes.sh
-
-
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
-system sh/exec.sh -n dnode1 -s start
-
-sql connect
-
-sql create dnode $hostname2
-system sh/exec.sh -n dnode2 -s start
-$x = 0
-createDnode:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-sql show dnodes;
-if $data4_192.168.0.2 == offline then
- goto createDnode
-endi
-
-print ======================== dnode start
-
-$dbPrefix = db
-$tbPrefix = tb
-$mtPrefix = mt
-$stPrefix = st
-$tbNum = 10
-$rowNum = 20
-$totalNum = 200
-
-print =============== step1
-$i = 0
-$db = $dbPrefix . $i
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-
-sql create database $db replica 2
-sql use $db
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
-
- $x = -1440
- $y = 0
- while $y < $rowNum
- $ms = $x . m
- sql insert into $tb values (now $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-sleep 100
-
-print =============== step2 c1
-$i = 1
-$tb = $tbPrefix . $i
-
-sql select count(*) from $tb interval(1d)
-print select count(*) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c1
-sql create table $st as select count(*) from $tb interval(1d)
-
-print =============== step3
-system sh/exec.sh -n dnode2 -s stop
-
-print =============== step4
-print sleep 22 seconds
-sleep 22000
-
-print =============== step5
-$st = $stPrefix . c1
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-print ============= step6
-
-sql close
-system sh/exec.sh -n dnode1 -s stop
-system sh/exec.sh -n dnode2 -s stop
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-sleep 2000
-system sh/exec.sh -n dnode2 -s start
-
-$x = 0
-connectTbase2:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-sql connect -x connectTbase2
-sleep 2000
-
-sql create dnode $hostname1
-system sh/exec.sh -n dnode1 -s start
-sleep 2000
-print ======================== dnode start
-
-$dbPrefix = db
-$tbPrefix = tb
-$mtPrefix = mt
-$stPrefix = st
-$tbNum = 10
-$rowNum = 20
-$totalNum = 200
-
-print =============== step7
-$i = 0
-$db = $dbPrefix . $i
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-
-sql create database $db replica 2
-sql use $db
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
-
- $x = -1440
- $y = 0
- while $y < $rowNum
- $ms = $x . m
- sql insert into $tb values (now $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-sleep 100
-
-print =============== step8 c1
-$i = 1
-$tb = $tbPrefix . $i
-
-sql select count(*) from $tb interval(1d)
-print select count(*) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c1
-sql create table $st as select count(*) from $tb interval(1d)
-
-print =============== step9
-system sh/exec.sh -n dnode1 -s stop
-
-print =============== step10
-print sleep 22 seconds
-sleep 22000
-
-print =============== step11
-$st = $stPrefix . c1
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-
-
diff --git a/tests/script/unique/stream/testSuite.sim b/tests/script/unique/stream/testSuite.sim
deleted file mode 100644
index bbf5da3d376d9eccc02aa61b1122cadb5fc04813..0000000000000000000000000000000000000000
--- a/tests/script/unique/stream/testSuite.sim
+++ /dev/null
@@ -1,15 +0,0 @@
-#run unique/stream/table_replica1_dnode2.sim
-#run unique/stream/metrics_replica1_dnode2.sim
-#run unique/stream/table_replica2_dnode2.sim
-#run unique/stream/metrics_replica2_dnode2.sim
-#run unique/stream/table_replica2_dnode2_vnoden.sim
-#run unique/stream/metrics_replica2_dnode2_vnoden.sim
-#run unique/stream/table_replica2_dnode3.sim
-#run unique/stream/metrics_replica2_dnode3.sim
-#run unique/stream/table_replica3_dnode4.sim
-#run unique/stream/metrics_replica3_dnode4.sim
-#run unique/stream/table_vnode_stop.sim
-#run unique/stream/metrics_vnode_stop.sim
-##run unique/stream/table_balance.sim
-##run unique/stream/metrics_balance.sim
-##run unique/stream/table_move.sim
\ No newline at end of file
diff --git a/tests/system-test/0-others/cachelast.py b/tests/system-test/0-others/cachelast.py
new file mode 100644
index 0000000000000000000000000000000000000000..7e912eda9a73627962f98891a56da2c7fd3ab7ef
--- /dev/null
+++ b/tests/system-test/0-others/cachelast.py
@@ -0,0 +1,148 @@
+import taos
+import sys ,os ,json
+import datetime
+import inspect
+import subprocess
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+
+
+class TDTestCase:
+ updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
+ "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
+ "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143}
+ def init(self, conn, logSql):
+ tdLog.debug(f"start to excute {__file__}")
+ tdSql.init(conn.cursor(), True)
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files or "taosd.exe" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root) - len("/build/bin")]
+ break
+ return buildPath
+
+ def illegal_params(self):
+
+ illegal_params = ["1","0","NULL","None","False","True" ,"keep","now" ,"*" , "," ,"_" , "abc" ,"keep"]
+
+ for value in illegal_params:
+
+ tdSql.error("create database testdb replica 1 cachelast '%s' " %value)
+
+ unexpected_numbers = [-1 , 0.0 , 3.0 , 4, 10 , 100]
+
+ for number in unexpected_numbers:
+ tdSql.error("create database testdb replica 1 cachelast %s " %number)
+
+
+ def prepare_datas(self):
+ for i in range(4):
+ tdSql.execute("create database test_db_%d replica 1 cachelast %d " %(i,i))
+ tdSql.execute("use test_db_%d"%i)
+ tdSql.execute("create stable st(ts timestamp , c1 int ,c2 float ) tags(ind int) ")
+ tdSql.execute("create table tb1 using st tags(1) ")
+ tdSql.execute("create table tb2 using st tags(2) ")
+
+ for k in range(10):
+ tdSql.execute(" insert into tb1 values(now , %d, %f)" %(k,k*10) )
+ tdSql.execute(" insert into tb2 values(now , %d, %f)" %(k,k*10) )
+
+ def check_cache_last_sets(self):
+
+
+ # check cache_last value for database
+
+ tdSql.query(" show databases ")
+ databases_infos = tdSql.queryResult
+ cache_lasts = {}
+ for db_info in databases_infos:
+ dbname = db_info[0]
+ # print(dbname)
+ cache_last_value = db_info[16]
+ # print(cache_last_value)
+ if dbname in ["information_schema" , "performance_schema"]:
+ continue
+ cache_lasts[dbname]=cache_last_value
+
+
+ # cache_last_set value
+ for k , v in cache_lasts.items():
+
+ if k.split("_")[-1]==str(v):
+ tdLog.info(" database %s cache_last value check pass, value is %d "%(k,v) )
+ else:
+ tdLog.exit(" database %s cache_last value check fail, value is %d "%(k,v) )
+
+ # # check storage layer implementation
+
+
+ # buildPath = self.getBuildPath()
+ # if (buildPath == ""):
+ # tdLog.exit("taosd not found!")
+ # else:
+ # tdLog.info("taosd found in %s" % buildPath)
+ # dataPath = buildPath + "/../sim/dnode1/data"
+ # abs_vnodePath = os.path.abspath(dataPath)+"/vnode/"
+ # tdLog.info("abs_vnodePath: %s" % abs_vnodePath)
+
+ # tdSql.query(" show dnodes ")
+ # dnode_id = tdSql.queryResult[0][0]
+
+ # for dbname in cache_lasts.keys():
+ # print(dbname)
+ # tdSql.execute(" use %s" % dbname)
+ # tdSql.query(" show vgroups ")
+ # vgroups_infos = tdSql.queryResult
+ # for vgroup_info in vgroups_infos:
+ # vnode_json = abs_vnodePath + "/vnode" +f"{vgroup_info[0]}/" + "vnode.json"
+ # vnode_info_of_db = f"cat {vnode_json}"
+ # vnode_info = subprocess.check_output(vnode_info_of_db, shell=True).decode("utf-8")
+ # infoDict = json.loads(vnode_info)
+ # vnode_json_of_dbname = f"{dnode_id}."+ dbname
+ # config = infoDict["config"]
+ # if infoDict["config"]["dbname"] == vnode_json_of_dbname:
+ # if "cachelast" in infoDict["config"]:
+ # if int(infoDict["config"]["cachelast"]) != cache_lasts[dbname]:
+ # tdLog.exit("cachelast value is error in vnode.json of vnode%d "%(vgroup_info[0]))
+ # else:
+ # tdLog.exit("cachelast not found in vnode.json of vnode%d "%(vgroup_info[0]))
+
+ def restart_check_cache_last_sets(self):
+
+ for i in range(3):
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+ tdDnodes.stop(index)
+ tdDnodes.start(index)
+ time.sleep(3)
+ self.check_cache_last_sets()
+
+
+ def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
+
+
+ self.illegal_params()
+ self.prepare_datas()
+ self.check_cache_last_sets()
+ self.restart_check_cache_last_sets()
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/0-others/taosShell.py b/tests/system-test/0-others/taosShell.py
index f6dfe3f75c795ab8bd8eefc7b9d043d75854dc2e..046db93c4927d0aa39fbd2da4ac60cf12a6537c6 100644
--- a/tests/system-test/0-others/taosShell.py
+++ b/tests/system-test/0-others/taosShell.py
@@ -3,8 +3,12 @@ import taos
import sys
import time
import socket
-import pexpect
import os
+import platform
+if platform.system().lower() == 'windows':
+ import wexpect as taosExpect
+else:
+ import pexpect as taosExpect
from util.log import *
from util.sql import *
@@ -15,7 +19,11 @@ def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key
if len(key) == 0:
tdLog.exit("taos test key is null!")
- taosCmd = buildPath + '/build/bin/taos '
+ if platform.system().lower() == 'windows':
+ taosCmd = buildPath + '\\build\\bin\\taos.exe '
+ taosCmd = taosCmd.replace('\\','\\\\')
+ else:
+ taosCmd = buildPath + '/build/bin/taos '
if len(cfgDir) != 0:
taosCmd = taosCmd + '-c ' + cfgDir
@@ -36,25 +44,30 @@ def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key
tdLog.info ("taos cmd: %s" % taosCmd)
- child = pexpect.spawn(taosCmd, timeout=3)
+ child = taosExpect.spawn(taosCmd, timeout=3)
#output = child.readline()
#print (output.decode())
if len(expectString) != 0:
- i = child.expect([expectString, pexpect.TIMEOUT, pexpect.EOF], timeout=6)
+ i = child.expect([expectString, taosExpect.TIMEOUT, taosExpect.EOF], timeout=6)
else:
- i = child.expect([pexpect.TIMEOUT, pexpect.EOF], timeout=6)
+ i = child.expect([taosExpect.TIMEOUT, taosExpect.EOF], timeout=6)
- retResult = child.before.decode()
+ if platform.system().lower() == 'windows':
+ retResult = child.before
+ else:
+ retResult = child.before.decode()
print(retResult)
#print(child.after.decode())
if i == 0:
print ('taos login success! Here can run sql, taos> ')
if len(sqlString) != 0:
child.sendline (sqlString)
- w = child.expect(["Query OK", pexpect.TIMEOUT, pexpect.EOF], timeout=1)
+ w = child.expect(["Query OK", taosExpect.TIMEOUT, taosExpect.EOF], timeout=1)
if w == 0:
return "TAOS_OK"
else:
+ print(1)
+ print(retResult)
return "TAOS_FAIL"
else:
if key == 'A' or key1 == 'A' or key == 'C' or key1 == 'C' or key == 'V' or key1 == 'V':
@@ -71,6 +84,12 @@ class TDTestCase:
#updatecfgDict = {'clientCfg': {'serverPort': 7080, 'firstEp': 'trd02:7080', 'secondEp':'trd02:7080'},\
# 'serverPort': 7080, 'firstEp': 'trd02:7080'}
hostname = socket.gethostname()
+ if (platform.system().lower() == 'windows' and not tdDnodes.dnodes[0].remoteIP == ""):
+ try:
+ config = eval(tdDnodes.dnodes[0].remoteIP)
+ hostname = config["host"]
+ except Exception:
+ hostname = tdDnodes.dnodes[0].remoteIP
serverPort = '7080'
rpcDebugFlagVal = '143'
clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
@@ -102,7 +121,7 @@ class TDTestCase:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
- if ("taosd" in files):
+ if ("taosd" in files or "taosd.exe" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
@@ -275,11 +294,15 @@ class TDTestCase:
pwd=os.getcwd()
newDbName="dbf"
sqlFile = pwd + "/0-others/sql.txt"
- sql1 = "echo 'create database " + newDbName + "' > " + sqlFile
- sql2 = "echo 'use " + newDbName + "' >> " + sqlFile
- sql3 = "echo 'create table ntbf (ts timestamp, c binary(40))' >> " + sqlFile
- sql4 = "echo 'insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\")' >> " + sqlFile
- sql5 = "echo 'show databases' >> " + sqlFile
+ sql1 = "echo create database " + newDbName + " > " + sqlFile
+ sql2 = "echo use " + newDbName + " >> " + sqlFile
+ if platform.system().lower() == 'windows':
+ sql3 = "echo create table ntbf (ts timestamp, c binary(40)) >> " + sqlFile
+ sql4 = "echo insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\") >> " + sqlFile
+ else:
+ sql3 = "echo 'create table ntbf (ts timestamp, c binary(40))' >> " + sqlFile
+ sql4 = "echo 'insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\")' >> " + sqlFile
+ sql5 = "echo show databases >> " + sqlFile
os.system(sql1)
os.system(sql2)
os.system(sql3)
diff --git a/tests/system-test/0-others/taosShellError.py b/tests/system-test/0-others/taosShellError.py
index 5f2f79982a58fe33e361f7c05926fc7c276f84d7..2369e4d580e491f52e8508c21934085f6ecf89a6 100644
--- a/tests/system-test/0-others/taosShellError.py
+++ b/tests/system-test/0-others/taosShellError.py
@@ -3,7 +3,11 @@ import taos
import sys
import time
import socket
-import pexpect
+import platform
+if platform.system().lower() == 'windows':
+ import wexpect as taosExpect
+else:
+ import pexpect as taosExpect
import os
from util.log import *
@@ -15,7 +19,11 @@ def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key
if len(key) == 0:
tdLog.exit("taos test key is null!")
- taosCmd = buildPath + '/build/bin/taos '
+ if platform.system().lower() == 'windows':
+ taosCmd = buildPath + '\\build\\bin\\taos.exe '
+ taosCmd = taosCmd.replace('\\','\\\\')
+ else:
+ taosCmd = buildPath + '/build/bin/taos '
if len(cfgDir) != 0:
taosCmd = taosCmd + '-c ' + cfgDir
@@ -36,23 +44,29 @@ def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key
tdLog.info ("taos cmd: %s" % taosCmd)
- child = pexpect.spawn(taosCmd, timeout=3)
+ child = taosExpect.spawn(taosCmd, timeout=3)
#output = child.readline()
#print (output.decode())
if len(expectString) != 0:
- i = child.expect([expectString, pexpect.TIMEOUT, pexpect.EOF], timeout=6)
+ i = child.expect([expectString, taosExpect.TIMEOUT, taosExpect.EOF], timeout=6)
else:
- i = child.expect([pexpect.TIMEOUT, pexpect.EOF], timeout=6)
+ i = child.expect([taosExpect.TIMEOUT, taosExpect.EOF], timeout=6)
- retResult = child.before.decode()
+ if platform.system().lower() == 'windows':
+ retResult = child.before
+ else:
+ retResult = child.before.decode()
print("cmd return result:\n%s\n"%retResult)
#print(child.after.decode())
if i == 0:
print ('taos login success! Here can run sql, taos> ')
if len(sqlString) != 0:
child.sendline (sqlString)
- w = child.expect(["Query OK", pexpect.TIMEOUT, pexpect.EOF], timeout=1)
- retResult = child.before.decode()
+ w = child.expect(["Query OK", taosExpect.TIMEOUT, taosExpect.EOF], timeout=1)
+ if platform.system().lower() == 'windows':
+ retResult = child.before
+ else:
+ retResult = child.before.decode()
if w == 0:
return "TAOS_OK", retResult
else:
@@ -72,6 +86,12 @@ class TDTestCase:
#updatecfgDict = {'clientCfg': {'serverPort': 7080, 'firstEp': 'trd02:7080', 'secondEp':'trd02:7080'},\
# 'serverPort': 7080, 'firstEp': 'trd02:7080'}
hostname = socket.gethostname()
+ if (platform.system().lower() == 'windows' and not tdDnodes.dnodes[0].remoteIP == ""):
+ try:
+ config = eval(tdDnodes.dnodes[0].remoteIP)
+ hostname = config["host"]
+ except Exception:
+ hostname = tdDnodes.dnodes[0].remoteIP
serverPort = '7080'
rpcDebugFlagVal = '143'
clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
@@ -103,7 +123,7 @@ class TDTestCase:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
- if ("taosd" in files):
+ if ("taosd" in files or "taosd.exe" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
@@ -216,11 +236,15 @@ class TDTestCase:
pwd=os.getcwd()
newDbName="dbf"
sqlFile = pwd + "/0-others/sql.txt"
- sql1 = "echo 'create database " + newDbName + "' > " + sqlFile
- sql2 = "echo 'use " + newDbName + "' >> " + sqlFile
- sql3 = "echo 'create table ntbf (ts timestamp, c binary(40)) no this item' >> " + sqlFile
- sql4 = "echo 'insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\")' >> " + sqlFile
- sql5 = "echo 'show databases' >> " + sqlFile
+ sql1 = "echo create database " + newDbName + " > " + sqlFile
+ sql2 = "echo use " + newDbName + " >> " + sqlFile
+ if platform.system().lower() == 'windows':
+ sql3 = "echo create table ntbf (ts timestamp, c binary(40)) no this item >> " + sqlFile
+ sql4 = "echo insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\") >> " + sqlFile
+ else:
+ sql3 = "echo 'create table ntbf (ts timestamp, c binary(40)) no this item' >> " + sqlFile
+ sql4 = "echo 'insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\")' >> " + sqlFile
+ sql5 = "echo show databases >> " + sqlFile
os.system(sql1)
os.system(sql2)
os.system(sql3)
diff --git a/tests/system-test/0-others/taosShellNetChk.py b/tests/system-test/0-others/taosShellNetChk.py
index bbaeacf328fd5422ccd018a79ce6d9c632a370a9..3c99ddb8d697da58b7af8abd1eac1fc703bb06cf 100644
--- a/tests/system-test/0-others/taosShellNetChk.py
+++ b/tests/system-test/0-others/taosShellNetChk.py
@@ -3,7 +3,11 @@ import taos
import sys
import time
import socket
-import pexpect
+import platform
+if platform.system().lower() == 'windows':
+ import wexpect as taosExpect
+else:
+ import pexpect as taosExpect
import os
from util.log import *
@@ -15,7 +19,11 @@ def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key
if len(key) == 0:
tdLog.exit("taos test key is null!")
- taosCmd = buildPath + '/build/bin/taos '
+ if platform.system().lower() == 'windows':
+ taosCmd = buildPath + '\\build\\bin\\taos.exe '
+ taosCmd = taosCmd.replace('\\','\\\\')
+ else:
+ taosCmd = buildPath + '/build/bin/taos '
if len(cfgDir) != 0:
taosCmd = taosCmd + '-c ' + cfgDir
@@ -36,23 +44,29 @@ def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key
tdLog.info ("taos cmd: %s" % taosCmd)
- child = pexpect.spawn(taosCmd, timeout=3)
+ child = taosExpect.spawn(taosCmd, timeout=3)
#output = child.readline()
#print (output.decode())
if len(expectString) != 0:
- i = child.expect([expectString, pexpect.TIMEOUT, pexpect.EOF], timeout=6)
+ i = child.expect([expectString, taosExpect.TIMEOUT, taosExpect.EOF], timeout=6)
else:
- i = child.expect([pexpect.TIMEOUT, pexpect.EOF], timeout=6)
+ i = child.expect([taosExpect.TIMEOUT, taosExpect.EOF], timeout=6)
- retResult = child.before.decode()
+ if platform.system().lower() == 'windows':
+ retResult = child.before
+ else:
+ retResult = child.before.decode()
print("expect() return code: %d, content:\n %s\n"%(i, retResult))
#print(child.after.decode())
if i == 0:
print ('taos login success! Here can run sql, taos> ')
if len(sqlString) != 0:
child.sendline (sqlString)
- w = child.expect(["Query OK", pexpect.TIMEOUT, pexpect.EOF], timeout=1)
- retResult = child.before.decode()
+ w = child.expect(["Query OK", taosExpect.TIMEOUT, taosExpect.EOF], timeout=1)
+ if platform.system().lower() == 'windows':
+ retResult = child.before
+ else:
+ retResult = child.before.decode()
if w == 0:
return "TAOS_OK", retResult
else:
@@ -72,6 +86,12 @@ class TDTestCase:
#updatecfgDict = {'clientCfg': {'serverPort': 7080, 'firstEp': 'trd02:7080', 'secondEp':'trd02:7080'},\
# 'serverPort': 7080, 'firstEp': 'trd02:7080'}
hostname = socket.gethostname()
+ if (platform.system().lower() == 'windows' and not tdDnodes.dnodes[0].remoteIP == ""):
+ try:
+ config = eval(tdDnodes.dnodes[0].remoteIP )
+ hostname = config["host"]
+ except Exception:
+ hostname = tdDnodes.dnodes[0].remoteIP
serverPort = '7080'
rpcDebugFlagVal = '143'
clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
@@ -103,7 +123,7 @@ class TDTestCase:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
- if ("taosd" in files):
+ if ("taosd" in files or "taosd.exe" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
@@ -168,21 +188,33 @@ class TDTestCase:
tdDnodes.stop(1)
role = 'server'
- taosCmd = 'nohup ' + buildPath + '/build/bin/taos -c ' + keyDict['c']
- taosCmd = taosCmd + ' -n ' + role + ' > /dev/null 2>&1 &'
+ if platform.system().lower() == 'windows':
+ taosCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\taos.exe -c ' + keyDict['c']
+ taosCmd = taosCmd.replace('\\','\\\\')
+ taosCmd = taosCmd + ' -n ' + role
+ else:
+ taosCmd = 'nohup ' + buildPath + '/build/bin/taos -c ' + keyDict['c']
+ taosCmd = taosCmd + ' -n ' + role + ' > /dev/null 2>&1 &'
print (taosCmd)
os.system(taosCmd)
pktLen = '2000'
pktNum = '10'
role = 'client'
- taosCmd = buildPath + '/build/bin/taos -c ' + keyDict['c']
+ if platform.system().lower() == 'windows':
+ taosCmd = buildPath + '\\build\\bin\\taos.exe -h 127.0.0.1 -c ' + keyDict['c']
+ taosCmd = taosCmd.replace('\\','\\\\')
+ else:
+ taosCmd = buildPath + '/build/bin/taos -c ' + keyDict['c']
taosCmd = taosCmd + ' -n ' + role + ' -l ' + pktLen + ' -N ' + pktNum
print (taosCmd)
- child = pexpect.spawn(taosCmd, timeout=3)
- i = child.expect([pexpect.TIMEOUT, pexpect.EOF], timeout=6)
+ child = taosExpect.spawn(taosCmd, timeout=3)
+ i = child.expect([taosExpect.TIMEOUT, taosExpect.EOF], timeout=6)
- retResult = child.before.decode()
+ if platform.system().lower() == 'windows':
+ retResult = child.before
+ else:
+ retResult = child.before.decode()
print("expect() return code: %d, content:\n %s\n"%(i, retResult))
#print(child.after.decode())
if i == 0:
@@ -195,7 +227,10 @@ class TDTestCase:
else:
tdLog.exit('taos -n client fail!')
- os.system('pkill taos')
+ if platform.system().lower() == 'windows':
+ os.system('ps -a | grep taos | awk \'{print $2}\' | xargs kill -9')
+ else:
+ os.system('pkill taos')
def stop(self):
tdSql.close()
diff --git a/tests/system-test/0-others/taosdMonitor.py b/tests/system-test/0-others/taosdMonitor.py
index a3d3b052047faa12618a0b68846518269c9de3f5..a219c54e5925075a2c687c9cd134ada03c09e57e 100644
--- a/tests/system-test/0-others/taosdMonitor.py
+++ b/tests/system-test/0-others/taosdMonitor.py
@@ -2,7 +2,7 @@ import taos
import sys
import time
import socket
-import pexpect
+# import pexpect
import os
import http.server
import gzip
@@ -75,7 +75,7 @@ def telemetryInfoCheck(infoDict=''):
if k not in infoDict["cluster_info"]["dnodes"][0] or v != infoDict["cluster_info"]["dnodes"][0][k] :
tdLog.exit("dnodes info is null!")
- mnodes_info = { "mnode_id":1, "mnode_ep":f"{hostname}:{serverPort}","role": "LEADER" }
+ mnodes_info = { "mnode_id":1, "mnode_ep":f"{hostname}:{serverPort}","role": "leader" }
for k ,v in mnodes_info.items():
if k not in infoDict["cluster_info"]["mnodes"][0] or v != infoDict["cluster_info"]["mnodes"][0][k] :
diff --git a/tests/system-test/0-others/telemetry.py b/tests/system-test/0-others/telemetry.py
index 3ab39f9e7bb14b40f7caaa2b6f3bff43869c1e21..203f87c085fe91a9a75cc4176065a893fc29cf1e 100644
--- a/tests/system-test/0-others/telemetry.py
+++ b/tests/system-test/0-others/telemetry.py
@@ -2,7 +2,7 @@ import taos
import sys
import time
import socket
-import pexpect
+# import pexpect
import os
import http.server
import gzip
diff --git a/tests/system-test/0-others/udfTest.py b/tests/system-test/0-others/udfTest.py
index 679b41509891d1efe92507a81f7add51b9f76253..46d0a6968875a5e6c484c932abb41946f56bc8ee 100644
--- a/tests/system-test/0-others/udfTest.py
+++ b/tests/system-test/0-others/udfTest.py
@@ -134,7 +134,7 @@ class TDTestCase:
def create_udf_function(self):
- for i in range(10):
+ for i in range(5):
# create scalar functions
tdSql.execute("create function udf1 as '/tmp/udf/libudf1.so' outputtype int bufSize 8;")
@@ -644,16 +644,12 @@ class TDTestCase:
self.create_udf_function()
self.basic_udf_query()
self.loop_kill_udfd()
-
- self.unexpected_create()
tdSql.execute(" drop function udf1 ")
tdSql.execute(" drop function udf2 ")
self.create_udf_function()
time.sleep(2)
self.basic_udf_query()
self.test_function_name()
- self.restart_taosd_query_udf()
-
def stop(self):
diff --git a/tests/system-test/0-others/udf_create.py b/tests/system-test/0-others/udf_create.py
new file mode 100644
index 0000000000000000000000000000000000000000..e2c6e3c10bd1520c58c4400fd58c741d2904a420
--- /dev/null
+++ b/tests/system-test/0-others/udf_create.py
@@ -0,0 +1,654 @@
+from distutils.log import error
+import taos
+import sys
+import time
+import os
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+import subprocess
+
+class TDTestCase:
+
+ def init(self, conn, logSql):
+ tdLog.debug(f"start to excute {__file__}")
+ tdSql.init(conn.cursor(), logSql)
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root) - len("/build/bin")]
+ break
+ return buildPath
+
+ def prepare_udf_so(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+ print(projPath)
+
+ libudf1 = subprocess.Popen('find %s -name "libudf1.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
+ libudf2 = subprocess.Popen('find %s -name "libudf2.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
+ os.system("mkdir /tmp/udf/")
+ os.system("cp %s /tmp/udf/ "%libudf1.replace("\n" ,""))
+ os.system("cp %s /tmp/udf/ "%libudf2.replace("\n" ,""))
+
+
+ def prepare_data(self):
+
+ tdSql.execute("drop database if exists db ")
+ tdSql.execute("create database if not exists db days 300")
+ tdSql.execute("use db")
+ tdSql.execute(
+ '''create table stb1
+ (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
+ tags (t1 int)
+ '''
+ )
+
+ tdSql.execute(
+ '''
+ create table t1
+ (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
+ '''
+ )
+ for i in range(4):
+ tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+
+ for i in range(9):
+ tdSql.execute(
+ f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ )
+ tdSql.execute(
+ f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ )
+ tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+
+ tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+
+ tdSql.execute(
+ f'''insert into t1 values
+ ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
+ ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
+ ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
+ ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
+ ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
+ ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
+ ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
+ ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
+ ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
+ ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ '''
+ )
+
+ tdSql.execute("create table tb (ts timestamp , num1 int , num2 int, num3 double , num4 binary(30))")
+ tdSql.execute(
+ f'''insert into tb values
+ ( '2020-04-21 01:01:01.000', NULL, 1, 1, "binary1" )
+ ( '2020-10-21 01:01:01.000', 1, 1, 1.11, "binary1" )
+ ( '2020-12-31 01:01:01.000', 2, 22222, 22, "binary1" )
+ ( '2021-01-01 01:01:06.000', 3, 33333, 33, "binary1" )
+ ( '2021-05-07 01:01:10.000', 4, 44444, 44, "binary1" )
+ ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, "binary1" )
+ ( '2021-09-30 01:01:16.000', 5, 55555, 55, "binary1" )
+ ( '2022-02-01 01:01:20.000', 6, 66666, 66, "binary1" )
+ ( '2022-10-28 01:01:26.000', 0, 00000, 00, "binary1" )
+ ( '2022-12-01 01:01:30.000', 8, -88888, -88, "binary1" )
+ ( '2022-12-31 01:01:36.000', 9, -9999999, -99, "binary1" )
+ ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, "binary1" )
+ '''
+ )
+
+ # udf functions with join
+ ts_start = 1652517451000
+ tdSql.execute("create stable st (ts timestamp , c1 int , c2 int ,c3 double ,c4 double ) tags(ind int)")
+ tdSql.execute("create table sub1 using st tags(1)")
+ tdSql.execute("create table sub2 using st tags(2)")
+
+ for i in range(10):
+ ts = ts_start + i *1000
+ tdSql.execute(" insert into sub1 values({} , {},{},{},{})".format(ts,i ,i*10,i*100.0,i*1000.0))
+ tdSql.execute(" insert into sub2 values({} , {},{},{},{})".format(ts,i ,i*10,i*100.0,i*1000.0))
+
+
+ def create_udf_function(self):
+
+ for i in range(5):
+ # create scalar functions
+ tdSql.execute("create function udf1 as '/tmp/udf/libudf1.so' outputtype int bufSize 8;")
+
+ # create aggregate functions
+
+ tdSql.execute("create aggregate function udf2 as '/tmp/udf/libudf2.so' outputtype double bufSize 8;")
+
+ functions = tdSql.getResult("show functions")
+ function_nums = len(functions)
+ if function_nums == 2:
+ tdLog.info("create two udf functions success ")
+
+ # drop functions
+
+ tdSql.execute("drop function udf1")
+ tdSql.execute("drop function udf2")
+
+ functions = tdSql.getResult("show functions")
+ for function in functions:
+ if "udf1" in function[0] or "udf2" in function[0]:
+ tdLog.info("drop udf functions failed ")
+ tdLog.exit("drop udf functions failed")
+
+ tdLog.info("drop two udf functions success ")
+
+ # create scalar functions
+ tdSql.execute("create function udf1 as '/tmp/udf/libudf1.so' outputtype int bufSize 8;")
+
+ # create aggregate functions
+
+ tdSql.execute("create aggregate function udf2 as '/tmp/udf/libudf2.so' outputtype double bufSize 8;")
+
+ functions = tdSql.getResult("show functions")
+ function_nums = len(functions)
+ if function_nums == 2:
+ tdLog.info("create two udf functions success ")
+
+ def basic_udf_query(self):
+
+ # scalar functions
+
+ tdSql.execute("use db ")
+ tdSql.query("select num1 , udf1(num1) ,num2 ,udf1(num2),num3 ,udf1(num3),num4 ,udf1(num4) from tb")
+ tdSql.checkData(0,0,None)
+ tdSql.checkData(0,1,None)
+ tdSql.checkData(0,2,1)
+ tdSql.checkData(0,3,88)
+ tdSql.checkData(0,4,1.000000000)
+ tdSql.checkData(0,5,88)
+ tdSql.checkData(0,6,"binary1")
+ tdSql.checkData(0,7,88)
+
+ tdSql.checkData(3,0,3)
+ tdSql.checkData(3,1,88)
+ tdSql.checkData(3,2,33333)
+ tdSql.checkData(3,3,88)
+ tdSql.checkData(3,4,33.000000000)
+ tdSql.checkData(3,5,88)
+ tdSql.checkData(3,6,"binary1")
+ tdSql.checkData(3,7,88)
+
+ tdSql.checkData(11,0,None)
+ tdSql.checkData(11,1,None)
+ tdSql.checkData(11,2,None)
+ tdSql.checkData(11,3,None)
+ tdSql.checkData(11,4,None)
+ tdSql.checkData(11,5,None)
+ tdSql.checkData(11,6,"binary1")
+ tdSql.checkData(11,7,88)
+
+ tdSql.query("select c1 , udf1(c1) ,c2 ,udf1(c2), c3 ,udf1(c3), c4 ,udf1(c4) from stb1 order by c1")
+ tdSql.checkData(0,0,None)
+ tdSql.checkData(0,1,None)
+ tdSql.checkData(0,2,None)
+ tdSql.checkData(0,3,None)
+ tdSql.checkData(0,4,None)
+ tdSql.checkData(0,5,None)
+ tdSql.checkData(0,6,None)
+ tdSql.checkData(0,7,None)
+
+ tdSql.checkData(20,0,8)
+ tdSql.checkData(20,1,88)
+ tdSql.checkData(20,2,88888)
+ tdSql.checkData(20,3,88)
+ tdSql.checkData(20,4,888)
+ tdSql.checkData(20,5,88)
+ tdSql.checkData(20,6,88)
+ tdSql.checkData(20,7,88)
+
+
+ # aggregate functions
+ tdSql.query("select udf2(num1) ,udf2(num2), udf2(num3) from tb")
+ tdSql.checkData(0,0,15.362291496)
+ tdSql.checkData(0,1,10000949.553189287)
+ tdSql.checkData(0,2,168.633425216)
+
+ # Arithmetic compute
+ tdSql.query("select udf2(num1)+100 ,udf2(num2)-100, udf2(num3)*100 ,udf2(num3)/100 from tb")
+ tdSql.checkData(0,0,115.362291496)
+ tdSql.checkData(0,1,10000849.553189287)
+ tdSql.checkData(0,2,16863.342521576)
+ tdSql.checkData(0,3,1.686334252)
+
+ tdSql.query("select udf2(c1) ,udf2(c6) from stb1 ")
+ tdSql.checkData(0,0,25.514701644)
+ tdSql.checkData(0,1,265.247614504)
+
+ tdSql.query("select udf2(c1)+100 ,udf2(c6)-100 ,udf2(c1)*100 ,udf2(c6)/100 from stb1 ")
+ tdSql.checkData(0,0,125.514701644)
+ tdSql.checkData(0,1,165.247614504)
+ tdSql.checkData(0,2,2551.470164435)
+ tdSql.checkData(0,3,2.652476145)
+
+ # # bug for crash when query sub table
+ tdSql.query("select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from ct1")
+ tdSql.checkData(0,0,378.215547010)
+ tdSql.checkData(0,1,353.808067460)
+ tdSql.checkData(0,2,2114.237451187)
+ tdSql.checkData(0,3,2.125468151)
+
+ tdSql.query("select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from stb1 ")
+ tdSql.checkData(0,0,490.358032462)
+ tdSql.checkData(0,1,400.460106627)
+ tdSql.checkData(0,2,2551.470164435)
+ tdSql.checkData(0,3,2.652476145)
+
+
+ # regular table with aggregate functions
+
+ tdSql.error("select udf1(num1) , count(num1) from tb;")
+ tdSql.error("select udf1(num1) , avg(num1) from tb;")
+ tdSql.error("select udf1(num1) , twa(num1) from tb;")
+ tdSql.error("select udf1(num1) , irate(num1) from tb;")
+ tdSql.error("select udf1(num1) , sum(num1) from tb;")
+ tdSql.error("select udf1(num1) , stddev(num1) from tb;")
+ tdSql.error("select udf1(num1) , mode(num1) from tb;")
+ tdSql.error("select udf1(num1) , HYPERLOGLOG(num1) from tb;")
+ # stable
+ tdSql.error("select udf1(c1) , count(c1) from stb1;")
+ tdSql.error("select udf1(c1) , avg(c1) from stb1;")
+ tdSql.error("select udf1(c1) , twa(c1) from stb1;")
+ tdSql.error("select udf1(c1) , irate(c1) from stb1;")
+ tdSql.error("select udf1(c1) , sum(c1) from stb1;")
+ tdSql.error("select udf1(c1) , stddev(c1) from stb1;")
+ tdSql.error("select udf1(c1) , mode(c1) from stb1;")
+ tdSql.error("select udf1(c1) , HYPERLOGLOG(c1) from stb1;")
+
+ # regular table with select functions
+
+ tdSql.query("select udf1(num1) , max(num1) from tb;")
+ tdSql.checkRows(1)
+ tdSql.query("select floor(num1) , max(num1) from tb;")
+ tdSql.checkRows(1)
+ tdSql.query("select udf1(num1) , min(num1) from tb;")
+ tdSql.checkRows(1)
+ tdSql.query("select ceil(num1) , min(num1) from tb;")
+ tdSql.checkRows(1)
+ tdSql.error("select udf1(num1) , first(num1) from tb;")
+
+ tdSql.error("select abs(num1) , first(num1) from tb;")
+
+ tdSql.error("select udf1(num1) , last(num1) from tb;")
+
+ tdSql.error("select round(num1) , last(num1) from tb;")
+
+ tdSql.query("select udf1(num1) , top(num1,1) from tb;")
+ tdSql.checkRows(1)
+ tdSql.query("select udf1(num1) , bottom(num1,1) from tb;")
+ tdSql.checkRows(1)
+ tdSql.error("select udf1(num1) , last_row(num1) from tb;")
+
+ tdSql.error("select round(num1) , last_row(num1) from tb;")
+
+
+ # stable
+ tdSql.query("select udf1(c1) , max(c1) from stb1;")
+ tdSql.checkRows(1)
+ tdSql.query("select abs(c1) , max(c1) from stb1;")
+ tdSql.checkRows(1)
+ tdSql.query("select udf1(c1) , min(c1) from stb1;")
+ tdSql.checkRows(1)
+ tdSql.query("select floor(c1) , min(c1) from stb1;")
+ tdSql.checkRows(1)
+ tdSql.error("select udf1(c1) , first(c1) from stb1;")
+
+ tdSql.error("select udf1(c1) , last(c1) from stb1;")
+
+ tdSql.query("select udf1(c1) , top(c1 ,1) from stb1;")
+ tdSql.checkRows(1)
+ tdSql.query("select abs(c1) , top(c1 ,1) from stb1;")
+ tdSql.checkRows(1)
+ tdSql.query("select udf1(c1) , bottom(c1,1) from stb1;")
+ tdSql.checkRows(1)
+ tdSql.query("select ceil(c1) , bottom(c1,1) from stb1;")
+ tdSql.checkRows(1)
+
+ tdSql.error("select udf1(c1) , last_row(c1) from stb1;")
+ tdSql.error("select ceil(c1) , last_row(c1) from stb1;")
+
+ # regular table with compute functions
+
+ tdSql.query("select udf1(num1) , abs(num1) from tb;")
+ tdSql.checkRows(12)
+ tdSql.query("select floor(num1) , abs(num1) from tb;")
+ tdSql.checkRows(12)
+
+ # # bug need fix
+
+ #tdSql.query("select udf1(num1) , csum(num1) from tb;")
+ #tdSql.checkRows(9)
+ #tdSql.query("select ceil(num1) , csum(num1) from tb;")
+ #tdSql.checkRows(9)
+ #tdSql.query("select udf1(c1) , csum(c1) from stb1;")
+ #tdSql.checkRows(22)
+ #tdSql.query("select floor(c1) , csum(c1) from stb1;")
+ #tdSql.checkRows(22)
+
+ # stable with compute functions
+ tdSql.query("select udf1(c1) , abs(c1) from stb1;")
+ tdSql.checkRows(25)
+ tdSql.query("select abs(c1) , ceil(c1) from stb1;")
+ tdSql.checkRows(25)
+
+ # nest query
+ tdSql.query("select abs(udf1(c1)) , abs(ceil(c1)) from stb1 order by ts;")
+ tdSql.checkRows(25)
+ tdSql.checkData(0,0,None)
+ tdSql.checkData(0,1,None)
+ tdSql.checkData(1,0,88)
+ tdSql.checkData(1,1,8)
+
+ tdSql.query("select abs(udf1(c1)) , abs(ceil(c1)) from ct1 order by ts;")
+ tdSql.checkRows(13)
+ tdSql.checkData(0,0,88)
+ tdSql.checkData(0,1,8)
+ tdSql.checkData(1,0,88)
+ tdSql.checkData(1,1,7)
+
+ # bug fix for crash
+ # order by udf function result
+ for _ in range(50):
+ tdSql.query("select udf2(c1) from stb1 group by 1-udf1(c1)")
+ print(tdSql.queryResult)
+
+ # udf functions with filter
+
+ tdSql.query("select abs(udf1(c1)) , abs(ceil(c1)) from stb1 where c1 is null order by ts;")
+ tdSql.checkRows(3)
+ tdSql.checkData(0,0,None)
+ tdSql.checkData(0,1,None)
+
+ tdSql.query("select c1 ,udf1(c1) , c6 ,udf1(c6) from stb1 where c1 > 8 order by ts")
+ tdSql.checkRows(3)
+ tdSql.checkData(0,0,9)
+ tdSql.checkData(0,1,88)
+ tdSql.checkData(0,2,-99.990000000)
+ tdSql.checkData(0,3,88)
+
+ tdSql.query("select sub1.c1, sub2.c2 from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
+ tdSql.checkData(0,0,0)
+ tdSql.checkData(0,1,0)
+ tdSql.checkData(1,0,1)
+ tdSql.checkData(1,1,10)
+
+ tdSql.query("select udf1(sub1.c1), udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
+ tdSql.checkData(0,0,88)
+ tdSql.checkData(0,1,88)
+ tdSql.checkData(1,0,88)
+ tdSql.checkData(1,1,88)
+
+ tdSql.query("select sub1.c1 , udf1(sub1.c1), sub2.c2 ,udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
+ tdSql.checkData(0,0,0)
+ tdSql.checkData(0,1,88)
+ tdSql.checkData(0,2,0)
+ tdSql.checkData(0,3,88)
+ tdSql.checkData(1,0,1)
+ tdSql.checkData(1,1,88)
+ tdSql.checkData(1,2,10)
+ tdSql.checkData(1,3,88)
+
+ tdSql.query("select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
+ tdSql.checkData(0,0,16.881943016)
+ tdSql.checkData(0,1,168.819430161)
+ tdSql.error("select sub1.c1 , udf2(sub1.c1), sub2.c2 ,udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
+
+ # udf functions with group by
+ tdSql.query("select udf1(c1) from ct1 group by c1")
+ tdSql.checkRows(10)
+ tdSql.query("select udf1(c1) from stb1 group by c1")
+ tdSql.checkRows(11)
+ tdSql.query("select c1,c2, udf1(c1,c2) from ct1 group by c1,c2")
+ tdSql.checkRows(10)
+ tdSql.query("select c1,c2, udf1(c1,c2) from stb1 group by c1,c2")
+ tdSql.checkRows(11)
+
+ tdSql.query("select udf2(c1) from ct1 group by c1")
+ tdSql.checkRows(10)
+ tdSql.query("select udf2(c1) from stb1 group by c1")
+ tdSql.checkRows(11)
+ tdSql.query("select c1,c2, udf2(c1,c6) from ct1 group by c1,c2")
+ tdSql.checkRows(10)
+ tdSql.query("select c1,c2, udf2(c1,c6) from stb1 group by c1,c2")
+ tdSql.checkRows(11)
+ tdSql.query("select udf2(c1) from stb1 group by udf1(c1)")
+ tdSql.checkRows(2)
+ tdSql.query("select udf2(c1) from stb1 group by floor(c1)")
+ tdSql.checkRows(11)
+
+ # udf mix with order by
+ tdSql.query("select udf2(c1) from stb1 group by floor(c1) order by udf2(c1)")
+ tdSql.checkRows(11)
+
+
+ def multi_cols_udf(self):
+ tdSql.query("select num1,num2,num3,udf1(num1,num2,num3) from tb")
+ tdSql.checkData(0,0,None)
+ tdSql.checkData(0,1,1)
+ tdSql.checkData(0,2,1.000000000)
+ tdSql.checkData(0,3,None)
+ tdSql.checkData(1,0,1)
+ tdSql.checkData(1,1,1)
+ tdSql.checkData(1,2,1.110000000)
+ tdSql.checkData(1,3,88)
+
+ tdSql.query("select c1,c6,udf1(c1,c6) from stb1 order by ts")
+ tdSql.checkData(1,0,8)
+ tdSql.checkData(1,1,88.880000000)
+ tdSql.checkData(1,2,88)
+
+ tdSql.query("select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;")
+ tdSql.checkRows(22)
+
+ tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
+ tdSql.checkData(0,0,169.661427555)
+ tdSql.checkData(0,1,169.661427555)
+
+ def try_query_sql(self):
+ udf1_sqls = [
+ "select num1 , udf1(num1) ,num2 ,udf1(num2),num3 ,udf1(num3),num4 ,udf1(num4) from tb" ,
+ "select c1 , udf1(c1) ,c2 ,udf1(c2), c3 ,udf1(c3), c4 ,udf1(c4) from stb1 order by c1" ,
+ "select udf1(num1) , max(num1) from tb;" ,
+ "select udf1(num1) , min(num1) from tb;" ,
+ #"select udf1(num1) , top(num1,1) from tb;" ,
+ #"select udf1(num1) , bottom(num1,1) from tb;" ,
+ "select udf1(c1) , max(c1) from stb1;" ,
+ "select udf1(c1) , min(c1) from stb1;" ,
+ #"select udf1(c1) , top(c1 ,1) from stb1;" ,
+ #"select udf1(c1) , bottom(c1,1) from stb1;" ,
+ "select udf1(num1) , abs(num1) from tb;" ,
+ #"select udf1(num1) , csum(num1) from tb;" ,
+ #"select udf1(c1) , csum(c1) from stb1;" ,
+ "select udf1(c1) , abs(c1) from stb1;" ,
+ "select abs(udf1(c1)) , abs(ceil(c1)) from stb1 order by ts;" ,
+ "select abs(udf1(c1)) , abs(ceil(c1)) from ct1 order by ts;" ,
+ "select abs(udf1(c1)) , abs(ceil(c1)) from stb1 where c1 is null order by ts;" ,
+ "select c1 ,udf1(c1) , c6 ,udf1(c6) from stb1 where c1 > 8 order by ts" ,
+ "select udf1(sub1.c1), udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
+ "select sub1.c1 , udf1(sub1.c1), sub2.c2 ,udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
+ "select udf1(c1) from ct1 group by c1" ,
+ "select udf1(c1) from stb1 group by c1" ,
+ "select c1,c2, udf1(c1,c2) from ct1 group by c1,c2" ,
+ "select c1,c2, udf1(c1,c2) from stb1 group by c1,c2" ,
+ "select num1,num2,num3,udf1(num1,num2,num3) from tb" ,
+ "select c1,c6,udf1(c1,c6) from stb1 order by ts" ,
+ "select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;"
+ ]
+ udf2_sqls = ["select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
+ "select udf2(c1) from stb1 group by 1-udf1(c1)" ,
+ "select udf2(num1) ,udf2(num2), udf2(num3) from tb" ,
+ "select udf2(num1)+100 ,udf2(num2)-100, udf2(num3)*100 ,udf2(num3)/100 from tb" ,
+ "select udf2(c1) ,udf2(c6) from stb1 " ,
+ "select udf2(c1)+100 ,udf2(c6)-100 ,udf2(c1)*100 ,udf2(c6)/100 from stb1 " ,
+ "select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from ct1" ,
+ "select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from stb1 " ,
+ "select udf2(c1) from ct1 group by c1" ,
+ "select udf2(c1) from stb1 group by c1" ,
+ "select c1,c2, udf2(c1,c6) from ct1 group by c1,c2" ,
+ "select c1,c2, udf2(c1,c6) from stb1 group by c1,c2" ,
+ "select udf2(c1) from stb1 group by udf1(c1)" ,
+ "select udf2(c1) from stb1 group by floor(c1)" ,
+ "select udf2(c1) from stb1 group by floor(c1) order by udf2(c1)" ,
+
+ "select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
+ "select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
+ "select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
+ "select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null"]
+
+ return udf1_sqls ,udf2_sqls
+
+
+
+ def unexpected_create(self):
+
+ tdLog.info(" create function with out bufsize ")
+ tdSql.query("drop function udf1 ")
+ tdSql.query("drop function udf2 ")
+
+ # create function without buffer
+ tdSql.execute("create function udf1 as '/tmp/udf/libudf1.so' outputtype int")
+ tdSql.execute("create aggregate function udf2 as '/tmp/udf/libudf2.so' outputtype double")
+ udf1_sqls ,udf2_sqls = self.try_query_sql()
+
+ for scalar_sql in udf1_sqls:
+ tdSql.query(scalar_sql)
+ for aggregate_sql in udf2_sqls:
+ tdSql.error(aggregate_sql)
+
+ # create function without aggregate
+
+ tdLog.info(" create function with out aggregate ")
+ tdSql.query("drop function udf1 ")
+ tdSql.query("drop function udf2 ")
+
+ # create function without buffer
+ tdSql.execute("create aggregate function udf1 as '/tmp/udf/libudf1.so' outputtype int bufSize 8 ")
+ tdSql.execute("create function udf2 as '/tmp/udf/libudf2.so' outputtype double bufSize 8")
+ udf1_sqls ,udf2_sqls = self.try_query_sql()
+
+ for scalar_sql in udf1_sqls:
+ tdSql.error(scalar_sql)
+ for aggregate_sql in udf2_sqls:
+ tdSql.error(aggregate_sql)
+
+ tdSql.execute(" create function db as '/tmp/udf/libudf1.so' outputtype int bufSize 8 ")
+ tdSql.execute(" create aggregate function test as '/tmp/udf/libudf1.so' outputtype int bufSize 8 ")
+ tdSql.error(" select db(c1) from stb1 ")
+ tdSql.error(" select db(c1,c6), db(c6) from stb1 ")
+ tdSql.error(" select db(num1,num2), db(num1) from tb ")
+ tdSql.error(" select test(c1) from stb1 ")
+ tdSql.error(" select test(c1,c6), test(c6) from stb1 ")
+ tdSql.error(" select test(num1,num2), test(num1) from tb ")
+
+
+
+ def loop_kill_udfd(self):
+
+ buildPath = self.getBuildPath()
+ if (buildPath == ""):
+ tdLog.exit("taosd not found!")
+ else:
+ tdLog.info("taosd found in %s" % buildPath)
+
+ cfgPath = buildPath + "/../sim/dnode1/cfg"
+ udfdPath = buildPath +'/build/bin/udfd'
+
+ for i in range(3):
+
+ tdLog.info(" loop restart udfd %d_th" % i)
+
+ tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
+ tdSql.checkData(0,0,169.661427555)
+ tdSql.checkData(0,1,169.661427555)
+ # stop udfd cmds
+ get_processID = "ps -ef | grep -w udfd | grep -v grep| grep -v defunct | awk '{print $2}'"
+ processID = subprocess.check_output(get_processID, shell=True).decode("utf-8")
+ stop_udfd = " kill -9 %s" % processID
+ os.system(stop_udfd)
+
+ time.sleep(2)
+
+ tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
+ tdSql.checkData(0,0,169.661427555)
+ tdSql.checkData(0,1,169.661427555)
+
+ # # start udfd cmds
+ # start_udfd = "nohup " + udfdPath +'-c' +cfgPath +" > /dev/null 2>&1 &"
+ # tdLog.info("start udfd : %s " % start_udfd)
+
+ def test_function_name(self):
+ tdLog.info(" create function name is not build_in functions ")
+ tdSql.execute(" drop function udf1 ")
+ tdSql.execute(" drop function udf2 ")
+ tdSql.error("create function max as '/tmp/udf/libudf1.so' outputtype int bufSize 8")
+ tdSql.error("create aggregate function sum as '/tmp/udf/libudf2.so' outputtype double bufSize 8")
+ tdSql.error("create function max as '/tmp/udf/libudf1.so' outputtype int bufSize 8")
+ tdSql.error("create aggregate function sum as '/tmp/udf/libudf2.so' outputtype double bufSize 8")
+ tdSql.error("create aggregate function tbname as '/tmp/udf/libudf2.so' outputtype double bufSize 8")
+ tdSql.error("create aggregate function function as '/tmp/udf/libudf2.so' outputtype double bufSize 8")
+ tdSql.error("create aggregate function stable as '/tmp/udf/libudf2.so' outputtype double bufSize 8")
+ tdSql.error("create aggregate function union as '/tmp/udf/libudf2.so' outputtype double bufSize 8")
+ tdSql.error("create aggregate function 123 as '/tmp/udf/libudf2.so' outputtype double bufSize 8")
+ tdSql.error("create aggregate function 123db as '/tmp/udf/libudf2.so' outputtype double bufSize 8")
+ tdSql.error("create aggregate function mnode as '/tmp/udf/libudf2.so' outputtype double bufSize 8")
+
+ def restart_taosd_query_udf(self):
+
+ self.create_udf_function()
+
+ for i in range(5):
+ tdLog.info(" this is %d_th restart taosd " %i)
+ tdSql.execute("use db ")
+ tdSql.query("select count(*) from stb1")
+ tdSql.checkRows(1)
+ tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
+ tdSql.checkData(0,0,169.661427555)
+ tdSql.checkData(0,1,169.661427555)
+ tdDnodes.stop(1)
+ tdDnodes.start(1)
+ time.sleep(2)
+
+
+ def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
+
+ print(" env is ok for all ")
+ self.prepare_udf_so()
+ self.prepare_data()
+ self.create_udf_function()
+ self.basic_udf_query()
+ self.unexpected_create()
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/0-others/udf_restart_taosd.py b/tests/system-test/0-others/udf_restart_taosd.py
new file mode 100644
index 0000000000000000000000000000000000000000..24d3b5a9c3cf702c4839e83ff02794f5bf08fcb5
--- /dev/null
+++ b/tests/system-test/0-others/udf_restart_taosd.py
@@ -0,0 +1,654 @@
+from distutils.log import error
+import taos
+import sys
+import time
+import os
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+import subprocess
+
+class TDTestCase:
+
+ def init(self, conn, logSql):
+ tdLog.debug(f"start to excute {__file__}")
+ tdSql.init(conn.cursor(), logSql)
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root) - len("/build/bin")]
+ break
+ return buildPath
+
+ def prepare_udf_so(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+ print(projPath)
+
+ libudf1 = subprocess.Popen('find %s -name "libudf1.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
+ libudf2 = subprocess.Popen('find %s -name "libudf2.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
+ os.system("mkdir /tmp/udf/")
+ os.system("cp %s /tmp/udf/ "%libudf1.replace("\n" ,""))
+ os.system("cp %s /tmp/udf/ "%libudf2.replace("\n" ,""))
+
+
+ def prepare_data(self):
+
+ tdSql.execute("drop database if exists db ")
+ tdSql.execute("create database if not exists db days 300")
+ tdSql.execute("use db")
+ tdSql.execute(
+ '''create table stb1
+ (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
+ tags (t1 int)
+ '''
+ )
+
+ tdSql.execute(
+ '''
+ create table t1
+ (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
+ '''
+ )
+ for i in range(4):
+ tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+
+ for i in range(9):
+ tdSql.execute(
+ f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ )
+ tdSql.execute(
+ f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ )
+ tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+
+ tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+
+ tdSql.execute(
+ f'''insert into t1 values
+ ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
+ ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
+ ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
+ ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
+ ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
+ ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
+ ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
+ ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
+ ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
+ ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ '''
+ )
+
+ tdSql.execute("create table tb (ts timestamp , num1 int , num2 int, num3 double , num4 binary(30))")
+ tdSql.execute(
+ f'''insert into tb values
+ ( '2020-04-21 01:01:01.000', NULL, 1, 1, "binary1" )
+ ( '2020-10-21 01:01:01.000', 1, 1, 1.11, "binary1" )
+ ( '2020-12-31 01:01:01.000', 2, 22222, 22, "binary1" )
+ ( '2021-01-01 01:01:06.000', 3, 33333, 33, "binary1" )
+ ( '2021-05-07 01:01:10.000', 4, 44444, 44, "binary1" )
+ ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, "binary1" )
+ ( '2021-09-30 01:01:16.000', 5, 55555, 55, "binary1" )
+ ( '2022-02-01 01:01:20.000', 6, 66666, 66, "binary1" )
+ ( '2022-10-28 01:01:26.000', 0, 00000, 00, "binary1" )
+ ( '2022-12-01 01:01:30.000', 8, -88888, -88, "binary1" )
+ ( '2022-12-31 01:01:36.000', 9, -9999999, -99, "binary1" )
+ ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, "binary1" )
+ '''
+ )
+
+ # udf functions with join
+ ts_start = 1652517451000
+ tdSql.execute("create stable st (ts timestamp , c1 int , c2 int ,c3 double ,c4 double ) tags(ind int)")
+ tdSql.execute("create table sub1 using st tags(1)")
+ tdSql.execute("create table sub2 using st tags(2)")
+
+ for i in range(10):
+ ts = ts_start + i *1000
+ tdSql.execute(" insert into sub1 values({} , {},{},{},{})".format(ts,i ,i*10,i*100.0,i*1000.0))
+ tdSql.execute(" insert into sub2 values({} , {},{},{},{})".format(ts,i ,i*10,i*100.0,i*1000.0))
+
+
+ def create_udf_function(self):
+
+ for i in range(5):
+ # create scalar functions
+ tdSql.execute("create function udf1 as '/tmp/udf/libudf1.so' outputtype int bufSize 8;")
+
+ # create aggregate functions
+
+ tdSql.execute("create aggregate function udf2 as '/tmp/udf/libudf2.so' outputtype double bufSize 8;")
+
+ functions = tdSql.getResult("show functions")
+ function_nums = len(functions)
+ if function_nums == 2:
+ tdLog.info("create two udf functions success ")
+
+ # drop functions
+
+ tdSql.execute("drop function udf1")
+ tdSql.execute("drop function udf2")
+
+ functions = tdSql.getResult("show functions")
+ for function in functions:
+ if "udf1" in function[0] or "udf2" in function[0]:
+ tdLog.info("drop udf functions failed ")
+ tdLog.exit("drop udf functions failed")
+
+ tdLog.info("drop two udf functions success ")
+
+ # create scalar functions
+ tdSql.execute("create function udf1 as '/tmp/udf/libudf1.so' outputtype int bufSize 8;")
+
+ # create aggregate functions
+
+ tdSql.execute("create aggregate function udf2 as '/tmp/udf/libudf2.so' outputtype double bufSize 8;")
+
+ functions = tdSql.getResult("show functions")
+ function_nums = len(functions)
+ if function_nums == 2:
+ tdLog.info("create two udf functions success ")
+
+ def basic_udf_query(self):
+
+ # scalar functions
+
+ tdSql.execute("use db ")
+ tdSql.query("select num1 , udf1(num1) ,num2 ,udf1(num2),num3 ,udf1(num3),num4 ,udf1(num4) from tb")
+ tdSql.checkData(0,0,None)
+ tdSql.checkData(0,1,None)
+ tdSql.checkData(0,2,1)
+ tdSql.checkData(0,3,88)
+ tdSql.checkData(0,4,1.000000000)
+ tdSql.checkData(0,5,88)
+ tdSql.checkData(0,6,"binary1")
+ tdSql.checkData(0,7,88)
+
+ tdSql.checkData(3,0,3)
+ tdSql.checkData(3,1,88)
+ tdSql.checkData(3,2,33333)
+ tdSql.checkData(3,3,88)
+ tdSql.checkData(3,4,33.000000000)
+ tdSql.checkData(3,5,88)
+ tdSql.checkData(3,6,"binary1")
+ tdSql.checkData(3,7,88)
+
+ tdSql.checkData(11,0,None)
+ tdSql.checkData(11,1,None)
+ tdSql.checkData(11,2,None)
+ tdSql.checkData(11,3,None)
+ tdSql.checkData(11,4,None)
+ tdSql.checkData(11,5,None)
+ tdSql.checkData(11,6,"binary1")
+ tdSql.checkData(11,7,88)
+
+ tdSql.query("select c1 , udf1(c1) ,c2 ,udf1(c2), c3 ,udf1(c3), c4 ,udf1(c4) from stb1 order by c1")
+ tdSql.checkData(0,0,None)
+ tdSql.checkData(0,1,None)
+ tdSql.checkData(0,2,None)
+ tdSql.checkData(0,3,None)
+ tdSql.checkData(0,4,None)
+ tdSql.checkData(0,5,None)
+ tdSql.checkData(0,6,None)
+ tdSql.checkData(0,7,None)
+
+ tdSql.checkData(20,0,8)
+ tdSql.checkData(20,1,88)
+ tdSql.checkData(20,2,88888)
+ tdSql.checkData(20,3,88)
+ tdSql.checkData(20,4,888)
+ tdSql.checkData(20,5,88)
+ tdSql.checkData(20,6,88)
+ tdSql.checkData(20,7,88)
+
+
+ # aggregate functions
+ tdSql.query("select udf2(num1) ,udf2(num2), udf2(num3) from tb")
+ tdSql.checkData(0,0,15.362291496)
+ tdSql.checkData(0,1,10000949.553189287)
+ tdSql.checkData(0,2,168.633425216)
+
+ # Arithmetic compute
+ tdSql.query("select udf2(num1)+100 ,udf2(num2)-100, udf2(num3)*100 ,udf2(num3)/100 from tb")
+ tdSql.checkData(0,0,115.362291496)
+ tdSql.checkData(0,1,10000849.553189287)
+ tdSql.checkData(0,2,16863.342521576)
+ tdSql.checkData(0,3,1.686334252)
+
+ tdSql.query("select udf2(c1) ,udf2(c6) from stb1 ")
+ tdSql.checkData(0,0,25.514701644)
+ tdSql.checkData(0,1,265.247614504)
+
+ tdSql.query("select udf2(c1)+100 ,udf2(c6)-100 ,udf2(c1)*100 ,udf2(c6)/100 from stb1 ")
+ tdSql.checkData(0,0,125.514701644)
+ tdSql.checkData(0,1,165.247614504)
+ tdSql.checkData(0,2,2551.470164435)
+ tdSql.checkData(0,3,2.652476145)
+
+ # # bug for crash when query sub table
+ tdSql.query("select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from ct1")
+ tdSql.checkData(0,0,378.215547010)
+ tdSql.checkData(0,1,353.808067460)
+ tdSql.checkData(0,2,2114.237451187)
+ tdSql.checkData(0,3,2.125468151)
+
+ tdSql.query("select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from stb1 ")
+ tdSql.checkData(0,0,490.358032462)
+ tdSql.checkData(0,1,400.460106627)
+ tdSql.checkData(0,2,2551.470164435)
+ tdSql.checkData(0,3,2.652476145)
+
+
+ # regular table with aggregate functions
+
+ tdSql.error("select udf1(num1) , count(num1) from tb;")
+ tdSql.error("select udf1(num1) , avg(num1) from tb;")
+ tdSql.error("select udf1(num1) , twa(num1) from tb;")
+ tdSql.error("select udf1(num1) , irate(num1) from tb;")
+ tdSql.error("select udf1(num1) , sum(num1) from tb;")
+ tdSql.error("select udf1(num1) , stddev(num1) from tb;")
+ tdSql.error("select udf1(num1) , mode(num1) from tb;")
+ tdSql.error("select udf1(num1) , HYPERLOGLOG(num1) from tb;")
+ # stable
+ tdSql.error("select udf1(c1) , count(c1) from stb1;")
+ tdSql.error("select udf1(c1) , avg(c1) from stb1;")
+ tdSql.error("select udf1(c1) , twa(c1) from stb1;")
+ tdSql.error("select udf1(c1) , irate(c1) from stb1;")
+ tdSql.error("select udf1(c1) , sum(c1) from stb1;")
+ tdSql.error("select udf1(c1) , stddev(c1) from stb1;")
+ tdSql.error("select udf1(c1) , mode(c1) from stb1;")
+ tdSql.error("select udf1(c1) , HYPERLOGLOG(c1) from stb1;")
+
+ # regular table with select functions
+
+ tdSql.query("select udf1(num1) , max(num1) from tb;")
+ tdSql.checkRows(1)
+ tdSql.query("select floor(num1) , max(num1) from tb;")
+ tdSql.checkRows(1)
+ tdSql.query("select udf1(num1) , min(num1) from tb;")
+ tdSql.checkRows(1)
+ tdSql.query("select ceil(num1) , min(num1) from tb;")
+ tdSql.checkRows(1)
+ tdSql.error("select udf1(num1) , first(num1) from tb;")
+
+ tdSql.error("select abs(num1) , first(num1) from tb;")
+
+ tdSql.error("select udf1(num1) , last(num1) from tb;")
+
+ tdSql.error("select round(num1) , last(num1) from tb;")
+
+ tdSql.query("select udf1(num1) , top(num1,1) from tb;")
+ tdSql.checkRows(1)
+ tdSql.query("select udf1(num1) , bottom(num1,1) from tb;")
+ tdSql.checkRows(1)
+ tdSql.error("select udf1(num1) , last_row(num1) from tb;")
+
+ tdSql.error("select round(num1) , last_row(num1) from tb;")
+
+
+ # stable
+ tdSql.query("select udf1(c1) , max(c1) from stb1;")
+ tdSql.checkRows(1)
+ tdSql.query("select abs(c1) , max(c1) from stb1;")
+ tdSql.checkRows(1)
+ tdSql.query("select udf1(c1) , min(c1) from stb1;")
+ tdSql.checkRows(1)
+ tdSql.query("select floor(c1) , min(c1) from stb1;")
+ tdSql.checkRows(1)
+ tdSql.error("select udf1(c1) , first(c1) from stb1;")
+
+ tdSql.error("select udf1(c1) , last(c1) from stb1;")
+
+ tdSql.query("select udf1(c1) , top(c1 ,1) from stb1;")
+ tdSql.checkRows(1)
+ tdSql.query("select abs(c1) , top(c1 ,1) from stb1;")
+ tdSql.checkRows(1)
+ tdSql.query("select udf1(c1) , bottom(c1,1) from stb1;")
+ tdSql.checkRows(1)
+ tdSql.query("select ceil(c1) , bottom(c1,1) from stb1;")
+ tdSql.checkRows(1)
+
+ tdSql.error("select udf1(c1) , last_row(c1) from stb1;")
+ tdSql.error("select ceil(c1) , last_row(c1) from stb1;")
+
+ # regular table with compute functions
+
+ tdSql.query("select udf1(num1) , abs(num1) from tb;")
+ tdSql.checkRows(12)
+ tdSql.query("select floor(num1) , abs(num1) from tb;")
+ tdSql.checkRows(12)
+
+ # # bug need fix
+
+ #tdSql.query("select udf1(num1) , csum(num1) from tb;")
+ #tdSql.checkRows(9)
+ #tdSql.query("select ceil(num1) , csum(num1) from tb;")
+ #tdSql.checkRows(9)
+ #tdSql.query("select udf1(c1) , csum(c1) from stb1;")
+ #tdSql.checkRows(22)
+ #tdSql.query("select floor(c1) , csum(c1) from stb1;")
+ #tdSql.checkRows(22)
+
+ # stable with compute functions
+ tdSql.query("select udf1(c1) , abs(c1) from stb1;")
+ tdSql.checkRows(25)
+ tdSql.query("select abs(c1) , ceil(c1) from stb1;")
+ tdSql.checkRows(25)
+
+ # nest query
+ tdSql.query("select abs(udf1(c1)) , abs(ceil(c1)) from stb1 order by ts;")
+ tdSql.checkRows(25)
+ tdSql.checkData(0,0,None)
+ tdSql.checkData(0,1,None)
+ tdSql.checkData(1,0,88)
+ tdSql.checkData(1,1,8)
+
+ tdSql.query("select abs(udf1(c1)) , abs(ceil(c1)) from ct1 order by ts;")
+ tdSql.checkRows(13)
+ tdSql.checkData(0,0,88)
+ tdSql.checkData(0,1,8)
+ tdSql.checkData(1,0,88)
+ tdSql.checkData(1,1,7)
+
+ # bug fix for crash
+ # order by udf function result
+ for _ in range(50):
+ tdSql.query("select udf2(c1) from stb1 group by 1-udf1(c1)")
+ print(tdSql.queryResult)
+
+ # udf functions with filter
+
+ tdSql.query("select abs(udf1(c1)) , abs(ceil(c1)) from stb1 where c1 is null order by ts;")
+ tdSql.checkRows(3)
+ tdSql.checkData(0,0,None)
+ tdSql.checkData(0,1,None)
+
+ tdSql.query("select c1 ,udf1(c1) , c6 ,udf1(c6) from stb1 where c1 > 8 order by ts")
+ tdSql.checkRows(3)
+ tdSql.checkData(0,0,9)
+ tdSql.checkData(0,1,88)
+ tdSql.checkData(0,2,-99.990000000)
+ tdSql.checkData(0,3,88)
+
+ tdSql.query("select sub1.c1, sub2.c2 from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
+ tdSql.checkData(0,0,0)
+ tdSql.checkData(0,1,0)
+ tdSql.checkData(1,0,1)
+ tdSql.checkData(1,1,10)
+
+ tdSql.query("select udf1(sub1.c1), udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
+ tdSql.checkData(0,0,88)
+ tdSql.checkData(0,1,88)
+ tdSql.checkData(1,0,88)
+ tdSql.checkData(1,1,88)
+
+ tdSql.query("select sub1.c1 , udf1(sub1.c1), sub2.c2 ,udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
+ tdSql.checkData(0,0,0)
+ tdSql.checkData(0,1,88)
+ tdSql.checkData(0,2,0)
+ tdSql.checkData(0,3,88)
+ tdSql.checkData(1,0,1)
+ tdSql.checkData(1,1,88)
+ tdSql.checkData(1,2,10)
+ tdSql.checkData(1,3,88)
+
+ tdSql.query("select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
+ tdSql.checkData(0,0,16.881943016)
+ tdSql.checkData(0,1,168.819430161)
+ tdSql.error("select sub1.c1 , udf2(sub1.c1), sub2.c2 ,udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
+
+ # udf functions with group by
+ tdSql.query("select udf1(c1) from ct1 group by c1")
+ tdSql.checkRows(10)
+ tdSql.query("select udf1(c1) from stb1 group by c1")
+ tdSql.checkRows(11)
+ tdSql.query("select c1,c2, udf1(c1,c2) from ct1 group by c1,c2")
+ tdSql.checkRows(10)
+ tdSql.query("select c1,c2, udf1(c1,c2) from stb1 group by c1,c2")
+ tdSql.checkRows(11)
+
+ tdSql.query("select udf2(c1) from ct1 group by c1")
+ tdSql.checkRows(10)
+ tdSql.query("select udf2(c1) from stb1 group by c1")
+ tdSql.checkRows(11)
+ tdSql.query("select c1,c2, udf2(c1,c6) from ct1 group by c1,c2")
+ tdSql.checkRows(10)
+ tdSql.query("select c1,c2, udf2(c1,c6) from stb1 group by c1,c2")
+ tdSql.checkRows(11)
+ tdSql.query("select udf2(c1) from stb1 group by udf1(c1)")
+ tdSql.checkRows(2)
+ tdSql.query("select udf2(c1) from stb1 group by floor(c1)")
+ tdSql.checkRows(11)
+
+ # udf mix with order by
+ tdSql.query("select udf2(c1) from stb1 group by floor(c1) order by udf2(c1)")
+ tdSql.checkRows(11)
+
+
+ def multi_cols_udf(self):
+ tdSql.query("select num1,num2,num3,udf1(num1,num2,num3) from tb")
+ tdSql.checkData(0,0,None)
+ tdSql.checkData(0,1,1)
+ tdSql.checkData(0,2,1.000000000)
+ tdSql.checkData(0,3,None)
+ tdSql.checkData(1,0,1)
+ tdSql.checkData(1,1,1)
+ tdSql.checkData(1,2,1.110000000)
+ tdSql.checkData(1,3,88)
+
+ tdSql.query("select c1,c6,udf1(c1,c6) from stb1 order by ts")
+ tdSql.checkData(1,0,8)
+ tdSql.checkData(1,1,88.880000000)
+ tdSql.checkData(1,2,88)
+
+ tdSql.query("select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;")
+ tdSql.checkRows(22)
+
+ tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
+ tdSql.checkData(0,0,169.661427555)
+ tdSql.checkData(0,1,169.661427555)
+
+ def try_query_sql(self):
+ udf1_sqls = [
+ "select num1 , udf1(num1) ,num2 ,udf1(num2),num3 ,udf1(num3),num4 ,udf1(num4) from tb" ,
+ "select c1 , udf1(c1) ,c2 ,udf1(c2), c3 ,udf1(c3), c4 ,udf1(c4) from stb1 order by c1" ,
+ "select udf1(num1) , max(num1) from tb;" ,
+ "select udf1(num1) , min(num1) from tb;" ,
+ #"select udf1(num1) , top(num1,1) from tb;" ,
+ #"select udf1(num1) , bottom(num1,1) from tb;" ,
+ "select udf1(c1) , max(c1) from stb1;" ,
+ "select udf1(c1) , min(c1) from stb1;" ,
+ #"select udf1(c1) , top(c1 ,1) from stb1;" ,
+ #"select udf1(c1) , bottom(c1,1) from stb1;" ,
+ "select udf1(num1) , abs(num1) from tb;" ,
+ #"select udf1(num1) , csum(num1) from tb;" ,
+ #"select udf1(c1) , csum(c1) from stb1;" ,
+ "select udf1(c1) , abs(c1) from stb1;" ,
+ "select abs(udf1(c1)) , abs(ceil(c1)) from stb1 order by ts;" ,
+ "select abs(udf1(c1)) , abs(ceil(c1)) from ct1 order by ts;" ,
+ "select abs(udf1(c1)) , abs(ceil(c1)) from stb1 where c1 is null order by ts;" ,
+ "select c1 ,udf1(c1) , c6 ,udf1(c6) from stb1 where c1 > 8 order by ts" ,
+ "select udf1(sub1.c1), udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
+ "select sub1.c1 , udf1(sub1.c1), sub2.c2 ,udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
+ "select udf1(c1) from ct1 group by c1" ,
+ "select udf1(c1) from stb1 group by c1" ,
+ "select c1,c2, udf1(c1,c2) from ct1 group by c1,c2" ,
+ "select c1,c2, udf1(c1,c2) from stb1 group by c1,c2" ,
+ "select num1,num2,num3,udf1(num1,num2,num3) from tb" ,
+ "select c1,c6,udf1(c1,c6) from stb1 order by ts" ,
+ "select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;"
+ ]
+ udf2_sqls = ["select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
+ "select udf2(c1) from stb1 group by 1-udf1(c1)" ,
+ "select udf2(num1) ,udf2(num2), udf2(num3) from tb" ,
+ "select udf2(num1)+100 ,udf2(num2)-100, udf2(num3)*100 ,udf2(num3)/100 from tb" ,
+ "select udf2(c1) ,udf2(c6) from stb1 " ,
+ "select udf2(c1)+100 ,udf2(c6)-100 ,udf2(c1)*100 ,udf2(c6)/100 from stb1 " ,
+ "select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from ct1" ,
+ "select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from stb1 " ,
+ "select udf2(c1) from ct1 group by c1" ,
+ "select udf2(c1) from stb1 group by c1" ,
+ "select c1,c2, udf2(c1,c6) from ct1 group by c1,c2" ,
+ "select c1,c2, udf2(c1,c6) from stb1 group by c1,c2" ,
+ "select udf2(c1) from stb1 group by udf1(c1)" ,
+ "select udf2(c1) from stb1 group by floor(c1)" ,
+ "select udf2(c1) from stb1 group by floor(c1) order by udf2(c1)" ,
+
+ "select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
+ "select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
+ "select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
+ "select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null"]
+
+ return udf1_sqls ,udf2_sqls
+
+
+
+ def unexpected_create(self):
+
+ tdLog.info(" create function with out bufsize ")
+ tdSql.query("drop function udf1 ")
+ tdSql.query("drop function udf2 ")
+
+ # create function without buffer
+ tdSql.execute("create function udf1 as '/tmp/udf/libudf1.so' outputtype int")
+ tdSql.execute("create aggregate function udf2 as '/tmp/udf/libudf2.so' outputtype double")
+ udf1_sqls ,udf2_sqls = self.try_query_sql()
+
+ for scalar_sql in udf1_sqls:
+ tdSql.query(scalar_sql)
+ for aggregate_sql in udf2_sqls:
+ tdSql.error(aggregate_sql)
+
+ # create function without aggregate
+
+ tdLog.info(" create function with out aggregate ")
+ tdSql.query("drop function udf1 ")
+ tdSql.query("drop function udf2 ")
+
+ # create function without buffer
+ tdSql.execute("create aggregate function udf1 as '/tmp/udf/libudf1.so' outputtype int bufSize 8 ")
+ tdSql.execute("create function udf2 as '/tmp/udf/libudf2.so' outputtype double bufSize 8")
+ udf1_sqls ,udf2_sqls = self.try_query_sql()
+
+ for scalar_sql in udf1_sqls:
+ tdSql.error(scalar_sql)
+ for aggregate_sql in udf2_sqls:
+ tdSql.error(aggregate_sql)
+
+ tdSql.execute(" create function db as '/tmp/udf/libudf1.so' outputtype int bufSize 8 ")
+ tdSql.execute(" create aggregate function test as '/tmp/udf/libudf1.so' outputtype int bufSize 8 ")
+ tdSql.error(" select db(c1) from stb1 ")
+ tdSql.error(" select db(c1,c6), db(c6) from stb1 ")
+ tdSql.error(" select db(num1,num2), db(num1) from tb ")
+ tdSql.error(" select test(c1) from stb1 ")
+ tdSql.error(" select test(c1,c6), test(c6) from stb1 ")
+ tdSql.error(" select test(num1,num2), test(num1) from tb ")
+
+
+
+ def loop_kill_udfd(self):
+
+ buildPath = self.getBuildPath()
+ if (buildPath == ""):
+ tdLog.exit("taosd not found!")
+ else:
+ tdLog.info("taosd found in %s" % buildPath)
+
+ cfgPath = buildPath + "/../sim/dnode1/cfg"
+ udfdPath = buildPath +'/build/bin/udfd'
+
+ for i in range(3):
+
+ tdLog.info(" loop restart udfd %d_th" % i)
+
+ tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
+ tdSql.checkData(0,0,169.661427555)
+ tdSql.checkData(0,1,169.661427555)
+ # stop udfd cmds
+ get_processID = "ps -ef | grep -w udfd | grep -v grep| grep -v defunct | awk '{print $2}'"
+ processID = subprocess.check_output(get_processID, shell=True).decode("utf-8")
+ stop_udfd = " kill -9 %s" % processID
+ os.system(stop_udfd)
+
+ time.sleep(2)
+
+ tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
+ tdSql.checkData(0,0,169.661427555)
+ tdSql.checkData(0,1,169.661427555)
+
+ # # start udfd cmds
+ # start_udfd = "nohup " + udfdPath +'-c' +cfgPath +" > /dev/null 2>&1 &"
+ # tdLog.info("start udfd : %s " % start_udfd)
+
+ def test_function_name(self):
+ tdLog.info(" create function name is not build_in functions ")
+ tdSql.execute(" drop function udf1 ")
+ tdSql.execute(" drop function udf2 ")
+ tdSql.error("create function max as '/tmp/udf/libudf1.so' outputtype int bufSize 8")
+ tdSql.error("create aggregate function sum as '/tmp/udf/libudf2.so' outputtype double bufSize 8")
+ tdSql.error("create function max as '/tmp/udf/libudf1.so' outputtype int bufSize 8")
+ tdSql.error("create aggregate function sum as '/tmp/udf/libudf2.so' outputtype double bufSize 8")
+ tdSql.error("create aggregate function tbname as '/tmp/udf/libudf2.so' outputtype double bufSize 8")
+ tdSql.error("create aggregate function function as '/tmp/udf/libudf2.so' outputtype double bufSize 8")
+ tdSql.error("create aggregate function stable as '/tmp/udf/libudf2.so' outputtype double bufSize 8")
+ tdSql.error("create aggregate function union as '/tmp/udf/libudf2.so' outputtype double bufSize 8")
+ tdSql.error("create aggregate function 123 as '/tmp/udf/libudf2.so' outputtype double bufSize 8")
+ tdSql.error("create aggregate function 123db as '/tmp/udf/libudf2.so' outputtype double bufSize 8")
+ tdSql.error("create aggregate function mnode as '/tmp/udf/libudf2.so' outputtype double bufSize 8")
+
+ def restart_taosd_query_udf(self):
+
+ for i in range(3):
+ tdLog.info(" this is %d_th restart taosd " %i)
+ tdSql.execute("use db ")
+ tdSql.query("select count(*) from stb1")
+ tdSql.checkRows(1)
+ tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
+ tdSql.checkData(0,0,169.661427555)
+ tdSql.checkData(0,1,169.661427555)
+ tdDnodes.stop(1)
+ tdDnodes.start(1)
+ time.sleep(2)
+
+
+ def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
+
+ print(" env is ok for all ")
+ self.prepare_udf_so()
+ self.prepare_data()
+ self.create_udf_function()
+ self.basic_udf_query()
+ self.multi_cols_udf()
+ self.restart_taosd_query_udf()
+
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/1-insert/influxdb_line_taosc_insert.py b/tests/system-test/1-insert/influxdb_line_taosc_insert.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ddeba46652d7d724f15cea0476c7baa8c60cc30
--- /dev/null
+++ b/tests/system-test/1-insert/influxdb_line_taosc_insert.py
@@ -0,0 +1,1333 @@
+###################################################################
+# Copyright (c) 2021 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import traceback
+import random
+from taos.error import SchemalessError
+import time
+from copy import deepcopy
+import numpy as np
+from util.log import *
+from util.cases import *
+from util.sql import *
+import threading
+from util.types import TDSmlProtocolType, TDSmlTimestampType
+from util.common import tdCom
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+ self._conn = conn
+
+ def createDb(self, name="test", db_update_tag=0):
+ if db_update_tag == 0:
+ tdSql.execute(f"drop database if exists {name}")
+ tdSql.execute(f"create database if not exists {name} precision 'ms' schemaless 1")
+ else:
+ tdSql.execute(f"drop database if exists {name}")
+ tdSql.execute(f"create database if not exists {name} precision 'ms' update 1 schemaless 1")
+ tdSql.execute(f'use {name}')
+
+ def timeTrans(self, time_value, ts_type):
+ if int(time_value) == 0:
+ ts = time.time()
+ else:
+ if ts_type == TDSmlTimestampType.NANO_SECOND.value or ts_type is None:
+ ts = int(''.join(list(filter(str.isdigit, time_value)))) / 1000000000
+ elif ts_type == TDSmlTimestampType.MICRO_SECOND.value:
+ ts = int(''.join(list(filter(str.isdigit, time_value)))) / 1000000
+ elif ts_type == TDSmlTimestampType.MILLI_SECOND.value:
+ ts = int(''.join(list(filter(str.isdigit, time_value)))) / 1000
+ elif ts_type == TDSmlTimestampType.SECOND.value:
+ ts = int(''.join(list(filter(str.isdigit, time_value)))) / 1
+ ulsec = repr(ts).split('.')[1][:6]
+ if len(ulsec) < 6 and int(ulsec) != 0:
+ ulsec = int(ulsec) * (10 ** (6 - len(ulsec)))
+ elif int(ulsec) == 0:
+ ulsec *= 6
+ # * follow two rows added for tsCheckCase
+ td_ts = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(ts))
+ return td_ts
+ #td_ts = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(ts))
+ td_ts = time.strftime("%Y-%m-%d %H:%M:%S.{}".format(ulsec), time.localtime(ts))
+ return td_ts
+ #return repr(datetime.datetime.strptime(td_ts, "%Y-%m-%d %H:%M:%S.%f"))
+
+ def dateToTs(self, datetime_input):
+ return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f")))
+
+ def getTdTypeValue(self, value, vtype="col"):
+ """
+ vtype must be col or tag
+ """
+ if vtype == "col":
+ if value.lower().endswith("i8"):
+ td_type = "TINYINT"
+ td_tag_value = ''.join(list(value)[:-2])
+ elif value.lower().endswith("i16"):
+ td_type = "SMALLINT"
+ td_tag_value = ''.join(list(value)[:-3])
+ elif value.lower().endswith("i32"):
+ td_type = "INT"
+ td_tag_value = ''.join(list(value)[:-3])
+ elif value.lower().endswith("i64"):
+ td_type = "BIGINT"
+ td_tag_value = ''.join(list(value)[:-3])
+ elif value.lower().lower().endswith("u64"):
+ td_type = "BIGINT UNSIGNED"
+ td_tag_value = ''.join(list(value)[:-3])
+ elif value.lower().endswith("f32"):
+ td_type = "FLOAT"
+ td_tag_value = ''.join(list(value)[:-3])
+ td_tag_value = '{}'.format(np.float32(td_tag_value))
+ elif value.lower().endswith("f64"):
+ td_type = "DOUBLE"
+ td_tag_value = ''.join(list(value)[:-3])
+ if "e" in value.lower():
+ td_tag_value = str(float(td_tag_value))
+ elif value.lower().startswith('l"'):
+ td_type = "NCHAR"
+ td_tag_value = ''.join(list(value)[2:-1])
+ elif value.startswith('"') and value.endswith('"'):
+ td_type = "VARCHAR"
+ td_tag_value = ''.join(list(value)[1:-1])
+ elif value.lower() == "t" or value.lower() == "true":
+ td_type = "BOOL"
+ td_tag_value = "True"
+ elif value.lower() == "f" or value.lower() == "false":
+ td_type = "BOOL"
+ td_tag_value = "False"
+ elif value.isdigit():
+ td_type = "DOUBLE"
+ td_tag_value = str(float(value))
+ else:
+ td_type = "DOUBLE"
+ if "e" in value.lower():
+ td_tag_value = str(float(value))
+ else:
+ td_tag_value = value
+ elif vtype == "tag":
+ td_type = "NCHAR"
+ td_tag_value = str(value)
+ return td_type, td_tag_value
+
+ def typeTrans(self, type_list):
+ type_num_list = []
+ for tp in type_list:
+ if tp.upper() == "TIMESTAMP":
+ type_num_list.append(9)
+ elif tp.upper() == "BOOL":
+ type_num_list.append(1)
+ elif tp.upper() == "TINYINT":
+ type_num_list.append(2)
+ elif tp.upper() == "SMALLINT":
+ type_num_list.append(3)
+ elif tp.upper() == "INT":
+ type_num_list.append(4)
+ elif tp.upper() == "BIGINT":
+ type_num_list.append(5)
+ elif tp.upper() == "FLOAT":
+ type_num_list.append(6)
+ elif tp.upper() == "DOUBLE":
+ type_num_list.append(7)
+ elif tp.upper() == "VARCHAR":
+ type_num_list.append(8)
+ elif tp.upper() == "NCHAR":
+ type_num_list.append(10)
+ elif tp.upper() == "BIGINT UNSIGNED":
+ type_num_list.append(14)
+ return type_num_list
+
+ def inputHandle(self, input_sql, ts_type):
+ input_sql_split_list = input_sql.split(" ")
+
+ stb_tag_list = input_sql_split_list[0].split(',')
+ stb_col_list = input_sql_split_list[1].split(',')
+ time_value = self.timeTrans(input_sql_split_list[2], ts_type)
+
+ stb_name = stb_tag_list[0]
+ stb_tag_list.pop(0)
+
+ tag_name_list = []
+ tag_value_list = []
+ td_tag_value_list = []
+ td_tag_type_list = []
+
+ col_name_list = []
+ col_value_list = []
+ td_col_value_list = []
+ td_col_type_list = []
+ for elm in stb_tag_list:
+ if "id=" in elm.lower():
+ tb_name = elm.split('=')[1]
+ tag_name_list.append(elm.split("=")[0])
+ td_tag_value_list.append(tb_name)
+ td_tag_type_list.append("NCHAR")
+ else:
+ tag_name_list.append(elm.split("=")[0])
+ tag_value_list.append(elm.split("=")[1])
+ tb_name = ""
+ td_tag_value_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[1])
+ td_tag_type_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[0])
+
+ for elm in stb_col_list:
+ col_name_list.append(elm.split("=")[0])
+ col_value_list.append(elm.split("=")[1])
+ td_col_value_list.append(self.getTdTypeValue(elm.split("=")[1])[1])
+ td_col_type_list.append(self.getTdTypeValue(elm.split("=")[1])[0])
+
+ final_field_list = []
+ final_field_list.extend(col_name_list)
+ final_field_list.extend(tag_name_list)
+
+ final_type_list = []
+ final_type_list.append("TIMESTAMP")
+ final_type_list.extend(td_col_type_list)
+ final_type_list.extend(td_tag_type_list)
+ final_type_list = self.typeTrans(final_type_list)
+
+ final_value_list = []
+ final_value_list.append(time_value)
+ final_value_list.extend(td_col_value_list)
+ final_value_list.extend(td_tag_value_list)
+ return final_value_list, final_field_list, final_type_list, stb_name, tb_name
+
+ def gen_influxdb_line(self, stb_name, tb_name, id, t0, t1, t2, t3, t4, t5, t6, t7, t8, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9,
+ ts, id_noexist_tag, id_change_tag, id_double_tag, ct_add_tag, ct_am_tag, ct_ma_tag, ct_min_tag, c_multi_tag, t_multi_tag, c_blank_tag, t_blank_tag, chinese_tag):
+ input_sql = f'{stb_name},{id}={tb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9} {ts}'
+ if id_noexist_tag is not None:
+ input_sql = f'{stb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9} {ts}'
+ if ct_add_tag is not None:
+ input_sql = f'{stb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8},t9={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9} {ts}'
+ if id_change_tag is not None:
+ input_sql = f'{stb_name},t0={t0},t1={t1},{id}={tb_name},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9} {ts}'
+ if id_double_tag is not None:
+ input_sql = f'{stb_name},{id}=\"{tb_name}_1\",t0={t0},t1={t1},{id}=\"{tb_name}_2\",t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9} {ts}'
+ if ct_add_tag is not None:
+ input_sql = f'{stb_name},{id}={tb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8},t11={t1},t10={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9},c11={c8},c10={t0} {ts}'
+ if ct_am_tag is not None:
+ input_sql = f'{stb_name},{id}={tb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9},c11={c8},c10={t0} {ts}'
+ if id_noexist_tag is not None:
+ input_sql = f'{stb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9},c11={c8},c10={t0} {ts}'
+ if ct_ma_tag is not None:
+ input_sql = f'{stb_name},{id}={tb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8},t11={t1},t10={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6} {ts}'
+ if id_noexist_tag is not None:
+ input_sql = f'{stb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8},t11={t1},t10={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6} {ts}'
+ if ct_min_tag is not None:
+ input_sql = f'{stb_name},{id}={tb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6} {ts}'
+ if c_multi_tag is not None:
+ input_sql = f'{stb_name},{id}={tb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9} c10={c9} {ts}'
+ if t_multi_tag is not None:
+ input_sql = f'{stb_name},{id}={tb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8} t9={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9} {ts}'
+ if c_blank_tag is not None:
+ input_sql = f'{stb_name},{id}={tb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8} {ts}'
+ if t_blank_tag is not None:
+ input_sql = f'{stb_name} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9} {ts}'
+ if chinese_tag is not None:
+ input_sql = f'{stb_name},to=L"涛思数据" c0=L"涛思数据" {ts}'
+ return input_sql
+
+ def genFullTypeSql(self, stb_name="", tb_name="", value="", t0="", t1="127i8", t2="32767i16", t3="2147483647i32",
+ t4="9223372036854775807i64", t5="11.12345f32", t6="22.123456789f64", t7="\"binaryTagValue\"",
+ t8="L\"ncharTagValue\"", c0="", c1="127i8", c2="32767i16", c3="2147483647i32",
+ c4="9223372036854775807i64", c5="11.12345f32", c6="22.123456789f64", c7="\"binaryColValue\"",
+ c8="L\"ncharColValue\"", c9="7u64", ts=None,
+ id_noexist_tag=None, id_change_tag=None, id_upper_tag=None, id_mixul_tag=None, id_double_tag=None,
+ ct_add_tag=None, ct_am_tag=None, ct_ma_tag=None, ct_min_tag=None, c_multi_tag=None, t_multi_tag=None,
+ c_blank_tag=None, t_blank_tag=None, chinese_tag=None, t_add_tag=None, t_mul_tag=None, point_trans_tag=None,
+ tcp_keyword_tag=None, multi_field_tag=None, protocol=None):
+ if stb_name == "":
+ stb_name = tdCom.getLongName(6, "letters")
+ if tb_name == "":
+ tb_name = f'{stb_name}_{random.randint(0, 65535)}_{random.randint(0, 65535)}'
+ if t0 == "":
+ t0 = "t"
+ if c0 == "":
+ c0 = random.choice(["f", "F", "false", "False", "t", "T", "true", "True"])
+ if value == "":
+ value = random.choice(["f", "F", "false", "False", "t", "T", "true", "True", "TRUE", "FALSE"])
+ if id_upper_tag is not None:
+ id = "ID"
+ else:
+ id = "id"
+ if id_mixul_tag is not None:
+ id = random.choice(["iD", "Id"])
+ else:
+ id = "id"
+ if ts is None:
+ ts = "1626006833639000000"
+ input_sql = self.gen_influxdb_line(stb_name, tb_name, id, t0, t1, t2, t3, t4, t5, t6, t7, t8, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, ts,
+ id_noexist_tag, id_change_tag, id_double_tag, ct_add_tag, ct_am_tag, ct_ma_tag, ct_min_tag, c_multi_tag, t_multi_tag, c_blank_tag, t_blank_tag, chinese_tag)
+ return input_sql, stb_name
+
+ def genMulTagColStr(self, gen_type, count):
+ """
+ gen_type must be "tag"/"col"
+ """
+ if gen_type == "tag":
+ return ','.join(map(lambda i: f't{i}=f', range(count))) + " "
+ if gen_type == "col":
+ return ','.join(map(lambda i: f'c{i}=t', range(count))) + " "
+
+ def genLongSql(self, tag_count, col_count):
+ stb_name = tdCom.getLongName(7, "letters")
+ tb_name = f'{stb_name}_1'
+ tag_str = self.genMulTagColStr("tag", tag_count)
+ col_str = self.genMulTagColStr("col", col_count)
+ ts = "1626006833640000000"
+ long_sql = stb_name + ',' + f'id={tb_name}' + ',' + tag_str + col_str + ts
+ return long_sql, stb_name
+
+ def getNoIdTbName(self, stb_name):
+ query_sql = f"select tbname from {stb_name}"
+ tb_name = self.resHandle(query_sql, True)[0][0]
+ return tb_name
+
+ def resHandle(self, query_sql, query_tag, protocol=None):
+ tdSql.execute('reset query cache')
+ if protocol == "telnet-tcp":
+ time.sleep(0.5)
+ row_info = tdSql.query(query_sql, query_tag)
+ col_info = tdSql.getColNameList(query_sql, query_tag)
+ res_row_list = []
+ sub_list = []
+ for row_mem in row_info:
+ for i in row_mem:
+ if "11.1234" in str(i) and str(i) != "11.12345f32" and str(i) != "11.12345027923584F32":
+ sub_list.append("11.12345027923584")
+ elif "22.1234" in str(i) and str(i) != "22.123456789f64" and str(i) != "22.123456789F64":
+ sub_list.append("22.123456789")
+ else:
+ sub_list.append(str(i))
+ res_row_list.append(sub_list)
+ res_field_list_without_ts = col_info[0][1:]
+ res_type_list = col_info[1]
+ return res_row_list, res_field_list_without_ts, res_type_list
+
+ def resCmp(self, input_sql, stb_name, query_sql="select * from", condition="", ts=None, id=True, none_check_tag=None, ts_type=None, precision=None):
+ expect_list = self.inputHandle(input_sql, ts_type)
+ if precision == None:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, ts_type)
+ else:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, precision)
+ query_sql = f"{query_sql} {stb_name} {condition}"
+ res_row_list, res_field_list_without_ts, res_type_list = self.resHandle(query_sql, True)
+ if ts == 0:
+ res_ts = self.dateToTs(res_row_list[0][0])
+ current_time = time.time()
+ if current_time - res_ts < 60:
+ tdSql.checkEqual(res_row_list[0][1:], expect_list[0][1:])
+ else:
+ print("timeout")
+ tdSql.checkEqual(res_row_list[0], expect_list[0])
+ else:
+ if none_check_tag is not None:
+ none_index_list = [i for i,x in enumerate(res_row_list[0]) if x=="None"]
+ none_index_list.reverse()
+ for j in none_index_list:
+ res_row_list[0].pop(j)
+ expect_list[0].pop(j)
+ tdSql.checkEqual(sorted(res_row_list[0]), sorted(expect_list[0]))
+ tdSql.checkEqual(sorted(res_field_list_without_ts), sorted(expect_list[1]))
+ tdSql.checkEqual(res_type_list, expect_list[2])
+
+ def cleanStb(self):
+ query_sql = "show stables"
+ res_row_list = tdSql.query(query_sql, True)
+ stb_list = map(lambda x: x[0], res_row_list)
+ for stb in stb_list:
+ tdSql.execute(f'drop table if exists {stb}')
+
+ def initCheckCase(self):
+ """
+ normal tags and cols, one for every elm
+ """
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql()
+ self.resCmp(input_sql, stb_name)
+
+ def boolTypeCheckCase(self):
+ """
+ check all normal type
+ """
+ tdCom.cleanTb()
+ full_type_list = ["f", "F", "false", "False", "t", "T", "true", "True"]
+ for t_type in full_type_list:
+ input_sql, stb_name = self.genFullTypeSql(c0=t_type, t0=t_type)
+ self.resCmp(input_sql, stb_name)
+
+ def symbolsCheckCase(self):
+ """
+ check symbols = `~!@#$%^&*()_-+={[}]\|:;'\",<.>/?
+ """
+ '''
+ please test :
+ binary_symbols = '\"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"\'\'"\"'
+ '''
+ tdCom.cleanTb()
+ binary_symbols = '"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"'
+ nchar_symbols = f'L{binary_symbols}'
+ input_sql, stb_name = self.genFullTypeSql(c7=binary_symbols, c8=nchar_symbols, t7=binary_symbols, t8=nchar_symbols)
+ self.resCmp(input_sql, stb_name)
+
+ def tsCheckCase(self):
+ """
+ test ts list --> ["1626006833639000000", "1626006833639019us", "1626006833640ms", "1626006834s", "1626006822639022"]
+ # ! us级时间戳都为0时,数据库中查询显示,但python接口拿到的结果不显示 .000000的情况请确认,目前修改时间处理代码可以通过
+ """
+ tdCom.cleanTb()
+ ts_list = ["1626006833639000000", "1626006833639019us", "1626006833640ms", "1626006834s", "1626006822639022", 0]
+ for ts in ts_list:
+ input_sql, stb_name = self.genFullTypeSql(ts=ts)
+ self.resCmp(input_sql, stb_name, ts=ts)
+
+ def idSeqCheckCase(self):
+ """
+ check id.index in tags
+ eg: t0=**,id=**,t1=**
+ """
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(id_change_tag=True)
+ self.resCmp(input_sql, stb_name)
+
+ def idUpperCheckCase(self):
+ """
+ check id param
+ eg: id and ID
+ """
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(id_upper_tag=True)
+ self.resCmp(input_sql, stb_name)
+ input_sql, stb_name = self.genFullTypeSql(id_change_tag=True, id_upper_tag=True)
+ self.resCmp(input_sql, stb_name)
+
+ def noIdCheckCase(self):
+ """
+ id not exist
+ """
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(id_noexist_tag=True)
+ self.resCmp(input_sql, stb_name)
+ query_sql = f"select tbname from {stb_name}"
+ res_row_list = self.resHandle(query_sql, True)[0]
+ if len(res_row_list[0][0]) > 0:
+ tdSql.checkColNameList(res_row_list, res_row_list)
+ else:
+ tdSql.checkColNameList(res_row_list, "please check noIdCheckCase")
+
+ def maxColTagCheckCase(self):
+ """
+ max tag count is 128
+ max col count is ??
+ """
+ for input_sql in [self.genLongSql(127, 1)[0], self.genLongSql(1, 4093)[0]]:
+ tdCom.cleanTb()
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ for input_sql in [self.genLongSql(129, 1)[0], self.genLongSql(1, 4095)[0]]:
+ tdCom.cleanTb()
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def idIllegalNameCheckCase(self):
+ """
+ test illegal id name
+ mix "~!@#$¥%^&*()-+|[]、「」【】;:《》<>?"
+ """
+ tdCom.cleanTb()
+ rstr = list("~!@#$¥%^&*()-+|[]、「」【】;:《》<>?")
+ for i in rstr:
+ stb_name=f"aaa{i}bbb"
+ input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name)
+ self.resCmp(input_sql, f'`{stb_name}`')
+ tdSql.execute(f'drop table if exists `{stb_name}`')
+
+ def idStartWithNumCheckCase(self):
+ """
+ id is start with num
+ """
+ tdCom.cleanTb()
+ input_sql = self.genFullTypeSql(tb_name=f"\"1aaabbb\"")[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def nowTsCheckCase(self):
+ """
+ check now unsupported
+ """
+ tdCom.cleanTb()
+ input_sql = self.genFullTypeSql(ts="now")[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def dateFormatTsCheckCase(self):
+ """
+ check date format ts unsupported
+ """
+ tdCom.cleanTb()
+ input_sql = self.genFullTypeSql(ts="2021-07-21\ 19:01:46.920")[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def illegalTsCheckCase(self):
+ """
+ check ts format like 16260068336390us19
+ """
+ tdCom.cleanTb()
+ input_sql = self.genFullTypeSql(ts="16260068336390us19")[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def tagValueLengthCheckCase(self):
+ """
+ check full type tag value limit
+ """
+ tdCom.cleanTb()
+ # i8
+ for t1 in ["-128i8", "127i8"]:
+ input_sql, stb_name = self.genFullTypeSql(t1=t1)
+ self.resCmp(input_sql, stb_name)
+ for t1 in ["-129i8", "128i8"]:
+ input_sql = self.genFullTypeSql(t1=t1)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ #i16
+ for t2 in ["-32768i16", "32767i16"]:
+ input_sql, stb_name = self.genFullTypeSql(t2=t2)
+ self.resCmp(input_sql, stb_name)
+ for t2 in ["-32769i16", "32768i16"]:
+ input_sql = self.genFullTypeSql(t2=t2)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ #i32
+ for t3 in ["-2147483648i32", "2147483647i32"]:
+ input_sql, stb_name = self.genFullTypeSql(t3=t3)
+ self.resCmp(input_sql, stb_name)
+ for t3 in ["-2147483649i32", "2147483648i32"]:
+ input_sql = self.genFullTypeSql(t3=t3)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ #i64
+ for t4 in ["-9223372036854775808i64", "9223372036854775807i64"]:
+ input_sql, stb_name = self.genFullTypeSql(t4=t4)
+ self.resCmp(input_sql, stb_name)
+ for t4 in ["-9223372036854775809i64", "9223372036854775808i64"]:
+ input_sql = self.genFullTypeSql(t4=t4)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # f32
+ for t5 in [f"{-3.4028234663852885981170418348451692544*(10**38)}f32", f"{3.4028234663852885981170418348451692544*(10**38)}f32"]:
+ input_sql, stb_name = self.genFullTypeSql(t5=t5)
+ self.resCmp(input_sql, stb_name)
+ # * limit set to 4028234664*(10**38)
+ for t5 in [f"{-3.4028234664*(10**38)}f32", f"{3.4028234664*(10**38)}f32"]:
+ input_sql = self.genFullTypeSql(t5=t5)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # f64
+ for t6 in [f'{-1.79769*(10**308)}f64', f'{-1.79769*(10**308)}f64']:
+ input_sql, stb_name = self.genFullTypeSql(t6=t6)
+ self.resCmp(input_sql, stb_name)
+ # * limit set to 1.797693134862316*(10**308)
+ for c6 in [f'{-1.797693134862316*(10**308)}f64', f'{-1.797693134862316*(10**308)}f64']:
+ input_sql = self.genFullTypeSql(c6=c6)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # binary
+ stb_name = tdCom.getLongName(7, "letters")
+ input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(16374, "letters")}" c0=f 1626006833639000000'
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+
+ input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(16375, "letters")}" c0=f 1626006833639000000'
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # nchar
+ # * legal nchar could not be larger than 16374/4
+ stb_name = tdCom.getLongName(7, "letters")
+ input_sql = f'{stb_name},t0=t,t1=L"{tdCom.getLongName(4093, "letters")}" c0=f 1626006833639000000'
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+
+ input_sql = f'{stb_name},t0=t,t1=L"{tdCom.getLongName(4094, "letters")}" c0=f 1626006833639000000'
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def colValueLengthCheckCase(self):
+ """
+ check full type col value limit
+ """
+ tdCom.cleanTb()
+ # i8
+ for c1 in ["-128i8", "127i8"]:
+ input_sql, stb_name = self.genFullTypeSql(c1=c1)
+ self.resCmp(input_sql, stb_name)
+
+ for c1 in ["-129i8", "128i8"]:
+ input_sql = self.genFullTypeSql(c1=c1)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+ # i16
+ for c2 in ["-32768i16"]:
+ input_sql, stb_name = self.genFullTypeSql(c2=c2)
+ self.resCmp(input_sql, stb_name)
+ for c2 in ["-32769i16", "32768i16"]:
+ input_sql = self.genFullTypeSql(c2=c2)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # i32
+ for c3 in ["-2147483648i32"]:
+ input_sql, stb_name = self.genFullTypeSql(c3=c3)
+ self.resCmp(input_sql, stb_name)
+ for c3 in ["-2147483649i32", "2147483648i32"]:
+ input_sql = self.genFullTypeSql(c3=c3)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # i64
+ for c4 in ["-9223372036854775808i64"]:
+ input_sql, stb_name = self.genFullTypeSql(c4=c4)
+ self.resCmp(input_sql, stb_name)
+ for c4 in ["-9223372036854775809i64", "9223372036854775808i64"]:
+ input_sql = self.genFullTypeSql(c4=c4)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # f32
+ for c5 in [f"{-3.4028234663852885981170418348451692544*(10**38)}f32", f"{3.4028234663852885981170418348451692544*(10**38)}f32"]:
+ input_sql, stb_name = self.genFullTypeSql(c5=c5)
+ self.resCmp(input_sql, stb_name)
+ # * limit set to 4028234664*(10**38)
+ for c5 in [f"{-3.4028234664*(10**38)}f32", f"{3.4028234664*(10**38)}f32"]:
+ input_sql = self.genFullTypeSql(c5=c5)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # f64
+ for c6 in [f'{-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)}f64', f'{-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)}f64']:
+ input_sql, stb_name = self.genFullTypeSql(c6=c6)
+ self.resCmp(input_sql, stb_name)
+ # * limit set to 1.797693134862316*(10**308)
+ for c6 in [f'{-1.797693134862316*(10**308)}f64', f'{-1.797693134862316*(10**308)}f64']:
+ input_sql = self.genFullTypeSql(c6=c6)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # # # binary
+ # stb_name = tdCom.getLongName(7, "letters")
+ # input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(16374, "letters")}" 1626006833639000000'
+ # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+
+ # input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(16375, "letters")}" 1626006833639000000'
+ # try:
+ # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ # except SchemalessError as err:
+ # tdSql.checkNotEqual(err.errno, 0)
+
+ # # nchar
+ # # * legal nchar could not be larger than 16374/4
+ # stb_name = tdCom.getLongName(7, "letters")
+ # input_sql = f'{stb_name},t0=t c0=f,c1=L"{tdCom.getLongName(4093, "letters")}" 1626006833639000000'
+ # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+
+ # input_sql = f'{stb_name},t0=t c0=f,c1=L"{tdCom.getLongName(4094, "letters")}" 1626006833639000000'
+ # try:
+ # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ # except SchemalessError as err:
+ # tdSql.checkNotEqual(err.errno, 0)
+
+ def tagColIllegalValueCheckCase(self):
+
+ """
+ test illegal tag col value
+ """
+ tdCom.cleanTb()
+ # bool
+ for i in ["TrUe", "tRue", "trUe", "truE", "FalsE", "fAlse", "faLse", "falSe", "falsE"]:
+ input_sql1 = self.genFullTypeSql(t0=i)[0]
+ try:
+ self._conn.schemaless_insert([input_sql1], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+ input_sql2 = self.genFullTypeSql(c0=i)[0]
+ try:
+ self._conn.schemaless_insert([input_sql2], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # i8 i16 i32 i64 f32 f64
+ for input_sql in [
+ self.genFullTypeSql(t1="1s2i8")[0],
+ self.genFullTypeSql(t2="1s2i16")[0],
+ self.genFullTypeSql(t3="1s2i32")[0],
+ self.genFullTypeSql(t4="1s2i64")[0],
+ self.genFullTypeSql(t5="11.1s45f32")[0],
+ self.genFullTypeSql(t6="11.1s45f64")[0],
+ self.genFullTypeSql(c1="1s2i8")[0],
+ self.genFullTypeSql(c2="1s2i16")[0],
+ self.genFullTypeSql(c3="1s2i32")[0],
+ self.genFullTypeSql(c4="1s2i64")[0],
+ self.genFullTypeSql(c5="11.1s45f32")[0],
+ self.genFullTypeSql(c6="11.1s45f64")[0],
+ self.genFullTypeSql(c9="1s1u64")[0]
+ ]:
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # check binary and nchar blank
+ stb_name = tdCom.getLongName(7, "letters")
+ input_sql1 = f'{stb_name},t0=t c0=f,c1="abc aaa" 1626006833639000000'
+ input_sql2 = f'{stb_name},t0=t c0=f,c1=L"abc aaa" 1626006833639000000'
+ input_sql3 = f'{stb_name},t0=t,t1="abc aaa" c0=f 1626006833639000000'
+ input_sql4 = f'{stb_name},t0=t,t1=L"abc aaa" c0=f 1626006833639000000'
+ for input_sql in [input_sql1, input_sql2, input_sql3, input_sql4]:
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # check accepted binary and nchar symbols
+ # # * ~!@#$¥%^&*()-+={}|[]、「」:;
+ for symbol in list('~!@#$¥%^&*()-+={}|[]、「」:;'):
+ input_sql1 = f'{stb_name},t0=t c0=f,c1="abc{symbol}aaa" 1626006833639000000'
+ input_sql2 = f'{stb_name},t0=t,t1="abc{symbol}aaa" c0=f 1626006833639000000'
+ self._conn.schemaless_insert([input_sql1], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ # self._conn.schemaless_insert([input_sql2], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+
+ def duplicateIdTagColInsertCheckCase(self):
+ """
+ check duplicate Id Tag Col
+ """
+ tdCom.cleanTb()
+ input_sql_id = self.genFullTypeSql(id_double_tag=True)[0]
+ try:
+ self._conn.schemaless_insert([input_sql_id], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ input_sql = self.genFullTypeSql()[0]
+ input_sql_tag = input_sql.replace("t5", "t6")
+ try:
+ self._conn.schemaless_insert([input_sql_tag], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ input_sql = self.genFullTypeSql()[0]
+ input_sql_col = input_sql.replace("c5", "c6")
+ try:
+ self._conn.schemaless_insert([input_sql_col], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ input_sql = self.genFullTypeSql()[0]
+ input_sql_col = input_sql.replace("c5", "C6")
+ try:
+ self._conn.schemaless_insert([input_sql_col], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ ##### stb exist #####
+ @tdCom.smlPass
+ def noIdStbExistCheckCase(self):
+ """
+ case no id when stb exist
+ """
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(tb_name="sub_table_0123456", t0="f", c0="f")
+ self.resCmp(input_sql, stb_name)
+ input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, id_noexist_tag=True, t0="f", c0="f")
+ self.resCmp(input_sql, stb_name, condition='where tbname like "t_%"')
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(2)
+ # TODO cover other case
+
+ def duplicateInsertExistCheckCase(self):
+ """
+ check duplicate insert when stb exist
+ """
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql()
+ self.resCmp(input_sql, stb_name)
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ self.resCmp(input_sql, stb_name)
+
+ @tdCom.smlPass
+ def tagColBinaryNcharLengthCheckCase(self):
+ """
+ check length increase
+ """
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql()
+ self.resCmp(input_sql, stb_name)
+ tb_name = tdCom.getLongName(5, "letters")
+ input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name,t7="\"binaryTagValuebinaryTagValue\"", t8="L\"ncharTagValuencharTagValue\"", c7="\"binaryTagValuebinaryTagValue\"", c8="L\"ncharTagValuencharTagValue\"")
+ self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"')
+
+ @tdCom.smlPass
+ def tagColAddDupIDCheckCase(self):
+ """
+ check column and tag count add, stb and tb duplicate
+ * tag: alter table ...
+ * col: when update==0 and ts is same, unchange
+ * so this case tag&&value will be added,
+ * col is added without value when update==0
+ * col is added with value when update==1
+ """
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ for db_update_tag in [0, 1]:
+ if db_update_tag == 1 :
+ self.createDb("test_update", db_update_tag=db_update_tag)
+ input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="f", c0="f")
+ self.resCmp(input_sql, stb_name)
+ self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t0="f", c0="f", ct_add_tag=True)
+ if db_update_tag == 1 :
+ self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"')
+ else:
+ self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"', none_check_tag=True)
+ @tdCom.smlPass
+ def tagColAddCheckCase(self):
+ """
+ check column and tag count add
+ """
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="f", c0="f")
+ self.resCmp(input_sql, stb_name)
+ tb_name_1 = tdCom.getLongName(7, "letters")
+ input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name_1, t0="f", c0="f", ct_add_tag=True)
+ self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name_1}"')
+ res_row_list = self.resHandle(f"select c10,c11,t10,t11 from {tb_name}", True)[0]
+ tdSql.checkEqual(res_row_list[0], ['None', 'None', 'None', 'None'])
+ self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"', none_check_tag=True)
+
+ def tagMd5Check(self):
+ """
+ condition: stb not change
+ insert two table, keep tag unchange, change col
+ """
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(t0="f", c0="f", id_noexist_tag=True)
+ self.resCmp(input_sql, stb_name)
+ tb_name1 = self.getNoIdTbName(stb_name)
+ input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, t0="f", c0="f", id_noexist_tag=True)
+ self.resCmp(input_sql, stb_name)
+ tb_name2 = self.getNoIdTbName(stb_name)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(1)
+ tdSql.checkEqual(tb_name1, tb_name2)
+ input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, t0="f", c0="f", id_noexist_tag=True, ct_add_tag=True)
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ tb_name3 = self.getNoIdTbName(stb_name)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(2)
+ tdSql.checkNotEqual(tb_name1, tb_name3)
+
+ # * tag binary max is 16384, col+ts binary max 49151
+ def tagColBinaryMaxLengthCheckCase(self):
+ """
+ every binary and nchar must be length+2
+ """
+ tdCom.cleanTb()
+ stb_name = tdCom.getLongName(7, "letters")
+ tb_name = f'{stb_name}_1'
+ input_sql = f'{stb_name},id="{tb_name}",t0=t c0=f 1626006833639000000'
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+
+ # * every binary and nchar must be length+2, so here is two tag, max length could not larger than 16384-2*2
+ input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(16374, "letters")}",t2="{tdCom.getLongName(5, "letters")}" c0=f 1626006833639000000'
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(2)
+ input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(16374, "letters")}",t2="{tdCom.getLongName(6, "letters")}" c0=f 1626006833639000000'
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(2)
+
+ # # * check col,col+ts max in describe ---> 16143
+ input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(16374, "letters")}",c2="{tdCom.getLongName(16374, "letters")}",c3="{tdCom.getLongName(16374, "letters")}",c4="{tdCom.getLongName(12, "letters")}" 1626006833639000000'
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(3)
+ input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(16374, "letters")}",c2="{tdCom.getLongName(16374, "letters")}",c3="{tdCom.getLongName(16374, "letters")}",c4="{tdCom.getLongName(13, "letters")}" 1626006833639000000'
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(3)
+
+ # * tag nchar max is 16374/4, col+ts nchar max 49151
+ def tagColNcharMaxLengthCheckCase(self):
+ """
+ check nchar length limit
+ """
+ tdCom.cleanTb()
+ stb_name = tdCom.getLongName(7, "letters")
+ tb_name = f'{stb_name}_1'
+ input_sql = f'{stb_name},id="{tb_name}",t0=t c0=f 1626006833639000000'
+ code = self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+
+ # * legal nchar could not be larger than 16374/4
+ input_sql = f'{stb_name},t0=t,t1=L"{tdCom.getLongName(4093, "letters")}",t2=L"{tdCom.getLongName(1, "letters")}" c0=f 1626006833639000000'
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(2)
+ input_sql = f'{stb_name},t0=t,t1=L"{tdCom.getLongName(4093, "letters")}",t2=L"{tdCom.getLongName(2, "letters")}" c0=f 1626006833639000000'
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(2)
+
+ input_sql = f'{stb_name},t0=t c0=f,c1=L"{tdCom.getLongName(4093, "letters")}",c2=L"{tdCom.getLongName(4093, "letters")}",c3=L"{tdCom.getLongName(4093, "letters")}",c4=L"{tdCom.getLongName(4, "letters")}" 1626006833639000000'
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(3)
+ input_sql = f'{stb_name},t0=t c0=f,c1=L"{tdCom.getLongName(4093, "letters")}",c2=L"{tdCom.getLongName(4093, "letters")}",c3=L"{tdCom.getLongName(4093, "letters")}",c4=L"{tdCom.getLongName(5, "letters")}" 1626006833639000000'
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(3)
+
+ def batchInsertCheckCase(self):
+ """
+ test batch insert
+ """
+ tdCom.cleanTb()
+ stb_name = tdCom.getLongName(8, "letters")
+ # tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)')
+ lines = ["st123456,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000",
+ "st123456,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000",
+ f"{stb_name},t2=5f64,t3=L\"ste\" c1=true,c2=4i64,c3=\"iam\" 1626056811823316532",
+ "stf567890,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000",
+ "st123456,t1=4i64,t2=5f64,t3=\"t4\" c1=3i64,c3=L\"passitagain\",c2=true,c4=5f64 1626006833642000000",
+ f"{stb_name},t2=5f64,t3=L\"ste2\" c3=\"iamszhou\",c4=false 1626056811843316532",
+ f"{stb_name},t2=5f64,t3=L\"ste2\" c3=\"iamszhou\",c4=false,c5=32i8,c6=64i16,c7=32i32,c8=88.88f32 1626056812843316532",
+ "st123456,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000",
+ "st123456,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin_stf\",c2=false,c5=5f64,c6=7u64 1626006933641000000"
+ ]
+ self._conn.schemaless_insert(lines, TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+
+ def multiInsertCheckCase(self, count):
+ """
+ test multi insert
+ """
+ tdCom.cleanTb()
+ sql_list = []
+ stb_name = tdCom.getLongName(8, "letters")
+ # tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)')
+ for i in range(count):
+ input_sql = self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True)[0]
+ sql_list.append(input_sql)
+ print(sql_list)
+ self._conn.schemaless_insert(sql_list, TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+
+ def batchErrorInsertCheckCase(self):
+ """
+ test batch error insert
+ """
+ tdCom.cleanTb()
+ stb_name = tdCom.getLongName(8, "letters")
+ lines = ["st123456,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000",
+ f"{stb_name},t2=5f64,t3=L\"ste\" c1=tRue,c2=4i64,c3=\"iam\" 1626056811823316532ns"]
+ try:
+ self._conn.schemaless_insert(lines, TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def genSqlList(self, count=5, stb_name="", tb_name=""):
+ """
+ stb --> supertable
+ tb --> table
+ ts --> timestamp, same default
+ col --> column, same default
+ tag --> tag, same default
+ d --> different
+ s --> same
+ a --> add
+ m --> minus
+ """
+ d_stb_d_tb_list = list()
+ s_stb_s_tb_list = list()
+ s_stb_s_tb_a_col_a_tag_list = list()
+ s_stb_s_tb_m_col_m_tag_list = list()
+ s_stb_d_tb_list = list()
+ s_stb_d_tb_a_col_m_tag_list = list()
+ s_stb_d_tb_a_tag_m_col_list = list()
+ s_stb_s_tb_d_ts_list = list()
+ s_stb_s_tb_d_ts_a_col_m_tag_list = list()
+ s_stb_s_tb_d_ts_a_tag_m_col_list = list()
+ s_stb_d_tb_d_ts_list = list()
+ s_stb_d_tb_d_ts_a_col_m_tag_list = list()
+ s_stb_d_tb_d_ts_a_tag_m_col_list = list()
+ for i in range(count):
+ d_stb_d_tb_list.append(self.genFullTypeSql(t0="f", c0="f"))
+ s_stb_s_tb_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"'))
+ s_stb_s_tb_a_col_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', ct_add_tag=True))
+ s_stb_s_tb_m_col_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', ct_min_tag=True))
+ s_stb_d_tb_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True))
+ s_stb_d_tb_a_col_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ct_am_tag=True))
+ s_stb_d_tb_a_tag_m_col_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ct_ma_tag=True))
+ s_stb_s_tb_d_ts_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', ts=0))
+ s_stb_s_tb_d_ts_a_col_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', ts=0, ct_am_tag=True))
+ s_stb_s_tb_d_ts_a_tag_m_col_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', ts=0, ct_ma_tag=True))
+ s_stb_d_tb_d_ts_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0))
+ s_stb_d_tb_d_ts_a_col_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0, ct_am_tag=True))
+ s_stb_d_tb_d_ts_a_tag_m_col_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0, ct_ma_tag=True))
+
+ return d_stb_d_tb_list, s_stb_s_tb_list, s_stb_s_tb_a_col_a_tag_list, s_stb_s_tb_m_col_m_tag_list, \
+ s_stb_d_tb_list, s_stb_d_tb_a_col_m_tag_list, s_stb_d_tb_a_tag_m_col_list, s_stb_s_tb_d_ts_list, \
+ s_stb_s_tb_d_ts_a_col_m_tag_list, s_stb_s_tb_d_ts_a_tag_m_col_list, s_stb_d_tb_d_ts_list, \
+ s_stb_d_tb_d_ts_a_col_m_tag_list, s_stb_d_tb_d_ts_a_tag_m_col_list
+
+
+ def genMultiThreadSeq(self, sql_list):
+ tlist = list()
+ for insert_sql in sql_list:
+ t = threading.Thread(target=self._conn.schemaless_insert, args=([insert_sql[0]], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value,))
+ tlist.append(t)
+ return tlist
+
+ def multiThreadRun(self, tlist):
+ for t in tlist:
+ t.start()
+ for t in tlist:
+ t.join()
+
+ def stbInsertMultiThreadCheckCase(self):
+ """
+ thread input different stb
+ """
+ tdCom.cleanTb()
+ input_sql = self.genSqlList()[0]
+ self.multiThreadRun(self.genMultiThreadSeq(input_sql))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(5)
+
+ def sStbStbDdataInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb tb, different data, result keep first data
+ """
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
+ self.resCmp(input_sql, stb_name)
+ s_stb_s_tb_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[1]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(1)
+ expected_tb_name = self.getNoIdTbName(stb_name)[0]
+ tdSql.checkEqual(tb_name, expected_tb_name)
+ tdSql.query(f"select * from {stb_name};")
+ tdSql.checkRows(1)
+
+ def sStbStbDdataAtcInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb tb, different data, add columes and tags, result keep first data
+ """
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
+ self.resCmp(input_sql, stb_name)
+ s_stb_s_tb_a_col_a_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[2]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_a_col_a_tag_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(1)
+ expected_tb_name = self.getNoIdTbName(stb_name)[0]
+ tdSql.checkEqual(tb_name, expected_tb_name)
+ tdSql.query(f"select * from {stb_name};")
+ tdSql.checkRows(1)
+
+ def sStbStbDdataMtcInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb tb, different data, minus columes and tags, result keep first data
+ """
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
+ self.resCmp(input_sql, stb_name)
+ s_stb_s_tb_m_col_m_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[3]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_m_col_m_tag_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(1)
+ expected_tb_name = self.getNoIdTbName(stb_name)[0]
+ tdSql.checkEqual(tb_name, expected_tb_name)
+ tdSql.query(f"select * from {stb_name};")
+ tdSql.checkRows(1)
+
+ def sStbDtbDdataInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb, different tb, different data
+ """
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql()
+ self.resCmp(input_sql, stb_name)
+ s_stb_d_tb_list = self.genSqlList(stb_name=stb_name)[4]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(6)
+
+ def sStbDtbDdataAcMtInsertMultiThreadCheckCase(self):
+ """
+ #! concurrency conflict
+ """
+ """
+ thread input same stb, different tb, different data, add col, mul tag
+ """
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql()
+ self.resCmp(input_sql, stb_name)
+ s_stb_d_tb_a_col_m_tag_list = self.genSqlList(stb_name=stb_name)[5]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_a_col_m_tag_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(6)
+
+ def sStbDtbDdataAtMcInsertMultiThreadCheckCase(self):
+ """
+ #! concurrency conflict
+ """
+ """
+ thread input same stb, different tb, different data, add tag, mul col
+ """
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql()
+ self.resCmp(input_sql, stb_name)
+ s_stb_d_tb_a_tag_m_col_list = self.genSqlList(stb_name=stb_name)[6]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_a_tag_m_col_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(6)
+
+ def sStbStbDdataDtsInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb tb, different ts
+ """
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
+ self.resCmp(input_sql, stb_name)
+ s_stb_s_tb_d_ts_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[7]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(1)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(6)
+
+ def sStbStbDdataDtsAcMtInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb tb, different ts, add col, mul tag
+ """
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
+ self.resCmp(input_sql, stb_name)
+ s_stb_s_tb_d_ts_a_col_m_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[8]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_a_col_m_tag_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(1)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(6)
+ tdSql.query(f"select * from {stb_name} where t8 is not NULL")
+ tdSql.checkRows(6)
+ tdSql.query(f"select * from {tb_name} where c11 is not NULL;")
+ tdSql.checkRows(5)
+
+ def sStbStbDdataDtsAtMcInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb tb, different ts, add tag, mul col
+ """
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
+ self.resCmp(input_sql, stb_name)
+ s_stb_s_tb_d_ts_a_tag_m_col_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[9]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_a_tag_m_col_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(1)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(6)
+ for c in ["c7", "c8", "c9"]:
+ tdSql.query(f"select * from {stb_name} where {c} is NULL")
+ tdSql.checkRows(5)
+ for t in ["t10", "t11"]:
+ tdSql.query(f"select * from {stb_name} where {t} is not NULL;")
+ tdSql.checkRows(6)
+
+ def sStbDtbDdataDtsInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb, different tb, data, ts
+ """
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql()
+ self.resCmp(input_sql, stb_name)
+ s_stb_d_tb_d_ts_list = self.genSqlList(stb_name=stb_name)[10]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_d_ts_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(6)
+
+ def sStbDtbDdataDtsAcMtInsertMultiThreadCheckCase(self):
+ """
+ # ! concurrency conflict
+ """
+ """
+ thread input same stb, different tb, data, ts, add col, mul tag
+ """
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql()
+ self.resCmp(input_sql, stb_name)
+ s_stb_d_tb_d_ts_a_col_m_tag_list = self.genSqlList(stb_name=stb_name)[11]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_d_ts_a_col_m_tag_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(6)
+
+ def test(self):
+ input_sql1 = "rfasta,id=\"rfasta_1\",t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"ddzhiksj\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"bnhwlgvj\",c8=L\"ncharTagValue\",c9=7u64 1626006933640000000ns"
+ input_sql2 = "rfasta,id=\"rfasta_1\",t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64 c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64 1626006933640000000ns"
+ try:
+ self._conn.insert_lines([input_sql1])
+ self._conn.insert_lines([input_sql2])
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+ # self._conn.insert_lines([input_sql2])
+ # input_sql3 = f'abcd,id="cc¥Ec",t0=True,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="ndsfdrum",t8=L"ncharTagValue" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="igwoehkm",c8=L"ncharColValue",c9=7u64 0'
+ # print(input_sql3)
+ # input_sql4 = 'hmemeb,id="kilrcrldgf",t0=F,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="fysodjql",t8=L"ncharTagValue" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="waszbfvc",c8=L"ncharColValue",c9=7u64 0'
+ # code = self._conn.insert_lines([input_sql3])
+ # print(code)
+ # self._conn.insert_lines([input_sql4])
+
+ def runAll(self):
+ self.initCheckCase()
+ self.boolTypeCheckCase()
+ self.symbolsCheckCase()
+ # self.tsCheckCase()
+ self.idSeqCheckCase()
+ self.idUpperCheckCase()
+ self.noIdCheckCase()
+ # self.maxColTagCheckCase()
+ self.idIllegalNameCheckCase()
+ self.idStartWithNumCheckCase()
+ self.nowTsCheckCase()
+ self.dateFormatTsCheckCase()
+ self.illegalTsCheckCase()
+ # self.tagValueLengthCheckCase()
+ self.colValueLengthCheckCase()
+ self.tagColIllegalValueCheckCase()
+ self.duplicateIdTagColInsertCheckCase()
+ self.noIdStbExistCheckCase()
+ self.duplicateInsertExistCheckCase()
+ self.tagColBinaryNcharLengthCheckCase()
+ self.tagColAddDupIDCheckCase()
+ self.tagColAddCheckCase()
+ self.tagMd5Check()
+ # self.tagColBinaryMaxLengthCheckCase()
+ # self.tagColNcharMaxLengthCheckCase()
+ self.batchInsertCheckCase()
+ self.multiInsertCheckCase(10)
+ self.batchErrorInsertCheckCase()
+ # MultiThreads
+ # self.stbInsertMultiThreadCheckCase()
+ # self.sStbStbDdataInsertMultiThreadCheckCase()
+ # self.sStbStbDdataAtcInsertMultiThreadCheckCase()
+ # self.sStbStbDdataMtcInsertMultiThreadCheckCase()
+ # self.sStbDtbDdataInsertMultiThreadCheckCase()
+
+ # # # ! concurrency conflict
+ # # self.sStbDtbDdataAcMtInsertMultiThreadCheckCase()
+ # # self.sStbDtbDdataAtMcInsertMultiThreadCheckCase()
+
+ # self.sStbStbDdataDtsInsertMultiThreadCheckCase()
+
+ # # # ! concurrency conflict
+ # # self.sStbStbDdataDtsAcMtInsertMultiThreadCheckCase()
+ # # self.sStbStbDdataDtsAtMcInsertMultiThreadCheckCase()
+
+ # self.sStbDtbDdataDtsInsertMultiThreadCheckCase()
+
+ # # ! concurrency conflict
+ # # self.sStbDtbDdataDtsAcMtInsertMultiThreadCheckCase()
+
+
+
+ def run(self):
+ print("running {}".format(__file__))
+ self.createDb()
+ try:
+ self.runAll()
+ except Exception as err:
+ print(''.join(traceback.format_exception(None, err, err.__traceback__)))
+ raise err
+ # self.tagColIllegalValueCheckCase()
+ # self.test()
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/1-insert/insertWithMoreVgroup.py b/tests/system-test/1-insert/insertWithMoreVgroup.py
index f0f35831dbd5a276c98e2eede114ea14b7bcc5b2..8d2870fc2cf068153a424d2b1613188c018c6463 100644
--- a/tests/system-test/1-insert/insertWithMoreVgroup.py
+++ b/tests/system-test/1-insert/insertWithMoreVgroup.py
@@ -294,7 +294,7 @@ class TDTestCase:
return
def test_case3(self):
- self.taosBenchCreate("127.0.0.1","no","db1", "stb1", 1, 8, 1*10000)
+ self.taosBenchCreate("127.0.0.1","no","db1", "stb1", 1, 1, 1*10)
# self.taosBenchCreate("test209","no","db2", "stb2", 1, 8, 1*10000)
# self.taosBenchCreate("chenhaoran02","no","db1", "stb1", 1, 8, 1*10000)
@@ -349,17 +349,17 @@ class TDTestCase:
# run case
def run(self):
- # create database and tables。
- self.test_case1()
- tdLog.debug(" LIMIT test_case1 ............ [OK]")
+ # # create database and tables。
+ # self.test_case1()
+ # tdLog.debug(" LIMIT test_case1 ............ [OK]")
# # taosBenchmark : create database and table
# self.test_case2()
# tdLog.debug(" LIMIT test_case2 ............ [OK]")
- # # taosBenchmark:create database/table and insert data
- # self.test_case3()
- # tdLog.debug(" LIMIT test_case3 ............ [OK]")
+ # taosBenchmark:create database/table and insert data
+ self.test_case3()
+ tdLog.debug(" LIMIT test_case3 ............ [OK]")
# # test qnode
diff --git a/tests/system-test/1-insert/manyVgroups.json b/tests/system-test/1-insert/manyVgroups.json
index 1c9aa1f28cb0d1eba5b2cf9488dc9d5be2d3f7c2..5dea41476c8cf7777b5a548f470577e03c576663 100644
--- a/tests/system-test/1-insert/manyVgroups.json
+++ b/tests/system-test/1-insert/manyVgroups.json
@@ -10,7 +10,7 @@
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
- "interlace_rows": 100000,
+ "interlace_rows": 0,
"num_of_records_per_req": 100,
"databases": [
{
@@ -29,8 +29,8 @@
"batch_create_tbl_num": 50000,
"data_source": "rand",
"insert_mode": "taosc",
- "insert_rows": 10,
- "interlace_rows": 100000,
+ "insert_rows": 1,
+ "interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 10000000,
"disorder_ratio": 0,
diff --git a/tests/system-test/1-insert/opentsdb_json_taosc_insert.py b/tests/system-test/1-insert/opentsdb_json_taosc_insert.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9bc5bbaf421394cf936bb4aaa031649a4ffa8f5
--- /dev/null
+++ b/tests/system-test/1-insert/opentsdb_json_taosc_insert.py
@@ -0,0 +1,1788 @@
+###################################################################
+# Copyright (c) 2021 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import traceback
+import random
+from taos.error import SchemalessError
+import time
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.common import tdCom
+from util.types import TDSmlProtocolType
+import threading
+import json
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+ self._conn = conn
+ self.defaultJSONStrType_value = "NCHAR"
+
+ def createDb(self, name="test", db_update_tag=0, protocol=None):
+ if protocol == "telnet-tcp":
+ name = "opentsdb_telnet"
+
+ if db_update_tag == 0:
+ tdSql.execute(f"drop database if exists {name}")
+ tdSql.execute(f"create database if not exists {name} precision 'ms' schemaless 1")
+ else:
+ tdSql.execute(f"drop database if exists {name}")
+ tdSql.execute(f"create database if not exists {name} precision 'ms' update 1 schemaless 1")
+ tdSql.execute(f'use {name}')
+
+ def timeTrans(self, time_value):
+ if type(time_value) is int:
+ if time_value != 0:
+ if len(str(time_value)) == 13:
+ ts = int(time_value)/1000
+ elif len(str(time_value)) == 10:
+ ts = int(time_value)/1
+ else:
+ ts = time_value/1000000
+ else:
+ ts = time.time()
+ elif type(time_value) is dict:
+ if time_value["type"].lower() == "ns":
+ ts = time_value["value"]/1000000000
+ elif time_value["type"].lower() == "us":
+ ts = time_value["value"]/1000000
+ elif time_value["type"].lower() == "ms":
+ ts = time_value["value"]/1000
+ elif time_value["type"].lower() == "s":
+ ts = time_value["value"]/1
+ else:
+ ts = time_value["value"]/1000000
+ ulsec = repr(ts).split('.')[1][:6]
+ if len(ulsec) < 6 and int(ulsec) != 0:
+ ulsec = int(ulsec) * (10 ** (6 - len(ulsec)))
+ elif int(ulsec) == 0:
+ ulsec *= 6
+ # * follow two rows added for tsCheckCase
+ td_ts = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(ts))
+ return td_ts
+ #td_ts = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(ts))
+ td_ts = time.strftime("%Y-%m-%d %H:%M:%S.{}".format(ulsec), time.localtime(ts))
+ return td_ts
+
+ def dateToTs(self, datetime_input):
+ return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f")))
+
+ def typeTrans(self, type_list):
+ type_num_list = []
+ for tp in type_list:
+ if type(tp) is dict:
+ tp = tp['type']
+ if tp.upper() == "TIMESTAMP":
+ type_num_list.append(9)
+ elif tp.upper() == "BOOL":
+ type_num_list.append(1)
+ elif tp.upper() == "TINYINT":
+ type_num_list.append(2)
+ elif tp.upper() == "SMALLINT":
+ type_num_list.append(3)
+ elif tp.upper() == "INT":
+ type_num_list.append(4)
+ elif tp.upper() == "BIGINT":
+ type_num_list.append(5)
+ elif tp.upper() == "FLOAT":
+ type_num_list.append(6)
+ elif tp.upper() == "DOUBLE":
+ type_num_list.append(7)
+ elif tp.upper() == "VARCHAR":
+ type_num_list.append(8)
+ elif tp.upper() == "NCHAR":
+ type_num_list.append(10)
+ elif tp.upper() == "BIGINT UNSIGNED":
+ type_num_list.append(14)
+ return type_num_list
+
+ def inputHandle(self, input_json):
+ stb_name = input_json["metric"]
+ stb_tag_dict = input_json["tags"]
+ stb_col_dict = input_json["value"]
+ ts_value = self.timeTrans(input_json["timestamp"])
+ tag_name_list = []
+ tag_value_list = []
+ td_tag_value_list = []
+ td_tag_type_list = []
+
+ col_name_list = []
+ col_value_list = []
+ td_col_value_list = []
+ td_col_type_list = []
+
+ # handle tag
+ for key,value in stb_tag_dict.items():
+ if "id" == key.lower():
+ tb_name = value
+ else:
+ if type(value) is dict:
+ tag_value_list.append(str(value["value"]))
+ td_tag_value_list.append(str(value["value"]))
+ tag_name_list.append(key.lower())
+ if value["type"].lower() == "binary":
+ td_tag_type_list.append("VARCHAR")
+ else:
+ td_tag_type_list.append(value["type"].upper())
+ tb_name = ""
+ else:
+ tag_value_list.append(str(value))
+ # td_tag_value_list.append(str(value))
+ tag_name_list.append(key.lower())
+ tb_name = ""
+
+ if type(value) is bool:
+ td_tag_type_list.append("BOOL")
+ td_tag_value_list.append(str(value))
+ elif type(value) is int:
+ td_tag_type_list.append("DOUBLE")
+ td_tag_value_list.append(str(float(value)))
+ elif type(value) is float:
+ td_tag_type_list.append("DOUBLE")
+ td_tag_value_list.append(str(float(value)))
+ elif type(value) is str:
+ if self.defaultJSONStrType_value == "NCHAR":
+ td_tag_type_list.append("NCHAR")
+ td_tag_value_list.append(str(value))
+ else:
+ td_tag_type_list.append("VARCHAR")
+ td_tag_value_list.append(str(value))
+
+ # handle col
+ if type(stb_col_dict) is dict:
+ if stb_col_dict["type"].lower() == "bool":
+ bool_value = f'{stb_col_dict["value"]}'
+ col_value_list.append(bool_value)
+ td_col_type_list.append(stb_col_dict["type"].upper())
+ col_name_list.append("_value")
+ td_col_value_list.append(str(stb_col_dict["value"]))
+ else:
+ col_value_list.append(stb_col_dict["value"])
+ if stb_col_dict["type"].lower() == "binary":
+ td_col_type_list.append("VARCHAR")
+ else:
+ td_col_type_list.append(stb_col_dict["type"].upper())
+ col_name_list.append("_value")
+ td_col_value_list.append(str(stb_col_dict["value"]))
+ else:
+ col_name_list.append("_value")
+ col_value_list.append(str(stb_col_dict))
+ # td_col_value_list.append(str(stb_col_dict))
+ if type(stb_col_dict) is bool:
+ td_col_type_list.append("BOOL")
+ td_col_value_list.append(str(stb_col_dict))
+ elif type(stb_col_dict) is int:
+ td_col_type_list.append("DOUBLE")
+ td_col_value_list.append(str(float(stb_col_dict)))
+ elif type(stb_col_dict) is float:
+ td_col_type_list.append("DOUBLE")
+ td_col_value_list.append(str(float(stb_col_dict)))
+ elif type(stb_col_dict) is str:
+ if self.defaultJSONStrType_value == "NCHAR":
+ td_col_type_list.append("NCHAR")
+ td_col_value_list.append(str(stb_col_dict))
+ else:
+ td_col_type_list.append("VARCHAR")
+ td_col_value_list.append(str(stb_col_dict))
+
+ final_field_list = []
+ final_field_list.extend(col_name_list)
+ final_field_list.extend(tag_name_list)
+
+ final_type_list = []
+ final_type_list.append("TIMESTAMP")
+ final_type_list.extend(td_col_type_list)
+ final_type_list.extend(td_tag_type_list)
+ final_type_list = self.typeTrans(final_type_list)
+
+ final_value_list = []
+ final_value_list.append(ts_value)
+ final_value_list.extend(td_col_value_list)
+ final_value_list.extend(td_tag_value_list)
+ return final_value_list, final_field_list, final_type_list, stb_name, tb_name
+
+ def genTsColValue(self, value, t_type=None, value_type="obj"):
+ if value_type == "obj":
+ if t_type == None:
+ ts_col_value = value
+ else:
+ ts_col_value = {"value": value, "type": t_type}
+ elif value_type == "default":
+ ts_col_value = value
+ return ts_col_value
+
+ def genTagValue(self, t0_type="bool", t0_value="", t1_type="tinyint", t1_value=127, t2_type="smallint", t2_value=32767,
+ t3_type="int", t3_value=2147483647, t4_type="bigint", t4_value=9223372036854775807,
+ t5_type="float", t5_value=11.12345027923584, t6_type="double", t6_value=22.123456789,
+ t7_type="binary", t7_value="binaryTagValue", t8_type="nchar", t8_value="ncharTagValue", value_type="obj"):
+ if t0_value == "":
+ t0_value = random.choice([True, False])
+ if value_type == "obj":
+ tag_value = {
+ "t0": {"value": t0_value, "type": t0_type},
+ "t1": {"value": t1_value, "type": t1_type},
+ "t2": {"value": t2_value, "type": t2_type},
+ "t3": {"value": t3_value, "type": t3_type},
+ "t4": {"value": t4_value, "type": t4_type},
+ "t5": {"value": t5_value, "type": t5_type},
+ "t6": {"value": t6_value, "type": t6_type},
+ "t7": {"value": t7_value, "type": t7_type},
+ "t8": {"value": t8_value, "type": t8_type}
+ }
+ elif value_type == "default":
+ # t5_value = t6_value
+ tag_value = {
+ "t0": t0_value,
+ "t1": t1_value,
+ "t2": t2_value,
+ "t3": t3_value,
+ "t4": t4_value,
+ "t5": t5_value,
+ "t6": t6_value,
+ "t7": t7_value,
+ "t8": t8_value
+ }
+ return tag_value
+
+ def genFullTypeJson(self, ts_value="", col_value="", tag_value="", stb_name="", tb_name="",
+ id_noexist_tag=None, id_change_tag=None, id_upper_tag=None, id_mixul_tag=None, id_double_tag=None,
+ t_add_tag=None, t_mul_tag=None, c_multi_tag=None, c_blank_tag=None, t_blank_tag=None,
+ chinese_tag=None, multi_field_tag=None, point_trans_tag=None, value_type="obj"):
+ if value_type == "obj":
+ if stb_name == "":
+ stb_name = tdCom.getLongName(6, "letters")
+ if tb_name == "":
+ tb_name = f'{stb_name}_{random.randint(0, 65535)}_{random.randint(0, 65535)}'
+ if ts_value == "":
+ ts_value = self.genTsColValue(1626006833639000000, "ns")
+ if col_value == "":
+ col_value = self.genTsColValue(random.choice([True, False]), "bool")
+ if tag_value == "":
+ tag_value = self.genTagValue()
+ # if id_upper_tag is not None:
+ # id = "ID"
+ # else:
+ # id = "id"
+ # if id_mixul_tag is not None:
+ # id = random.choice(["iD", "Id"])
+ # else:
+ # id = "id"
+ # if id_noexist_tag is None:
+ # tag_value[id] = tb_name
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if id_noexist_tag is not None:
+ if t_add_tag is not None:
+ tag_value["t9"] = {"value": "ncharTagValue", "type": "nchar"}
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if id_change_tag is not None:
+ tag_value.pop('t8')
+ tag_value["t8"] = {"value": "ncharTagValue", "type": "nchar"}
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if id_double_tag is not None:
+ tag_value["ID"] = f'"{tb_name}_2"'
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if t_add_tag is not None:
+ tag_value["t10"] = {"value": "ncharTagValue", "type": "nchar"}
+ tag_value["t11"] = {"value": True, "type": "bool"}
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if t_mul_tag is not None:
+ tag_value.pop('t8')
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if c_multi_tag is not None:
+ col_value = [{"value": True, "type": "bool"}, {"value": False, "type": "bool"}]
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if t_blank_tag is not None:
+ tag_value = ""
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if chinese_tag is not None:
+ tag_value = {"t0": {"value": "涛思数据", "type": "nchar"}}
+ col_value = {"value": "涛思数据", "type": "nchar"}
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if c_blank_tag is not None:
+ sql_json.pop("value")
+ if multi_field_tag is not None:
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value, "tags2": tag_value}
+ if point_trans_tag is not None:
+ sql_json = {"metric": ".point.trans.test", "timestamp": ts_value, "value": col_value, "tags": tag_value}
+
+ elif value_type == "default":
+ if stb_name == "":
+ stb_name = tdCom.getLongName(6, "letters")
+ if tb_name == "":
+ tb_name = f'{stb_name}_{random.randint(0, 65535)}_{random.randint(0, 65535)}'
+ if ts_value == "":
+ ts_value = 1626006834
+ if col_value == "":
+ col_value = random.choice([True, False])
+ if tag_value == "":
+ tag_value = self.genTagValue(value_type=value_type)
+ # if id_upper_tag is not None:
+ # id = "ID"
+ # else:
+ # id = "id"
+ # if id_mixul_tag is not None:
+ # id = "iD"
+ # else:
+ # id = "id"
+ # if id_noexist_tag is None:
+ # tag_value[id] = tb_name
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if id_noexist_tag is not None:
+ if t_add_tag is not None:
+ tag_value["t9"] = {"value": "ncharTagValue", "type": "nchar"}
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if id_change_tag is not None:
+ tag_value.pop('t7')
+ tag_value["t7"] = {"value": "ncharTagValue", "type": "nchar"}
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if id_double_tag is not None:
+ tag_value["ID"] = f'"{tb_name}_2"'
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if t_add_tag is not None:
+ tag_value["t10"] = {"value": "ncharTagValue", "type": "nchar"}
+ tag_value["t11"] = True
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if t_mul_tag is not None:
+ tag_value.pop('t7')
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if c_multi_tag is not None:
+ col_value = True,False
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if t_blank_tag is not None:
+ tag_value = ""
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if c_blank_tag is not None:
+ sql_json.pop("value")
+ if multi_field_tag is not None:
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value, "tags2": tag_value}
+ if point_trans_tag is not None:
+ sql_json = {"metric": ".point.trans.test", "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ return sql_json, stb_name
+
+ def genMulTagColDict(self, genType, count=1, value_type="obj"):
+ """
+ genType must be tag/col
+ """
+ tag_dict = dict()
+ col_dict = dict()
+ if value_type == "obj":
+ if genType == "tag":
+ for i in range(0, count):
+ tag_dict[f't{i}'] = {'value': True, 'type': 'bool'}
+ return tag_dict
+ if genType == "col":
+ col_dict = {'value': True, 'type': 'bool'}
+ return col_dict
+ elif value_type == "default":
+ if genType == "tag":
+ for i in range(0, count):
+ tag_dict[f't{i}'] = True
+ return tag_dict
+ if genType == "col":
+ col_dict = True
+ return col_dict
+
+ def genLongJson(self, tag_count, value_type="obj"):
+ stb_name = tdCom.getLongName(7, mode="letters")
+ # tb_name = f'{stb_name}_1'
+ tag_dict = self.genMulTagColDict("tag", tag_count, value_type)
+ col_dict = self.genMulTagColDict("col", 1, value_type)
+ # tag_dict["id"] = tb_name
+ ts_dict = {'value': 1626006833639000000, 'type': 'ns'}
+ long_json = {"metric": stb_name, "timestamp": ts_dict, "value": col_dict, "tags": tag_dict}
+ return long_json, stb_name
+
+ def getNoIdTbName(self, stb_name):
+ query_sql = f"select tbname from {stb_name}"
+ tb_name = self.resHandle(query_sql, True)[0][0]
+ return tb_name
+
+ def resHandle(self, query_sql, query_tag):
+ tdSql.execute('reset query cache')
+ row_info = tdSql.query(query_sql, query_tag)
+ col_info = tdSql.getColNameList(query_sql, query_tag)
+ res_row_list = []
+ sub_list = []
+ for row_mem in row_info:
+ for i in row_mem:
+ if "11.1234" in str(i) and str(i) != "11.12345f32" and str(i) != "11.12345027923584F32":
+ sub_list.append("11.12345027923584")
+ elif "22.1234" in str(i) and str(i) != "22.123456789f64" and str(i) != "22.123456789F64":
+ sub_list.append("22.123456789")
+ else:
+ sub_list.append(str(i))
+ res_row_list.append(sub_list)
+ res_field_list_without_ts = col_info[0][1:]
+ res_type_list = col_info[1]
+ return res_row_list, res_field_list_without_ts, res_type_list
+
+ def resCmp(self, input_json, stb_name, query_sql="select * from", condition="", ts=None, id=True, none_check_tag=None, none_type_check=None):
+ expect_list = self.inputHandle(input_json)
+ print("----", json.dumps(input_json))
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ print("!!!!!----", json.dumps(input_json))
+ query_sql = f"{query_sql} {stb_name} {condition}"
+ res_row_list, res_field_list_without_ts, res_type_list = self.resHandle(query_sql, True)
+ if ts == 0:
+ res_ts = self.dateToTs(res_row_list[0][0])
+ current_time = time.time()
+ if current_time - res_ts < 60:
+ tdSql.checkEqual(res_row_list[0][1:], expect_list[0][1:])
+ else:
+ print("timeout")
+ tdSql.checkEqual(res_row_list[0], expect_list[0])
+ else:
+ if none_check_tag is not None:
+ none_index_list = [i for i,x in enumerate(res_row_list[0]) if x=="None"]
+ none_index_list.reverse()
+ for j in none_index_list:
+ res_row_list[0].pop(j)
+ expect_list[0].pop(j)
+ tdSql.checkEqual(sorted(res_row_list[0]), sorted(expect_list[0]))
+ tdSql.checkEqual(sorted(res_field_list_without_ts), sorted(expect_list[1]))
+ tdSql.checkEqual(res_type_list, expect_list[2])
+
+ def initCheckCase(self, value_type="obj"):
+ """
+ normal tags and cols, one for every elm
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json, stb_name = self.genFullTypeJson(value_type=value_type)
+ self.resCmp(input_json, stb_name)
+
+ def boolTypeCheckCase(self):
+ """
+ check all normal type
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ full_type_list = ["f", "F", "false", "False", "t", "T", "true", "True"]
+ for t_type in full_type_list:
+ input_json_list = [self.genFullTypeJson(tag_value=self.genTagValue(t0_value=t_type))[0],
+ self.genFullTypeJson(col_value=self.genTsColValue(value=t_type, t_type="bool"))[0]]
+ for input_json in input_json_list:
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def symbolsCheckCase(self, value_type="obj"):
+ """
+ check symbols = `~!@#$%^&*()_-+={[}]\|:;'\",<.>/?
+ """
+ '''
+ please test :
+ binary_symbols = '\"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"\'\'"\"'
+ '''
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ binary_symbols = '"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"'
+ nchar_symbols = binary_symbols
+ input_sql1, stb_name1 = self.genFullTypeJson(col_value=self.genTsColValue(value=binary_symbols, t_type="binary", value_type=value_type),
+ tag_value=self.genTagValue(t7_value=binary_symbols, t8_value=nchar_symbols, value_type=value_type))
+ input_sql2, stb_name2 = self.genFullTypeJson(col_value=self.genTsColValue(value=nchar_symbols, t_type="nchar", value_type=value_type),
+ tag_value=self.genTagValue(t7_value=binary_symbols, t8_value=nchar_symbols, value_type=value_type))
+ self.resCmp(input_sql1, stb_name1)
+ self.resCmp(input_sql2, stb_name2)
+
+ def tsCheckCase(self, value_type="obj"):
+ """
+ test ts list --> ["1626006833639000000ns", "1626006833639019us", "1626006833640ms", "1626006834s", "1626006822639022"]
+ # ! us级时间戳都为0时,数据库中查询显示,但python接口拿到的结果不显示 .000000的情况请确认,目前修改时间处理代码可以通过
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ ts_list = ["1626006833639000000ns", "1626006833639019us", "1626006833640ms", "1626006834s", "1626006834", 0]
+ for ts in ts_list:
+ if "s" in str(ts):
+ input_json, stb_name = self.genFullTypeJson(ts_value=self.genTsColValue(value=int(tdCom.splitNumLetter(ts)[0]), t_type=tdCom.splitNumLetter(ts)[1]))
+ self.resCmp(input_json, stb_name, ts=ts)
+ else:
+ input_json, stb_name = self.genFullTypeJson(ts_value=self.genTsColValue(value=int(ts), t_type="s", value_type=value_type))
+ self.resCmp(input_json, stb_name, ts=ts)
+ if int(ts) == 0:
+ if value_type == "obj":
+ input_json_list = [self.genFullTypeJson(ts_value=self.genTsColValue(value=int(ts), t_type="")),
+ self.genFullTypeJson(ts_value=self.genTsColValue(value=int(ts), t_type="ns")),
+ self.genFullTypeJson(ts_value=self.genTsColValue(value=int(ts), t_type="us")),
+ self.genFullTypeJson(ts_value=self.genTsColValue(value=int(ts), t_type="ms")),
+ self.genFullTypeJson(ts_value=self.genTsColValue(value=int(ts), t_type="s"))]
+ elif value_type == "default":
+ input_json_list = [self.genFullTypeJson(ts_value=self.genTsColValue(value=int(ts), value_type=value_type))]
+ for input_json in input_json_list:
+ self.resCmp(input_json[0], input_json[1], ts=ts)
+ else:
+ input_json = self.genFullTypeJson(ts_value=self.genTsColValue(value=int(ts), t_type=""))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+ # check result
+ #! bug
+ tdSql.execute(f"drop database if exists test_ts")
+ tdSql.execute(f"create database if not exists test_ts precision 'ms' schemaless 1")
+ tdSql.execute("use test_ts")
+ input_json = [{"metric": "test_ms", "timestamp": {"value": 1626006833640, "type": "ms"}, "value": True, "tags": {"t0": True}},
+ {"metric": "test_ms", "timestamp": {"value": 1626006833641, "type": "ms"}, "value": False, "tags": {"t0": True}}]
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ res = tdSql.query('select * from test_ms', True)
+ tdSql.checkEqual(str(res[0][0]), "2021-07-11 20:33:53.640000")
+ tdSql.checkEqual(str(res[1][0]), "2021-07-11 20:33:53.641000")
+
+ tdSql.execute(f"drop database if exists test_ts")
+ tdSql.execute(f"create database if not exists test_ts precision 'us' schemaless 1")
+ tdSql.execute("use test_ts")
+ input_json = [{"metric": "test_us", "timestamp": {"value": 1626006833639000, "type": "us"}, "value": True, "tags": {"t0": True}},
+ {"metric": "test_us", "timestamp": {"value": 1626006833639001, "type": "us"}, "value": False, "tags": {"t0": True}}]
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ res = tdSql.query('select * from test_us', True)
+ tdSql.checkEqual(str(res[0][0]), "2021-07-11 20:33:53.639000")
+ tdSql.checkEqual(str(res[1][0]), "2021-07-11 20:33:53.639001")
+
+ tdSql.execute(f"drop database if exists test_ts")
+ tdSql.execute(f"create database if not exists test_ts precision 'ns' schemaless 1")
+ tdSql.execute("use test_ts")
+ input_json = [{"metric": "test_ns", "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": True, "tags": {"t0": True}},
+ {"metric": "test_ns", "timestamp": {"value": 1626006833639000001, "type": "ns"}, "value": False, "tags": {"t0": True}}]
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ res = tdSql.query('select * from test_ns', True)
+ tdSql.checkEqual(str(res[0][0]), "1626006833639000000")
+ tdSql.checkEqual(str(res[1][0]), "1626006833639000001")
+ self.createDb()
+
+ def idSeqCheckCase(self, value_type="obj"):
+ """
+ check id.index in tags
+ eg: t0=**,id=**,t1=**
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json, stb_name = self.genFullTypeJson(id_change_tag=True, value_type=value_type)
+ self.resCmp(input_json, stb_name)
+
+ def idLetterCheckCase(self, value_type="obj"):
+ """
+ check id param
+ eg: id and ID
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json, stb_name = self.genFullTypeJson(id_upper_tag=True, value_type=value_type)
+ self.resCmp(input_json, stb_name)
+ input_json, stb_name = self.genFullTypeJson(id_mixul_tag=True, value_type=value_type)
+ self.resCmp(input_json, stb_name)
+ input_json, stb_name = self.genFullTypeJson(id_change_tag=True, id_upper_tag=True, value_type=value_type)
+ self.resCmp(input_json, stb_name)
+
+ def noIdCheckCase(self, value_type="obj"):
+ """
+ id not exist
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json, stb_name = self.genFullTypeJson(id_noexist_tag=True, value_type=value_type)
+ self.resCmp(input_json, stb_name)
+ query_sql = f"select tbname from {stb_name}"
+ res_row_list = self.resHandle(query_sql, True)[0]
+ if len(res_row_list[0][0]) > 0:
+ tdSql.checkColNameList(res_row_list, res_row_list)
+ else:
+ tdSql.checkColNameList(res_row_list, "please check noIdCheckCase")
+
+ def maxColTagCheckCase(self, value_type="obj"):
+ """
+ max tag count is 128
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ for input_json in [self.genLongJson(128, value_type)[0]]:
+ tdCom.cleanTb()
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ for input_json in [self.genLongJson(129, value_type)[0]]:
+ tdCom.cleanTb()
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def idIllegalNameCheckCase(self, value_type="obj"):
+ """
+ test illegal id name
+ mix "`~!@#$¥%^&*()-+={}|[]、「」【】\:;《》<>?"
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ rstr = list("`~!@#$¥%^&*()-+={}|[]、「」【】\:;《》<>?")
+ for i in rstr:
+ input_json = self.genFullTypeJson(tb_name=f'aa{i}bb', value_type=value_type)[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def idStartWithNumCheckCase(self, value_type="obj"):
+ """
+ id is start with num
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json = self.genFullTypeJson(tb_name="1aaabbb", value_type=value_type)[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def nowTsCheckCase(self, value_type="obj"):
+ """
+ check now unsupported
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json = self.genFullTypeJson(ts_value=self.genTsColValue(value="now", t_type="ns", value_type=value_type))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def dateFormatTsCheckCase(self, value_type="obj"):
+ """
+ check date format ts unsupported
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json = self.genFullTypeJson(ts_value=self.genTsColValue(value="2021-07-21\ 19:01:46.920", t_type="ns", value_type=value_type))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def illegalTsCheckCase(self, value_type="obj"):
+ """
+ check ts format like 16260068336390us19
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json = self.genFullTypeJson(ts_value=self.genTsColValue(value="16260068336390us19", t_type="us", value_type=value_type))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def tbnameCheckCase(self, value_type="obj"):
+ """
+ check length 192
+ check upper tbname
+ chech upper tag
+ length of stb_name tb_name <= 192
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tdSql.execute('reset query cache')
+ stb_name_192 = tdCom.getLongName(len=192, mode="letters")
+ tb_name_192 = tdCom.getLongName(len=192, mode="letters")
+ input_json, stb_name = self.genFullTypeJson(stb_name=stb_name_192, tb_name=tb_name_192, value_type=value_type)
+ self.resCmp(input_json, stb_name)
+ tdSql.query(f'select * from {stb_name}')
+ tdSql.checkRows(1)
+ for input_json in [self.genFullTypeJson(stb_name=tdCom.getLongName(len=193, mode="letters"), tb_name=tdCom.getLongName(len=5, mode="letters"), value_type=value_type)[0]]:
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+ stbname = tdCom.getLongName(len=10, mode="letters")
+ input_json = {'metric': f'A{stbname}', 'timestamp': {'value': 1626006833639000000, 'type': 'ns'}, 'value': {'value': False, 'type': 'bool'}, 'tags': {'t1': {'value': 127, 'type': 'tinyint'}, "t2": 127}}
+ stb_name = f'`A{stbname}`'
+ self.resCmp(input_json, stb_name)
+ tdSql.execute(f"drop table {stb_name}")
+
+ def tagNameLengthCheckCase(self):
+ """
+ check tag name limit <= 62
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tag_name = tdCom.getLongName(61, "letters")
+ tag_name = f't{tag_name}'
+ stb_name = tdCom.getLongName(7, "letters")
+ input_json = {'metric': stb_name, 'timestamp': {'value': 1626006833639000000, 'type': 'ns'}, 'value': "bcdaaa", 'tags': {tag_name: {'value': False, 'type': 'bool'}}}
+ self.resCmp(input_json, stb_name)
+ input_json = {'metric': stb_name, 'timestamp': {'value': 1626006833639000001, 'type': 'ns'}, 'value': "bcdaaaa", 'tags': {tdCom.getLongName(65, "letters"): {'value': False, 'type': 'bool'}}}
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def tagValueLengthCheckCase(self, value_type="obj"):
+ """
+ check full type tag value limit
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ # i8
+ for t1 in [-127, 127]:
+ input_json, stb_name = self.genFullTypeJson(tag_value=self.genTagValue(t1_value=t1, value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ for t1 in [-128, 128]:
+ input_json = self.genFullTypeJson(tag_value=self.genTagValue(t1_value=t1))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ #i16
+ for t2 in [-32767, 32767]:
+ input_json, stb_name = self.genFullTypeJson(tag_value=self.genTagValue(t2_value=t2, value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ for t2 in [-32768, 32768]:
+ input_json = self.genFullTypeJson(tag_value=self.genTagValue(t2_value=t2))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ #i32
+ for t3 in [-2147483647, 2147483647]:
+ input_json, stb_name = self.genFullTypeJson(tag_value=self.genTagValue(t3_value=t3, value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ for t3 in [-2147483648, 2147483648]:
+ input_json = self.genFullTypeJson(tag_value=self.genTagValue(t3_value=t3))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ #i64
+ for t4 in [-9223372036854775807, 9223372036854775807]:
+ input_json, stb_name = self.genFullTypeJson(tag_value=self.genTagValue(t4_value=t4, value_type=value_type))
+ self.resCmp(input_json, stb_name)
+
+ for t4 in [-9223372036854775808, 9223372036854775808]:
+ input_json = self.genFullTypeJson(tag_value=self.genTagValue(t4_value=t4))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # f32
+ for t5 in [-3.4028234663852885981170418348451692544*(10**38), 3.4028234663852885981170418348451692544*(10**38)]:
+ input_json, stb_name = self.genFullTypeJson(tag_value=self.genTagValue(t5_value=t5, value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ # * limit set to 3.4028234664*(10**38)
+ for t5 in [-3.4028234664*(10**38), 3.4028234664*(10**38)]:
+ input_json = self.genFullTypeJson(tag_value=self.genTagValue(t5_value=t5))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # f64
+ for t6 in [-1.79769*(10**308), -1.79769*(10**308)]:
+ input_json, stb_name = self.genFullTypeJson(tag_value=self.genTagValue(t6_value=t6, value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ for t6 in [float(-1.797693134862316*(10**308)), -1.797693134862316*(10**308)]:
+ input_json = self.genFullTypeJson(tag_value=self.genTagValue(t6_value=t6, value_type=value_type))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ if value_type == "obj":
+ # binary
+ stb_name = tdCom.getLongName(7, "letters")
+ input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': True, 'type': 'bool'}, "tags": {"t0": {'value': True, 'type': 'bool'}, "t1":{'value': tdCom.getLongName(16374, "letters"), 'type': 'binary'}}}
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': True, 'type': 'bool'}, "tags": {"t0": {'value': True, 'type': 'bool'}, "t1":{'value': tdCom.getLongName(16375, "letters"), 'type': 'binary'}}}
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # # nchar
+ # # * legal nchar could not be larger than 16374/4
+ stb_name = tdCom.getLongName(7, "letters")
+ input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': True, 'type': 'bool'}, "tags": {"t0": {'value': True, 'type': 'bool'}, "t1":{'value': tdCom.getLongName(4093, "letters"), 'type': 'nchar'}}}
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+
+ input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': True, 'type': 'bool'}, "tags": {"t0": {'value': True, 'type': 'bool'}, "t1":{'value': tdCom.getLongName(4094, "letters"), 'type': 'nchar'}}}
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+ elif value_type == "default":
+ stb_name = tdCom.getLongName(7, "letters")
+ if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary":
+ input_json = {"metric": stb_name, "timestamp": 1626006834, "value": True, "tags": {"t0": {'value': True, 'type': 'bool'}, "t1": tdCom.getLongName(16374, "letters")}}
+ elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar":
+ input_json = {"metric": stb_name, "timestamp": 1626006834, "value": True, "tags": {"t0": {'value': True, 'type': 'bool'}, "t1": tdCom.getLongName(4093, "letters")}}
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary":
+ input_json = {"metric": stb_name, "timestamp": 1626006834, "value": True, "tags": {"t0": {'value': True, 'type': 'bool'}, "t1": tdCom.getLongName(16375, "letters")}}
+ elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar":
+ input_json = {"metric": stb_name, "timestamp": 1626006834, "value": True, "tags": {"t0": {'value': True, 'type': 'bool'}, "t1": tdCom.getLongName(4094, "letters")}}
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def colValueLengthCheckCase(self, value_type="obj"):
+ """
+ check full type col value limit
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ # i8
+ for value in [-128, 127]:
+ input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="tinyint", value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ tdCom.cleanTb()
+ for value in [-129, 128]:
+ input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="tinyint"))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+ # i16
+ tdCom.cleanTb()
+ for value in [-32768]:
+ input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="smallint", value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ tdCom.cleanTb()
+ for value in [-32769, 32768]:
+ input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="smallint"))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # i32
+ tdCom.cleanTb()
+ for value in [-2147483648]:
+ input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="int", value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ tdCom.cleanTb()
+ for value in [-2147483649, 2147483648]:
+ input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="int"))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # i64
+ tdCom.cleanTb()
+ for value in [-9223372036854775808]:
+ input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="bigint", value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ # ! bug
+ # tdCom.cleanTb()
+ # for value in [-9223372036854775809, 9223372036854775808]:
+ # print(value)
+ # input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="bigint"))[0]
+ # print(json.dumps(input_json))
+ # try:
+ # self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ # raise Exception("should not reach here")
+ # except SchemalessError as err:
+ # tdSql.checkNotEqual(err.errno, 0)
+
+ # f32
+ tdCom.cleanTb()
+ for value in [-3.4028234663852885981170418348451692544*(10**38), 3.4028234663852885981170418348451692544*(10**38)]:
+ input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="float", value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ # * limit set to 4028234664*(10**38)
+ tdCom.cleanTb()
+ for value in [-3.4028234664*(10**38), 3.4028234664*(10**38)]:
+ input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="float"))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # f64
+ tdCom.cleanTb()
+ for value in [-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308), -1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)]:
+ input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="double", value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ # * limit set to 1.797693134862316*(10**308)
+ tdCom.cleanTb()
+ for value in [-1.797693134862316*(10**308), -1.797693134862316*(10**308)]:
+ input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="double", value_type=value_type))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # if value_type == "obj":
+ # # binary
+ # tdCom.cleanTb()
+ # stb_name = tdCom.getLongName(7, "letters")
+ # input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(16374, "letters"), 'type': 'binary'}, "tags": {"t0": {'value': True, 'type': 'bool'}}}
+ # self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+
+ # tdCom.cleanTb()
+ # input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(16375, "letters"), 'type': 'binary'}, "tags": {"t0": {'value': True, 'type': 'bool'}}}
+ # try:
+ # self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ # raise Exception("should not reach here")
+ # except SchemalessError as err:
+ # tdSql.checkNotEqual(err.errno, 0)
+
+ # # nchar
+ # # * legal nchar could not be larger than 16374/4
+ # tdCom.cleanTb()
+ # stb_name = tdCom.getLongName(7, "letters")
+ # input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(4093, "letters"), 'type': 'nchar'}, "tags": {"t0": {'value': True, 'type': 'bool'}}}
+ # self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+
+ # tdCom.cleanTb()
+ # input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(4094, "letters"), 'type': 'nchar'}, "tags": {"t0": {'value': True, 'type': 'bool'}}}
+ # try:
+ # self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ # raise Exception("should not reach here")
+ # except SchemalessError as err:
+ # tdSql.checkNotEqual(err.errno, 0)
+ # elif value_type == "default":
+ # # binary
+ # tdCom.cleanTb()
+ # stb_name = tdCom.getLongName(7, "letters")
+ # if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary":
+ # input_json = {"metric": stb_name, "timestamp": 1626006834, "value": tdCom.getLongName(16374, "letters"), "tags": {"t0": {'value': True, 'type': 'bool'}}}
+ # elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar":
+ # input_json = {"metric": stb_name, "timestamp": 1626006834, "value": tdCom.getLongName(4093, "letters"), "tags": {"t0": {'value': True, 'type': 'bool'}}}
+ # self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ # tdCom.cleanTb()
+ # if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary":
+ # input_json = {"metric": stb_name, "timestamp": 1626006834, "value": tdCom.getLongName(16375, "letters"), "tags": {"t0": {'value': True, 'type': 'bool'}}}
+ # elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar":
+ # input_json = {"metric": stb_name, "timestamp": 1626006834, "value": tdCom.getLongName(4094, "letters"), "tags": {"t0": {'value': True, 'type': 'bool'}}}
+ # try:
+ # self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ # raise Exception("should not reach here")
+ # except SchemalessError as err:
+ # tdSql.checkNotEqual(err.errno, 0)
+
+ def tagColIllegalValueCheckCase(self, value_type="obj"):
+
+ """
+ test illegal tag col value
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ # bool
+ for i in ["TrUe", "tRue", "trUe", "truE", "FalsE", "fAlse", "faLse", "falSe", "falsE"]:
+ try:
+ input_json1 = self.genFullTypeJson(tag_value=self.genTagValue(t0_value=i))[0]
+ self._conn.schemaless_insert([json.dumps(input_json1)], 2, None)
+ input_json2 = self.genFullTypeJson(col_value=self.genTsColValue(value=i, t_type="bool"))[0]
+ self._conn.schemaless_insert([json.dumps(input_json2)], 2, None)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # i8 i16 i32 i64 f32 f64
+ for input_json in [
+ self.genFullTypeJson(tag_value=self.genTagValue(t1_value="1s2"))[0],
+ self.genFullTypeJson(tag_value=self.genTagValue(t2_value="1s2"))[0],
+ self.genFullTypeJson(tag_value=self.genTagValue(t3_value="1s2"))[0],
+ self.genFullTypeJson(tag_value=self.genTagValue(t4_value="1s2"))[0],
+ self.genFullTypeJson(tag_value=self.genTagValue(t5_value="11.1s45"))[0],
+ self.genFullTypeJson(tag_value=self.genTagValue(t6_value="11.1s45"))[0],
+ ]:
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # check binary and nchar blank
+ input_sql1 = self.genFullTypeJson(col_value=self.genTsColValue(value="abc aaa", t_type="binary", value_type=value_type))[0]
+ input_sql2 = self.genFullTypeJson(col_value=self.genTsColValue(value="abc aaa", t_type="nchar", value_type=value_type))[0]
+ input_sql3 = self.genFullTypeJson(tag_value=self.genTagValue(t7_value="abc aaa", value_type=value_type))[0]
+ input_sql4 = self.genFullTypeJson(tag_value=self.genTagValue(t8_value="abc aaa", value_type=value_type))[0]
+ for input_json in [input_sql1, input_sql2, input_sql3, input_sql4]:
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # check accepted binary and nchar symbols
+ # # * ~!@#$¥%^&*()-+={}|[]、「」:;
+ for symbol in list('~!@#$¥%^&*()-+={}|[]、「」:;'):
+ input_json1 = self.genFullTypeJson(col_value=self.genTsColValue(value=f"abc{symbol}aaa", t_type="binary", value_type=value_type))[0]
+ input_json2 = self.genFullTypeJson(tag_value=self.genTagValue(t8_value=f"abc{symbol}aaa", value_type=value_type))[0]
+ self._conn.schemaless_insert([json.dumps(input_json1)], TDSmlProtocolType.JSON.value, None)
+ self._conn.schemaless_insert([json.dumps(input_json2)], TDSmlProtocolType.JSON.value, None)
+
+ def duplicateIdTagColInsertCheckCase(self, value_type="obj"):
+ """
+ check duplicate Id Tag Col
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json = self.genFullTypeJson(id_double_tag=True, value_type=value_type)[0]
+ print(input_json)
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ input_json = self.genFullTypeJson(tag_value=self.genTagValue(t5_value=11.12345027923584, t6_type="float", t6_value=22.12345027923584, value_type=value_type))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json).replace("t6", "t5")], 2, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ ##### stb exist #####
+ def noIdStbExistCheckCase(self, value_type="obj"):
+ """
+ case no id when stb exist
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json, stb_name = self.genFullTypeJson(tb_name="sub_table_0123456", col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ input_json, stb_name = self.genFullTypeJson(stb_name=stb_name, id_noexist_tag=True, col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type))
+ self.resCmp(input_json, stb_name, condition='where tbname like "t_%"')
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(1)
+
+ def duplicateInsertExistCheckCase(self, value_type="obj"):
+ """
+ check duplicate insert when stb exist
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json, stb_name = self.genFullTypeJson(value_type=value_type)
+ self.resCmp(input_json, stb_name)
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ self.resCmp(input_json, stb_name)
+
+ def tagColBinaryNcharLengthCheckCase(self, value_type="obj"):
+ """
+ check length increase
+ """
+ tdCom.cleanTb()
+ input_json, stb_name = self.genFullTypeJson(value_type=value_type)
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ self.resCmp(input_json, stb_name)
+ tb_name = tdCom.getLongName(5, "letters")
+ input_json, stb_name = self.genFullTypeJson(stb_name=stb_name, tb_name=tb_name, tag_value=self.genTagValue(t7_value="binaryTagValuebinaryTagValue", t8_value="ncharTagValuencharTagValue", value_type=value_type))
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ self.resCmp(input_json, stb_name, condition=f'where tbname like "{tb_name}"')
+
+ def lengthIcreaseCrashCheckCase(self):
+ """
+ check length increase
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ stb_name = "test_crash"
+ input_json = self.genFullTypeJson(stb_name=stb_name)[0]
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ os.system('python3 query/schemalessQueryCrash.py &')
+ time.sleep(2)
+ tb_name = tdCom.getLongName(5, "letters")
+ input_json, stb_name = self.genFullTypeJson(stb_name=stb_name, tb_name=tb_name, tag_value=self.genTagValue(t7_value="binaryTagValuebinaryTagValue", t8_value="ncharTagValuencharTagValue"))
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ time.sleep(3)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(2)
+
+ def tagColAddDupIDCheckCase(self, value_type="obj"):
+ """
+ check tag count add, stb and tb duplicate
+ * tag: alter table ...
+ * col: when update==0 and ts is same, unchange
+ * so this case tag&&value will be added,
+ * col is added without value when update==0
+ * col is added with value when update==1
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ for db_update_tag in [0, 1]:
+ if db_update_tag == 1 :
+ self.createDb("test_update", db_update_tag=db_update_tag)
+ input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ input_json, stb_name = self.genFullTypeJson(stb_name=stb_name, tb_name=tb_name, col_value=self.genTsColValue(value=False, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type), t_add_tag=True)
+ if db_update_tag == 1 :
+ self.resCmp(input_json, stb_name, condition=f'where tbname like "{tb_name}"', none_check_tag=True)
+ tdSql.query(f'select * from {stb_name} where tbname like "{tb_name}"')
+ tdSql.checkData(0, 11, None)
+ tdSql.checkData(0, 12, None)
+ else:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ tdSql.query(f'select * from {stb_name} where tbname like "{tb_name}"')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 11, None)
+ tdSql.checkData(0, 12, None)
+ self.createDb()
+
+ def tagAddCheckCase(self, value_type="obj"):
+ """
+ check tag count add
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ tb_name_1 = tdCom.getLongName(7, "letters")
+ input_json, stb_name = self.genFullTypeJson(stb_name=stb_name, tb_name=tb_name_1, col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type), t_add_tag=True)
+ self.resCmp(input_json, stb_name, condition=f'where tbname like "{tb_name_1}"')
+ res_row_list = self.resHandle(f"select t10,t11 from {tb_name}", True)[0]
+ tdSql.checkEqual(res_row_list[0], ['None', 'None'])
+ self.resCmp(input_json, stb_name, condition=f'where tbname like "{tb_name}"', none_check_tag=True)
+
+ def tagMd5Check(self, value_type="obj"):
+ """
+ condition: stb not change
+ insert two table, keep tag unchange, change col
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type), id_noexist_tag=True)
+ self.resCmp(input_json, stb_name)
+ tb_name1 = self.getNoIdTbName(stb_name)
+ input_json, stb_name = self.genFullTypeJson(stb_name=stb_name, col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type), id_noexist_tag=True)
+ self.resCmp(input_json, stb_name)
+ tb_name2 = self.getNoIdTbName(stb_name)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(1)
+ tdSql.checkEqual(tb_name1, tb_name2)
+ input_json, stb_name = self.genFullTypeJson(stb_name=stb_name, col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type), id_noexist_tag=True, t_add_tag=True)
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ tb_name3 = self.getNoIdTbName(stb_name)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(2)
+ tdSql.checkNotEqual(tb_name1, tb_name3)
+
+ # * tag binary max is 16384, col+ts binary max 49151
+ def tagColBinaryMaxLengthCheckCase(self, value_type="obj"):
+ """
+ every binary and nchar must be length+2
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ stb_name = tdCom.getLongName(7, "letters")
+ tb_name = f'{stb_name}_1'
+ tag_value = {"t0": {"value": True, "type": "bool"}}
+ tag_value["id"] = tb_name
+ col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type)
+ input_json = {"metric": stb_name, "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": col_value, "tags": tag_value}
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+
+ # * every binary and nchar must be length+2, so here is two tag, max length could not larger than 16384-2*2
+ if value_type == "obj":
+ tag_value["t1"] = {"value": tdCom.getLongName(16374, "letters"), "type": "binary"}
+ tag_value["t2"] = {"value": tdCom.getLongName(5, "letters"), "type": "binary"}
+ elif value_type == "default":
+ if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary":
+ tag_value["t1"] = tdCom.getLongName(16374, "letters")
+ tag_value["t2"] = tdCom.getLongName(5, "letters")
+ elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar":
+ tag_value["t1"] = tdCom.getLongName(4093, "letters")
+ tag_value["t2"] = tdCom.getLongName(1, "letters")
+ tag_value.pop('id')
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(2)
+ if value_type == "obj":
+ tag_value["t2"] = {"value": tdCom.getLongName(6, "letters"), "type": "binary"}
+ elif value_type == "default":
+ if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary":
+ tag_value["t2"] = tdCom.getLongName(6, "letters")
+ elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar":
+ tag_value["t2"] = tdCom.getLongName(2, "letters")
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(2)
+
+ # * tag nchar max is 16374/4, col+ts nchar max 49151
+ def tagColNcharMaxLengthCheckCase(self, value_type="obj"):
+ """
+ check nchar length limit
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ stb_name = tdCom.getLongName(7, "letters")
+ tb_name = f'{stb_name}_1'
+ tag_value = {"t0": True}
+ tag_value["id"] = tb_name
+ col_value= True
+ input_json = {"metric": stb_name, "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": col_value, "tags": tag_value}
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+
+ # * legal nchar could not be larger than 16374/4
+ if value_type == "obj":
+ tag_value["t1"] = {"value": tdCom.getLongName(4093, "letters"), "type": "nchar"}
+ tag_value["t2"] = {"value": tdCom.getLongName(1, "letters"), "type": "nchar"}
+ elif value_type == "default":
+ if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary":
+ tag_value["t1"] = tdCom.getLongName(16374, "letters")
+ tag_value["t2"] = tdCom.getLongName(5, "letters")
+ elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar":
+ tag_value["t1"] = tdCom.getLongName(4093, "letters")
+ tag_value["t2"] = tdCom.getLongName(1, "letters")
+ tag_value.pop('id')
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(2)
+ if value_type == "obj":
+ tag_value["t2"] = {"value": tdCom.getLongName(2, "letters"), "type": "binary"}
+ elif value_type == "default":
+ if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary":
+ tag_value["t2"] = tdCom.getLongName(6, "letters")
+ elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar":
+ tag_value["t2"] = tdCom.getLongName(2, "letters")
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(2)
+
+ def batchInsertCheckCase(self, value_type="obj"):
+ """
+ test batch insert
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ stb_name = "stb_name"
+ tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)')
+ input_json = [{"metric": "st123456", "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": {"value": 1, "type": "bigint"}, "tags": {"t1": {"value": 3, "type": "bigint"}, "t2": {"value": 4, "type": "double"}, "t3": {"value": "t3", "type": "binary"}}},
+ {"metric": "st123456", "timestamp": {"value": 1626006833640000000, "type": "ns"}, "value": {"value": 2, "type": "bigint"}, "tags": {"t1": {"value": 4, "type": "bigint"}, "t3": {"value": "t4", "type": "binary"}, "t2": {"value": 5, "type": "double"}, "t4": {"value": 5, "type": "double"}}},
+ {"metric": "stb_name", "timestamp": {"value": 1626056811823316532, "type": "ns"}, "value": {"value": 3, "type": "bigint"}, "tags": {"t2": {"value": 5, "type": "double"}, "t3": {"value": "ste", "type": "nchar"}}},
+ {"metric": "stf567890", "timestamp": {"value": 1626006933640000000, "type": "ns"}, "value": {"value": 4, "type": "bigint"}, "tags": {"t1": {"value": 4, "type": "bigint"}, "t3": {"value": "t4", "type": "binary"}, "t2": {"value": 5, "type": "double"}, "t4": {"value": 5, "type": "double"}}},
+ {"metric": "st123456", "timestamp": {"value": 1626006833642000000, "type": "ns"}, "value": {"value": 5, "type": "bigint"}, "tags": {"t1": {"value": 4, "type": "bigint"}, "t2": {"value": 5, "type": "double"}, "t3": {"value": "t4", "type": "binary"}}},
+ {"metric": "stb_name", "timestamp": {"value": 1626056811843316532, "type": "ns"}, "value": {"value": 6, "type": "bigint"}, "tags": {"t2": {"value": 5, "type": "double"}, "t3": {"value": "ste2", "type": "nchar"}}},
+ {"metric": "stb_name", "timestamp": {"value": 1626056812843316532, "type": "ns"}, "value": {"value": 7, "type": "bigint"}, "tags": {"t2": {"value": 5, "type": "double"}, "t3": {"value": "ste2", "type": "nchar"}}},
+ {"metric": "st123456", "timestamp": {"value": 1626006933640000000, "type": "ns"}, "value": {"value": 8, "type": "bigint"}, "tags": {"t1": {"value": 4, "type": "bigint"}, "t3": {"value": "t4", "type": "binary"}, "t2": {"value": 5, "type": "double"}, "t4": {"value": 5, "type": "double"}}},
+ {"metric": "st123456", "timestamp": {"value": 1626006933641000000, "type": "ns"}, "value": {"value": 9, "type": "bigint"}, "tags": {"t1": {"value": 4, "type": "bigint"}, "t3": {"value": "t4", "type": "binary"}, "t2": {"value": 5, "type": "double"}, "t4": {"value": 5, "type": "double"}}}]
+ if value_type != "obj":
+ input_json = [{"metric": "st123456", "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": 1, "tags": {"t1": 3, "t2": {"value": 4, "type": "double"}, "t3": {"value": "t3", "type": "binary"}}},
+ {"metric": "st123456", "timestamp": {"value": 1626006833640000000, "type": "ns"}, "value": 2, "tags": {"t1": {"value": 4, "type": "double"}, "t3": {"value": "t4", "type": "binary"}, "t2": {"value": 5, "type": "double"}, "t4": {"value": 5, "type": "double"}}},
+ {"metric": "stb_name", "timestamp": {"value": 1626056811823316532, "type": "ns"}, "value": 3, "tags": {"t2": {"value": 5, "type": "double"}, "t3": {"value": "ste", "type": "nchar"}}},
+ {"metric": "stf567890", "timestamp": {"value": 1626006933640000000, "type": "ns"}, "value": 4, "tags": {"t1": {"value": 4, "type": "bigint"}, "t3": {"value": "t4", "type": "binary"}, "t2": {"value": 5, "type": "double"}, "t4": {"value": 5, "type": "double"}}},
+ {"metric": "st123456", "timestamp": {"value": 1626006833642000000, "type": "ns"}, "value": {"value": 5, "type": "double"}, "tags": {"t1": {"value": 4, "type": "double"}, "t2": 5.0, "t3": {"value": "t4", "type": "binary"}}},
+ {"metric": "stb_name", "timestamp": {"value": 1626056811843316532, "type": "ns"}, "value": {"value": 6, "type": "double"}, "tags": {"t2": 5.0, "t3": {"value": "ste2", "type": "nchar"}}},
+ {"metric": "stb_name", "timestamp": {"value": 1626056812843316532, "type": "ns"}, "value": {"value": 7, "type": "double"}, "tags": {"t2": {"value": 5, "type": "double"}, "t3": {"value": "ste2", "type": "nchar"}}},
+ {"metric": "st123456", "timestamp": {"value": 1626006933640000000, "type": "ns"}, "value": {"value": 8, "type": "double"}, "tags": {"t1": {"value": 4, "type": "double"}, "t3": {"value": "t4", "type": "binary"}, "t2": {"value": 5, "type": "double"}, "t4": {"value": 5, "type": "double"}}},
+ {"metric": "st123456", "timestamp": {"value": 1626006933641000000, "type": "ns"}, "value": {"value": 9, "type": "double"}, "tags": {"t1": 4, "t3": {"value": "t4", "type": "binary"}, "t2": {"value": 5, "type": "double"}, "t4": {"value": 5, "type": "double"}}}]
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ tdSql.query('show stables')
+ tdSql.checkRows(3)
+ tdSql.query('show tables')
+ tdSql.checkRows(6)
+ tdSql.query('select * from st123456')
+ tdSql.checkRows(5)
+
+ def multiInsertCheckCase(self, count, value_type="obj"):
+ """
+ test multi insert
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ sql_list = list()
+ stb_name = tdCom.getLongName(8, "letters")
+ tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)')
+ for i in range(count):
+ input_json = self.genFullTypeJson(stb_name=stb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), id_noexist_tag=True)[0]
+ sql_list.append(input_json)
+ self._conn.schemaless_insert([json.dumps(sql_list)], TDSmlProtocolType.JSON.value, None)
+ tdSql.query('show tables')
+ tdSql.checkRows(count)
+
+ def batchErrorInsertCheckCase(self):
+ """
+ test batch error insert
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json = [{"metric": "st123456", "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": {"value": "tt", "type": "bool"}, "tags": {"t1": {"value": 3, "type": "bigint"}, "t2": {"value": 4, "type": "double"}, "t3": {"value": "t3", "type": "binary"}}},
+ {"metric": "st123456", "timestamp": {"value": 1626006933641000000, "type": "ns"}, "value": {"value": 9, "type": "bigint"}, "tags": {"t1": {"value": 4, "type": "bigint"}, "t3": {"value": "t4", "type": "binary"}, "t2": {"value": 5, "type": "double"}, "t4": {"value": 5, "type": "double"}}}]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def multiColsInsertCheckCase(self, value_type="obj"):
+ """
+ test multi cols insert
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json = self.genFullTypeJson(c_multi_tag=True, value_type=value_type)[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def blankColInsertCheckCase(self, value_type="obj"):
+ """
+ test blank col insert
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json = self.genFullTypeJson(c_blank_tag=True, value_type=value_type)[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def blankTagInsertCheckCase(self, value_type="obj"):
+ """
+ test blank tag insert
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json = self.genFullTypeJson(t_blank_tag=True, value_type=value_type)[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def chineseCheckCase(self):
+ """
+ check nchar ---> chinese
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json, stb_name = self.genFullTypeJson(chinese_tag=True)
+ self.resCmp(input_json, stb_name)
+
+ def multiFieldCheckCase(self, value_type="obj"):
+ '''
+ multi_field
+ '''
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json = self.genFullTypeJson(multi_field_tag=True, value_type=value_type)[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def spellCheckCase(self):
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ stb_name = tdCom.getLongName(8, "letters")
+ input_json_list = [{"metric": f'{stb_name}_1', "timestamp": {"value": 1626006833639000000, "type": "Ns"}, "value": {"value": 1, "type": "Bigint"}, "tags": {"t1": {"value": 127, "type": "tinYint"}}},
+ {"metric": f'{stb_name}_2', "timestamp": {"value": 1626006833639000001, "type": "nS"}, "value": {"value": 32767, "type": "smallInt"}, "tags": {"t1": {"value": 32767, "type": "smallInt"}}},
+ {"metric": f'{stb_name}_3', "timestamp": {"value": 1626006833639000002, "type": "NS"}, "value": {"value": 2147483647, "type": "iNt"}, "tags": {"t1": {"value": 2147483647, "type": "iNt"}}},
+ {"metric": f'{stb_name}_4', "timestamp": {"value": 1626006833639019, "type": "Us"}, "value": {"value": 9223372036854775807, "type": "bigInt"}, "tags": {"t1": {"value": 9223372036854775807, "type": "bigInt"}}},
+ {"metric": f'{stb_name}_5', "timestamp": {"value": 1626006833639018, "type": "uS"}, "value": {"value": 11.12345027923584, "type": "flOat"}, "tags": {"t1": {"value": 11.12345027923584, "type": "flOat"}}},
+ {"metric": f'{stb_name}_6', "timestamp": {"value": 1626006833639017, "type": "US"}, "value": {"value": 22.123456789, "type": "douBle"}, "tags": {"t1": {"value": 22.123456789, "type": "douBle"}}},
+ {"metric": f'{stb_name}_7', "timestamp": {"value": 1626006833640, "type": "Ms"}, "value": {"value": "vozamcts", "type": "binaRy"}, "tags": {"t1": {"value": "vozamcts", "type": "binaRy"}}},
+ {"metric": f'{stb_name}_8', "timestamp": {"value": 1626006833641, "type": "mS"}, "value": {"value": "vozamcts", "type": "nchAr"}, "tags": {"t1": {"value": "vozamcts", "type": "nchAr"}}},
+ {"metric": f'{stb_name}_9', "timestamp": {"value": 1626006833642, "type": "MS"}, "value": {"value": "vozamcts", "type": "nchAr"}, "tags": {"t1": {"value": "vozamcts", "type": "nchAr"}}},
+ {"metric": f'{stb_name}_10', "timestamp": {"value": 1626006834, "type": "S"}, "value": {"value": "vozamcts", "type": "nchAr"}, "tags": {"t1": {"value": "vozamcts", "type": "nchAr"}}}]
+
+ for input_sql in input_json_list:
+ stb_name = input_sql["metric"]
+ self.resCmp(input_sql, stb_name)
+
+ def tbnameTagsColsNameCheckCase(self):
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json = {'metric': 'rFa$sta', 'timestamp': {'value': 1626006834, 'type': 's'}, 'value': {'value': True, 'type': 'bool'}, 'tags': {'Tt!0': {'value': False, 'type': 'bool'}, 'tT@1': {'value': 127, 'type': 'tinyint'}, 't@2': {'value': 32767, 'type': 'smallint'}, 't$3': {'value': 2147483647, 'type': 'int'}, 't%4': {'value': 9223372036854775807, 'type': 'bigint'}, 't^5': {'value': 11.12345027923584, 'type': 'float'}, 't&6': {'value': 22.123456789, 'type': 'double'}, 't*7': {'value': 'binaryTagValue', 'type': 'binary'}, 't!@#$%^&*()_+[];:<>?,9': {'value': 'ncharTagValue', 'type': 'nchar'}, 'id': 'rFas$ta_1'}}
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ query_sql = 'select * from `rFa$sta`'
+ query_res = tdSql.query(query_sql, True)
+ tdSql.checkEqual(query_res, [(datetime.datetime(2021, 7, 11, 20, 33, 54), True, 'rFas$ta_1', 'ncharTagValue', 2147483647, 9223372036854775807, 22.123456789, 'binaryTagValue', 32767, 11.12345027923584, False, 127)])
+ col_tag_res = tdSql.getColNameList(query_sql)
+ tdSql.checkEqual(col_tag_res, ['_ts', '_value', 'id', 't!@#$%^&*()_+[];:<>?,9', 't$3', 't%4', 't&6', 't*7', 't@2', 't^5', 'Tt!0', 'tT@1'])
+ tdSql.execute('drop table `rFa$sta`')
+
+ def pointTransCheckCase(self, value_type="obj"):
+ """
+ metric value "." trans to "_"
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json = self.genFullTypeJson(point_trans_tag=True, value_type=value_type)[0]
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ tdSql.execute("drop table `.point.trans.test`")
+
+ def genSqlList(self, count=5, stb_name="", tb_name="", value_type="obj"):
+ """
+ stb --> supertable
+ tb --> table
+ ts --> timestamp, same default
+ col --> column, same default
+ tag --> tag, same default
+ d --> different
+ s --> same
+ a --> add
+ m --> minus
+ """
+ d_stb_d_tb_list = list()
+ s_stb_s_tb_list = list()
+ s_stb_s_tb_a_tag_list = list()
+ s_stb_s_tb_m_tag_list = list()
+ s_stb_d_tb_list = list()
+ s_stb_d_tb_m_tag_list = list()
+ s_stb_d_tb_a_tag_list = list()
+ s_stb_s_tb_d_ts_list = list()
+ s_stb_s_tb_d_ts_m_tag_list = list()
+ s_stb_s_tb_d_ts_a_tag_list = list()
+ s_stb_d_tb_d_ts_list = list()
+ s_stb_d_tb_d_ts_m_tag_list = list()
+ s_stb_d_tb_d_ts_a_tag_list = list()
+ for i in range(count):
+ d_stb_d_tb_list.append(self.genFullTypeJson(col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type)))
+ s_stb_s_tb_list.append(self.genFullTypeJson(stb_name=stb_name, tb_name=tb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type)))
+ s_stb_s_tb_a_tag_list.append(self.genFullTypeJson(stb_name=stb_name, tb_name=tb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), t_add_tag=True))
+ s_stb_s_tb_m_tag_list.append(self.genFullTypeJson(stb_name=stb_name, tb_name=tb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), t_mul_tag=True))
+ s_stb_d_tb_list.append(self.genFullTypeJson(stb_name=stb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), id_noexist_tag=True))
+ s_stb_d_tb_m_tag_list.append(self.genFullTypeJson(stb_name=stb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), id_noexist_tag=True, t_mul_tag=True))
+ s_stb_d_tb_a_tag_list.append(self.genFullTypeJson(stb_name=stb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), id_noexist_tag=True, t_add_tag=True))
+ s_stb_s_tb_d_ts_list.append(self.genFullTypeJson(stb_name=stb_name, tb_name=tb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), ts_value = self.genTsColValue(1626006833639000000, "ns")))
+ s_stb_s_tb_d_ts_m_tag_list.append(self.genFullTypeJson(stb_name=stb_name, tb_name=tb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), ts_value = self.genTsColValue(1626006833639000000, "ns"), t_mul_tag=True))
+ s_stb_s_tb_d_ts_a_tag_list.append(self.genFullTypeJson(stb_name=stb_name, tb_name=tb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), ts_value = self.genTsColValue(1626006833639000000, "ns"), t_add_tag=True))
+ s_stb_d_tb_d_ts_list.append(self.genFullTypeJson(stb_name=stb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), id_noexist_tag=True, ts_value = self.genTsColValue(1626006833639000000, "ns")))
+ s_stb_d_tb_d_ts_m_tag_list.append(self.genFullTypeJson(stb_name=stb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), id_noexist_tag=True, ts_value = self.genTsColValue(0, "ns"), t_mul_tag=True))
+ s_stb_d_tb_d_ts_a_tag_list.append(self.genFullTypeJson(stb_name=stb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), id_noexist_tag=True, ts_value = self.genTsColValue(0, "ns"), t_add_tag=True))
+
+ return d_stb_d_tb_list, s_stb_s_tb_list, s_stb_s_tb_a_tag_list, s_stb_s_tb_m_tag_list, \
+ s_stb_d_tb_list, s_stb_d_tb_m_tag_list, s_stb_d_tb_a_tag_list, s_stb_s_tb_d_ts_list, \
+ s_stb_s_tb_d_ts_m_tag_list, s_stb_s_tb_d_ts_a_tag_list, s_stb_d_tb_d_ts_list, \
+ s_stb_d_tb_d_ts_m_tag_list, s_stb_d_tb_d_ts_a_tag_list
+
+ def genMultiThreadSeq(self, sql_list):
+ tlist = list()
+ for insert_sql in sql_list:
+ t = threading.Thread(target=self._conn.schemaless_insert,args=([json.dumps(insert_sql[0])], TDSmlProtocolType.JSON.value, None))
+ tlist.append(t)
+ return tlist
+
+ def multiThreadRun(self, tlist):
+ for t in tlist:
+ t.start()
+ for t in tlist:
+ t.join()
+
+ def stbInsertMultiThreadCheckCase(self, value_type="obj"):
+ """
+ thread input different stb
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json = self.genSqlList(value_type=value_type)[0]
+ self.multiThreadRun(self.genMultiThreadSeq(input_json))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(5)
+
+ def sStbStbDdataInsertMultiThreadCheckCase(self, value_type="obj"):
+ """
+ thread input same stb tb, different data, result keep first data
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ s_stb_s_tb_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name, value_type=value_type)[1]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(1)
+ expected_tb_name = self.getNoIdTbName(stb_name)[0]
+ tdSql.checkEqual(tb_name, expected_tb_name)
+ tdSql.query(f"select * from {stb_name};")
+ tdSql.checkRows(1)
+
+ def sStbStbDdataAtInsertMultiThreadCheckCase(self, value_type="obj"):
+ """
+ thread input same stb tb, different data, add columes and tags, result keep first data
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ s_stb_s_tb_a_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name, value_type=value_type)[2]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_a_tag_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(1)
+ expected_tb_name = self.getNoIdTbName(stb_name)[0]
+ tdSql.checkEqual(tb_name, expected_tb_name)
+ tdSql.query(f"select * from {stb_name};")
+ tdSql.checkRows(1)
+
+ def sStbStbDdataMtInsertMultiThreadCheckCase(self, value_type="obj"):
+ """
+ thread input same stb tb, different data, minus columes and tags, result keep first data
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ s_stb_s_tb_m_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name, value_type=value_type)[3]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_m_tag_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(1)
+ expected_tb_name = self.getNoIdTbName(stb_name)[0]
+ tdSql.checkEqual(tb_name, expected_tb_name)
+ tdSql.query(f"select * from {stb_name};")
+ tdSql.checkRows(1)
+
+ def sStbDtbDdataInsertMultiThreadCheckCase(self, value_type="obj"):
+ """
+ thread input same stb, different tb, different data
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ s_stb_d_tb_list = self.genSqlList(stb_name=stb_name, value_type=value_type)[4]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(6)
+
+ def sStbDtbDdataMtInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb, different tb, different data, add col, mul tag
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary"))
+ self.resCmp(input_json, stb_name)
+ s_stb_d_tb_m_tag_list = [({"metric": stb_name, "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": "omfdhyom", "tags": {"t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}}}, 'yzwswz'),
+ ({"metric": stb_name, "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": "vqowydbc", "tags": {"t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}}}, 'yzwswz'),
+ ({"metric": stb_name, "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": "plgkckpv", "tags": {"t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}}}, 'yzwswz'),
+ ({"metric": stb_name, "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": "cujyqvlj", "tags": {"t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}}}, 'yzwswz'),
+ ({"metric": stb_name, "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": "twjxisat", "tags": {"t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}}}, 'yzwswz')]
+
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_m_tag_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(2)
+
+ def sStbDtbDdataAtInsertMultiThreadCheckCase(self, value_type="obj"):
+ """
+ thread input same stb, different tb, different data, add tag, mul col
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ s_stb_d_tb_a_tag_list = self.genSqlList(stb_name=stb_name, value_type=value_type)[6]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_a_tag_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(6)
+
+ def sStbStbDdataDtsInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb tb, different ts
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary"))
+ self.resCmp(input_json, stb_name)
+ s_stb_s_tb_d_ts_list = [({"metric": stb_name, "timestamp": {"value": 0, "type": "ns"}, "value": "hkgjiwdj", "tags": {"id": tb_name, "t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}, "t7": {"value": "vozamcts", "type": "binary"}, "t8": {"value": "ncharTagValue", "type": "nchar"}}}, 'yzwswz'),
+ ({"metric": stb_name, "timestamp": {"value": 0, "type": "ns"}, "value": "rljjrrul", "tags": {"id": tb_name, "t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}, "t7": {"value": "bmcanhbs", "type": "binary"}, "t8": {"value": "ncharTagValue", "type": "nchar"}}}, 'yzwswz'),
+ ({"metric": stb_name, "timestamp": {"value": 0, "type": "ns"}, "value": "basanglx", "tags": {"id": tb_name, "t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}, "t7": {"value": "enqkyvmb", "type": "binary"}, "t8": {"value": "ncharTagValue", "type": "nchar"}}}, 'yzwswz'),
+ ({"metric": stb_name, "timestamp": {"value": 0, "type": "ns"}, "value": "clsajzpp", "tags": {"id": tb_name, "t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}, "t7": {"value": "eivaegjk", "type": "binary"}, "t8": {"value": "ncharTagValue", "type": "nchar"}}}, 'yzwswz'),
+ ({"metric": stb_name, "timestamp": {"value": 0, "type": "ns"}, "value": "jitwseso", "tags": {"id": tb_name, "t0": {"value": True, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}, "t7": {"value": "yhlwkddq", "type": "binary"}, "t8": {"value": "ncharTagValue", "type": "nchar"}}}, 'yzwswz')]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(1)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(6)
+
+ def sStbStbDdataDtsMtInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb tb, different ts, add col, mul tag
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary"))
+ self.resCmp(input_json, stb_name)
+ s_stb_s_tb_d_ts_m_tag_list = [({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'pjndapjb', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'tuzsfrom', 'type': 'binary'}, 'id': tb_name}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'llqzvgvw', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'nttjdzgi', 'type': 'binary'}, 'id': tb_name}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'tclbosqc', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'uatpzgpi', 'type': 'binary'}, 'id': tb_name}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'rlpuzodt', 'tags': {'t0': {'value': True, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'cwnpdnng', 'type': 'binary'}, 'id': tb_name}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'rhnikvfq', 'tags': {'t0': {'value': True, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'afcibyeb', 'type': 'binary'}, 'id': tb_name}}, 'punftb')]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_m_tag_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(1)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(6)
+ tdSql.query(f"select * from {stb_name} where t8 is not NULL")
+ tdSql.checkRows(6)
+
+ def sStbStbDdataDtsAtInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb tb, different ts, add tag, mul col
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary"))
+ self.resCmp(input_json, stb_name)
+ s_stb_s_tb_d_ts_a_tag_list = [({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'pjndapjb', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'tuzsfrom', 'type': 'binary'}, 't8': {'value': 'ncharTagValue', 'type': 'nchar'}, 't11': {'value': 127, 'type': 'tinyint'}, 't10': {'value': 'ncharTagValue', 'type': 'nchar'}, 'id': tb_name}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'llqzvgvw', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'nttjdzgi', 'type': 'binary'}, 't8': {'value': 'ncharTagValue', 'type': 'nchar'}, 't11': {'value': 127, 'type': 'tinyint'}, 't10': {'value': 'ncharTagValue', 'type': 'nchar'}, 'id': tb_name}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': {'value': 'tclbosqc', 'type': 'binary'}, 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'uatpzgpi', 'type': 'binary'}, 't8': {'value': 'ncharTagValue', 'type': 'nchar'}, 't11': {'value': 127, 'type': 'tinyint'}, 't10': {'value': 'ncharTagValue', 'type': 'nchar'}, 'id': tb_name}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'rlpuzodt', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'cwnpdnng', 'type': 'binary'}, 't8': {'value': 'ncharTagValue', 'type': 'nchar'}, 't11': {'value': 127, 'type': 'tinyint'}, 't10': {'value': 'ncharTagValue', 'type': 'nchar'}, 'id': tb_name}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': {'value': 'rhnikvfq', 'type': 'binary'}, 'tags': {'t0': {'value': True, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'afcibyeb', 'type': 'binary'}, 't8': {'value': 'ncharTagValue', 'type': 'nchar'}, 't11': {'value': 127, 'type': 'tinyint'}, 't10': {'value': 'ncharTagValue', 'type': 'nchar'}, 'id': tb_name}}, 'punftb')]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_a_tag_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(1)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(6)
+ for t in ["t10", "t11"]:
+ tdSql.query(f"select * from {stb_name} where {t} is not NULL;")
+ tdSql.checkRows(0)
+
+ def sStbDtbDdataDtsInsertMultiThreadCheckCase(self, value_type="obj"):
+ """
+ thread input same stb, different tb, data, ts
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ s_stb_d_tb_d_ts_list = self.genSqlList(stb_name=stb_name, value_type=value_type)[10]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_d_ts_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(6)
+
+ def sStbDtbDdataDtsMtInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb, different tb, data, ts, add col, mul tag
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary"))
+ self.resCmp(input_json, stb_name)
+ s_stb_d_tb_d_ts_m_tag_list = [({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'pjndapjb', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': {'value': 'llqzvgvw', 'type': 'binary'}, 'tags': {'t0': {'value': True, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'tclbosqc', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': {'value': 'rlpuzodt', 'type': 'binary'}, 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': {'value': 'rhnikvfq', 'type': 'binary'}, 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}}}, 'punftb')]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_d_ts_m_tag_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(3)
+
+ def test(self):
+ try:
+ input_json = f'test_nchar 0 L"涛思数据" t0=f,t1=L"涛思数据",t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64'
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ # input_json, stb_name = self.genFullTypeJson()
+ # self.resCmp(input_json, stb_name)
+ except SchemalessError as err:
+ print(err.errno)
+
+ def runAll(self):
+ for value_type in ["obj", "default"]:
+ self.initCheckCase(value_type)
+ self.symbolsCheckCase(value_type)
+ # self.tsCheckCase(value_type)
+ self.idSeqCheckCase(value_type)
+ self.idLetterCheckCase(value_type)
+ self.noIdCheckCase(value_type)
+ self.maxColTagCheckCase(value_type)
+ self.idIllegalNameCheckCase(value_type)
+ self.idStartWithNumCheckCase(value_type)
+ self.nowTsCheckCase(value_type)
+ self.dateFormatTsCheckCase(value_type)
+ self.illegalTsCheckCase(value_type)
+ self.tbnameCheckCase(value_type)
+ # self.tagValueLengthCheckCase(value_type)
+ self.colValueLengthCheckCase(value_type)
+ self.tagColIllegalValueCheckCase(value_type)
+ # self.duplicateIdTagColInsertCheckCase(value_type)
+ self.noIdStbExistCheckCase(value_type)
+ self.duplicateInsertExistCheckCase(value_type)
+ # self.tagColBinaryNcharLengthCheckCase(value_type)
+ # self.tagColAddDupIDCheckCase(value_type)
+ # self.tagAddCheckCase(value_type)
+ # self.tagMd5Check(value_type)
+ # self.tagColBinaryMaxLengthCheckCase(value_type)
+ # self.tagColNcharMaxLengthCheckCase(value_type)
+ # self.batchInsertCheckCase(value_type)
+ # self.multiInsertCheckCase(10, value_type)
+ self.multiColsInsertCheckCase(value_type)
+ self.blankColInsertCheckCase(value_type)
+ self.blankTagInsertCheckCase(value_type)
+ self.multiFieldCheckCase(value_type)
+ # self.stbInsertMultiThreadCheckCase(value_type)
+ self.pointTransCheckCase(value_type)
+ self.tagNameLengthCheckCase()
+ self.boolTypeCheckCase()
+ self.batchErrorInsertCheckCase()
+ self.chineseCheckCase()
+ # self.spellCheckCase()
+ self.tbnameTagsColsNameCheckCase()
+ # # MultiThreads
+ # self.sStbStbDdataInsertMultiThreadCheckCase()
+ # self.sStbStbDdataAtInsertMultiThreadCheckCase()
+ # self.sStbStbDdataMtInsertMultiThreadCheckCase()
+ # self.sStbDtbDdataInsertMultiThreadCheckCase()
+ # self.sStbDtbDdataAtInsertMultiThreadCheckCase()
+ # self.sStbDtbDdataDtsInsertMultiThreadCheckCase()
+ # self.sStbDtbDdataMtInsertMultiThreadCheckCase()
+ # self.sStbStbDdataDtsInsertMultiThreadCheckCase()
+ # self.sStbStbDdataDtsMtInsertMultiThreadCheckCase()
+ # self.sStbDtbDdataDtsMtInsertMultiThreadCheckCase()
+ # self.lengthIcreaseCrashCheckCase()
+
+ def run(self):
+ print("running {}".format(__file__))
+ self.createDb()
+ try:
+ self.runAll()
+ except Exception as err:
+ print(''.join(traceback.format_exception(None, err, err.__traceback__)))
+ raise err
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py b/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py
new file mode 100644
index 0000000000000000000000000000000000000000..23404330ed450bca999dc593b0675d4eb7d54eb0
--- /dev/null
+++ b/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py
@@ -0,0 +1,1489 @@
+###################################################################
+# Copyright (c) 2021 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import traceback
+import random
+from taos.error import SchemalessError
+import time
+import numpy as np
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.common import tdCom
+from util.types import TDSmlProtocolType, TDSmlTimestampType
+import threading
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+ self._conn = conn
+ self.smlChildTableName_value = "id"
+
+ def createDb(self, name="test", db_update_tag=0, protocol=None):
+ if protocol == "telnet-tcp":
+ name = "opentsdb_telnet"
+
+ if db_update_tag == 0:
+ tdSql.execute(f"drop database if exists {name}")
+ tdSql.execute(f"create database if not exists {name} precision 'us' schemaless 1")
+ else:
+ tdSql.execute(f"drop database if exists {name}")
+ tdSql.execute(f"create database if not exists {name} precision 'ns' update 1 schemaless 1")
+ tdSql.execute(f'use {name}')
+
+ def timeTrans(self, time_value, ts_type):
+ if int(time_value) == 0:
+ ts = time.time()
+ else:
+ if ts_type == TDSmlTimestampType.MILLI_SECOND.value or ts_type == None:
+ ts = int(''.join(list(filter(str.isdigit, time_value))))/1000
+ elif ts_type == TDSmlTimestampType.SECOND.value:
+ ts = int(''.join(list(filter(str.isdigit, time_value))))/1
+ ulsec = repr(ts).split('.')[1][:6]
+ if len(ulsec) < 6 and int(ulsec) != 0:
+ ulsec = int(ulsec) * (10 ** (6 - len(ulsec)))
+ elif int(ulsec) == 0:
+ ulsec *= 6
+ # * follow two rows added for tsCheckCase
+ td_ts = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(ts))
+ return td_ts
+ #td_ts = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(ts))
+ td_ts = time.strftime("%Y-%m-%d %H:%M:%S.{}".format(ulsec), time.localtime(ts))
+ return td_ts
+ #return repr(datetime.datetime.strptime(td_ts, "%Y-%m-%d %H:%M:%S.%f"))
+
+ def dateToTs(self, datetime_input):
+ return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f")))
+
+ def getTdTypeValue(self, value, vtype="col"):
+ if vtype == "col":
+ if value.lower().endswith("i8"):
+ td_type = "TINYINT"
+ td_tag_value = ''.join(list(value)[:-2])
+ elif value.lower().endswith("i16"):
+ td_type = "SMALLINT"
+ td_tag_value = ''.join(list(value)[:-3])
+ elif value.lower().endswith("i32"):
+ td_type = "INT"
+ td_tag_value = ''.join(list(value)[:-3])
+ elif value.lower().endswith("i64"):
+ td_type = "BIGINT"
+ td_tag_value = ''.join(list(value)[:-3])
+ elif value.lower().endswith("u64"):
+ td_type = "BIGINT UNSIGNED"
+ td_tag_value = ''.join(list(value)[:-3])
+ elif value.lower().endswith("f32"):
+ td_type = "FLOAT"
+ td_tag_value = ''.join(list(value)[:-3])
+ td_tag_value = '{}'.format(np.float32(td_tag_value))
+ elif value.lower().endswith("f64"):
+ td_type = "DOUBLE"
+ td_tag_value = ''.join(list(value)[:-3])
+ if "e" in value.lower():
+ td_tag_value = str(float(td_tag_value))
+ elif value.lower().startswith('l"'):
+ td_type = "NCHAR"
+ td_tag_value = ''.join(list(value)[2:-1])
+ elif value.startswith('"') and value.endswith('"'):
+ td_type = "BINARY"
+ td_tag_value = ''.join(list(value)[1:-1])
+ elif value.lower() == "t" or value.lower() == "true":
+ td_type = "BOOL"
+ td_tag_value = "True"
+ elif value.lower() == "f" or value.lower() == "false":
+ td_type = "BOOL"
+ td_tag_value = "False"
+ elif value.isdigit():
+ td_type = "DOUBLE"
+ td_tag_value = str(float(value))
+ else:
+ td_type = "DOUBLE"
+ if "e" in value.lower():
+ td_tag_value = str(float(value))
+ else:
+ td_tag_value = value
+ elif vtype == "tag":
+ td_type = "NCHAR"
+ td_tag_value = str(value)
+ return td_type, td_tag_value
+
+ def typeTrans(self, type_list):
+ type_num_list = []
+ for tp in type_list:
+ if tp.upper() == "TIMESTAMP":
+ type_num_list.append(9)
+ elif tp.upper() == "BOOL":
+ type_num_list.append(1)
+ elif tp.upper() == "TINYINT":
+ type_num_list.append(2)
+ elif tp.upper() == "SMALLINT":
+ type_num_list.append(3)
+ elif tp.upper() == "INT":
+ type_num_list.append(4)
+ elif tp.upper() == "BIGINT":
+ type_num_list.append(5)
+ elif tp.upper() == "FLOAT":
+ type_num_list.append(6)
+ elif tp.upper() == "DOUBLE":
+ type_num_list.append(7)
+ elif tp.upper() == "BINARY":
+ type_num_list.append(8)
+ elif tp.upper() == "NCHAR":
+ type_num_list.append(10)
+ elif tp.upper() == "BIGINT UNSIGNED":
+ type_num_list.append(14)
+ return type_num_list
+
+ def inputHandle(self, input_sql, ts_type, protocol=None):
+ input_sql_split_list = input_sql.split(" ")
+ if protocol == "telnet-tcp":
+ input_sql_split_list.pop(0)
+ stb_name = input_sql_split_list[0]
+ stb_tag_list = input_sql_split_list[3:]
+ stb_tag_list[-1] = stb_tag_list[-1].strip()
+ stb_col_value = input_sql_split_list[2]
+ ts_value = self.timeTrans(input_sql_split_list[1], ts_type)
+
+ tag_name_list = []
+ tag_value_list = []
+ td_tag_value_list = []
+ td_tag_type_list = []
+
+ col_name_list = []
+ col_value_list = []
+ td_col_value_list = []
+ td_col_type_list = []
+
+ for elm in stb_tag_list:
+ if self.smlChildTableName_value == "ID":
+ if "id=" in elm.lower():
+ tb_name = elm.split('=')[1]
+ else:
+ tag_name_list.append(elm.split("=")[0].lower())
+ tag_value_list.append(elm.split("=")[1])
+ tb_name = ""
+ td_tag_value_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[1])
+ td_tag_type_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[0])
+ else:
+ if "id" == elm.split("=")[0].lower():
+ tag_name_list.insert(0, elm.split("=")[0])
+ tag_value_list.insert(0, elm.split("=")[1])
+ td_tag_value_list.insert(0, self.getTdTypeValue(elm.split("=")[1], "tag")[1])
+ td_tag_type_list.insert(0, self.getTdTypeValue(elm.split("=")[1], "tag")[0])
+ else:
+ tag_name_list.append(elm.split("=")[0])
+ tag_value_list.append(elm.split("=")[1])
+ tb_name = ""
+ td_tag_value_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[1])
+ td_tag_type_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[0])
+
+ col_name_list.append('_value')
+ col_value_list.append(stb_col_value)
+
+ td_col_value_list.append(self.getTdTypeValue(stb_col_value)[1])
+ td_col_type_list.append(self.getTdTypeValue(stb_col_value)[0])
+
+ final_field_list = []
+ final_field_list.extend(col_name_list)
+ final_field_list.extend(tag_name_list)
+
+ final_type_list = []
+ final_type_list.append("TIMESTAMP")
+ final_type_list.extend(td_col_type_list)
+ final_type_list.extend(td_tag_type_list)
+ final_type_list = self.typeTrans(final_type_list)
+
+ final_value_list = []
+ final_value_list.append(ts_value)
+ final_value_list.extend(td_col_value_list)
+ final_value_list.extend(td_tag_value_list)
+ return final_value_list, final_field_list, final_type_list, stb_name, tb_name
+
+ def genFullTypeSql(self, stb_name="", tb_name="", value="", t0="", t1="127i8", t2="32767i16", t3="2147483647i32",
+ t4="9223372036854775807i64", t5="11.12345f32", t6="22.123456789f64", t7="\"binaryTagValue\"",
+ t8="L\"ncharTagValue\"", ts="1626006833641",
+ id_noexist_tag=None, id_change_tag=None, id_upper_tag=None, id_mixul_tag=None, id_double_tag=None,
+ t_add_tag=None, t_mul_tag=None, c_multi_tag=None, c_blank_tag=None, t_blank_tag=None,
+ chinese_tag=None, multi_field_tag=None, point_trans_tag=None, protocol=None, tcp_keyword_tag=None):
+ if stb_name == "":
+ stb_name = tdCom.getLongName(len=6, mode="letters")
+ if tb_name == "":
+ tb_name = f'{stb_name}_{random.randint(0, 65535)}_{random.randint(0, 65535)}'
+ if t0 == "":
+ t0 = "t"
+ if value == "":
+ value = random.choice(["f", "F", "false", "False", "t", "T", "true", "True", "TRUE", "FALSE"])
+ if id_upper_tag is not None:
+ id = "ID"
+ else:
+ id = "id"
+ if id_mixul_tag is not None:
+ id = random.choice(["iD", "Id"])
+ else:
+ id = "id"
+ sql_seq = f'{stb_name} {ts} {value} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}'
+ if id_noexist_tag is not None:
+ sql_seq = f'{stb_name} {ts} {value} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}'
+ if t_add_tag is not None:
+ sql_seq = f'{stb_name} {ts} {value} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8} t9={t8}'
+ if id_change_tag is not None:
+ sql_seq = f'{stb_name} {ts} {value} t0={t0} {id}={tb_name} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}'
+ if id_double_tag is not None:
+ sql_seq = f'{stb_name} {ts} {value} {id}=\"{tb_name}_1\" t0={t0} t1={t1} {id}=\"{tb_name}_2\" t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}'
+ if t_add_tag is not None:
+ sql_seq = f'{stb_name} {ts} {value} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8} t11={t1} t10={t8}'
+ if t_mul_tag is not None:
+ sql_seq = f'{stb_name} {ts} {value} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6}'
+ if id_noexist_tag is not None:
+ sql_seq = f'{stb_name} {ts} {value} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6}'
+ if c_multi_tag is not None:
+ sql_seq = f'{stb_name} {ts} {value} {value} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6}'
+ if c_blank_tag is not None:
+ sql_seq = f'{stb_name} {ts} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}'
+ if t_blank_tag is not None:
+ sql_seq = f'{stb_name} {ts} {value}'
+ if chinese_tag is not None:
+ sql_seq = f'{stb_name} {ts} L"涛思数据" t0={t0} t1=L"涛思数据"'
+ if multi_field_tag is not None:
+ sql_seq = f'{stb_name} {ts} {value} {id}={tb_name} t0={t0} {value}'
+ if point_trans_tag is not None:
+ sql_seq = f'.point.trans.test {ts} {value} t0={t0}'
+ if tcp_keyword_tag is not None:
+ sql_seq = f'put {ts} {value} t0={t0}'
+ if protocol == "telnet-tcp":
+ sql_seq = 'put ' + sql_seq + '\n'
+ return sql_seq, stb_name
+
+ def genMulTagColStr(self, genType, count=1):
+ """
+ genType must be tag/col
+ """
+ tag_str = ""
+ col_str = ""
+ if genType == "tag":
+ for i in range(0, count):
+ if i < (count-1):
+ tag_str += f't{i}=f '
+ else:
+ tag_str += f't{i}=f'
+ return tag_str
+ if genType == "col":
+ col_str = "t"
+ return col_str
+
+ def genLongSql(self, tag_count):
+ stb_name = tdCom.getLongName(7, mode="letters")
+ tag_str = self.genMulTagColStr("tag", tag_count)
+ col_str = self.genMulTagColStr("col")
+ ts = "1626006833641"
+ long_sql = stb_name + ' ' + ts + ' ' + col_str + ' ' + ' ' + tag_str
+ return long_sql, stb_name
+
+ def getNoIdTbName(self, stb_name, protocol=None):
+ query_sql = f"select tbname from {stb_name}"
+ tb_name = self.resHandle(query_sql, True, protocol)[0][0]
+ return tb_name
+
+ def resHandle(self, query_sql, query_tag, protocol=None):
+ tdSql.execute('reset query cache')
+ if protocol == "telnet-tcp":
+ time.sleep(0.5)
+ row_info = tdSql.query(query_sql, query_tag)
+ col_info = tdSql.getColNameList(query_sql, query_tag)
+ res_row_list = []
+ sub_list = []
+ for row_mem in row_info:
+ for i in row_mem:
+ sub_list.append(str(i))
+ res_row_list.append(sub_list)
+ res_field_list_without_ts = col_info[0][1:]
+ res_type_list = col_info[1]
+ return res_row_list, res_field_list_without_ts, res_type_list
+
+ def resCmp(self, input_sql, stb_name, query_sql="select * from", condition="", ts=None, ts_type=None, id=True, none_check_tag=None, precision=None, protocol=None):
+ expect_list = self.inputHandle(input_sql, ts_type, protocol)
+ if protocol == "telnet-tcp":
+ tdCom.tcpClient(input_sql)
+ else:
+ if precision == None:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, ts_type)
+ else:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, precision)
+ query_sql = f"{query_sql} {stb_name} {condition}"
+ res_row_list, res_field_list_without_ts, res_type_list = self.resHandle(query_sql, True, protocol)
+ if ts == 0:
+ res_ts = self.dateToTs(res_row_list[0][0])
+ current_time = time.time()
+ if current_time - res_ts < 60:
+ tdSql.checkEqual(res_row_list[0][1:], expect_list[0][1:])
+ else:
+ print("timeout")
+ tdSql.checkEqual(res_row_list[0], expect_list[0])
+ else:
+ if none_check_tag is not None:
+ none_index_list = [i for i,x in enumerate(res_row_list[0]) if x=="None"]
+ none_index_list.reverse()
+ for j in none_index_list:
+ res_row_list[0].pop(j)
+ expect_list[0].pop(j)
+ tdSql.checkEqual(res_row_list[0], expect_list[0])
+ tdSql.checkEqual(res_field_list_without_ts, expect_list[1])
+ for i in range(len(res_type_list)):
+ tdSql.checkEqual(res_type_list[i], expect_list[2][i])
+
+ def initCheckCase(self, protocol=None):
+ """
+ normal tags and cols, one for every elm
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(protocol=protocol)
+ self.resCmp(input_sql, stb_name, protocol=protocol)
+
+ def boolTypeCheckCase(self, protocol=None):
+ """
+ check all normal type
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ full_type_list = ["f", "F", "false", "False", "t", "T", "true", "True"]
+ for t_type in full_type_list:
+ input_sql, stb_name = self.genFullTypeSql(t0=t_type, protocol=protocol)
+ self.resCmp(input_sql, stb_name, protocol=protocol)
+
+ def symbolsCheckCase(self, protocol=None):
+ """
+ check symbols = `~!@#$%^&*()_-+={[}]\|:;'\",<.>/?
+ """
+ '''
+ please test :
+ binary_symbols = '\"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"\'\'"\"'
+ '''
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ binary_symbols = '"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"'
+ nchar_symbols = f'L{binary_symbols}'
+ input_sql1, stb_name1 = self.genFullTypeSql(value=binary_symbols, t7=binary_symbols, t8=nchar_symbols, protocol=protocol)
+ input_sql2, stb_name2 = self.genFullTypeSql(value=nchar_symbols, t7=binary_symbols, t8=nchar_symbols, protocol=protocol)
+ self.resCmp(input_sql1, stb_name1, protocol=protocol)
+ self.resCmp(input_sql2, stb_name2, protocol=protocol)
+
+ def tsCheckCase(self):
+ """
+ test ts list --> ["1626006833640ms", "1626006834s", "1626006822639022"]
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(ts=1626006833640)
+ self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.MILLI_SECOND.value)
+ input_sql, stb_name = self.genFullTypeSql(ts=1626006833640)
+ self.resCmp(input_sql, stb_name, ts_type=None)
+ input_sql, stb_name = self.genFullTypeSql(ts=1626006834)
+ self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.SECOND.value)
+
+ tdSql.execute(f"drop database if exists test_ts")
+ tdSql.execute(f"create database if not exists test_ts precision 'ms' schemaless 1")
+ tdSql.execute("use test_ts")
+ input_sql = ['test_ms 1626006833640 t t0=t', 'test_ms 1626006833641 f t0=t']
+ self._conn.schemaless_insert(input_sql, TDSmlProtocolType.TELNET.value, None)
+ res = tdSql.query('select * from test_ms', True)
+ tdSql.checkEqual(str(res[0][0]), "2021-07-11 20:33:53.640000")
+ tdSql.checkEqual(str(res[1][0]), "2021-07-11 20:33:53.641000")
+
+ def openTstbTelnetTsCheckCase(self):
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql = f'{tdCom.getLongName(len=10, mode="letters")} 0 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64'
+ stb_name = input_sql.split(" ")[0]
+ self.resCmp(input_sql, stb_name, ts=0)
+ input_sql = f'{tdCom.getLongName(len=10, mode="letters")} 1626006833640 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64'
+ stb_name = input_sql.split(" ")[0]
+ self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.MILLI_SECOND.value)
+ input_sql = f'{tdCom.getLongName(len=10, mode="letters")} 1626006834 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64'
+ stb_name = input_sql.split(" ")[0]
+ self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.SECOND.value)
+ for ts in [1, 12, 123, 1234, 12345, 123456, 1234567, 12345678, 162600683, 16260068341, 162600683412, 16260068336401]:
+ try:
+ input_sql = f'{tdCom.getLongName(len=10, mode="letters")} {ts} 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64'
+ self._conn.schemaless_insert(input_sql, TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def idSeqCheckCase(self, protocol=None):
+ """
+ check id.index in tags
+ eg: t0=**,id=**,t1=**
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(id_change_tag=True, protocol=protocol)
+ self.resCmp(input_sql, stb_name, protocol=protocol)
+
+ def idLetterCheckCase(self, protocol=None):
+ """
+ check id param
+ eg: id and ID
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(id_upper_tag=True, protocol=protocol)
+ self.resCmp(input_sql, stb_name, protocol=protocol)
+ input_sql, stb_name = self.genFullTypeSql(id_mixul_tag=True, protocol=protocol)
+ self.resCmp(input_sql, stb_name, protocol=protocol)
+ input_sql, stb_name = self.genFullTypeSql(id_change_tag=True, id_upper_tag=True, protocol=protocol)
+ self.resCmp(input_sql, stb_name, protocol=protocol)
+
+ def noIdCheckCase(self, protocol=None):
+ """
+ id not exist
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(id_noexist_tag=True, protocol=protocol)
+ self.resCmp(input_sql, stb_name, protocol=protocol)
+ query_sql = f"select tbname from {stb_name}"
+ res_row_list = self.resHandle(query_sql, True)[0]
+ if len(res_row_list[0][0]) > 0:
+ tdSql.checkColNameList(res_row_list, res_row_list)
+ else:
+ tdSql.checkColNameList(res_row_list, "please check noIdCheckCase")
+
+ def maxColTagCheckCase(self):
+ """
+ max tag count is 128
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ for input_sql in [self.genLongSql(128)[0]]:
+ tdCom.cleanTb()
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ for input_sql in [self.genLongSql(129)[0]]:
+ tdCom.cleanTb()
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def stbTbNameCheckCase(self, protocol=None):
+ """
+ test illegal id name
+ mix "`~!@#$¥%^&*()-+{}|[]、「」【】:;《》<>?"
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ rstr = list("~!@#$¥%^&*()-+{}|[]、「」【】:;《》<>?")
+ for i in rstr:
+ input_sql, stb_name = self.genFullTypeSql(tb_name=f"\"aaa{i}bbb\"", protocol=protocol)
+ self.resCmp(input_sql, f'`{stb_name}`', protocol=protocol)
+ tdSql.execute(f'drop table if exists `{stb_name}`')
+
+ def idStartWithNumCheckCase(self, protocol=None):
+ """
+ id is start with num
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(tb_name="1aaabbb", protocol=protocol)
+ self.resCmp(input_sql, stb_name, protocol=protocol)
+
+ def nowTsCheckCase(self):
+ """
+ check now unsupported
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql = self.genFullTypeSql(ts="now")[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def dateFormatTsCheckCase(self):
+ """
+ check date format ts unsupported
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql = self.genFullTypeSql(ts="2021-07-21\ 19:01:46.920")[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def illegalTsCheckCase(self):
+ """
+ check ts format like 16260068336390us19
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql = self.genFullTypeSql(ts="16260068336390us19")[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def tbnameCheckCase(self):
+ """
+ check length 192
+ check upper tbname
+ chech upper tag
+ length of stb_name tb_name <= 192
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ stb_name_192 = tdCom.getLongName(len=192, mode="letters")
+ tb_name_192 = tdCom.getLongName(len=192, mode="letters")
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name_192, tb_name=tb_name_192)
+ self.resCmp(input_sql, stb_name)
+ tdSql.query(f'select * from {stb_name}')
+ tdSql.checkRows(1)
+ if self.smlChildTableName_value == "ID":
+ for input_sql in [self.genFullTypeSql(stb_name=tdCom.getLongName(len=193, mode="letters"), tb_name=tdCom.getLongName(len=5, mode="letters"))[0], self.genFullTypeSql(tb_name=tdCom.getLongName(len=193, mode="letters"))[0]]:
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+ input_sql = 'Abcdffgg 1626006833640 False T1=127i8 id=Abcddd'
+ else:
+ input_sql = self.genFullTypeSql(stb_name=tdCom.getLongName(len=193, mode="letters"), tb_name=tdCom.getLongName(len=5, mode="letters"))[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+ input_sql = 'Abcdffgg 1626006833640 False T1=127i8'
+ stb_name = f'`{input_sql.split(" ")[0]}`'
+ self.resCmp(input_sql, stb_name)
+ tdSql.execute('drop table `Abcdffgg`')
+
+ def tagNameLengthCheckCase(self):
+ """
+ check tag name limit <= 62
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tag_name = tdCom.getLongName(61, "letters")
+ tag_name = f'T{tag_name}'
+ stb_name = tdCom.getLongName(7, "letters")
+ input_sql = f'{stb_name} 1626006833640 L"bcdaaa" {tag_name}=f'
+ self.resCmp(input_sql, stb_name)
+ input_sql = f'{stb_name} 1626006833640 L"gggcdaaa" {tdCom.getLongName(65, "letters")}=f'
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def tagValueLengthCheckCase(self):
+ """
+ check full type tag value limit
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ # nchar
+ # * legal nchar could not be larger than 16374/4
+ stb_name = tdCom.getLongName(7, "letters")
+ input_sql = f'{stb_name} 1626006833640 t t0=t t1={tdCom.getLongName(4093, "letters")}'
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+
+ input_sql = f'{stb_name} 1626006833640 t t0=t t1={tdCom.getLongName(4094, "letters")}'
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def colValueLengthCheckCase(self):
+ """
+ check full type col value limit
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ # i8
+ for value in ["-128i8", "127i8"]:
+ input_sql, stb_name = self.genFullTypeSql(value=value)
+ self.resCmp(input_sql, stb_name)
+ tdCom.cleanTb()
+ for value in ["-129i8", "128i8"]:
+ input_sql = self.genFullTypeSql(value=value)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+ # i16
+ tdCom.cleanTb()
+ for value in ["-32768i16"]:
+ input_sql, stb_name = self.genFullTypeSql(value=value)
+ self.resCmp(input_sql, stb_name)
+ tdCom.cleanTb()
+ for value in ["-32769i16", "32768i16"]:
+ input_sql = self.genFullTypeSql(value=value)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # i32
+ tdCom.cleanTb()
+ for value in ["-2147483648i32"]:
+ input_sql, stb_name = self.genFullTypeSql(value=value)
+ self.resCmp(input_sql, stb_name)
+ tdCom.cleanTb()
+ for value in ["-2147483649i32", "2147483648i32"]:
+ input_sql = self.genFullTypeSql(value=value)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # i64
+ tdCom.cleanTb()
+ for value in ["-9223372036854775808i64"]:
+ input_sql, stb_name = self.genFullTypeSql(value=value)
+ self.resCmp(input_sql, stb_name)
+ tdCom.cleanTb()
+ for value in ["-9223372036854775809i64", "9223372036854775808i64"]:
+ input_sql = self.genFullTypeSql(value=value)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # f32
+ tdCom.cleanTb()
+ for value in [f"{-3.4028234663852885981170418348451692544*(10**38)}f32", f"{3.4028234663852885981170418348451692544*(10**38)}f32"]:
+ input_sql, stb_name = self.genFullTypeSql(value=value)
+ self.resCmp(input_sql, stb_name)
+ # * limit set to 4028234664*(10**38)
+ tdCom.cleanTb()
+ for value in [f"{-3.4028234664*(10**38)}f32", f"{3.4028234664*(10**38)}f32"]:
+ input_sql = self.genFullTypeSql(value=value)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # f64
+ tdCom.cleanTb()
+ for value in [f'{-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)}f64', f'{-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)}f64']:
+ input_sql, stb_name = self.genFullTypeSql(value=value)
+ self.resCmp(input_sql, stb_name)
+ # # * limit set to 1.797693134862316*(10**308)
+ # tdCom.cleanTb()
+ # for value in [f'{-1.797693134862316*(10**308)}f64', f'{-1.797693134862316*(10**308)}f64']:
+ # input_sql = self.genFullTypeSql(value=value)[0]
+ # try:
+ # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ # raise Exception("should not reach here")
+ # except SchemalessError as err:
+ # tdSql.checkNotEqual(err.errno, 0)
+
+ # # # binary
+ # tdCom.cleanTb()
+ # stb_name = tdCom.getLongName(7, "letters")
+ # input_sql = f'{stb_name} 1626006833640 "{tdCom.getLongName(16374, "letters")}" t0=t'
+ # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+
+ # tdCom.cleanTb()
+ # input_sql = f'{stb_name} 1626006833640 "{tdCom.getLongName(16375, "letters")}" t0=t'
+ # try:
+ # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ # raise Exception("should not reach here")
+ # except SchemalessError as err:
+ # tdSql.checkNotEqual(err.errno, 0)
+
+ # # nchar
+ # # * legal nchar could not be larger than 16374/4
+ # tdCom.cleanTb()
+ # stb_name = tdCom.getLongName(7, "letters")
+ # input_sql = f'{stb_name} 1626006833640 L"{tdCom.getLongName(4093, "letters")}" t0=t'
+ # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+
+ # tdCom.cleanTb()
+ # input_sql = f'{stb_name} 1626006833640 L"{tdCom.getLongName(4094, "letters")}" t0=t'
+ # try:
+ # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ # raise Exception("should not reach here")
+ # except SchemalessError as err:
+ # tdSql.checkNotEqual(err.errno, 0)
+
+ def tagColIllegalValueCheckCase(self):
+
+ """
+ test illegal tag col value
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ # bool
+ for i in ["TrUe", "tRue", "trUe", "truE", "FalsE", "fAlse", "faLse", "falSe", "falsE"]:
+ input_sql1, stb_name = self.genFullTypeSql(t0=i)
+ self.resCmp(input_sql1, stb_name)
+ input_sql2, stb_name = self.genFullTypeSql(value=i)
+ self.resCmp(input_sql2, stb_name)
+
+ # i8 i16 i32 i64 f32 f64
+ for input_sql in [
+ self.genFullTypeSql(value="1s2i8")[0],
+ self.genFullTypeSql(value="1s2i16")[0],
+ self.genFullTypeSql(value="1s2i32")[0],
+ self.genFullTypeSql(value="1s2i64")[0],
+ self.genFullTypeSql(value="11.1s45f32")[0],
+ self.genFullTypeSql(value="11.1s45f64")[0],
+ ]:
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # check accepted binary and nchar symbols
+ # # * ~!@#$¥%^&*()-+={}|[]、「」:;
+ for symbol in list('~!@#$¥%^&*()-+={}|[]、「」:;'):
+ input_sql1 = f'{tdCom.getLongName(7, "letters")} 1626006833640 "abc{symbol}aaa" t0=t'
+ input_sql2 = f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0=t t1="abc{symbol}aaa"'
+ self._conn.schemaless_insert([input_sql1], TDSmlProtocolType.TELNET.value, None)
+ # self._conn.schemaless_insert([input_sql2], TDSmlProtocolType.TELNET.value, None)
+
+ def blankCheckCase(self):
+ '''
+ check blank case
+ '''
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ # input_sql_list = [f'{tdCom.getLongName(7, "letters")} 1626006833640 "abc aaa" t0=t',
+ # f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0="abaaa"',
+ # f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0=L"abaaa"',
+ # f'{tdCom.getLongName(7, "letters")} 1626006833640 L"aba aa" t0=L"abcaaa3" ']
+ input_sql_list = [f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0="abaaa"',
+ f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0=L"abaaa"']
+ for input_sql in input_sql_list:
+ stb_name = input_sql.split(" ")[0]
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ tdSql.query(f'select * from {stb_name}')
+ tdSql.checkRows(1)
+
+ def duplicateIdTagColInsertCheckCase(self):
+ """
+ check duplicate Id Tag Col
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql_id = self.genFullTypeSql(id_double_tag=True)[0]
+ try:
+ self._conn.schemaless_insert([input_sql_id], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ input_sql = self.genFullTypeSql()[0]
+ input_sql_tag = input_sql.replace("t5", "t6")
+ try:
+ self._conn.schemaless_insert([input_sql_tag], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ ##### stb exist #####
+ @tdCom.smlPass
+ def noIdStbExistCheckCase(self):
+ """
+ case no id when stb exist
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(tb_name="sub_table_0123456", t0="f", value="f")
+ self.resCmp(input_sql, stb_name)
+ input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, id_noexist_tag=True, t0="f", value="f")
+ self.resCmp(input_sql, stb_name, condition='where tbname like "t_%"')
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(2)
+
+ def duplicateInsertExistCheckCase(self):
+ """
+ check duplicate insert when stb exist
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql()
+ self.resCmp(input_sql, stb_name)
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ self.resCmp(input_sql, stb_name)
+
+ @tdCom.smlPass
+ def tagColBinaryNcharLengthCheckCase(self):
+ """
+ check length increase
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql()
+ self.resCmp(input_sql, stb_name)
+ tb_name = tdCom.getLongName(5, "letters")
+ input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name,t7="\"binaryTagValuebinaryTagValue\"", t8="L\"ncharTagValuencharTagValue\"")
+ self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"')
+
+ @tdCom.smlPass
+ def tagColAddDupIDCheckCase(self):
+ """
+ check tag count add, stb and tb duplicate
+ * tag: alter table ...
+ * col: when update==0 and ts is same, unchange
+ * so this case tag&&value will be added,
+ * col is added without value when update==0
+ * col is added with value when update==1
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ for db_update_tag in [0, 1]:
+ if db_update_tag == 1 :
+ self.createDb("test_update", db_update_tag=db_update_tag)
+ input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="t", value="t")
+ self.resCmp(input_sql, stb_name)
+ input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t0="t", value="f", t_add_tag=True)
+ if db_update_tag == 1 :
+ self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"', none_check_tag=True)
+ tdSql.query(f'select * from {stb_name} where tbname like "{tb_name}"')
+ tdSql.checkData(0, 11, None)
+ tdSql.checkData(0, 12, None)
+ else:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ tdSql.query(f'select * from {stb_name} where tbname like "{tb_name}"')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 11, None)
+ tdSql.checkData(0, 12, None)
+ self.createDb()
+
+ @tdCom.smlPass
+ def tagColAddCheckCase(self):
+ """
+ check tag count add
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="f", value="f")
+ self.resCmp(input_sql, stb_name)
+ tb_name_1 = tdCom.getLongName(7, "letters")
+ input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name_1, t0="f", value="f", t_add_tag=True)
+ self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name_1}"')
+ res_row_list = self.resHandle(f"select t10,t11 from {tb_name}", True)[0]
+ tdSql.checkEqual(res_row_list[0], ['None', 'None'])
+ self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"', none_check_tag=True)
+
+ def tagMd5Check(self):
+ """
+ condition: stb not change
+ insert two table, keep tag unchange, change col
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(t0="f", value="f", id_noexist_tag=True)
+ self.resCmp(input_sql, stb_name)
+ tb_name1 = self.getNoIdTbName(stb_name)
+ input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, t0="f", value="f", id_noexist_tag=True)
+ self.resCmp(input_sql, stb_name)
+ tb_name2 = self.getNoIdTbName(stb_name)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(1)
+ tdSql.checkEqual(tb_name1, tb_name2)
+ input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, t0="f", value="f", id_noexist_tag=True, t_add_tag=True)
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ tb_name3 = self.getNoIdTbName(stb_name)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(2)
+ tdSql.checkNotEqual(tb_name1, tb_name3)
+
+ # * tag nchar max is 16374/4, col+ts nchar max 49151
+ def tagColNcharMaxLengthCheckCase(self):
+ """
+ check nchar length limit
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ stb_name = tdCom.getLongName(7, "letters")
+ input_sql = f'{stb_name} 1626006833640 f t2={tdCom.getLongName(1, "letters")}'
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+
+ # * legal nchar could not be larger than 16374/4
+ input_sql = f'{stb_name} 1626006833640 f t1={tdCom.getLongName(4093, "letters")} t2={tdCom.getLongName(1, "letters")}'
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(2)
+ input_sql = f'{stb_name} 1626006833640 f t1={tdCom.getLongName(4093, "letters")} t2={tdCom.getLongName(2, "letters")}'
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(2)
+
+ def batchInsertCheckCase(self):
+ """
+ test batch insert
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ stb_name = tdCom.getLongName(8, "letters")
+ tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)')
+
+ lines = ["st123456 1626006833640 1i64 t1=3i64 t2=4f64 t3=\"t3\"",
+ "st123456 1626006833641 2i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64",
+ f'{stb_name} 1626006833642 3i64 t2=5f64 t3=L\"ste\"',
+ "stf567890 1626006833643 4i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64",
+ "st123456 1626006833644 5i64 t1=4i64 t2=5f64 t3=\"t4\"",
+ f'{stb_name} 1626006833645 6i64 t2=5f64 t3=L\"ste2\"',
+ f'{stb_name} 1626006833646 7i64 t2=5f64 t3=L\"ste2\"',
+ "st123456 1626006833647 8i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64",
+ "st123456 1626006833648 9i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64"
+ ]
+ self._conn.schemaless_insert(lines, TDSmlProtocolType.TELNET.value, TDSmlTimestampType.MILLI_SECOND.value)
+ tdSql.query('show stables')
+ tdSql.checkRows(3)
+ tdSql.query('show tables')
+ tdSql.checkRows(6)
+ tdSql.query('select * from st123456')
+ tdSql.checkRows(5)
+
+ def multiInsertCheckCase(self, count):
+ """
+ test multi insert
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ sql_list = []
+ stb_name = tdCom.getLongName(8, "letters")
+ tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 nchar(10))')
+ for i in range(count):
+ input_sql = self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True)[0]
+ sql_list.append(input_sql)
+ self._conn.schemaless_insert(sql_list, TDSmlProtocolType.TELNET.value, TDSmlTimestampType.MILLI_SECOND.value)
+ tdSql.query('show tables')
+ tdSql.checkRows(count)
+
+ def batchErrorInsertCheckCase(self):
+ """
+ test batch error insert
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ stb_name = tdCom.getLongName(8, "letters")
+ lines = ["st123456 1626006833640 3i 64 t1=3i64 t2=4f64 t3=\"t3\"",
+ f"{stb_name} 1626056811823316532ns tRue t2=5f64 t3=L\"ste\""]
+ try:
+ self._conn.schemaless_insert(lines, TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def multiColsInsertCheckCase(self):
+ """
+ test multi cols insert
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql = self.genFullTypeSql(c_multi_tag=True)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def blankColInsertCheckCase(self):
+ """
+ test blank col insert
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql = self.genFullTypeSql(c_blank_tag=True)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def blankTagInsertCheckCase(self):
+ """
+ test blank tag insert
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql = self.genFullTypeSql(t_blank_tag=True)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def chineseCheckCase(self):
+ """
+ check nchar ---> chinese
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(chinese_tag=True)
+ self.resCmp(input_sql, stb_name)
+
+ def multiFieldCheckCase(self):
+ '''
+ multi_field
+ '''
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql = self.genFullTypeSql(multi_field_tag=True)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def spellCheckCase(self):
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ stb_name = tdCom.getLongName(8, "letters")
+ input_sql_list = [f'{stb_name}_1 1626006833640 127I8 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
+ f'{stb_name}_2 1626006833640 32767I16 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
+ f'{stb_name}_3 1626006833640 2147483647I32 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
+ f'{stb_name}_4 1626006833640 9223372036854775807I64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
+ f'{stb_name}_5 1626006833640 11.12345027923584F32 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
+ f'{stb_name}_6 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
+ f'{stb_name}_7 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
+ f'{stb_name}_8 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
+ f'{stb_name}_9 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
+ f'{stb_name}_10 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64']
+ for input_sql in input_sql_list:
+ stb_name = input_sql.split(' ')[0]
+ self.resCmp(input_sql, stb_name)
+
+ def pointTransCheckCase(self, protocol=None):
+ """
+ metric value "." trans to "_"
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql = self.genFullTypeSql(point_trans_tag=True, protocol=protocol)[0]
+ if protocol == 'telnet-tcp':
+ stb_name = f'`{input_sql.split(" ")[1]}`'
+ else:
+ stb_name = f'`{input_sql.split(" ")[0]}`'
+ self.resCmp(input_sql, stb_name, protocol=protocol)
+ tdSql.execute("drop table `.point.trans.test`")
+
+ def defaultTypeCheckCase(self):
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ stb_name = tdCom.getLongName(8, "letters")
+ input_sql_list = [f'{stb_name}_1 1626006833640 9223372036854775807 t0=f t1=127 t2=32767i16 t3=2147483647i32 t4=9223372036854775807 t5=11.12345f32 t6=22.123456789f64 t7="vozamcts" t8=L"ncharTagValue"', \
+ f'{stb_name}_2 1626006833641 22.123456789 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789 t7="vozamcts" t8=L"ncharTagValue"', \
+ f'{stb_name}_3 1626006833642 10e5F32 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=10e5F64 t7="vozamcts" t8=L"ncharTagValue"', \
+ f'{stb_name}_4 1626006833643 10.0e5F64 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=10.0e5F32 t7="vozamcts" t8=L"ncharTagValue"', \
+ f'{stb_name}_5 1626006833644 -10.0e5 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=-10.0e5 t7="vozamcts" t8=L"ncharTagValue"']
+ for input_sql in input_sql_list:
+ stb_name = input_sql.split(" ")[0]
+ self.resCmp(input_sql, stb_name)
+
+ def tbnameTagsColsNameCheckCase(self):
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ if self.smlChildTableName_value == "ID":
+ input_sql = 'rFa$sta 1626006834 9223372036854775807 id=rFas$ta_1 Tt!0=true tT@1=127Ii8 t#2=32767i16 "t$3"=2147483647i32 t%4=9223372036854775807i64 t^5=11.12345f32 t&6=22.123456789f64 t*7=\"ddzhiksj\" t!@#$%^&*()_+[];:<>?,9=L\"ncharTagValue\"'
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ query_sql = 'select * from `rFa$sta`'
+ query_res = tdSql.query(query_sql, True)
+ tdSql.checkEqual(query_res, [(datetime.datetime(2021, 7, 11, 20, 33, 54), 9.223372036854776e+18, 'true', '127Ii8', '32767i16', '2147483647i32', '9223372036854775807i64', '11.12345f32', '22.123456789f64', '"ddzhiksj"', 'L"ncharTagValue"')])
+ col_tag_res = tdSql.getColNameList(query_sql)
+ tdSql.checkEqual(col_tag_res, ['ts', '_value', 'tt!0', 'tt@1', 't#2', '"t$3"', 't%4', 't^5', 't&6', 't*7', 't!@#$%^&*()_+[];:<>?,9'])
+ tdSql.execute('drop table `rFa$sta`')
+ else:
+ input_sql = 'rFa$sta 1626006834 9223372036854775807 Tt!0=true tT@1=127Ii8 t#2=32767i16 "t$3"=2147483647i32 t%4=9223372036854775807i64 t^5=11.12345f32 t&6=22.123456789f64 t*7=\"ddzhiksj\" t!@#$%^&*()_+[];:<>?,9=L\"ncharTagValue\"'
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ query_sql = 'select * from `rFa$sta`'
+ query_res = tdSql.query(query_sql, True)
+ tdSql.checkEqual(query_res, [(datetime.datetime(2021, 7, 11, 20, 33, 54), 9.223372036854776e+18, '2147483647i32', 'L"ncharTagValue"', '32767i16', '9223372036854775807i64', '22.123456789f64', '"ddzhiksj"', '11.12345f32', 'true', '127Ii8')])
+ col_tag_res = tdSql.getColNameList(query_sql)
+ tdSql.checkEqual(col_tag_res, ['_ts', '_value', '"t$3"', 't!@#$%^&*()_+[];:<>?,9', 't#2', 't%4', 't&6', 't*7', 't^5', 'Tt!0', 'tT@1'])
+ tdSql.execute('drop table `rFa$sta`')
+
+ def tcpKeywordsCheckCase(self, protocol="telnet-tcp"):
+ """
+ stb = "put"
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql = self.genFullTypeSql(tcp_keyword_tag=True, protocol=protocol)[0]
+ stb_name = f'`{input_sql.split(" ")[1]}`'
+ self.resCmp(input_sql, stb_name, protocol=protocol)
+
+ def genSqlList(self, count=5, stb_name="", tb_name=""):
+ """
+ stb --> supertable
+ tb --> table
+ ts --> timestamp, same default
+ col --> column, same default
+ tag --> tag, same default
+ d --> different
+ s --> same
+ a --> add
+ m --> minus
+ """
+ d_stb_d_tb_list = list()
+ s_stb_s_tb_list = list()
+ s_stb_s_tb_a_tag_list = list()
+ s_stb_s_tb_m_tag_list = list()
+ s_stb_d_tb_list = list()
+ s_stb_d_tb_m_tag_list = list()
+ s_stb_d_tb_a_tag_list = list()
+ s_stb_s_tb_d_ts_list = list()
+ s_stb_s_tb_d_ts_m_tag_list = list()
+ s_stb_s_tb_d_ts_a_tag_list = list()
+ s_stb_d_tb_d_ts_list = list()
+ s_stb_d_tb_d_ts_m_tag_list = list()
+ s_stb_d_tb_d_ts_a_tag_list = list()
+ for i in range(count):
+ d_stb_d_tb_list.append(self.genFullTypeSql(t0="f", value="f"))
+ s_stb_s_tb_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"'))
+ s_stb_s_tb_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', t_add_tag=True))
+ s_stb_s_tb_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', t_mul_tag=True))
+ s_stb_d_tb_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True))
+ s_stb_d_tb_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, t_mul_tag=True))
+ s_stb_d_tb_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, t_add_tag=True))
+ s_stb_s_tb_d_ts_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', ts=0))
+ s_stb_s_tb_d_ts_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', ts=0, t_mul_tag=True))
+ s_stb_s_tb_d_ts_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', ts=0, t_add_tag=True))
+ s_stb_d_tb_d_ts_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0))
+ s_stb_d_tb_d_ts_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0, t_mul_tag=True))
+ s_stb_d_tb_d_ts_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0, t_add_tag=True))
+
+ return d_stb_d_tb_list, s_stb_s_tb_list, s_stb_s_tb_a_tag_list, s_stb_s_tb_m_tag_list, \
+ s_stb_d_tb_list, s_stb_d_tb_m_tag_list, s_stb_d_tb_a_tag_list, s_stb_s_tb_d_ts_list, \
+ s_stb_s_tb_d_ts_m_tag_list, s_stb_s_tb_d_ts_a_tag_list, s_stb_d_tb_d_ts_list, \
+ s_stb_d_tb_d_ts_m_tag_list, s_stb_d_tb_d_ts_a_tag_list
+
+
+ def genMultiThreadSeq(self, sql_list):
+ tlist = list()
+ for insert_sql in sql_list:
+ t = threading.Thread(target=self._conn.schemaless_insert,args=([insert_sql[0]], TDSmlProtocolType.TELNET.value, None))
+ tlist.append(t)
+ return tlist
+
+ def multiThreadRun(self, tlist):
+ for t in tlist:
+ t.start()
+ for t in tlist:
+ t.join()
+
+ def stbInsertMultiThreadCheckCase(self):
+ """
+ thread input different stb
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql = self.genSqlList()[0]
+ print(input_sql)
+ self.multiThreadRun(self.genMultiThreadSeq(input_sql))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(5)
+
+ def sStbStbDdataInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb tb, different data, result keep first data
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
+ self.resCmp(input_sql, stb_name)
+ s_stb_s_tb_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[1]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6)
+ if self.smlChildTableName_value == "ID":
+ expected_tb_name = self.getNoIdTbName(stb_name)[0]
+ tdSql.checkEqual(tb_name, expected_tb_name)
+ tdSql.query(f"select * from {stb_name};")
+ tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6)
+
+ def sStbStbDdataAtInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb tb, different data, add columes and tags, result keep first data
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
+ self.resCmp(input_sql, stb_name)
+ s_stb_s_tb_a_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[2]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_a_tag_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6)
+ if self.smlChildTableName_value == "ID":
+ expected_tb_name = self.getNoIdTbName(stb_name)[0]
+ tdSql.checkEqual(tb_name, expected_tb_name)
+ tdSql.query(f"select * from {stb_name};")
+ tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6)
+
+ def sStbStbDdataMtInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb tb, different data, minus columes and tags, result keep first data
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
+ self.resCmp(input_sql, stb_name)
+ s_stb_s_tb_m_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[3]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_m_tag_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(2)
+ if self.smlChildTableName_value == "ID":
+ expected_tb_name = self.getNoIdTbName(stb_name)[0]
+ tdSql.checkEqual(tb_name, expected_tb_name)
+ tdSql.query(f"select * from {stb_name};")
+ tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(2)
+
+ def sStbDtbDdataInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb, different tb, different data
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"")
+ self.resCmp(input_sql, stb_name)
+ s_stb_d_tb_list = self.genSqlList(stb_name=stb_name)[4]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(6)
+
+ def sStbDtbDdataMtInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb, different tb, different data, add col, mul tag
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"")
+ self.resCmp(input_sql, stb_name)
+ s_stb_d_tb_m_tag_list = [(f'{stb_name} 1626006833640 "omfdhyom" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \
+ (f'{stb_name} 1626006833640 "vqowydbc" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \
+ (f'{stb_name} 1626006833640 "plgkckpv" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \
+ (f'{stb_name} 1626006833640 "cujyqvlj" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \
+ (f'{stb_name} 1626006833640 "twjxisat" t0=T t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz')]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_m_tag_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(3)
+
+ def sStbDtbDdataAtInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb, different tb, different data, add tag, mul col
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"")
+ self.resCmp(input_sql, stb_name)
+ s_stb_d_tb_a_tag_list = self.genSqlList(stb_name=stb_name)[6]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_a_tag_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(6)
+
+ def sStbStbDdataDtsInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb tb, different ts
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
+ self.resCmp(input_sql, stb_name)
+ s_stb_s_tb_d_ts_list = [(f'{stb_name} 0 "hkgjiwdj" id={tb_name} t0=f t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="vozamcts" t8=L"ncharTagValue"', 'dwpthv'), \
+ (f'{stb_name} 0 "rljjrrul" id={tb_name} t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="bmcanhbs" t8=L"ncharTagValue"', 'dwpthv'), \
+ (f'{stb_name} 0 "basanglx" id={tb_name} t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="enqkyvmb" t8=L"ncharTagValue"', 'dwpthv'), \
+ (f'{stb_name} 0 "clsajzpp" id={tb_name} t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="eivaegjk" t8=L"ncharTagValue"', 'dwpthv'), \
+ (f'{stb_name} 0 "jitwseso" id={tb_name} t0=T t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="yhlwkddq" t8=L"ncharTagValue"', 'dwpthv')]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(6)
+
+ def sStbStbDdataDtsMtInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb tb, different ts, add col, mul tag
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
+ self.resCmp(input_sql, stb_name)
+ s_stb_s_tb_d_ts_m_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[8]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_m_tag_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(2)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(6)
+ tdSql.query(f"select * from {stb_name} where t8 is not NULL")
+ tdSql.checkRows(6) if self.smlChildTableName_value == "ID" else tdSql.checkRows(1)
+
+ def sStbStbDdataDtsAtInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb tb, different ts, add tag, mul col
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
+ self.resCmp(input_sql, stb_name)
+ s_stb_s_tb_d_ts_a_tag_list = [(f'{stb_name} 0 "clummqfy" id={tb_name} t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="hpxzrdiw" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl'), \
+ (f'{stb_name} 0 "yqeztggb" id={tb_name} t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="gdtblmrc" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl'), \
+ (f'{stb_name} 0 "gbkinqdk" id={tb_name} t0=f t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="iqniuvco" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl'), \
+ (f'{stb_name} 0 "ldxxejbd" id={tb_name} t0=f t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="vxkipags" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl'), \
+ (f'{stb_name} 0 "tlvzwjes" id={tb_name} t0=true t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="enwrlrtj" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl')]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_a_tag_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(6)
+ for t in ["t10", "t11"]:
+ tdSql.query(f"select * from {stb_name} where {t} is not NULL;")
+ tdSql.checkRows(0) if self.smlChildTableName_value == "ID" else tdSql.checkRows(5)
+
+ def sStbDtbDdataDtsInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb, different tb, data, ts
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"")
+ self.resCmp(input_sql, stb_name)
+ s_stb_d_tb_d_ts_list = self.genSqlList(stb_name=stb_name)[10]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_d_ts_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(6)
+
+ def sStbDtbDdataDtsMtInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb, different tb, data, ts, add col, mul tag
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"")
+ self.resCmp(input_sql, stb_name)
+ s_stb_d_tb_d_ts_m_tag_list = [(f'{stb_name} 0 "mnpmtzul" t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \
+ (f'{stb_name} 0 "zbvwckcd" t0=True t1=126i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \
+ (f'{stb_name} 0 "vymcjfwc" t0=False t1=125i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \
+ (f'{stb_name} 0 "laumkwfn" t0=False t1=124i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \
+ (f'{stb_name} 0 "nyultzxr" t0=false t1=123i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg')]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_d_ts_m_tag_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(6)
+
+ def test(self):
+ try:
+ input_sql = f'test_nchar 0 L"涛思数据" t0=f t1=L"涛思数据" t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64'
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ except SchemalessError as err:
+ print(err.errno)
+
+ def runAll(self):
+ self.initCheckCase()
+ self.boolTypeCheckCase()
+ self.symbolsCheckCase()
+ self.tsCheckCase()
+ self.openTstbTelnetTsCheckCase()
+ self.idSeqCheckCase()
+ self.idLetterCheckCase()
+ self.noIdCheckCase()
+ self.maxColTagCheckCase()
+ self.stbTbNameCheckCase()
+ self.idStartWithNumCheckCase()
+ self.nowTsCheckCase()
+ self.dateFormatTsCheckCase()
+ self.illegalTsCheckCase()
+ self.tbnameCheckCase()
+ self.tagNameLengthCheckCase()
+ # self.tagValueLengthCheckCase()
+ self.colValueLengthCheckCase()
+ self.tagColIllegalValueCheckCase()
+ self.blankCheckCase()
+ self.duplicateIdTagColInsertCheckCase()
+ self.noIdStbExistCheckCase()
+ self.duplicateInsertExistCheckCase()
+ self.tagColBinaryNcharLengthCheckCase()
+ self.tagColAddDupIDCheckCase()
+ self.tagColAddCheckCase()
+ self.tagMd5Check()
+ # self.tagColNcharMaxLengthCheckCase()
+ # self.batchInsertCheckCase()
+ # self.multiInsertCheckCase(10)
+ self.batchErrorInsertCheckCase()
+ self.multiColsInsertCheckCase()
+ self.blankColInsertCheckCase()
+ self.blankTagInsertCheckCase()
+ self.chineseCheckCase()
+ self.multiFieldCheckCase()
+ self.spellCheckCase()
+ self.pointTransCheckCase()
+ self.defaultTypeCheckCase()
+ self.tbnameTagsColsNameCheckCase()
+ # # # MultiThreads
+ # self.stbInsertMultiThreadCheckCase()
+ # self.sStbStbDdataInsertMultiThreadCheckCase()
+ # self.sStbStbDdataAtInsertMultiThreadCheckCase()
+ # self.sStbStbDdataMtInsertMultiThreadCheckCase()
+ # self.sStbDtbDdataInsertMultiThreadCheckCase()
+ # self.sStbDtbDdataMtInsertMultiThreadCheckCase()
+ # self.sStbDtbDdataAtInsertMultiThreadCheckCase()
+ # self.sStbStbDdataDtsInsertMultiThreadCheckCase()
+ # # self.sStbStbDdataDtsMtInsertMultiThreadCheckCase()
+ # self.sStbStbDdataDtsAtInsertMultiThreadCheckCase()
+ # self.sStbDtbDdataDtsInsertMultiThreadCheckCase()
+ # self.sStbDtbDdataDtsMtInsertMultiThreadCheckCase()
+
+ def run(self):
+ print("running {}".format(__file__))
+
+ try:
+ self.createDb()
+ self.runAll()
+ # self.createDb(protocol="telnet-tcp")
+ # self.initCheckCase('telnet-tcp')
+ # self.boolTypeCheckCase('telnet-tcp')
+ # self.symbolsCheckCase('telnet-tcp')
+ # self.idSeqCheckCase('telnet-tcp')
+ # self.idLetterCheckCase('telnet-tcp')
+ # self.noIdCheckCase('telnet-tcp')
+ # self.stbTbNameCheckCase('telnet-tcp')
+ # self.idStartWithNumCheckCase('telnet-tcp')
+ # self.pointTransCheckCase('telnet-tcp')
+ # self.tcpKeywordsCheckCase()
+ except Exception as err:
+ print(''.join(traceback.format_exception(None, err, err.__traceback__)))
+ raise err
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/1-insert/performanceInsert.json b/tests/system-test/1-insert/performanceInsert.json
new file mode 100644
index 0000000000000000000000000000000000000000..de410c30f2fa1846d0318def447d1d09aff2cfea
--- /dev/null
+++ b/tests/system-test/1-insert/performanceInsert.json
@@ -0,0 +1,79 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos/",
+ "host": "test216",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 8,
+ "thread_count_create_tbl": 8,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 1000,
+ "num_of_records_per_req": 100000,
+ "databases": [
+ {
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "vgroups": 24
+ },
+ "super_tables": [
+ {
+ "name": "stb",
+ "child_table_exists": "no",
+ "childtable_count": 100000,
+ "childtable_prefix": "stb_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 50000,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows": 5,
+ "interlace_rows": 100000,
+ "insert_interval": 0,
+ "max_sql_len": 10000000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 10,
+ "start_timestamp": "2022-05-01 00:00:00.000",
+ "sample_format": "csv",
+ "use_sample_ts": "no",
+ "tags_file": "",
+ "columns": [
+ {
+ "type": "INT"
+ },
+ {
+ "type": "TINYINT",
+ "count": 1
+ },
+ {"type": "DOUBLE"},
+
+ {
+ "type": "BINARY",
+ "len": 40,
+ "count": 1
+ },
+ {
+ "type": "nchar",
+ "len": 20,
+ "count": 1
+ }
+ ],
+ "tags": [
+ {
+ "type": "TINYINT",
+ "count": 1
+ },
+ {
+ "type": "BINARY",
+ "len": 16,
+ "count": 1
+ }
+ ]
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/tests/system-test/1-insert/performanceQuery.json b/tests/system-test/1-insert/performanceQuery.json
new file mode 100644
index 0000000000000000000000000000000000000000..fe2991bd0f5f74401b437e24b6a6f8e4cd5ed721
--- /dev/null
+++ b/tests/system-test/1-insert/performanceQuery.json
@@ -0,0 +1,42 @@
+{
+ "filetype": "query",
+ "cfgdir": "/etc/taos",
+ "host": "test216",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "confirm_parameter_prompt": "no",
+ "databases": "db",
+ "query_times": 100,
+ "query_mode": "taosc",
+ "specified_table_query": {
+ "query_interval": 0,
+ "threads": 8,
+ "sqls": [
+ {
+ "sql": "select count(*) from stb_0 ",
+ "result": "./query_res0.txt"
+ },
+ {
+ "sql": "select last_row(*) from stb_1 ",
+ "result": "./query_res1.txt"
+ },
+ {
+ "sql": "select last(*) from stb_2 ",
+ "result": "./query_res2.txt"
+ },
+ {
+ "sql": "select first(*) from stb_3 ",
+ "result": "./query_res3.txt"
+ },
+ {
+ "sql": "select avg(c0),min(c2),max(c1) from stb_4",
+ "result": "./query_res4.txt"
+ },
+ {
+ "sql": "select avg(c0),min(c2),max(c1) from stb_5 where ts <= '2022-05-01 20:00:00.500' and ts >= '2022-05-01 00:00:00.000' ",
+ "result": "./query_res5.txt"
+ }
+ ]
+ }
+}
\ No newline at end of file
diff --git a/tests/system-test/1-insert/test_stmt_insert_query_ex.py b/tests/system-test/1-insert/test_stmt_insert_query_ex.py
new file mode 100644
index 0000000000000000000000000000000000000000..376b60d615941323bedcf40d591817e30c8da05a
--- /dev/null
+++ b/tests/system-test/1-insert/test_stmt_insert_query_ex.py
@@ -0,0 +1,282 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+import threading as thd
+import multiprocessing as mp
+from numpy.lib.function_base import insert
+import taos
+from taos import *
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+import datetime as dt
+from datetime import datetime
+from ctypes import *
+import time
+# constant define
+WAITS = 5 # wait seconds
+
+class TDTestCase:
+ #
+ # --------------- main frame -------------------
+ def caseDescription(self):
+ '''
+ limit and offset keyword function test cases;
+ case1: limit offset base function test
+ case2: offset return valid
+ '''
+ return
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root)-len("/build/bin")]
+ break
+ return buildPath
+
+ # init
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+ # tdSql.prepare()
+ # self.create_tables();
+ self.ts = 1500000000000
+
+ # stop
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+ # --------------- case -------------------
+
+
+ def newcon(self,host,cfg):
+ user = "root"
+ password = "taosdata"
+ port =6030
+ con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port)
+ print(con)
+ return con
+
+ def test_stmt_insert_multi(self,conn):
+ # type: (TaosConnection) -> None
+
+ dbname = "pytest_taos_stmt_multi"
+ try:
+ conn.execute("drop database if exists %s" % dbname)
+ conn.execute("create database if not exists %s" % dbname)
+ conn.select_db(dbname)
+
+ conn.execute(
+ "create table if not exists log(ts timestamp, bo bool, nil tinyint, ti tinyint, si smallint, ii int,\
+ bi bigint, tu tinyint unsigned, su smallint unsigned, iu int unsigned, bu bigint unsigned, \
+ ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)",
+ )
+ # conn.load_table_info("log")
+
+ start = datetime.now()
+ stmt = conn.statement("insert into log values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
+
+ params = new_multi_binds(16)
+ params[0].timestamp((1626861392589, 1626861392590, 1626861392591))
+ params[1].bool((True, None, False))
+ params[2].tinyint([-128, -128, None]) # -128 is tinyint null
+ params[3].tinyint([0, 127, None])
+ params[4].smallint([3, None, 2])
+ params[5].int([3, 4, None])
+ params[6].bigint([3, 4, None])
+ params[7].tinyint_unsigned([3, 4, None])
+ params[8].smallint_unsigned([3, 4, None])
+ params[9].int_unsigned([3, 4, None])
+ params[10].bigint_unsigned([3, 4, None])
+ params[11].float([3, None, 1])
+ params[12].double([3, None, 1.2])
+ params[13].binary(["abc", "dddafadfadfadfadfa", None])
+ params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
+ params[15].timestamp([None, None, 1626861392591])
+ # print(type(stmt))
+ stmt.bind_param_batch(params)
+ stmt.execute()
+ end = datetime.now()
+ print("elapsed time: ", end - start)
+ assert stmt.affected_rows == 3
+
+ #query
+ querystmt=conn.statement("select ?,bu from log")
+ queryparam=new_bind_params(1)
+ print(type(queryparam))
+ queryparam[0].binary("ts")
+ querystmt.bind_param(queryparam)
+ querystmt.execute()
+ result=querystmt.use_result()
+ # rows=result.fetch_all()
+ # print( querystmt.use_result())
+
+ # result = conn.query("select * from log")
+ rows=result.fetch_all()
+ # rows=result.fetch_all()
+ print(rows)
+ assert rows[1][0] == "ts"
+ assert rows[0][1] == 3
+
+ #query
+ querystmt1=conn.statement("select * from log where bu < ?")
+ queryparam1=new_bind_params(1)
+ print(type(queryparam1))
+ queryparam1[0].int(4)
+ querystmt1.bind_param(queryparam1)
+ querystmt1.execute()
+ result1=querystmt1.use_result()
+ rows1=result1.fetch_all()
+ assert str(rows1[0][0]) == "2021-07-21 17:56:32.589000"
+ assert rows1[0][10] == 3
+
+
+ stmt.close()
+
+ # conn.execute("drop database if exists %s" % dbname)
+ conn.close()
+
+ except Exception as err:
+ # conn.execute("drop database if exists %s" % dbname)
+ conn.close()
+ raise err
+
+ def test_stmt_set_tbname_tag(self,conn):
+ dbname = "pytest_taos_stmt_set_tbname_tag"
+
+ try:
+ conn.execute("drop database if exists %s" % dbname)
+ conn.execute("create database if not exists %s PRECISION 'us' " % dbname)
+ conn.select_db(dbname)
+ conn.execute("create table if not exists log(ts timestamp, bo bool, nil tinyint, ti tinyint, si smallint, ii int,\
+ bi bigint, tu tinyint unsigned, su smallint unsigned, iu int unsigned, bu bigint unsigned, \
+ ff float, dd double, bb binary(100), nn nchar(100), tt timestamp) tags (t1 timestamp, t2 bool,\
+ t3 tinyint, t4 tinyint, t5 smallint, t6 int, t7 bigint, t8 tinyint unsigned, t9 smallint unsigned, \
+ t10 int unsigned, t11 bigint unsigned, t12 float, t13 double, t14 binary(100), t15 nchar(100), t16 timestamp)")
+
+ stmt = conn.statement("insert into ? using log tags (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?) \
+ values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
+ tags = new_bind_params(16)
+ tags[0].timestamp(1626861392589123, PrecisionEnum.Microseconds)
+ tags[1].bool(True)
+ tags[2].null()
+ tags[3].tinyint(2)
+ tags[4].smallint(3)
+ tags[5].int(4)
+ tags[6].bigint(5)
+ tags[7].tinyint_unsigned(6)
+ tags[8].smallint_unsigned(7)
+ tags[9].int_unsigned(8)
+ tags[10].bigint_unsigned(9)
+ tags[11].float(10.1)
+ tags[12].double(10.11)
+ tags[13].binary("hello")
+ tags[14].nchar("stmt")
+ tags[15].timestamp(1626861392589, PrecisionEnum.Milliseconds)
+ stmt.set_tbname_tags("tb1", tags)
+ params = new_multi_binds(16)
+ params[0].timestamp((1626861392589111, 1626861392590111, 1626861392591111))
+ params[1].bool((True, None, False))
+ params[2].tinyint([-128, -128, None]) # -128 is tinyint null
+ params[3].tinyint([0, 127, None])
+ params[4].smallint([3, None, 2])
+ params[5].int([3, 4, None])
+ params[6].bigint([3, 4, None])
+ params[7].tinyint_unsigned([3, 4, None])
+ params[8].smallint_unsigned([3, 4, None])
+ params[9].int_unsigned([3, 4, None])
+ params[10].bigint_unsigned([3, 4, 5])
+ params[11].float([3, None, 1])
+ params[12].double([3, None, 1.2])
+ params[13].binary(["abc", "dddafadfadfadfadfa", None])
+ params[14].nchar(["涛思数据", None, "a? long string with 中文字符"])
+ params[15].timestamp([None, None, 1626861392591])
+
+ stmt.bind_param_batch(params)
+ stmt.execute()
+
+ assert stmt.affected_rows == 3
+
+ #query
+ querystmt1=conn.statement("select * from log where bu < ?")
+ queryparam1=new_bind_params(1)
+ print(type(queryparam1))
+ queryparam1[0].int(5)
+ querystmt1.bind_param(queryparam1)
+ querystmt1.execute()
+ result1=querystmt1.use_result()
+ rows1=result1.fetch_all()
+ print("1",rows1)
+
+ querystmt2=conn.statement("select abs(?) from log where bu < ?")
+ queryparam2=new_bind_params(2)
+ print(type(queryparam2))
+ queryparam2[0].int(5)
+ queryparam2[1].int(5)
+ querystmt2.bind_param(queryparam2)
+ querystmt2.execute()
+ result2=querystmt2.use_result()
+ rows2=result2.fetch_all()
+ print("2",rows2)
+
+ querystmt3=conn.statement("select abs(?) from log where nn= 'a? long string with 中文字符' ")
+ queryparam3=new_bind_params(1)
+ print(type(queryparam3))
+ queryparam3[0].int(5)
+ querystmt3.bind_param(queryparam3)
+ querystmt3.execute()
+ result3=querystmt3.use_result()
+ rows3=result3.fetch_all()
+ print("3",rows3)
+ # assert str(rows1[0][0]) == "2021-07-21 17:56:32.589111"
+ # assert rows1[0][10] == 3
+ # assert rows1[1][10] == 4
+
+ # conn.execute("drop database if exists %s" % dbname)
+ conn.close()
+
+ except Exception as err:
+ # conn.execute("drop database if exists %s" % dbname)
+ conn.close()
+ raise err
+
+ def run(self):
+ buildPath = self.getBuildPath()
+ config = buildPath+ "../sim/dnode1/cfg/"
+ host="localhost"
+ connectstmt=self.newcon(host,config)
+ self.test_stmt_insert_multi(connectstmt)
+ connectstmt=self.newcon(host,config)
+ self.test_stmt_set_tbname_tag(connectstmt)
+
+ return
+
+
+# add case with filename
+#
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/system-test/1-insert/test_stmt_muti_insert_query.py b/tests/system-test/1-insert/test_stmt_muti_insert_query.py
new file mode 100644
index 0000000000000000000000000000000000000000..486bcd806219c73fa344e5422727c46fe03cde5e
--- /dev/null
+++ b/tests/system-test/1-insert/test_stmt_muti_insert_query.py
@@ -0,0 +1,181 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+import threading as thd
+import multiprocessing as mp
+from numpy.lib.function_base import insert
+import taos
+from taos import *
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+import datetime as dt
+from datetime import datetime
+from ctypes import *
+import time
+# constant define
+WAITS = 5 # wait seconds
+
+class TDTestCase:
+ #
+ # --------------- main frame -------------------
+ def caseDescription(self):
+ '''
+ limit and offset keyword function test cases;
+ case1: limit offset base function test
+ case2: offset return valid
+ '''
+ return
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root)-len("/build/bin")]
+ break
+ return buildPath
+
+ # init
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+ # tdSql.prepare()
+ # self.create_tables();
+ self.ts = 1500000000000
+
+ # stop
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+ # --------------- case -------------------
+
+
+ def newcon(self,host,cfg):
+ user = "root"
+ password = "taosdata"
+ port =6030
+ con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port)
+ print(con)
+ return con
+
+ def test_stmt_insert_multi(self,conn):
+ # type: (TaosConnection) -> None
+
+ dbname = "pytest_taos_stmt_multi"
+ try:
+ conn.execute("drop database if exists %s" % dbname)
+ conn.execute("create database if not exists %s" % dbname)
+ conn.select_db(dbname)
+
+ conn.execute(
+ "create table if not exists log(ts timestamp, bo bool, nil tinyint, ti tinyint, si smallint, ii int,\
+ bi bigint, tu tinyint unsigned, su smallint unsigned, iu int unsigned, bu bigint unsigned, \
+ ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)",
+ )
+ # conn.load_table_info("log")
+
+ start = datetime.now()
+ stmt = conn.statement("insert into log values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
+
+ params = new_multi_binds(16)
+ params[0].timestamp((1626861392589, 1626861392590, 1626861392591))
+ params[1].bool((True, None, False))
+ params[2].tinyint([-128, -128, None]) # -128 is tinyint null
+ params[3].tinyint([0, 127, None])
+ params[4].smallint([3, None, 2])
+ params[5].int([3, 4, None])
+ params[6].bigint([3, 4, None])
+ params[7].tinyint_unsigned([3, 4, None])
+ params[8].smallint_unsigned([3, 4, None])
+ params[9].int_unsigned([3, 4, None])
+ params[10].bigint_unsigned([3, 4, None])
+ params[11].float([3, None, 1])
+ params[12].double([3, None, 1.2])
+ params[13].binary(["abc", "dddafadfadfadfadfa", None])
+ params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
+ params[15].timestamp([None, None, 1626861392591])
+ # print(type(stmt))
+ stmt.bind_param_batch(params)
+ stmt.execute()
+ end = datetime.now()
+ print("elapsed time: ", end - start)
+ assert stmt.affected_rows == 3
+
+ #query
+ querystmt=conn.statement("select ?,bu from log")
+ queryparam=new_bind_params(1)
+ print(type(queryparam))
+ queryparam[0].binary("ts")
+ querystmt.bind_param(queryparam)
+ querystmt.execute()
+ result=querystmt.use_result()
+ # rows=result.fetch_all()
+ # print( querystmt.use_result())
+
+ # result = conn.query("select * from log")
+ rows=result.fetch_all()
+ # rows=result.fetch_all()
+ print(rows)
+ assert rows[1][0] == "ts"
+ assert rows[0][1] == 3
+
+ #query
+ querystmt1=conn.statement("select * from log where bu < ?")
+ queryparam1=new_bind_params(1)
+ print(type(queryparam1))
+ queryparam1[0].int(4)
+ querystmt1.bind_param(queryparam1)
+ querystmt1.execute()
+ result1=querystmt1.use_result()
+ rows1=result1.fetch_all()
+ print(rows1)
+ assert str(rows1[0][0]) == "2021-07-21 17:56:32.589000"
+ assert rows1[0][10] == 3
+
+
+ stmt.close()
+
+ # conn.execute("drop database if exists %s" % dbname)
+ conn.close()
+
+ except Exception as err:
+ # conn.execute("drop database if exists %s" % dbname)
+ conn.close()
+ raise err
+
+ def run(self):
+ buildPath = self.getBuildPath()
+ config = buildPath+ "../sim/dnode1/cfg/"
+ host="localhost"
+ connectstmt=self.newcon(host,config)
+ self.test_stmt_insert_multi(connectstmt)
+ return
+
+
+# add case with filename
+#
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/system-test/1-insert/test_stmt_set_tbname_tag.py b/tests/system-test/1-insert/test_stmt_set_tbname_tag.py
new file mode 100644
index 0000000000000000000000000000000000000000..54d5cfbafb0b3f98d55f310accccb19ef693c08b
--- /dev/null
+++ b/tests/system-test/1-insert/test_stmt_set_tbname_tag.py
@@ -0,0 +1,176 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+import threading as thd
+import multiprocessing as mp
+from numpy.lib.function_base import insert
+import taos
+from taos import *
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+import datetime as dt
+from datetime import datetime
+from ctypes import *
+import time
+# constant define
+WAITS = 5 # wait seconds
+
+class TDTestCase:
+ #
+ # --------------- main frame -------------------
+ def caseDescription(self):
+ '''
+ limit and offset keyword function test cases;
+ case1: limit offset base function test
+ case2: offset return valid
+ '''
+ return
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root)-len("/build/bin")]
+ break
+ return buildPath
+
+ # init
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+ # tdSql.prepare()
+ # self.create_tables();
+ self.ts = 1500000000000
+
+ # stop
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+ # --------------- case -------------------
+
+
+ def newcon(self,host,cfg):
+ user = "root"
+ password = "taosdata"
+ port =6030
+ con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port)
+ print(con)
+ return con
+
+ def test_stmt_set_tbname_tag(self,conn):
+ dbname = "pytest_taos_stmt_set_tbname_tag"
+
+ try:
+ conn.execute("drop database if exists %s" % dbname)
+ conn.execute("create database if not exists %s PRECISION 'us' " % dbname)
+ conn.select_db(dbname)
+ conn.execute("create table if not exists log(ts timestamp, bo bool, nil tinyint, ti tinyint, si smallint, ii int,\
+ bi bigint, tu tinyint unsigned, su smallint unsigned, iu int unsigned, bu bigint unsigned, \
+ ff float, dd double, bb binary(100), nn nchar(100), tt timestamp , vc varchar(100)) tags (t1 timestamp, t2 bool,\
+ t3 tinyint, t4 tinyint, t5 smallint, t6 int, t7 bigint, t8 tinyint unsigned, t9 smallint unsigned, \
+ t10 int unsigned, t11 bigint unsigned, t12 float, t13 double, t14 binary(100), t15 nchar(100), t16 timestamp)")
+
+ stmt = conn.statement("insert into ? using log tags (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?) \
+ values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
+ tags = new_bind_params(16)
+ tags[0].timestamp(1626861392589123, PrecisionEnum.Microseconds)
+ tags[1].bool(True)
+ tags[2].null()
+ tags[3].tinyint(2)
+ tags[4].smallint(3)
+ tags[5].int(4)
+ tags[6].bigint(5)
+ tags[7].tinyint_unsigned(6)
+ tags[8].smallint_unsigned(7)
+ tags[9].int_unsigned(8)
+ tags[10].bigint_unsigned(9)
+ tags[11].float(10.1)
+ tags[12].double(10.11)
+ tags[13].binary("hello")
+ tags[14].nchar("stmt")
+ tags[15].timestamp(1626861392589, PrecisionEnum.Milliseconds)
+ stmt.set_tbname_tags("tb1", tags)
+ params = new_multi_binds(16)
+ params[0].timestamp((1626861392589111, 1626861392590111, 1626861392591111))
+ params[1].bool((True, None, False))
+ params[2].tinyint([-128, -128, None]) # -128 is tinyint null
+ params[3].tinyint([0, 127, None])
+ params[4].smallint([3, None, 2])
+ params[5].int([3, 4, None])
+ params[6].bigint([3, 4, None])
+ params[7].tinyint_unsigned([3, 4, None])
+ params[8].smallint_unsigned([3, 4, None])
+ params[9].int_unsigned([3, 4, None])
+ params[10].bigint_unsigned([3, 4, 5])
+ params[11].float([3, None, 1])
+ params[12].double([3, None, 1.2])
+ params[13].binary(["abc", "dddafadfadfadfadfa", None])
+ params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
+ params[15].timestamp([None, None, 1626861392591])
+ params[16].binary(["涛思数据16", None, "a long string with 中文-字符"])
+
+ stmt.bind_param_batch(params)
+ stmt.execute()
+
+ assert stmt.affected_rows == 3
+
+ #query
+ querystmt1=conn.statement("select * from log where bu < ?")
+ queryparam1=new_bind_params(1)
+ print(type(queryparam1))
+ queryparam1[0].int(5)
+ querystmt1.bind_param(queryparam1)
+ querystmt1.execute()
+ result1=querystmt1.use_result()
+ rows1=result1.fetch_all()
+ print(rows1)
+ # assert str(rows1[0][0]) == "2021-07-21 17:56:32.589111"
+ # assert rows1[0][10] == 3
+ # assert rows1[1][10] == 4
+
+ # conn.execute("drop database if exists %s" % dbname)
+ conn.close()
+
+ except Exception as err:
+ # conn.execute("drop database if exists %s" % dbname)
+ conn.close()
+ raise err
+
+ def run(self):
+ buildPath = self.getBuildPath()
+ config = buildPath+ "../sim/dnode1/cfg/"
+ host="localhost"
+ connectstmt=self.newcon(host,config)
+ self.test_stmt_set_tbname_tag(connectstmt)
+
+ return
+
+
+# add case with filename
+#
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/system-test/2-query/To_iso8601.py b/tests/system-test/2-query/To_iso8601.py
index cd22ffb90c1fbf86e81dfabecbcb1ae0e536cd39..57bcca638ce26aace35d76707c12699fe2e8d1c4 100644
--- a/tests/system-test/2-query/To_iso8601.py
+++ b/tests/system-test/2-query/To_iso8601.py
@@ -95,7 +95,7 @@ class TDTestCase:
# tdSql.query("select to_iso8601(-1) from ntb")
tdSql.query("select to_iso8601(9223372036854775807) from ntb")
tdSql.checkRows(3)
-
+ # bug TD-14896
# tdSql.query("select to_iso8601(10000000000) from ntb")
# tdSql.checkData(0,0,None)
# tdSql.query("select to_iso8601(-1) from ntb")
@@ -106,11 +106,6 @@ class TDTestCase:
tdSql.error("select to_iso8601(1.5) from db.ntb")
tdSql.error("select to_iso8601('a') from ntb")
tdSql.error("select to_iso8601(c2) from ntb")
-
-
-
-
-
tdSql.query("select to_iso8601(now) from stb")
tdSql.query("select to_iso8601(now()) from stb")
tdSql.checkRows(3)
@@ -126,7 +121,7 @@ class TDTestCase:
tdSql.checkRows(3)
tdSql.query("select to_iso8601(ts)+'a' from stb ")
tdSql.checkRows(3)
- # tdSql.query()
+
tdSql.query("select to_iso8601(today()) *null from stb")
tdSql.checkRows(3)
tdSql.checkData(0,0,None)
@@ -152,7 +147,9 @@ class TDTestCase:
tdSql.checkRows(3)
tdSql.checkData(0,0,None)
+ # bug TD-14896
# tdSql.query("select to_iso8601(-1) from ntb")
+ # tdSql.checkRows(3)
diff --git a/tests/system-test/2-query/apercentile.py b/tests/system-test/2-query/apercentile.py
new file mode 100644
index 0000000000000000000000000000000000000000..150c4d3f17e30ab5f4d25fb19af2bb80ee202776
--- /dev/null
+++ b/tests/system-test/2-query/apercentile.py
@@ -0,0 +1,107 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+ self.rowNum = 10
+ self.ts = 1537146000000
+
+ def check_apercentile(self,data,expect_data,param,percent,column):
+ if param == "default":
+ if abs((expect_data-data) <= expect_data * 0.2):
+ tdLog.info(f"apercentile function values check success with col{column}, param = {param},percent = {percent}")
+ else:
+ tdLog.notice(f"apercentile function value has not as expected with col{column}, param = {param},percent = {percent}")
+ sys.exit(1)
+ elif param == "t-digest":
+ if abs((expect_data-data) <= expect_data * 0.2):
+ tdLog.info(f"apercentile function values check success with col{column}, param = {param},percent = {percent}")
+ else:
+ tdLog.notice(f"apercentile function value has not as expected with col{column}, param = {param},percent = {percent}")
+ sys.exit(1)
+
+ def run(self):
+ tdSql.prepare()
+
+ intData = []
+ floatData = []
+ percent_list = [0,50,100]
+ param_list = ['default','t-digest']
+ tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''')
+ for i in range(self.rowNum):
+ tdSql.execute("insert into test values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+ % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
+ intData.append(i + 1)
+ floatData.append(i + 0.1)
+
+ # percentile verifacation
+
+ tdSql.error("select apercentile(ts ,20) from test")
+ tdSql.error("select apercentile(col7 ,20) from test")
+ tdSql.error("select apercentile(col8 ,20) from test")
+ tdSql.error("select apercentile(col9 ,20) from test")
+
+ column_list = [1,2,3,4,5,6,11,12,13,14]
+
+ for i in column_list:
+ for j in percent_list:
+ for k in param_list:
+ tdSql.query(f"select apercentile(col{i},{j},'{k}') from test")
+ data = tdSql.getData(0, 0)
+ tdSql.query(f"select percentile(col{i},{j}) from test")
+ expect_data = tdSql.getData(0, 0)
+ self.check_apercentile(data,expect_data,k,j,i)
+
+ error_param_list = [-1,101,'"a"']
+ for i in error_param_list:
+ tdSql.error(f'select apercentile(col1,{i}) from test')
+
+ tdSql.execute("create table meters (ts timestamp, voltage int) tags(loc nchar(20))")
+ tdSql.execute("create table t0 using meters tags('beijing')")
+ tdSql.execute("create table t1 using meters tags('shanghai')")
+ for i in range(self.rowNum):
+ tdSql.execute("insert into t0 values(%d, %d)" % (self.ts + i, i + 1))
+ tdSql.execute("insert into t1 values(%d, %d)" % (self.ts + i, i + 1))
+
+ column_list = ['voltage']
+ for i in column_list:
+ for j in percent_list:
+ for k in param_list:
+ tdSql.query(f"select apercentile({i}, {j},'{k}') from t0")
+ data = tdSql.getData(0, 0)
+ tdSql.query(f"select percentile({i},{j}) from t0")
+ expect_data = tdSql.getData(0,0)
+ self.check_apercentile(data,expect_data,k,j,i)
+ tdSql.query(f"select apercentile({i}, {j},'{k}') from meters")
+ tdSql.checkRows(1)
+ table_list = ["meters","t0"]
+ for i in error_param_list:
+ for j in table_list:
+ for k in column_list:
+ tdSql.error(f'select apercentile({k},{i}) from {j}')
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/2-query/avg.py b/tests/system-test/2-query/avg.py
new file mode 100644
index 0000000000000000000000000000000000000000..20ee6df7fcf94e3b02641b735c6ad7fd1ce862ff
--- /dev/null
+++ b/tests/system-test/2-query/avg.py
@@ -0,0 +1,424 @@
+import taos
+import sys
+import datetime
+import inspect
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+
+class TDTestCase:
+ updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
+ "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
+ "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143}
+ def init(self, conn, logSql):
+ tdLog.debug(f"start to excute {__file__}")
+ tdSql.init(conn.cursor(), True)
+
+ def prepare_datas(self):
+ tdSql.execute(
+ '''create table stb1
+ (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
+ tags (t1 int)
+ '''
+ )
+
+ tdSql.execute(
+ '''
+ create table t1
+ (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
+ '''
+ )
+ for i in range(4):
+ tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+
+ for i in range(9):
+ tdSql.execute(
+ f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ )
+ tdSql.execute(
+ f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ )
+ tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+
+ tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+
+ tdSql.execute(
+ f'''insert into t1 values
+ ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
+ ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
+ ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
+ ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
+ ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
+ ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
+ ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
+ ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
+ ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
+ ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ '''
+ )
+
+ def check_avg(self ,origin_query , check_query):
+ avg_result = tdSql.getResult(origin_query)
+ origin_result = tdSql.getResult(check_query)
+
+ check_status = True
+ for row_index , row in enumerate(avg_result):
+ for col_index , elem in enumerate(row):
+ if avg_result[row_index][col_index] != origin_result[row_index][col_index]:
+ check_status = False
+ if not check_status:
+ tdLog.notice("avg function value has not as expected , sql is \"%s\" "%origin_query )
+ sys.exit(1)
+ else:
+ tdLog.info("avg value check pass , it work as expected ,sql is \"%s\" "%check_query )
+
+ def test_errors(self):
+ error_sql_lists = [
+ "select avg from t1",
+ # "select avg(-+--+c1) from t1",
+ # "select +-avg(c1) from t1",
+ # "select ++-avg(c1) from t1",
+ # "select ++--avg(c1) from t1",
+ # "select - -avg(c1)*0 from t1",
+ # "select avg(tbname+1) from t1 ",
+ "select avg(123--123)==1 from t1",
+ "select avg(c1) as 'd1' from t1",
+ "select avg(c1 ,c2 ) from t1",
+ "select avg(c1 ,NULL) from t1",
+ "select avg(,) from t1;",
+ "select avg(avg(c1) ab from t1)",
+ "select avg(c1) as int from t1",
+ "select avg from stb1",
+ # "select avg(-+--+c1) from stb1",
+ # "select +-avg(c1) from stb1",
+ # "select ++-avg(c1) from stb1",
+ # "select ++--avg(c1) from stb1",
+ # "select - -avg(c1)*0 from stb1",
+ # "select avg(tbname+1) from stb1 ",
+ "select avg(123--123)==1 from stb1",
+ "select avg(c1) as 'd1' from stb1",
+ "select avg(c1 ,c2 ) from stb1",
+ "select avg(c1 ,NULL) from stb1",
+ "select avg(,) from stb1;",
+ "select avg(avg(c1) ab from stb1)",
+ "select avg(c1) as int from stb1"
+ ]
+ for error_sql in error_sql_lists:
+ tdSql.error(error_sql)
+
+ def support_types(self):
+ type_error_sql_lists = [
+ "select avg(ts) from t1" ,
+ "select avg(c7) from t1",
+ "select avg(c8) from t1",
+ "select avg(c9) from t1",
+ "select avg(ts) from ct1" ,
+ "select avg(c7) from ct1",
+ "select avg(c8) from ct1",
+ "select avg(c9) from ct1",
+ "select avg(ts) from ct3" ,
+ "select avg(c7) from ct3",
+ "select avg(c8) from ct3",
+ "select avg(c9) from ct3",
+ "select avg(ts) from ct4" ,
+ "select avg(c7) from ct4",
+ "select avg(c8) from ct4",
+ "select avg(c9) from ct4",
+ "select avg(ts) from stb1" ,
+ "select avg(c7) from stb1",
+ "select avg(c8) from stb1",
+ "select avg(c9) from stb1" ,
+
+ "select avg(ts) from stbbb1" ,
+ "select avg(c7) from stbbb1",
+
+ "select avg(ts) from tbname",
+ "select avg(c9) from tbname"
+
+ ]
+
+ for type_sql in type_error_sql_lists:
+ tdSql.error(type_sql)
+
+
+ type_sql_lists = [
+ "select avg(c1) from t1",
+ "select avg(c2) from t1",
+ "select avg(c3) from t1",
+ "select avg(c4) from t1",
+ "select avg(c5) from t1",
+ "select avg(c6) from t1",
+
+ "select avg(c1) from ct1",
+ "select avg(c2) from ct1",
+ "select avg(c3) from ct1",
+ "select avg(c4) from ct1",
+ "select avg(c5) from ct1",
+ "select avg(c6) from ct1",
+
+ "select avg(c1) from ct3",
+ "select avg(c2) from ct3",
+ "select avg(c3) from ct3",
+ "select avg(c4) from ct3",
+ "select avg(c5) from ct3",
+ "select avg(c6) from ct3",
+
+ "select avg(c1) from stb1",
+ "select avg(c2) from stb1",
+ "select avg(c3) from stb1",
+ "select avg(c4) from stb1",
+ "select avg(c5) from stb1",
+ "select avg(c6) from stb1",
+
+ "select avg(c6) as alisb from stb1",
+ "select avg(c6) alisb from stb1",
+ ]
+
+ for type_sql in type_sql_lists:
+ tdSql.query(type_sql)
+
+ def basic_avg_function(self):
+
+ # basic query
+ tdSql.query("select c1 from ct3")
+ tdSql.checkRows(0)
+ tdSql.query("select c1 from t1")
+ tdSql.checkRows(12)
+ tdSql.query("select c1 from stb1")
+ tdSql.checkRows(25)
+
+ # used for empty table , ct3 is empty
+ tdSql.query("select avg(c1) from ct3")
+ tdSql.checkRows(0)
+ tdSql.query("select avg(c2) from ct3")
+ tdSql.checkRows(0)
+ tdSql.query("select avg(c3) from ct3")
+ tdSql.checkRows(0)
+ tdSql.query("select avg(c4) from ct3")
+ tdSql.checkRows(0)
+ tdSql.query("select avg(c5) from ct3")
+ tdSql.checkRows(0)
+ tdSql.query("select avg(c6) from ct3")
+
+ # used for regular table
+ tdSql.query("select avg(c1) from t1")
+ tdSql.checkData(0, 0, 5.000000000)
+
+
+ tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1")
+ tdSql.checkData(1, 5, 1.11000)
+ tdSql.checkData(3, 4, 33)
+ tdSql.checkData(5, 5, None)
+ self.check_avg(" select avg(c1) , avg(c2) , avg(c3) from t1 " , " select sum(c1)/count(c1) , sum(c2)/count(c2) , sum(c3)/count(c3) from t1 ")
+
+ # used for sub table
+ tdSql.query("select avg(c1) from ct1")
+ tdSql.checkData(0, 0, 4.846153846)
+
+ tdSql.query("select avg(c1) from ct3")
+ tdSql.checkRows(0)
+
+ self.check_avg(" select avg(abs(c1)) , avg(abs(c2)) , avg(abs(c3)) from t1 " , " select sum(abs(c1))/count(c1) , sum(abs(c2))/count(c2) , sum(abs(c3))/count(c3) from t1 ")
+ self.check_avg(" select avg(abs(c1)) , avg(abs(c2)) , avg(abs(c3)) from stb1 " , " select sum(abs(c1))/count(c1) , sum(abs(c2))/count(c2) , sum(abs(c3))/count(c3) from stb1 ")
+
+ # used for stable table
+
+ tdSql.query("select avg(c1) from stb1")
+ tdSql.checkRows(1)
+
+ self.check_avg(" select avg(abs(ceil(c1))) , avg(abs(ceil(c2))) , avg(abs(ceil(c3))) from stb1 " , " select sum(abs(ceil(c1)))/count(c1) , sum(abs(ceil(c2)))/count(c2) , sum(abs(ceil(c3)))/count(c3) from stb1 ")
+
+ # used for not exists table
+ tdSql.error("select avg(c1) from stbbb1")
+ tdSql.error("select avg(c1) from tbname")
+ tdSql.error("select avg(c1) from ct5")
+
+ # mix with common col
+ tdSql.error("select c1, avg(c1) from ct1")
+ tdSql.error("select c1, avg(c1) from ct4")
+
+
+ # mix with common functions
+ tdSql.error("select c1, avg(c1),c5, floor(c5) from ct4 ")
+ tdSql.error("select c1, avg(c1),c5, floor(c5) from stb1 ")
+
+ # mix with agg functions , not support
+ tdSql.error("select c1, avg(c1),c5, count(c5) from stb1 ")
+ tdSql.error("select c1, avg(c1),c5, count(c5) from ct1 ")
+ tdSql.error("select c1, count(c5) from ct1 ")
+ tdSql.error("select c1, count(c5) from stb1 ")
+
+ # agg functions mix with agg functions
+
+ tdSql.query(" select max(c5), count(c5) , avg(c5) from stb1 ")
+ tdSql.checkData(0, 0, 8.88000 )
+ tdSql.checkData(0, 1, 22 )
+ tdSql.checkData(0, 2, 2.270454591 )
+
+ tdSql.query(" select max(c5), count(c5) , avg(c5) ,elapsed(ts) , spread(c1) from ct1; ")
+ tdSql.checkData(0, 0, 8.88000 )
+ tdSql.checkData(0, 1, 13 )
+ tdSql.checkData(0, 2, 0.768461603 )
+
+ # bug fix for count
+ tdSql.query("select count(c1) from ct4 ")
+ tdSql.checkData(0,0,9)
+ tdSql.query("select count(*) from ct4 ")
+ tdSql.checkData(0,0,12)
+ tdSql.query("select count(c1) from stb1 ")
+ tdSql.checkData(0,0,22)
+ tdSql.query("select count(*) from stb1 ")
+ tdSql.checkData(0,0,25)
+
+ # bug fix for compute
+ tdSql.error("select c1, avg(c1) -0 ,ceil(c1)-0 from ct4 ")
+ tdSql.error(" select c1, avg(c1) -0 ,avg(ceil(c1-0.1))-0.1 from ct4")
+
+ # mix with nest query
+ self.check_avg("select avg(col) from (select abs(c1) col from stb1)" , "select avg(abs(c1)) from stb1")
+ self.check_avg("select avg(col) from (select ceil(abs(c1)) col from stb1)" , "select avg(abs(c1)) from stb1")
+
+ tdSql.query(" select abs(avg(abs(abs(c1)))) from stb1 ")
+ tdSql.checkData(0, 0, 4.500000000)
+ tdSql.query(" select abs(avg(abs(abs(c1)))) from t1 ")
+ tdSql.checkData(0, 0, 5.000000000)
+
+ tdSql.query(" select abs(avg(abs(abs(c1)))) from stb1 ")
+ tdSql.checkData(0, 0, 4.500000000)
+
+ tdSql.query(" select avg(c1) from stb1 where c1 is null ")
+ tdSql.checkRows(0)
+
+
+ def avg_func_filter(self):
+ tdSql.execute("use db")
+ tdSql.query(" select avg(c1), avg(c1) -0 ,avg(ceil(c1-0.1))-0 ,avg(floor(c1+0.1))-0.1 ,avg(ceil(log(c1,2)-0.5)) from ct4 where c1>5 ")
+ tdSql.checkRows(1)
+ tdSql.checkData(0,0,7.000000000)
+ tdSql.checkData(0,1,7.000000000)
+ tdSql.checkData(0,2,7.000000000)
+ tdSql.checkData(0,3,6.900000000)
+ tdSql.checkData(0,4,3.000000000)
+
+ tdSql.query("select avg(c1), avg(c1) -0 ,avg(ceil(c1-0.1))-0 ,avg(floor(c1+0.1))-0.1 ,avg(ceil(log(c1,2)-0.5)) from ct4 where c1=5 ")
+ tdSql.checkRows(1)
+ tdSql.checkData(0,0,5.000000000)
+ tdSql.checkData(0,1,5.000000000)
+ tdSql.checkData(0,2,5.000000000)
+ tdSql.checkData(0,3,4.900000000)
+ tdSql.checkData(0,4,2.000000000)
+
+ tdSql.query("select avg(c1) ,avg(c2) , avg(c1) -0 , avg(ceil(c1-0.1))-0 ,avg(floor(c1+0.1))-0.1 ,avg(ceil(log(c1,2))-0.5) from ct4 where c1>log(c1,2) limit 1 ")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 4.500000000)
+ tdSql.checkData(0, 1, 49999.500000000)
+ tdSql.checkData(0, 5, 1.625000000)
+
+ def avg_Arithmetic(self):
+ pass
+
+ def check_boundary_values(self):
+
+ tdSql.execute("drop database if exists bound_test")
+ tdSql.execute("create database if not exists bound_test")
+ time.sleep(3)
+ tdSql.execute("use bound_test")
+ tdSql.execute(
+ "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
+ )
+ tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )')
+ tdSql.execute(
+ f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ )
+ tdSql.execute(
+ f"insert into sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ )
+ tdSql.execute(
+ f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ )
+
+ tdSql.execute(
+ f"insert into sub1_bound values ( now(), 2147483645, 9223372036854775805, 32765, 125, 3.40E+37, 1.7e+307, True, 'binary_tb1', 'nchar_tb1', now() )"
+ )
+
+ tdSql.execute(
+ f"insert into sub1_bound values ( now(), 2147483644, 9223372036854775804, 32764, 124, 3.40E+37, 1.7e+307, True, 'binary_tb1', 'nchar_tb1', now() )"
+ )
+
+ tdSql.execute(
+ f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ )
+ tdSql.execute(
+ f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ )
+
+
+ tdSql.error(
+ f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ )
+ self.check_avg("select avg(c1), avg(c2), avg(c3) , avg(c4), avg(c5) ,avg(c6) from sub1_bound " , " select sum(c1)/count(c1), sum(c2)/count(c2) ,sum(c3)/count(c3), sum(c4)/count(c4), sum(c5)/count(c5) ,sum(c6)/count(c6) from sub1_bound ")
+
+
+ # check basic elem for table per row
+ tdSql.query("select avg(c1) ,avg(c2) , avg(c3) , avg(c4), avg(c5), avg(c6) from sub1_bound ")
+ tdSql.checkRows(1)
+ tdSql.checkData(0,0,920350133.571428537)
+ tdSql.checkData(0,1,1.3176245766935393e+18)
+ tdSql.checkData(0,2,14042.142857143)
+ tdSql.checkData(0,3,53.571428571)
+ tdSql.checkData(0,4,5.828571332045761e+37)
+ # tdSql.checkData(0,5,None)
+
+
+ # check + - * / in functions
+ tdSql.query(" select avg(c1+1) ,avg(c2) , avg(c3*1) , avg(c4/2), avg(c5)/2, avg(c6) from sub1_bound ")
+ tdSql.checkData(0,0,920350134.5714285)
+ tdSql.checkData(0,1,1.3176245766935393e+18)
+ tdSql.checkData(0,2,14042.142857143)
+ tdSql.checkData(0,3,26.785714286)
+ tdSql.checkData(0,4,2.9142856660228804e+37)
+ # tdSql.checkData(0,5,None)
+
+
+
+ def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
+ tdSql.prepare()
+
+ tdLog.printNoPrefix("==========step1:create table ==============")
+
+ self.prepare_datas()
+
+ tdLog.printNoPrefix("==========step2:test errors ==============")
+
+ self.test_errors()
+
+ tdLog.printNoPrefix("==========step3:support types ============")
+
+ self.support_types()
+
+ tdLog.printNoPrefix("==========step4: avg basic query ============")
+
+ self.basic_avg_function()
+
+ tdLog.printNoPrefix("==========step5: avg boundary query ============")
+
+ self.check_boundary_values()
+
+ tdLog.printNoPrefix("==========step6: avg filter query ============")
+
+ self.avg_func_filter()
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/2-query/between.py b/tests/system-test/2-query/between.py
index 3b9465dd263cc6774fdf580630bb578629e4ce8b..44750abd4648260ceb68ba03239cb128e4eaf53b 100644
--- a/tests/system-test/2-query/between.py
+++ b/tests/system-test/2-query/between.py
@@ -175,16 +175,17 @@ class TDTestCase:
tdLog.printNoPrefix("==========step10:invalid query type")
- tdSql.query("select * from supt where location between 'beijing' and 'shanghai'")
- tdSql.checkRows(23)
- # 非0值均解析为1,因此"between 负值 and o"解析为"between 1 and 0"
- tdSql.query("select * from supt where isused between 0 and 1")
- tdSql.checkRows(23)
- tdSql.query("select * from supt where isused between -1 and 0")
- tdSql.checkRows(0)
- tdSql.error("select * from supt where isused between false and true")
- tdSql.query("select * from supt where family between '拖拉机' and '自行车'")
- tdSql.checkRows(23)
+ # TODO tag is not finished
+ # tdSql.query("select * from supt where location between 'beijing' and 'shanghai'")
+ # tdSql.checkRows(23)
+ # # 非0值均解析为1,因此"between 负值 and o"解析为"between 1 and 0"
+ # tdSql.query("select * from supt where isused between 0 and 1")
+ # tdSql.checkRows(23)
+ # tdSql.query("select * from supt where isused between -1 and 0")
+ # tdSql.checkRows(0)
+ # tdSql.error("select * from supt where isused between false and true")
+ # tdSql.query("select * from supt where family between '拖拉机' and '自行车'")
+ # tdSql.checkRows(23)
tdLog.printNoPrefix("==========step11:query HEX/OCT/BIN type")
diff --git a/tests/system-test/2-query/bottom.py b/tests/system-test/2-query/bottom.py
new file mode 100644
index 0000000000000000000000000000000000000000..a4390372dfa13ae4d6db6e545fc472b0395aed53
--- /dev/null
+++ b/tests/system-test/2-query/bottom.py
@@ -0,0 +1,106 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+
+ self.rowNum = 10
+ self.ts = 1537146000000
+
+ def run(self):
+ tdSql.prepare()
+
+ tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
+ tdSql.execute("create table test1 using test tags('beijing')")
+ for i in range(self.rowNum):
+ tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+ % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
+
+ # bottom verifacation
+ tdSql.error("select bottom(ts, 10) from test")
+ tdSql.error("select bottom(col1, 0) from test")
+ tdSql.error("select bottom(col1, 101) from test")
+ tdSql.error("select bottom(col2, 0) from test")
+ tdSql.error("select bottom(col2, 101) from test")
+ tdSql.error("select bottom(col3, 0) from test")
+ tdSql.error("select bottom(col3, 101) from test")
+ tdSql.error("select bottom(col4, 0) from test")
+ tdSql.error("select bottom(col4, 101) from test")
+ tdSql.error("select bottom(col5, 0) from test")
+ tdSql.error("select bottom(col5, 101) from test")
+ tdSql.error("select bottom(col6, 0) from test")
+ tdSql.error("select bottom(col6, 101) from test")
+ tdSql.error("select bottom(col7, 10) from test")
+ tdSql.error("select bottom(col8, 10) from test")
+ tdSql.error("select bottom(col9, 10) from test")
+
+ tdSql.query("select bottom(col1, 2) from test")
+ tdSql.checkRows(2)
+ tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)])
+ tdSql.query("select bottom(col2, 2) from test")
+ tdSql.checkRows(2)
+ tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)])
+
+ tdSql.query("select bottom(col3, 2) from test")
+ tdSql.checkRows(2)
+ tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)])
+
+ tdSql.query("select bottom(col4, 2) from test")
+ tdSql.checkRows(2)
+ tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)])
+
+ tdSql.query("select bottom(col11, 2) from test")
+ tdSql.checkRows(2)
+ tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)])
+
+ tdSql.query("select bottom(col12, 2) from test")
+ tdSql.checkRows(2)
+ tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)])
+
+ tdSql.query("select bottom(col13, 2) from test")
+ tdSql.checkRows(2)
+ tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)])
+
+ tdSql.query("select bottom(col13,50) from test")
+ tdSql.checkRows(10)
+
+ tdSql.query("select bottom(col14, 2) from test")
+ tdSql.checkRows(2)
+ tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)])
+ tdSql.query("select ts,bottom(col1, 2) from test1")
+ tdSql.checkRows(2)
+ tdSql.query("select ts,bottom(col1, 2),ts from test group by tbname")
+ tdSql.checkRows(2)
+
+ tdSql.query('select bottom(col2,1) from test interval(1y) order by col2')
+ tdSql.checkData(0,0,1)
+
+
+ tdSql.error('select * from test where bottom(col2,1)=1')
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/2-query/check_tsdb.py b/tests/system-test/2-query/check_tsdb.py
new file mode 100644
index 0000000000000000000000000000000000000000..33bf351207ebeacbfea514c2733700656e757d55
--- /dev/null
+++ b/tests/system-test/2-query/check_tsdb.py
@@ -0,0 +1,106 @@
+import taos
+import sys
+import datetime
+import inspect
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+
+class TDTestCase:
+ updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
+ "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
+ "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143}
+ def init(self, conn, logSql):
+ tdLog.debug(f"start to excute {__file__}")
+ tdSql.init(conn.cursor(), True)
+
+ def prepare_datas(self):
+ tdSql.execute(
+ '''create table stb1
+ (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
+ tags (t1 int)
+ '''
+ )
+
+ tdSql.execute(
+ '''
+ create table t1
+ (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
+ '''
+ )
+ for i in range(4):
+ tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+
+ for i in range(9):
+ tdSql.execute(
+ f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ )
+ tdSql.execute(
+ f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ )
+ tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+
+ tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+
+ tdSql.execute(
+ f'''insert into t1 values
+ ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
+ ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
+ ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
+ ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
+ ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
+ ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
+ ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
+ ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
+ ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
+ ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ '''
+ )
+
+
+ def restart_taosd_query_sum(self):
+
+ for i in range(5):
+ tdLog.info(" this is %d_th restart taosd " %i)
+ os.system("taos -s ' use db ;select c6 from stb1 ; '")
+ tdSql.execute("use db ")
+ tdSql.query("select count(*) from stb1")
+ tdSql.checkRows(1)
+ tdSql.query("select sum(c1),sum(c2),sum(c3),sum(c4),sum(c5),sum(c6) from stb1;")
+ tdSql.checkData(0,0,99)
+ tdSql.checkData(0,1,499995)
+ tdSql.checkData(0,2,4995)
+ tdSql.checkData(0,3,594)
+ tdSql.checkData(0,4,49.950001001)
+ tdSql.checkData(0,5,599.940000000)
+ tdDnodes.stop(1)
+ tdDnodes.start(1)
+ time.sleep(2)
+
+
+
+ def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
+ tdSql.prepare()
+
+ tdLog.printNoPrefix("==========step1:create table ==============")
+
+ self.prepare_datas()
+
+ os.system("taos -s ' select c6 from stb1 ; '")
+ self.restart_taosd_query_sum()
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/2-query/concat.py b/tests/system-test/2-query/concat.py
index 1167b444d2eb6f753a5d662586afb0dfe30dff0b..59fae9b59d62599e3bca23c393ecc854aed9c186 100644
--- a/tests/system-test/2-query/concat.py
+++ b/tests/system-test/2-query/concat.py
@@ -36,19 +36,19 @@ class TDTestCase:
concat_condition.extend(
(
char_col,
- f"upper( {char_col} )",
+ # f"upper( {char_col} )",
)
)
concat_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL)
concat_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL )
- concat_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
- concat_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
+ # concat_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
+ # concat_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
concat_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
# concat_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
concat_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL )
for num_col in NUM_COL:
- concat_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
+ # concat_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
concat_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL)
concat_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL )
@@ -96,7 +96,6 @@ class TDTestCase:
[ tdSql.query(f"select concat( {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ]
-
def __concat_err_check(self,tbname):
sqls = []
@@ -139,7 +138,11 @@ class TDTestCase:
def __test_current(self): # sourcery skip: use-itertools-product
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [
+ "ct1",
+ "ct2",
+ "ct4",
+ ]
for tb in tbname:
for i in range(2,8):
self.__concat_check(tb,i)
@@ -147,7 +150,10 @@ class TDTestCase:
def __test_error(self):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [
+ "t1",
+ "stb1",
+ ]
for tb in tbname:
for errsql in self.__concat_err_check(tb):
diff --git a/tests/system-test/2-query/concat2.py b/tests/system-test/2-query/concat2.py
new file mode 100644
index 0000000000000000000000000000000000000000..717766e7ffcaafcc164cc1519d0a3a657d5e387c
--- /dev/null
+++ b/tests/system-test/2-query/concat2.py
@@ -0,0 +1,293 @@
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+
+
+PRIMARY_COL = "ts"
+
+INT_COL = "c1"
+BINT_COL = "c2"
+SINT_COL = "c3"
+TINT_COL = "c4"
+FLOAT_COL = "c5"
+DOUBLE_COL = "c6"
+BOOL_COL = "c7"
+
+BINARY_COL = "c8"
+NCHAR_COL = "c9"
+TS_COL = "c10"
+
+NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
+CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
+BOOLEAN_COL = [ BOOL_COL, ]
+TS_TYPE_COL = [ TS_COL, ]
+
+
+class TDTestCase:
+
+ def init(self, conn, logSql):
+ tdLog.debug(f"start to excute {__file__}")
+ tdSql.init(conn.cursor())
+
+ def __concat_condition(self): # sourcery skip: extract-method
+ concat_condition = []
+ for char_col in CHAR_COL:
+ concat_condition.extend(
+ (
+ char_col,
+ # f"upper( {char_col} )",
+ )
+ )
+ concat_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL)
+ concat_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL )
+ # concat_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
+ # concat_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
+ concat_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
+ # concat_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
+ concat_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL )
+
+ for num_col in NUM_COL:
+ # concat_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
+ concat_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL)
+
+ concat_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL )
+
+ concat_condition.append('''"test1234!@#$%^&*():'>/.,][}{"''')
+
+ return concat_condition
+
+ def __where_condition(self, col):
+ # return f" where count({col}) > 0 "
+ return ""
+
+ def __concat_num(self, concat_lists, num):
+ return [ concat_lists[i] for i in range(num) ]
+
+
+ def __group_condition(self, col, having = ""):
+ return f" group by {col} having {having}" if having else f" group by {col} "
+
+ def __concat_check(self, tbname, num):
+ concat_condition = self.__concat_condition()
+ for i in range(len(concat_condition) - num + 1 ):
+ condition = self.__concat_num(concat_condition[i:], num)
+ concat_filter = f"concat( {','.join( condition ) }) "
+ where_condition = self.__where_condition(condition[0])
+ # group_having = self.__group_condition(condition[0], having=f"{condition[0]} is not null " )
+ concat_group_having = self.__group_condition(concat_filter, having=f"{concat_filter} is not null " )
+ # group_no_having= self.__group_condition(condition[0] )
+ concat_group_no_having= self.__group_condition(concat_filter)
+ groups = ["", concat_group_having, concat_group_no_having]
+
+ if num > 8 or num < 2 :
+ [tdSql.error(f"select concat( {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ]
+ break
+
+ tdSql.query(f"select {','.join(condition)} from {tbname} ")
+ rows = tdSql.queryRows
+ concat_data = []
+ for m in range(rows):
+ concat_data.append("".join(tdSql.queryResult[m])) if tdSql.getData(m, 0) else concat_data.append(None)
+ tdSql.query(f"select concat( {','.join( condition ) }) from {tbname} ")
+ tdSql.checkRows(rows)
+ for j in range(tdSql.queryRows):
+ assert tdSql.getData(j, 0) in concat_data
+
+ [ tdSql.query(f"select concat( {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ]
+
+
+ def __concat_err_check(self,tbname):
+ sqls = []
+
+ for char_col in CHAR_COL:
+ sqls.extend(
+ (
+ f"select concat( {char_col} ) from {tbname} ",
+ f"select concat(ceil( {char_col} )) from {tbname} ",
+ f"select {char_col} from {tbname} group by concat( {char_col} ) ",
+ )
+ )
+
+ sqls.extend( f"select concat( {char_col} , {num_col} ) from {tbname} " for num_col in NUM_COL )
+ sqls.extend( f"select concat( {char_col} , {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL )
+ sqls.extend( f"select concat( {char_col} , {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL )
+
+ sqls.extend( f"select concat( {ts_col}, {bool_col} ) from {tbname} " for ts_col in TS_TYPE_COL for bool_col in BOOLEAN_COL )
+ sqls.extend( f"select concat( {num_col} , {ts_col} ) from {tbname} " for num_col in NUM_COL for ts_col in TS_TYPE_COL)
+ sqls.extend( f"select concat( {num_col} , {bool_col} ) from {tbname} " for num_col in NUM_COL for bool_col in BOOLEAN_COL)
+ sqls.extend( f"select concat( {num_col} , {num_col} ) from {tbname} " for num_col in NUM_COL for num_col in NUM_COL)
+ sqls.extend( f"select concat( {ts_col}, {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL for ts_col in TS_TYPE_COL )
+ sqls.extend( f"select concat( {bool_col}, {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL for bool_col in BOOLEAN_COL )
+
+ sqls.extend( f"select concat( {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL )
+ sqls.extend( f"select concat({char_col}, 11) from {tbname} " for char_col in CHAR_COL )
+ sqls.extend( f"select concat({num_col}, '1') from {tbname} " for num_col in NUM_COL )
+ sqls.extend( f"select concat({ts_col}, '1') from {tbname} " for ts_col in TS_TYPE_COL )
+ sqls.extend( f"select concat({bool_col}, '1') from {tbname} " for bool_col in BOOLEAN_COL )
+ sqls.extend( f"select concat({char_col},'1') from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL )
+ sqls.extend(
+ (
+ f"select concat() from {tbname} ",
+ f"select concat(*) from {tbname} ",
+ f"select concat(ccccccc) from {tbname} ",
+ f"select concat(111) from {tbname} ",
+ )
+ )
+
+ return sqls
+
+ def __test_current(self): # sourcery skip: use-itertools-product
+ tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
+ tbname = [
+ "t1",
+ "stb1",
+ ]
+ for tb in tbname:
+ for i in range(2,8):
+ self.__concat_check(tb,i)
+ tdLog.printNoPrefix(f"==========current sql condition check in {tb}, col num: {i} over==========")
+
+ def __test_error(self):
+ tdLog.printNoPrefix("==========err sql condition check , must return error==========")
+ tbname = [
+ "ct1",
+ "ct4",
+ ]
+
+ for tb in tbname:
+ for errsql in self.__concat_err_check(tb):
+ tdSql.error(sql=errsql)
+ self.__concat_check(tb,1)
+ self.__concat_check(tb,9)
+ tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")
+
+
+ def all_test(self):
+ self.__test_current()
+ self.__test_error()
+
+
+ def __create_tb(self):
+ tdSql.prepare()
+
+ tdLog.printNoPrefix("==========step1:create table")
+ create_stb_sql = f'''create table stb1(
+ ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
+ {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
+ {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
+ ) tags (t1 int)
+ '''
+ create_ntb_sql = f'''create table t1(
+ ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
+ {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
+ {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
+ )
+ '''
+ tdSql.execute(create_stb_sql)
+ tdSql.execute(create_ntb_sql)
+
+ for i in range(4):
+ tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+
+ def __insert_data(self, rows):
+ now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
+ for i in range(rows):
+ tdSql.execute(
+ f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ )
+ tdSql.execute(
+ f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ )
+ tdSql.execute(
+ f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ )
+ tdSql.execute(
+ f'''insert into ct1 values
+ ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
+ ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
+ '''
+ )
+
+ tdSql.execute(
+ f'''insert into ct4 values
+ ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ (
+ { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
+ { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
+ )
+ (
+ { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
+ { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
+ )
+ '''
+ )
+
+ tdSql.execute(
+ f'''insert into ct2 values
+ ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ (
+ { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
+ { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
+ )
+ (
+ { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
+ { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
+ )
+ '''
+ )
+
+ for i in range(rows):
+ insert_data = f'''insert into t1 values
+ ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
+ "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
+ '''
+ tdSql.execute(insert_data)
+ tdSql.execute(
+ f'''insert into t1 values
+ ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
+ { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
+ "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
+ )
+ (
+ { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
+ { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
+ "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
+ )
+ '''
+ )
+
+ def run(self):
+ tdSql.prepare()
+
+ tdLog.printNoPrefix("==========step1:create table")
+ self.__create_tb()
+
+ tdLog.printNoPrefix("==========step2:insert data")
+ self.rows = 10
+ self.__insert_data(self.rows)
+
+ tdLog.printNoPrefix("==========step3:all check")
+ self.all_test()
+
+ tdDnodes.stop(1)
+ tdDnodes.start(1)
+
+ tdSql.execute("use db")
+
+ tdLog.printNoPrefix("==========step4:after wal, all check again ")
+ self.all_test()
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/2-query/concat_ws.py b/tests/system-test/2-query/concat_ws.py
index 876a1c88055b0ab3ca3b1046d180365fc089ae0d..2c179b97ce0757670f31498c4dfa3926018854d9 100644
--- a/tests/system-test/2-query/concat_ws.py
+++ b/tests/system-test/2-query/concat_ws.py
@@ -36,22 +36,22 @@ class TDTestCase:
concat_ws_condition.extend(
(
char_col,
- f"upper( {char_col} )",
+ # f"upper( {char_col} )",
)
)
concat_ws_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL)
concat_ws_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL )
- concat_ws_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
- concat_ws_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
+ # concat_ws_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
+ # concat_ws_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
concat_ws_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
# concat_ws_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
concat_ws_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL )
for num_col in NUM_COL:
- concat_ws_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
+ # concat_ws_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
concat_ws_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL)
- concat_ws_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL )
+ # concat_ws_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL )
concat_ws_condition.append('''"test1234!@#$%^&*():'>/.,][}{"''')
@@ -139,7 +139,10 @@ class TDTestCase:
def __test_current(self): # sourcery skip: use-itertools-product
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [
+ "t1",
+ "stb1"
+ ]
for tb in tbname:
for i in range(2,8):
self.__concat_ws_check(tb,i)
@@ -147,7 +150,11 @@ class TDTestCase:
def __test_error(self):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [
+ "ct1",
+ "ct2",
+ "ct4",
+ ]
for tb in tbname:
for errsql in self.__concat_ws_err_check(tb):
diff --git a/tests/system-test/2-query/concat_ws2.py b/tests/system-test/2-query/concat_ws2.py
new file mode 100644
index 0000000000000000000000000000000000000000..477e5d1b557de513473adb31fc8cec9536b683f6
--- /dev/null
+++ b/tests/system-test/2-query/concat_ws2.py
@@ -0,0 +1,294 @@
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+
+
+PRIMARY_COL = "ts"
+
+INT_COL = "c1"
+BINT_COL = "c2"
+SINT_COL = "c3"
+TINT_COL = "c4"
+FLOAT_COL = "c5"
+DOUBLE_COL = "c6"
+BOOL_COL = "c7"
+
+BINARY_COL = "c8"
+NCHAR_COL = "c9"
+TS_COL = "c10"
+
+NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
+CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
+BOOLEAN_COL = [ BOOL_COL, ]
+TS_TYPE_COL = [ TS_COL, ]
+
+
+class TDTestCase:
+
+ def init(self, conn, logSql):
+ tdLog.debug(f"start to excute {__file__}")
+ tdSql.init(conn.cursor())
+
+ def __concat_ws_condition(self): # sourcery skip: extract-method
+ concat_ws_condition = []
+ for char_col in CHAR_COL:
+ concat_ws_condition.extend(
+ (
+ char_col,
+ # f"upper( {char_col} )",
+ )
+ )
+ concat_ws_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL)
+ concat_ws_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL )
+ # concat_ws_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
+ # concat_ws_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
+ concat_ws_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
+ # concat_ws_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
+ concat_ws_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL )
+
+ for num_col in NUM_COL:
+ # concat_ws_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
+ concat_ws_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL)
+
+ # concat_ws_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL )
+
+ concat_ws_condition.append('''"test1234!@#$%^&*():'>/.,][}{"''')
+
+ return concat_ws_condition
+
+ def __where_condition(self, col):
+ # return f" where count({col}) > 0 "
+ return ""
+
+ def __concat_ws_num(self, concat_ws_lists, num):
+ return [ concat_ws_lists[i] for i in range(num) ]
+
+
+ def __group_condition(self, col, having = ""):
+ return f" group by {col} having {having}" if having else f" group by {col} "
+
+ def __concat_ws_check(self, tbname, num):
+ concat_ws_condition = self.__concat_ws_condition()
+ for i in range(len(concat_ws_condition) - num + 1 ):
+ condition = self.__concat_ws_num(concat_ws_condition[i:], num)
+ concat_ws_filter = f"concat_ws('_', {','.join( condition ) }) "
+ where_condition = self.__where_condition(condition[0])
+ # group_having = self.__group_condition(condition[0], having=f"{condition[0]} is not null " )
+ concat_ws_group_having = self.__group_condition(concat_ws_filter, having=f"{concat_ws_filter} is not null " )
+ # group_no_having= self.__group_condition(condition[0] )
+ concat_ws_group_no_having= self.__group_condition(concat_ws_filter)
+ groups = ["", concat_ws_group_having, concat_ws_group_no_having]
+
+ if num > 8 or num < 2 :
+ [tdSql.error(f"select concat_ws('_', {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ]
+ break
+
+ tdSql.query(f"select {','.join(condition)} from {tbname} ")
+ rows = tdSql.queryRows
+ concat_ws_data = []
+ for m in range(rows):
+ concat_ws_data.append("_".join(tdSql.queryResult[m])) if tdSql.getData(m, 0) else concat_ws_data.append(None)
+ tdSql.query(f"select concat_ws('_', {','.join( condition ) }) from {tbname} ")
+ tdSql.checkRows(rows)
+ for j in range(tdSql.queryRows):
+ assert tdSql.getData(j, 0) in concat_ws_data
+
+ [ tdSql.query(f"select concat_ws('_', {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ]
+
+
+ def __concat_ws_err_check(self,tbname):
+ sqls = []
+
+ for char_col in CHAR_COL:
+ sqls.extend(
+ (
+ f"select concat_ws('_', {char_col} ) from {tbname} ",
+ f"select concat_ws('_', ceil( {char_col} )) from {tbname} ",
+ f"select {char_col} from {tbname} group by concat_ws('_', {char_col} ) ",
+ )
+ )
+
+ sqls.extend( f"select concat_ws('_', {char_col} , {num_col} ) from {tbname} " for num_col in NUM_COL )
+ sqls.extend( f"select concat_ws('_', {char_col} , {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL )
+ sqls.extend( f"select concat_ws('_', {char_col} , {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL )
+
+ sqls.extend( f"select concat_ws('_', {ts_col}, {bool_col} ) from {tbname} " for ts_col in TS_TYPE_COL for bool_col in BOOLEAN_COL )
+ sqls.extend( f"select concat_ws('_', {num_col} , {ts_col} ) from {tbname} " for num_col in NUM_COL for ts_col in TS_TYPE_COL)
+ sqls.extend( f"select concat_ws('_', {num_col} , {bool_col} ) from {tbname} " for num_col in NUM_COL for bool_col in BOOLEAN_COL)
+ sqls.extend( f"select concat_ws('_', {num_col} , {num_col} ) from {tbname} " for num_col in NUM_COL for num_col in NUM_COL)
+ sqls.extend( f"select concat_ws('_', {ts_col}, {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL for ts_col in TS_TYPE_COL )
+ sqls.extend( f"select concat_ws('_', {bool_col}, {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL for bool_col in BOOLEAN_COL )
+
+ sqls.extend( f"select concat_ws('_', {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL )
+ sqls.extend( f"select concat_ws('_', {char_col}, 11) from {tbname} " for char_col in CHAR_COL )
+ sqls.extend( f"select concat_ws('_', {num_col}, '1') from {tbname} " for num_col in NUM_COL )
+ sqls.extend( f"select concat_ws('_', {ts_col}, '1') from {tbname} " for ts_col in TS_TYPE_COL )
+ sqls.extend( f"select concat_ws('_', {bool_col}, '1') from {tbname} " for bool_col in BOOLEAN_COL )
+ sqls.extend( f"select concat_ws('_', {char_col},'1') from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL )
+ sqls.extend(
+ (
+ f"select concat_ws('_', ) from {tbname} ",
+ f"select concat_ws('_', *) from {tbname} ",
+ f"select concat_ws('_', ccccccc) from {tbname} ",
+ f"select concat_ws('_', 111) from {tbname} ",
+ )
+ )
+
+ return sqls
+
+ def __test_current(self): # sourcery skip: use-itertools-product
+ tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
+ tbname = [
+ "ct1",
+ "ct2",
+ "ct4",
+ ]
+ for tb in tbname:
+ for i in range(2,8):
+ self.__concat_ws_check(tb,i)
+ tdLog.printNoPrefix(f"==========current sql condition check in {tb}, col num: {i} over==========")
+
+ def __test_error(self):
+ tdLog.printNoPrefix("==========err sql condition check , must return error==========")
+ tbname = [
+ "t1",
+ "stb1"
+ ]
+
+ for tb in tbname:
+ for errsql in self.__concat_ws_err_check(tb):
+ tdSql.error(sql=errsql)
+ self.__concat_ws_check(tb,1)
+ self.__concat_ws_check(tb,9)
+ tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")
+
+
+ def all_test(self):
+ self.__test_current()
+ self.__test_error()
+
+
+ def __create_tb(self):
+ tdSql.prepare()
+
+ tdLog.printNoPrefix("==========step1:create table")
+ create_stb_sql = f'''create table stb1(
+ ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
+ {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
+ {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
+ ) tags (t1 int)
+ '''
+ create_ntb_sql = f'''create table t1(
+ ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
+ {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
+ {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
+ )
+ '''
+ tdSql.execute(create_stb_sql)
+ tdSql.execute(create_ntb_sql)
+
+ for i in range(4):
+ tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+
+ def __insert_data(self, rows):
+ now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
+ for i in range(rows):
+ tdSql.execute(
+ f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ )
+ tdSql.execute(
+ f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ )
+ tdSql.execute(
+ f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ )
+ tdSql.execute(
+ f'''insert into ct1 values
+ ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
+ ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
+ '''
+ )
+
+ tdSql.execute(
+ f'''insert into ct4 values
+ ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ (
+ { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
+ { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
+ )
+ (
+ { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
+ { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
+ )
+ '''
+ )
+
+ tdSql.execute(
+ f'''insert into ct2 values
+ ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ (
+ { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
+ { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
+ )
+ (
+ { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
+ { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
+ )
+ '''
+ )
+
+ for i in range(rows):
+ insert_data = f'''insert into t1 values
+ ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
+ "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
+ '''
+ tdSql.execute(insert_data)
+ tdSql.execute(
+ f'''insert into t1 values
+ ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
+ { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
+ "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
+ )
+ (
+ { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
+ { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
+ "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
+ )
+ '''
+ )
+
+ def run(self):
+ tdSql.prepare()
+
+ tdLog.printNoPrefix("==========step1:create table")
+ self.__create_tb()
+
+ tdLog.printNoPrefix("==========step2:insert data")
+ self.rows = 10
+ self.__insert_data(self.rows)
+
+ tdLog.printNoPrefix("==========step3:all check")
+ self.all_test()
+
+ tdDnodes.stop(1)
+ tdDnodes.start(1)
+
+ tdSql.execute("use db")
+
+ tdLog.printNoPrefix("==========step4:after wal, all check again ")
+ self.all_test()
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/2-query/csum.py b/tests/system-test/2-query/csum.py
new file mode 100644
index 0000000000000000000000000000000000000000..a331311fd2e841da5fd4f6da86ccb27834fcbc69
--- /dev/null
+++ b/tests/system-test/2-query/csum.py
@@ -0,0 +1,428 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import subprocess
+import random
+import math
+import numpy as np
+import inspect
+import re
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+
+ def csum_query_form(self, col="c1", alias="", table_expr="t1", condition=""):
+
+ '''
+ csum function:
+ :param col: string, column name, required parameters;
+ :param alias: string, result column another name,or add other funtion;
+ :param table_expr: string or expression, data source(eg,table/stable name, result set), required parameters;
+ :param condition: expression;
+ :param args: other funtions,like: ', last(col)',or give result column another name, like 'c2'
+ :return: csum query statement,default: select csum(c1) from t1
+ '''
+
+ return f"select csum({col}) {alias} from {table_expr} {condition}"
+
+ def checkcsum(self,col="c1", alias="", table_expr="t1", condition="" ):
+ line = sys._getframe().f_back.f_lineno
+ pre_sql = self.csum_query_form(
+ col=col, table_expr=table_expr, condition=condition
+ ).replace("csum", "count")
+ tdSql.query(pre_sql)
+
+ if tdSql.queryRows == 0:
+ tdSql.query(self.csum_query_form(
+ col=col, alias=alias, table_expr=table_expr, condition=condition
+ ))
+ print(f"case in {line}: ", end='')
+ tdSql.checkRows(0)
+ return
+
+ if "order by tbname" in condition:
+ tdSql.error(self.csum_query_form(
+ col=col, alias=alias, table_expr=table_expr, condition=condition
+ ))
+ return
+
+ if "group" in condition:
+
+ tb_condition = condition.split("group by")[1].split(" ")[1]
+ tdSql.query(f"select distinct {tb_condition} from {table_expr}")
+ query_result = tdSql.queryResult
+ query_rows = tdSql.queryRows
+ clear_condition = re.sub('order by [0-9a-z]*|slimit [0-9]*|soffset [0-9]*', "", condition)
+
+ pre_row = 0
+ for i in range(query_rows):
+ group_name = query_result[i][0]
+ if "where" in clear_condition:
+ pre_condition = re.sub('group by [0-9a-z]*', f"{tb_condition}='{group_name}'", clear_condition)
+ else:
+ pre_condition = "where " + re.sub('group by [0-9a-z]*',f"{tb_condition}='{group_name}'", clear_condition)
+
+ tdSql.query(f"select {col} {alias} from {table_expr} {pre_condition}")
+ pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
+ print("data is ", pre_data)
+ pre_csum = np.cumsum(pre_data)
+ tdSql.query(self.csum_query_form(
+ col=col, alias=alias, table_expr=table_expr, condition=condition
+ ))
+ for j in range(len(pre_csum)):
+ print(f"case in {line}:", end='')
+ tdSql.checkData(pre_row+j, 1, pre_csum[j])
+ pre_row += len(pre_csum)
+ return
+ elif "union" in condition:
+ union_sql_0 = self.csum_query_form(
+ col=col, alias=alias, table_expr=table_expr, condition=condition
+ ).split("union all")[0]
+
+ union_sql_1 = self.csum_query_form(
+ col=col, alias=alias, table_expr=table_expr, condition=condition
+ ).split("union all")[1]
+
+ tdSql.query(union_sql_0)
+ union_csum_0 = tdSql.queryResult
+ row_union_0 = tdSql.queryRows
+
+ tdSql.query(union_sql_1)
+ union_csum_1 = tdSql.queryResult
+
+ tdSql.query(self.csum_query_form(
+ col=col, alias=alias, table_expr=table_expr, condition=condition
+ ))
+ for i in range(tdSql.queryRows):
+ print(f"case in {line}: ", end='')
+ if i < row_union_0:
+ tdSql.checkData(i, 0, union_csum_0[i][0])
+ else:
+ tdSql.checkData(i, 0, union_csum_1[i-row_union_0][0])
+ return
+
+ else:
+ tdSql.query(f"select {col} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}")
+ offset_val = condition.split("offset")[1].split(" ")[1] if "offset" in condition else 0
+ pre_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
+ pre_csum = np.cumsum(pre_result)[offset_val:]
+ tdSql.query(self.csum_query_form(
+ col=col, alias=alias, table_expr=table_expr, condition=condition
+ ))
+
+ for i in range(tdSql.queryRows):
+ print(f"case in {line}: ", end='')
+ if pre_csum[i] >1.7e+308 or pre_csum[i] < -1.7e+308:
+ continue
+ else:
+ tdSql.checkData(i, 0, pre_csum[i])
+
+ pass
+
+ def csum_current_query(self) :
+
+ # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool
+ # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
+
+ # case1~6: numeric col:int/bigint/tinyint/smallint/float/double
+ self.checkcsum()
+ case2 = {"col": "c2"}
+ self.checkcsum(**case2)
+ case3 = {"col": "c5"}
+ self.checkcsum(**case3)
+ case4 = {"col": "c7"}
+ self.checkcsum(**case4)
+ case5 = {"col": "c8"}
+ self.checkcsum(**case5)
+ case6 = {"col": "c9"}
+ self.checkcsum(**case6)
+
+ # case7~8: nested query
+ # case7 = {"table_expr": "(select c1 from stb1)"}
+ # self.checkcsum(**case7)
+ # case8 = {"table_expr": "(select csum(c1) c1 from stb1 group by tbname)"}
+ # self.checkcsum(**case8)
+
+ # case9~10: mix with tbname/ts/tag/col
+ # case9 = {"alias": ", tbname"}
+ # self.checkcsum(**case9)
+ # case10 = {"alias": ", _c0"}
+ # self.checkcsum(**case10)
+ # case11 = {"alias": ", st1"}
+ # self.checkcsum(**case11)
+ # case12 = {"alias": ", c1"}
+ # self.checkcsum(**case12)
+
+ # case13~15: with single condition
+ case13 = {"condition": "where c1 <= 10"}
+ self.checkcsum(**case13)
+ case14 = {"condition": "where c6 in (0, 1)"}
+ self.checkcsum(**case14)
+ case15 = {"condition": "where c1 between 1 and 10"}
+ self.checkcsum(**case15)
+
+ # case16: with multi-condition
+ case16 = {"condition": "where c6=1 or c6 =0"}
+ self.checkcsum(**case16)
+
+ # case17: only support normal table join
+ case17 = {
+ "col": "t1.c1",
+ "table_expr": "t1, t2",
+ "condition": "where t1.ts=t2.ts"
+ }
+ self.checkcsum(**case17)
+ # case18~19: with group by
+ # case18 = {
+ # "table_expr": "t1",
+ # "condition": "group by c6"
+ # }
+ # self.checkcsum(**case18)
+ # case19 = {
+ # "table_expr": "stb1",
+ # "condition": "partition by tbname" # partition by tbname
+ # }
+ # self.checkcsum(**case19)
+
+ # # case20~21: with order by
+ # case20 = {"condition": "order by ts"}
+ # self.checkcsum(**case20)
+
+ # # case22: with union
+ # case22 = {
+ # "condition": "union all select csum(c1) from t2"
+ # }
+ # self.checkcsum(**case22)
+
+ # case23: with limit/slimit
+ case23 = {
+ "condition": "limit 1"
+ }
+ self.checkcsum(**case23)
+ # case24 = {
+ # "table_expr": "stb1",
+ # "condition": "group by tbname slimit 1 soffset 1"
+ # }
+ # self.checkcsum(**case24)
+
+ pass
+
+ def csum_error_query(self) -> None :
+ # unusual test
+ #
+ # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool
+ # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
+ #
+ # form test
+ tdSql.error(self.csum_query_form(col="")) # no col
+ tdSql.error("csum(c1) from stb1") # no select
+ tdSql.error("select csum from t1") # no csum condition
+ tdSql.error("select csum c1 from t1") # no brackets
+ tdSql.error("select csum(c1) t1") # no from
+ tdSql.error("select csum( c1 ) from ") # no table_expr
+ # tdSql.error(self.csum_query_form(col="st1")) # tag col
+ tdSql.error(self.csum_query_form(col=1)) # col is a value
+ tdSql.error(self.csum_query_form(col="'c1'")) # col is a string
+ tdSql.error(self.csum_query_form(col=None)) # col is NULL 1
+ tdSql.error(self.csum_query_form(col="NULL")) # col is NULL 2
+ tdSql.error(self.csum_query_form(col='""')) # col is ""
+ tdSql.error(self.csum_query_form(col='c%')) # col is spercial char 1
+ tdSql.error(self.csum_query_form(col='c_')) # col is spercial char 2
+ tdSql.error(self.csum_query_form(col='c.')) # col is spercial char 3
+ tdSql.error(self.csum_query_form(col='c3')) # timestamp col
+ tdSql.error(self.csum_query_form(col='ts')) # Primary key
+ tdSql.error(self.csum_query_form(col='avg(c1)')) # expr col
+ tdSql.error(self.csum_query_form(col='c6')) # bool col
+ tdSql.error(self.csum_query_form(col='c4')) # binary col
+ tdSql.error(self.csum_query_form(col='c10')) # nachr col
+ tdSql.error(self.csum_query_form(col='c10')) # not table_expr col
+ tdSql.error(self.csum_query_form(col='t1')) # tbname
+ tdSql.error(self.csum_query_form(col='stb1')) # stbname
+ tdSql.error(self.csum_query_form(col='db')) # datbasename
+ tdSql.error(self.csum_query_form(col=True)) # col is BOOL 1
+ tdSql.error(self.csum_query_form(col='True')) # col is BOOL 2
+ tdSql.error(self.csum_query_form(col='*')) # col is all col
+ tdSql.error("select csum[c1] from t1") # sql form error 1
+ tdSql.error("select csum{c1} from t1") # sql form error 2
+ tdSql.error(self.csum_query_form(col="[c1]")) # sql form error 3
+ # tdSql.error(self.csum_query_form(col="c1, c2")) # sql form error 3
+ # tdSql.error(self.csum_query_form(col="c1, 2")) # sql form error 3
+ tdSql.error(self.csum_query_form(alias=", count(c1)")) # mix with aggregate function 1
+ tdSql.error(self.csum_query_form(alias=", avg(c1)")) # mix with aggregate function 2
+ tdSql.error(self.csum_query_form(alias=", min(c1)")) # mix with select function 1
+ tdSql.error(self.csum_query_form(alias=", top(c1, 5)")) # mix with select function 2
+ tdSql.error(self.csum_query_form(alias=", spread(c1)")) # mix with calculation function 1
+ tdSql.error(self.csum_query_form(alias=", diff(c1)")) # mix with calculation function 2
+ # tdSql.error(self.csum_query_form(alias=" + 2")) # mix with arithmetic 1
+ tdSql.error(self.csum_query_form(alias=" + avg(c1)")) # mix with arithmetic 2
+ tdSql.error(self.csum_query_form(alias=", c2")) # mix with other 1
+ # tdSql.error(self.csum_query_form(table_expr="stb1")) # select stb directly
+ stb_join = {
+ "col": "stb1.c1",
+ "table_expr": "stb1, stb2",
+ "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts"
+ }
+ tdSql.error(self.csum_query_form(**stb_join)) # stb join
+ interval_sql = {
+ "condition": "where ts>0 and ts < now interval(1h) fill(next)"
+ }
+ tdSql.error(self.csum_query_form(**interval_sql)) # interval
+ group_normal_col = {
+ "table_expr": "t1",
+ "condition": "group by c6"
+ }
+ tdSql.error(self.csum_query_form(**group_normal_col)) # group by normal col
+ slimit_soffset_sql = {
+ "table_expr": "stb1",
+ "condition": "group by tbname slimit 1 soffset 1"
+ }
+ # tdSql.error(self.csum_query_form(**slimit_soffset_sql))
+ order_by_tbname_sql = {
+ "table_expr": "stb1",
+ "condition": "group by tbname order by tbname"
+ }
+ tdSql.error(self.csum_query_form(**order_by_tbname_sql))
+
+ pass
+
+ def csum_test_data(self, tbnum:int, data_row:int, basetime:int) -> None :
+ for i in range(tbnum):
+ for j in range(data_row):
+ tdSql.execute(
+ f"insert into t{i} values ("
+ f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, "
+ f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, "
+ f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )"
+ )
+
+ tdSql.execute(
+ f"insert into t{i} values ("
+ f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, "
+ f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, "
+ f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )"
+ )
+ tdSql.execute(
+ f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
+ )
+
+ pass
+
+ def csum_test_table(self,tbnum: int) -> None :
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+ tdSql.execute("use db")
+
+ tdSql.execute(
+ "create stable db.stb1 (\
+ ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \
+ c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\
+ ) \
+ tags(st1 int)"
+ )
+ tdSql.execute(
+ "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)"
+ )
+ for i in range(tbnum):
+ tdSql.execute(f"create table t{i} using stb1 tags({i})")
+ tdSql.execute(f"create table tt{i} using stb2 tags({i})")
+
+ pass
+
+ def csum_test_run(self) :
+ tdLog.printNoPrefix("==========TD-10594==========")
+ tbnum = 10
+ nowtime = int(round(time.time() * 1000))
+ per_table_rows = 2
+ self.csum_test_table(tbnum)
+
+ tdLog.printNoPrefix("######## no data test:")
+ self.csum_current_query()
+ self.csum_error_query()
+
+ tdLog.printNoPrefix("######## insert only NULL test:")
+ for i in range(tbnum):
+ tdSql.execute(f"insert into t{i}(ts) values ({nowtime - 5})")
+ tdSql.execute(f"insert into t{i}(ts) values ({nowtime + 5})")
+ self.csum_current_query()
+ self.csum_error_query()
+
+ tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):")
+ self.csum_test_table(tbnum)
+ tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values "
+ f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})")
+ tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values "
+ f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})")
+ self.csum_current_query()
+ self.csum_error_query()
+
+ tdLog.printNoPrefix("######## insert data in the range near the min(bigint/double):")
+ self.csum_test_table(tbnum)
+ tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values "
+ f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})")
+ tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values "
+ f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})")
+ self.csum_current_query()
+ self.csum_error_query()
+
+ tdLog.printNoPrefix("######## insert data without NULL data test:")
+ self.csum_test_table(tbnum)
+ self.csum_test_data(tbnum, per_table_rows, nowtime)
+ self.csum_current_query()
+ self.csum_error_query()
+
+
+ tdLog.printNoPrefix("######## insert data mix with NULL test:")
+ for i in range(tbnum):
+ tdSql.execute(f"insert into t{i}(ts) values ({nowtime})")
+ tdSql.execute(f"insert into t{i}(ts) values ({nowtime-(per_table_rows+3)*10})")
+ tdSql.execute(f"insert into t{i}(ts) values ({nowtime+(per_table_rows+3)*10})")
+ self.csum_current_query()
+ self.csum_error_query()
+
+
+
+ tdLog.printNoPrefix("######## check after WAL test:")
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+ tdDnodes.stop(index)
+ tdDnodes.start(index)
+ self.csum_current_query()
+ self.csum_error_query()
+
+ def run(self):
+ import traceback
+ try:
+ # run in develop branch
+ self.csum_test_run()
+ pass
+ except Exception as e:
+ traceback.print_exc()
+ raise e
+
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/system-test/2-query/diff.py b/tests/system-test/2-query/diff.py
index 03b3899dc659d79ca8ae0750710fe293b5f83a3b..0d8b0de3dca8d0db11eb98e9b04defff07df741c 100644
--- a/tests/system-test/2-query/diff.py
+++ b/tests/system-test/2-query/diff.py
@@ -15,59 +15,51 @@ class TDTestCase:
self.perfix = 'dev'
self.tables = 10
- def insertData(self):
- print("==============step1")
- tdSql.execute(
- "create table if not exists st (ts timestamp, col int) tags(dev nchar(50))")
-
- for i in range(self.tables):
- tdSql.execute("create table %s%d using st tags(%d)" % (self.perfix, i, i))
- rows = 15 + i
- for j in range(rows):
- tdSql.execute("insert into %s%d values(%d, %d)" %(self.perfix, i, self.ts + i * 20 * 10000 + j * 10000, j))
def run(self):
tdSql.prepare()
- tdSql.execute("create table ntb(ts timestamp,c1 int,c2 double,c3 float)")
- tdSql.execute("insert into ntb values(now,1,1.0,10.5)(now+1s,10,-100.0,5.1)(now+10s,-1,15.1,5.0)")
+ tdSql.execute(
+ "create table ntb(ts timestamp,c1 int,c2 double,c3 float)")
+ tdSql.execute(
+ "insert into ntb values(now,1,1.0,10.5)(now+1s,10,-100.0,5.1)(now+10s,-1,15.1,5.0)")
tdSql.query("select diff(c1,0) from ntb")
tdSql.checkRows(2)
- tdSql.checkData(0,0,9)
- tdSql.checkData(1,0,-11)
+ tdSql.checkData(0, 0, 9)
+ tdSql.checkData(1, 0, -11)
tdSql.query("select diff(c1,1) from ntb")
tdSql.checkRows(2)
- tdSql.checkData(0,0,9)
- tdSql.checkData(1,0,None)
-
+ tdSql.checkData(0, 0, 9)
+ tdSql.checkData(1, 0, None)
+
tdSql.query("select diff(c2,0) from ntb")
tdSql.checkRows(2)
- tdSql.checkData(0,0,-101)
- tdSql.checkData(1,0,115.1)
+ tdSql.checkData(0, 0, -101)
+ tdSql.checkData(1, 0, 115.1)
tdSql.query("select diff(c2,1) from ntb")
tdSql.checkRows(2)
- tdSql.checkData(0,0,None)
- tdSql.checkData(1,0,115.1)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, 115.1)
tdSql.query("select diff(c3,0) from ntb")
tdSql.checkRows(2)
- tdSql.checkData(0,0,-5.4)
- tdSql.checkData(1,0,-0.1)
+ tdSql.checkData(0, 0, -5.4)
+ tdSql.checkData(1, 0, -0.1)
tdSql.query("select diff(c3,1) from ntb")
tdSql.checkRows(2)
- tdSql.checkData(0,0,None)
- tdSql.checkData(1,0,None)
-
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
tdSql.execute("create table stb_1 using stb tags('beijing')")
- tdSql.execute("insert into stb_1 values(%d, 0, 0, 0, 0, 0.0, 0.0, False, ' ', ' ', 0, 0, 0, 0)" % (self.ts - 1))
-
- # diff verifacation
+ tdSql.execute(
+ "insert into stb_1 values(%d, 0, 0, 0, 0, 0.0, 0.0, False, ' ', ' ', 0, 0, 0, 0)" % (self.ts - 1))
+
+ # diff verifacation
tdSql.query("select diff(col1) from stb_1")
tdSql.checkRows(0)
-
+
tdSql.query("select diff(col2) from stb_1")
tdSql.checkRows(0)
@@ -87,38 +79,23 @@ class TDTestCase:
tdSql.checkRows(0)
for i in range(self.rowNum):
- tdSql.execute("insert into stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
- % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
-
- # tdSql.error("select diff(ts) from stb")
+ tdSql.execute("insert into stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+ % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
+
+ tdSql.error("select diff(ts) from stb")
tdSql.error("select diff(ts) from stb_1")
- # tdSql.error("select diff(col7) from stb")
-
- # tdSql.error("select diff(col8) from stb")
+
+ # tdSql.error("select diff(col7) from stb")
+
+ tdSql.error("select diff(col8) from stb")
tdSql.error("select diff(col8) from stb_1")
- # tdSql.error("select diff(col9) from stb")
+ tdSql.error("select diff(col9) from stb")
tdSql.error("select diff(col9) from stb_1")
tdSql.error("select diff(col11) from stb_1")
tdSql.error("select diff(col12) from stb_1")
tdSql.error("select diff(col13) from stb_1")
tdSql.error("select diff(col14) from stb_1")
-
- tdSql.query("select ts,diff(col1),ts from stb_1")
- tdSql.checkRows(11)
- tdSql.checkData(0, 0, "2018-09-17 09:00:00.000")
- tdSql.checkData(1, 0, "2018-09-17 09:00:00.000")
- tdSql.checkData(1, 2, "2018-09-17 09:00:00.000")
- tdSql.checkData(9, 0, "2018-09-17 09:00:00.009")
- tdSql.checkData(9, 2, "2018-09-17 09:00:00.009")
-
- # tdSql.query("select ts,diff(col1),ts from stb group by tbname")
- # tdSql.checkRows(10)
- # tdSql.checkData(0, 0, "2018-09-17 09:00:00.000")
- # tdSql.checkData(0, 1, "2018-09-17 09:00:00.000")
- # tdSql.checkData(0, 3, "2018-09-17 09:00:00.000")
- # tdSql.checkData(9, 0, "2018-09-17 09:00:00.009")
- # tdSql.checkData(9, 1, "2018-09-17 09:00:00.009")
- # tdSql.checkData(9, 3, "2018-09-17 09:00:00.009")
+ tdSql.error("select ts,diff(col1),ts from stb_1")
tdSql.query("select diff(col1) from stb_1")
tdSql.checkRows(10)
@@ -137,10 +114,27 @@ class TDTestCase:
tdSql.query("select diff(col6) from stb_1")
tdSql.checkRows(10)
-
+
+ tdSql.execute('''create table stb1(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
+ tdSql.execute("create table stb1_1 using stb tags('shanghai')")
+
+ for i in range(self.rowNum):
+ tdSql.execute("insert into stb1_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+ % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
+ for i in range(self.rowNum):
+ tdSql.execute("insert into stb1_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+ % (self.ts - i-1, i-1, i-1, i-1, i-1, -i - 0.1, -i - 0.1, -i % 2, i - 1, i - 1, i + 1, i + 1, i + 1, i + 1))
+ tdSql.query("select diff(col1,0) from stb1_1")
+ tdSql.checkRows(19)
+ tdSql.query("select diff(col1,1) from stb1_1")
+ tdSql.checkRows(19)
+ tdSql.checkData(0,0,None)
+
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
+
tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/2-query/elapsed.py b/tests/system-test/2-query/elapsed.py
new file mode 100644
index 0000000000000000000000000000000000000000..017090128d40f66eb7f395c75c41cafff2934a47
--- /dev/null
+++ b/tests/system-test/2-query/elapsed.py
@@ -0,0 +1,1604 @@
+###################################################################
+# Copyright (c) 2020 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ self.ts = 1420041600000 # 2015-01-01 00:00:00 this is begin time for first record
+ self.num = 10
+
+ def caseDescription(self):
+
+ '''
+ case1