diff --git a/.gitignore b/.gitignore
index d7fcb019ae14a70ead3f84bbe97e01c3053acd5b..80fd850cd4483791afc19fc19732f5d0eadd8e2e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -48,6 +48,8 @@ pysim/
*.out
*DS_Store
tests/script/api/batchprepare
+taosadapter
+taosadapter-debug
# Doxygen Generated files
html/
diff --git a/.gitmodules b/.gitmodules
index 31c211bf89c4ad9fe370774b4a60f1f5a6423323..07e4bb2b9cef34e498f8e2af5848efe995969313 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -13,12 +13,3 @@
[submodule "examples/rust"]
path = examples/rust
url = https://github.com/songtianyi/tdengine-rust-bindings.git
-[submodule "tools/taos-tools"]
- path = tools/taos-tools
- url = https://github.com/taosdata/taos-tools
-[submodule "tools/taosadapter"]
- path = tools/taosadapter
- url = https://github.com/taosdata/taosadapter.git
-[submodule "tools/taosws-rs"]
- path = tools/taosws-rs
- url = https://github.com/taosdata/taosws-rs
diff --git a/Jenkinsfile2 b/Jenkinsfile2
index 1b04e40f2a312f663dae2b0e9c2177618607de1f..83fa1479dc8057cc98b2132ba17be560ca341c26 100644
--- a/Jenkinsfile2
+++ b/Jenkinsfile2
@@ -113,6 +113,12 @@ def pre_test(){
echo "unmatched reposiotry ${CHANGE_URL}"
'''
}
+ sh '''
+ cd ${WKC}
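+ # drop stale cached submodule entries (taos-tools/taosadapter/taosws-rs were removed from .gitmodules); "|| :" keeps the step from failing when an entry is already gone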
+ git rm --cached tools/taos-tools 2>/dev/null || :
+ git rm --cached tools/taosadapter 2>/dev/null || :
+ git rm --cached tools/taosws-rs 2>/dev/null || :
+ '''
sh '''
cd ${WKC}
git submodule update --init --recursive
@@ -258,6 +264,13 @@ def pre_test_win(){
git branch
git log -5
'''
+ bat '''
+ cd %WIN_COMMUNITY_ROOT%
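+ rem drop stale cached submodule entries; "2>nul" hides the error output and "exit 0" keeps the batch step green when the entries are absent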
+ git rm --cached tools/taos-tools 2>nul
+ git rm --cached tools/taosadapter 2>nul
+ git rm --cached tools/taosws-rs 2>nul
+ exit 0
+ '''
bat '''
cd %WIN_COMMUNITY_ROOT%
git submodule update --init --recursive
diff --git a/docs/zh/05-get-started/01-docker.md b/docs/zh/05-get-started/01-docker.md
index 14db62218fc95a05bbb0e773303d00621529111a..2edabad3c950eb7ef1361221eef56bed1046909a 100644
--- a/docs/zh/05-get-started/01-docker.md
+++ b/docs/zh/05-get-started/01-docker.md
@@ -111,7 +111,7 @@ TDengine REST API 详情请参考[官方文档](/reference/rest-api/)。
这条命令很快完成 1 亿条记录的插入。具体时间取决于硬件性能。
- taosBenchmark 命令本身带有很多选项,配置表的数目、记录条数等等,您可以设置不同参数进行体验,请执行 `taosBenchmark --help` 详细列出。taosBenchmark 详细使用方法请参照 [taosBenchmark 参考手册](../reference/taosbenchmark)。
+ taosBenchmark 命令本身带有很多选项,配置表的数目、记录条数等等,您可以设置不同参数进行体验,请执行 `taosBenchmark --help` 详细列出。taosBenchmark 详细使用方法请参照 [taosBenchmark 参考手册](../../reference/taosbenchmark)。
## 体验查询
diff --git a/docs/zh/05-get-started/03-package.md b/docs/zh/05-get-started/03-package.md
index 6ac7567a057c2bc7c38b9776205a2aa2f50a11c3..6dbf74f8bcc3442018d259c9781999182fdc579d 100644
--- a/docs/zh/05-get-started/03-package.md
+++ b/docs/zh/05-get-started/03-package.md
@@ -245,7 +245,7 @@ select * from t;
Query OK, 2 row(s) in set (0.003128s)
```
-除执行 SQL 语句外,系统管理员还可以从 TDengine CLI 进行检查系统运行状态、添加删除用户账号等操作。TDengine CLI 连同应用驱动也可以独立安装在 Linux 或 Windows 机器上运行,更多细节请参考 [这里](../reference/taos-shell/)
+除执行 SQL 语句外,系统管理员还可以从 TDengine CLI 进行检查系统运行状态、添加删除用户账号等操作。TDengine CLI 连同应用驱动也可以独立安装在 Linux 或 Windows 机器上运行,更多细节请参考 [这里](../../reference/taos-shell/)
## 使用 taosBenchmark 体验写入速度
diff --git a/docs/zh/12-taos-sql/02-database.md b/docs/zh/12-taos-sql/02-database.md
index edcad98fd85e731746db55c30230097e6b5dd995..839b1e2015dec45dd2df7d3b88202022aae50e98 100644
--- a/docs/zh/12-taos-sql/02-database.md
+++ b/docs/zh/12-taos-sql/02-database.md
@@ -8,17 +8,17 @@ description: "创建、删除数据库,查看、修改数据库参数"
```sql
CREATE DATABASE [IF NOT EXISTS] db_name [database_options]
-
+
database_options:
database_option ...
-
+
database_option: {
BUFFER value
| CACHEMODEL {'none' | 'last_row' | 'last_value' | 'both'}
| CACHESIZE value
| COMP {0 | 1 | 2}
| DURATION value
- | FSYNC value
+ | WAL_FSYNC_PERIOD value
| MAXROWS value
| MINROWS value
| KEEP value
@@ -28,7 +28,7 @@ database_option: {
| REPLICA value
| RETENTIONS ingestion_duration:keep_duration ...
| STRICT {'off' | 'on'}
- | WAL {1 | 2}
+ | WAL_LEVEL {1 | 2}
| VGROUPS value
| SINGLE_STABLE {0 | 1}
| WAL_RETENTION_PERIOD value
@@ -46,7 +46,7 @@ database_option: {
- last_row:表示缓存子表最近一行数据。这将显著改善 LAST_ROW 函数的性能表现。
- last_value:表示缓存子表每一列的最近的非 NULL 值。这将显著改善无特殊影响(WHERE、ORDER BY、GROUP BY、INTERVAL)下的 LAST 函数的性能表现。
- both:表示同时打开缓存最近行和列功能。
-- CACHESIZE:表示缓存子表最近数据的内存大小。默认为 1 ,范围是[1, 65536],单位是 MB。
+- CACHESIZE:表示每个 vnode 中用于缓存子表最近数据的内存大小。默认为 1 ,范围是[1, 65536],单位是 MB。
- COMP:表示数据库文件压缩标志位,缺省值为 2,取值范围为 [0, 2]。
- 0:表示不压缩。
- 1:表示一阶段压缩。
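+
+下面是一个使用上述重命名后参数建库的最小示意(库名 db_demo 与各取值均为演示假设,并非推荐配置):
+
+```sql
+CREATE DATABASE IF NOT EXISTS db_demo
+  WAL_LEVEL 2
+  WAL_FSYNC_PERIOD 3000
+  CACHEMODEL 'last_row'
+  CACHESIZE 16;
+```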
diff --git a/docs/zh/12-taos-sql/06-select.md b/docs/zh/12-taos-sql/06-select.md
index 84fcda232db1bda567e810bb014a99c8dbc25328..75f149d0aeba924635bbccd6607ed9d820155a8d 100644
--- a/docs/zh/12-taos-sql/06-select.md
+++ b/docs/zh/12-taos-sql/06-select.md
@@ -218,7 +218,7 @@ GROUP BY 子句中的表达式可以包含表或视图中的任何列,这些
PARTITION BY 子句是 TDengine 特色语法,按 part_list 对数据进行切分,在每个切分的分片中进行计算。
-详见 [TDengine 特色查询](./distinguished)
+详见 [TDengine 特色查询](../distinguished)
## ORDER BY
diff --git a/docs/zh/12-taos-sql/10-function.md b/docs/zh/12-taos-sql/10-function.md
index 876aaa553e2e67e8fddc3eb63ac277e2ed3857cc..f4d45558329f381e4fef2c6953bb6145d3cd3649 100644
--- a/docs/zh/12-taos-sql/10-function.md
+++ b/docs/zh/12-taos-sql/10-function.md
@@ -16,15 +16,15 @@ toc_max_heading_level: 4
SELECT ABS(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
-**功能说明**:获得指定列的绝对值
+**功能说明**:获得指定字段的绝对值。
-**返回结果类型**:如果输入值为整数,输出值是 UBIGINT 类型。如果输入值是 FLOAT/DOUBLE 数据类型,输出值是 DOUBLE 数据类型。
+**返回结果类型**:与指定字段的原始数据类型一致。
**适用数据类型**:数值类型。
**嵌套子查询支持**:适用于内层查询和外层查询。
-**适用于**: 表和超级表
+**适用于**: 表和超级表。
**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
@@ -34,15 +34,15 @@ SELECT ABS(field_name) FROM { tb_name | stb_name } [WHERE clause]
SELECT ACOS(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
-**功能说明**:获得指定列的反余弦结果
+**功能说明**:获得指定字段的反余弦结果。
-**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
+**返回结果类型**:DOUBLE。
**适用数据类型**:数值类型。
**嵌套子查询支持**:适用于内层查询和外层查询。
-**适用于**: 表和超级表
+**适用于**: 表和超级表。
**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
@@ -52,15 +52,15 @@ SELECT ACOS(field_name) FROM { tb_name | stb_name } [WHERE clause]
SELECT ASIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
-**功能说明**:获得指定列的反正弦结果
+**功能说明**:获得指定字段的反正弦结果。
-**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
+**返回结果类型**:DOUBLE。
**适用数据类型**:数值类型。
**嵌套子查询支持**:适用于内层查询和外层查询。
-**适用于**: 表和超级表
+**适用于**: 表和超级表。
**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
@@ -71,15 +71,15 @@ SELECT ASIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
SELECT ATAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
-**功能说明**:获得指定列的反正切结果
+**功能说明**:获得指定字段的反正切结果。
-**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
+**返回结果类型**:DOUBLE。
**适用数据类型**:数值类型。
**嵌套子查询支持**:适用于内层查询和外层查询。
-**适用于**: 表和超级表
+**适用于**: 表和超级表。
**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
@@ -90,20 +90,17 @@ SELECT ATAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
-**功能说明**:获得指定列的向上取整数的结果。
+**功能说明**:获得指定字段的向上取整数的结果。
-**返回结果类型**:与指定列的原始数据类型一致。例如,如果指定列的原始数据类型为 Float,那么返回的数据类型也为 Float;如果指定列的原始数据类型为 Double,那么返回的数据类型也为 Double。
+**返回结果类型**:与指定字段的原始数据类型一致。
**适用数据类型**:数值类型。
-**适用于**: 普通表、超级表。
+**适用于**: 表和超级表。
**嵌套子查询支持**:适用于内层查询和外层查询。
-**使用说明**:
-
-- 支持 +、-、\*、/ 运算,如 ceil(col1) + ceil(col2)。
-- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
+**使用说明**: 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
#### COS
@@ -111,15 +108,15 @@ SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause];
SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
-**功能说明**:获得指定列的余弦结果
+**功能说明**:获得指定字段的余弦结果。
-**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
+**返回结果类型**:DOUBLE。
**适用数据类型**:数值类型。
**嵌套子查询支持**:适用于内层查询和外层查询。
-**适用于**: 表和超级表
+**适用于**: 表和超级表。
**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
@@ -129,24 +126,24 @@ SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause]
SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
-**功能说明**:获得指定列的向下取整数的结果。
+**功能说明**:获得指定字段的向下取整数的结果。
其他使用说明参见 CEIL 函数描述。
#### LOG
```sql
-SELECT LOG(field_name, base) FROM { tb_name | stb_name } [WHERE clause]
+SELECT LOG(field_name[, base]) FROM { tb_name | stb_name } [WHERE clause]
```
-**功能说明**:获得指定列对于底数 base 的对数
+**功能说明**:获得指定字段对于底数 base 的对数。如果 base 参数省略,则返回指定字段的自然对数值。
-**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
+**返回结果类型**:DOUBLE。
**适用数据类型**:数值类型。
**嵌套子查询支持**:适用于内层查询和外层查询。
-**适用于**: 表和超级表
+**适用于**: 表和超级表。
**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
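+
+一个最小示意(表 meters 与列 voltage 为假设的演示 schema):第一条查询以 10 为底,第二条省略 base、返回自然对数。
+
+```sql
+SELECT LOG(voltage, 10) FROM meters;
+SELECT LOG(voltage) FROM meters;
+```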
@@ -157,15 +154,15 @@ SELECT LOG(field_name, base) FROM { tb_name | stb_name } [WHERE clause]
SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause]
```
-**功能说明**:获得指定列的指数为 power 的幂
+**功能说明**:获得指定字段的指数为 power 的幂。
-**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
+**返回结果类型**:DOUBLE。
**适用数据类型**:数值类型。
**嵌套子查询支持**:适用于内层查询和外层查询。
-**适用于**: 表和超级表
+**适用于**: 表和超级表。
**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
@@ -176,7 +173,7 @@ SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause]
SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
-**功能说明**:获得指定列的四舍五入的结果。
+**功能说明**:获得指定字段的四舍五入的结果。
其他使用说明参见 CEIL 函数描述。
@@ -186,15 +183,15 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
SELECT SIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
-**功能说明**:获得指定列的正弦结果
+**功能说明**:获得指定字段的正弦结果。
-**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
+**返回结果类型**:DOUBLE。
**适用数据类型**:数值类型。
**嵌套子查询支持**:适用于内层查询和外层查询。
-**适用于**: 表和超级表
+**适用于**: 表和超级表。
**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
@@ -204,15 +201,15 @@ SELECT SIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
SELECT SQRT(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
-**功能说明**:获得指定列的平方根
+**功能说明**:获得指定字段的平方根。
-**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
+**返回结果类型**:DOUBLE。
**适用数据类型**:数值类型。
**嵌套子查询支持**:适用于内层查询和外层查询。
-**适用于**: 表和超级表
+**适用于**: 表和超级表。
**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
@@ -222,15 +219,15 @@ SELECT SQRT(field_name) FROM { tb_name | stb_name } [WHERE clause]
SELECT TAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
-**功能说明**:获得指定列的正切结果
+**功能说明**:获得指定字段的正切结果。
-**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
+**返回结果类型**:DOUBLE。
**适用数据类型**:数值类型。
**嵌套子查询支持**:适用于内层查询和外层查询。
-**适用于**: 表和超级表
+**适用于**: 表和超级表。
**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
@@ -246,13 +243,13 @@ SELECT CHAR_LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
**功能说明**:以字符计数的字符串长度。
-**返回结果类型**:INT。如果输入值为NULL,输出值为NULL。
+**返回结果类型**:BIGINT。
-**适用数据类型**:VARCHAR, NCHAR
+**适用数据类型**:VARCHAR, NCHAR。
**嵌套子查询支持**:适用于内层查询和外层查询。
-**适用于**: 表和超级表
+**适用于**: 表和超级表。
#### CONCAT
@@ -262,13 +259,13 @@ SELECT CONCAT(str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHER
**功能说明**:字符串连接函数。
-**返回结果类型**:如果所有参数均为 VARCHAR 类型,则结果类型为 VARCHAR。如果参数包含NCHAR类型,则结果类型为NCHAR。如果输入值为NULL,输出值为NULL。
+**返回结果类型**:如果所有参数均为 VARCHAR 类型,则结果类型为 VARCHAR。如果参数包含NCHAR类型,则结果类型为NCHAR。如果参数包含NULL值,则输出值为NULL。
**适用数据类型**:VARCHAR, NCHAR。 该函数最小参数个数为2个,最大参数个数为8个。
**嵌套子查询支持**:适用于内层查询和外层查询。
-**适用于**: 表和超级表
+**适用于**: 表和超级表。
#### CONCAT_WS
@@ -279,13 +276,13 @@ SELECT CONCAT_WS(separator, str1|column1, str2|column2, ...) FROM { tb_name | st
**功能说明**:带分隔符的字符串连接函数。
-**返回结果类型**:如果所有参数均为VARCHAR类型,则结果类型为VARCHAR。如果参数包含NCHAR类型,则结果类型为NCHAR。如果输入值为NULL,输出值为NULL。如果separator值不为NULL,其他输入为NULL,输出为空串。
+**返回结果类型**:如果所有参数均为VARCHAR类型,则结果类型为VARCHAR。如果参数包含NCHAR类型,则结果类型为NCHAR。如果参数包含NULL值,则输出值为NULL。
**适用数据类型**:VARCHAR, NCHAR。 该函数最小参数个数为3个,最大参数个数为9个。
**嵌套子查询支持**:适用于内层查询和外层查询。
-**适用于**: 表和超级表
+**适用于**: 表和超级表。
#### LENGTH
@@ -296,13 +293,13 @@ SELECT LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
**功能说明**:以字节计数的字符串长度。
-**返回结果类型**:INT。
+**返回结果类型**:BIGINT。
**适用数据类型**:输入参数是 VARCHAR 类型或者 NCHAR 类型的字符串或者列。
**嵌套子查询支持**:适用于内层查询和外层查询。
-**适用于**: 表和超级表
+**适用于**: 表和超级表。
#### LOWER
@@ -313,13 +310,13 @@ SELECT LOWER(str|column) FROM { tb_name | stb_name } [WHERE clause]
**功能说明**:将字符串参数值转换为全小写字母。
-**返回结果类型**:同输入类型。如果输入值为NULL,输出值为NULL。
+**返回结果类型**:与输入字段的原始类型相同。
-**适用数据类型**:输入参数是 VARCHAR 类型或者 NCHAR 类型的字符串或者列。
+**适用数据类型**:VARCHAR, NCHAR。
**嵌套子查询支持**:适用于内层查询和外层查询。
-**适用于**: 表和超级表
+**适用于**: 表和超级表。
#### LTRIM
@@ -330,13 +327,13 @@ SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
**功能说明**:返回清除左边空格后的字符串。
-**返回结果类型**:同输入类型。如果输入值为NULL,输出值为NULL。
+**返回结果类型**:与输入字段的原始类型相同。
-**适用数据类型**:输入参数是 VARCHAR 类型或者 NCHAR 类型的字符串或者列。
+**适用数据类型**:VARCHAR, NCHAR。
**嵌套子查询支持**:适用于内层查询和外层查询。
-**适用于**: 表和超级表
+**适用于**: 表和超级表。
#### RTRIM
@@ -347,13 +344,13 @@ SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
**功能说明**:返回清除右边空格后的字符串。
-**返回结果类型**:同输入类型。如果输入值为NULL,输出值为NULL。
+**返回结果类型**:与输入字段的原始类型相同。
-**适用数据类型**:输入参数是 VARCHAR 类型或者 NCHAR 类型的字符串或者列。
+**适用数据类型**:VARCHAR, NCHAR。
**嵌套子查询支持**:适用于内层查询和外层查询。
-**适用于**: 表和超级表
+**适用于**: 表和超级表。
#### SUBSTR
@@ -362,15 +359,15 @@ SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
SELECT SUBSTR(str,pos[,len]) FROM { tb_name | stb_name } [WHERE clause]
```
-**功能说明**:从源字符串 str 中的指定位置 pos 开始取一个长度为 len 的子串并返回。
+**功能说明**:从源字符串 str 中的指定位置 pos 开始取一个长度为 len 的子串并返回。如果输入参数 len 被忽略,返回的子串包含从 pos 开始的整个字串。
-**返回结果类型**:同输入类型。如果输入值为NULL,输出值为NULL。
+**返回结果类型**:与输入字段的原始类型相同。
-**适用数据类型**:输入参数是 VARCHAR 类型或者 NCHAR 类型的字符串或者列。输入参数pos可以为正数,也可以为负数。如果pos是正数,表示开始位置从字符串开头正数计算。如果pos为负数,表示开始位置从字符串结尾倒数计算。如果输入参数len被忽略,返回的子串包含从pos开始的整个字串。
+**适用数据类型**:VARCHAR, NCHAR。输入参数 pos 可以为正数,也可以为负数。如果 pos 是正数,表示开始位置从字符串开头正数计算。如果 pos 为负数,表示开始位置从字符串结尾倒数计算。
**嵌套子查询支持**:适用于内层查询和外层查询。
-**适用于**: 表和超级表
+**适用于**: 表和超级表。
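+
+一个最小示意(表 meters 与列 location 为假设):第一条从第 1 个字符起取长度为 3 的子串;第二条 pos 为负数且省略 len,从倒数第 3 个字符取到串尾。
+
+```sql
+SELECT SUBSTR(location, 1, 3) FROM meters;
+SELECT SUBSTR(location, -3) FROM meters;
+```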
#### UPPER
@@ -381,13 +378,13 @@ SELECT UPPER(str|column) FROM { tb_name | stb_name } [WHERE clause]
**功能说明**:将字符串参数值转换为全大写字母。
-**返回结果类型**:同输入类型。如果输入值为NULL,输出值为NULL。
+**返回结果类型**:与输入字段的原始类型相同。
-**适用数据类型**:输入参数是 VARCHAR 类型或者 NCHAR 类型的字符串或者列。
+**适用数据类型**:VARCHAR, NCHAR。
**嵌套子查询支持**:适用于内层查询和外层查询。
-**适用于**: 表和超级表
+**适用于**: 表和超级表。
### 转换函数
@@ -400,16 +397,19 @@ SELECT UPPER(str|column) FROM { tb_name | stb_name } [WHERE clause]
SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause]
```
-**功能说明**:数据类型转换函数,输入参数 expression 支持普通列、常量、标量函数及它们之间的四则运算,只适用于 select 子句中。
+**功能说明**:数据类型转换函数,返回 expression 转换为 type_name 指定的类型后的结果。只适用于 select 子句中。
+
+**返回结果类型**:CAST 中指定的类型(type_name)。
-**返回结果类型**:CAST 中指定的类型(type_name),可以是 BIGINT、BIGINT UNSIGNED、BINARY、VARCHAR、NCHAR和TIMESTAMP。
+**适用数据类型**:输入参数 expression 的类型可以是除 BLOB、MEDIUMBLOB 和 JSON 外的所有类型。
-**适用数据类型**:输入参数 expression 的类型可以是BLOB、MEDIUMBLOB和JSON外的所有类型
+**嵌套子查询支持**:适用于内层查询和外层查询。
+
+**适用于**: 表和超级表。
**使用说明**:
- 对于不能支持的类型转换会直接报错。
-- 如果输入值为NULL则输出值也为NULL。
- 对于类型支持但某些值无法正确转换的情况对应的转换后的值以转换函数输出为准。目前可能遇到的几种情况:
1)字符串类型转换数值类型时可能出现的无效字符情况,例如"a"可能转为0,但不会报错。
2)转换到数值类型时,数值大于type_name可表示的范围时,则会溢出,但不会报错。
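+
+一个最小示意(表 t 与列 c1 为假设):第二、三条分别演示“无效字符转为 0”与“数值溢出”两种不报错的情形。
+
+```sql
+SELECT CAST(c1 AS BIGINT) FROM t;
+SELECT CAST('a' AS INT) FROM t;
+SELECT CAST(9999999999 AS INT) FROM t;
+```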
@@ -418,20 +418,23 @@ SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause]
#### TO_ISO8601
```sql
-SELECT TO_ISO8601(ts_val | ts_col) FROM { tb_name | stb_name } [WHERE clause];
+SELECT TO_ISO8601(ts[, timezone]) FROM { tb_name | stb_name } [WHERE clause];
```
-**功能说明**:将 UNIX 时间戳转换成为 ISO8601 标准的日期时间格式,并附加客户端时区信息。
+**功能说明**:将 UNIX 时间戳转换成为 ISO8601 标准的日期时间格式,并附加时区信息。timezone 参数允许用户为输出结果指定附带任意时区信息。如果 timezone 参数省略,输出结果附带当前客户端的系统时区信息。
**返回结果数据类型**:VARCHAR 类型。
-**适用数据类型**:UNIX 时间戳常量或是 TIMESTAMP 类型的列
+**适用数据类型**:INTEGER, TIMESTAMP。
-**适用于**:表、超级表。
+**嵌套子查询支持**:适用于内层查询和外层查询。
+
+**适用于**: 表和超级表。
**使用说明**:
-- 如果输入是 UNIX 时间戳常量,返回格式精度由时间戳的位数决定;
+- timezone 参数允许输入的时区格式为: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]。例如,TO_ISO8601(1, "+00:00")。
+- 如果输入是表示 UNIX 时间戳的整型,返回格式精度由时间戳的位数决定;
- 如果输入是 TIMESTAMP 类型的列,返回格式的时间戳精度与当前 DATABASE 设置的时间精度一致。
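+
+一个最小示意(表 t 及其 TIMESTAMP 主键列 ts 为假设):第一条使用客户端系统时区,第二条显式指定时区。
+
+```sql
+SELECT TO_ISO8601(ts) FROM t;
+SELECT TO_ISO8601(ts, '+08:00') FROM t;
+```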
@@ -443,32 +446,34 @@ SELECT TO_JSON(str_literal) FROM { tb_name | stb_name } [WHERE clause];
**功能说明**: 将字符串常量转换为 JSON 类型。
-**返回结果数据类型**: JSON
+**返回结果数据类型**: JSON。
**适用数据类型**: JSON 字符串,形如 '{ "literal" : literal }'。'{}'表示空值。键必须为字符串字面量,值可以为数值字面量、字符串字面量、布尔字面量或空值字面量。str_literal中不支持转义符。
-**适用于**: 表和超级表
-
**嵌套子查询支持**:适用于内层查询和外层查询。
+**适用于**: 表和超级表。
+
#### TO_UNIXTIMESTAMP
```sql
-SELECT TO_UNIXTIMESTAMP(datetime_string | ts_col) FROM { tb_name | stb_name } [WHERE clause];
+SELECT TO_UNIXTIMESTAMP(datetime_string) FROM { tb_name | stb_name } [WHERE clause];
```
**功能说明**:将日期时间格式的字符串转换成为 UNIX 时间戳。
-**返回结果数据类型**:长整型 INT64。
+**返回结果数据类型**:BIGINT。
-**应用字段**:字符串常量或是 VARCHAR/NCHAR 类型的列。
+**应用字段**:VARCHAR, NCHAR。
-**适用于**:表、超级表。
+**嵌套子查询支持**:适用于内层查询和外层查询。
+
+**适用于**:表和超级表。
**使用说明**:
-- 输入的日期时间字符串须符合 ISO8601/RFC3339 标准,无法转换的字符串格式将返回 0。
+- 输入的日期时间字符串须符合 ISO8601/RFC3339 标准,无法转换的字符串格式将返回 NULL。
- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。
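+
+一个最小示意(表 t 为假设):第一条输入符合 ISO8601 标准,返回 BIGINT 时间戳;第二条无法转换,返回 NULL。
+
+```sql
+SELECT TO_UNIXTIMESTAMP('2022-09-01T08:00:00+08:00') FROM t;
+SELECT TO_UNIXTIMESTAMP('abc') FROM t;
+```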
@@ -488,11 +493,13 @@ INSERT INTO tb_name VALUES (NOW(), ...);
**功能说明**:返回客户端当前系统时间。
-**返回结果数据类型**:TIMESTAMP 时间戳类型。
+**返回结果数据类型**:TIMESTAMP。
**应用字段**:在 WHERE 或 INSERT 语句中使用时只能作用于 TIMESTAMP 类型的字段。
-**适用于**:表、超级表。
+**适用于**:表和超级表。
+
+**嵌套子查询支持**:适用于内层查询和外层查询。
**使用说明**:
@@ -504,40 +511,42 @@ INSERT INTO tb_name VALUES (NOW(), ...);
#### TIMEDIFF
```sql
-SELECT TIMEDIFF(ts_val1 | datetime_string1 | ts_col1, ts_val2 | datetime_string2 | ts_col2 [, time_unit]) FROM { tb_name | stb_name } [WHERE clause];
+SELECT TIMEDIFF(ts | datetime_string1, ts | datetime_string2 [, time_unit]) FROM { tb_name | stb_name } [WHERE clause];
```
**功能说明**:计算两个时间戳之间的差值,并近似到时间单位 time_unit 指定的精度。
-**返回结果数据类型**:长整型 INT64。
+**返回结果数据类型**:BIGINT。输入包含不符合时间日期格式字符串则返回 NULL。
-**应用字段**:UNIX 时间戳,日期时间格式的字符串,或者 TIMESTAMP 类型的列。
+**应用字段**:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。
-**适用于**:表、超级表。
+**适用于**:表和超级表。
+
+**嵌套子查询支持**:适用于内层查询和外层查询。
**使用说明**:
- 支持的时间单位 time_unit 如下:
- 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天)。
+ 1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。
- 如果时间单位 time_unit 未指定, 返回的时间差值精度与当前 DATABASE 设置的时间精度一致。
#### TIMETRUNCATE
```sql
-SELECT TIMETRUNCATE(ts_val | datetime_string | ts_col, time_unit) FROM { tb_name | stb_name } [WHERE clause];
+SELECT TIMETRUNCATE(ts | datetime_string , time_unit) FROM { tb_name | stb_name } [WHERE clause];
```
**功能说明**:将时间戳按照指定时间单位 time_unit 进行截断。
-**返回结果数据类型**:TIMESTAMP 时间戳类型。
+**返回结果数据类型**:TIMESTAMP。
-**应用字段**:UNIX 时间戳,日期时间格式的字符串,或者 TIMESTAMP 类型的列。
+**应用字段**:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。
-**适用于**:表、超级表。
+**适用于**:表和超级表。
**使用说明**:
- 支持的时间单位 time_unit 如下:
- 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天)。
+ 1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。
- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。
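+
+一个最小示意(表 t 及其 TIMESTAMP 主键列 ts 为假设):第一条以秒为单位计算两个时间相差 10 秒;第二条将 ts 截断到当日零点。
+
+```sql
+SELECT TIMEDIFF('2022-01-01 08:00:00', '2022-01-01 08:00:10', 1s) FROM t;
+SELECT TIMETRUNCATE(ts, 1d) FROM t;
+```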
@@ -549,11 +558,11 @@ SELECT TIMEZONE() FROM { tb_name | stb_name } [WHERE clause];
**功能说明**:返回客户端当前时区信息。
-**返回结果数据类型**:VARCHAR 类型。
+**返回结果数据类型**:VARCHAR。
**应用字段**:无
-**适用于**:表、超级表。
+**适用于**:表和超级表。
#### TODAY
@@ -566,11 +575,11 @@ INSERT INTO tb_name VALUES (TODAY(), ...);
**功能说明**:返回客户端当日零时的系统时间。
-**返回结果数据类型**:TIMESTAMP 时间戳类型。
+**返回结果数据类型**:TIMESTAMP。
**应用字段**:在 WHERE 或 INSERT 语句中使用时只能作用于 TIMESTAMP 类型的字段。
-**适用于**:表、超级表。
+**适用于**:表和超级表。
**使用说明**:
@@ -591,13 +600,13 @@ TDengine 支持针对数据的聚合查询。提供如下聚合函数。
SELECT AVG(field_name) FROM tb_name [WHERE clause];
```
-**功能说明**:统计表/超级表中某列的平均值。
+**功能说明**:统计指定字段的平均值。
-**返回数据类型**:双精度浮点数 Double。
+**返回数据类型**:DOUBLE。
**适用数据类型**:数值类型。
-**适用于**:表、超级表。
+**适用于**:表和超级表。
### COUNT
@@ -606,19 +615,18 @@ SELECT AVG(field_name) FROM tb_name [WHERE clause];
SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause];
```
-**功能说明**:统计表/超级表中记录行数或某列的非空值个数。
+**功能说明**:统计指定字段的记录行数。
-**返回数据类型**:长整型 INT64。
+**返回数据类型**:BIGINT。
-**适用数据类型**:应用全部字段。
+**适用数据类型**:全部类型字段。
-**适用于**:表、超级表。
+**适用于**:表和超级表。
**使用说明**:
- 可以使用星号(\*)来替代具体的字段,使用星号(\*)返回全部记录数量。
-- 针对同一表的(不包含 NULL 值)字段查询结果均相同。
-- 如果统计对象是具体的列,则返回该列中非 NULL 值的记录数量。
+- 如果统计字段是具体的列,则返回该列中非 NULL 值的记录数量。
### ELAPSED
@@ -629,17 +637,18 @@ SELECT ELAPSED(ts_primary_key [, time_unit]) FROM { tb_name | stb_name } [WHERE
**功能说明**:elapsed函数表达了统计周期内连续的时间长度,和twa函数配合使用可以计算统计曲线下的面积。在通过INTERVAL子句指定窗口的情况下,统计在给定时间范围内的每个窗口内有数据覆盖的时间范围;如果没有INTERVAL子句,则返回整个给定时间范围内的有数据覆盖的时间范围。注意,ELAPSED返回的并不是时间范围的绝对值,而是绝对值除以time_unit所得到的单位个数。
-**返回结果类型**:Double
+**返回结果类型**:DOUBLE。
-**适用数据类型**:Timestamp类型
+**适用数据类型**:TIMESTAMP。
**支持的版本**:2.6.0.0 及以后的版本。
**适用于**: 表,超级表,嵌套查询的外层查询
**说明**:
-- field_name参数只能是表的第一列,即timestamp主键列。
-- 按time_unit参数指定的时间单位返回,最小是数据库的时间分辨率。time_unit参数未指定时,以数据库的时间分辨率为时间单位。
+- field_name 参数只能是表的第一列,即 TIMESTAMP 类型的主键列。
+- 按 time_unit 参数指定的时间单位返回,最小是数据库的时间分辨率。time_unit 参数未指定时,以数据库的时间分辨率为时间单位。支持的时间单位 time_unit 如下:
+ 1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。
- 可以和interval组合使用,返回每个时间窗口的时间戳差值。需要特别注意的是,除第一个时间窗口和最后一个时间窗口外,中间窗口的时间戳差值均为窗口长度。
- order by asc/desc不影响差值的计算结果。
- 对于超级表,需要和group by tbname子句组合使用,不可以直接使用。
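+
+一个最小示意(子表 d1001 及其主键列 ts 为假设),统计每个 1 小时窗口内有数据覆盖的时长(以秒为单位):
+
+```sql
+SELECT ELAPSED(ts, 1s) FROM d1001
+WHERE ts >= '2022-01-01 00:00:00' AND ts < '2022-01-02 00:00:00'
+INTERVAL(1h);
+```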
@@ -668,11 +677,11 @@ SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause]
SELECT MODE(field_name) FROM tb_name [WHERE clause];
```
-**功能说明**:返回出现频率最高的值,若存在多个频率相同的最高值,输出空。
+**功能说明**:返回出现频率最高的值,若存在多个频率相同的最高值,输出NULL。
-**返回数据类型**:同应用的字段。
+**返回数据类型**:与输入数据类型一致。
-**适用数据类型**: 数值类型。
+**适用数据类型**:全部类型字段。
**适用于**:表和超级表。
@@ -683,11 +692,11 @@ SELECT MODE(field_name) FROM tb_name [WHERE clause];
SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
-**功能说明**:统计表/超级表中某列的最大值和最小值之差。
+**功能说明**:统计表中某列的最大值和最小值之差。
-**返回数据类型**:双精度浮点数。
+**返回数据类型**:DOUBLE。
-**适用数据类型**:数值类型或TIMESTAMP类型。
+**适用数据类型**:INTEGER, TIMESTAMP。
**适用于**:表和超级表。
@@ -700,7 +709,7 @@ SELECT STDDEV(field_name) FROM tb_name [WHERE clause];
**功能说明**:统计表中某列的均方差。
-**返回数据类型**:双精度浮点数 Double。
+**返回数据类型**:DOUBLE。
**适用数据类型**:数值类型。
@@ -715,7 +724,7 @@ SELECT SUM(field_name) FROM tb_name [WHERE clause];
**功能说明**:统计表/超级表中某列的和。
-**返回数据类型**:双精度浮点数 Double 和长整型 INT64。
+**返回数据类型**:DOUBLE, BIGINT。
**适用数据类型**:数值类型。
@@ -729,10 +738,10 @@ SELECT HYPERLOGLOG(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
**功能说明**:
- - 采用 hyperloglog 算法,返回某列的基数。该算法在数据量很大的情况下,可以明显降低内存的占用,但是求出来的基数是个估算值,标准误差(标准误差是多次实验,每次的平均数的标准差,不是与真实结果的误差)为 0.81%。
+ - 采用 hyperloglog 算法,返回某列的基数。该算法在数据量很大的情况下,可以明显降低内存的占用,求出来的基数是个估算值,标准误差(标准误差是多次实验,每次的平均数的标准差,不是与真实结果的误差)为 0.81%。
- 在数据量较少的时候该算法不是很准确,可以使用 select count(data) from (select unique(col) as data from table) 的方法。
-**返回结果类型**:整形。
+**返回结果类型**:INTEGER。
**适用数据类型**:任何类型。
@@ -747,7 +756,7 @@ SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_nam
**功能说明**:统计数据按照用户指定区间的分布。
-**返回结果类型**:如归一化参数 normalized 设置为 1,返回结果为双精度浮点类型 DOUBLE,否则为长整形 INT64。
+**返回结果类型**:如归一化参数 normalized 设置为 1,返回结果为 DOUBLE 类型,否则为 BIGINT 类型。
**适用数据类型**:数值型字段。
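+
+一个最小示意(表 meters 与列 current 为假设),以用户自定义分桶方式 "user_input"、按边界 1、3、5、7 划分出三个分桶,统计各分桶的归一化占比:
+
+```sql
+SELECT HISTOGRAM(current, 'user_input', '[1, 3, 5, 7]', 1) FROM meters;
+```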
@@ -782,11 +791,15 @@ FROM { tb_name | stb_name } [WHERE clause]
**功能说明**:统计表/超级表中指定列的值的近似百分比分位数,与 PERCENTILE 函数相似,但是返回近似结果。
-**返回数据类型**: 双精度浮点数 Double。
+**返回数据类型**: DOUBLE。
-**适用数据类型**:数值类型。P值范围是[0,100],当为0时等同于MIN,为100时等同于MAX。如果不指定 algo_type 则使用默认算法 。
+**适用数据类型**:数值类型。
-**适用于**:表、超级表。
+**适用于**:表和超级表。
+
+**说明**:
+- P值范围是[0,100],当为0时等同于MIN,为100时等同于MAX。
+- algo_type 取值为 "default" 或 "t-digest"。输入为 "default" 时,函数使用基于直方图的算法进行计算;输入为 "t-digest" 时,使用 t-digest 算法计算分位数的近似结果。如果不指定 algo_type,则使用 "default" 算法。
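+
+一个最小示意(表 meters 与列 current 为假设):第一条使用默认的 "default" 算法,第二条改用 t-digest 算法。
+
+```sql
+SELECT APERCENTILE(current, 95) FROM meters;
+SELECT APERCENTILE(current, 95, 't-digest') FROM meters;
+```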
### BOTTOM
@@ -930,7 +943,7 @@ SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause];
**功能说明**:统计表中某列的值百分比分位数。
-**返回数据类型**: 双精度浮点数 Double。
+**返回数据类型**: DOUBLE。
**应用字段**:数值类型。
@@ -951,7 +964,7 @@ SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause];
**返回数据类型**:同应用的字段。
-**适用数据类型**:适合于除时间主列外的任何类型。
+**适用数据类型**:适合于除时间主键列外的任何类型。
**适用于**:表、超级表。
@@ -968,7 +981,7 @@ SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
**适用数据类型**:数值类型。
-**适用于**:表、超级表。
+**适用于**:表和超级表。
**使用说明**:
@@ -1009,13 +1022,13 @@ SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause]
**嵌套子查询支持**: 适用于内层查询和外层查询。
-**适用于**:表和超级表
+**适用于**:表和超级表。
**使用说明**:
- 不支持 +、-、*、/ 运算,如 csum(col1) + csum(col2)。
- 只能与聚合(Aggregation)函数一起使用。 该函数可以应用在普通表和超级表上。
- - 使用在超级表上的时候,需要搭配 Group by tbname使用,将结果强制规约到单个时间线。
+ - 使用在超级表上的时候,需要搭配 PARTITION BY tbname 使用,将结果强制规约到单个时间线。
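+
+一个最小示意(超级表 meters 与列 current 为假设),按子表切分时间线后计算累加和:
+
+```sql
+SELECT CSUM(current) FROM meters PARTITION BY tbname;
+```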
### DERIVATIVE
@@ -1026,13 +1039,13 @@ SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHER
**功能说明**:统计表中某列数值的单位变化率。其中单位时间区间的长度可以通过 time_interval 参数指定,最小可以是 1 秒(1s);ignore_negative 参数的值可以是 0 或 1,为 1 时表示忽略负值。
-**返回数据类型**:双精度浮点数。
+**返回数据类型**:DOUBLE。
**适用数据类型**:数值类型。
-**适用于**:表、超级表
+**适用于**:表和超级表。
-**使用说明**: DERIVATIVE 函数可以在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)。
+**使用说明**: DERIVATIVE 函数可以在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)。
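+
+一个最小示意(超级表 meters 与列 current 为假设),以 10s 为单位时间区间、不忽略负值,并按子表切分时间线:
+
+```sql
+SELECT DERIVATIVE(current, 10s, 0) FROM meters PARTITION BY tbname;
+```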
### DIFF
@@ -1047,7 +1060,7 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER
**适用数据类型**:数值类型。
-**适用于**:表、超级表。
+**适用于**:表和超级表。
**使用说明**: 输出结果行数是范围内总行数减一,第一行没有结果输出。
@@ -1060,11 +1073,12 @@ SELECT IRATE(field_name) FROM tb_name WHERE clause;
**功能说明**:计算瞬时增长率。使用时间区间中最后两个样本数据来计算瞬时增长速率;如果这两个值呈递减关系,那么只取最后一个数用于计算,而不是使用二者差值。
-**返回数据类型**:双精度浮点数 Double。
+**返回数据类型**:DOUBLE。
**适用数据类型**:数值类型。
-**适用于**:表、超级表。
+**适用于**:表和超级表。
+
### MAVG
@@ -1074,19 +1088,19 @@ SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
**功能说明**: 计算连续 k 个值的移动平均数(moving average)。如果输入行数小于 k,则无结果输出。参数 k 的合法输入范围是 1≤ k ≤ 1000。
- **返回结果类型**: 返回双精度浮点数类型。
+ **返回结果类型**: DOUBLE。
**适用数据类型**: 数值类型。
**嵌套子查询支持**: 适用于内层查询和外层查询。
- **适用于**:表和超级表
+ **适用于**:表和超级表。
**使用说明**:
- 不支持 +、-、*、/ 运算,如 mavg(col1, k1) + mavg(col2, k1);
- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用;
- - 使用在超级表上的时候,需要搭配 Group by tbname使用,将结果强制规约到单个时间线。
+ - 使用在超级表上的时候,需要搭配 PARTITION BY tbname 使用,将结果强制规约到单个时间线。
### SAMPLE
@@ -1102,12 +1116,12 @@ SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
**嵌套子查询支持**: 适用于内层查询和外层查询。
- **适用于**:表和超级表
+ **适用于**:表和超级表。
**使用说明**:
- 不能参与表达式计算;该函数可以应用在普通表和超级表上;
- - 使用在超级表上的时候,需要搭配 Group by tbname 使用,将结果强制规约到单个时间线。
+ - 使用在超级表上的时候,需要搭配 PARTITION BY tbname 使用,将结果强制规约到单个时间线。
### STATECOUNT
@@ -1119,10 +1133,10 @@ SELECT STATECOUNT(field_name, oper, val) FROM { tb_name | stb_name } [WHERE clau
**参数范围**:
-- oper : LT (小于)、GT(大于)、LE(小于等于)、GE(大于等于)、NE(不等于)、EQ(等于),不区分大小写。
+- oper : "LT" (小于)、"GT"(大于)、"LE"(小于等于)、"GE"(大于等于)、"NE"(不等于)、"EQ"(等于),不区分大小写。
- val : 数值型
-**返回结果类型**:整形。
+**返回结果类型**:INTEGER。
**适用数据类型**:数值类型。
@@ -1132,7 +1146,7 @@ SELECT STATECOUNT(field_name, oper, val) FROM { tb_name | stb_name } [WHERE clau
**使用说明**:
-- 该函数可以应用在普通表上,在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)
+- 该函数可以应用在普通表上,在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)
- 不能和窗口操作一起使用,例如 interval/state_window/session_window。
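+
+一个最小示意(子表 d1001 与列 current 为假设),统计满足 current > 10 的连续记录条数:
+
+```sql
+SELECT STATECOUNT(current, 'GT', 10) FROM d1001;
+```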
@@ -1146,11 +1160,11 @@ SELECT stateDuration(field_name, oper, val, unit) FROM { tb_name | stb_name } [W
**参数范围**:
-- oper : LT (小于)、GT(大于)、LE(小于等于)、GE(大于等于)、NE(不等于)、EQ(等于),不区分大小写。
+- oper : "LT" (小于)、"GT"(大于)、"LE"(小于等于)、"GE"(大于等于)、"NE"(不等于)、"EQ"(等于),不区分大小写。
- val : 数值型
- unit : 时间长度的单位,范围[1s、1m、1h ],不足一个单位舍去。默认为 1s。
-**返回结果类型**:整形。
+**返回结果类型**:INTEGER。
**适用数据类型**:数值类型。
@@ -1160,7 +1174,7 @@ SELECT stateDuration(field_name, oper, val, unit) FROM { tb_name | stb_name } [W
**使用说明**:
-- 该函数可以应用在普通表上,在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)
+- 该函数可以应用在普通表上,在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)
- 不能和窗口操作一起使用,例如 interval/state_window/session_window。
@@ -1172,13 +1186,13 @@ SELECT TWA(field_name) FROM tb_name WHERE clause;
**功能说明**:时间加权平均函数。统计表中某列在一段时间内的时间加权平均。
-**返回数据类型**:双精度浮点数 Double。
+**返回数据类型**:DOUBLE。
**适用数据类型**:数值类型。
-**适用于**:表、超级表。
+**适用于**:表和超级表。
-**使用说明**: TWA 函数可以在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)。
+**使用说明**: TWA 函数可以在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)。
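+
+一个最小示意(超级表 meters 与列 current 为假设),按子表划分时间线后计算时间加权平均:
+
+```sql
+SELECT TWA(current) FROM meters PARTITION BY tbname;
+```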
## 系统信息函数
diff --git a/docs/zh/12-taos-sql/21-node.md b/docs/zh/12-taos-sql/21-node.md
index 47aa2077a3f9872c6b6751db49c9f865e2c3b9bc..4816daf42042c0607aebf37c8b57961e5b1927fe 100644
--- a/docs/zh/12-taos-sql/21-node.md
+++ b/docs/zh/12-taos-sql/21-node.md
@@ -40,7 +40,6 @@ ALTER ALL DNODES dnode_option
dnode_option: {
'resetLog'
- | 'resetQueryCache'
| 'balance' value
| 'monitor' value
| 'debugFlag' value
diff --git a/docs/zh/12-taos-sql/26-udf.md b/docs/zh/12-taos-sql/26-udf.md
index 7b5acbfcad4bcba56ad3259c2aa2589a8a73adb7..bd8d61a5844241efae9eee99a73c65afd3d0926f 100644
--- a/docs/zh/12-taos-sql/26-udf.md
+++ b/docs/zh/12-taos-sql/26-udf.md
@@ -19,7 +19,7 @@ library_path:包含UDF函数实现的动态链接库的绝对路径,是在
OUTPUTTYPE:标识此函数的返回类型。
BUFSIZE:中间结果的缓冲区大小,单位是字节。不设置则默认为0。最大不可超过512字节。
-关于如何开发自定义函数,请参考 [UDF使用说明](../develop/udf)。
+关于如何开发自定义函数,请参考 [UDF使用说明](../../develop/udf)。
## 删除自定义函数
diff --git a/docs/zh/14-reference/12-config/index.md b/docs/zh/14-reference/12-config/index.md
index 2d1866d5dd1874164d03ffdfb382010c8345ad63..fefb50c541d9b293ddabb7d5cbaa5f68bf1dee98 100644
--- a/docs/zh/14-reference/12-config/index.md
+++ b/docs/zh/14-reference/12-config/index.md
@@ -80,21 +80,16 @@ taos --dump-config
| 补充说明 | RESTful 服务在 2.4.0.0 之前(不含)由 taosd 提供,默认端口为 6041; 在 2.4.0.0 及后续版本由 taosAdapter 提供,默认端口为 6041 |
:::note
-确保集群中所有主机在端口 6030-6042 上的 TCP/UDP 协议能够互通。(详细的端口情况请参见下表)
+确保集群中所有主机在端口 6030 上的 TCP 协议能够互通。(详细的端口情况请参见下表)
:::
| 协议 | 默认端口 | 用途说明 | 修改方法 |
| :--- | :-------- | :---------------------------------- | :--------------------------------------------------------------------------------------------------------------------------------- |
-| TCP | 6030 | 客户端与服务端之间通讯。 | 由配置文件设置 serverPort 决定。 |
-| TCP | 6035 | 多节点集群的节点间通讯。 | 随 serverPort 端口变化。 |
-| TCP | 6040 | 多节点集群的节点间数据同步。 | 随 serverPort 端口变化。 |
+| TCP | 6030 | 客户端与服务端之间通讯,多节点集群的节点间通讯。 | 由配置文件设置 serverPort 决定。 |
| TCP | 6041 | 客户端与服务端之间的 RESTful 通讯。 | 随 serverPort 端口变化。注意 taosAdapter 配置或有不同,请参考相应[文档](/reference/taosadapter/)。 |
-| TCP | 6042 | Arbitrator 的服务端口。 | 随 Arbitrator 启动参数设置变化。 |
| TCP | 6043 | TaosKeeper 监控服务端口。 | 随 TaosKeeper 启动参数设置变化。 |
| TCP | 6044 | 支持 StatsD 的数据接入端口。 | 随 taosAdapter 启动参数设置变化(2.3.0.1+以上版本)。 |
| UDP | 6045 | 支持 collectd 数据接入端口。 | 随 taosAdapter 启动参数设置变化(2.3.0.1+以上版本)。 |
| TCP | 6060 | 企业版内 Monitor 服务的网络端口。 | |
-| UDP | 6030-6034 | 客户端与服务端之间通讯。 | 随 serverPort 端口变化。 |
-| UDP | 6035-6039 | 多节点集群的节点间通讯。 | 随 serverPort 端口变化。
### maxShellConns
@@ -105,26 +100,6 @@ taos --dump-config
| 取值范围 | 10-50000000 |
| 缺省值 | 5000 |
-### maxConnections
-
-| 属性 | 说明 |
-| -------- | ------------------------------------------------------------------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 一个数据库连接所容许的 dnode 连接数 |
-| 取值范围 | 1-100000 |
-| 缺省值 | 5000 |
-| 补充说明 | 实际测试下来,如果默认没有配,选 50 个 worker thread 会产生 Network unavailable |
-
-### rpcForceTcp
-
-| 属性 | 说明 |
-| -------- | --------------------------------------------------- |
-| 适用范围 | 服务端和客户端均适用 |
-| 含义 | 强制使用 TCP 传输 |
-| 取值范围 | 0: 不开启 1: 开启 |
-| 缺省值 | 0 |
-| 补充说明 | 在网络比较差的环境中,建议开启。
2.0 版本新增。 |
-
## 监控相关
### monitor
@@ -132,10 +107,26 @@ taos --dump-config
| 属性 | 说明 |
| -------- | ---------------------------------------------------------------------------------------------------------------------------------------------- |
| 适用范围 | 仅服务端适用 |
-| 含义 | 服务器内部的系统监控开关。监控主要负责收集物理节点的负载状况,包括 CPU、内存、硬盘、网络带宽、HTTP 请求量的监控记录,记录信息存储在`LOG`库中。 |
+| 含义 | 服务器内部的系统监控开关。监控主要负责收集物理节点的负载状况,包括 CPU、内存、硬盘、网络带宽的监控记录,监控信息将通过 HTTP 协议发送给由 `monitorFqdn` 和 `monitorPort` 指定的 TaosKeeper 监控服务 |
| 取值范围 | 0:关闭监控服务, 1:激活监控服务。 |
| 缺省值 | 1 |
+### monitorFqdn
+
+| 属性 | 说明 |
+| -------- | -------------------------------------------- |
+| 适用范围 | 仅服务端适用 |
+| 含义 | TaosKeeper 监控服务的 FQDN |
+| 缺省值 | 无 |
+
+### monitorPort
+
+| 属性 | 说明 |
+| -------- | -------------------------------------------- |
+| 适用范围 | 仅服务端适用 |
+| 含义 | TaosKeeper 监控服务的端口号 |
+| 缺省值 | 6043 |
+
### monitorInterval
| 属性 | 说明 |
@@ -143,9 +134,10 @@ taos --dump-config
| 适用范围 | 仅服务端适用 |
| 含义 | 监控数据库记录系统参数(CPU/内存)的时间间隔 |
| 单位 | 秒 |
-| 取值范围 | 1-600 |
+| 取值范围 | 1-200000 |
| 缺省值 | 30 |
+
### telemetryReporting
| 属性 | 说明 |
@@ -167,19 +159,10 @@ taos --dump-config
| 缺省值 | 无 |
| 补充说明 | 计算规则可以根据实际应用可能的最大并发数和表的数字相乘,再乘 170 。
(2.0.15 以前的版本中,此参数的单位是字节) |
-### ratioOfQueryCores
-
-| 属性 | 说明 |
-| -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 设置查询线程的最大数量。 |
-| 缺省值 | 1 |
-| 补充说明 | 最小值 0 表示只有 1 个查询线程
最大值 2 表示最大建立 2 倍 CPU 核数的查询线程。
默认为 1,表示最大和 CPU 核数相等的查询线程。
该值可以为小数,即 0.5 表示最大建立 CPU 核数一半的查询线程。 |
-
### maxNumOfDistinctRes
| 属性 | 说明 |
-| -------- | -------------------------------- | --- |
+| -------- | -------------------------------- |
| 适用范围 | 仅服务端适用 |
| 含义 | 允许返回的 distinct 结果最大行数 |
| 取值范围 | 默认值为 10 万,最大值 1 亿 |
@@ -301,96 +284,6 @@ charset 的有效值是 UTF-8。
| 含义 | 数据文件目录,所有的数据文件都将写入该目录 |
| 缺省值 | /var/lib/taos |
-### cache
-
-| 属性 | 说明 |
-| -------- | ------------ |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 内存块的大小 |
-| 单位 | MB |
-| 缺省值 | 16 |
-
-### blocks
-
-| 属性 | 说明 |
-| -------- | ----------------------------------------------------------------------------------------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 每个 vnode(tsdb)中有多少 cache 大小的内存块。因此一个 vnode 的用的内存大小粗略为(cache \* blocks) |
-| 缺省值 | 6 |
-
-### days
-
-| 属性 | 说明 |
-| -------- | -------------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 数据文件存储数据的时间跨度 |
-| 单位 | 天 |
-| 缺省值 | 10 |
-
-### keep
-
-| 属性 | 说明 |
-| -------- | -------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 数据保留的天数 |
-| 单位 | 天 |
-| 缺省值 | 3650 |
-
-### minRows
-
-| 属性 | 说明 |
-| -------- | ---------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 文件块中记录的最小条数 |
-| 缺省值 | 100 |
-
-### maxRows
-
-| 属性 | 说明 |
-| -------- | ---------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 文件块中记录的最大条数 |
-| 缺省值 | 4096 |
-
-### walLevel
-
-| 属性 | 说明 |
-| -------- | --------------------------------------------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | WAL 级别 |
-| 取值范围 | 0: 不写WAL;
1:写 WAL, 但不执行 fsync
2:写 WAL, 而且执行 fsync |
-| 缺省值 | 1 |
-
-### fsync
-
-| 属性 | 说明 |
-| -------- | -------------------------------------------------------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 当 WAL 设置为 2 时,执行 fsync 的周期 |
-| 单位 | 毫秒 |
-| 取值范围 | 最小为 0,表示每次写入,立即执行 fsync
最大为 180000(三分钟) |
-| 缺省值 | 3000 |
-
-### update
-
-| 属性 | 说明 |
-| -------- | ---------------------------------------------------------------------------------------------------------------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 允许更新已存在的数据行 |
-| 取值范围 | 0:不允许更新
1:允许整行更新
2:允许部分列更新。(2.1.7.0 版本开始此参数支持设为 2,在此之前取值只能是 [0, 1]) |
-| 缺省值 | 0 |
-| 补充说明 | 2.0.8.0 版本之前,不支持此参数。 |
-
-### cacheLast
-
-| 属性 | 说明 |
-| -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 是否在内存中缓存子表的最近数据 |
-| 取值范围 | 0:关闭
1:缓存子表最近一行数据
2:缓存子表每一列的最近的非 NULL 值
3:同时打开缓存最近行和列功能。(2.1.2.0 版本开始此参数支持 0 ~ 3 的取值范围,在此之前取值只能是 [0, 1]) |
-| 缺省值 | 0 |
-| 补充说明 | 2.1.2.0 版本之前、2.0.20.7 版本之前在 taos.cfg 文件中不支持此参数。 |
-
### minimalTmpDirGB
| 属性 | 说明 |
@@ -409,110 +302,19 @@ charset 的有效值是 UTF-8。
| 单位 | GB |
| 缺省值 | 2.0 |
-### vnodeBak
-
-| 属性 | 说明 |
-| -------- | -------------------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 删除 vnode 时是否备份 vnode 目录 |
-| 取值范围 | 0:否,1:是 |
-| 缺省值 | 1 |
-
## 集群相关
-### numOfMnodes
-
-| 属性 | 说明 |
-| -------- | ------------------ |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 系统中管理节点个数 |
-| 缺省值 | 3 |
-
-### replica
-
-| 属性 | 说明 |
-| -------- | ------------ |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 副本个数 |
-| 取值范围 | 1-3 |
-| 缺省值 | 1 |
-
-### quorum
-
-| 属性 | 说明 |
-| -------- | -------------------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 多副本环境下指令执行的确认数要求 |
-| 取值范围 | 1,2 |
-| 缺省值 | 1 |
-
-### role
+### supportVnodes
| 属性 | 说明 |
| -------- | ----------------------------------------------------------------------------------------------------------------------------------------- |
| 适用范围 | 仅服务端适用 |
-| 含义 | dnode 的可选角色 |
-| 取值范围 | 0:any(既可作为 mnode,也可分配 vnode)
1:mgmt(只能作为 mnode,不能分配 vnode)
2:dnode(不能作为 mnode,只能分配 vnode) |
-| 缺省值 | 0 |
-
-### balance
-
-| 属性 | 说明 |
-| -------- | ---------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 是否启动负载均衡 |
-| 取值范围 | 0,1 |
-| 缺省值 | 1 |
-
-### balanceInterval
-
-| 属性 | 说明 |
-| -------- | ------------------------------------------------ |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 管理节点在正常运行状态下,检查负载均衡的时间间隔 |
-| 单位 | 秒 |
-| 取值范围 | 1-30000 |
-| 缺省值 | 300 |
-
-### arbitrator
-
-| 属性 | 说明 |
-| -------- | ------------------------------------------ |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 系统中裁决器的 endpoint,其格式如 firstEp |
-| 缺省值 | 空 |
+| 含义 | dnode 支持的最大 vnode 数目 |
+| 取值范围 | 0-4096 |
+| 缺省值 | 256 |
## 时间相关
-### precision
-
-| 属性 | 说明 |
-| -------- | ------------------------------------------------- |
-| 适用范围 | 仅服务端 |
-| 含义 | 创建数据库时使用的时间精度 |
-| 取值范围 | ms: millisecond; us: microsecond ; ns: nanosecond |
-| 缺省值 | ms |
-
-### rpcTimer
-
-| 属性 | 说明 |
-| -------- | -------------------- |
-| 适用范围 | 服务端和客户端均适用 |
-| 含义 | rpc 重试时长 |
-| 单位 | 毫秒 |
-| 取值范围 | 100-3000 |
-| 缺省值 | 300 |
-
-### rpcMaxTime
-
-| 属性 | 说明 |
-| -------- | -------------------- |
-| 适用范围 | 服务端和客户端均适用 |
-| 含义 | rpc 等待应答最大时长 |
-| 单位 | 秒 |
-| 取值范围 | 100-7200 |
-| 缺省值 | 600 |
-
### statusInterval
| 属性 | 说明 |
@@ -533,105 +335,8 @@ charset 的有效值是 UTF-8。
| 取值范围 | 1-120 |
| 缺省值 | 3 |
-### tableMetaKeepTimer
-
-| 属性 | 说明 |
-| -------- | --------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 表的元数据 cache 时长 |
-| 单位 | 秒 |
-| 取值范围 | 1-8640000 |
-| 缺省值 | 7200 |
-
-### maxTmrCtrl
-
-| 属性 | 说明 |
-| -------- | -------------------- |
-| 适用范围 | 服务端和客户端均适用 |
-| 含义 | 定时器个数 |
-| 单位 | 个 |
-| 取值范围 | 8-2048 |
-| 缺省值 | 512 |
-
-### offlineThreshold
-
-| 属性 | 说明 |
-| -------- | ------------------------------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | dnode 离线阈值,超过该时间将导致 dnode 离线 |
-| 单位 | 秒 |
-| 取值范围 | 5-7200000 |
-| 缺省值 | 86400\*10(10 天) |
-
## 性能调优
-### numOfThreadsPerCore
-
-| 属性 | 说明 |
-| -------- | ----------------------------------- |
-| 适用范围 | 服务端和客户端均适用 |
-| 含义 | 每个 CPU 核生成的队列消费者线程数量 |
-| 缺省值 | 1.0 |
-
-### ratioOfQueryThreads
-
-| 属性 | 说明 |
-| -------- | ------------------------------------------------------------------------------------------------------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 设置查询线程的最大数量 |
-| 取值范围 | 0:表示只有 1 个查询线程
1:表示最大和 CPU 核数相等的查询线程
2:表示最大建立 2 倍 CPU 核数的查询线程。 |
-| 缺省值 | 1 |
-| 补充说明 | 该值可以为小数,即 0.5 表示最大建立 CPU 核数一半的查询线程。 |
-
-### maxVgroupsPerDb
-
-| 属性 | 说明 |
-| -------- | ------------------------------------ |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 每个 DB 中 能够使用的最大 vnode 个数 |
-| 取值范围 | 0-8192 |
-| 缺省值 | 0 |
-
-### maxTablesPerVnode
-
-| 属性 | 说明 |
-| -------- | --------------------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 每个 vnode 中能够创建的最大表个数 |
-| 缺省值 | 1000000 |
-
-### minTablesPerVnode
-
-| 属性 | 说明 |
-| -------- | --------------------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 每个 vnode 中必须创建表的最小数量 |
-| 缺省值 | 1000 |
-
-### tableIncStepPerVnode
-
-| 属性 | 说明 |
-| -------- | ------------------------------------------------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 每个 vnode 中超过最小表数,i.e. minTablesPerVnode, 后递增步长 |
-| 缺省值 | 1000 |
-
-### maxNumOfOrderedRes
-
-| 属性 | 说明 |
-| -------- | -------------------------------------- |
-| 适用范围 | 服务端和客户端均适用 |
-| 含义 | 支持超级表时间排序允许的最多记录数限制 |
-| 缺省值 | 10 万 |
-
-### mnodeEqualVnodeNum
-
-| 属性 | 说明 |
-| -------- | ------------------------------------ |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 将一个 mnode 等同于 vnode 消耗的个数 |
-| 缺省值 | 4 |
-
### numOfCommitThreads
| 属性 | 说明 |
@@ -642,23 +347,6 @@ charset 的有效值是 UTF-8。
## 压缩相关
-### comp
-
-| 属性 | 说明 |
-| -------- | ----------------------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 文件压缩标志位 |
-| 取值范围 | 0:关闭,1:一阶段压缩,2:两阶段压缩 |
-| 缺省值 | 2 |
-
-### tsdbMetaCompactRatio
-
-| 属性 | 说明 |
-| -------- | -------------------------------------------------------------- |
-| 含义 | tsdb meta 文件中冗余数据超过多少阈值,开启 meta 文件的压缩功能 |
-| 取值范围 | 0:不开启,[1-100]:冗余数据比例 |
-| 缺省值 | 0 |
-
### compressMsgSize
| 属性 | 说明 |
@@ -680,165 +368,6 @@ charset 的有效值是 UTF-8。
| 缺省值 | -1 |
| 补充说明 | 2.3.0.0 版本新增。 |
-### lossyColumns
-
-| 属性 | 说明 |
-| -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ |
-| 适用范围 | 服务器端 |
-| 含义 | 配置要进行有损压缩的浮点数据类型 |
-| 取值范围 | 空字符串:关闭有损压缩
float:只对 float 类型进行有损压缩
double:只对 double 类型进行有损压缩
float \| double:float double 都进行有损压缩 |
-| 缺省值 | 空字符串 |
-| 补充说明 | 有损压缩默认为关闭状态,只有配置后才生效 |
-
-### fPrecision
-
-| 属性 | 说明 |
-| -------- | -------------------------------- |
-| 适用范围 | 服务器端 |
-| 含义 | 设置 float 类型浮点数压缩精度 |
-| 取值范围 | 0.1 ~ 0.00000001 |
-| 缺省值 | 0.00000001 |
-| 补充说明 | 小于此值的浮点数尾数部分将被截取 |
-
-### dPrecision
-
-| 属性 | 说明 |
-| -------- | -------------------------------- |
-| 适用范围 | 服务器端 |
-| 含义 | 设置 double 类型浮点数压缩精度 |
-| 取值范围 | 0.1 ~ 0.0000000000000001 |
-| 缺省值 | 0.0000000000000001 |
-| 补充说明 | 小于此值的浮点数尾数部分将被截取 |
-
-## 连续查询相关
-
-### stream
-
-| 属性 | 说明 |
-| -------- | ------------------------------ |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 是否启用连续查询(流计算功能) |
-| 取值范围 | 0:不允许
1:允许 |
-| 缺省值 | 1 |
-
-### minSlidingTime
-
-| 属性 | 说明 |
-| -------- | ----------------------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 最小滑动窗口时长 |
-| 单位 | 毫秒 |
-| 取值范围 | 10-1000000 |
-| 缺省值 | 10 |
-| 补充说明 | 支持 us 补值后,这个值就是 1us 了。 |
-
-### minIntervalTime
-
-| 属性 | 说明 |
-| -------- | -------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 时间窗口最小值 |
-| 单位 | 毫秒 |
-| 取值范围 | 1-1000000 |
-| 缺省值 | 10 |
-
-### maxStreamCompDelay
-
-| 属性 | 说明 |
-| -------- | -------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 连续查询启动最大延迟 |
-| 单位 | 毫秒 |
-| 取值范围 | 10-1000000000 |
-| 缺省值 | 20000 |
-
-### maxFirstStreamCompDelay
-
-| 属性 | 说明 |
-| -------- | -------------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 第一次连续查询启动最大延迟 |
-| 单位 | 毫秒 |
-| 取值范围 | 10-1000000000 |
-| 缺省值 | 10000 |
-
-### retryStreamCompDelay
-
-| 属性 | 说明 |
-| -------- | -------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 连续查询重试等待间隔 |
-| 单位 | 毫秒 |
-| 取值范围 | 10-1000000000 |
-| 缺省值 | 10 |
-
-### streamCompDelayRatio
-
-| 属性 | 说明 |
-| -------- | ---------------------------------------------------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 连续查询的延迟时间计算系数,实际延迟时间为本参数乘以计算时间窗口 |
-| 取值范围 | 0.1-0.9 |
-| 缺省值 | 0.1 |
-
-:::info
-为避免多个 stream 同时执行占用太多系统资源,程序中对 stream 的执行时间人为增加了一些随机的延时。
maxFirstStreamCompDelay 是 stream 第一次执行前最少要等待的时间。
streamCompDelayRatio 是延迟时间的计算系数,它乘以查询的 interval 后为延迟时间基准。
maxStreamCompDelay 是延迟时间基准的上限。
实际延迟时间为一个不超过延迟时间基准的随机值。
stream 某次计算失败后需要重试,retryStreamCompDelay 是重试的等待时间基准。
实际重试等待时间为不超过等待时间基准的随机值。
-
-:::
-
-## HTTP 相关
-
-:::note
-HTTP 服务在 2.4.0.0(不含)以前的版本中由 taosd 提供,在 2.4.0.0 以后(含)由 taosAdapter 提供。
-本节的配置参数仅在 2.4.0.0(不含)以前的版本中生效。如果您使用的是 2.4.0.0(含)及以后的版本请参考[文档](/reference/taosadapter/)。
-
-:::
-
-### http
-
-| 属性 | 说明 |
-| -------- | --------------------------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 服务器内部的 http 服务开关。 |
-| 取值范围 | 0:关闭 http 服务, 1:激活 http 服务。 |
-| 缺省值 | 1 |
-
-### httpEnableRecordSql
-
-| 属性 | 说明 |
-| -------- | --------------------------------------------------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 记录通过 RESTFul 接口,产生的 SQL 调用。 |
-| 缺省值 | 0 |
-| 补充说明 | 生成的文件(httpnote.0/httpnote.1),与服务端日志所在目录相同。 |
-
-### httpMaxThreads
-
-| 属性 | 说明 |
-| -------- | ------------------------------------------------------------------------------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | RESTFul 接口的线程数。taosAdapter 配置或有不同,请参考相应[文档](/reference/taosadapter/)。 |
-| 缺省值 | 2 |
-
-### restfulRowLimit
-
-| 属性 | 说明 |
-| -------- | ----------------------------------------------------------------------------------------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | RESTFul 接口单次返回的记录条数。taosAdapter 配置或有不同,请参考相应[文档](/reference/taosadapter/)。 |
-| 缺省值 | 10240 |
-| 补充说明 | 最大 10,000,000 |
-
-### httpDBNameMandatory
-
-| 属性 | 说明 |
-| -------- | ---------------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 是否在 URL 中输入 数据库名称 |
-| 取值范围 | 0:不开启,1:开启 |
-| 缺省值 | 0 |
-| 补充说明 | 2.3 版本新增。 |
-
## 日志相关
### logDir
@@ -894,50 +423,23 @@ HTTP 服务在 2.4.0.0(不含)以前的版本中由 taosd 提供,在 2.4.0
| 取值范围 | 131(输出错误和警告日志),135(输出错误、警告和调试日志),143(输出错误、警告、调试和跟踪日志) |
| 缺省值 | 131 或 135(不同模块有不同的默认值) |
-### mDebugFlag
-
-| 属性 | 说明 |
-| -------- | ------------------ |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 管理模块的日志开关 |
-| 取值范围 | 同上 |
-| 缺省值 | 135 |
-
-### dDebugFlag
-
-| 属性 | 说明 |
-| -------- | -------------------- |
-| 适用范围 | 服务端和客户端均适用 |
-| 含义 | dnode 模块的日志开关 |
-| 取值范围 | 同上 |
-| 缺省值 | 135 |
-
-### sDebugFlag
-
-| 属性 | 说明 |
-| -------- | -------------------- |
-| 适用范围 | 服务端和客户端均适用 |
-| 含义 | sync 模块的日志开关 |
-| 取值范围 | 同上 |
-| 缺省值 | 135 |
-
-### wDebugFlag
+### tmrDebugFlag
| 属性 | 说明 |
| -------- | -------------------- |
| 适用范围 | 服务端和客户端均适用 |
-| 含义 | WAL 模块的日志开关 |
+| 含义 | 定时器模块的日志开关 |
| 取值范围 | 同上 |
-| 缺省值 | 135 |
+| 缺省值 | |
-### sdbDebugFlag
+### uDebugFlag
-| 属性 | 说明 |
-| -------- | -------------------- |
-| 适用范围 | 服务端和客户端均适用 |
-| 含义 | sdb 模块的日志开关 |
-| 取值范围 | 同上 |
-| 缺省值 | 135 |
+| 属性 | 说明 |
+| -------- | ---------------------- |
+| 适用范围 | 服务端和客户端均适用 |
+| 含义 | 共用功能模块的日志开关 |
+| 取值范围 | 同上 |
+| 缺省值 | |
### rpcDebugFlag
@@ -948,12 +450,21 @@ HTTP 服务在 2.4.0.0(不含)以前的版本中由 taosd 提供,在 2.4.0
| 取值范围 | 同上 |
| 缺省值 | |
-### tmrDebugFlag
+### jniDebugFlag
+
+| 属性 | 说明 |
+| -------- | ------------------ |
+| 适用范围 | 仅客户端适用 |
+| 含义 | jni 模块的日志开关 |
+| 取值范围 | 同上 |
+| 缺省值 | |
+
+### qDebugFlag
| 属性 | 说明 |
| -------- | -------------------- |
| 适用范围 | 服务端和客户端均适用 |
-| 含义 | 定时器模块的日志开关 |
+| 含义 | query 模块的日志开关 |
| 取值范围 | 同上 |
| 缺省值 | |
@@ -966,157 +477,113 @@ HTTP 服务在 2.4.0.0(不含)以前的版本中由 taosd 提供,在 2.4.0
| 取值范围 | 同上 |
| 缺省值 | |
-### jniDebugFlag
-
-| 属性 | 说明 |
-| -------- | ------------------ |
-| 适用范围 | 仅客户端适用 |
-| 含义 | jni 模块的日志开关 |
-| 取值范围 | 同上 |
-| 缺省值 | |
-
-### odbcDebugFlag
-
-| 属性 | 说明 |
-| -------- | ------------------- |
-| 适用范围 | 仅客户端适用 |
-| 含义 | odbc 模块的日志开关 |
-| 取值范围 | 同上 |
-| 缺省值 | |
-
-### uDebugFlag
-
-| 属性 | 说明 |
-| -------- | ---------------------- |
-| 适用范围 | 服务端和客户端均适用 |
-| 含义 | 共用功能模块的日志开关 |
-| 取值范围 | 同上 |
-| 缺省值 | |
-
-### httpDebugFlag
+### dDebugFlag
-| 属性 | 说明 |
-| -------- | ------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | http 模块的日志开关 |
-| 取值范围 | 同上 |
-| 缺省值 | |
+| 属性 | 说明 |
+| -------- | -------------------- |
+| 适用范围 | 仅服务端适用 |
+| 含义 | dnode 模块的日志开关 |
+| 取值范围 | 同上 |
+| 缺省值 | 135 |
-### mqttDebugFlag
+### vDebugFlag
-| 属性 | 说明 |
-| -------- | ------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | mqtt 模块的日志开关 |
-| 取值范围 | 同上 |
-| 缺省值 | |
+| 属性 | 说明 |
+| -------- | -------------------- |
+| 适用范围 | 仅服务端适用 |
+| 含义 | vnode 模块的日志开关 |
+| 取值范围 | 同上 |
+| 缺省值 | |
-### monitorDebugFlag
+### mDebugFlag
| 属性 | 说明 |
| -------- | ------------------ |
| 适用范围 | 仅服务端适用 |
-| 含义 | 监控模块的日志开关 |
+| 含义 | mnode 模块的日志开关 |
| 取值范围 | 同上 |
-| 缺省值 | |
+| 缺省值 | 135 |
-### qDebugFlag
+### wDebugFlag
| 属性 | 说明 |
| -------- | -------------------- |
-| 适用范围 | 服务端和客户端均适用 |
-| 含义 | 查询模块的日志开关 |
+| 适用范围 | 仅服务端适用 |
+| 含义 | wal 模块的日志开关 |
| 取值范围 | 同上 |
-| 缺省值 | |
+| 缺省值 | 135 |
-### vDebugFlag
+### sDebugFlag
| 属性 | 说明 |
| -------- | -------------------- |
| 适用范围 | 服务端和客户端均适用 |
-| 含义 | vnode 模块的日志开关 |
+| 含义 | sync 模块的日志开关 |
| 取值范围 | 同上 |
-| 缺省值 | |
+| 缺省值 | 135 |
### tsdbDebugFlag
| 属性 | 说明 |
| -------- | ------------------- |
| 适用范围 | 仅服务端适用 |
-| 含义 | TSDB 模块的日志开关 |
+| 含义 | tsdb 模块的日志开关 |
| 取值范围 | 同上 |
| 缺省值 | |
-### cqDebugFlag
-
-| 属性 | 说明 |
-| -------- | ---------------------- |
-| 适用范围 | 服务端和客户端均适用 |
-| 含义 | 连续查询模块的日志开关 |
-| 取值范围 | 同上 |
-| 缺省值 | |
-
-## 仅客户端适用
+### tqDebugFlag
-### maxSQLLength
-
-| 属性 | 说明 |
-| -------- | --------------------------- |
-| 适用范围 | 仅客户端适用 |
-| 含义 | 单条 SQL 语句允许的最长限制 |
-| 单位 | bytes |
-| 取值范围 | 65480-1048576 |
-| 缺省值 | 1048576 |
+| 属性 | 说明 |
+| -------- | ------------------- |
+| 适用范围 | 仅服务端适用 |
+| 含义 | tq 模块的日志开关 |
+| 取值范围 | 同上 |
+| 缺省值 | |
-### tscEnableRecordSql
+### fsDebugFlag
-| 属性 | 说明 |
-| -------- | ----------------------------------------------------------------------------------- |
-| 含义 | 是否记录客户端 sql 语句到文件 |
-| 取值范围 | 0:否,1:是 |
-| 缺省值 | 0 |
-| 补充说明 | 生成的文件(tscnote-xxxx.0/tscnote-xxx.1,xxxx 是 pid),与客户端日志所在目录相同。 |
+| 属性 | 说明 |
+| -------- | ------------------- |
+| 适用范围 | 仅服务端适用 |
+| 含义 | fs 模块的日志开关 |
+| 取值范围 | 同上 |
+| 缺省值 | |
-### maxBinaryDisplayWidth
+### udfDebugFlag
-| 属性 | 说明 |
-| -------- | -------------------------------------------------------------------------- |
-| 含义 | Taos shell 中 binary 和 nchar 字段的显示宽度上限,超过此限制的部分将被隐藏 |
-| 取值范围 | 5 - |
-| 缺省值 | 30 |
+| 属性 | 说明 |
+| -------- | ---------------------- |
+| 适用范围 | 仅服务端适用 |
+| 含义 | UDF 模块的日志开关 |
+| 取值范围 | 同上 |
+| 缺省值 | |
-:::info
-实际上限按以下规则计算:如果字段值的长度大于 maxBinaryDisplayWidth,则显示上限为 **字段名长度** 和 **maxBinaryDisplayWidth** 的较大者。
否则,上限为 **字段名长度** 和 **字段值长度** 的较大者。
可在 shell 中通过命令 set max_binary_display_width nn 动态修改此选项
+### smaDebugFlag
-:::
+| 属性 | 说明 |
+| -------- | ---------------------- |
+| 适用范围 | 仅服务端适用 |
+| 含义 | sma 模块的日志开关 |
+| 取值范围 | 同上 |
+| 缺省值 | |
-### maxWildCardsLength
+### idxDebugFlag
-| 属性 | 说明 |
-| -------- | ------------------------------------------ |
-| 含义 | 设定 LIKE 算子的通配符字符串允许的最大长度 |
-| 单位 | bytes |
-| 取值范围 | 0-16384 |
-| 缺省值 | 100 |
-| 补充说明 | 2.1.6.1 版本新增。 |
+| 属性 | 说明 |
+| -------- | ---------------------- |
+| 适用范围 | 仅服务端适用 |
+| 含义 | index 模块的日志开关 |
+| 取值范围 | 同上 |
+| 缺省值 | |
-### clientMerge
+### tdbDebugFlag
-| 属性 | 说明 |
-| -------- | ---------------------------- |
-| 含义 | 是否允许客户端对写入数据去重 |
-| 取值范围 | 0:不开启,1:开启 |
-| 缺省值 | 0 |
-| 补充说明 | 2.3 版本新增。 |
-
-### maxRegexStringLen
-
-| 属性 | 说明 |
-| -------- | -------------------------- |
-| 含义 | 正则表达式最大允许长度 |
-| 取值范围 | 默认值 128,最大长度 16384 |
-| 缺省值 | 128 |
-| 补充说明 | 2.3 版本新增。 |
+| 属性 | 说明 |
+| -------- | ---------------------- |
+| 适用范围 | 仅服务端适用 |
+| 含义 | tdb 模块的日志开关 |
+| 取值范围 | 同上 |
+| 缺省值 | |
## 其他
diff --git a/docs/zh/14-reference/14-taosx.md b/docs/zh/14-reference/14-taosx.md
new file mode 100644
index 0000000000000000000000000000000000000000..ed3f8d488fe6f809e855510df913f69cc79ee6a0
--- /dev/null
+++ b/docs/zh/14-reference/14-taosx.md
@@ -0,0 +1,4 @@
+---
+sidebar_label: taosX
+title: 使用 taosX 在集群间复制数据
+---
\ No newline at end of file
diff --git a/docs/zh/14-reference/_category_.yml b/docs/zh/14-reference/_category_.yml
index ae861a15ff626b1e0424a28838830702262aa377..faca32476cc5a636ad76b093272d1116ff06ed3a 100644
--- a/docs/zh/14-reference/_category_.yml
+++ b/docs/zh/14-reference/_category_.yml
@@ -1 +1 @@
-label: 参考指南
\ No newline at end of file
+label: 参考手册
\ No newline at end of file
diff --git a/docs/zh/14-reference/index.md b/docs/zh/14-reference/index.md
index a9abff8f427311df4a608466e12f769bd0703763..e9c0c4fe236b8eefec1275a447c1dd1188921ee0 100644
--- a/docs/zh/14-reference/index.md
+++ b/docs/zh/14-reference/index.md
@@ -1,5 +1,4 @@
---
-sidebar_label: 参考手册
title: 参考手册
---
diff --git a/docs/zh/17-operation/01-pkg-install.md b/docs/zh/17-operation/01-pkg-install.md
index f814ee70b77db1a775dda951bab413da03d57561..0680f7609543322d1b74f4ca89df56d14fb705f7 100644
--- a/docs/zh/17-operation/01-pkg-install.md
+++ b/docs/zh/17-operation/01-pkg-install.md
@@ -10,7 +10,7 @@ import TabItem from "@theme/TabItem";
## 安装
-关于安装,请参考 [使用安装包立即开始](../get-started/package)
+关于安装,请参考 [使用安装包立即开始](../../get-started/package)
diff --git a/docs/zh/17-operation/02-planning.mdx b/docs/zh/17-operation/02-planning.mdx
index 954ba7ca00ebdcb10cfcad515292d96127106ff3..0d63c4eaf365036cbba1d838ba6ee860a894724d 100644
--- a/docs/zh/17-operation/02-planning.mdx
+++ b/docs/zh/17-operation/02-planning.mdx
@@ -5,33 +5,28 @@ title: 容量规划
使用 TDengine 来搭建一个物联网大数据平台,计算资源、存储资源需要根据业务场景进行规划。下面分别讨论系统运行所需要的内存、CPU 以及硬盘空间。
-## 内存需求
+## 服务端内存需求
-每个 Database 可以创建固定数目的 vgroup,默认与 CPU 核数相同,可通过 maxVgroupsPerDb 配置;vgroup 中的每个副本会是一个 vnode;每个 vnode 会占用固定大小的内存(大小与数据库的配置参数 blocks 和 cache 有关);每个 Table 会占用与标签总长度有关的内存;此外,系统会有一些固定的内存开销。因此,每个 DB 需要的系统内存可通过如下公式计算:
+每个 Database 可以创建固定数目的 vgroup,默认 2 个 vgroup,在创建数据库时可以通过 `vgroups` 参数来指定,其副本数由参数 `replica` 指定。vgroup 中的每个副本会是一个 vnode;所以每个数据库占用的内存由以下几个参数决定:
-```
-Database Memory Size = maxVgroupsPerDb * replica * (blocks * cache + 10MB) + numOfTables * (tagSizePerTable + 0.5KB)
-```
+- vgroups
+- replica
+- buffer
+- pages
+- pagesize
+- cachesize
-示例:假设 maxVgroupPerDB 是缺省值 64,cache 是缺省大小 16M, blocks 是缺省值 6,并且一个 DB 中有 10 万张表,单副本,标签总长度是 256 字节,则这个 DB 总的内存需求为:64 \* 1 \* (16 \* 6 + 10) + 100000 \* (0.25 + 0.5) / 1000 = 6792M。
+关于这些参数的详细说明请参考 [数据库管理](../../taos-sql/database)。
-在实际的系统运维中,我们通常会更关心 TDengine 服务进程(taosd)会占用的内存量。
+一个数据库所需要的内存大小等于
```
-taosd 内存总量 = vnode 内存 + mnode 内存 + 查询内存
+vgroups * replica * (buffer + pages * pagesize + cachesize)
```
-其中:
-
-1. “vnode 内存”指的是集群中所有的 Database 存储分摊到当前 taosd 节点上所占用的内存资源。可以按上文“Database Memory Size”计算公式估算每个 DB 的内存占用量进行加总,再按集群中总共的 TDengine 节点数做平均(如果设置为多副本,则还需要乘以对应的副本倍数)。
-2. “mnode 内存”指的是集群中管理节点所占用的资源。如果一个 taosd 节点上分布有 mnode 管理节点,则内存消耗还需要增加“0.2KB \* 集群中数据表总数”。
-3. “查询内存”指的是服务端处理查询请求时所需要占用的内存。单条查询语句至少会占用“0.2KB \* 查询涉及的数据表总数”的内存量。
+但要注意的是这些内存并不需要由单一服务器提供,而是由整个集群中所有数据节点共同负担,也就是由这些节点所在的服务器共同承担。如果集群中有不止一个数据库,则所需内存要累加。更复杂的场景是如果集群中的数据节点并非在最初就一次性全部建立,而是随着使用中系统负载的增加逐步增加服务器并增加数据节点,则新创建的数据库会导致新旧数据节点上的负载并不均衡,此时简单的理论计算并不能直接使用,要结合各数据节点的负载情况。
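+
+按上述公式给出一个算例(各参数取值仅为演示假设,并非推荐配置):设 vgroups 为 2、replica 为 3、buffer 为 96 MB、pages 为 256、pagesize 为 4 KB、cachesize 为 1 MB,则该数据库所需内存约为:
+
+```
+2 * 3 * (96 + 256 * 4 / 1024 + 1) = 6 * 98 = 588 (MB)
+```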
-注意:以上内存估算方法,主要讲解了系统的“必须内存需求”,而不是“内存总数上限”。在实际运行的生产环境中,由于操作系统缓存、资源管理调度等方面的原因,内存规划应当在估算结果的基础上保留一定冗余,以维持系统状态和系统性能的稳定性。并且,生产环境通常会配置系统资源的监控工具,以便及时发现硬件资源的紧缺情况。
-
-最后,如果内存充裕,可以考虑加大 Blocks 的配置,这样更多数据将保存在内存里,提高写入和查询速度。
-
-### 客户端内存需求
+## 客户端内存需求
客户端应用采用 taosc 客户端驱动连接服务端,会有内存需求的开销。
@@ -56,7 +51,7 @@ CPU 的需求取决于如下两方面:
- **数据插入** TDengine 单核每秒能至少处理一万个插入请求。每个插入请求可以带多条记录,一次插入一条记录与插入 10 条记录,消耗的计算资源差别很小。因此每次插入,条数越大,插入效率越高。如果一个插入请求带 200 条以上记录,单核就能达到每秒插入 100 万条记录的速度。但对前端数据采集的要求越高,因为需要缓存记录,然后一批插入。
- **查询需求** TDengine 提供高效的查询,但是每个场景的查询差异很大,查询频次变化也很大,难以给出客观数字。需要用户针对自己的场景,写一些查询语句,才能确定。
-因此仅对数据插入而言,CPU 是可以估算出来的,但查询所耗的计算资源无法估算。在实际运营过程中,不建议 CPU 使用率超过 50%,超过后,需要增加新的节点,以获得更多计算资源。
+因此仅对数据插入而言,CPU 是可以估算出来的,但查询所耗的计算资源无法估算。在实际运行过程中,不建议 CPU 使用率超过 50%,超过后,需要增加新的节点,以获得更多计算资源。
## 存储需求
@@ -77,5 +72,3 @@ Raw DataSize = numOfTables * rowSizePerTable * rowsPerTable
根据上面的内存、CPU、存储的预估,就可以知道整个系统需要多少核、多少内存、多少存储空间。如果数据副本数不为 1,总需求量需要再乘以副本数。
因为 TDengine 具有很好的水平扩展能力,根据总量,再根据单个物理机或虚拟机的资源,就可以轻松决定需要购置多少台物理机或虚拟机了。
-
-**立即计算 CPU、内存、存储,请参见:[资源估算方法](https://www.taosdata.com/config/config.html)。**
diff --git a/docs/zh/17-operation/03-tolerance.md b/docs/zh/17-operation/03-tolerance.md
index 2c466819621adc10423c452328714c81e6f6f966..2cfd4b6484acdcb617cd91ed694d2f4c0f010e93 100644
--- a/docs/zh/17-operation/03-tolerance.md
+++ b/docs/zh/17-operation/03-tolerance.md
@@ -4,25 +4,27 @@ title: 容错和灾备
## 容错
-TDengine 支持**WAL**(Write Ahead Log)机制,实现数据的容错能力,保证数据的高可用。
+TDengine 支持 **WAL**(Write Ahead Log)机制,实现数据的容错能力,保证数据的高可用。
TDengine 接收到应用的请求数据包时,先将请求的原始数据包写入数据库日志文件,等数据成功写入数据库数据文件后,再删除相应的 WAL。这样保证了 TDengine 能够在断电等因素导致的服务重启时从数据库日志文件中恢复数据,避免数据的丢失。
涉及的系统配置参数有两个:
-- walLevel:WAL 级别,0:不写 WAL; 1:写 WAL, 但不执行 fsync; 2:写 WAL, 而且执行 fsync。
-- fsync:当 walLevel 设置为 2 时,执行 fsync 的周期。设置为 0,表示每次写入,立即执行 fsync。
+- wal_level:WAL 级别,1:写 WAL,但不执行 fsync。2:写 WAL,而且执行 fsync。默认值为 1。
+- wal_fsync_period:当 wal_level 设置为 2 时,执行 fsync 的周期。设置为 0,表示每次写入,立即执行 fsync。
-如果要 100%的保证数据不丢失,需要将 walLevel 设置为 2,fsync 设置为 0。这时写入速度将会下降。但如果应用侧启动的写数据的线程数达到一定的数量(超过 50),那么写入数据的性能也会很不错,只会比 fsync 设置为 3000 毫秒下降 30%左右。
+如果要 100% 保证数据不丢失,需要将 wal_level 设置为 2,wal_fsync_period 设置为 0。这时写入速度将会下降。但如果应用侧启动的写数据的线程数达到一定的数量(超过 50),那么写入数据的性能也会很不错,只会比 wal_fsync_period 设置为 3000 毫秒时下降 30% 左右。
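+
+一个最小示意(库名 demo 为假设),建库时即指定上述两个参数,实现“每次写入都执行 fsync”:
+
+```sql
+CREATE DATABASE demo WAL_LEVEL 2 WAL_FSYNC_PERIOD 0;
+```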
## 灾备
-TDengine 的集群通过多个副本的机制,来提供系统的高可用性,实现灾备能力。
+TDengine 的集群通过多个副本的机制,来提供系统的高可用性,同时具备一定的灾备能力。
-TDengine 集群是由 mnode 负责管理的,为保证 mnode 的高可靠,可以配置多个 mnode 副本,副本数由系统配置参数 numOfMnodes 决定,为了支持高可靠,需要设置大于 1。为保证元数据的强一致性,mnode 副本之间通过同步方式进行数据复制,保证了元数据的强一致性。
+TDengine 集群是由 mnode 负责管理的,为保证 mnode 的高可靠,可以配置三个 mnode 副本。mnode 副本之间通过同步方式进行数据复制,以保证元数据的强一致性。
-TDengine 集群中的时序数据的副本数是与数据库关联的,一个集群里可以有多个数据库,每个数据库可以配置不同的副本数。创建数据库时,通过参数 replica 指定副本数。为了支持高可靠,需要设置副本数大于 1。
+TDengine 集群中的时序数据的副本数是与数据库关联的,一个集群里可以有多个数据库,每个数据库可以配置不同的副本数。创建数据库时,通过参数 replica 指定副本数。为了支持高可靠,需要设置副本数为 3。
TDengine 集群的节点数必须大于等于副本数,否则创建表时将报错。
当 TDengine 集群中的节点部署在不同的物理机上,并设置多个副本数时,就实现了系统的高可靠性,无需再使用其他软件或工具。TDengine 企业版还可以将副本部署在不同机房,从而实现异地容灾。
+
+另外一种灾备方式是通过 `taosX` 将一个 TDengine 集群的数据同步复制到物理上位于不同数据中心的另一个 TDengine 集群。其详细使用方法请参考 [taosX 参考手册](../../reference/taosx)。
diff --git a/docs/zh/17-operation/06-admin.md b/docs/zh/17-operation/06-admin.md
deleted file mode 100644
index 7934d31eafb774fb45e1902bee29e8b518d152d6..0000000000000000000000000000000000000000
--- a/docs/zh/17-operation/06-admin.md
+++ /dev/null
@@ -1,42 +0,0 @@
----
-title: 用户管理
----
-
-系统管理员可以在 CLI 界面里添加、删除用户,也可以修改密码。CLI 里 SQL 语法如下:
-
-```sql
-CREATE USER PASS <'password'>;
-```
-
-创建用户,并指定用户名和密码,密码需要用单引号引起来,单引号为英文半角
-
-```sql
-DROP USER ;
-```
-
-删除用户,限 root 用户使用
-
-```sql
-ALTER USER PASS <'password'>;
-```
-
-修改用户密码,为避免被转换为小写,密码需要用单引号引用,单引号为英文半角
-
-```sql
-ALTER USER PRIVILEGE ;
-```
-
-修改用户权限为:write 或 read,不需要添加单引号
-
-说明:系统内共有 super/write/read 三种权限级别,但目前不允许通过 alter 指令把 super 权限赋予用户。
-
-```sql
-SHOW USERS;
-```
-
-显示所有用户
-
-:::note
-SQL 语法中,< >表示需要用户输入的部分,但请不要输入< >本身。
-
-:::
diff --git a/docs/zh/17-operation/09-status.md b/docs/zh/17-operation/09-status.md
deleted file mode 100644
index e7ae78bace91f6dab06591340965ba04efdd5edb..0000000000000000000000000000000000000000
--- a/docs/zh/17-operation/09-status.md
+++ /dev/null
@@ -1,53 +0,0 @@
----
-title: 系统连接、任务查询管理
----
-
-系统管理员可以从 CLI 查询系统的连接、正在进行的查询、流式计算,并且可以关闭连接、停止正在进行的查询和流式计算。
-
-## 显示数据库的连接
-
-```sql
-SHOW CONNECTIONS;
-```
-
-其结果中的一列显示 ip:port, 为连接的 IP 地址和端口号。
-
-## 强制关闭数据库连接
-
-```sql
-KILL CONNECTION ;
-```
-
-其中的 connection-id 是 SHOW CONNECTIONS 中显示的第一列的数字。
-
-## 显示数据查询
-
-```sql
-SHOW QUERIES;
-```
-
-其中第一列显示的以冒号隔开的两个数字为 query-id,为发起该 query 应用连接的 connection-id 和查询次数。
-
-## 强制关闭数据查询
-
-```sql
-KILL QUERY ;
-```
-
-其中 query-id 是 SHOW QUERIES 中显示的 connection-id:query-no 字串,如“105:2”,拷贝粘贴即可。
-
-## 显示连续查询
-
-```sql
-SHOW STREAMS;
-```
-
-其中第一列显示的以冒号隔开的两个数字为 stream-id, 为启动该 stream 应用连接的 connection-id 和发起 stream 的次数。
-
-## 强制关闭连续查询
-
-```sql
-KILL STREAM ;
-```
-
-其中的 stream-id 是 SHOW STREAMS 中显示的 connection-id:stream-no 字串,如 103:2,拷贝粘贴即可。
diff --git a/include/common/tglobal.h b/include/common/tglobal.h
index ac998b807e2c6124c12d36867b268a799e9f2d9d..c07f422557d2622e843fc1ab91b6f4e8b1d11a77 100644
--- a/include/common/tglobal.h
+++ b/include/common/tglobal.h
@@ -41,10 +41,8 @@ extern int32_t tsCompressMsgSize;
extern int32_t tsCompressColData;
extern int32_t tsMaxNumOfDistinctResults;
extern int32_t tsCompatibleModel;
-extern bool tsEnableSlaveQuery;
extern bool tsPrintAuth;
extern int64_t tsTickPerMin[3];
-
extern int32_t tsCountAlwaysReturnValue;
// multi-process
@@ -92,8 +90,6 @@ extern uint16_t tsTelemPort;
extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing
extern int64_t tsQueryBufferSizeBytes; // maximum allowed usage buffer size in byte for each data node
extern bool tsRetrieveBlockingModel; // retrieve threads will be blocked
-extern bool tsKeepOriginalColumnName;
-extern bool tsDeadLockKillQuery;
// query client
extern int32_t tsQueryPolicy;
@@ -102,11 +98,6 @@ extern int32_t tsQuerySmaOptimize;
// client
extern int32_t tsMinSlidingTime;
extern int32_t tsMinIntervalTime;
-extern int32_t tsMaxStreamComputDelay;
-extern int32_t tsStreamCompStartDelay;
-extern int32_t tsRetryStreamCompDelay;
-extern float tsStreamComputDelayRatio; // the delayed computing ration of the whole time window
-extern int64_t tsMaxRetentWindow;
// build info
extern char version[];
diff --git a/include/common/tmsg.h b/include/common/tmsg.h
index dc83015a8968031a86bdcd8004b1525a856ac15c..5d6d9178ed0ef7abbddae592c4dbb08de4e906ba 100644
--- a/include/common/tmsg.h
+++ b/include/common/tmsg.h
@@ -3044,12 +3044,41 @@ typedef struct SDeleteRes {
int64_t skey;
int64_t ekey;
int64_t affectedRows;
- char tableFName[TSDB_TABLE_FNAME_LEN];
+ char tableFName[TSDB_TABLE_NAME_LEN];
+ char tsColName[TSDB_COL_NAME_LEN];
} SDeleteRes;
int32_t tEncodeDeleteRes(SEncoder* pCoder, const SDeleteRes* pRes);
int32_t tDecodeDeleteRes(SDecoder* pCoder, SDeleteRes* pRes);
+typedef struct {
+ int32_t msgType;
+ int32_t msgLen;
+ void* msg;
+} SBatchMsg;
+
+typedef struct {
+ SMsgHead header;
+ int32_t msgNum;
+ SBatchMsg msg[];
+} SBatchReq;
+
+typedef struct {
+ int32_t reqType;
+ int32_t msgLen;
+ int32_t rspCode;
+ void* msg;
+} SBatchRsp;
+
+static FORCE_INLINE void tFreeSBatchRsp(void *p) {
+ if (NULL == p) {
+ return;
+ }
+
+ SBatchRsp* pRsp = (SBatchRsp*)p;
+ taosMemoryFree(pRsp->msg);
+}
+
#pragma pack(pop)
#ifdef __cplusplus
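The three structs above define a simple length-prefixed batch format whose server-side parser (mndProcessBatchMetaMsg) appears later in this patch. As orientation, here is a hedged sketch of the sender-side layout that parser expects; the helper name is hypothetical, the caller is assumed to have sized `buf` appropriately, and the SMsgHead fields are left to the transport layer:

```c
// Hypothetical encoder sketch, mirroring the parsing in mndProcessBatchMetaMsg:
//   [SBatchReq: SMsgHead | int32 msgNum] [per msg: int32 msgType | int32 msgLen | body]
// Integer fields travel in network byte order. Assumes tmsg.h and the platform's
// htonl declaration are in scope.
static int32_t batchReqSerialize(char *buf, int32_t msgNum, const SBatchMsg *pMsgs) {
  SBatchReq *pReq = (SBatchReq *)buf;
  pReq->msgNum = htonl(msgNum);

  int32_t offset = sizeof(SBatchReq);  // same base offset the parser uses
  for (int32_t i = 0; i < msgNum; ++i) {
    *(int32_t *)(buf + offset) = htonl(pMsgs[i].msgType);
    offset += sizeof(int32_t);
    *(int32_t *)(buf + offset) = htonl(pMsgs[i].msgLen);
    offset += sizeof(int32_t);
    memcpy(buf + offset, pMsgs[i].msg, pMsgs[i].msgLen);
    offset += pMsgs[i].msgLen;
  }
  return offset;  // total bytes laid out
}
```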
diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h
index 56e0935ce1318225cb30d97fb614a6a6f672edbe..20dc04631e0aee7ae2d694f8dbe0eb4b048cb17c 100644
--- a/include/common/tmsgdef.h
+++ b/include/common/tmsgdef.h
@@ -136,6 +136,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_MND_DROP_INDEX, "drop-index", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_GET_INDEX, "get-index", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_GET_TABLE_INDEX, "get-table-index", NULL, NULL)
+ TD_DEF_MSG_TYPE(TDMT_MND_BATCH_META, "batch-meta", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_TABLE_CFG, "table-cfg", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_CREATE_TOPIC, "create-topic", SMCreateTopicReq, SMCreateTopicRsp)
TD_DEF_MSG_TYPE(TDMT_MND_ALTER_TOPIC, "alter-topic", NULL, NULL)
@@ -180,6 +181,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_VND_TABLE_META, "vnode-table-meta", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_TABLES_META, "vnode-tables-meta", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_TABLE_CFG, "vnode-table-cfg", NULL, NULL)
+ TD_DEF_MSG_TYPE(TDMT_VND_BATCH_META, "vnode-batch-meta", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_CREATE_STB, "vnode-create-stb", SVCreateStbReq, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_ALTER_STB, "vnode-alter-stb", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_DROP_STB, "vnode-drop-stb", SVDropStbReq, NULL)
diff --git a/include/libs/executor/dataSinkMgt.h b/include/libs/executor/dataSinkMgt.h
index 90b804b3825c7a3c9dd6fc9b7469bc0166f7ab21..47177dc11b5669511d92202605c002e34c64d589 100644
--- a/include/libs/executor/dataSinkMgt.h
+++ b/include/libs/executor/dataSinkMgt.h
@@ -38,7 +38,8 @@ typedef struct SDeleterRes {
int64_t skey;
int64_t ekey;
int64_t affectedRows;
- char tableFName[TSDB_TABLE_FNAME_LEN];
+ char tableName[TSDB_TABLE_NAME_LEN];
+ char tsColName[TSDB_COL_NAME_LEN];
} SDeleterRes;
typedef struct SDeleterParam {
diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h
index 65e20336ccf36d1cf3667828729c76efa1edb355..a7fae403edbb608945840da7237080258aa37e1c 100644
--- a/include/libs/executor/executor.h
+++ b/include/libs/executor/executor.h
@@ -64,17 +64,7 @@ qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers);
* @param SReadHandle
* @return
*/
-qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers, int32_t* numOfCols,
- SSchemaWrapper** pSchema);
-
-/**
- * Set the input data block for the stream scan.
- * @param tinfo
- * @param input
- * @param type
- * @return
- */
-int32_t qSetStreamInput(qTaskInfo_t tinfo, const void* input, int32_t type, bool assignUid);
+qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers, int32_t* numOfCols, SSchemaWrapper** pSchema);
/**
* Set multiple input data blocks for the stream scan.
@@ -84,7 +74,7 @@ int32_t qSetStreamInput(qTaskInfo_t tinfo, const void* input, int32_t type, bool
* @param type
* @return
*/
-int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type, bool assignUid);
+int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type);
/**
* Update the table id list, add or remove.
diff --git a/include/libs/function/functionMgt.h b/include/libs/function/functionMgt.h
index f1f60cb8e5e39d5c55eac192bca799a04a17b59f..60ad3ba451361fcb0f00c61f03e16ed7999cc0ac 100644
--- a/include/libs/function/functionMgt.h
+++ b/include/libs/function/functionMgt.h
@@ -209,6 +209,7 @@ typedef enum EFuncDataRequired {
} EFuncDataRequired;
EFuncDataRequired fmFuncDataRequired(SFunctionNode* pFunc, STimeWindow* pTimeWindow);
+EFuncDataRequired fmFuncDynDataRequired(int32_t funcId, void* pRes, STimeWindow* pTimeWindow);
int32_t fmGetFuncExecFuncs(int32_t funcId, SFuncExecFuncs* pFpSet);
int32_t fmGetScalarFuncExecFuncs(int32_t funcId, SScalarFuncExecFuncs* pFpSet);
diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h
index c91978c125437f201153eadec302580528349e95..644185a244794f47ec1c8be0ee55822135ed14aa 100644
--- a/include/libs/nodes/plannodes.h
+++ b/include/libs/nodes/plannodes.h
@@ -151,7 +151,8 @@ typedef struct SVnodeModifyLogicNode {
uint64_t tableId;
uint64_t stableId;
int8_t tableType; // table type
- char tableFName[TSDB_TABLE_FNAME_LEN];
+ char tableName[TSDB_TABLE_NAME_LEN];
+ char tsColName[TSDB_COL_NAME_LEN];
STimeWindow deleteTimeRange;
SVgroupsInfo* pVgroupList;
SNodeList* pInsertCols;
@@ -494,7 +495,7 @@ typedef struct SQueryInserterNode {
uint64_t tableId;
uint64_t stableId;
int8_t tableType; // table type
- char tableFName[TSDB_TABLE_FNAME_LEN];
+ char tableName[TSDB_TABLE_NAME_LEN];
int32_t vgId;
SEpSet epSet;
} SQueryInserterNode;
@@ -503,7 +504,7 @@ typedef struct SDataDeleterNode {
SDataSinkNode sink;
uint64_t tableId;
int8_t tableType; // table type
- char tableFName[TSDB_TABLE_FNAME_LEN];
+ char tableFName[TSDB_TABLE_NAME_LEN];
char tsColName[TSDB_COL_NAME_LEN];
STimeWindow deleteTimeRange;
SNode* pAffectedRows;
diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h
index 6199732cc8fb991929876f75ff01dcb2572c143e..ab1c00a69409f636e3a890588398d8708b62128b 100644
--- a/include/libs/stream/tstream.h
+++ b/include/libs/stream/tstream.h
@@ -270,7 +270,7 @@ typedef struct SStreamTask {
int64_t startVer;
int64_t checkpointVer;
int64_t processedVer;
- int32_t numOfVgroups;
+ // int32_t numOfVgroups;
// children info
SArray* childEpInfo; // SArray
diff --git a/include/libs/wal/wal.h b/include/libs/wal/wal.h
index 5b8d70fb7c127d476566f54623c43783cd5a240a..14173690967ffd26be92cf13a05af6f7508533fe 100644
--- a/include/libs/wal/wal.h
+++ b/include/libs/wal/wal.h
@@ -77,11 +77,11 @@ typedef struct {
} SWalSyncInfo;
typedef struct {
- int8_t protoVer;
int64_t version;
- int16_t msgType;
+ int64_t ingestTs;
int32_t bodyLen;
- int64_t ingestTs; // not implemented
+ int16_t msgType;
+ int8_t protoVer;
// sync meta
SWalSyncInfo syncMeta;
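In this reordering the header fields are arranged by descending size (int64, then int32, int16, int8), and `ingestTs` is promoted to an implemented field. Presumably the descending order minimizes compiler-inserted padding; a toy, self-contained illustration of that effect (these are not the real WAL header structs):

```c
#include <stdint.h>
#include <stdio.h>

// Toy structs only: same members, different order.
typedef struct { int8_t a; int64_t b; int16_t c; int32_t d; } Mixed;    // padded heavily
typedef struct { int64_t b; int32_t d; int16_t c; int8_t a; } Ordered;  // descending size

int main(void) {
  // On a typical LP64 ABI: sizeof(Mixed) == 24, sizeof(Ordered) == 16.
  printf("mixed=%zu ordered=%zu\n", sizeof(Mixed), sizeof(Ordered));
  return 0;
}
```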
diff --git a/source/client/src/tmq.c b/source/client/src/tmq.c
index df9072fe1ac05cb63e5317c1c60beb9f79143fb3..88ebb099e5469d64ca6caf31b9cc60e42694e67a 100644
--- a/source/client/src/tmq.c
+++ b/source/client/src/tmq.c
@@ -2823,35 +2823,35 @@ end:
// delete from db.tabl where .. -> delete from tabl where ..
// delete from db .tabl where .. -> delete from tabl where ..
-static void getTbName(char *sql){
- char *ch = sql;
-
- bool inBackQuote = false;
- int8_t dotIndex = 0;
- while(*ch != '\0'){
- if(!inBackQuote && *ch == '`'){
- inBackQuote = true;
- ch++;
- continue;
- }
-
- if(inBackQuote && *ch == '`'){
- inBackQuote = false;
- ch++;
-
- continue;
- }
-
- if(!inBackQuote && *ch == '.'){
- dotIndex ++;
- if(dotIndex == 2){
- memmove(sql, ch + 1, strlen(ch + 1) + 1);
- break;
- }
- }
- ch++;
- }
-}
+//static void getTbName(char *sql){
+// char *ch = sql;
+//
+// bool inBackQuote = false;
+// int8_t dotIndex = 0;
+// while(*ch != '\0'){
+// if(!inBackQuote && *ch == '`'){
+// inBackQuote = true;
+// ch++;
+// continue;
+// }
+//
+// if(inBackQuote && *ch == '`'){
+// inBackQuote = false;
+// ch++;
+//
+// continue;
+// }
+//
+// if(!inBackQuote && *ch == '.'){
+// dotIndex ++;
+// if(dotIndex == 2){
+// memmove(sql, ch + 1, strlen(ch + 1) + 1);
+// break;
+// }
+// }
+// ch++;
+// }
+//}
static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) {
SDeleteRes req = {0};
@@ -2867,9 +2867,9 @@ static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) {
goto end;
}
- getTbName(req.tableFName);
+// getTbName(req.tableFName);
char sql[256] = {0};
- sprintf(sql, "delete from `%s` where `%s` >= %" PRId64" and `%s` <= %" PRId64, req.tableFName, "ts", req.skey, "ts", req.ekey);
+ sprintf(sql, "delete from `%s` where `%s` >= %" PRId64" and `%s` <= %" PRId64, req.tableFName, req.tsColName, req.skey, req.tsColName, req.ekey);
printf("delete sql:%s\n", sql);
TAOS_RES* res = taos_query(taos, sql);
diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c
index cb1f3ca91c6e36c3e5d2282c3990c308f1d11e06..ce09b83fae3ec8767a0bad361467371526c55693 100644
--- a/source/common/src/tglobal.c
+++ b/source/common/src/tglobal.c
@@ -35,7 +35,6 @@ int32_t tsNumOfSupportVnodes = 256;
// common
int32_t tsMaxShellConns = 50000;
int32_t tsShellActivityTimer = 3; // second
-bool tsEnableSlaveQuery = true;
bool tsPrintAuth = false;
// multi process
@@ -118,20 +117,6 @@ int32_t tsMaxNumOfDistinctResults = 1000 * 10000;
// 1 database precision unit for interval time range, changed accordingly
int32_t tsMinIntervalTime = 1;
-// 20sec, the maximum value of stream computing delay, changed accordingly
-int32_t tsMaxStreamComputDelay = 20000;
-
-// 10sec, the first stream computing delay time after system launched successfully, changed accordingly
-int32_t tsStreamCompStartDelay = 10000;
-
-// the stream computing delay time after executing failed, change accordingly
-int32_t tsRetryStreamCompDelay = 10 * 1000;
-
-// The delayed computing ration. 10% of the whole computing time window by default.
-float tsStreamComputDelayRatio = 0.1f;
-
-int64_t tsMaxRetentWindow = 24 * 3600L; // maximum time window tolerance
-
// the maximum allowed query buffer size during query processing for each data node.
// -1 no limit (default)
// 0 no query allowed, queries are disabled
@@ -142,12 +127,6 @@ int64_t tsQueryBufferSizeBytes = -1;
// in retrieve blocking model, the retrieve threads will wait for the completion of the query processing.
bool tsRetrieveBlockingModel = false;
-// last_row(*), first(*), last_row(ts, col1, col2) query, the result fields will be the original column name
-bool tsKeepOriginalColumnName = false;
-
-// kill long query
-bool tsDeadLockKillQuery = false;
-
// tsdb config
// For backward compatibility
bool tsdbForceKeepFile = false;
@@ -330,11 +309,10 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
if (cfgAddString(pCfg, "fqdn", defaultFqdn, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "serverPort", defaultServerPort, 1, 65056, 1) != 0) return -1;
if (cfgAddDir(pCfg, "tempDir", tsTempDir, 1) != 0) return -1;
- if (cfgAddFloat(pCfg, "minimalTempDirGB", 1.0f, 0.001f, 10000000, 1) != 0) return -1;
+ if (cfgAddFloat(pCfg, "minimalTmpDirGB", 1.0f, 0.001f, 10000000, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "shellActivityTimer", tsShellActivityTimer, 1, 120, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "compressMsgSize", tsCompressMsgSize, -1, 100000000, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "compressColData", tsCompressColData, -1, 100000000, 1) != 0) return -1;
- if (cfgAddBool(pCfg, "keepColumnName", tsKeepOriginalColumnName, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "queryPolicy", tsQueryPolicy, 1, 3, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "querySmaOptimize", tsQuerySmaOptimize, 0, 1, 1) != 0) return -1;
if (cfgAddString(pCfg, "smlChildTableName", "", 1) != 0) return -1;
@@ -383,15 +361,9 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "minIntervalTime", tsMinIntervalTime, 1, 1000000, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "maxNumOfDistinctRes", tsMaxNumOfDistinctResults, 10 * 10000, 10000 * 10000, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "countAlwaysReturnValue", tsCountAlwaysReturnValue, 0, 1, 0) != 0) return -1;
- if (cfgAddInt32(pCfg, "maxStreamCompDelay", tsMaxStreamComputDelay, 10, 1000000000, 0) != 0) return -1;
- if (cfgAddInt32(pCfg, "maxFirstStreamCompDelay", tsStreamCompStartDelay, 1000, 1000000000, 0) != 0) return -1;
- if (cfgAddInt32(pCfg, "retryStreamCompDelay", tsRetryStreamCompDelay, 10, 1000000000, 0) != 0) return -1;
- if (cfgAddFloat(pCfg, "streamCompDelayRatio", tsStreamComputDelayRatio, 0.1, 0.9, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "queryBufferSize", tsQueryBufferSize, -1, 500000000000, 0) != 0) return -1;
if (cfgAddBool(pCfg, "retrieveBlockingModel", tsRetrieveBlockingModel, 0) != 0) return -1;
if (cfgAddBool(pCfg, "printAuth", tsPrintAuth, 0) != 0) return -1;
- if (cfgAddBool(pCfg, "slaveQuery", tsEnableSlaveQuery, 0) != 0) return -1;
- if (cfgAddBool(pCfg, "deadLockKillQuery", tsDeadLockKillQuery, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "multiProcess", tsMultiProcess, 0, 2, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "mnodeShmSize", tsMnodeShmSize, TSDB_MAX_MSG_SIZE * 2 + 1024, INT32_MAX, 0) != 0) return -1;
@@ -399,7 +371,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "qnodeShmSize", tsQnodeShmSize, TSDB_MAX_MSG_SIZE * 2 + 1024, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "snodeShmSize", tsSnodeShmSize, TSDB_MAX_MSG_SIZE * 2 + 1024, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "bnodeShmSize", tsBnodeShmSize, TSDB_MAX_MSG_SIZE * 2 + 1024, INT32_MAX, 0) != 0) return -1;
- if (cfgAddInt32(pCfg, "mumOfShmThreads", tsNumOfShmThreads, 1, 1024, 0) != 0) return -1;
+ if (cfgAddInt32(pCfg, "numOfShmThreads", tsNumOfShmThreads, 1, 1024, 0) != 0) return -1;
tsNumOfRpcThreads = tsNumOfCores / 2;
tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 1, 4);
@@ -409,25 +381,21 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
tsNumOfCommitThreads = TRANGE(tsNumOfCommitThreads, 2, 4);
if (cfgAddInt32(pCfg, "numOfCommitThreads", tsNumOfCommitThreads, 1, 1024, 0) != 0) return -1;
- tsNumOfMnodeQueryThreads = tsNumOfCores * 2;
- tsNumOfMnodeQueryThreads = TRANGE(tsNumOfMnodeQueryThreads, 4, 8);
- if (cfgAddInt32(pCfg, "numOfMnodeQueryThreads", tsNumOfMnodeQueryThreads, 1, 1024, 0) != 0) return -1;
-
tsNumOfMnodeReadThreads = tsNumOfCores / 8;
tsNumOfMnodeReadThreads = TRANGE(tsNumOfMnodeReadThreads, 1, 4);
if (cfgAddInt32(pCfg, "numOfMnodeReadThreads", tsNumOfMnodeReadThreads, 1, 1024, 0) != 0) return -1;
tsNumOfVnodeQueryThreads = tsNumOfCores * 2;
tsNumOfVnodeQueryThreads = TMAX(tsNumOfVnodeQueryThreads, 4);
- if (cfgAddInt32(pCfg, "numOfVnodeQueryThreads", tsNumOfVnodeQueryThreads, 1, 1024, 0) != 0) return -1;
+ if (cfgAddInt32(pCfg, "numOfVnodeQueryThreads", tsNumOfVnodeQueryThreads, 4, 1024, 0) != 0) return -1;
tsNumOfVnodeStreamThreads = tsNumOfCores / 4;
tsNumOfVnodeStreamThreads = TMAX(tsNumOfVnodeStreamThreads, 4);
- if (cfgAddInt32(pCfg, "numOfVnodeStreamThreads", tsNumOfVnodeStreamThreads, 1, 1024, 0) != 0) return -1;
+ if (cfgAddInt32(pCfg, "numOfVnodeStreamThreads", tsNumOfVnodeStreamThreads, 4, 1024, 0) != 0) return -1;
tsNumOfVnodeFetchThreads = tsNumOfCores / 4;
tsNumOfVnodeFetchThreads = TMAX(tsNumOfVnodeFetchThreads, 4);
- if (cfgAddInt32(pCfg, "numOfVnodeFetchThreads", tsNumOfVnodeFetchThreads, 1, 1024, 0) != 0) return -1;
+ if (cfgAddInt32(pCfg, "numOfVnodeFetchThreads", tsNumOfVnodeFetchThreads, 4, 1024, 0) != 0) return -1;
tsNumOfVnodeWriteThreads = tsNumOfCores;
tsNumOfVnodeWriteThreads = TMAX(tsNumOfVnodeWriteThreads, 1);
@@ -447,11 +415,11 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
tsNumOfSnodeSharedThreads = tsNumOfCores / 4;
tsNumOfSnodeSharedThreads = TRANGE(tsNumOfSnodeSharedThreads, 2, 4);
- if (cfgAddInt32(pCfg, "numOfSnodeSharedThreads", tsNumOfSnodeSharedThreads, 1, 1024, 0) != 0) return -1;
+ if (cfgAddInt32(pCfg, "numOfSnodeSharedThreads", tsNumOfSnodeSharedThreads, 2, 1024, 0) != 0) return -1;
tsNumOfSnodeUniqueThreads = tsNumOfCores / 4;
tsNumOfSnodeUniqueThreads = TRANGE(tsNumOfSnodeUniqueThreads, 2, 4);
- if (cfgAddInt32(pCfg, "numOfSnodeUniqueThreads", tsNumOfSnodeUniqueThreads, 1, 1024, 0) != 0) return -1;
+ if (cfgAddInt32(pCfg, "numOfSnodeUniqueThreads", tsNumOfSnodeUniqueThreads, 2, 1024, 0) != 0) return -1;
tsRpcQueueMemoryAllowed = tsTotalMemoryKB * 1024 * 0.1;
tsRpcQueueMemoryAllowed = TRANGE(tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10L, TSDB_MAX_MSG_SIZE * 10000L);
@@ -532,7 +500,7 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
tstrncpy(tsTempDir, cfgGetItem(pCfg, "tempDir")->str, PATH_MAX);
taosExpandDir(tsTempDir, tsTempDir, PATH_MAX);
- tsTempSpace.reserved = cfgGetItem(pCfg, "minimalTempDirGB")->fval;
+ tsTempSpace.reserved = cfgGetItem(pCfg, "minimalTmpDirGB")->fval;
if (taosMulMkDir(tsTempDir) != 0) {
uError("failed to create tempDir:%s since %s", tsTempDir, terrstr());
return -1;
@@ -545,7 +513,6 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
tsShellActivityTimer = cfgGetItem(pCfg, "shellActivityTimer")->i32;
tsCompressMsgSize = cfgGetItem(pCfg, "compressMsgSize")->i32;
tsCompressColData = cfgGetItem(pCfg, "compressColData")->i32;
- tsKeepOriginalColumnName = cfgGetItem(pCfg, "keepColumnName")->bval;
tsNumOfTaskQueueThreads = cfgGetItem(pCfg, "numOfTaskQueueThreads")->i32;
tsQueryPolicy = cfgGetItem(pCfg, "queryPolicy")->i32;
tsQuerySmaOptimize = cfgGetItem(pCfg, "querySmaOptimize")->i32;
@@ -579,15 +546,9 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsMinIntervalTime = cfgGetItem(pCfg, "minIntervalTime")->i32;
tsMaxNumOfDistinctResults = cfgGetItem(pCfg, "maxNumOfDistinctRes")->i32;
tsCountAlwaysReturnValue = cfgGetItem(pCfg, "countAlwaysReturnValue")->i32;
- tsMaxStreamComputDelay = cfgGetItem(pCfg, "maxStreamCompDelay")->i32;
- tsStreamCompStartDelay = cfgGetItem(pCfg, "maxFirstStreamCompDelay")->i32;
- tsRetryStreamCompDelay = cfgGetItem(pCfg, "retryStreamCompDelay")->i32;
- tsStreamComputDelayRatio = cfgGetItem(pCfg, "streamCompDelayRatio")->fval;
tsQueryBufferSize = cfgGetItem(pCfg, "queryBufferSize")->i32;
tsRetrieveBlockingModel = cfgGetItem(pCfg, "retrieveBlockingModel")->bval;
tsPrintAuth = cfgGetItem(pCfg, "printAuth")->bval;
- tsEnableSlaveQuery = cfgGetItem(pCfg, "slaveQuery")->bval;
- tsDeadLockKillQuery = cfgGetItem(pCfg, "deadLockKillQuery")->i32;
tsMultiProcess = cfgGetItem(pCfg, "multiProcess")->bval;
tsMnodeShmSize = cfgGetItem(pCfg, "mnodeShmSize")->i32;
@@ -598,7 +559,6 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsNumOfRpcThreads = cfgGetItem(pCfg, "numOfRpcThreads")->i32;
tsNumOfCommitThreads = cfgGetItem(pCfg, "numOfCommitThreads")->i32;
- tsNumOfMnodeQueryThreads = cfgGetItem(pCfg, "numOfMnodeQueryThreads")->i32;
tsNumOfMnodeReadThreads = cfgGetItem(pCfg, "numOfMnodeReadThreads")->i32;
tsNumOfVnodeQueryThreads = cfgGetItem(pCfg, "numOfVnodeQueryThreads")->i32;
tsNumOfVnodeStreamThreads = cfgGetItem(pCfg, "numOfVnodeStreamThreads")->i32;
@@ -673,9 +633,7 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
break;
}
case 'd': {
- if (strcasecmp("deadLockKillQuery", name) == 0) {
- tsDeadLockKillQuery = cfgGetItem(pCfg, "deadLockKillQuery")->i32;
- } else if (strcasecmp("dDebugFlag", name) == 0) {
+ if (strcasecmp("dDebugFlag", name) == 0) {
dDebugFlag = cfgGetItem(pCfg, "dDebugFlag")->i32;
}
break;
@@ -732,9 +690,6 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
break;
}
case 'k': {
- if (strcasecmp("keepColumnName", name) == 0) {
- tsKeepOriginalColumnName = cfgGetItem(pCfg, "keepColumnName")->bval;
- }
break;
}
case 'l': {
@@ -758,10 +713,6 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
tsMaxShellConns = cfgGetItem(pCfg, "maxShellConns")->i32;
} else if (strcasecmp("maxNumOfDistinctRes", name) == 0) {
tsMaxNumOfDistinctResults = cfgGetItem(pCfg, "maxNumOfDistinctRes")->i32;
- } else if (strcasecmp("maxStreamCompDelay", name) == 0) {
- tsMaxStreamComputDelay = cfgGetItem(pCfg, "maxStreamCompDelay")->i32;
- } else if (strcasecmp("maxFirstStreamCompDelay", name) == 0) {
- tsStreamCompStartDelay = cfgGetItem(pCfg, "maxFirstStreamCompDelay")->i32;
}
break;
}
@@ -772,8 +723,8 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
break;
}
case 'i': {
- if (strcasecmp("minimalTempDirGB", name) == 0) {
- tsTempSpace.reserved = cfgGetItem(pCfg, "minimalTempDirGB")->fval;
+ if (strcasecmp("minimalTmpDirGB", name) == 0) {
+ tsTempSpace.reserved = cfgGetItem(pCfg, "minimalTmpDirGB")->fval;
} else if (strcasecmp("minimalDataDirGB", name) == 0) {
tsDataSpace.reserved = cfgGetItem(pCfg, "minimalDataDirGB")->fval;
} else if (strcasecmp("minSlidingTime", name) == 0) {
@@ -834,8 +785,6 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
tsNumOfRpcThreads = cfgGetItem(pCfg, "numOfRpcThreads")->i32;
} else if (strcasecmp("numOfCommitThreads", name) == 0) {
tsNumOfCommitThreads = cfgGetItem(pCfg, "numOfCommitThreads")->i32;
- } else if (strcasecmp("numOfMnodeQueryThreads", name) == 0) {
- tsNumOfMnodeQueryThreads = cfgGetItem(pCfg, "numOfMnodeQueryThreads")->i32;
} else if (strcasecmp("numOfMnodeReadThreads", name) == 0) {
tsNumOfMnodeReadThreads = cfgGetItem(pCfg, "numOfMnodeReadThreads")->i32;
} else if (strcasecmp("numOfVnodeQueryThreads", name) == 0) {
@@ -883,9 +832,7 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
break;
}
case 'r': {
- if (strcasecmp("retryStreamCompDelay", name) == 0) {
- tsRetryStreamCompDelay = cfgGetItem(pCfg, "retryStreamCompDelay")->i32;
- } else if (strcasecmp("retrieveBlockingModel", name) == 0) {
+ if (strcasecmp("retrieveBlockingModel", name) == 0) {
tsRetrieveBlockingModel = cfgGetItem(pCfg, "retrieveBlockingModel")->bval;
} else if (strcasecmp("rpcQueueMemoryAllowed", name) == 0) {
tsRpcQueueMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64;
@@ -913,10 +860,6 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
tsNumOfSupportVnodes = cfgGetItem(pCfg, "supportVnodes")->i32;
} else if (strcasecmp("statusInterval", name) == 0) {
tsStatusInterval = cfgGetItem(pCfg, "statusInterval")->i32;
- } else if (strcasecmp("streamCompDelayRatio", name) == 0) {
- tsStreamComputDelayRatio = cfgGetItem(pCfg, "streamCompDelayRatio")->fval;
- } else if (strcasecmp("slaveQuery", name) == 0) {
- tsEnableSlaveQuery = cfgGetItem(pCfg, "slaveQuery")->bval;
} else if (strcasecmp("snodeShmSize", name) == 0) {
tsSnodeShmSize = cfgGetItem(pCfg, "snodeShmSize")->i32;
} else if (strcasecmp("serverPort", name) == 0) {
diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c
index 22b7dd827d50a133c2d1f2ea3e3be3929e9171c9..3163982decb1fe4b62f2140cd1c1aa6f94bf3916 100644
--- a/source/common/src/tmsg.c
+++ b/source/common/src/tmsg.c
@@ -5682,6 +5682,7 @@ int32_t tEncodeDeleteRes(SEncoder *pCoder, const SDeleteRes *pRes) {
if (tEncodeI64v(pCoder, pRes->affectedRows) < 0) return -1;
if (tEncodeCStr(pCoder, pRes->tableFName) < 0) return -1;
+ if (tEncodeCStr(pCoder, pRes->tsColName) < 0) return -1;
return 0;
}
@@ -5700,6 +5701,7 @@ int32_t tDecodeDeleteRes(SDecoder *pCoder, SDeleteRes *pRes) {
if (tDecodeI64v(pCoder, &pRes->affectedRows) < 0) return -1;
if (tDecodeCStrTo(pCoder, pRes->tableFName) < 0) return -1;
+ if (tDecodeCStrTo(pCoder, pRes->tsColName) < 0) return -1;
return 0;
}
int32_t tEncodeSMqDataRsp(SEncoder *pEncoder, const SMqDataRsp *pRsp) {
diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c
index 22a53f07f6aeab486719da5fd3253154dc65a41e..647af20fcf24e47b27b02d69595f8d1555a4cc19 100644
--- a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c
+++ b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c
@@ -184,6 +184,7 @@ SArray *mmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_MND_ALTER_STB, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_DROP_STB, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_TABLE_META, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_MND_BATCH_META, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_TABLE_CFG, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_CREATE_SMA, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_DROP_SMA, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
index 0c80e69384a0efd9934144a3aaaa47fac83d4c07..eca61dd960eaf6fd9b0dfbd0d9bdc4de698e8c77 100644
--- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
+++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
@@ -343,6 +343,7 @@ SArray *vmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_VND_UPDATE_TAG_VAL, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_TABLE_META, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_TABLE_CFG, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_VND_BATCH_META, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_TABLES_META, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SCH_CANCEL_TASK, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SCH_DROP_TASK, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c
index 5ad13e383a648b7703a7a247d182242950008750..9d1142801d945141c4c0831e03c40a3f33cef1e4 100644
--- a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c
+++ b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c
@@ -89,7 +89,7 @@ static void vmProcessStreamQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, pMsg, pInfo);
if (code != 0) {
if (terrno != 0) code = terrno;
- dGError("vgId:%d, msg:%p failed to stream since %s", pVnode->vgId, pMsg, terrstr());
+ dGError("vgId:%d, msg:%p failed to process stream since %s", pVnode->vgId, pMsg, terrstr());
vmSendRsp(pMsg, code);
}
diff --git a/source/dnode/mnode/impl/CMakeLists.txt b/source/dnode/mnode/impl/CMakeLists.txt
index a4bd12a7f7627307cd1473d4ef973657c1c4fb49..c740ea1397e7b406f0b832d3d63aa88b9accd4e0 100644
--- a/source/dnode/mnode/impl/CMakeLists.txt
+++ b/source/dnode/mnode/impl/CMakeLists.txt
@@ -1,4 +1,9 @@
aux_source_directory(src MNODE_SRC)
+IF (TD_PRIVILEGE)
+  ADD_DEFINITIONS(-D_PRIVILEGE)
+  LIST(APPEND MNODE_SRC ${TD_ENTERPRISE_DIR}/src/plugins/privilege/src/privilege.c)
+ENDIF ()
+
add_library(mnode STATIC ${MNODE_SRC})
target_include_directories(
mnode
@@ -8,11 +15,8 @@ target_include_directories(
target_link_libraries(
mnode scheduler sdb wal transport cjson sync monitor executor qworker stream parser
)
-
IF (TD_GRANT)
TARGET_LINK_LIBRARIES(mnode grant)
-ENDIF ()
-IF (TD_GRANT)
ADD_DEFINITIONS(-D_GRANT)
ENDIF ()
diff --git a/source/dnode/mnode/impl/inc/mndPrivilege.h b/source/dnode/mnode/impl/inc/mndPrivilege.h
index f6002e3be8ba98e3867cef6034161545430f2a9d..dc88b25f51adbbbd1e813401645b51a4e3ba0089 100644
--- a/source/dnode/mnode/impl/inc/mndPrivilege.h
+++ b/source/dnode/mnode/impl/inc/mndPrivilege.h
@@ -30,6 +30,7 @@ int32_t mndCheckDbPrivilege(SMnode *pMnode, const char *user, EOperType operType
int32_t mndCheckDbPrivilegeByName(SMnode *pMnode, const char *user, EOperType operType, const char *dbname);
int32_t mndCheckShowPrivilege(SMnode *pMnode, const char *user, EShowType showType, const char *dbname);
int32_t mndCheckAlterUserPrivilege(SUserObj *pOperUser, SUserObj *pUser, SAlterUserReq *pAlter);
+int32_t mndSetUserAuthRsp(SMnode *pMnode, SUserObj *pUser, SGetUserAuthRsp *pRsp);
#ifdef __cplusplus
}
diff --git a/source/dnode/mnode/impl/inc/mndUser.h b/source/dnode/mnode/impl/inc/mndUser.h
index 016ec3e6e9e6155c68c0553b738f237140bf1d42..970d1db7dbc2ff9a92f201aabb940fdeea1f22e4 100644
--- a/source/dnode/mnode/impl/inc/mndUser.h
+++ b/source/dnode/mnode/impl/inc/mndUser.h
@@ -17,6 +17,7 @@
#define _TD_MND_USER_H_
#include "mndInt.h"
+#include "thash.h"
#ifdef __cplusplus
extern "C" {
@@ -28,9 +29,10 @@ SUserObj *mndAcquireUser(SMnode *pMnode, const char *userName);
void mndReleaseUser(SMnode *pMnode, SUserObj *pUser);
// for trans test
-SSdbRaw *mndUserActionEncode(SUserObj *pUser);
-int32_t mndValidateUserAuthInfo(SMnode *pMnode, SUserAuthVersion *pUsers, int32_t numOfUses, void **ppRsp,
- int32_t *pRspLen);
+SSdbRaw *mndUserActionEncode(SUserObj *pUser);
+SHashObj *mndDupDbHash(SHashObj *pOld);
+int32_t mndValidateUserAuthInfo(SMnode *pMnode, SUserAuthVersion *pUsers, int32_t numOfUses, void **ppRsp,
+ int32_t *pRspLen);
#ifdef __cplusplus
}
diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c
index d84455ac94a76cb6523249e30d37101800f6a0c9..3f90f087fd34985d99b5476610aec4dd5e9002d3 100644
--- a/source/dnode/mnode/impl/src/mndDnode.c
+++ b/source/dnode/mnode/impl/src/mndDnode.c
@@ -805,14 +805,6 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
return -1;
}
- SDnodeObj *pDnode = mndAcquireDnode(pMnode, cfgReq.dnodeId);
- if (pDnode == NULL) {
- mError("dnode:%d, failed to config since %s ", cfgReq.dnodeId, terrstr());
- return -1;
- }
- SEpSet epSet = mndGetDnodeEpset(pDnode);
- mndReleaseDnode(pMnode, pDnode);
-
SDCfgDnodeReq dcfgReq = {0};
if (strcasecmp(cfgReq.config, "resetlog") == 0) {
strcpy(dcfgReq.config, "resetlog");
@@ -860,16 +852,36 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
}
}
- int32_t bufLen = tSerializeSDCfgDnodeReq(NULL, 0, &dcfgReq);
- void *pBuf = rpcMallocCont(bufLen);
+ int32_t code = -1;
+ SSdb *pSdb = pMnode->pSdb;
+ void *pIter = NULL;
+ while (1) {
+ SDnodeObj *pDnode = NULL;
+ pIter = sdbFetch(pSdb, SDB_DNODE, pIter, (void **)&pDnode);
+ if (pIter == NULL) break;
- if (pBuf == NULL) return -1;
- tSerializeSDCfgDnodeReq(pBuf, bufLen, &dcfgReq);
+ if (pDnode->id == cfgReq.dnodeId || cfgReq.dnodeId == -1 || cfgReq.dnodeId == 0) {
+ SEpSet epSet = mndGetDnodeEpset(pDnode);
+ int32_t bufLen = tSerializeSDCfgDnodeReq(NULL, 0, &dcfgReq);
+ void *pBuf = rpcMallocCont(bufLen);
+
+ if (pBuf != NULL) {
+ tSerializeSDCfgDnodeReq(pBuf, bufLen, &dcfgReq);
+ mInfo("dnode:%d, send config req to dnode, app:%p config:%s value:%s", cfgReq.dnodeId, pReq->info.ahandle,
+ dcfgReq.config, dcfgReq.value);
+ SRpcMsg rpcMsg = {.msgType = TDMT_DND_CONFIG_DNODE, .pCont = pBuf, .contLen = bufLen};
+ tmsgSendReq(&epSet, &rpcMsg);
+ code = 0;
+ }
+ }
- mInfo("dnode:%d, send config req to dnode, app:%p config:%s value:%s", cfgReq.dnodeId, pReq->info.ahandle,
- dcfgReq.config, dcfgReq.value);
- SRpcMsg rpcMsg = {.msgType = TDMT_DND_CONFIG_DNODE, .pCont = pBuf, .contLen = bufLen};
- return tmsgSendReq(&epSet, &rpcMsg);
+ sdbRelease(pSdb, pDnode);
+ }
+
+ if (code == -1) {
+ terrno = TSDB_CODE_MND_DNODE_NOT_EXIST;
+ }
+ return code;
}
static int32_t mndProcessConfigDnodeRsp(SRpcMsg *pRsp) {
diff --git a/source/dnode/mnode/impl/src/mndPrivilege.c b/source/dnode/mnode/impl/src/mndPrivilege.c
index e4422c480f5b37874f20d36eb77ae99dbb027d51..151a2a64042bfab4cdbc5be38ef30b11680750a5 100644
--- a/source/dnode/mnode/impl/src/mndPrivilege.c
+++ b/source/dnode/mnode/impl/src/mndPrivilege.c
@@ -18,177 +18,20 @@
#include "mndDb.h"
#include "mndUser.h"
+#ifndef _PRIVILEGE
int32_t mndInitPrivilege(SMnode *pMnode) { return 0; }
-
-void mndCleanupPrivilege(SMnode *pMnode) {}
-
-int32_t mndCheckOperPrivilege(SMnode *pMnode, const char *user, EOperType operType) {
- int32_t code = 0;
- SUserObj *pUser = mndAcquireUser(pMnode, user);
-
- if (pUser == NULL) {
- terrno = TSDB_CODE_MND_NO_USER_FROM_CONN;
- code = -1;
- goto _OVER;
- }
-
- if (pUser->superUser) {
- goto _OVER;
- }
-
- if (!pUser->enable) {
- terrno = TSDB_CODE_MND_USER_DISABLED;
- code = -1;
- goto _OVER;
- }
-
- switch (operType) {
- case MND_OPER_CONNECT:
- case MND_OPER_CREATE_FUNC:
- case MND_OPER_DROP_FUNC:
- case MND_OPER_SHOW_VARIBALES:
- break;
- default:
- terrno = TSDB_CODE_MND_NO_RIGHTS;
- code = -1;
- }
-
-_OVER:
- mndReleaseUser(pMnode, pUser);
- return code;
-}
-
-int32_t mndCheckAlterUserPrivilege(SUserObj *pOperUser, SUserObj *pUser, SAlterUserReq *pAlter) {
- if (pUser->superUser && pAlter->alterType != TSDB_ALTER_USER_PASSWD) {
- terrno = TSDB_CODE_MND_NO_RIGHTS;
- return -1;
- }
-
- if (pOperUser->superUser) return 0;
-
- if (!pOperUser->enable) {
- terrno = TSDB_CODE_MND_USER_DISABLED;
- return -1;
- }
-
- if (pAlter->alterType == TSDB_ALTER_USER_PASSWD) {
- if (strcmp(pUser->user, pOperUser->user) == 0) {
- if (pOperUser->sysInfo) return 0;
- }
- }
-
- terrno = TSDB_CODE_MND_NO_RIGHTS;
- return -1;
-}
-
-int32_t mndCheckShowPrivilege(SMnode *pMnode, const char *user, EShowType showType, const char *dbname) {
- int32_t code = 0;
- SUserObj *pUser = mndAcquireUser(pMnode, user);
-
- if (pUser == NULL) {
- code = -1;
- goto _OVER;
- }
-
- if (pUser->superUser) {
- goto _OVER;
- }
-
- if (!pUser->enable) {
- terrno = TSDB_CODE_MND_USER_DISABLED;
- code = -1;
- goto _OVER;
- }
-
- if (pUser->sysInfo) {
- goto _OVER;
- }
-
- switch (showType) {
- case TSDB_MGMT_TABLE_DB:
- case TSDB_MGMT_TABLE_STB:
- case TSDB_MGMT_TABLE_INDEX:
- case TSDB_MGMT_TABLE_STREAMS:
- case TSDB_MGMT_TABLE_CONSUMERS:
- case TSDB_MGMT_TABLE_TOPICS:
- case TSDB_MGMT_TABLE_SUBSCRIPTIONS:
- case TSDB_MGMT_TABLE_FUNC:
- case TSDB_MGMT_TABLE_QUERIES:
- case TSDB_MGMT_TABLE_CONNS:
- case TSDB_MGMT_TABLE_APPS:
- case TSDB_MGMT_TABLE_TRANS:
- code = 0;
- break;
- default:
- terrno = TSDB_CODE_MND_NO_RIGHTS;
- code = -1;
- goto _OVER;
- }
-
- if (showType == TSDB_MGMT_TABLE_STB || showType == TSDB_MGMT_TABLE_VGROUP || showType == TSDB_MGMT_TABLE_INDEX) {
- code = mndCheckDbPrivilegeByName(pMnode, user, MND_OPER_READ_OR_WRITE_DB, dbname);
- }
-
-_OVER:
- mndReleaseUser(pMnode, pUser);
- return code;
+void mndCleanupPrivilege(SMnode *pMnode) {}
+int32_t mndCheckOperPrivilege(SMnode *pMnode, const char *user, EOperType operType) { return 0; }
+int32_t mndCheckAlterUserPrivilege(SUserObj *pOperUser, SUserObj *pUser, SAlterUserReq *pAlter) { return 0; }
+int32_t mndCheckShowPrivilege(SMnode *pMnode, const char *user, EShowType showType, const char *dbname) { return 0; }
+int32_t mndCheckDbPrivilege(SMnode *pMnode, const char *user, EOperType operType, SDbObj *pDb) { return 0; }
+int32_t mndCheckDbPrivilegeByName(SMnode *pMnode, const char *user, EOperType operType, const char *dbname) {
+ return 0;
}
-
-int32_t mndCheckDbPrivilege(SMnode *pMnode, const char *user, EOperType operType, SDbObj *pDb) {
- int32_t code = 0;
- SUserObj *pUser = mndAcquireUser(pMnode, user);
-
- if (pUser == NULL) {
- code = -1;
- goto _OVER;
- }
-
- if (pUser->superUser) goto _OVER;
-
- if (!pUser->enable) {
- terrno = TSDB_CODE_MND_USER_DISABLED;
- code = -1;
- goto _OVER;
- }
-
- if (operType == MND_OPER_CREATE_DB) {
- if (pUser->sysInfo) goto _OVER;
- }
-
- if (operType == MND_OPER_ALTER_DB || operType == MND_OPER_DROP_DB || operType == MND_OPER_COMPACT_DB ||
- operType == MND_OPER_TRIM_DB) {
- if (strcmp(pUser->user, pDb->createUser) == 0 && pUser->sysInfo) goto _OVER;
- }
-
- if (operType == MND_OPER_USE_DB || operType == MND_OPER_READ_OR_WRITE_DB) {
- if (strcmp(pUser->user, pDb->createUser) == 0) goto _OVER;
- if (taosHashGet(pUser->readDbs, pDb->name, strlen(pDb->name) + 1) != NULL) goto _OVER;
- if (taosHashGet(pUser->writeDbs, pDb->name, strlen(pDb->name) + 1) != NULL) goto _OVER;
- }
-
- if (operType == MND_OPER_WRITE_DB) {
- if (strcmp(pUser->user, pDb->createUser) == 0) goto _OVER;
- if (taosHashGet(pUser->writeDbs, pDb->name, strlen(pDb->name) + 1) != NULL) goto _OVER;
- }
-
- if (operType == MND_OPER_READ_DB) {
- if (strcmp(pUser->user, pDb->createUser) == 0) goto _OVER;
- if (taosHashGet(pUser->readDbs, pDb->name, strlen(pDb->name) + 1) != NULL) goto _OVER;
- }
-
- terrno = TSDB_CODE_MND_NO_RIGHTS;
- code = -1;
-
-_OVER:
- mndReleaseUser(pMnode, pUser);
- return code;
+int32_t mndSetUserAuthRsp(SMnode *pMnode, SUserObj *pUser, SGetUserAuthRsp *pRsp) {
+ memcpy(pRsp->user, pUser->user, TSDB_USER_LEN);
+ pRsp->superAuth = 1;
+ pRsp->version = pUser->authVersion;
+ return 0;
}
-
-int32_t mndCheckDbPrivilegeByName(SMnode *pMnode, const char *user, EOperType operType, const char *dbname) {
- SDbObj *pDb = mndAcquireDb(pMnode, dbname);
- if (pDb == NULL) return -1;
-
- int32_t code = mndCheckDbPrivilege(pMnode, user, operType, pDb);
- mndReleaseDb(pMnode, pDb);
- return code;
-}
\ No newline at end of file
+#endif
\ No newline at end of file
diff --git a/source/dnode/mnode/impl/src/mndQuery.c b/source/dnode/mnode/impl/src/mndQuery.c
index 5a527b994e2c8eb130fe5c16f294e7a9ef8342f2..2beeb10335b02c6d08a75c0f2c498c825e56de54 100644
--- a/source/dnode/mnode/impl/src/mndQuery.c
+++ b/source/dnode/mnode/impl/src/mndQuery.c
@@ -63,6 +63,106 @@ int32_t mndProcessQueryMsg(SRpcMsg *pMsg) {
return code;
}
+int32_t mndProcessBatchMetaMsg(SRpcMsg *pMsg) {
+ int32_t code = 0;
+ int32_t offset = 0;
+ int32_t rspSize = 0;
+ SBatchReq *batchReq = (SBatchReq*)pMsg->pCont;
+ int32_t msgNum = ntohl(batchReq->msgNum);
+ offset += sizeof(SBatchReq);
+ SBatchMsg req = {0};
+ SBatchRsp rsp = {0};
+ SRpcMsg reqMsg = *pMsg;
+ SRpcMsg rspMsg = {0};
+ void* pRsp = NULL;
+ SMnode *pMnode = pMsg->info.node;
+
+ SArray* batchRsp = taosArrayInit(msgNum, sizeof(SBatchRsp));
+ if (NULL == batchRsp) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
+ }
+
+ for (int32_t i = 0; i < msgNum; ++i) {
+ req.msgType = ntohl(*(int32_t*)((char*)pMsg->pCont + offset));
+ offset += sizeof(req.msgType);
+
+ req.msgLen = ntohl(*(int32_t*)((char*)pMsg->pCont + offset));
+ offset += sizeof(req.msgLen);
+
+ req.msg = (char*)pMsg->pCont + offset;
+ offset += req.msgLen;
+
+ reqMsg.msgType = req.msgType;
+ reqMsg.pCont = req.msg;
+ reqMsg.contLen = req.msgLen;
+ reqMsg.info.rsp = NULL;
+ reqMsg.info.rspLen = 0;
+
+    MndMsgFp fp = pMnode->msgFp[TMSG_INDEX(req.msgType)];
+    if (fp == NULL) {
+      mError("msg:%p, failed to get msg handle, app:%p type:%s", pMsg, pMsg->info.ahandle, TMSG_INFO(req.msgType));
+      code = terrno = TSDB_CODE_MSG_NOT_PROCESSED;
+      goto _exit;  // release the responses collected so far instead of leaking batchRsp
+    }
+
+ if ((*fp)(&reqMsg)) {
+ rsp.rspCode = terrno;
+ } else {
+ rsp.rspCode = 0;
+ }
+ rsp.reqType = reqMsg.msgType;
+ rsp.msgLen = reqMsg.info.rspLen;
+ rsp.msg = reqMsg.info.rsp;
+
+ taosArrayPush(batchRsp, &rsp);
+
+ rspSize += sizeof(rsp) + rsp.msgLen - POINTER_BYTES;
+ }
+
+ rspSize += sizeof(int32_t);
+ offset = 0;
+
+ pRsp = rpcMallocCont(rspSize);
+ if (pRsp == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
+ }
+
+ *(int32_t*)((char*)pRsp + offset) = htonl(msgNum);
+ offset += sizeof(msgNum);
+ for (int32_t i = 0; i < msgNum; ++i) {
+ SBatchRsp *p = taosArrayGet(batchRsp, i);
+
+ *(int32_t*)((char*)pRsp + offset) = htonl(p->reqType);
+ offset += sizeof(p->reqType);
+ *(int32_t*)((char*)pRsp + offset) = htonl(p->msgLen);
+ offset += sizeof(p->msgLen);
+ *(int32_t*)((char*)pRsp + offset) = htonl(p->rspCode);
+ offset += sizeof(p->rspCode);
+ memcpy((char*)pRsp + offset, p->msg, p->msgLen);
+ offset += p->msgLen;
+
+ rpcFreeCont(p->msg);
+ }
+
+ taosArrayDestroy(batchRsp);
+ batchRsp = NULL;
+
+_exit:
+
+ pMsg->info.rsp = pRsp;
+ pMsg->info.rspLen = rspSize;
+
+ if (code) {
+ mError("mnd get batch meta failed cause of %s", tstrerror(code));
+ }
+
+ taosArrayDestroyEx(batchRsp, tFreeSBatchRsp);
+
+ return code;
+}
+
int32_t mndInitQuery(SMnode *pMnode) {
if (qWorkerInit(NODE_TYPE_MNODE, MNODE_HANDLE, NULL, (void **)&pMnode->pQuery, &pMnode->msgCb) != 0) {
mError("failed to init qworker in mnode since %s", terrstr());
@@ -76,6 +176,7 @@ int32_t mndInitQuery(SMnode *pMnode) {
mndSetMsgHandle(pMnode, TDMT_SCH_MERGE_FETCH, mndProcessQueryMsg);
mndSetMsgHandle(pMnode, TDMT_SCH_DROP_TASK, mndProcessQueryMsg);
mndSetMsgHandle(pMnode, TDMT_SCH_QUERY_HEARTBEAT, mndProcessQueryMsg);
+ mndSetMsgHandle(pMnode, TDMT_MND_BATCH_META, mndProcessBatchMetaMsg);
return 0;
}
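The response that mndProcessBatchMetaMsg assembles is the mirror image of the request: a count followed by one (reqType, msgLen, rspCode, body) record per sub-request. Below is a hedged client-side decoding sketch with a hypothetical helper name; note the returned SBatchRsp entries point into `buf`, so the buffer must outlive them or the bodies must be copied:

```c
// Hypothetical decoder sketch for the batch-meta response:
//   [int32 msgNum] [per rsp: int32 reqType | int32 msgLen | int32 rspCode | body]
static int32_t batchRspDeserialize(char *buf, SArray *pOut /* SArray<SBatchRsp> */) {
  int32_t offset = 0;
  int32_t msgNum = ntohl(*(int32_t *)(buf + offset));
  offset += sizeof(int32_t);

  for (int32_t i = 0; i < msgNum; ++i) {
    SBatchRsp rsp = {0};
    rsp.reqType = ntohl(*(int32_t *)(buf + offset));
    offset += sizeof(int32_t);
    rsp.msgLen = ntohl(*(int32_t *)(buf + offset));
    offset += sizeof(int32_t);
    rsp.rspCode = ntohl(*(int32_t *)(buf + offset));
    offset += sizeof(int32_t);
    rsp.msg = buf + offset;  // aliases buf; copy if it must outlive buf
    offset += rsp.msgLen;
    taosArrayPush(pOut, &rsp);
  }
  return offset;  // bytes consumed
}
```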
diff --git a/source/dnode/mnode/impl/src/mndScheduler.c b/source/dnode/mnode/impl/src/mndScheduler.c
index 9882b0a9ae9eccb79b330d5d155e2f3079f8c03b..9d7fa537bb3ed9fffde4dc5b49e37e7e0e4afc84 100644
--- a/source/dnode/mnode/impl/src/mndScheduler.c
+++ b/source/dnode/mnode/impl/src/mndScheduler.c
@@ -391,10 +391,12 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
// exec
pInnerTask->execType = TASK_EXEC__PIPE;
+#if 0
SDbObj* pSourceDb = mndAcquireDb(pMnode, pStream->sourceDb);
ASSERT(pDbObj != NULL);
sdbRelease(pSdb, pSourceDb);
pInnerTask->numOfVgroups = pSourceDb->cfg.numOfVgroups;
+#endif
if (tsSchedStreamToSnode) {
SSnodeObj* pSnode = mndSchedFetchOneSnode(pMnode);
diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c
index 2d7117c11f9b23783ced39a4c5b067ba66ced76e..b2a0e6aac83f879bb1ba2d802894179d85d19d5a 100644
--- a/source/dnode/mnode/impl/src/mndTrans.c
+++ b/source/dnode/mnode/impl/src/mndTrans.c
@@ -1287,6 +1287,7 @@ static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans) {
mDebug("trans:%d, stage keep on redoAction since %s", pTrans->id, tstrerror(code));
continueExec = false;
} else {
+ pTrans->failedTimes++;
pTrans->code = terrno;
if (pTrans->policy == TRN_POLICY_ROLLBACK) {
if (pTrans->lastAction != 0) {
@@ -1306,7 +1307,6 @@ static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans) {
mError("trans:%d, stage from redoAction to rollback since %s", pTrans->id, terrstr());
continueExec = true;
} else {
- pTrans->failedTimes++;
mError("trans:%d, stage keep on redoAction since %s, failedTimes:%d", pTrans->id, terrstr(), pTrans->failedTimes);
continueExec = false;
}
diff --git a/source/dnode/mnode/impl/src/mndUser.c b/source/dnode/mnode/impl/src/mndUser.c
index 0452659d47ab333be6de7da5350787b7c09102a9..5da119bb30af5bb27fb40d1dc42391893fb98c43 100644
--- a/source/dnode/mnode/impl/src/mndUser.c
+++ b/source/dnode/mnode/impl/src/mndUser.c
@@ -15,8 +15,8 @@
#define _DEFAULT_SOURCE
#include "mndUser.h"
-#include "mndPrivilege.h"
#include "mndDb.h"
+#include "mndPrivilege.h"
#include "mndShow.h"
#include "mndTrans.h"
#include "tbase64.h"
@@ -408,7 +408,7 @@ static int32_t mndAlterUser(SMnode *pMnode, SUserObj *pOld, SUserObj *pNew, SRpc
return 0;
}
-static SHashObj *mndDupDbHash(SHashObj *pOld) {
+SHashObj *mndDupDbHash(SHashObj *pOld) {
SHashObj *pNew =
taosHashInit(taosHashGetSize(pOld), taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);
if (pNew == NULL) {
@@ -662,38 +662,6 @@ _OVER:
return code;
}
-static int32_t mndSetUserAuthRsp(SMnode *pMnode, SUserObj *pUser, SGetUserAuthRsp *pRsp) {
- memcpy(pRsp->user, pUser->user, TSDB_USER_LEN);
- pRsp->superAuth = pUser->superUser;
- pRsp->version = pUser->authVersion;
- taosRLockLatch(&pUser->lock);
- pRsp->readDbs = mndDupDbHash(pUser->readDbs);
- pRsp->writeDbs = mndDupDbHash(pUser->writeDbs);
- taosRUnLockLatch(&pUser->lock);
- pRsp->createdDbs = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
- if (NULL == pRsp->createdDbs) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- return -1;
- }
-
- SSdb *pSdb = pMnode->pSdb;
- void *pIter = NULL;
- while (1) {
- SDbObj *pDb = NULL;
- pIter = sdbFetch(pSdb, SDB_DB, pIter, (void **)&pDb);
- if (pIter == NULL) break;
-
- if (strcmp(pDb->createUser, pUser->user) == 0) {
- int32_t len = strlen(pDb->name) + 1;
- taosHashPut(pRsp->createdDbs, pDb->name, len, pDb->name, len);
- }
-
- sdbRelease(pSdb, pDb);
- }
-
- return 0;
-}
-
static int32_t mndProcessGetUserAuthReq(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
int32_t code = -1;
diff --git a/source/dnode/vnode/src/inc/vnd.h b/source/dnode/vnode/src/inc/vnd.h
index 984b34814d9389374b36682e1b1e857e7f90ac23..dd1facb462ceb2b543f4a2ea443c9a886a6b13ca 100644
--- a/source/dnode/vnode/src/inc/vnd.h
+++ b/source/dnode/vnode/src/inc/vnd.h
@@ -78,8 +78,9 @@ void vnodeBufPoolReset(SVBufPool* pPool);
// vnodeQuery.c
int32_t vnodeQueryOpen(SVnode* pVnode);
void vnodeQueryClose(SVnode* pVnode);
-int32_t vnodeGetTableMeta(SVnode* pVnode, SRpcMsg* pMsg);
-int vnodeGetTableCfg(SVnode* pVnode, SRpcMsg* pMsg);
+int32_t vnodeGetTableMeta(SVnode* pVnode, SRpcMsg* pMsg, bool direct);
+int vnodeGetTableCfg(SVnode* pVnode, SRpcMsg* pMsg, bool direct);
+int32_t vnodeGetBatchMeta(SVnode *pVnode, SRpcMsg *pMsg);
// vnodeCommit.c
int32_t vnodeBegin(SVnode* pVnode);
diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c
index eecb0e6621a1b397cf09d7267d75c4ce7a0d6c43..3505711cd0f7a338f574951aea5f684f8fb4ce68 100644
--- a/source/dnode/vnode/src/sma/smaRollup.c
+++ b/source/dnode/vnode/src/sma/smaRollup.c
@@ -611,8 +611,8 @@ static int32_t tdRSmaFetchAndSubmitResult(SRSmaInfoItem *pItem, STSchema *pTSche
goto _err;
}
- smaDebug("vgId:%d, process submit req for rsma table %" PRIi64 " level %" PRIi8 " version:%"PRIi64, SMA_VID(pSma),
- suid, pItem->level, output->info.version);
+ smaDebug("vgId:%d, process submit req for rsma table %" PRIi64 " level %" PRIi8 " version:%" PRIi64,
+ SMA_VID(pSma), suid, pItem->level, output->info.version);
taosMemoryFreeClear(pReq);
taosArrayClear(pResult);
@@ -644,7 +644,7 @@ static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType
smaDebug("vgId:%d, execute rsma %" PRIi8 " task for qTaskInfo:%p suid:%" PRIu64, SMA_VID(pSma), level,
pItem->taskInfo, suid);
- if (qSetStreamInput(pItem->taskInfo, pMsg, inputType, true) < 0) { // INPUT__DATA_SUBMIT
+ if (qSetMultiStreamInput(pItem->taskInfo, pMsg, 1, inputType) < 0) { // INPUT__DATA_SUBMIT
smaError("vgId:%d, rsma % " PRIi8 " qSetStreamInput failed since %s", SMA_VID(pSma), level, tstrerror(terrno));
return TSDB_CODE_FAILED;
}
@@ -1329,7 +1329,7 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
tdRefRSmaInfo(pSma, pRSmaInfo);
SSDataBlock dataBlock = {.info.type = STREAM_GET_ALL};
- qSetStreamInput(pItem->taskInfo, &dataBlock, STREAM_INPUT__DATA_BLOCK, false);
+ qSetMultiStreamInput(pItem->taskInfo, &dataBlock, 1, STREAM_INPUT__DATA_BLOCK);
tdRSmaFetchAndSubmitResult(pItem, pRSmaInfo->pTSchema, pRSmaInfo->suid, pStat, STREAM_INPUT__DATA_BLOCK);
tdUnRefRSmaInfo(pSma, pRSmaInfo);
@@ -1356,4 +1356,4 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
_end:
tdReleaseSmaRef(smaMgmt.rsetId, pItem->refId, __func__, __LINE__);
-}
\ No newline at end of file
+}
diff --git a/source/dnode/vnode/src/sma/smaSnapshot.c b/source/dnode/vnode/src/sma/smaSnapshot.c
new file mode 100644
index 0000000000000000000000000000000000000000..21dfd8a32d617a2674940c483287506bdf7a8852
--- /dev/null
+++ b/source/dnode/vnode/src/sma/smaSnapshot.c
@@ -0,0 +1,288 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "sma.h"
+
+static int32_t rsmaSnapReadQTaskInfo(SRsmaSnapReader* pReader, uint8_t** ppData);
+static int32_t rsmaSnapWriteQTaskInfo(SRsmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData);
+
+// SRsmaSnapReader ========================================
+struct SRsmaSnapReader {
+ SSma* pSma;
+ int64_t sver;
+ int64_t ever;
+
+ // for data file
+ int8_t rsmaDataDone[TSDB_RETENTION_L2];
+ STsdbSnapReader* pDataReader[TSDB_RETENTION_L2];
+
+ // for qtaskinfo file
+ int8_t qTaskDone;
+ SQTaskFReader* pQTaskFReader;
+};
+
+int32_t rsmaSnapReaderOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapReader** ppReader) {
+ int32_t code = 0;
+ SRsmaSnapReader* pReader = NULL;
+
+ // alloc
+ pReader = (SRsmaSnapReader*)taosMemoryCalloc(1, sizeof(*pReader));
+ if (pReader == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ pReader->pSma = pSma;
+ pReader->sver = sver;
+ pReader->ever = ever;
+
+ for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
+ if (pSma->pRSmaTsdb[i]) {
+ code = tsdbSnapReaderOpen(pSma->pRSmaTsdb[i], sver, ever, &pReader->pDataReader[i]);
+ if (code < 0) {
+ goto _err;
+ }
+ }
+ }
+ *ppReader = pReader;
+ smaInfo("vgId:%d vnode snapshot rsma reader opened succeed", SMA_VID(pSma));
+ return TSDB_CODE_SUCCESS;
+_err:
+ smaError("vgId:%d vnode snapshot rsma reader opened failed since %s", SMA_VID(pSma), tstrerror(code));
+ return TSDB_CODE_FAILED;
+}
+
+static int32_t rsmaSnapReadQTaskInfo(SRsmaSnapReader* pReader, uint8_t** ppData) {
+ int32_t code = 0;
+ SSma* pSma = pReader->pSma;
+
+_exit:
+ smaInfo("vgId:%d vnode snapshot rsma read qtaskinfo succeed", SMA_VID(pSma));
+ return code;
+
+_err:
+ smaError("vgId:%d vnode snapshot rsma read qtaskinfo failed since %s", SMA_VID(pSma), tstrerror(code));
+ return code;
+}
+
+int32_t rsmaSnapRead(SRsmaSnapReader* pReader, uint8_t** ppData) {
+ int32_t code = 0;
+
+ *ppData = NULL;
+
+ smaInfo("vgId:%d vnode snapshot rsma read entry", SMA_VID(pReader->pSma));
+ // read rsma1/rsma2 file
+ for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
+ STsdbSnapReader* pTsdbSnapReader = pReader->pDataReader[i];
+ if (!pTsdbSnapReader) {
+ continue;
+ }
+ if (!pReader->rsmaDataDone[i]) {
+ smaInfo("vgId:%d vnode snapshot rsma read level %d not done", SMA_VID(pReader->pSma), i);
+ code = tsdbSnapRead(pTsdbSnapReader, ppData);
+ if (code) {
+ goto _err;
+ } else {
+ if (*ppData) {
+ goto _exit;
+ } else {
+ pReader->rsmaDataDone[i] = 1;
+ }
+ }
+ } else {
+ smaInfo("vgId:%d vnode snapshot rsma read level %d is done", SMA_VID(pReader->pSma), i);
+ }
+ }
+
+ // read qtaskinfo file
+ if (!pReader->qTaskDone) {
+ code = rsmaSnapReadQTaskInfo(pReader, ppData);
+ if (code) {
+ goto _err;
+ } else {
+ if (*ppData) {
+ goto _exit;
+ } else {
+ pReader->qTaskDone = 1;
+ }
+ }
+ }
+
+_exit:
+ smaInfo("vgId:%d vnode snapshot rsma read succeed", SMA_VID(pReader->pSma));
+ return code;
+
+_err:
+ smaError("vgId:%d vnode snapshot rsma read failed since %s", SMA_VID(pReader->pSma), tstrerror(code));
+ return code;
+}
+
+int32_t rsmaSnapReaderClose(SRsmaSnapReader** ppReader) {
+ int32_t code = 0;
+ SRsmaSnapReader* pReader = *ppReader;
+
+ for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
+ if (pReader->pDataReader[i]) {
+ tsdbSnapReaderClose(&pReader->pDataReader[i]);
+ }
+ }
+
+ if (pReader->pQTaskFReader) {
+ // TODO: close for qtaskinfo
+ smaInfo("vgId:%d vnode snapshot rsma reader closed for qTaskInfo", SMA_VID(pReader->pSma));
+ }
+
+
+ smaInfo("vgId:%d vnode snapshot rsma reader closed", SMA_VID(pReader->pSma));
+
+ taosMemoryFreeClear(*ppReader);
+ return code;
+}
+
+// SRsmaSnapWriter ========================================
+struct SRsmaSnapWriter {
+ SSma* pSma;
+ int64_t sver;
+ int64_t ever;
+
+ // config
+ int64_t commitID;
+
+ // for data file
+ STsdbSnapWriter* pDataWriter[TSDB_RETENTION_L2];
+
+ // for qtaskinfo file
+ SQTaskFReader* pQTaskFReader;
+ SQTaskFWriter* pQTaskFWriter;
+};
+
+int32_t rsmaSnapWriterOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapWriter** ppWriter) {
+ int32_t code = 0;
+ SRsmaSnapWriter* pWriter = NULL;
+
+ // alloc
+ pWriter = (SRsmaSnapWriter*)taosMemoryCalloc(1, sizeof(*pWriter));
+ if (pWriter == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ pWriter->pSma = pSma;
+ pWriter->sver = sver;
+ pWriter->ever = ever;
+
+ for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
+ if (pSma->pRSmaTsdb[i]) {
+ code = tsdbSnapWriterOpen(pSma->pRSmaTsdb[i], sver, ever, &pWriter->pDataWriter[i]);
+ if (code < 0) {
+ goto _err;
+ }
+ }
+ }
+
+ // qtaskinfo
+ // TODO
+
+ *ppWriter = pWriter;
+
+ smaInfo("vgId:%d rsma snapshot writer open succeed", TD_VID(pSma->pVnode));
+ return code;
+
+_err:
+ smaError("vgId:%d rsma snapshot writer open failed since %s", TD_VID(pSma->pVnode), tstrerror(code));
+ *ppWriter = NULL;
+ return code;
+}
+
+int32_t rsmaSnapWriterClose(SRsmaSnapWriter** ppWriter, int8_t rollback) {
+ int32_t code = 0;
+ SRsmaSnapWriter* pWriter = *ppWriter;
+
+ if (rollback) {
+ ASSERT(0);
+ // code = tsdbFSRollback(pWriter->pTsdb->pFS);
+ // if (code) goto _err;
+ } else {
+ for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
+ if (pWriter->pDataWriter[i]) {
+ code = tsdbSnapWriterClose(&pWriter->pDataWriter[i], rollback);
+ if (code) goto _err;
+ }
+ }
+ }
+
+  smaInfo("vgId:%d vnode snapshot rsma writer close succeed", SMA_VID(pWriter->pSma));
+
+  taosMemoryFree(pWriter);  // free only after the final log that dereferences pWriter
+  *ppWriter = NULL;
+  return code;
+
+_err:
+ smaError("vgId:%d vnode snapshot rsma writer close failed since %s", SMA_VID(pWriter->pSma), tstrerror(code));
+ return code;
+}
+
+int32_t rsmaSnapWrite(SRsmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData) {
+ int32_t code = 0;
+ SSnapDataHdr* pHdr = (SSnapDataHdr*)pData;
+
+ // rsma1/rsma2
+ if (pHdr->type == SNAP_DATA_RSMA1) {
+ pHdr->type = SNAP_DATA_TSDB;
+ code = tsdbSnapWrite(pWriter->pDataWriter[0], pData, nData);
+ } else if (pHdr->type == SNAP_DATA_RSMA2) {
+ pHdr->type = SNAP_DATA_TSDB;
+ code = tsdbSnapWrite(pWriter->pDataWriter[1], pData, nData);
+ } else if (pHdr->type == SNAP_DATA_QTASK) {
+ code = rsmaSnapWriteQTaskInfo(pWriter, pData, nData);
+ }
+ if (code < 0) goto _err;
+
+_exit:
+ smaInfo("vgId:%d rsma snapshot write for data %" PRIi8 " succeed", SMA_VID(pWriter->pSma), pHdr->type);
+ return code;
+
+_err:
+ smaError("vgId:%d rsma snapshot write for data %" PRIi8 " failed since %s", SMA_VID(pWriter->pSma), pHdr->type,
+ tstrerror(code));
+ return code;
+}
+
+static int32_t rsmaSnapWriteQTaskInfo(SRsmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData) {
+ int32_t code = 0;
+
+ if (pWriter->pQTaskFWriter == NULL) {
+ // SDelFile* pDelFile = pWriter->fs.pDelFile;
+
+ // // reader
+ // if (pDelFile) {
+ // code = tsdbDelFReaderOpen(&pWriter->pDelFReader, pDelFile, pTsdb, NULL);
+ // if (code) goto _err;
+
+ // code = tsdbReadDelIdx(pWriter->pDelFReader, pWriter->aDelIdxR, NULL);
+ // if (code) goto _err;
+ // }
+
+ // // writer
+ // SDelFile delFile = {.commitID = pWriter->commitID, .offset = 0, .size = 0};
+ // code = tsdbDelFWriterOpen(&pWriter->pDelFWriter, &delFile, pTsdb);
+ // if (code) goto _err;
+ }
+ smaInfo("vgId:%d vnode snapshot rsma write qtaskinfo succeed", SMA_VID(pWriter->pSma));
+_exit:
+ return code;
+
+_err:
+ smaError("vgId:%d vnode snapshot rsma write qtaskinfo failed since %s", SMA_VID(pWriter->pSma), tstrerror(code));
+ return code;
+}
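For orientation, here is a hedged sketch of how a caller might drive the reader defined above; the function name and the snapshot transport are placeholders, and error handling is condensed:

```c
// Hypothetical driver loop over SRsmaSnapReader, based only on the API above.
static int32_t rsmaSnapSendAll(SSma *pSma, int64_t sver, int64_t ever) {
  SRsmaSnapReader *pReader = NULL;
  int32_t          code = rsmaSnapReaderOpen(pSma, sver, ever, &pReader);
  if (code) return code;

  for (;;) {
    uint8_t *pData = NULL;
    code = rsmaSnapRead(pReader, &pData);
    if (code) break;           // read error
    if (pData == NULL) break;  // rsma data and qtaskinfo are exhausted
    // ... hand pData to the snapshot sender here ...
  }

  rsmaSnapReaderClose(&pReader);
  return code;
}
```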
diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c
index 01f2f659ff0e5d3b00ad4f5b14aac4de4ae4052d..6b0e3944e3b3b44828e90cdb9d8d1a521fa468c2 100644
--- a/source/dnode/vnode/src/tq/tq.c
+++ b/source/dnode/vnode/src/tq/tq.c
@@ -653,7 +653,7 @@ int32_t tqProcessTaskDeployReq(STQ* pTq, char* msg, int32_t msgLen) {
} else {
SReadHandle mgHandle = {
.vnode = NULL,
- .numOfVgroups = pTask->numOfVgroups,
+ .numOfVgroups = (int32_t)taosArrayGetSize(pTask->childEpInfo),
};
pTask->exec.executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &mgHandle);
}
diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c
index e259dde29c86a7559d8d4dd5f256a81dc137727b..4e6a450d35d01d7c41a80bbb7ae1fc5d1c21b127 100644
--- a/source/dnode/vnode/src/tsdb/tsdbCache.c
+++ b/source/dnode/vnode/src/tsdb/tsdbCache.c
@@ -46,11 +46,6 @@ void tsdbCloseCache(SLRUCache *pCache) {
}
}
-static void getTableCacheKeyS(tb_uid_t uid, const char *cacheType, char *key, int *len) {
- snprintf(key, 30, "%" PRIi64 "%s", uid, cacheType);
- *len = strlen(key);
-}
-
static void getTableCacheKey(tb_uid_t uid, int cacheType, char *key, int *len) {
if (cacheType == 0) { // last_row
*(uint64_t *)key = (uint64_t)uid;
@@ -245,8 +240,6 @@ int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, STSRow *row, STsdb
char key[32] = {0};
int keyLen = 0;
- // ((void)(row));
-
// getTableCacheKey(uid, "l", key, &keyLen);
getTableCacheKey(uid, 1, key, &keyLen);
LRUHandle *h = taosLRUCacheLookup(pCache, key, keyLen);
@@ -323,26 +316,10 @@ static tb_uid_t getTableSuidByUid(tb_uid_t uid, STsdb *pTsdb) {
static int32_t getTableDelDataFromDelIdx(SDelFReader *pDelReader, SDelIdx *pDelIdx, SArray *aDelData) {
int32_t code = 0;
- // SMapData delDataMap;
- // SDelData delData;
-
if (pDelIdx) {
- // tMapDataReset(&delDataMap);
-
- // code = tsdbReadDelData(pDelReader, pDelIdx, &delDataMap, NULL);
code = tsdbReadDelData(pDelReader, pDelIdx, aDelData, NULL);
- if (code) goto _err;
- /*
- for (int32_t iDelData = 0; iDelData < delDataMap.nItem; ++iDelData) {
- code = tMapDataGetItemByIdx(&delDataMap, iDelData, &delData, tGetDelData);
- if (code) goto _err;
-
- taosArrayPush(aDelData, &delData);
- }
- */
}
-_err:
return code;
}
@@ -444,18 +421,16 @@ typedef struct SFSNextRowIter {
SArray *aDFileSet;
SDataFReader *pDataFReader;
SArray *aBlockIdx;
- // SMapData blockIdxMap;
- // SBlockIdx blockIdx;
- SBlockIdx *pBlockIdx;
- SMapData blockMap;
- int32_t nBlock;
- int32_t iBlock;
- SBlock block;
- SBlockData blockData;
- SBlockData *pBlockData;
- int32_t nRow;
- int32_t iRow;
- TSDBROW row;
+ SBlockIdx *pBlockIdx;
+ SMapData blockMap;
+ int32_t nBlock;
+ int32_t iBlock;
+ SBlock block;
+ SBlockData blockData;
+ SBlockData *pBlockData;
+ int32_t nRow;
+ int32_t iRow;
+ TSDBROW row;
} SFSNextRowIter;
static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) {
@@ -629,41 +604,8 @@ typedef struct SMemNextRowIter {
} SMemNextRowIter;
static int32_t getNextRowFromMem(void *iter, TSDBROW **ppRow) {
- // static int32_t getNextRowFromMem(void *iter, SArray *pRowArray) {
SMemNextRowIter *state = (SMemNextRowIter *)iter;
int32_t code = 0;
- /*
- if (!state->iterOpened) {
- if (state->pMem != NULL) {
- tsdbTbDataIterOpen(state->pMem, NULL, 1, &state->iter);
-
- state->iterOpened = true;
-
- TSDBROW *pMemRow = tsdbTbDataIterGet(&state->iter);
- if (pMemRow) {
- state->curRow = pMemRow;
- } else {
- return code;
- }
- } else {
- return code;
- }
- }
-
- taosArrayPush(pRowArray, state->curRow);
- while (tsdbTbDataIterNext(&state->iter)) {
- TSDBROW *row = tsdbTbDataIterGet(&state->iter);
-
- if (TSDBROW_TS(row) < TSDBROW_TS(state->curRow)) {
- state->curRow = row;
- break;
- } else {
- taosArrayPush(pRowArray, row);
- }
- }
-
- return code;
- */
switch (state->state) {
case SMEMNEXTROW_ENTER: {
if (state->pMem != NULL) {
@@ -702,44 +644,44 @@ _err:
return code;
}
-static int32_t tsRowFromTsdbRow(STSchema *pTSchema, TSDBROW *pRow, STSRow **ppRow) {
- int32_t code = 0;
-
- SColVal *pColVal = &(SColVal){0};
-
- if (pRow->type == 0) {
- *ppRow = tdRowDup(pRow->pTSRow);
- } else {
- SArray *pArray = taosArrayInit(pTSchema->numOfCols, sizeof(SColVal));
- if (pArray == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _exit;
- }
-
- TSDBKEY key = TSDBROW_KEY(pRow);
- STColumn *pTColumn = &pTSchema->columns[0];
- *pColVal = COL_VAL_VALUE(pTColumn->colId, pTColumn->type, (SValue){.ts = key.ts});
-
- if (taosArrayPush(pArray, pColVal) == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _exit;
- }
-
- for (int16_t iCol = 1; iCol < pTSchema->numOfCols; iCol++) {
- tsdbRowGetColVal(pRow, pTSchema, iCol, pColVal);
- if (taosArrayPush(pArray, pColVal) == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _exit;
- }
- }
-
- code = tdSTSRowNew(pArray, pTSchema, ppRow);
- if (code) goto _exit;
- }
-
-_exit:
- return code;
-}
+/* static int32_t tsRowFromTsdbRow(STSchema *pTSchema, TSDBROW *pRow, STSRow **ppRow) { */
+/* int32_t code = 0; */
+
+/* SColVal *pColVal = &(SColVal){0}; */
+
+/* if (pRow->type == 0) { */
+/* *ppRow = tdRowDup(pRow->pTSRow); */
+/* } else { */
+/* SArray *pArray = taosArrayInit(pTSchema->numOfCols, sizeof(SColVal)); */
+/* if (pArray == NULL) { */
+/* code = TSDB_CODE_OUT_OF_MEMORY; */
+/* goto _exit; */
+/* } */
+
+/* TSDBKEY key = TSDBROW_KEY(pRow); */
+/* STColumn *pTColumn = &pTSchema->columns[0]; */
+/* *pColVal = COL_VAL_VALUE(pTColumn->colId, pTColumn->type, (SValue){.ts = key.ts}); */
+
+/* if (taosArrayPush(pArray, pColVal) == NULL) { */
+/* code = TSDB_CODE_OUT_OF_MEMORY; */
+/* goto _exit; */
+/* } */
+
+/* for (int16_t iCol = 1; iCol < pTSchema->numOfCols; iCol++) { */
+/* tsdbRowGetColVal(pRow, pTSchema, iCol, pColVal); */
+/* if (taosArrayPush(pArray, pColVal) == NULL) { */
+/* code = TSDB_CODE_OUT_OF_MEMORY; */
+/* goto _exit; */
+/* } */
+/* } */
+
+/* code = tdSTSRowNew(pArray, pTSchema, ppRow); */
+/* if (code) goto _exit; */
+/* } */
+
+/* _exit: */
+/* return code; */
+/* } */
static bool tsdbKeyDeleted(TSDBKEY *key, SArray *pSkyline, int64_t *iSkyline) {
bool deleted = false;
@@ -768,10 +710,8 @@ static bool tsdbKeyDeleted(TSDBKEY *key, SArray *pSkyline, int64_t *iSkyline) {
}
typedef int32_t (*_next_row_fn_t)(void *iter, TSDBROW **ppRow);
-// typedef int32_t (*_next_row_fn_t)(void *iter, SArray *pRowArray);
typedef int32_t (*_next_row_clear_fn_t)(void *iter);
-// typedef struct TsdbNextRowState {
typedef struct {
TSDBROW *pRow;
bool stop;
@@ -782,7 +722,6 @@ typedef struct {
} TsdbNextRowState;
typedef struct {
- // STsdb *pTsdb;
SArray *pSkyline;
int64_t iSkyline;
@@ -793,10 +732,8 @@ typedef struct {
TSDBROW memRow, imemRow, fsRow;
TsdbNextRowState input[3];
- // SMemTable *pMemTable;
- // SMemTable *pIMemTable;
- STsdbReadSnap *pReadSnap;
- STsdb *pTsdb;
+ STsdbReadSnap *pReadSnap;
+ STsdb *pTsdb;
} CacheNextRowIter;
static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTsdb) {
@@ -967,7 +904,7 @@ _err:
return code;
}
-static int32_t mergeLastRow2(tb_uid_t uid, STsdb *pTsdb, bool *dup, STSRow **ppRow) {
+static int32_t mergeLastRow(tb_uid_t uid, STsdb *pTsdb, bool *dup, STSRow **ppRow) {
int32_t code = 0;
STSchema *pTSchema = metaGetTbTSchema(pTsdb->pVnode->pMeta, uid, -1);
@@ -978,8 +915,6 @@ static int32_t mergeLastRow2(tb_uid_t uid, STsdb *pTsdb, bool *dup, STSRow **ppR
SArray *pColArray = taosArrayInit(nCol, sizeof(SColVal));
SColVal *pColVal = &(SColVal){0};
- // tb_uid_t suid = getTableSuidByUid(uid, pTsdb);
-
TSKEY lastRowTs = TSKEY_MAX;
CacheNextRowIter iter = {0};
@@ -1066,7 +1001,7 @@ _err:
return code;
}
-static int32_t mergeLast2(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray) {
+static int32_t mergeLast(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray) {
int32_t code = 0;
STSchema *pTSchema = metaGetTbTSchema(pTsdb->pVnode->pMeta, uid, -1);
@@ -1077,8 +1012,6 @@ static int32_t mergeLast2(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray) {
SArray *pColArray = taosArrayInit(nCol, sizeof(SLastCol));
SColVal *pColVal = &(SColVal){0};
- // tb_uid_t suid = getTableSuidByUid(uid, pTsdb);
-
TSKEY lastRowTs = TSKEY_MAX;
CacheNextRowIter iter = {0};
@@ -1124,12 +1057,7 @@ static int32_t mergeLast2(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray) {
continue;
}
}
- /*
- if ((TSDBROW_TS(pRow) < lastRowTs)) {
- // goto build the result ts row
- break;
- }
- */
+
// merge into pColArray
setNoneCol = false;
for (iCol = noneCol; iCol < nCol; ++iCol) {
@@ -1139,7 +1067,6 @@ static int32_t mergeLast2(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray) {
tsdbRowGetColVal(pRow, pTSchema, iCol, pColVal);
if ((tColVal->isNone || tColVal->isNull) && (!pColVal->isNone && !pColVal->isNull)) {
taosArraySet(pColArray, iCol, &(SLastCol){.ts = rowTs, .colVal = *pColVal});
- //} else if (tColVal->isNone && pColVal->isNone && !setNoneCol) {
} else if ((tColVal->isNone || tColVal->isNull) && (pColVal->isNone || pColVal->isNull) && !setNoneCol) {
noneCol = iCol;
setNoneCol = true;
@@ -1148,521 +1075,36 @@ static int32_t mergeLast2(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray) {
} while (setNoneCol);
// build the result ts row here
- //*dup = false;
if (taosArrayGetSize(pColArray) <= 0) {
*ppLastArray = NULL;
taosArrayDestroy(pColArray);
} else {
*ppLastArray = pColArray;
}
- /* if (taosArrayGetSize(pColArray) == nCol) {
- code = tdSTSRowNew(pColArray, pTSchema, ppRow);
- if (code) goto _err;
- } else {
- *ppRow = NULL;
- }*/
nextRowIterClose(&iter);
- // taosArrayDestroy(pColArray);
taosMemoryFreeClear(pTSchema);
return code;
_err:
nextRowIterClose(&iter);
- // taosArrayDestroy(pColArray);
taosMemoryFreeClear(pTSchema);
return code;
}
-// static int32_t mergeLastRow(tb_uid_t uid, STsdb *pTsdb, bool *dup, STSRow **ppRow) {
-// int32_t code = 0;
-// SArray *pSkyline = NULL;
-
-// STSchema *pTSchema = metaGetTbTSchema(pTsdb->pVnode->pMeta, uid, -1);
-// int16_t nCol = pTSchema->numOfCols;
-// SArray *pColArray = taosArrayInit(nCol, sizeof(SColVal));
-
-// tb_uid_t suid = getTableSuidByUid(uid, pTsdb);
-
-// STbData *pMem = NULL;
-// if (pTsdb->mem) {
-// tsdbGetTbDataFromMemTable(pTsdb->mem, suid, uid, &pMem);
-// }
-
-// STbData *pIMem = NULL;
-// if (pTsdb->imem) {
-// tsdbGetTbDataFromMemTable(pTsdb->imem, suid, uid, &pIMem);
-// }
-
-// *ppRow = NULL;
-
-// pSkyline = taosArrayInit(32, sizeof(TSDBKEY));
-
-// SDelIdx delIdx;
-
-// SDelFile *pDelFile = tsdbFSStateGetDelFile(pTsdb->pFS->cState);
-// if (pDelFile) {
-// SDelFReader *pDelFReader;
-
-// code = tsdbDelFReaderOpen(&pDelFReader, pDelFile, pTsdb, NULL);
-// if (code) goto _err;
-
-// code = getTableDelIdx(pDelFReader, suid, uid, &delIdx);
-// if (code) goto _err;
-
-// code = getTableDelSkyline(pMem, pIMem, pDelFReader, &delIdx, pSkyline);
-// if (code) goto _err;
-
-// tsdbDelFReaderClose(&pDelFReader);
-// } else {
-// code = getTableDelSkyline(pMem, pIMem, NULL, NULL, pSkyline);
-// if (code) goto _err;
-// }
-
-// int64_t iSkyline = taosArrayGetSize(pSkyline) - 1;
-
-// SBlockIdx idx = {.suid = suid, .uid = uid};
-
-// SFSNextRowIter fsState = {0};
-// fsState.state = SFSNEXTROW_FS;
-// fsState.pTsdb = pTsdb;
-// fsState.pBlockIdxExp = &idx;
-
-// SMemNextRowIter memState = {0};
-// SMemNextRowIter imemState = {0};
-// TSDBROW memRow, imemRow, fsRow;
-
-// TsdbNextRowState input[3] = {{&memRow, true, false, &memState, getNextRowFromMem, NULL},
-// {&imemRow, true, false, &imemState, getNextRowFromMem, NULL},
-// {&fsRow, false, true, &fsState, getNextRowFromFS, clearNextRowFromFS}};
-
-// if (pMem) {
-// memState.pMem = pMem;
-// memState.state = SMEMNEXTROW_ENTER;
-// input[0].stop = false;
-// input[0].next = true;
-// }
-// if (pIMem) {
-// imemState.pMem = pIMem;
-// imemState.state = SMEMNEXTROW_ENTER;
-// input[1].stop = false;
-// input[1].next = true;
-// }
-
-// int16_t nilColCount = nCol - 1; // count of null & none cols
-// int iCol = 0; // index of first nil col index from left to right
-// bool setICol = false;
-
-// do {
-// for (int i = 0; i < 3; ++i) {
-// if (input[i].next && !input[i].stop) {
-// if (input[i].pRow == NULL) {
-// code = input[i].nextRowFn(input[i].iter, &input[i].pRow);
-// if (code) goto _err;
-
-// if (input[i].pRow == NULL) {
-// input[i].stop = true;
-// input[i].next = false;
-// }
-// }
-// }
-// }
-
-// if (input[0].stop && input[1].stop && input[2].stop) {
-// break;
-// }
-
-// // select maxpoint(s) from mem, imem, fs
-// TSDBROW *max[3] = {0};
-// int iMax[3] = {-1, -1, -1};
-// int nMax = 0;
-// TSKEY maxKey = TSKEY_MIN;
-
-// for (int i = 0; i < 3; ++i) {
-// if (!input[i].stop && input[i].pRow != NULL) {
-// TSDBKEY key = TSDBROW_KEY(input[i].pRow);
-
-// // merging & deduplicating on client side
-// if (maxKey <= key.ts) {
-// if (maxKey < key.ts) {
-// nMax = 0;
-// maxKey = key.ts;
-// }
-
-// iMax[nMax] = i;
-// max[nMax++] = input[i].pRow;
-// }
-// }
-// }
-
-// // delete detection
-// TSDBROW *merge[3] = {0};
-// int iMerge[3] = {-1, -1, -1};
-// int nMerge = 0;
-// for (int i = 0; i < nMax; ++i) {
-// TSDBKEY maxKey = TSDBROW_KEY(max[i]);
-
-// bool deleted = tsdbKeyDeleted(&maxKey, pSkyline, &iSkyline);
-// if (!deleted) {
-// iMerge[nMerge] = i;
-// merge[nMerge++] = max[i];
-// }
-
-// input[iMax[i]].next = deleted;
-// }
-
-// // merge if nMerge > 1
-// if (nMerge > 0) {
-// *dup = false;
-
-// if (nMerge == 1) {
-// code = tsRowFromTsdbRow(pTSchema, merge[nMerge - 1], ppRow);
-// if (code) goto _err;
-// } else {
-// // merge 2 or 3 rows
-// SRowMerger merger = {0};
-
-// tRowMergerInit(&merger, merge[0], pTSchema);
-// for (int i = 1; i < nMerge; ++i) {
-// tRowMerge(&merger, merge[i]);
-// }
-// tRowMergerGetRow(&merger, ppRow);
-// tRowMergerClear(&merger);
-// }
-// }
-
-// } while (1);
-
-// for (int i = 0; i < 3; ++i) {
-// if (input[i].nextRowClearFn) {
-// input[i].nextRowClearFn(input[i].iter);
-// }
-// }
-// if (pSkyline) {
-// taosArrayDestroy(pSkyline);
-// }
-// taosMemoryFreeClear(pTSchema);
-
-// return code;
-// _err:
-// for (int i = 0; i < 3; ++i) {
-// if (input[i].nextRowClearFn) {
-// input[i].nextRowClearFn(input[i].iter);
-// }
-// }
-// if (pSkyline) {
-// taosArrayDestroy(pSkyline);
-// }
-// taosMemoryFreeClear(pTSchema);
-// tsdbError("vgId:%d merge last_row failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
-// return code;
-// }
-
-// static int32_t mergeLast(tb_uid_t uid, STsdb *pTsdb, STSRow **ppRow) {
-// static int32_t mergeLast(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray) {
-// int32_t code = 0;
-// SArray *pSkyline = NULL;
-// STSRow *pRow = NULL;
-// STSRow **ppRow = &pRow;
-
-// STSchema *pTSchema = metaGetTbTSchema(pTsdb->pVnode->pMeta, uid, -1);
-// int16_t nCol = pTSchema->numOfCols;
-// // SArray *pColArray = taosArrayInit(nCol, sizeof(SColVal));
-// SArray *pColArray = taosArrayInit(nCol, sizeof(SLastCol));
-
-// tb_uid_t suid = getTableSuidByUid(uid, pTsdb);
-
-// STbData *pMem = NULL;
-// if (pTsdb->mem) {
-// tsdbGetTbDataFromMemTable(pTsdb->mem, suid, uid, &pMem);
-// }
-
-// STbData *pIMem = NULL;
-// if (pTsdb->imem) {
-// tsdbGetTbDataFromMemTable(pTsdb->imem, suid, uid, &pIMem);
-// }
-
-// *ppLastArray = NULL;
-
-// pSkyline = taosArrayInit(32, sizeof(TSDBKEY));
-
-// SDelIdx delIdx;
-
-// SDelFile *pDelFile = tsdbFSStateGetDelFile(pTsdb->pFS->cState);
-// if (pDelFile) {
-// SDelFReader *pDelFReader;
-
-// code = tsdbDelFReaderOpen(&pDelFReader, pDelFile, pTsdb, NULL);
-// if (code) goto _err;
-
-// code = getTableDelIdx(pDelFReader, suid, uid, &delIdx);
-// if (code) goto _err;
-
-// code = getTableDelSkyline(pMem, pIMem, pDelFReader, &delIdx, pSkyline);
-// if (code) goto _err;
-
-// tsdbDelFReaderClose(&pDelFReader);
-// } else {
-// code = getTableDelSkyline(pMem, pIMem, NULL, NULL, pSkyline);
-// if (code) goto _err;
-// }
-
-// int64_t iSkyline = taosArrayGetSize(pSkyline) - 1;
-
-// SBlockIdx idx = {.suid = suid, .uid = uid};
-
-// SFSNextRowIter fsState = {0};
-// fsState.state = SFSNEXTROW_FS;
-// fsState.pTsdb = pTsdb;
-// fsState.pBlockIdxExp = &idx;
-
-// SMemNextRowIter memState = {0};
-// SMemNextRowIter imemState = {0};
-// TSDBROW memRow, imemRow, fsRow;
-
-// TsdbNextRowState input[3] = {{&memRow, true, false, &memState, getNextRowFromMem, NULL},
-// {&imemRow, true, false, &imemState, getNextRowFromMem, NULL},
-// {&fsRow, false, true, &fsState, getNextRowFromFS, clearNextRowFromFS}};
-
-// if (pMem) {
-// memState.pMem = pMem;
-// memState.state = SMEMNEXTROW_ENTER;
-// input[0].stop = false;
-// input[0].next = true;
-// }
-// if (pIMem) {
-// imemState.pMem = pIMem;
-// imemState.state = SMEMNEXTROW_ENTER;
-// input[1].stop = false;
-// input[1].next = true;
-// }
-
-// int16_t nilColCount = nCol - 1; // count of null & none cols
-// int iCol = 0; // index of first nil col index from left to right
-// bool setICol = false;
-
-// do {
-// for (int i = 0; i < 3; ++i) {
-// if (input[i].next && !input[i].stop) {
-// code = input[i].nextRowFn(input[i].iter, &input[i].pRow);
-// if (code) goto _err;
-
-// if (input[i].pRow == NULL) {
-// input[i].stop = true;
-// input[i].next = false;
-// }
-// }
-// }
-
-// if (input[0].stop && input[1].stop && input[2].stop) {
-// break;
-// }
-
-// // select maxpoint(s) from mem, imem, fs
-// TSDBROW *max[3] = {0};
-// int iMax[3] = {-1, -1, -1};
-// int nMax = 0;
-// TSKEY maxKey = TSKEY_MIN;
-
-// for (int i = 0; i < 3; ++i) {
-// if (!input[i].stop && input[i].pRow != NULL) {
-// TSDBKEY key = TSDBROW_KEY(input[i].pRow);
-
-// // merging & deduplicating on client side
-// if (maxKey <= key.ts) {
-// if (maxKey < key.ts) {
-// nMax = 0;
-// maxKey = key.ts;
-// }
-
-// iMax[nMax] = i;
-// max[nMax++] = input[i].pRow;
-// }
-// }
-// }
-
-// // delete detection
-// TSDBROW *merge[3] = {0};
-// int iMerge[3] = {-1, -1, -1};
-// int nMerge = 0;
-// for (int i = 0; i < nMax; ++i) {
-// TSDBKEY maxKey = TSDBROW_KEY(max[i]);
-
-// bool deleted = tsdbKeyDeleted(&maxKey, pSkyline, &iSkyline);
-// if (!deleted) {
-// iMerge[nMerge] = iMax[i];
-// merge[nMerge++] = max[i];
-// }
-
-// input[iMax[i]].next = deleted;
-// }
-
-// // merge if nMerge > 1
-// if (nMerge > 0) {
-// if (nMerge == 1) {
-// code = tsRowFromTsdbRow(pTSchema, merge[nMerge - 1], ppRow);
-// if (code) goto _err;
-// } else {
-// // merge 2 or 3 rows
-// SRowMerger merger = {0};
-
-// tRowMergerInit(&merger, merge[0], pTSchema);
-// for (int i = 1; i < nMerge; ++i) {
-// tRowMerge(&merger, merge[i]);
-// }
-// tRowMergerGetRow(&merger, ppRow);
-// tRowMergerClear(&merger);
-// }
-// } else {
-// /* *ppRow = NULL; */
-// /* return code; */
-// continue;
-// }
-
-// if (iCol == 0) {
-// STColumn *pTColumn = &pTSchema->columns[0];
-// SColVal *pColVal = &(SColVal){0};
-
-// *pColVal = COL_VAL_VALUE(pTColumn->colId, pTColumn->type, (SValue){.ts = maxKey});
-
-// // if (taosArrayPush(pColArray, pColVal) == NULL) {
-// if (taosArrayPush(pColArray, &(SLastCol){.ts = maxKey, .colVal = *pColVal}) == NULL) {
-// code = TSDB_CODE_OUT_OF_MEMORY;
-// goto _err;
-// }
-
-// ++iCol;
-
-// setICol = false;
-// for (int16_t i = iCol; i < nCol; ++i) {
-// // tsdbRowGetColVal(*ppRow, pTSchema, i, pColVal);
-// tTSRowGetVal(*ppRow, pTSchema, i, pColVal);
-// // if (taosArrayPush(pColArray, pColVal) == NULL) {
-// if (taosArrayPush(pColArray, &(SLastCol){.ts = maxKey, .colVal = *pColVal}) == NULL) {
-// code = TSDB_CODE_OUT_OF_MEMORY;
-// goto _err;
-// }
-
-// if (pColVal->isNull || pColVal->isNone) {
-// for (int j = 0; j < nMerge; ++j) {
-// SColVal jColVal = {0};
-// tsdbRowGetColVal(merge[j], pTSchema, i, &jColVal);
-// if (jColVal.isNull || jColVal.isNone) {
-// input[iMerge[j]].next = true;
-// }
-// }
-// if (!setICol) {
-// iCol = i;
-// setICol = true;
-// }
-// } else {
-// --nilColCount;
-// }
-// }
-
-// if (*ppRow) {
-// taosMemoryFreeClear(*ppRow);
-// }
-
-// continue;
-// }
-
-// setICol = false;
-// for (int16_t i = iCol; i < nCol; ++i) {
-// SColVal colVal = {0};
-// tTSRowGetVal(*ppRow, pTSchema, i, &colVal);
-// TSKEY rowTs = (*ppRow)->ts;
-
-// // SColVal *tColVal = (SColVal *)taosArrayGet(pColArray, i);
-// SLastCol *tTsVal = (SLastCol *)taosArrayGet(pColArray, i);
-// SColVal *tColVal = &tTsVal->colVal;
-
-// if (!colVal.isNone && !colVal.isNull) {
-// if (tColVal->isNull || tColVal->isNone) {
-// // taosArraySet(pColArray, i, &colVal);
-// taosArraySet(pColArray, i, &(SLastCol){.ts = rowTs, .colVal = colVal});
-// --nilColCount;
-// }
-// } else {
-// if ((tColVal->isNull || tColVal->isNone) && !setICol) {
-// iCol = i;
-// setICol = true;
-
-// for (int j = 0; j < nMerge; ++j) {
-// SColVal jColVal = {0};
-// tsdbRowGetColVal(merge[j], pTSchema, i, &jColVal);
-// if (jColVal.isNull || jColVal.isNone) {
-// input[iMerge[j]].next = true;
-// }
-// }
-// }
-// }
-// }
-
-// if (*ppRow) {
-// taosMemoryFreeClear(*ppRow);
-// }
-// } while (nilColCount > 0);
-
-// // if () new ts row from pColArray if non empty
-// /* if (taosArrayGetSize(pColArray) == nCol) { */
-// /* code = tdSTSRowNew(pColArray, pTSchema, ppRow); */
-// /* if (code) goto _err; */
-// /* } */
-// /* taosArrayDestroy(pColArray); */
-// if (taosArrayGetSize(pColArray) <= 0) {
-// *ppLastArray = NULL;
-// taosArrayDestroy(pColArray);
-// } else {
-// *ppLastArray = pColArray;
-// }
-// if (*ppRow) {
-// taosMemoryFreeClear(*ppRow);
-// }
-
-// for (int i = 0; i < 3; ++i) {
-// if (input[i].nextRowClearFn) {
-// input[i].nextRowClearFn(input[i].iter);
-// }
-// }
-// if (pSkyline) {
-// taosArrayDestroy(pSkyline);
-// }
-// taosMemoryFreeClear(pTSchema);
-
-// return code;
-// _err:
-// taosArrayDestroy(pColArray);
-// if (*ppRow) {
-// taosMemoryFreeClear(*ppRow);
-// }
-// for (int i = 0; i < 3; ++i) {
-// if (input[i].nextRowClearFn) {
-// input[i].nextRowClearFn(input[i].iter);
-// }
-// }
-// if (pSkyline) {
-// taosArrayDestroy(pSkyline);
-// }
-// taosMemoryFreeClear(pTSchema);
-// tsdbError("vgId:%d merge last_row failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
-// return code;
-// }
-
int32_t tsdbCacheGetLastrowH(SLRUCache *pCache, tb_uid_t uid, STsdb *pTsdb, LRUHandle **handle) {
int32_t code = 0;
char key[32] = {0};
int keyLen = 0;
- // getTableCacheKey(uid, "lr", key, &keyLen);
+ // getTableCacheKeyS(uid, "lr", key, &keyLen);
getTableCacheKey(uid, 0, key, &keyLen);
LRUHandle *h = taosLRUCacheLookup(pCache, key, keyLen);
if (h) {
- //*ppRow = (STSRow *)taosLRUCacheValue(pCache, h);
} else {
STSRow *pRow = NULL;
bool dup = false; // which is always false for now
- code = mergeLastRow2(uid, pTsdb, &dup, &pRow);
+ code = mergeLastRow(uid, pTsdb, &dup, &pRow);
// if table's empty or error, return code of -1
if (code < 0 || pRow == NULL) {
if (!dup && pRow) {
@@ -1680,9 +1122,7 @@ int32_t tsdbCacheGetLastrowH(SLRUCache *pCache, tb_uid_t uid, STsdb *pTsdb, LRUH
code = -1;
}
- // tsdbCacheInsertLastrow(pCache, pTsdb, uid, pRow, dup);
h = taosLRUCacheLookup(pCache, key, keyLen);
- //*ppRow = (STSRow *)taosLRUCacheValue(pCache, h);
}
*handle = h;
@@ -1719,18 +1159,13 @@ int32_t tsdbCacheGetLastH(SLRUCache *pCache, tb_uid_t uid, STsdb *pTsdb, LRUHand
char key[32] = {0};
int keyLen = 0;
- // getTableCacheKey(uid, "l", key, &keyLen);
+ // getTableCacheKeyS(uid, "l", key, &keyLen);
getTableCacheKey(uid, 1, key, &keyLen);
LRUHandle *h = taosLRUCacheLookup(pCache, key, keyLen);
if (h) {
- //*ppRow = (STSRow *)taosLRUCacheValue(pCache, h);
-
} else {
- // STSRow *pRow = NULL;
- // code = mergeLast(uid, pTsdb, &pRow);
SArray *pLastArray = NULL;
- // code = mergeLast(uid, pTsdb, &pLastArray);
- code = mergeLast2(uid, pTsdb, &pLastArray);
+ code = mergeLast(uid, pTsdb, &pLastArray);
// if table's empty or error, return code of -1
// if (code < 0 || pRow == NULL) {
if (code < 0 || pLastArray == NULL) {
@@ -1746,7 +1181,6 @@ int32_t tsdbCacheGetLastH(SLRUCache *pCache, tb_uid_t uid, STsdb *pTsdb, LRUHand
}
h = taosLRUCacheLookup(pCache, key, keyLen);
- //*ppRow = (STSRow *)taosLRUCacheValue(pCache, h);
}
*handle = h;
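Note on the tsdbCache.c changes above: both tsdbCacheGetLastrowH and tsdbCacheGetLastH follow a lookup-or-build pattern — probe the LRU cache, run the expensive merge only on a miss, insert the result, then re-probe so the caller always receives a pinned handle. A toy sketch of that shape, using a one-slot stand-in rather than the real SLRUCache API:

    #include <stdint.h>
    #include <stdlib.h>

    typedef struct { uint64_t key; void *value; int pinned; int used; } Slot;
    typedef struct { Slot slot; } TinyCache;   /* one-entry stand-in for the LRU */

    static Slot *cacheLookup(TinyCache *c, uint64_t key) {
      if (c->slot.used && c->slot.key == key) { c->slot.pinned++; return &c->slot; }
      return NULL;
    }

    static void cacheInsert(TinyCache *c, uint64_t key, void *value) {
      c->slot = (Slot){.key = key, .value = value, .used = 1};
    }

    static void *mergeRows(uint64_t uid) {     /* stands in for mergeLastRow() */
      int *v = malloc(sizeof(int));
      if (v) *v = (int)uid;
      return v;
    }

    static Slot *getLastRowHandle(TinyCache *c, uint64_t uid) {
      Slot *h = cacheLookup(c, uid);
      if (h == NULL) {
        void *row = mergeRows(uid);            /* expensive merge only on a miss */
        cacheInsert(c, uid, row);
        h = cacheLookup(c, uid);               /* re-probe to get a pinned handle */
      }
      return h;
    }

    int main(void) {
      TinyCache cache = {0};
      Slot *h1 = getLastRowHandle(&cache, 42); /* miss: builds and inserts */
      Slot *h2 = getLastRowHandle(&cache, 42); /* hit: pins the cached slot */
      return (h1 == h2) ? 0 : 1;
    }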
diff --git a/source/dnode/vnode/src/vnd/vnodeQuery.c b/source/dnode/vnode/src/vnd/vnodeQuery.c
index 1c3e2f0514df46769786b05b7e529ab4913b5cea..71b9d70518947ba07253122f2f83c6841bb2b57f 100644
--- a/source/dnode/vnode/src/vnd/vnodeQuery.c
+++ b/source/dnode/vnode/src/vnd/vnodeQuery.c
@@ -21,7 +21,7 @@ int vnodeQueryOpen(SVnode *pVnode) {
void vnodeQueryClose(SVnode *pVnode) { qWorkerDestroy((void **)&pVnode->pQuery); }
-int vnodeGetTableMeta(SVnode *pVnode, SRpcMsg *pMsg) {
+int vnodeGetTableMeta(SVnode *pVnode, SRpcMsg *pMsg, bool direct) {
STableInfoReq infoReq = {0};
STableMetaRsp metaRsp = {0};
SMetaReader mer1 = {0};
@@ -99,7 +99,12 @@ int vnodeGetTableMeta(SVnode *pVnode, SRpcMsg *pMsg) {
goto _exit;
}
- pRsp = rpcMallocCont(rspLen);
+ if (direct) {
+ pRsp = rpcMallocCont(rspLen);
+ } else {
+ pRsp = taosMemoryCalloc(1, rspLen);
+ }
+
if (pRsp == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
@@ -117,15 +122,19 @@ _exit:
qError("get table %s meta failed cause of %s", infoReq.tbName, tstrerror(code));
}
- tmsgSendRsp(&rpcMsg);
-
+ if (direct) {
+ tmsgSendRsp(&rpcMsg);
+ } else {
+ *pMsg = rpcMsg;
+ }
+
taosMemoryFree(metaRsp.pSchemas);
metaReaderClear(&mer2);
metaReaderClear(&mer1);
return TSDB_CODE_SUCCESS;
}
-int vnodeGetTableCfg(SVnode *pVnode, SRpcMsg *pMsg) {
+int vnodeGetTableCfg(SVnode *pVnode, SRpcMsg *pMsg, bool direct) {
STableCfgReq cfgReq = {0};
STableCfgRsp cfgRsp = {0};
SMetaReader mer1 = {0};
@@ -209,7 +218,12 @@ int vnodeGetTableCfg(SVnode *pVnode, SRpcMsg *pMsg) {
goto _exit;
}
- pRsp = rpcMallocCont(rspLen);
+ if (direct) {
+ pRsp = rpcMallocCont(rspLen);
+ } else {
+ pRsp = taosMemoryCalloc(1, rspLen);
+ }
+
if (pRsp == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
@@ -227,14 +241,124 @@ _exit:
qError("get table %s cfg failed cause of %s", cfgReq.tbName, tstrerror(code));
}
- tmsgSendRsp(&rpcMsg);
-
+ if (direct) {
+ tmsgSendRsp(&rpcMsg);
+ } else {
+ *pMsg = rpcMsg;
+ }
+
tFreeSTableCfgRsp(&cfgRsp);
metaReaderClear(&mer2);
metaReaderClear(&mer1);
return TSDB_CODE_SUCCESS;
}
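Note on the `direct` flag threaded through vnodeGetTableMeta/vnodeGetTableCfg above: the same handler either sends its reply on the wire immediately or hands the assembled message back to the caller, which is what lets vnodeGetBatchMeta below embed individual replies in one batched response. A minimal sketch of that dual-mode pattern, with hypothetical types rather than the real SRpcMsg:

    #include <stddef.h>
    #include <stdio.h>

    typedef struct { void *pCont; int contLen; } Msg;

    static void sendRsp(const Msg *m) {           /* tmsgSendRsp() stand-in */
      printf("sent %d bytes\n", m->contLen);
    }

    static int handleTableMeta(Msg *pMsg, int direct) {
      Msg rsp = {.pCont = NULL, .contLen = 8};    /* pretend we built a reply */
      if (direct) {
        sendRsp(&rsp);                            /* normal single-request path */
      } else {
        *pMsg = rsp;                              /* batch path: caller packs it */
      }
      return 0;
    }

    int main(void) {
      Msg req = {0};
      handleTableMeta(&req, 1);                   /* replies on the wire */
      handleTableMeta(&req, 0);                   /* reply handed back in req */
      return req.contLen == 8 ? 0 : 1;
    }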
+int32_t vnodeGetBatchMeta(SVnode *pVnode, SRpcMsg *pMsg) {
+ int32_t code = 0;
+ int32_t offset = 0;
+ int32_t rspSize = 0;
+ SBatchReq *batchReq = (SBatchReq*)pMsg->pCont;
+ int32_t msgNum = ntohl(batchReq->msgNum);
+ offset += sizeof(SBatchReq);
+ SBatchMsg req = {0};
+ SBatchRsp rsp = {0};
+ SRpcMsg reqMsg = *pMsg;
+ SRpcMsg rspMsg = {0};
+ void* pRsp = NULL;
+
+ SArray* batchRsp = taosArrayInit(msgNum, sizeof(SBatchRsp));
+ if (NULL == batchRsp) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
+ }
+
+ for (int32_t i = 0; i < msgNum; ++i) {
+ req.msgType = ntohl(*(int32_t*)((char*)pMsg->pCont + offset));
+ offset += sizeof(req.msgType);
+
+ req.msgLen = ntohl(*(int32_t*)((char*)pMsg->pCont + offset));
+ offset += sizeof(req.msgLen);
+
+ req.msg = (char*)pMsg->pCont + offset;
+ offset += req.msgLen;
+
+ reqMsg.msgType = req.msgType;
+ reqMsg.pCont = req.msg;
+ reqMsg.contLen = req.msgLen;
+
+ switch (req.msgType) {
+ case TDMT_VND_TABLE_META:
+ vnodeGetTableMeta(pVnode, &reqMsg, false);
+ break;
+ case TDMT_VND_TABLE_CFG:
+ vnodeGetTableCfg(pVnode, &reqMsg, false);
+ break;
+ default:
+ qError("invalid req msgType %d", req.msgType);
+ reqMsg.code = TSDB_CODE_INVALID_MSG;
+ reqMsg.pCont = NULL;
+ reqMsg.contLen = 0;
+ break;
+ }
+
+ rsp.reqType = reqMsg.msgType;
+ rsp.msgLen = reqMsg.contLen;
+ rsp.rspCode = reqMsg.code;
+ rsp.msg = reqMsg.pCont;
+
+ taosArrayPush(batchRsp, &rsp);
+
+ rspSize += sizeof(rsp) + rsp.msgLen - POINTER_BYTES;
+ }
+
+ rspSize += sizeof(int32_t);
+ offset = 0;
+
+ pRsp = rpcMallocCont(rspSize);
+ if (pRsp == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
+ }
+
+ *(int32_t*)((char*)pRsp + offset) = htonl(msgNum);
+ offset += sizeof(msgNum);
+ for (int32_t i = 0; i < msgNum; ++i) {
+ SBatchRsp *p = taosArrayGet(batchRsp, i);
+
+ *(int32_t*)((char*)pRsp + offset) = htonl(p->reqType);
+ offset += sizeof(p->reqType);
+ *(int32_t*)((char*)pRsp + offset) = htonl(p->msgLen);
+ offset += sizeof(p->msgLen);
+ *(int32_t*)((char*)pRsp + offset) = htonl(p->rspCode);
+ offset += sizeof(p->rspCode);
+ memcpy((char*)pRsp + offset, p->msg, p->msgLen);
+ offset += p->msgLen;
+
+ taosMemoryFreeClear(p->msg);
+ }
+
+ taosArrayDestroy(batchRsp);
+ batchRsp = NULL;
+
+_exit:
+
+ rspMsg.info = pMsg->info;
+ rspMsg.pCont = pRsp;
+ rspMsg.contLen = rspSize;
+ rspMsg.code = code;
+ rspMsg.msgType = pMsg->msgType;
+
+ if (code) {
+ qError("vnd get batch meta failed cause of %s", tstrerror(code));
+ }
+
+ taosArrayDestroyEx(batchRsp, tFreeSBatchRsp);
+
+ tmsgSendRsp(&rspMsg);
+
+ return code;
+}
+
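For reference, the reply assembled by vnodeGetBatchMeta above uses the layout [int32 msgNum] followed by msgNum records of [int32 reqType][int32 msgLen][int32 rspCode][msgLen payload bytes], all integers in network byte order. A hedged decoder sketch for that layout — readI32 and decodeBatchRsp are hypothetical helpers mirroring what ctgHandleBatchRsp does on the client side:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <string.h>

    typedef struct {
      int32_t     reqType;
      int32_t     msgLen;
      int32_t     rspCode;
      const char *msg;       /* points into the reply buffer, not a copy */
    } Rec;

    static int32_t readI32(const char *buf, int32_t *off) {
      uint32_t v;
      memcpy(&v, buf + *off, sizeof(v));
      *off += (int32_t)sizeof(v);
      return (int32_t)ntohl(v);
    }

    static int32_t decodeBatchRsp(const char *buf, int32_t len, Rec *out, int32_t cap) {
      int32_t off = 0;
      int32_t n = readI32(buf, &off);            /* msgNum */
      for (int32_t i = 0; i < n && i < cap; ++i) {
        out[i].reqType = readI32(buf, &off);
        out[i].msgLen  = readI32(buf, &off);
        out[i].rspCode = readI32(buf, &off);
        out[i].msg     = buf + off;
        off += out[i].msgLen;
        if (off > len) return -1;                /* truncated reply */
      }
      return n;
    }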
int32_t vnodeGetLoad(SVnode *pVnode, SVnodeLoad *pLoad) {
pLoad->vgId = TD_VID(pVnode);
pLoad->syncState = syncGetMyRole(pVnode->sync);
diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c
index b5bf92ee7589d6a8a943a84179921504579a74e1..a83e1ab85b0b0f7172eafadf0f94ebc3777102ca 100644
--- a/source/dnode/vnode/src/vnd/vnodeSvr.c
+++ b/source/dnode/vnode/src/vnd/vnodeSvr.c
@@ -298,7 +298,7 @@ int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) {
int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
vTrace("message in fetch queue is processing");
if ((pMsg->msgType == TDMT_SCH_FETCH || pMsg->msgType == TDMT_VND_TABLE_META ||
- pMsg->msgType == TDMT_VND_TABLE_CFG) &&
+ pMsg->msgType == TDMT_VND_TABLE_CFG || pMsg->msgType == TDMT_VND_BATCH_META) &&
!vnodeIsLeader(pVnode)) {
vnodeRedirectRpcMsg(pVnode, pMsg);
return 0;
@@ -320,9 +320,11 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
case TDMT_SCH_QUERY_HEARTBEAT:
return qWorkerProcessHbMsg(pVnode, pVnode->pQuery, pMsg, 0);
case TDMT_VND_TABLE_META:
- return vnodeGetTableMeta(pVnode, pMsg);
+ return vnodeGetTableMeta(pVnode, pMsg, true);
case TDMT_VND_TABLE_CFG:
- return vnodeGetTableCfg(pVnode, pMsg);
+ return vnodeGetTableCfg(pVnode, pMsg, true);
+ case TDMT_VND_BATCH_META:
+ return vnodeGetBatchMeta(pVnode, pMsg);
case TDMT_VND_CONSUME:
return tqProcessPollReq(pVnode->pTq, pMsg);
case TDMT_STREAM_TASK_RUN:
diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c
index 4323fa0aff0955c9b176dfeddac37f5c21cdcfd5..a0e5071685bb0870c92cf7be968772ee304f7234 100644
--- a/source/dnode/vnode/src/vnd/vnodeSync.c
+++ b/source/dnode/vnode/src/vnd/vnodeSync.c
@@ -281,8 +281,8 @@ void vnodeApplyWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
for (int32_t i = 0; i < numOfMsgs; ++i) {
if (taosGetQitem(qall, (void **)&pMsg) == 0) continue;
const STraceId *trace = &pMsg->info.traceId;
- vGTrace("vgId:%d, msg:%p get from vnode-apply queue, type:%s handle:%p", vgId, pMsg, TMSG_INFO(pMsg->msgType),
- pMsg->info.handle);
+ vGInfo("vgId:%d, msg:%p get from vnode-apply queue, type:%s handle:%p index:%" PRId64, vgId, pMsg,
+ TMSG_INFO(pMsg->msgType), pMsg->info.handle, pMsg->info.conn.applyIndex);
SRpcMsg rsp = {.code = pMsg->code, .info = pMsg->info};
if (rsp.code == 0) {
@@ -503,9 +503,6 @@ static void vnodeSyncReconfig(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SReCon
static void vnodeSyncCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
if (cbMeta.isWeak == 0) {
SVnode *pVnode = pFsm->data;
- vTrace("vgId:%d, commit-cb is excuted, fsm:%p, index:%" PRId64 ", isWeak:%d, code:%d, state:%d %s, msgtype:%d %s",
- syncGetVgId(pVnode->sync), pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state,
- syncUtilState2String(cbMeta.state), pMsg->msgType, TMSG_INFO(pMsg->msgType));
if (cbMeta.code == 0) {
SRpcMsg rpcMsg = {.msgType = pMsg->msgType, .contLen = pMsg->contLen};
@@ -514,11 +511,17 @@ static void vnodeSyncCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta c
syncGetAndDelRespRpc(pVnode->sync, cbMeta.seqNum, &rpcMsg.info);
rpcMsg.info.conn.applyIndex = cbMeta.index;
rpcMsg.info.conn.applyTerm = cbMeta.term;
+
+ vInfo("vgId:%d, commit-cb is executed, fsm:%p, index:%" PRId64 ", term:%" PRIu64 ", msg-index:%" PRId64
+ ", isWeak:%d, code:%d, state:%d %s, msgtype:%d %s",
+ syncGetVgId(pVnode->sync), pFsm, cbMeta.index, cbMeta.term, rpcMsg.info.conn.applyIndex, cbMeta.isWeak,
+ cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state), pMsg->msgType, TMSG_INFO(pMsg->msgType));
+
tmsgPutToQueue(&pVnode->msgCb, APPLY_QUEUE, &rpcMsg);
} else {
SRpcMsg rsp = {.code = cbMeta.code, .info = pMsg->info};
- vError("vgId:%d, sync commit error, msgtype:%d,%s, error:0x%X, errmsg:%s", syncGetVgId(pVnode->sync),
- pMsg->msgType, TMSG_INFO(pMsg->msgType), cbMeta.code, tstrerror(cbMeta.code));
+ vError("vgId:%d, sync commit error, msgtype:%d,%s, index:%" PRId64 ", error:0x%X, errmsg:%s", syncGetVgId(pVnode->sync),
+ pMsg->msgType, TMSG_INFO(pMsg->msgType), cbMeta.index, cbMeta.code, tstrerror(cbMeta.code));
if (rsp.info.handle != NULL) {
tmsgSendRsp(&rsp);
}
diff --git a/source/libs/catalog/inc/catalogInt.h b/source/libs/catalog/inc/catalogInt.h
index bf3bc1f0f4937c3c3098ea8573c4ddbdfae42156..1aaa1ecfd723d573e885975fdd3dfd5afd4ba55f 100644
--- a/source/libs/catalog/inc/catalogInt.h
+++ b/source/libs/catalog/inc/catalogInt.h
@@ -31,6 +31,7 @@ extern "C" {
#define CTG_DEFAULT_RENT_SECOND 10
#define CTG_DEFAULT_RENT_SLOT_SIZE 10
#define CTG_DEFAULT_MAX_RETRY_TIMES 3
+#define CTG_DEFAULT_BATCH_NUM 64
#define CTG_RENT_SLOT_SECOND 1.5
@@ -38,6 +39,8 @@ extern "C" {
#define CTG_ERR_CODE_TABLE_NOT_EXIST TSDB_CODE_PAR_TABLE_NOT_EXIST
+#define CTG_BATCH_FETCH 1
+
enum {
CTG_READ = 1,
CTG_WRITE,
@@ -200,8 +203,20 @@ typedef struct SCatalog {
SCtgRentMgmt stbRent;
} SCatalog;
+typedef struct SCtgBatch {
+ int32_t batchId;
+ int32_t msgType;
+ int32_t msgSize;
+ SArray* pMsgs;
+ SRequestConnInfo conn;
+ char dbFName[TSDB_DB_FNAME_LEN];
+ SArray* pTaskIds;
+} SCtgBatch;
+
typedef struct SCtgJob {
int64_t refId;
+ int32_t batchId;
+ SHashObj* pBatchs;
SArray* pTasks;
int32_t taskDone;
SMetaData jobRes;
@@ -236,6 +251,16 @@ typedef struct SCtgMsgCtx {
char* target;
} SCtgMsgCtx;
+
+typedef struct SCtgTaskCallbackParam {
+ uint64_t queryId;
+ int64_t refId;
+ SArray* taskId;
+ int32_t reqType;
+ int32_t batchId;
+} SCtgTaskCallbackParam;
+
+
typedef struct SCtgTask SCtgTask;
typedef int32_t (*ctgSubTaskCbFp)(SCtgTask*);
@@ -258,6 +283,7 @@ typedef struct SCtgTask {
SRWLatch lock;
SArray* pParents;
SCtgSubRes subRes;
+ SHashObj* pBatchs;
} SCtgTask;
typedef int32_t (*ctgInitTaskFp)(SCtgJob*, int32_t, void*);
@@ -618,6 +644,7 @@ int32_t ctgGetTbMetaFromVnode(SCatalog* pCtg, SRequestConnInfo *pConn, const SNa
int32_t ctgGetTableCfgFromVnode(SCatalog* pCtg, SRequestConnInfo *pConn, const SName* pTableName, SVgroupInfo *vgroupInfo, STableCfg **out, SCtgTask* pTask);
int32_t ctgGetTableCfgFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const SName* pTableName, STableCfg **out, SCtgTask* pTask);
int32_t ctgGetSvrVerFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, char **out, SCtgTask* pTask);
+int32_t ctgLaunchBatchs(SCatalog* pCtg, SCtgJob *pJob, SHashObj* pBatchs);
int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgJob** job, const SCatalogReq* pReq, catalogCallback fp, void* param);
int32_t ctgLaunchJob(SCtgJob *pJob);
@@ -626,6 +653,9 @@ int32_t ctgLaunchSubTask(SCtgTask *pTask, CTG_TASK_TYPE type, ctgSubTaskCbFp fp,
int32_t ctgGetTbCfgCb(SCtgTask *pTask);
void ctgFreeHandle(SCatalog* pCatalog);
+void ctgFreeMsgSendParam(void* param);
+void ctgFreeBatch(SCtgBatch *pBatch);
+void ctgFreeBatchs(SHashObj *pBatchs);
int32_t ctgCloneVgInfo(SDBVgInfo *src, SDBVgInfo **dst);
int32_t ctgCloneMetaOutput(STableMetaOutput *output, STableMetaOutput **pOutput);
int32_t ctgGenerateVgList(SCatalog *pCtg, SHashObj *vgHash, SArray** pList);
@@ -642,7 +672,7 @@ int32_t ctgDbVgVersionSearchCompare(const void* key1, const void* key2);
void ctgFreeSTableMetaOutput(STableMetaOutput* pOutput);
int32_t ctgUpdateMsgCtx(SCtgMsgCtx* pCtx, int32_t reqType, void* out, char* target);
char * ctgTaskTypeStr(CTG_TASK_TYPE type);
-int32_t ctgUpdateSendTargetInfo(SMsgSendInfo *pMsgSendInfo, int32_t msgType, SCtgTask* pTask);
+int32_t ctgUpdateSendTargetInfo(SMsgSendInfo *pMsgSendInfo, int32_t msgType, char* dbFName, int32_t vgId);
int32_t ctgCloneTableIndex(SArray* pIndex, SArray** pRes);
void ctgFreeSTableIndex(void *info);
void ctgClearSubTaskRes(SCtgSubRes *pRes);
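Note on SCtgBatch above: the catalog groups outgoing meta requests by destination vgroup id, each group carrying its own message list and the ids of the tasks waiting on it (mnode-bound requests share the pseudo key 0). A toy grouping table illustrating that bookkeeping — the real code keys an SHashObj by vgId:

    #include <stdio.h>

    #define MAX_GROUPS 8
    typedef struct { int vgId; int nMsgs; int used; } Group;

    static Group *groupFor(Group *tbl, int vgId) {
      for (int i = 0; i < MAX_GROUPS; ++i)
        if (tbl[i].used && tbl[i].vgId == vgId) return &tbl[i];
      for (int i = 0; i < MAX_GROUPS; ++i)
        if (!tbl[i].used) { tbl[i] = (Group){.vgId = vgId, .used = 1}; return &tbl[i]; }
      return NULL;                              /* table full */
    }

    int main(void) {
      Group tbl[MAX_GROUPS] = {0};
      int   reqVgIds[] = {2, 5, 2, 2, 5};       /* five meta requests */
      for (int i = 0; i < 5; ++i) {
        Group *g = groupFor(tbl, reqVgIds[i]);
        if (g) g->nMsgs++;                      /* append to group, don't send */
      }
      for (int i = 0; i < MAX_GROUPS; ++i)
        if (tbl[i].used) printf("vgId %d: %d msgs in one batch\n", tbl[i].vgId, tbl[i].nMsgs);
      return 0;                                 /* vgId 2: 3 msgs, vgId 5: 2 msgs */
    }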
diff --git a/source/libs/catalog/inc/ctgRemote.h b/source/libs/catalog/inc/ctgRemote.h
index cd88863c1b9b306b264cd05986f0a7e3a6b814d8..fe0762a88af49d8ed8ee873f03b719b0bc169a79 100644
--- a/source/libs/catalog/inc/ctgRemote.h
+++ b/source/libs/catalog/inc/ctgRemote.h
@@ -20,12 +20,6 @@
extern "C" {
#endif
-typedef struct SCtgTaskCallbackParam {
- uint64_t queryId;
- int64_t refId;
- uint64_t taskId;
- int32_t reqType;
-} SCtgTaskCallbackParam;
#ifdef __cplusplus
diff --git a/source/libs/catalog/src/ctgAsync.c b/source/libs/catalog/src/ctgAsync.c
index 920acbac2e13b40d66085c0a4310490e6d4cd026..f4cee13ec0e6f8ad67cf01e557bd7bcce3aa1f91 100644
--- a/source/libs/catalog/src/ctgAsync.c
+++ b/source/libs/catalog/src/ctgAsync.c
@@ -473,8 +473,15 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgJob** job, const
pJob->tbCfgNum = tbCfgNum;
pJob->svrVerNum = svrVerNum;
- pJob->pTasks = taosArrayInit(taskNum, sizeof(SCtgTask));
+#if CTG_BATCH_FETCH
+ pJob->pBatchs = taosHashInit(CTG_DEFAULT_BATCH_NUM, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
+ if (NULL == pJob->pBatchs) {
+ ctgError("taosHashInit %d batch failed", CTG_DEFAULT_BATCH_NUM);
+ CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
+ }
+#endif
+ pJob->pTasks = taosArrayInit(taskNum, sizeof(SCtgTask));
if (NULL == pJob->pTasks) {
ctgError("taosArrayInit %d tasks failed", taskNum);
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
@@ -560,7 +567,7 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgJob** job, const
_return:
- taosMemoryFreeClear(*job);
+ ctgFreeJob(*job);
CTG_RET(code);
}
@@ -776,7 +783,8 @@ int32_t ctgCallSubCb(SCtgTask *pTask) {
pParent->subRes.code = code;
}
}
-
+
+ pParent->pBatchs = pTask->pBatchs;
CTG_ERR_JRET(pParent->subRes.fp(pParent));
}
@@ -872,7 +880,7 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *
SVgroupInfo vgInfo = {0};
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, ctx->pName, &vgInfo));
- ctgDebug("will refresh tbmeta, not supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(ctx->pName), ctx->flag);
+ ctgDebug("will refresh tbmeta, supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(ctx->pName), ctx->flag);
ctx->vgId = vgInfo.vgId;
CTG_ERR_JRET(ctgGetTbMetaFromVnode(pCtg, pConn, ctx->pName, &vgInfo, NULL, pTask));
@@ -890,7 +898,7 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *
return TSDB_CODE_SUCCESS;
}
- ctgError("no tbmeta got, tbNmae:%s", tNameGetTableName(ctx->pName));
+ ctgError("no tbmeta got, tbName:%s", tNameGetTableName(ctx->pName));
ctgRemoveTbMetaFromCache(pCtg, ctx->pName, false);
CTG_ERR_JRET(CTG_ERR_CODE_TABLE_NOT_EXIST);
@@ -1653,6 +1661,7 @@ int32_t ctgSetSubTaskCb(SCtgTask *pSub, SCtgTask *pTask) {
if (CTG_TASK_DONE == pSub->status) {
pTask->subRes.code = pSub->code;
CTG_ERR_JRET((*gCtgAsyncFps[pTask->type].cloneFp)(pSub, &pTask->subRes.res));
+ pTask->pBatchs = pSub->pBatchs;
CTG_ERR_JRET(pTask->subRes.fp(pTask));
} else {
if (NULL == pSub->pParents) {
@@ -1690,6 +1699,7 @@ int32_t ctgLaunchSubTask(SCtgTask *pTask, CTG_TASK_TYPE type, ctgSubTaskCbFp fp,
CTG_ERR_RET(ctgSetSubTaskCb(pSub, pTask));
if (newTask) {
+ pSub->pBatchs = pTask->pBatchs;
CTG_ERR_RET((*gCtgAsyncFps[pSub->type].launchFp)(pSub));
pSub->status = CTG_TASK_LAUNCHED;
}
@@ -1702,9 +1712,11 @@ int32_t ctgLaunchJob(SCtgJob *pJob) {
for (int32_t i = 0; i < taskNum; ++i) {
SCtgTask *pTask = taosArrayGet(pJob->pTasks, i);
+ pTask->pBatchs = pJob->pBatchs;
qDebug("QID:0x%" PRIx64 " ctg launch [%dth] task", pJob->queryId, pTask->taskId);
CTG_ERR_RET((*gCtgAsyncFps[pTask->type].launchFp)(pTask));
+
pTask->status = CTG_TASK_LAUNCHED;
}
@@ -1712,6 +1724,10 @@ int32_t ctgLaunchJob(SCtgJob *pJob) {
qDebug("QID:0x%" PRIx64 " ctg call user callback with rsp %s", pJob->queryId, tstrerror(pJob->jobResCode));
taosAsyncExec(ctgCallUserCb, pJob, NULL);
+#if CTG_BATCH_FETCH
+ } else {
+ ctgLaunchBatchs(pJob->pCtg, pJob, pJob->pBatchs);
+#endif
}
return TSDB_CODE_SUCCESS;
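Note on the ctgLaunchJob change above: with CTG_BATCH_FETCH enabled, launching a task only queues its request into the job-level pBatchs table; the actual sends happen once, after all tasks are launched, via ctgLaunchBatchs. A toy sketch of that launch-then-flush flow, illustrative names only:

    #include <stdio.h>

    typedef struct { int nQueued; } BatchTable;

    static void launchTask(BatchTable *tbl, int taskId) {
      tbl->nQueued++;                     /* queue the request, no network yet */
      printf("task %d queued\n", taskId);
    }

    static void flushBatches(BatchTable *tbl) {
      printf("sending %d queued reqs in batched messages\n", tbl->nQueued);
      tbl->nQueued = 0;                   /* one send pass for the whole job */
    }

    int main(void) {
      BatchTable tbl = {0};
      for (int i = 0; i < 4; ++i) launchTask(&tbl, i);
      flushBatches(&tbl);                 /* many round-trips collapse into few */
      return 0;
    }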
diff --git a/source/libs/catalog/src/ctgRemote.c b/source/libs/catalog/src/ctgRemote.c
index cc5dde929811a8900192d3f12414f2eed4887923..55bfc88a49f63cd2bf2a869f969e9e1e890746a0 100644
--- a/source/libs/catalog/src/ctgRemote.c
+++ b/source/libs/catalog/src/ctgRemote.c
@@ -18,9 +18,67 @@
#include "tname.h"
#include "catalogInt.h"
#include "systable.h"
-#include "ctgRemote.h"
#include "tref.h"
+int32_t ctgHandleBatchRsp(SCtgJob* pJob, SCtgTaskCallbackParam* cbParam, SDataBuf *pMsg, int32_t rspCode) {
+ int32_t code = 0;
+ SArray* pTaskId = cbParam->taskId;
+ SCatalog* pCtg = pJob->pCtg;
+ int32_t taskNum = taosArrayGetSize(pTaskId);
+ SDataBuf taskMsg = *pMsg;
+ int32_t offset = 0;
+ int32_t msgNum = (TSDB_CODE_SUCCESS == rspCode && pMsg->pData && (pMsg->len > 0)) ? ntohl(*(int32_t*)pMsg->pData) : 0;
+ ASSERT(taskNum == msgNum || 0 == msgNum);
+
+ ctgDebug("QID:0x%" PRIx64 " ctg got batch %d rsp %s", pJob->queryId, cbParam->batchId, TMSG_INFO(cbParam->reqType + 1));
+
+ offset += sizeof(msgNum);
+ SBatchRsp rsp = {0};
+ SHashObj* pBatchs = taosHashInit(taskNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
+ if (NULL == pBatchs) {
+ ctgError("taosHashInit %d batch failed", taskNum);
+ CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+ }
+
+ for (int32_t i = 0; i < taskNum; ++i) {
+ int32_t* taskId = taosArrayGet(pTaskId, i);
+ SCtgTask *pTask = taosArrayGet(pJob->pTasks, *taskId);
+ if (msgNum > 0) {
+ rsp.reqType = ntohl(*(int32_t*)((char*)pMsg->pData + offset));
+ offset += sizeof(rsp.reqType);
+ rsp.msgLen = ntohl(*(int32_t*)((char*)pMsg->pData + offset));
+ offset += sizeof(rsp.msgLen);
+ rsp.rspCode = ntohl(*(int32_t*)((char*)pMsg->pData + offset));
+ offset += sizeof(rsp.rspCode);
+ rsp.msg = ((char*)pMsg->pData) + offset;
+ offset += rsp.msgLen;
+
+ taskMsg.msgType = rsp.reqType;
+ taskMsg.pData = rsp.msg;
+ taskMsg.len = rsp.msgLen;
+ } else {
+ rsp.reqType = -1;
+ taskMsg.msgType = -1;
+ taskMsg.pData = NULL;
+ taskMsg.len = 0;
+ }
+
+ pTask->pBatchs = pBatchs;
+
+ ctgDebug("QID:0x%" PRIx64 " ctg task %d start to handle rsp %s", pJob->queryId, pTask->taskId, TMSG_INFO(taskMsg.msgType + 1));
+
+ (*gCtgAsyncFps[pTask->type].handleRspFp)(pTask, rsp.reqType, &taskMsg, (rsp.rspCode ? rsp.rspCode : rspCode));
+ }
+
+ CTG_ERR_JRET(ctgLaunchBatchs(pJob->pCtg, pJob, pBatchs));
+
+_return:
+
+ ctgFreeBatchs(pBatchs);
+ CTG_RET(code);
+}
+
+
int32_t ctgProcessRspMsg(void* out, int32_t reqType, char* msg, int32_t msgSize, int32_t rspCode, char* target) {
int32_t code = 0;
@@ -233,6 +291,11 @@ int32_t ctgProcessRspMsg(void* out, int32_t reqType, char* msg, int32_t msgSize,
break;
}
default:
+ if (TSDB_CODE_SUCCESS != rspCode) {
+ qError("Got error rsp, error:%s", tstrerror(rspCode));
+ CTG_ERR_RET(rspCode);
+ }
+
qError("invalid req type %s", TMSG_INFO(reqType));
return TSDB_CODE_APP_ERROR;
}
@@ -254,12 +317,32 @@ int32_t ctgHandleMsgCallback(void *param, SDataBuf *pMsg, int32_t rspCode) {
goto _return;
}
- SCtgTask *pTask = taosArrayGet(pJob->pTasks, cbParam->taskId);
+ SCatalog* pCtg = pJob->pCtg;
+
+ if (TDMT_VND_BATCH_META == cbParam->reqType || TDMT_MND_BATCH_META == cbParam->reqType) {
+ CTG_ERR_JRET(ctgHandleBatchRsp(pJob, cbParam, pMsg, rspCode));
+ } else {
+ int32_t *taskId = taosArrayGet(cbParam->taskId, 0);
+ SCtgTask *pTask = taosArrayGet(pJob->pTasks, *taskId);
+
+ qDebug("QID:0x%" PRIx64 " ctg task %d start to handle rsp %s", pJob->queryId, pTask->taskId, TMSG_INFO(cbParam->reqType + 1));
+
+#if CTG_BATCH_FETCH
+ SHashObj* pBatchs = taosHashInit(CTG_DEFAULT_BATCH_NUM, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
+ if (NULL == pBatchs) {
+ ctgError("taosHashInit %d batch failed", CTG_DEFAULT_BATCH_NUM);
+ CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+ }
+ pTask->pBatchs = pBatchs;
+#endif
- qDebug("QID:0x%" PRIx64 " ctg task %d start to handle rsp %s", pJob->queryId, pTask->taskId, TMSG_INFO(cbParam->reqType + 1));
+ CTG_ERR_JRET((*gCtgAsyncFps[pTask->type].handleRspFp)(pTask, cbParam->reqType, pMsg, rspCode));
+
+#if CTG_BATCH_FETCH
+ CTG_ERR_JRET(ctgLaunchBatchs(pJob->pCtg, pJob, pBatchs));
+#endif
+ }
- CTG_ERR_JRET((*gCtgAsyncFps[pTask->type].handleRspFp)(pTask, cbParam->reqType, pMsg, rspCode));
-
_return:
taosMemoryFree(pMsg->pData);
@@ -272,12 +355,12 @@ _return:
}
-int32_t ctgMakeMsgSendInfo(SCtgTask* pTask, int32_t msgType, SMsgSendInfo **pMsgSendInfo) {
+int32_t ctgMakeMsgSendInfo(SCtgJob* pJob, SArray* pTaskId, int32_t batchId, int32_t msgType, SMsgSendInfo **pMsgSendInfo) {
int32_t code = 0;
SMsgSendInfo *msgSendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
if (NULL == msgSendInfo) {
qError("calloc %d failed", (int32_t)sizeof(SMsgSendInfo));
- CTG_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
+ CTG_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
SCtgTaskCallbackParam *param = taosMemoryCalloc(1, sizeof(SCtgTaskCallbackParam));
@@ -287,12 +370,13 @@ int32_t ctgMakeMsgSendInfo(SCtgTask* pTask, int32_t msgType, SMsgSendInfo **pMsg
}
param->reqType = msgType;
- param->queryId = pTask->pJob->queryId;
- param->refId = pTask->pJob->refId;
- param->taskId = pTask->taskId;
+ param->queryId = pJob->queryId;
+ param->refId = pJob->refId;
+ param->taskId = pTaskId;
+ param->batchId = batchId;
msgSendInfo->param = param;
- msgSendInfo->paramFreeFp = taosMemoryFree;
+ msgSendInfo->paramFreeFp = ctgFreeMsgSendParam;
msgSendInfo->fp = ctgHandleMsgCallback;
*pMsgSendInfo = msgSendInfo;
@@ -301,18 +385,19 @@ int32_t ctgMakeMsgSendInfo(SCtgTask* pTask, int32_t msgType, SMsgSendInfo **pMsg
_return:
- taosMemoryFree(param);
- taosMemoryFree(msgSendInfo);
+ taosArrayDestroy(pTaskId);
+ destroySendMsgInfo(msgSendInfo);
CTG_RET(code);
}
-int32_t ctgAsyncSendMsg(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTask* pTask, int32_t msgType, void *msg, uint32_t msgSize) {
+int32_t ctgAsyncSendMsg(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgJob* pJob, SArray* pTaskId,
+ int32_t batchId, char* dbFName, int32_t vgId, int32_t msgType, void *msg, uint32_t msgSize) {
int32_t code = 0;
SMsgSendInfo *pMsgSendInfo = NULL;
- CTG_ERR_JRET(ctgMakeMsgSendInfo(pTask, msgType, &pMsgSendInfo));
+ CTG_ERR_JRET(ctgMakeMsgSendInfo(pJob, pTaskId, batchId, msgType, &pMsgSendInfo));
- ctgUpdateSendTargetInfo(pMsgSendInfo, msgType, pTask);
+ ctgUpdateSendTargetInfo(pMsgSendInfo, msgType, dbFName, vgId);
pMsgSendInfo->requestId = pConn->requestId;
pMsgSendInfo->requestObjRefId = pConn->requestObjRefId;
@@ -323,24 +408,178 @@ int32_t ctgAsyncSendMsg(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTask* pTask
int64_t transporterId = 0;
code = asyncSendMsgToServer(pConn->pTrans, &pConn->mgmtEps, &transporterId, pMsgSendInfo);
+ pMsgSendInfo = NULL;
if (code) {
ctgError("asyncSendMsgToSever failed, error: %s", tstrerror(code));
CTG_ERR_JRET(code);
}
- ctgDebug("ctg req msg sent, reqId:0x%" PRIx64 ", msg type:%d, %s", pTask->pJob->queryId, msgType, TMSG_INFO(msgType));
+ ctgDebug("ctg req msg sent, reqId:0x%" PRIx64 ", msg type:%d, %s", pJob->queryId, msgType, TMSG_INFO(msgType));
return TSDB_CODE_SUCCESS;
_return:
if (pMsgSendInfo) {
- taosMemoryFreeClear(pMsgSendInfo->param);
- taosMemoryFreeClear(pMsgSendInfo);
+ destroySendMsgInfo(pMsgSendInfo);
}
CTG_RET(code);
}
+int32_t ctgAddBatch(SCatalog* pCtg, int32_t vgId, SRequestConnInfo *pConn, SCtgTask* pTask, int32_t msgType, void *msg, uint32_t msgSize) {
+ int32_t code = 0;
+ SHashObj* pBatchs = pTask->pBatchs;
+ SCtgJob* pJob = pTask->pJob;
+ SCtgBatch* pBatch = taosHashGet(pBatchs, &vgId, sizeof(vgId));
+ int32_t taskNum = taosArrayGetSize(pTask->pJob->pTasks);
+ SCtgBatch newBatch = {0};
+ SBatchMsg req = {0};
+
+ if (NULL == pBatch) {
+ newBatch.pMsgs = taosArrayInit(taskNum, sizeof(SBatchMsg));
+ newBatch.pTaskIds = taosArrayInit(taskNum, sizeof(int32_t));
+ if (NULL == newBatch.pMsgs || NULL == newBatch.pTaskIds) {
+ CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
+ }
+
+ newBatch.conn = *pConn;
+
+ req.msgType = msgType;
+ req.msgLen = msgSize;
+ req.msg = msg;
+ if (NULL == taosArrayPush(newBatch.pMsgs, &req)) {
+ CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
+ }
+ if (NULL == taosArrayPush(newBatch.pTaskIds, &pTask->taskId)) {
+ CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
+ }
+ newBatch.msgSize = sizeof(SBatchReq) + sizeof(req) + msgSize - POINTER_BYTES;
+
+ if (vgId > 0) {
+ if (TDMT_VND_TABLE_CFG == msgType) {
+ SCtgTbCfgCtx* ctx = (SCtgTbCfgCtx*)pTask->taskCtx;
+ tNameGetFullDbName(ctx->pName, newBatch.dbFName);
+ } else if (TDMT_VND_TABLE_META == msgType) {
+ SCtgTbMetaCtx* ctx = (SCtgTbMetaCtx*)pTask->taskCtx;
+ tNameGetFullDbName(ctx->pName, newBatch.dbFName);
+ } else {
+ ctgError("invalid vnode msgType %d", msgType);
+ CTG_ERR_JRET(TSDB_CODE_APP_ERROR);
+ }
+ }
+
+ newBatch.msgType = (vgId > 0) ? TDMT_VND_BATCH_META : TDMT_MND_BATCH_META;
+ newBatch.batchId = atomic_add_fetch_32(&pJob->batchId, 1);
+
+ if (0 != taosHashPut(pBatchs, &vgId, sizeof(vgId), &newBatch, sizeof(newBatch))) {
+ CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
+ }
+
+ ctgDebug("task %d %s req added to batch %d, target vgId %d", pTask->taskId, TMSG_INFO(msgType), newBatch.batchId, vgId);
+
+ return TSDB_CODE_SUCCESS;
+ }
+
+ req.msgType = msgType;
+ req.msgLen = msgSize;
+ req.msg = msg;
+ if (NULL == taosArrayPush(pBatch->pMsgs, &req)) {
+ CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
+ }
+ if (NULL == taosArrayPush(pBatch->pTaskIds, &pTask->taskId)) {
+ CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
+ }
+ pBatch->msgSize += sizeof(req) + msgSize - POINTER_BYTES;
+
+ if (vgId > 0) {
+ if (TDMT_VND_TABLE_CFG == msgType) {
+ SCtgTbCfgCtx* ctx = (SCtgTbCfgCtx*)pTask->taskCtx;
+ tNameGetFullDbName(ctx->pName, pBatch->dbFName);
+ } else if (TDMT_VND_TABLE_META == msgType) {
+ SCtgTbMetaCtx* ctx = (SCtgTbMetaCtx*)pTask->taskCtx;
+ tNameGetFullDbName(ctx->pName, pBatch->dbFName);
+ } else {
+ ctgError("invalid vnode msgType %d", msgType);
+ CTG_ERR_JRET(TSDB_CODE_APP_ERROR);
+ }
+ }
+
+ ctgDebug("task %d %s req added to batch %d, target vgId %d", pTask->taskId, TMSG_INFO(msgType), pBatch->batchId, vgId);
+
+ return TSDB_CODE_SUCCESS;
+
+_return:
+
+ ctgFreeBatch(&newBatch);
+ taosMemoryFree(msg);
+
+ return code;
+}
+
+int32_t ctgBuildBatchReqMsg(SCtgBatch* pBatch, int32_t vgId, void** msg) {
+ *msg = taosMemoryMalloc(pBatch->msgSize);
+ if (NULL == (*msg)) {
+ CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+ }
+
+ int32_t offset = 0;
+ int32_t num = taosArrayGetSize(pBatch->pMsgs);
+ SBatchReq *pBatchReq = (SBatchReq*)(*msg);
+
+ pBatchReq->header.vgId = htonl(vgId);
+ pBatchReq->msgNum = htonl(num);
+ offset += sizeof(SBatchReq);
+
+ for (int32_t i = 0; i < num; ++i) {
+ SBatchMsg* pReq = taosArrayGet(pBatch->pMsgs, i);
+ *(int32_t*)((char*)(*msg) + offset) = htonl(pReq->msgType);
+ offset += sizeof(pReq->msgType);
+ *(int32_t*)((char*)(*msg) + offset) = htonl(pReq->msgLen);
+ offset += sizeof(pReq->msgLen);
+ memcpy((char*)(*msg) + offset, pReq->msg, pReq->msgLen);
+ offset += pReq->msgLen;
+ }
+
+ ASSERT(pBatch->msgSize == offset);
+
+ qDebug("batch req %d to vg %d msg built with %d meta reqs", pBatch->batchId, vgId, num);
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t ctgLaunchBatchs(SCatalog* pCtg, SCtgJob *pJob, SHashObj* pBatchs) {
+ int32_t code = 0;
+ void* msg = NULL;
+ void* p = taosHashIterate(pBatchs, NULL);
+ while (NULL != p) {
+ size_t len = 0;
+ int32_t* vgId = taosHashGetKey(p, &len);
+ SCtgBatch* pBatch = (SCtgBatch*)p;
+
+ ctgDebug("QID:0x%" PRIx64 " ctg start to launch batch %d", pJob->queryId, pBatch->batchId);
+
+ CTG_ERR_JRET(ctgBuildBatchReqMsg(pBatch, *vgId, &msg));
+ code = ctgAsyncSendMsg(pCtg, &pBatch->conn, pJob, pBatch->pTaskIds, pBatch->batchId,
+ pBatch->dbFName, *vgId, pBatch->msgType, msg, pBatch->msgSize);
+ pBatch->pTaskIds = NULL;
+ CTG_ERR_JRET(code);
+
+ p = taosHashIterate(pBatchs, p);
+ }
+
+ return TSDB_CODE_SUCCESS;
+
+_return:
+
+ if (p) {
+ taosHashCancelIterate(pBatchs, p);
+ }
+ taosMemoryFree(msg);
+
+ CTG_RET(code);
+}
+
+
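One detail in ctgLaunchBatchs worth calling out: pBatch->pTaskIds is set to NULL immediately after ctgAsyncSendMsg, because the callback param created in ctgMakeMsgSendInfo now owns that array (ctgFreeMsgSendParam frees it), so the batch cleanup path must not free it again. A minimal sketch of that ownership handoff, with stand-in types rather than the real rpc API:

    #include <stdlib.h>

    typedef struct { int *taskIds; } Batch;
    typedef struct { int *taskIds; } CbParam;

    static void cbParamFree(CbParam *p) {   /* ctgFreeMsgSendParam() stand-in */
      free(p->taskIds);
      free(p);
    }

    static int asyncSend(int *taskIds) {
      CbParam *p = malloc(sizeof(*p));
      if (p == NULL) { free(taskIds); return -1; }  /* still consumes the array */
      p->taskIds = taskIds;      /* the callback param owns the array from here */
      cbParamFree(p);            /* stand-in for the rpc completion running */
      return 0;
    }

    static int launchBatch(Batch *b) {
      int code = asyncSend(b->taskIds);
      b->taskIds = NULL;         /* drop our reference even on failure: ownership
                                  * moved with the call, so cleanup cannot
                                  * double-free it */
      return code;
    }

    int main(void) {
      Batch b = {.taskIds = malloc(4 * sizeof(int))};
      return launchBatch(&b);
    }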
int32_t ctgGetQnodeListFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SArray *out, SCtgTask* pTask) {
char *msg = NULL;
int32_t msgLen = 0;
@@ -361,7 +600,18 @@ int32_t ctgGetQnodeListFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SArray
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, pOut, NULL));
- CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask, reqType, msg, msgLen));
+
+#if CTG_BATCH_FETCH
+ CTG_RET(ctgAddBatch(pCtg, 0, pConn, pTask, reqType, msg, msgLen));
+#else
+ SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
+ if (NULL == pTaskId) {
+ CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+ }
+ taosArrayPush(pTaskId, &pTask->taskId);
+
+ CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask->pJob, pTaskId, -1, NULL, 0, reqType, msg, msgLen));
+#endif
}
SRpcMsg rpcMsg = {
@@ -396,7 +646,18 @@ int32_t ctgGetDnodeListFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SArray
if (pTask) {
CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, NULL, NULL));
- CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask, reqType, msg, msgLen));
+
+#if CTG_BATCH_FETCH
+ CTG_RET(ctgAddBatch(pCtg, 0, pConn, pTask, reqType, msg, msgLen));
+#else
+ SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
+ if (NULL == pTaskId) {
+ CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+ }
+ taosArrayPush(pTaskId, &pTask->taskId);
+
+ CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask->pJob, pTaskId, -1, NULL, 0, reqType, msg, msgLen));
+#endif
}
SRpcMsg rpcMsg = {
@@ -436,8 +697,18 @@ int32_t ctgGetDBVgInfoFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SBuildU
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, pOut, input->db));
+
+#if CTG_BATCH_FETCH
+ CTG_RET(ctgAddBatch(pCtg, 0, pConn, pTask, reqType, msg, msgLen));
+#else
+ SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
+ if (NULL == pTaskId) {
+ CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+ }
+ taosArrayPush(pTaskId, &pTask->taskId);
- CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask, reqType, msg, msgLen));
+ CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask->pJob, pTaskId, -1, NULL, 0, reqType, msg, msgLen));
+#endif
}
SRpcMsg rpcMsg = {
@@ -476,8 +747,18 @@ int32_t ctgGetDBCfgFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const char
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, pOut, (char*)dbFName));
+
+#if CTG_BATCH_FETCH
+ CTG_RET(ctgAddBatch(pCtg, 0, pConn, pTask, reqType, msg, msgLen));
+#else
+ SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
+ if (NULL == pTaskId) {
+ CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+ }
+ taosArrayPush(pTaskId, &pTask->taskId);
- CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask, reqType, msg, msgLen));
+ CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask->pJob, pTaskId, -1, NULL, 0, reqType, msg, msgLen));
+#endif
}
SRpcMsg rpcMsg = {
@@ -516,8 +797,18 @@ int32_t ctgGetIndexInfoFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, pOut, (char*)indexName));
+
+#if CTG_BATCH_FETCH
+ CTG_RET(ctgAddBatch(pCtg, 0, pConn, pTask, reqType, msg, msgLen));
+#else
+ SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
+ if (NULL == pTaskId) {
+ CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+ }
+ taosArrayPush(pTaskId, &pTask->taskId);
- CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask, reqType, msg, msgLen));
+ CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask->pJob, pTaskId, -1, NULL, 0, reqType, msg, msgLen));
+#endif
}
SRpcMsg rpcMsg = {
@@ -559,8 +850,18 @@ int32_t ctgGetTbIndexFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SName *n
}
CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, pOut, (char*)tbFName));
+
+#if CTG_BATCH_FETCH
+ CTG_RET(ctgAddBatch(pCtg, 0, pConn, pTask, reqType, msg, msgLen));
+#else
+ SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
+ if (NULL == pTaskId) {
+ CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+ }
+ taosArrayPush(pTaskId, &pTask->taskId);
- CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask, reqType, msg, msgLen));
+ CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask->pJob, pTaskId, -1, NULL, 0, reqType, msg, msgLen));
+#endif
}
SRpcMsg rpcMsg = {
@@ -599,8 +900,18 @@ int32_t ctgGetUdfInfoFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const ch
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, pOut, (char*)funcName));
+
+#if CTG_BATCH_FETCH
+ CTG_RET(ctgAddBatch(pCtg, 0, pConn, pTask, reqType, msg, msgLen));
+#else
+ SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
+ if (NULL == pTaskId) {
+ CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+ }
+ taosArrayPush(pTaskId, &pTask->taskId);
- CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask, reqType, msg, msgLen));
+ CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask->pJob, pTaskId, -1, NULL, 0, reqType, msg, msgLen));
+#endif
}
SRpcMsg rpcMsg = {
@@ -639,8 +950,18 @@ int32_t ctgGetUserDbAuthFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, pOut, (char*)user));
+
+#if CTG_BATCH_FETCH
+ CTG_RET(ctgAddBatch(pCtg, 0, pConn, pTask, reqType, msg, msgLen));
+#else
+ SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
+ if (NULL == pTaskId) {
+ CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+ }
+ taosArrayPush(pTaskId, &pTask->taskId);
- CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask, reqType, msg, msgLen));
+ CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask->pJob, pTaskId, -1, NULL, 0, reqType, msg, msgLen));
+#endif
}
SRpcMsg rpcMsg = {
@@ -684,8 +1005,17 @@ int32_t ctgGetTbMetaFromMnodeImpl(SCatalog* pCtg, SRequestConnInfo *pConn, char
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, pOut, tbFName));
+#if CTG_BATCH_FETCH
+ CTG_RET(ctgAddBatch(pCtg, 0, pConn, pTask, reqType, msg, msgLen));
+#else
+ SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
+ if (NULL == pTaskId) {
+ CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+ }
+ taosArrayPush(pTaskId, &pTask->taskId);
- CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask, reqType, msg, msgLen));
+ CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask->pJob, pTaskId, -1, NULL, 0, reqType, msg, msgLen));
+#endif
}
SRpcMsg rpcMsg = {
@@ -744,7 +1074,21 @@ int32_t ctgGetTbMetaFromVnode(SCatalog* pCtg, SRequestConnInfo *pConn, const SNa
.requestId = pConn->requestId,
.requestObjRefId = pConn->requestObjRefId,
.mgmtEps = vgroupInfo->epSet};
- CTG_RET(ctgAsyncSendMsg(pCtg, &vConn, pTask, reqType, msg, msgLen));
+
+#if CTG_BATCH_FETCH
+ CTG_RET(ctgAddBatch(pCtg, vgroupInfo->vgId, &vConn, pTask, reqType, msg, msgLen));
+#else
+ SCtgTbMetaCtx* ctx = (SCtgTbMetaCtx*)pTask->taskCtx;
+ char dbFName[TSDB_DB_FNAME_LEN];
+ tNameGetFullDbName(ctx->pName, dbFName);
+ SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
+ if (NULL == pTaskId) {
+ CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+ }
+ taosArrayPush(pTaskId, &pTask->taskId);
+
+ CTG_RET(ctgAsyncSendMsg(pCtg, &vConn, pTask->pJob, pTaskId, -1, dbFName, ctx->vgId, reqType, msg, msgLen));
+#endif
}
SRpcMsg rpcMsg = {
@@ -791,7 +1135,20 @@ int32_t ctgGetTableCfgFromVnode(SCatalog* pCtg, SRequestConnInfo *pConn, const S
.requestId = pConn->requestId,
.requestObjRefId = pConn->requestObjRefId,
.mgmtEps = vgroupInfo->epSet};
- CTG_RET(ctgAsyncSendMsg(pCtg, &vConn, pTask, reqType, msg, msgLen));
+#if CTG_BATCH_FETCH
+ CTG_RET(ctgAddBatch(pCtg, vgroupInfo->vgId, &vConn, pTask, reqType, msg, msgLen));
+#else
+ SCtgTbCfgCtx* ctx = (SCtgTbCfgCtx*)pTask->taskCtx;
+ char dbFName[TSDB_DB_FNAME_LEN];
+ tNameGetFullDbName(ctx->pName, dbFName);
+ SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
+ if (NULL == pTaskId) {
+ CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+ }
+ taosArrayPush(pTaskId, &pTask->taskId);
+
+ CTG_RET(ctgAsyncSendMsg(pCtg, &vConn, pTask->pJob, pTaskId, -1, dbFName, ctx->pVgInfo->vgId, reqType, msg, msgLen));
+#endif
}
SRpcMsg rpcMsg = {
@@ -832,8 +1189,17 @@ int32_t ctgGetTableCfgFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const S
if (pTask) {
CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, NULL, (char*)tbFName));
+#if CTG_BATCH_FETCH
+ CTG_RET(ctgAddBatch(pCtg, 0, pConn, pTask, reqType, msg, msgLen));
+#else
+ SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
+ if (NULL == pTaskId) {
+ CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+ }
+ taosArrayPush(pTaskId, &pTask->taskId);
- CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask, reqType, msg, msgLen));
+ CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask->pJob, pTaskId, -1, NULL, 0, reqType, msg, msgLen));
+#endif
}
SRpcMsg rpcMsg = {
@@ -868,8 +1234,18 @@ int32_t ctgGetSvrVerFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, char **ou
if (pTask) {
CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, NULL, NULL));
+
+#if CTG_BATCH_FETCH
+ CTG_RET(ctgAddBatch(pCtg, 0, pConn, pTask, reqType, msg, msgLen));
+#else
+ SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
+ if (NULL == pTaskId) {
+ CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+ }
+ taosArrayPush(pTaskId, &pTask->taskId);
- CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask, reqType, msg, msgLen));
+ CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask->pJob, pTaskId, -1, NULL, 0, reqType, msg, msgLen));
+#endif
}
SRpcMsg rpcMsg = {
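Note on the ctgRemote.c hunks above: every per-task send is rewritten the same way. With CTG_BATCH_FETCH set, the request is queued into a per-job, per-node batch via ctgAddBatch; otherwise it falls back to ctgAsyncSendMsg, now carrying the task id in a one-element SArray so the response path sees a task-id list in both modes. A minimal standalone sketch of that compile-time switch (the names and the fixed-size batch are illustrative, not TDengine's API):

```c
/* Illustrative sketch of the batch-or-direct pattern above; plain C,
 * not TDengine's actual ctgAddBatch/ctgAsyncSendMsg signatures. */
#include <stdio.h>

#define BATCH_FETCH 1 /* stand-in for CTG_BATCH_FETCH */

typedef struct {
  int taskIds[16]; /* task ids destined for the same node */
  int count;
} Batch;

static int batchAdd(Batch *pBatch, int taskId) {
  if (pBatch->count >= 16) return -1; /* would flush in the real code */
  pBatch->taskIds[pBatch->count++] = taskId;
  return 0;
}

static int sendDirect(int taskId) {
  /* even the single-task path carries the id in a list, so the
   * response handler can treat both modes uniformly */
  printf("direct send, task %d\n", taskId);
  return 0;
}

static int requestMeta(Batch *pBatch, int taskId) {
#if BATCH_FETCH
  return batchAdd(pBatch, taskId); /* one RPC per node, many tasks */
#else
  (void)pBatch;
  return sendDirect(taskId); /* one RPC per task */
#endif
}

int main(void) {
  Batch batch = {0};
  for (int i = 0; i < 3; ++i) requestMeta(&batch, i);
  printf("%d tasks queued in one batch\n", batch.count);
  return 0;
}
```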
diff --git a/source/libs/catalog/src/ctgUtil.c b/source/libs/catalog/src/ctgUtil.c
index 1f0f074a0f39f1a870dd54ef7c3218fe72803d7b..e61becbe17f6250702cda1ced553a48b5190daab 100644
--- a/source/libs/catalog/src/ctgUtil.c
+++ b/source/libs/catalog/src/ctgUtil.c
@@ -19,6 +19,39 @@
#include "catalogInt.h"
#include "systable.h"
+void ctgFreeMsgSendParam(void* param) {
+ if (NULL == param) {
+ return;
+ }
+
+ SCtgTaskCallbackParam* pParam = (SCtgTaskCallbackParam*)param;
+ taosArrayDestroy(pParam->taskId);
+
+ taosMemoryFree(param);
+}
+
+void ctgFreeBatch(SCtgBatch *pBatch) {
+ if (NULL == pBatch) {
+ return;
+ }
+
+ taosArrayDestroy(pBatch->pMsgs);
+ taosArrayDestroy(pBatch->pTaskIds);
+}
+
+void ctgFreeBatchs(SHashObj *pBatchs) {
+ void* p = taosHashIterate(pBatchs, NULL);
+ while (NULL != p) {
+ SCtgBatch* pBatch = (SCtgBatch*)p;
+
+ ctgFreeBatch(pBatch);
+
+ p = taosHashIterate(pBatchs, p);
+ }
+
+ taosHashCleanup(pBatchs);
+}
+
char *ctgTaskTypeStr(CTG_TASK_TYPE type) {
switch (type) {
case CTG_TASK_GET_QNODE:
@@ -612,6 +645,7 @@ void ctgFreeJob(void* job) {
uint64_t qid = pJob->queryId;
ctgFreeTasks(pJob->pTasks);
+ ctgFreeBatchs(pJob->pBatchs);
ctgFreeSMetaData(&pJob->jobRes);
@@ -867,14 +901,10 @@ int32_t ctgCloneTableIndex(SArray* pIndex, SArray** pRes) {
}
-int32_t ctgUpdateSendTargetInfo(SMsgSendInfo *pMsgSendInfo, int32_t msgType, SCtgTask* pTask) {
- if (msgType == TDMT_VND_TABLE_META) {
- SCtgTbMetaCtx* ctx = (SCtgTbMetaCtx*)pTask->taskCtx;
- char dbFName[TSDB_DB_FNAME_LEN];
- tNameGetFullDbName(ctx->pName, dbFName);
-
+int32_t ctgUpdateSendTargetInfo(SMsgSendInfo *pMsgSendInfo, int32_t msgType, char* dbFName, int32_t vgId) {
+ if (msgType == TDMT_VND_TABLE_META || msgType == TDMT_VND_TABLE_CFG || msgType == TDMT_VND_BATCH_META) {
pMsgSendInfo->target.type = TARGET_TYPE_VNODE;
- pMsgSendInfo->target.vgId = ctx->vgId;
+ pMsgSendInfo->target.vgId = vgId;
pMsgSendInfo->target.dbFName = strdup(dbFName);
} else {
pMsgSendInfo->target.type = TARGET_TYPE_MNODE;
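ctgFreeBatchs above is the usual two-phase teardown: iterate the container freeing what each batch owns (its message list and task-id list), then destroy the container itself. The same shape with toy types (taosHashIterate/taosHashCleanup are the real calls):

```c
/* Two-phase teardown sketch: free per-entry resources, then the container.
 * Toy types only; a plain array stands in for SHashObj. */
#include <stdlib.h>

typedef struct {
  int *pMsgs;    /* owned by the batch */
  int *pTaskIds; /* owned by the batch */
} Batch;

typedef struct {
  Batch *entries;
  int    n;
} BatchMap;

static void freeBatch(Batch *pBatch) {
  if (pBatch == NULL) return;
  free(pBatch->pMsgs);    /* taosArrayDestroy(pBatch->pMsgs) in the source */
  free(pBatch->pTaskIds); /* taosArrayDestroy(pBatch->pTaskIds) */
}

static void freeBatchMap(BatchMap *pMap) {
  for (int i = 0; i < pMap->n; ++i) {
    freeBatch(&pMap->entries[i]); /* phase 1: entry-owned memory */
  }
  free(pMap->entries); /* phase 2: the container (taosHashCleanup) */
  pMap->entries = NULL;
  pMap->n = 0;
}

int main(void) {
  BatchMap map = {.entries = calloc(2, sizeof(Batch)), .n = 2};
  map.entries[0].pMsgs = malloc(8);
  map.entries[0].pTaskIds = malloc(8);
  freeBatchMap(&map);
  return 0;
}
```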
diff --git a/source/libs/executor/src/dataDeleter.c b/source/libs/executor/src/dataDeleter.c
index 391aef529f6b8bf46c976700986665edc7594665..06b7c13fa2cb52b8255098c5efb652d56ec57974 100644
--- a/source/libs/executor/src/dataDeleter.c
+++ b/source/libs/executor/src/dataDeleter.c
@@ -90,7 +90,8 @@ static void toDataCacheEntry(SDataDeleterHandle* pHandle, const SInputData* pInp
pRes->uidList = pHandle->pParam->pUidList;
pRes->skey = pHandle->pDeleter->deleteTimeRange.skey;
pRes->ekey = pHandle->pDeleter->deleteTimeRange.ekey;
- strcpy(pRes->tableFName, pHandle->pDeleter->tableFName);
+ strcpy(pRes->tableName, pHandle->pDeleter->tableFName);
+ strcpy(pRes->tsColName, pHandle->pDeleter->tsColName);
pRes->affectedRows = *(int64_t*)pColRes->pData;
pBuf->useSize += pEntry->dataLen;
diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c
index 4d47eda52b5cf7624edf242b73bf120e62de501e..7f88e628c185a18eba3b76dda5847b572a19f986 100644
--- a/source/libs/executor/src/executor.c
+++ b/source/libs/executor/src/executor.c
@@ -30,8 +30,7 @@ static void cleanupRefPool() {
taosCloseRef(ref);
}
-static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t numOfBlocks, int32_t type, bool assignUid,
- char* id) {
+static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t numOfBlocks, int32_t type, char* id) {
ASSERT(pOperator != NULL);
if (pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
if (pOperator->numOfDownstream == 0) {
@@ -44,12 +43,12 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
return TSDB_CODE_QRY_APP_ERROR;
}
pOperator->status = OP_NOT_OPENED;
- return doSetStreamBlock(pOperator->pDownstream[0], input, numOfBlocks, type, assignUid, id);
+ return doSetStreamBlock(pOperator->pDownstream[0], input, numOfBlocks, type, id);
} else {
pOperator->status = OP_NOT_OPENED;
SStreamScanInfo* pInfo = pOperator->info;
- pInfo->assignBlockUid = assignUid;
+ /*pInfo->assignBlockUid = assignUid;*/
// TODO: if a block was set but not consumed,
// prevent setting a different type of block
@@ -95,11 +94,7 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
}
}
-int32_t qSetStreamInput(qTaskInfo_t tinfo, const void* input, int32_t type, bool assignUid) {
- return qSetMultiStreamInput(tinfo, input, 1, type, assignUid);
-}
-
-int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type, bool assignUid) {
+int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type) {
if (tinfo == NULL) {
return TSDB_CODE_QRY_APP_ERROR;
}
@@ -110,8 +105,7 @@ int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numO
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
- int32_t code =
- doSetStreamBlock(pTaskInfo->pRoot, (void**)pBlocks, numOfBlocks, type, assignUid, GET_TASKID(pTaskInfo));
+ int32_t code = doSetStreamBlock(pTaskInfo->pRoot, (void**)pBlocks, numOfBlocks, type, GET_TASKID(pTaskInfo));
if (code != TSDB_CODE_SUCCESS) {
qError("%s failed to set the stream block data", GET_TASKID(pTaskInfo));
} else {
@@ -343,7 +337,7 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId,
}
code = dsCreateDataSinker(pSubplan->pDataSink, handle, pSinkParam);
- if(code != TSDB_CODE_SUCCESS){
+ if (code != TSDB_CODE_SUCCESS) {
taosMemoryFreeClear(pSinkParam);
}
}
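The executor.c hunk above folds qSetStreamInput into qSetMultiStreamInput — a single block is just the numOfBlocks == 1 case — and drops the assignUid flag from the whole call chain. A tiny sketch of the consolidation (hypothetical names):

```c
/* Sketch of collapsing a single-item API into its multi-item form. */
#include <stdio.h>

static int setMultiInput(const void *pBlocks, size_t numOfBlocks, int type) {
  if (pBlocks == NULL || numOfBlocks == 0) return -1;
  printf("set %zu block(s), type %d\n", numOfBlocks, type);
  return 0;
}

int main(void) {
  int block = 42;
  /* the old setInput(pBlock, type) becomes the n == 1 case */
  setMultiInput(&block, 1, 0);
  int blocks[3] = {1, 2, 3};
  setMultiInput(blocks, 3, 0);
  return 0;
}
```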
diff --git a/source/libs/executor/src/joinoperator.c b/source/libs/executor/src/joinoperator.c
index 7b3c590f07469009511987abf8f5075973657961..8902804fab478e906484be5d54d0cd636d18b814 100644
--- a/source/libs/executor/src/joinoperator.c
+++ b/source/libs/executor/src/joinoperator.c
@@ -116,7 +116,8 @@ void destroyMergeJoinOperator(void* param, int32_t numOfOutput) {
}
static void mergeJoinJoinLeftRight(struct SOperatorInfo* pOperator, SSDataBlock* pRes, int32_t currRow,
- SSDataBlock* pLeftBlock, int32_t leftPos, SSDataBlock* pRightBlock, int32_t rightPos) {
+ SSDataBlock* pLeftBlock, int32_t leftPos, SSDataBlock* pRightBlock,
+ int32_t rightPos) {
SJoinOperatorInfo* pJoinInfo = pOperator->info;
for (int32_t i = 0; i < pOperator->exprSupp.numOfExprs; ++i) {
@@ -129,7 +130,7 @@ static void mergeJoinJoinLeftRight(struct SOperatorInfo* pOperator, SSDataBlock*
int32_t rowIndex = -1;
SColumnInfoData* pSrc = NULL;
- if (pJoinInfo->pLeft->info.blockId == blockId) {
+ if (pLeftBlock->info.blockId == blockId) {
pSrc = taosArrayGet(pLeftBlock->pDataBlock, slotId);
rowIndex = leftPos;
} else {
@@ -144,7 +145,128 @@ static void mergeJoinJoinLeftRight(struct SOperatorInfo* pOperator, SSDataBlock*
colDataAppend(pDst, currRow, p, false);
}
}
+}
+typedef struct SRowLocation {
+ SSDataBlock* pDataBlock;
+ int32_t pos;
+} SRowLocation;
+
+// rows of pBlock[tsSlotId] in [startPos, endPos) all equal timestamp
+static int32_t mergeJoinGetBlockRowsEqualTs(SSDataBlock* pBlock, int16_t tsSlotId, int32_t startPos, int64_t timestamp,
+ int32_t* pEndPos, SArray* rowLocations, SArray* createdBlocks) {
+ int32_t numRows = pBlock->info.rows;
+ ASSERT(startPos < numRows);
+ SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, tsSlotId);
+
+ int32_t i = startPos;
+ for (; i < numRows; ++i) {
+ char* pNextVal = colDataGetData(pCol, i);
+ if (timestamp != *(int64_t*)pNextVal) {
+ break;
+ }
+ }
+ int32_t endPos = i;
+ *pEndPos = endPos;
+
+ if (endPos - startPos == 0) {
+ return 0;
+ }
+
+ SSDataBlock* block = pBlock;
+ bool createdNewBlock = false;
+ if (endPos == numRows) {
+ block = blockDataExtractBlock(pBlock, startPos, endPos - startPos);
+ taosArrayPush(createdBlocks, &block);
+ createdNewBlock = true;
+ }
+ SRowLocation location = {0};
+ for (int32_t j = startPos; j < endPos; ++j) {
+ location.pDataBlock = block;
+ location.pos = (createdNewBlock ? j - startPos : j);
+ taosArrayPush(rowLocations, &location);
+ }
+ return 0;
+}
+
+// whichChild == 0: left child of join; whichChild == 1: right child of join
+static int32_t mergeJoinGetDownStreamRowsEqualTimeStamp(SOperatorInfo* pOperator, int32_t whichChild, int16_t tsSlotId,
+ SSDataBlock* startDataBlock, int32_t startPos,
+ int64_t timestamp, SArray* rowLocations,
+ SArray* createdBlocks) {
+ ASSERT(whichChild == 0 || whichChild == 1);
+
+ SJoinOperatorInfo* pJoinInfo = pOperator->info;
+ int32_t endPos = -1;
+ SSDataBlock* dataBlock = startDataBlock;
+ mergeJoinGetBlockRowsEqualTs(dataBlock, tsSlotId, startPos, timestamp, &endPos, rowLocations, createdBlocks);
+ while (endPos == dataBlock->info.rows) {
+ SOperatorInfo* ds = pOperator->pDownstream[whichChild];
+ dataBlock = ds->fpSet.getNextFn(ds);
+ if (whichChild == 0) {
+ pJoinInfo->leftPos = 0;
+ pJoinInfo->pLeft = dataBlock;
+ } else if (whichChild == 1) {
+ pJoinInfo->rightPos = 0;
+ pJoinInfo->pRight = dataBlock;
+ }
+
+ if (dataBlock == NULL) {
+ setTaskStatus(pOperator->pTaskInfo, TASK_COMPLETED);
+ endPos = -1;
+ break;
+ }
+
+ mergeJoinGetBlockRowsEqualTs(dataBlock, tsSlotId, 0, timestamp, &endPos, rowLocations, createdBlocks);
+ }
+ if (endPos != -1) {
+ if (whichChild == 0) {
+ pJoinInfo->leftPos = endPos;
+ } else if (whichChild == 1) {
+ pJoinInfo->rightPos = endPos;
+ }
+ }
+ return 0;
+}
+
+static int32_t mergeJoinJoinDownstreamTsRanges(SOperatorInfo* pOperator, int64_t timestamp, SSDataBlock* pRes,
+ int32_t* nRows) {
+ SJoinOperatorInfo* pJoinInfo = pOperator->info;
+ SArray* leftRowLocations = taosArrayInit(8, sizeof(SRowLocation));
+ SArray* leftCreatedBlocks = taosArrayInit(8, POINTER_BYTES);
+
+ SArray* rightRowLocations = taosArrayInit(8, sizeof(SRowLocation));
+ SArray* rightCreatedBlocks = taosArrayInit(8, POINTER_BYTES);
+
+ mergeJoinGetDownStreamRowsEqualTimeStamp(pOperator, 0, pJoinInfo->leftCol.slotId, pJoinInfo->pLeft,
+ pJoinInfo->leftPos, timestamp, leftRowLocations, leftCreatedBlocks);
+ mergeJoinGetDownStreamRowsEqualTimeStamp(pOperator, 1, pJoinInfo->rightCol.slotId, pJoinInfo->pRight,
+ pJoinInfo->rightPos, timestamp, rightRowLocations, rightCreatedBlocks);
+
+ size_t leftNumJoin = taosArrayGetSize(leftRowLocations);
+ size_t rightNumJoin = taosArrayGetSize(rightRowLocations);
+ for (int32_t i = 0; i < leftNumJoin; ++i) {
+ for (int32_t j = 0; j < rightNumJoin; ++j) {
+ SRowLocation* leftRow = taosArrayGet(leftRowLocations, i);
+ SRowLocation* rightRow = taosArrayGet(rightRowLocations, j);
+ mergeJoinJoinLeftRight(pOperator, pRes, *nRows, leftRow->pDataBlock, leftRow->pos, rightRow->pDataBlock,
+ rightRow->pos);
+ ++*nRows;
+ }
+ }
+ for (int i = 0; i < taosArrayGetSize(rightCreatedBlocks); ++i) {
+ SSDataBlock* pBlock = taosArrayGetP(rightCreatedBlocks, i);
+ blockDataDestroy(pBlock);
+ }
+ taosArrayDestroy(rightCreatedBlocks);
+ taosArrayDestroy(rightRowLocations);
+ for (int i = 0; i < taosArrayGetSize(leftCreatedBlocks); ++i) {
+ SSDataBlock* pBlock = taosArrayGetP(leftCreatedBlocks, i);
+ blockDataDestroy(pBlock);
+ }
+ taosArrayDestroy(leftCreatedBlocks);
+ taosArrayDestroy(leftRowLocations);
+ return TSDB_CODE_SUCCESS;
}
static bool mergeJoinGetNextTimestamp(SOperatorInfo* pOperator, int64_t* pLeftTs, int64_t* pRightTs) {
@@ -195,18 +317,15 @@ static void doMergeJoinImpl(struct SOperatorInfo* pOperator, SSDataBlock* pRes)
while (1) {
int64_t leftTs = 0;
int64_t rightTs = 0;
- bool hasNextTs = mergeJoinGetNextTimestamp(pOperator, &leftTs, &rightTs);
+ bool hasNextTs = mergeJoinGetNextTimestamp(pOperator, &leftTs, &rightTs);
if (!hasNextTs) {
break;
}
if (leftTs == rightTs) {
- mergeJoinJoinLeftRight(pOperator, pRes, nrows,
- pJoinInfo->pLeft, pJoinInfo->leftPos, pJoinInfo->pRight, pJoinInfo->rightPos);
- pJoinInfo->leftPos += 1;
- pJoinInfo->rightPos += 1;
-
- nrows += 1;
+ mergeJoinJoinLeftRight(pOperator, pRes, nrows, pJoinInfo->pLeft, pJoinInfo->leftPos, pJoinInfo->pRight,
+ pJoinInfo->rightPos);
+ mergeJoinJoinDownstreamTsRanges(pOperator, leftTs, pRes, &nrows);
} else if (asc && leftTs < rightTs || !asc && leftTs > rightTs) {
pJoinInfo->leftPos += 1;
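The joinoperator.c rewrite above is the substantive change in this patch: when the two sides' timestamps match, the operator no longer emits a single row pair. mergeJoinGetDownStreamRowsEqualTimeStamp gathers every left and every right row carrying that timestamp — pulling additional blocks from downstream when a run crosses a block boundary, and copying a finishing block via blockDataExtractBlock because downstream may recycle it — and the nested loop in mergeJoinJoinDownstreamTsRanges then emits the full cross product. A self-contained sketch of the core idea over toy arrays (single block, ascending order):

```c
/* Standalone sketch of the equal-timestamp join step: gather the run of
 * rows with the same key on each side, then emit their cross product. */
#include <stdio.h>

static int gatherEqual(const long long *ts, int n, int start, long long key) {
  int i = start;
  while (i < n && ts[i] == key) i++;
  return i; /* one past the last row equal to key */
}

int main(void) {
  long long lts[] = {1, 3, 3, 5};
  long long rts[] = {2, 3, 3, 3};
  int li = 0, ri = 0, ln = 4, rn = 4;
  while (li < ln && ri < rn) {
    if (lts[li] == rts[ri]) {
      long long key = lts[li];
      int lend = gatherEqual(lts, ln, li, key);
      int rend = gatherEqual(rts, rn, ri, key);
      for (int i = li; i < lend; i++)   /* cross product: each left row */
        for (int j = ri; j < rend; j++) /* pairs with each right row    */
          printf("join row: left[%d] x right[%d] @ %lld\n", i, j, key);
      li = lend;
      ri = rend;
    } else if (lts[li] < rts[ri]) {
      li++; /* ascending order assumed, as in the asc branch above */
    } else {
      ri++;
    }
  }
  return 0;
}
```

The copies collected in createdBlocks are destroyed once the cross product has been emitted, which is why the real function tears down both location arrays and both block arrays on exit.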
diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c
index 9a82b194a983b7d3ea188046924298cf1563738e..ed1580ed911e107dc4a8c8dcdb6179c8b1d466e5 100644
--- a/source/libs/executor/src/timewindowoperator.c
+++ b/source/libs/executor/src/timewindowoperator.c
@@ -2098,9 +2098,11 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
SColumnInfoData* pDst = taosArrayGet(pResBlock->pDataBlock, dstSlot);
switch (pSliceInfo->fillType) {
- case TSDB_FILL_NULL:
+ case TSDB_FILL_NULL: {
colDataAppendNULL(pDst, rows);
+ pResBlock->info.rows += 1;
break;
+ }
case TSDB_FILL_SET_VALUE: {
SVariant* pVar = &pSliceInfo->pFillColInfo[j].fillVal;
@@ -2118,9 +2120,11 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
GET_TYPED_DATA(v, int64_t, pVar->nType, &pVar->i);
colDataAppend(pDst, rows, (char*)&v, false);
}
- } break;
+ pResBlock->info.rows += 1;
+ break;
+ }
- case TSDB_FILL_LINEAR:
+ case TSDB_FILL_LINEAR: {
#if 0
if (pCtx->start.key == INT64_MIN || pCtx->start.key > pCtx->startTs
|| pCtx->end.key == INT64_MIN || pCtx->end.key < pCtx->startTs) {
@@ -2151,17 +2155,22 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
}
}
#endif
+ // TODO: pResBlock->info.rows += 1;
break;
-
+ }
case TSDB_FILL_PREV: {
SGroupKeys* pkey = taosArrayGet(pSliceInfo->pPrevRow, srcSlot);
colDataAppend(pDst, rows, pkey->pData, false);
- } break;
+ pResBlock->info.rows += 1;
+ break;
+ }
case TSDB_FILL_NEXT: {
char* p = colDataGetData(pSrc, rowIndex);
colDataAppend(pDst, rows, p, colDataIsNull_s(pSrc, rowIndex));
- } break;
+ pResBlock->info.rows += 1;
+ break;
+ }
case TSDB_FILL_NONE:
default:
@@ -2169,7 +2178,6 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
}
}
- pResBlock->info.rows += 1;
}
static int32_t initPrevRowsKeeper(STimeSliceOperatorInfo* pInfo, SSDataBlock* pBlock) {
@@ -2221,6 +2229,8 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
SInterval* pInterval = &pSliceInfo->interval;
SOperatorInfo* downstream = pOperator->pDownstream[0];
+ blockDataCleanup(pResBlock);
+
int32_t numOfRows = 0;
while (1) {
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
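The timewindowoperator.c change moves the pResBlock->info.rows increment from the tail of genInterpolationResult into each fill branch that actually appends a value, so TSDB_FILL_NONE and the still-disabled TSDB_FILL_LINEAR path no longer inflate the row count; doTimeslice also now clears the result block before refilling it. A reduced sketch of the counting rule:

```c
/* Reduced sketch: only branches that append a value advance the counter. */
#include <stdio.h>

typedef enum { FILL_NULL, FILL_PREV, FILL_NONE } FillType;

static void fillOne(FillType type, int *pRows) {
  switch (type) {
    case FILL_NULL:
      /* colDataAppendNULL(pDst, rows) in the operator */
      *pRows += 1;
      break;
    case FILL_PREV:
      /* colDataAppend(pDst, rows, prevValue, false) */
      *pRows += 1;
      break;
    case FILL_NONE:
    default:
      break; /* nothing appended, nothing counted */
  }
}

int main(void) {
  int rows = 0;
  fillOne(FILL_NULL, &rows);
  fillOne(FILL_NONE, &rows);
  printf("rows = %d\n", rows); /* 1, not 2 */
  return 0;
}
```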
diff --git a/source/libs/function/inc/builtins.h b/source/libs/function/inc/builtins.h
index 256500ff8ce00e16fe0356da8fa1d7ce279e34ab..f5efcd520620c97e2dcec72e4a3c494124d23ff4 100644
--- a/source/libs/function/inc/builtins.h
+++ b/source/libs/function/inc/builtins.h
@@ -25,6 +25,7 @@ extern "C" {
typedef int32_t (*FTranslateFunc)(SFunctionNode* pFunc, char* pErrBuf, int32_t len);
typedef EFuncDataRequired (*FFuncDataRequired)(SFunctionNode* pFunc, STimeWindow* pTimeWindow);
typedef int32_t (*FCreateMergeFuncParameters)(SNodeList* pRawParameters, SNode* pPartialRes, SNodeList** pParameters);
+typedef EFuncDataRequired (*FFuncDynDataRequired)(void* pRes, STimeWindow* pTimeWindow);
typedef struct SBuiltinFuncDefinition {
const char* name;
@@ -32,6 +33,7 @@ typedef struct SBuiltinFuncDefinition {
uint64_t classification;
FTranslateFunc translateFunc;
FFuncDataRequired dataRequiredFunc;
+ FFuncDynDataRequired dynDataRequiredFunc;
FExecGetEnv getEnvFunc;
FExecInit initFunc;
FExecProcess processFunc;
diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c
index fb82ab206ccd1923955aff1e873ab7fe334b2848..200df6bc804fae3cbf487b090b28ee95e0c03704 100644
--- a/source/libs/function/src/builtinsimpl.c
+++ b/source/libs/function/src/builtinsimpl.c
@@ -612,8 +612,7 @@ int32_t sumFunction(SqlFunctionCtx* pCtx) {
SSumRes* pSumRes = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
if (IS_NULL_TYPE(type)) {
- GET_RES_INFO(pCtx)->isNullRes = 1;
- numOfElem = 1;
+ numOfElem = 0;
goto _sum_over;
}
@@ -1172,8 +1171,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
SMinmaxResInfo* pBuf = GET_ROWCELL_INTERBUF(pResInfo);
if (IS_NULL_TYPE(type)) {
- GET_RES_INFO(pCtx)->isNullRes = 1;
- numOfElems = 1;
+ numOfElems = 0;
goto _min_max_over;
}
diff --git a/source/libs/function/src/functionMgt.c b/source/libs/function/src/functionMgt.c
index c173522683c7403950d915a9d1fa2d82eb828d1f..020fd648e1e7a404c980a9576d5a6a73ac52487f 100644
--- a/source/libs/function/src/functionMgt.c
+++ b/source/libs/function/src/functionMgt.c
@@ -103,6 +103,13 @@ EFuncDataRequired fmFuncDataRequired(SFunctionNode* pFunc, STimeWindow* pTimeWin
return funcMgtBuiltins[pFunc->funcId].dataRequiredFunc(pFunc, pTimeWindow);
}
+EFuncDataRequired fmFuncDynDataRequired(int32_t funcId, void* pRes, STimeWindow* pTimeWindow) {
+ if (fmIsUserDefinedFunc(funcId) || funcId < 0 || funcId >= funcMgtBuiltinsNum) {
+ return TSDB_CODE_FAILED;
+ }
+ if (NULL == funcMgtBuiltins[funcId].dynDataRequiredFunc) {
+ return FUNC_DATA_REQUIRED_DATA_LOAD;
+ }
+ return funcMgtBuiltins[funcId].dynDataRequiredFunc(pRes, pTimeWindow);
+}
+
int32_t fmGetFuncExecFuncs(int32_t funcId, SFuncExecFuncs* pFpSet) {
if (fmIsUserDefinedFunc(funcId) || funcId < 0 || funcId >= funcMgtBuiltinsNum) {
return TSDB_CODE_FAILED;
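builtins.h and functionMgt.c introduce a second, result-aware hook next to dataRequiredFunc: dynDataRequiredFunc lets a builtin decide from partial results whether further blocks must be loaded. Most builtins will leave it unset, so the dispatcher guards for NULL and falls back to a conservative default. A generic sketch of an optional per-entry hook (toy types, not the TDengine definitions):

```c
/* Optional per-entry function pointer with a guarded dispatch. */
#include <stdio.h>

typedef int (*DynHook)(void *pRes);

typedef struct {
  const char *name;
  DynHook     dynHook; /* may be NULL for most entries */
} FuncDef;

static int defaultRequired(void) { return 1; /* e.g. "load the data" */ }

static int dynRequired(const FuncDef *defs, int n, int id, void *pRes) {
  if (id < 0 || id >= n) return -1;
  if (defs[id].dynHook == NULL) {
    return defaultRequired(); /* conservative default when unset */
  }
  return defs[id].dynHook(pRes);
}

static int countHook(void *pRes) { return *(int *)pRes > 0 ? 0 : 1; }

int main(void) {
  FuncDef defs[] = {{"sum", NULL}, {"count", countHook}};
  int res = 5;
  printf("%d %d\n", dynRequired(defs, 2, 0, &res), dynRequired(defs, 2, 1, &res));
  return 0;
}
```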
diff --git a/source/libs/index/src/indexFilter.c b/source/libs/index/src/indexFilter.c
index 7fc41b8dff3a6344186ce0bf82c83762b15d8a81..e1c8ac0204f77f6e4756cd05fba36141b911290f 100644
--- a/source/libs/index/src/indexFilter.c
+++ b/source/libs/index/src/indexFilter.c
@@ -579,11 +579,13 @@ static int32_t sifExecLogic(SLogicConditionNode *node, SIFCtx *ctx, SIFParam *ou
if (ctx->noExec == false) {
for (int32_t m = 0; m < node->pParameterList->length; m++) {
- // add impl later
if (node->condType == LOGIC_COND_TYPE_AND) {
taosArrayAddAll(output->result, params[m].result);
+ // taosArrayDestroy(params[m].result);
+ // params[m].result = NULL;
} else if (node->condType == LOGIC_COND_TYPE_OR) {
taosArrayAddAll(output->result, params[m].result);
+ // params[m].result = NULL;
} else if (node->condType == LOGIC_COND_TYPE_NOT) {
// taosArrayAddAll(output->result, params[m].result);
}
@@ -593,6 +595,8 @@ static int32_t sifExecLogic(SLogicConditionNode *node, SIFCtx *ctx, SIFParam *ou
} else {
for (int32_t m = 0; m < node->pParameterList->length; m++) {
output->status = sifMergeCond(node->condType, output->status, params[m].status);
+ taosArrayDestroy(params[m].result);
+ params[m].result = NULL;
}
}
_return:
@@ -607,6 +611,7 @@ static EDealRes sifWalkFunction(SNode *pNode, void *context) {
SIFCtx *ctx = context;
ctx->code = sifExecFunction(node, ctx, &output);
if (ctx->code != TSDB_CODE_SUCCESS) {
+ sifFreeParam(&output);
return DEAL_RES_ERROR;
}
@@ -624,6 +629,7 @@ static EDealRes sifWalkLogic(SNode *pNode, void *context) {
SIFCtx *ctx = context;
ctx->code = sifExecLogic(node, ctx, &output);
if (ctx->code) {
+ sifFreeParam(&output);
return DEAL_RES_ERROR;
}
@@ -640,6 +646,7 @@ static EDealRes sifWalkOper(SNode *pNode, void *context) {
SIFCtx *ctx = context;
ctx->code = sifExecOper(node, ctx, &output);
if (ctx->code) {
+ sifFreeParam(&output);
return DEAL_RES_ERROR;
}
if (taosHashPut(ctx->pRes, &pNode, POINTER_BYTES, &output, sizeof(output))) {
@@ -698,7 +705,11 @@ static int32_t sifCalculate(SNode *pNode, SIFParam *pDst) {
}
nodesWalkExprPostOrder(pNode, sifCalcWalker, &ctx);
- SIF_ERR_RET(ctx.code);
+
+ if (ctx.code != 0) {
+ sifFreeRes(ctx.pRes);
+ return ctx.code;
+ }
if (pDst) {
SIFParam *res = (SIFParam *)taosHashGet(ctx.pRes, (void *)&pNode, POINTER_BYTES);
@@ -714,8 +725,7 @@ static int32_t sifCalculate(SNode *pNode, SIFParam *pDst) {
taosHashRemove(ctx.pRes, (void *)&pNode, POINTER_BYTES);
}
sifFreeRes(ctx.pRes);
-
- SIF_RET(code);
+ return code;
}
static int32_t sifGetFltHint(SNode *pNode, SIdxFltStatus *status) {
@@ -732,8 +742,10 @@ static int32_t sifGetFltHint(SNode *pNode, SIdxFltStatus *status) {
}
nodesWalkExprPostOrder(pNode, sifCalcWalker, &ctx);
-
- SIF_ERR_RET(ctx.code);
+ if (ctx.code != 0) {
+ sifFreeRes(ctx.pRes);
+ return ctx.code;
+ }
SIFParam *res = (SIFParam *)taosHashGet(ctx.pRes, (void *)&pNode, POINTER_BYTES);
if (res == NULL) {
@@ -745,8 +757,7 @@ static int32_t sifGetFltHint(SNode *pNode, SIdxFltStatus *status) {
sifFreeParam(res);
taosHashRemove(ctx.pRes, (void *)&pNode, POINTER_BYTES);
taosHashCleanup(ctx.pRes);
-
- SIF_RET(code);
+ return code;
}
int32_t doFilterTag(SNode *pFilterNode, SIndexMetaArg *metaArg, SArray *result, SIdxFltStatus *status) {
@@ -760,7 +771,11 @@ int32_t doFilterTag(SNode *pFilterNode, SIndexMetaArg *metaArg, SArray *result,
SArray *output = taosArrayInit(8, sizeof(uint64_t));
SIFParam param = {.arg = *metaArg, .result = output};
- SIF_ERR_RET(sifCalculate((SNode *)pFilterNode, &param));
+ int32_t code = sifCalculate((SNode *)pFilterNode, &param);
+ if (code != 0) {
+ sifFreeParam(&param);
+ return code;
+ }
taosArrayAddAll(result, param.result);
sifFreeParam(&param);
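The indexFilter.c fixes all apply one rule: every failing path frees the SIFParam it produced — the walkers call sifFreeParam(&output) before returning DEAL_RES_ERROR, sifCalculate and sifGetFltHint release ctx.pRes when the walk failed, and doFilterTag frees its param when sifCalculate fails instead of returning through SIF_ERR_RET. A generic sketch of the produce-or-free contract:

```c
/* Error paths release what the step produced before returning. */
#include <stdlib.h>

typedef struct {
  int *result; /* heap-owned result set */
} Param;

static void freeParam(Param *p) {
  free(p->result);
  p->result = NULL;
}

static int execStep(Param *out, int fail) {
  out->result = malloc(64);
  if (out->result == NULL) return -1;
  if (fail) return -2; /* caller must free what was produced */
  return 0;
}

static int walkNode(int fail) {
  Param output = {0};
  int   code = execStep(&output, fail);
  if (code != 0) {
    freeParam(&output); /* mirror of sifFreeParam(&output) on error */
    return code;
  }
  /* success: ownership would move on (hash put in the real code) */
  freeParam(&output); /* for the sketch, just release */
  return 0;
}

int main(void) {
  return walkNode(1) == -2 ? 0 : 1;
}
```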
diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c
index 5279d015b438226fe6ceecc386213cd9faaf9b20..5fc94c2642b6d1bc7a9fc4fe90748b0346acef78 100644
--- a/source/libs/nodes/src/nodesCloneFuncs.c
+++ b/source/libs/nodes/src/nodesCloneFuncs.c
@@ -401,7 +401,8 @@ static int32_t logicVnodeModifCopy(const SVnodeModifyLogicNode* pSrc, SVnodeModi
COPY_SCALAR_FIELD(tableId);
COPY_SCALAR_FIELD(stableId);
COPY_SCALAR_FIELD(tableType);
- COPY_CHAR_ARRAY_FIELD(tableFName);
+ COPY_CHAR_ARRAY_FIELD(tableName);
+ COPY_CHAR_ARRAY_FIELD(tsColName);
COPY_OBJECT_FIELD(deleteTimeRange, sizeof(STimeWindow));
CLONE_OBJECT_FIELD(pVgroupList, vgroupsInfoClone);
CLONE_NODE_LIST_FIELD(pInsertCols);
diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c
index 9722d1fc1072979aedbb8073ac73cc817cd962c2..af3f0c242b45aa19da29ccd37132b20d83582cfd 100644
--- a/source/libs/nodes/src/nodesCodeFuncs.c
+++ b/source/libs/nodes/src/nodesCodeFuncs.c
@@ -2339,7 +2339,7 @@ static int32_t physiQueryInsertNodeToJson(const void* pObj, SJson* pJson) {
code = tjsonAddIntegerToObject(pJson, jkQueryInsertPhysiPlanTableType, pNode->tableType);
}
if (TSDB_CODE_SUCCESS == code) {
- code = tjsonAddStringToObject(pJson, jkQueryInsertPhysiPlanTableFName, pNode->tableFName);
+ code = tjsonAddStringToObject(pJson, jkQueryInsertPhysiPlanTableFName, pNode->tableName);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkQueryInsertPhysiPlanVgId, pNode->vgId);
@@ -2368,7 +2368,7 @@ static int32_t jsonToPhysiQueryInsertNode(const SJson* pJson, void* pObj) {
code = tjsonGetTinyIntValue(pJson, jkQueryInsertPhysiPlanTableType, &pNode->tableType);
}
if (TSDB_CODE_SUCCESS == code) {
- code = tjsonGetStringValue(pJson, jkQueryInsertPhysiPlanTableFName, pNode->tableFName);
+ code = tjsonGetStringValue(pJson, jkQueryInsertPhysiPlanTableFName, pNode->tableName);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetIntValue(pJson, jkQueryInsertPhysiPlanVgId, &pNode->vgId);
@@ -2383,6 +2383,7 @@ static int32_t jsonToPhysiQueryInsertNode(const SJson* pJson, void* pObj) {
static const char* jkDeletePhysiPlanTableId = "TableId";
static const char* jkDeletePhysiPlanTableType = "TableType";
static const char* jkDeletePhysiPlanTableFName = "TableFName";
+static const char* jkDeletePhysiPlanTsColName = "TsColName";
static const char* jkDeletePhysiPlanDeleteTimeRangeStartKey = "DeleteTimeRangeStartKey";
static const char* jkDeletePhysiPlanDeleteTimeRangeEndKey = "DeleteTimeRangeEndKey";
static const char* jkDeletePhysiPlanAffectedRows = "AffectedRows";
@@ -2400,6 +2401,9 @@ static int32_t physiDeleteNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddStringToObject(pJson, jkDeletePhysiPlanTableFName, pNode->tableFName);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddStringToObject(pJson, jkDeletePhysiPlanTsColName, pNode->tsColName);
+ }
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkDeletePhysiPlanDeleteTimeRangeStartKey, pNode->deleteTimeRange.skey);
}
@@ -2426,6 +2430,9 @@ static int32_t jsonToPhysiDeleteNode(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetStringValue(pJson, jkDeletePhysiPlanTableFName, pNode->tableFName);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetStringValue(pJson, jkDeletePhysiPlanTsColName, pNode->tsColName);
+ }
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBigIntValue(pJson, jkDeletePhysiPlanDeleteTimeRangeStartKey, &pNode->deleteTimeRange.skey);
}
diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c
index 30e3b676df5b24080561b51d09741fe7a5064861..d405b750037e980ea05263d4e5aaa457f195d62f 100644
--- a/source/libs/planner/src/planLogicCreater.c
+++ b/source/libs/planner/src/planLogicCreater.c
@@ -1292,8 +1292,8 @@ static int32_t createVnodeModifLogicNodeByDelete(SLogicPlanContext* pCxt, SDelet
pModify->modifyType = MODIFY_TABLE_TYPE_DELETE;
pModify->tableId = pRealTable->pMeta->uid;
pModify->tableType = pRealTable->pMeta->tableType;
- snprintf(pModify->tableFName, sizeof(pModify->tableFName), "%d.%s.%s", pCxt->pPlanCxt->acctId,
- pRealTable->table.dbName, pRealTable->table.tableName);
+ snprintf(pModify->tableName, sizeof(pModify->tableName), "%s", pRealTable->table.tableName);
+ strcpy(pModify->tsColName, pRealTable->pMeta->schema->name);
pModify->deleteTimeRange = pDelete->timeRange;
pModify->pAffectedRows = nodesCloneNode(pDelete->pCountFunc);
if (NULL == pModify->pAffectedRows) {
@@ -1342,8 +1342,7 @@ static int32_t createVnodeModifLogicNodeByInsert(SLogicPlanContext* pCxt, SInser
pModify->tableId = pRealTable->pMeta->uid;
pModify->stableId = pRealTable->pMeta->suid;
pModify->tableType = pRealTable->pMeta->tableType;
- snprintf(pModify->tableFName, sizeof(pModify->tableFName), "%d.%s.%s", pCxt->pPlanCxt->acctId,
- pRealTable->table.dbName, pRealTable->table.tableName);
+ snprintf(pModify->tableName, sizeof(pModify->tableName), "%s", pRealTable->table.tableName);
TSWAP(pModify->pVgroupList, pRealTable->pVgroupList);
pModify->pInsertCols = nodesCloneList(pInsert->pCols);
if (NULL == pModify->pInsertCols) {
diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c
index 5dc95a0f344e11df8fa955c72bb8728e7afee529..2e5c4255e68612a10968118ae8ad339e550632e6 100644
--- a/source/libs/planner/src/planPhysiCreater.c
+++ b/source/libs/planner/src/planPhysiCreater.c
@@ -1588,7 +1588,7 @@ static int32_t createQueryInserter(SPhysiPlanContext* pCxt, SVnodeModifyLogicNod
pInserter->tableId = pModify->tableId;
pInserter->stableId = pModify->stableId;
pInserter->tableType = pModify->tableType;
- strcpy(pInserter->tableFName, pModify->tableFName);
+ strcpy(pInserter->tableName, pModify->tableName);
pInserter->vgId = pModify->pVgroupList->vgroups[0].vgId;
pInserter->epSet = pModify->pVgroupList->vgroups[0].epSet;
vgroupInfoToNodeAddr(pModify->pVgroupList->vgroups, &pSubplan->execNode);
@@ -1638,7 +1638,8 @@ static int32_t createDataDeleter(SPhysiPlanContext* pCxt, SVnodeModifyLogicNode*
pDeleter->tableId = pModify->tableId;
pDeleter->tableType = pModify->tableType;
- strcpy(pDeleter->tableFName, pModify->tableFName);
+ strcpy(pDeleter->tableFName, pModify->tableName);
+ strcpy(pDeleter->tsColName, pModify->tsColName);
pDeleter->deleteTimeRange = pModify->deleteTimeRange;
int32_t code = setNodeSlotId(pCxt, pRoot->pOutputDataBlockDesc->dataBlockId, -1, pModify->pAffectedRows,
diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c
index df57d0fef11d96ed10cb4c3def0f4786d3f4e18d..d1f8a50dab556067dbc2bfa143a75c8ee2dda9c2 100644
--- a/source/libs/qworker/src/qworker.c
+++ b/source/libs/qworker/src/qworker.c
@@ -283,7 +283,8 @@ int32_t qwGetDeleteResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, SDeleteRes *pRes
pRes->skey = pDelRes->skey;
pRes->ekey = pDelRes->ekey;
pRes->affectedRows = pDelRes->affectedRows;
- strcpy(pRes->tableFName, pDelRes->tableFName);
+ strcpy(pRes->tableFName, pDelRes->tableName);
+ strcpy(pRes->tsColName, pDelRes->tsColName);
taosMemoryFree(output.pData);
return TSDB_CODE_SUCCESS;
diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c
index 2e33632f12f9af3bf1d39bafb489954385ae57f3..834a3af0d5ac454cfbd5143968a9a9f90b5ba037 100644
--- a/source/libs/stream/src/streamDispatch.c
+++ b/source/libs/stream/src/streamDispatch.c
@@ -227,6 +227,8 @@ int32_t streamDispatchOneReq(SStreamTask* pTask, const SStreamDispatchReq* pReq,
msg.pCont = buf;
msg.msgType = pTask->dispatchMsgType;
+ qDebug("dispatch from task %d to task %d node %d", pTask->taskId, pReq->taskId, vgId);
+
tmsgSendReq(pEpSet, &msg);
code = 0;
@@ -281,8 +283,10 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat
return code;
} else if (pTask->dispatchType == TASK_DISPATCH__SHUFFLE) {
- SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos;
- ASSERT(pTask->shuffleDispatcher.waitingRspCnt == 0);
+ int32_t rspCnt = atomic_load_32(&pTask->shuffleDispatcher.waitingRspCnt);
+ ASSERT(rspCnt == 0);
+
+ SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos;
int32_t vgSz = taosArrayGetSize(vgInfo);
SStreamDispatchReq* pReqs = taosMemoryCalloc(vgSz, sizeof(SStreamDispatchReq));
if (pReqs == NULL) {
@@ -301,7 +305,10 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat
if (pReqs[i].data == NULL || pReqs[i].dataLen == NULL) {
goto FAIL_SHUFFLE_DISPATCH;
}
+ SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, i);
+ pReqs[i].taskId = pVgInfo->taskId;
}
+
for (int32_t i = 0; i < blockNum; i++) {
SSDataBlock* pDataBlock = taosArrayGet(pData->blocks, i);
char* ctbName = buildCtbNameByGroupId(pTask->shuffleDispatcher.stbFullName, pDataBlock->info.groupId);
@@ -309,6 +316,9 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat
// TODO: get hash function by hashMethod
uint32_t hashValue = MurmurHash3_32(ctbName, strlen(ctbName));
+ taosMemoryFree(ctbName);
+
+ bool found = false;
// TODO: optimize search
int32_t j;
for (j = 0; j < vgSz; j++) {
@@ -318,12 +328,17 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat
if (streamAddBlockToDispatchMsg(pDataBlock, &pReqs[j]) < 0) {
goto FAIL_SHUFFLE_DISPATCH;
}
- pReqs[j].taskId = pVgInfo->taskId;
+ if (pReqs[j].blockNum == 0) {
+ atomic_add_fetch_32(&pTask->shuffleDispatcher.waitingRspCnt, 1);
+ }
pReqs[j].blockNum++;
+ found = true;
break;
}
}
+ ASSERT(found);
}
+
for (int32_t i = 0; i < vgSz; i++) {
if (pReqs[i].blockNum > 0) {
// send
@@ -331,7 +346,6 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat
if (streamDispatchOneReq(pTask, &pReqs[i], pVgInfo->vgId, &pVgInfo->epSet) < 0) {
goto FAIL_SHUFFLE_DISPATCH;
}
- pTask->shuffleDispatcher.waitingRspCnt++;
}
}
code = 0;
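The streamDispatch.c fix changes when waitingRspCnt is counted: instead of bumping it once per send (which miscounted vgroups that received no block), the counter is read and updated atomically and incremented exactly once per vgroup, at the moment its first block is routed to it. A sketch with C11 atomics:

```c
/* Count one expected response per destination that gets >= 1 block. */
#include <stdatomic.h>
#include <stdio.h>

#define NUM_VG 4

typedef struct {
  int blockNum;
} DispatchReq;

static atomic_int waitingRspCnt;

static void routeBlock(DispatchReq *reqs, int vg) {
  if (reqs[vg].blockNum == 0) {
    /* first block for this vgroup: exactly one response expected */
    atomic_fetch_add(&waitingRspCnt, 1);
  }
  reqs[vg].blockNum++;
}

int main(void) {
  DispatchReq reqs[NUM_VG] = {0};
  routeBlock(reqs, 1);
  routeBlock(reqs, 1); /* same vgroup: counter unchanged */
  routeBlock(reqs, 3);
  printf("waiting for %d responses\n", atomic_load(&waitingRspCnt)); /* 2 */
  return 0;
}
```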
diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c
index f782de95b9aca59e5113494ce0bf47c8ed513330..7c5cd6e391651883a58d51ca64016b0ea3648dee 100644
--- a/source/libs/stream/src/streamExec.c
+++ b/source/libs/stream/src/streamExec.c
@@ -22,22 +22,22 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, void* data, SArray* pRes)
SStreamQueueItem* pItem = (SStreamQueueItem*)data;
if (pItem->type == STREAM_INPUT__GET_RES) {
SStreamTrigger* pTrigger = (SStreamTrigger*)data;
- qSetMultiStreamInput(exec, pTrigger->pBlock, 1, STREAM_INPUT__DATA_BLOCK, false);
+ qSetMultiStreamInput(exec, pTrigger->pBlock, 1, STREAM_INPUT__DATA_BLOCK);
} else if (pItem->type == STREAM_INPUT__DATA_SUBMIT) {
ASSERT(pTask->isDataScan);
SStreamDataSubmit* pSubmit = (SStreamDataSubmit*)data;
qDebug("task %d %p set submit input %p %p %d 1", pTask->taskId, pTask, pSubmit, pSubmit->data, *pSubmit->dataRef);
- qSetStreamInput(exec, pSubmit->data, STREAM_INPUT__DATA_SUBMIT, false);
+ qSetMultiStreamInput(exec, pSubmit->data, 1, STREAM_INPUT__DATA_SUBMIT);
} else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE) {
SStreamDataBlock* pBlock = (SStreamDataBlock*)data;
SArray* blocks = pBlock->blocks;
qDebug("task %d %p set ssdata input", pTask->taskId, pTask);
- qSetMultiStreamInput(exec, blocks->pData, blocks->size, STREAM_INPUT__DATA_BLOCK, false);
+ qSetMultiStreamInput(exec, blocks->pData, blocks->size, STREAM_INPUT__DATA_BLOCK);
} else if (pItem->type == STREAM_INPUT__MERGED_SUBMIT) {
SStreamMergedSubmit* pMerged = (SStreamMergedSubmit*)data;
SArray* blocks = pMerged->reqs;
qDebug("task %d %p set submit input (merged), batch num: %d", pTask->taskId, pTask, (int32_t)blocks->size);
- qSetMultiStreamInput(exec, blocks->pData, blocks->size, STREAM_INPUT__MERGED_SUBMIT, false);
+ qSetMultiStreamInput(exec, blocks->pData, blocks->size, STREAM_INPUT__MERGED_SUBMIT);
} else {
ASSERT(0);
}
diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c
index 5921e44a9c51cc650d122e85e75b98ec0531acac..216e3fa761e910cee7749b4aabf0f4bc7b708b8b 100644
--- a/source/libs/stream/src/streamTask.c
+++ b/source/libs/stream/src/streamTask.c
@@ -64,7 +64,7 @@ int32_t tEncodeSStreamTask(SEncoder* pEncoder, const SStreamTask* pTask) {
if (tEncodeI32(pEncoder, pTask->selfChildId) < 0) return -1;
if (tEncodeI32(pEncoder, pTask->nodeId) < 0) return -1;
if (tEncodeSEpSet(pEncoder, &pTask->epSet) < 0) return -1;
- if (tEncodeI32(pEncoder, pTask->numOfVgroups) < 0) return -1;
+ /*if (tEncodeI32(pEncoder, pTask->numOfVgroups) < 0) return -1;*/
int32_t epSz = taosArrayGetSize(pTask->childEpInfo);
if (tEncodeI32(pEncoder, epSz) < 0) return -1;
@@ -119,7 +119,7 @@ int32_t tDecodeSStreamTask(SDecoder* pDecoder, SStreamTask* pTask) {
if (tDecodeI32(pDecoder, &pTask->selfChildId) < 0) return -1;
if (tDecodeI32(pDecoder, &pTask->nodeId) < 0) return -1;
if (tDecodeSEpSet(pDecoder, &pTask->epSet) < 0) return -1;
- if (tDecodeI32(pDecoder, &pTask->numOfVgroups) < 0) return -1;
+ /*if (tDecodeI32(pDecoder, &pTask->numOfVgroups) < 0) return -1;*/
int32_t epSz;
if (tDecodeI32(pDecoder, &epSz) < 0) return -1;
diff --git a/source/libs/tdb/src/db/tdbPage.c b/source/libs/tdb/src/db/tdbPage.c
index 7a70b621c6f65db2eb58dbc84c48ecbfe087c94a..276b06b147586bbf18fe73f94cdb2592032d97e2 100644
--- a/source/libs/tdb/src/db/tdbPage.c
+++ b/source/libs/tdb/src/db/tdbPage.c
@@ -76,14 +76,17 @@ int tdbPageDestroy(SPage *pPage, void (*xFree)(void *arg, void *ptr), void *arg)
ASSERT(xFree);
+ for (int iOvfl = 0; iOvfl < pPage->nOverflow; iOvfl++) {
+ tdbOsFree(pPage->apOvfl[iOvfl]);
+ }
+
ptr = pPage->pData;
xFree(arg, ptr);
return 0;
}
-void tdbPageZero(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell *, int,
- TXN *, SBTree *pBt)) {
+void tdbPageZero(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell *, int, TXN *, SBTree *pBt)) {
pPage->pPageHdr = pPage->pData + szAmHdr;
TDB_PAGE_NCELLS_SET(pPage, 0);
TDB_PAGE_CCELLS_SET(pPage, pPage->pageSize - sizeof(SPageFtr));
@@ -99,8 +102,7 @@ void tdbPageZero(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell
ASSERT((u8 *)pPage->pPageFtr == pPage->pFreeEnd);
}
-void tdbPageInit(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell *, int,
- TXN *, SBTree *pBt)) {
+void tdbPageInit(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell *, int, TXN *, SBTree *pBt)) {
pPage->pPageHdr = pPage->pData + szAmHdr;
pPage->pCellIdx = pPage->pPageHdr + TDB_PAGE_HDR_SIZE(pPage);
pPage->pFreeStart = pPage->pCellIdx + TDB_PAGE_OFFSET_SIZE(pPage) * TDB_PAGE_NCELLS(pPage);
@@ -124,9 +126,8 @@ int tdbPageInsertCell(SPage *pPage, int idx, SCell *pCell, int szCell, u8 asOvfl
nFree = TDB_PAGE_NFREE(pPage);
nCells = TDB_PAGE_NCELLS(pPage);
- iOvfl = 0;
- for (; iOvfl < pPage->nOverflow; iOvfl++) {
+ for (iOvfl = 0; iOvfl < pPage->nOverflow; ++iOvfl) {
if (pPage->aiOvfl[iOvfl] >= idx) {
break;
}
@@ -146,6 +147,8 @@ int tdbPageInsertCell(SPage *pPage, int idx, SCell *pCell, int szCell, u8 asOvfl
pNewCell = (SCell *)tdbOsMalloc(szCell);
memcpy(pNewCell, pCell, szCell);
+ tdbDebug("tdbPage/new ovfl cell: %p", pNewCell);
+
pPage->apOvfl[iOvfl] = pNewCell;
pPage->aiOvfl[iOvfl] = idx;
pPage->nOverflow++;
@@ -193,6 +196,8 @@ int tdbPageDropCell(SPage *pPage, int idx, TXN *pTxn, SBTree *pBt) {
for (; iOvfl < pPage->nOverflow; iOvfl++) {
if (pPage->aiOvfl[iOvfl] == idx) {
// remove the over flow cell
+ tdbDebug("tdbPage/free ovfl cell: %p", pPage->apOvfl[iOvfl]);
+ tdbOsFree(pPage->apOvfl[iOvfl]);
for (; (++iOvfl) < pPage->nOverflow;) {
pPage->aiOvfl[iOvfl - 1] = pPage->aiOvfl[iOvfl] - 1;
pPage->apOvfl[iOvfl - 1] = pPage->apOvfl[iOvfl];
@@ -248,7 +253,7 @@ void tdbPageCopy(SPage *pFromPage, SPage *pToPage) {
int tdbPageCapacity(int pageSize, int amHdrSize) {
int szPageHdr;
- int minCellIndexSize; // at least one cell in cell index
+ int minCellIndexSize; // at least one cell in cell index
if (pageSize < 65536) {
szPageHdr = pageMethods.szPageHdr;
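The tdbPage.c hunks settle ownership of overflow cells: each apOvfl entry is a tdbOsMalloc'd copy made at insert time, so it must be freed when the cell is dropped and, as the first hunk adds, when the page itself is destroyed — previously those copies leaked. The shape of the fix in plain C:

```c
/* A container that heap-allocates entries must free them on destroy. */
#include <stdlib.h>
#include <string.h>

#define MAX_OVFL 4

typedef struct {
  char *apOvfl[MAX_OVFL]; /* heap copies owned by the page */
  int   nOverflow;
} Page;

static int pageInsertOvfl(Page *p, const char *cell, size_t sz) {
  if (p->nOverflow >= MAX_OVFL) return -1;
  char *copy = malloc(sz);
  if (copy == NULL) return -1;
  memcpy(copy, cell, sz);
  p->apOvfl[p->nOverflow++] = copy;
  return 0;
}

static void pageDestroy(Page *p) {
  for (int i = 0; i < p->nOverflow; ++i) {
    free(p->apOvfl[i]); /* the fix: cells freed with the page */
  }
  p->nOverflow = 0;
}

int main(void) {
  Page page = {0};
  pageInsertOvfl(&page, "cell", 5);
  pageDestroy(&page);
  return 0;
}
```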
diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c
index 064c110a9f871582426ec1deb1e666a3760253b6..efbe110f6f224b2740281dadf420b1e551fb4c2c 100644
--- a/source/libs/transport/src/transCli.c
+++ b/source/libs/transport/src/transCli.c
@@ -1,5 +1,4 @@
/** Copyright (c) 2019 TAOS Data, Inc.
-
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
@@ -16,6 +15,10 @@
#ifdef USE_UV
#include "transComm.h"
+typedef struct SConnList {
+ queue conn;
+} SConnList;
+
typedef struct SCliConn {
T_REF_DECLARE()
uv_connect_t connReq;
@@ -26,7 +29,9 @@ typedef struct SCliConn {
SConnBuffer readBuf;
STransQueue cliMsgs;
- queue q;
+
+ queue q;
+ SConnList* list;
STransCtx ctx;
bool broken; // link broken or not
@@ -56,13 +61,14 @@ typedef struct SCliMsg {
} SCliMsg;
typedef struct SCliThrd {
- TdThread thread; // tid
- int64_t pid; // pid
- uv_loop_t* loop;
- SAsyncPool* asyncPool;
- uv_idle_t* idle;
- uv_timer_t timer;
- void* pool; // conn pool
+ TdThread thread; // tid
+ int64_t pid; // pid
+ uv_loop_t* loop;
+ SAsyncPool* asyncPool;
+ uv_idle_t* idle;
+ uv_prepare_t* prepare;
+ uv_timer_t timer;
+ void* pool; // conn pool
// msg queue
queue msg;
@@ -86,10 +92,6 @@ typedef struct SCliObj {
SCliThrd** pThreadObj;
} SCliObj;
-typedef struct SConnList {
- queue conn;
-} SConnList;
-
// conn pool
// add expire timeout and capacity limit
static void* createConnPool(int size);
@@ -101,7 +103,7 @@ static void doCloseIdleConn(void* param);
static int sockDebugInfo(struct sockaddr* sockname, char* dst) {
struct sockaddr_in addr = *(struct sockaddr_in*)sockname;
- char buf[20] = {0};
+ char buf[16] = {0};
int r = uv_ip4_name(&addr, (char*)buf, sizeof(buf));
sprintf(dst, "%s:%d", buf, ntohs(addr.sin_port));
return r;
@@ -118,6 +120,9 @@ static void cliSendCb(uv_write_t* req, int status);
static void cliConnCb(uv_connect_t* req, int status);
static void cliAsyncCb(uv_async_t* handle);
static void cliIdleCb(uv_idle_t* handle);
+static void cliPrepareCb(uv_prepare_t* handle);
+
+static int32_t allocConnRef(SCliConn* conn, bool update);
static int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg);
@@ -198,7 +203,7 @@ static void cliReleaseUnfinishedMsg(SCliConn* conn) {
pThrd = (SCliThrd*)(exh)->pThrd; \
} \
} while (0)
-#define CONN_PERSIST_TIME(para) ((para) == 0 ? 3 * 1000 : (para))
+#define CONN_PERSIST_TIME(para) ((para) <= 90000 ? 90000 : (para))
#define CONN_GET_HOST_THREAD(conn) (conn ? ((SCliConn*)conn)->hostThrd : NULL)
#define CONN_GET_INST_LABEL(conn) (((STrans*)(((SCliThrd*)(conn)->hostThrd)->pTransInst))->label)
#define CONN_SHOULD_RELEASE(conn, head) \
@@ -499,9 +504,8 @@ void* destroyConnPool(void* pool) {
}
static SCliConn* getConnFromPool(void* pool, char* ip, uint32_t port) {
- char key[128] = {0};
+ char key[32] = {0};
CONN_CONSTRUCT_HASH_KEY(key, ip, port);
-
SHashObj* pPool = pool;
SConnList* plist = taosHashGet(pPool, key, strlen(key));
if (plist == NULL) {
@@ -519,13 +523,44 @@ static SCliConn* getConnFromPool(void* pool, char* ip, uint32_t port) {
conn->status = ConnNormal;
QUEUE_REMOVE(&conn->q);
QUEUE_INIT(&conn->q);
- assert(h == &conn->q);
transDQCancel(((SCliThrd*)conn->hostThrd)->timeoutQueue, conn->task);
conn->task = NULL;
return conn;
}
+static void addConnToPool(void* pool, SCliConn* conn) {
+ if (conn->status == ConnInPool) {
+ return;
+ }
+ SCliThrd* thrd = conn->hostThrd;
+ CONN_HANDLE_THREAD_QUIT(thrd);
+
+ allocConnRef(conn, true);
+
+ STrans* pTransInst = thrd->pTransInst;
+ cliReleaseUnfinishedMsg(conn);
+ transQueueClear(&conn->cliMsgs);
+ transCtxCleanup(&conn->ctx);
+ conn->status = ConnInPool;
+
+ if (conn->list == NULL) {
+ char key[32] = {0};
+ CONN_CONSTRUCT_HASH_KEY(key, conn->ip, conn->port);
+ tTrace("%s conn %p added to conn pool, read buf cap:%d", CONN_GET_INST_LABEL(conn), conn, conn->readBuf.cap);
+ conn->list = taosHashGet((SHashObj*)pool, key, strlen(key));
+ }
+ assert(conn->list != NULL);
+ QUEUE_INIT(&conn->q);
+ QUEUE_PUSH(&conn->list->conn, &conn->q);
+
+ assert(!QUEUE_IS_EMPTY(&conn->list->conn));
+
+ STaskArg* arg = taosMemoryCalloc(1, sizeof(STaskArg));
+ arg->param1 = conn;
+ arg->param2 = thrd;
+ conn->task = transDQSched(thrd->timeoutQueue, doCloseIdleConn, arg, CONN_PERSIST_TIME(pTransInst->idleTime));
+}
static int32_t allocConnRef(SCliConn* conn, bool update) {
if (update) {
transRemoveExHandle(transGetRefMgt(), conn->refId);
@@ -556,38 +591,6 @@ static int32_t specifyConnRef(SCliConn* conn, bool update, int64_t handle) {
return 0;
}
-static void addConnToPool(void* pool, SCliConn* conn) {
- if (conn->status == ConnInPool) {
- return;
- }
- SCliThrd* thrd = conn->hostThrd;
- CONN_HANDLE_THREAD_QUIT(thrd);
-
- allocConnRef(conn, true);
-
- STrans* pTransInst = thrd->pTransInst;
- cliReleaseUnfinishedMsg(conn);
- transQueueClear(&conn->cliMsgs);
- transCtxCleanup(&conn->ctx);
- conn->status = ConnInPool;
-
- char key[128] = {0};
- CONN_CONSTRUCT_HASH_KEY(key, conn->ip, conn->port);
- tTrace("%s conn %p added to conn pool, read buf cap:%d", CONN_GET_INST_LABEL(conn), conn, conn->readBuf.cap);
-
- SConnList* plist = taosHashGet((SHashObj*)pool, key, strlen(key));
- // list already create before
- assert(plist != NULL);
- QUEUE_INIT(&conn->q);
- QUEUE_PUSH(&plist->conn, &conn->q);
-
- assert(!QUEUE_IS_EMPTY(&plist->conn));
-
- STaskArg* arg = taosMemoryCalloc(1, sizeof(STaskArg));
- arg->param1 = conn;
- arg->param2 = thrd;
- conn->task = transDQSched(thrd->timeoutQueue, doCloseIdleConn, arg, CONN_PERSIST_TIME(pTransInst->idleTime));
-}
static void cliAllocRecvBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf) {
SCliConn* conn = handle->data;
SConnBuffer* pBuf = &conn->readBuf;
@@ -602,11 +605,9 @@ static void cliRecvCb(uv_stream_t* handle, ssize_t nread, const uv_buf_t* buf) {
SConnBuffer* pBuf = &conn->readBuf;
if (nread > 0) {
pBuf->len += nread;
- if (transReadComplete(pBuf)) {
+ while (transReadComplete(pBuf)) {
tTrace("%s conn %p read complete", CONN_GET_INST_LABEL(conn), conn);
cliHandleResp(conn);
- } else {
- tTrace("%s conn %p read partial packet, continue to read", CONN_GET_INST_LABEL(conn), conn);
}
return;
}
@@ -967,6 +968,62 @@ static void cliAsyncCb(uv_async_t* handle) {
static void cliIdleCb(uv_idle_t* handle) {
SCliThrd* thrd = handle->data;
tTrace("do idle work");
+
+ SAsyncPool* pool = thrd->asyncPool;
+ for (int i = 0; i < pool->nAsync; i++) {
+ uv_async_t* async = &(pool->asyncs[i]);
+ SAsyncItem* item = async->data;
+
+ queue wq;
+ taosThreadMutexLock(&item->mtx);
+ QUEUE_MOVE(&item->qmsg, &wq);
+ taosThreadMutexUnlock(&item->mtx);
+
+ int count = 0;
+ while (!QUEUE_IS_EMPTY(&wq)) {
+ queue* h = QUEUE_HEAD(&wq);
+ QUEUE_REMOVE(h);
+
+ SCliMsg* pMsg = QUEUE_DATA(h, SCliMsg, q);
+ if (pMsg == NULL) {
+ continue;
+ }
+ (*cliAsyncHandle[pMsg->type])(pMsg, thrd);
+ count++;
+ }
+ }
+ tTrace("prepare work end");
+ if (thrd->stopMsg != NULL) cliHandleQuit(thrd->stopMsg, thrd);
+}
+static void cliPrepareCb(uv_prepare_t* handle) {
+ SCliThrd* thrd = handle->data;
+ tTrace("prepare work start");
+
+ SAsyncPool* pool = thrd->asyncPool;
+ for (int i = 0; i < pool->nAsync; i++) {
+ uv_async_t* async = &(pool->asyncs[i]);
+ SAsyncItem* item = async->data;
+
+ queue wq;
+ taosThreadMutexLock(&item->mtx);
+ QUEUE_MOVE(&item->qmsg, &wq);
+ taosThreadMutexUnlock(&item->mtx);
+
+ int count = 0;
+ while (!QUEUE_IS_EMPTY(&wq)) {
+ queue* h = QUEUE_HEAD(&wq);
+ QUEUE_REMOVE(h);
+
+ SCliMsg* pMsg = QUEUE_DATA(h, SCliMsg, q);
+ if (pMsg == NULL) {
+ continue;
+ }
+ (*cliAsyncHandle[pMsg->type])(pMsg, thrd);
+ count++;
+ }
+ }
+ tTrace("prepare work end");
+ if (thrd->stopMsg != NULL) cliHandleQuit(thrd->stopMsg, thrd);
}
static void* cliWorkThread(void* arg) {
@@ -1033,7 +1090,12 @@ static SCliThrd* createThrdObj() {
// pThrd->idle = taosMemoryCalloc(1, sizeof(uv_idle_t));
// uv_idle_init(pThrd->loop, pThrd->idle);
// pThrd->idle->data = pThrd;
- // uv_idle_start(pThrd->idle, cliIdleCb);
+ // uv_idle_start(pThrd->idle, cliIdleCb);
+
+ pThrd->prepare = taosMemoryCalloc(1, sizeof(uv_prepare_t));
+ uv_prepare_init(pThrd->loop, pThrd->prepare);
+ pThrd->prepare->data = pThrd;
+ uv_prepare_start(pThrd->prepare, cliPrepareCb);
pThrd->pool = createConnPool(4);
transDQCreate(pThrd->loop, &pThrd->delayQueue);
@@ -1058,6 +1120,7 @@ static void destroyThrdObj(SCliThrd* pThrd) {
transDQDestroy(pThrd->timeoutQueue, NULL);
taosMemoryFree(pThrd->idle);
+ taosMemoryFree(pThrd->prepare);
taosMemoryFree(pThrd->loop);
taosMemoryFree(pThrd);
}
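Both transport threads in this patch swap their (already commented-out) idle handle for a uv_prepare_t. In libuv, a prepare callback runs once per loop iteration, immediately before the loop blocks to poll for I/O, so messages pushed via the async pool are drained at a predictable point in each iteration. A minimal libuv program showing the handle lifecycle (assumes libuv is installed; the timer exists only to keep waking the loop, the way uv_async_send does in the transport code):

```c
/* Minimal uv_prepare_t demo: the callback fires once per loop iteration. */
#include <stdio.h>
#include <uv.h>

static int          ticks = 0;
static uv_prepare_t prepare;

static void prepareCb(uv_prepare_t *handle) {
  /* transCli/transSvr drain their cross-thread queues here */
  (void)handle;
  printf("prepare tick %d\n", ++ticks);
}

static void timerCb(uv_timer_t *timer) {
  static int fired = 0;
  if (++fired == 3) {
    uv_timer_stop(timer);
    uv_prepare_stop(&prepare); /* no active handles left: loop exits */
  }
}

int main(void) {
  uv_loop_t *loop = uv_default_loop();
  uv_timer_t timer;
  uv_prepare_init(loop, &prepare);
  uv_prepare_start(&prepare, prepareCb);
  uv_timer_init(loop, &timer);
  uv_timer_start(&timer, timerCb, 1, 1); /* wakes the loop each ms */
  uv_run(loop, UV_RUN_DEFAULT);
  return 0;
}
```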
diff --git a/source/libs/transport/src/transComm.c b/source/libs/transport/src/transComm.c
index c99effb26f4628986e9c3be266219ff371e8ff93..8cf525a506fe856876f2e8577e6f97dedbdb8d26 100644
--- a/source/libs/transport/src/transComm.c
+++ b/source/libs/transport/src/transComm.c
@@ -120,8 +120,9 @@ int transInitBuffer(SConnBuffer* buf) {
buf->total = 0;
return 0;
}
-int transDestroyBuffer(SConnBuffer* buf) {
- taosMemoryFree(buf->buf);
+int transDestroyBuffer(SConnBuffer* p) {
+ taosMemoryFree(p->buf);
+ p->buf = NULL;
return 0;
}
diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c
index a97e0b53c1c5c4b443ddabe4061885bc64ee824d..4b579a1f9527bde98f9ac4d21723ed9d17e965d6 100644
--- a/source/libs/transport/src/transSvr.c
+++ b/source/libs/transport/src/transSvr.c
@@ -73,6 +73,7 @@ typedef struct SWorkThrd {
uv_os_fd_t fd;
uv_loop_t* loop;
SAsyncPool* asyncPool;
+ uv_prepare_t* prepare;
queue msg;
TdThreadMutex msgMtx;
@@ -112,6 +113,7 @@ static void uvOnConnectionCb(uv_stream_t* q, ssize_t nread, const uv_buf_t* buf)
static void uvWorkerAsyncCb(uv_async_t* handle);
static void uvAcceptAsyncCb(uv_async_t* handle);
static void uvShutDownCb(uv_shutdown_t* req, int status);
+static void uvPrepareCb(uv_prepare_t* handle);
/*
* time-consuming task throwed into BG work thread
@@ -238,8 +240,6 @@ static void uvHandleReq(SSvrConn* pConn) {
transMsg.msgType = pHead->msgType;
transMsg.code = pHead->code;
- // transClearBuffer(&pConn->readBuf);
-
pConn->inType = pHead->msgType;
if (pConn->status == ConnNormal) {
if (pHead->persist == 1) {
@@ -546,6 +546,52 @@ static void uvShutDownCb(uv_shutdown_t* req, int status) {
uv_close((uv_handle_t*)req->handle, uvDestroyConn);
taosMemoryFree(req);
}
+static void uvPrepareCb(uv_prepare_t* handle) {
+ // prepare callback
+ SWorkThrd* pThrd = handle->data;
+ SAsyncPool* pool = pThrd->asyncPool;
+
+ for (int i = 0; i < pool->nAsync; i++) {
+ uv_async_t* async = &(pool->asyncs[i]);
+ SAsyncItem* item = async->data;
+
+ queue wq;
+ taosThreadMutexLock(&item->mtx);
+ QUEUE_MOVE(&item->qmsg, &wq);
+ taosThreadMutexUnlock(&item->mtx);
+
+ while (!QUEUE_IS_EMPTY(&wq)) {
+ queue* head = QUEUE_HEAD(&wq);
+ QUEUE_REMOVE(head);
+
+ SSvrMsg* msg = QUEUE_DATA(head, SSvrMsg, q);
+ if (msg == NULL) {
+ tError("unexcept occurred, continue");
+ continue;
+ }
+ // release handle to rpc init
+ if (msg->type == Quit) {
+ (*transAsyncHandle[msg->type])(msg, pThrd);
+ continue;
+ } else {
+ STransMsg transMsg = msg->msg;
+
+ SExHandle* exh1 = transMsg.info.handle;
+ int64_t refId = transMsg.info.refId;
+ SExHandle* exh2 = transAcquireExHandle(transGetRefMgt(), refId);
+ if (exh2 == NULL || exh1 != exh2) {
+ tTrace("handle except msg %p, ignore it", exh1);
+ transReleaseExHandle(transGetRefMgt(), refId);
+ destroySmsg(msg);
+ continue;
+ }
+ msg->pConn = exh1->handle;
+ transReleaseExHandle(transGetRefMgt(), refId);
+ (*transAsyncHandle[msg->type])(msg, pThrd);
+ }
+ }
+ }
+}
static void uvWorkDoTask(uv_work_t* req) {
// doing time-consumeing task
@@ -695,13 +741,17 @@ static bool addHandleToWorkloop(SWorkThrd* pThrd, char* pipeName) {
}
uv_pipe_init(pThrd->loop, pThrd->pipe, 1);
- // int r = uv_pipe_open(pThrd->pipe, pThrd->fd);
pThrd->pipe->data = pThrd;
QUEUE_INIT(&pThrd->msg);
taosThreadMutexInit(&pThrd->msgMtx, NULL);
+ pThrd->prepare = taosMemoryCalloc(1, sizeof(uv_prepare_t));
+ uv_prepare_init(pThrd->loop, pThrd->prepare);
+ uv_prepare_start(pThrd->prepare, uvPrepareCb);
+ pThrd->prepare->data = pThrd;
+
// conn set
QUEUE_INIT(&pThrd->conn);
@@ -986,6 +1036,7 @@ void destroyWorkThrd(SWorkThrd* pThrd) {
SRV_RELEASE_UV(pThrd->loop);
TRANS_DESTROY_ASYNC_POOL_MSG(pThrd->asyncPool, SSvrMsg, destroySmsg);
transAsyncPoolDestroy(pThrd->asyncPool);
+ taosMemoryFree(pThrd->prepare);
taosMemoryFree(pThrd->loop);
taosMemoryFree(pThrd);
}
diff --git a/source/libs/wal/inc/walInt.h b/source/libs/wal/inc/walInt.h
index 20667fc91887392c9c67715309094f1b29f4b306..3aebb1c6ba1174a26516abe7c52298bb61642639 100644
--- a/source/libs/wal/inc/walInt.h
+++ b/source/libs/wal/inc/walInt.h
@@ -61,26 +61,31 @@ static inline int32_t compareWalFileInfo(const void* pLeft, const void* pRight)
}
static inline int64_t walGetLastFileSize(SWal* pWal) {
+ if (taosArrayGetSize(pWal->fileInfoSet) == 0) return 0;
SWalFileInfo* pInfo = (SWalFileInfo*)taosArrayGetLast(pWal->fileInfoSet);
return pInfo->fileSize;
}
static inline int64_t walGetLastFileFirstVer(SWal* pWal) {
+ if (taosArrayGetSize(pWal->fileInfoSet) == 0) return -1;
SWalFileInfo* pInfo = (SWalFileInfo*)taosArrayGetLast(pWal->fileInfoSet);
return pInfo->firstVer;
}
static inline int64_t walGetCurFileFirstVer(SWal* pWal) {
+ if (pWal->writeCur == -1) return -1;
SWalFileInfo* pInfo = (SWalFileInfo*)taosArrayGet(pWal->fileInfoSet, pWal->writeCur);
return pInfo->firstVer;
}
static inline int64_t walGetCurFileLastVer(SWal* pWal) {
+ if (pWal->writeCur == -1) return -1;
SWalFileInfo* pInfo = (SWalFileInfo*)taosArrayGet(pWal->fileInfoSet, pWal->writeCur);
return pInfo->firstVer;
}
static inline int64_t walGetCurFileOffset(SWal* pWal) {
+ if (pWal->writeCur == -1) return -1;
SWalFileInfo* pInfo = (SWalFileInfo*)taosArrayGet(pWal->fileInfoSet, pWal->writeCur);
return pInfo->fileSize;
}
@@ -88,6 +93,7 @@ static inline int64_t walGetCurFileOffset(SWal* pWal) {
static inline bool walCurFileClosed(SWal* pWal) { return taosArrayGetSize(pWal->fileInfoSet) != pWal->writeCur; }
static inline SWalFileInfo* walGetCurFileInfo(SWal* pWal) {
+ if (pWal->writeCur == -1) return NULL;
return (SWalFileInfo*)taosArrayGet(pWal->fileInfoSet, pWal->writeCur);
}
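The walInt.h accessors now check for an empty fileInfoSet or an unset write cursor (writeCur == -1) and return a sentinel (0, -1, or NULL) instead of dereferencing whatever taosArrayGetLast/taosArrayGet hands back. The same guard shape in plain C:

```c
/* Guarded accessors: empty state yields a sentinel, never a bad deref. */
#include <stdio.h>

typedef struct {
  long long firstVer;
  long long fileSize;
} FileInfo;

typedef struct {
  FileInfo *files;
  int       nFiles;
  int       writeCur; /* -1 until a current file exists */
} Wal;

static long long lastFileSize(const Wal *w) {
  if (w->nFiles == 0) return 0; /* guard: no files yet */
  return w->files[w->nFiles - 1].fileSize;
}

static long long curFileFirstVer(const Wal *w) {
  if (w->writeCur == -1) return -1; /* guard: no current file */
  return w->files[w->writeCur].firstVer;
}

int main(void) {
  Wal w = {0};
  w.writeCur = -1;
  printf("%lld %lld\n", lastFileSize(&w), curFileFirstVer(&w));
  return 0;
}
```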
diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c
index a5fd3fca35c01b84f0b2c5b7cc4fe3b7bbf76662..a8da6809100fa5789e5b7e57e051631257782e1e 100644
--- a/source/libs/wal/src/walMeta.c
+++ b/source/libs/wal/src/walMeta.c
@@ -139,7 +139,7 @@ int walCheckAndRepairMeta(SWal* pWal) {
const char* idxPattern = "^[0-9]+.idx$";
regex_t logRegPattern;
regex_t idxRegPattern;
- SArray* pLogInfoArray = taosArrayInit(8, sizeof(SWalFileInfo));
+ SArray* actualLog = taosArrayInit(8, sizeof(SWalFileInfo));
regcomp(&logRegPattern, logPattern, REG_EXTENDED);
regcomp(&idxRegPattern, idxPattern, REG_EXTENDED);
@@ -159,7 +159,7 @@ int walCheckAndRepairMeta(SWal* pWal) {
SWalFileInfo fileInfo;
memset(&fileInfo, -1, sizeof(SWalFileInfo));
sscanf(name, "%" PRId64 ".log", &fileInfo.firstVer);
- taosArrayPush(pLogInfoArray, &fileInfo);
+ taosArrayPush(actualLog, &fileInfo);
}
}
@@ -167,10 +167,10 @@ int walCheckAndRepairMeta(SWal* pWal) {
regfree(&logRegPattern);
regfree(&idxRegPattern);
- taosArraySort(pLogInfoArray, compareWalFileInfo);
+ taosArraySort(actualLog, compareWalFileInfo);
int metaFileNum = taosArrayGetSize(pWal->fileInfoSet);
- int actualFileNum = taosArrayGetSize(pLogInfoArray);
+ int actualFileNum = taosArrayGetSize(actualLog);
#if 0
for (int32_t fileNo = actualFileNum - 1; fileNo >= 0; fileNo--) {
@@ -196,11 +196,11 @@ int walCheckAndRepairMeta(SWal* pWal) {
taosArrayPopFrontBatch(pWal->fileInfoSet, metaFileNum - actualFileNum);
} else if (metaFileNum < actualFileNum) {
for (int i = metaFileNum; i < actualFileNum; i++) {
- SWalFileInfo* pFileInfo = taosArrayGet(pLogInfoArray, i);
+ SWalFileInfo* pFileInfo = taosArrayGet(actualLog, i);
taosArrayPush(pWal->fileInfoSet, pFileInfo);
}
}
- taosArrayDestroy(pLogInfoArray);
+ taosArrayDestroy(actualLog);
pWal->writeCur = actualFileNum - 1;
if (actualFileNum > 0) {
@@ -221,7 +221,7 @@ int walCheckAndRepairMeta(SWal* pWal) {
int code = walSaveMeta(pWal);
if (code < 0) {
- taosArrayDestroy(pLogInfoArray);
+ taosArrayDestroy(actualLog);
return -1;
}
}
diff --git a/source/libs/wal/src/walRead.c b/source/libs/wal/src/walRead.c
index ac62b7d98dfeec5e7df073ef165a79d1ad95b7f6..787c9af31703df50a4b91589e20f5f9373d71c56 100644
--- a/source/libs/wal/src/walRead.c
+++ b/source/libs/wal/src/walRead.c
@@ -423,37 +423,38 @@ int32_t walFetchBody(SWalReader *pRead, SWalCkHead **ppHead) {
return 0;
}
-int32_t walReadVer(SWalReader *pRead, int64_t ver) {
- wDebug("vgId:%d wal start to read ver %ld", pRead->pWal->cfg.vgId, ver);
+int32_t walReadVer(SWalReader *pReader, int64_t ver) {
+ wDebug("vgId:%d wal start to read ver %ld", pReader->pWal->cfg.vgId, ver);
int64_t contLen;
+ int32_t code;
bool seeked = false;
- if (pRead->pWal->vers.firstVer == -1) {
+ if (pReader->pWal->vers.firstVer == -1) {
terrno = TSDB_CODE_WAL_LOG_NOT_EXIST;
return -1;
}
- if (ver > pRead->pWal->vers.lastVer || ver < pRead->pWal->vers.firstVer) {
- wDebug("vgId:%d, invalid index:%" PRId64 ", first index:%" PRId64 ", last index:%" PRId64, pRead->pWal->cfg.vgId,
- ver, pRead->pWal->vers.firstVer, pRead->pWal->vers.lastVer);
+ if (ver > pReader->pWal->vers.lastVer || ver < pReader->pWal->vers.firstVer) {
+ wDebug("vgId:%d, invalid index:%" PRId64 ", first index:%" PRId64 ", last index:%" PRId64, pReader->pWal->cfg.vgId,
+ ver, pReader->pWal->vers.firstVer, pReader->pWal->vers.lastVer);
terrno = TSDB_CODE_WAL_LOG_NOT_EXIST;
return -1;
}
- if (pRead->curInvalid || pRead->curVersion != ver) {
- if (walReadSeekVer(pRead, ver) < 0) {
- wError("vgId:%d, unexpected wal log, index:%" PRId64 ", since %s", pRead->pWal->cfg.vgId, ver, terrstr());
+ if (pReader->curInvalid || pReader->curVersion != ver) {
+ if (walReadSeekVer(pReader, ver) < 0) {
+ wError("vgId:%d, unexpected wal log, index:%" PRId64 ", since %s", pReader->pWal->cfg.vgId, ver, terrstr());
return -1;
}
seeked = true;
}
while (1) {
- contLen = taosReadFile(pRead->pLogFile, pRead->pHead, sizeof(SWalCkHead));
+ contLen = taosReadFile(pReader->pLogFile, pReader->pHead, sizeof(SWalCkHead));
if (contLen == sizeof(SWalCkHead)) {
break;
} else if (contLen == 0 && !seeked) {
- walReadSeekVerImpl(pRead, ver);
+ walReadSeekVerImpl(pReader, ver);
seeked = true;
continue;
} else {
@@ -467,26 +468,26 @@ int32_t walReadVer(SWalReader *pRead, int64_t ver) {
}
}
- contLen = walValidHeadCksum(pRead->pHead);
- if (contLen != 0) {
- wError("vgId:%d, unexpected wal log, index:%" PRId64 ", since head checksum not passed", pRead->pWal->cfg.vgId,
+ code = walValidHeadCksum(pReader->pHead);
+ if (code != 0) {
+ wError("vgId:%d, unexpected wal log, index:%" PRId64 ", since head checksum not passed", pReader->pWal->cfg.vgId,
ver);
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
return -1;
}
- if (pRead->capacity < pRead->pHead->head.bodyLen) {
- void *ptr = taosMemoryRealloc(pRead->pHead, sizeof(SWalCkHead) + pRead->pHead->head.bodyLen);
+ if (pReader->capacity < pReader->pHead->head.bodyLen) {
+ void *ptr = taosMemoryRealloc(pReader->pHead, sizeof(SWalCkHead) + pReader->pHead->head.bodyLen);
if (ptr == NULL) {
terrno = TSDB_CODE_WAL_OUT_OF_MEMORY;
return -1;
}
- pRead->pHead = ptr;
- pRead->capacity = pRead->pHead->head.bodyLen;
+ pReader->pHead = ptr;
+ pReader->capacity = pReader->pHead->head.bodyLen;
}
- if ((contLen = taosReadFile(pRead->pLogFile, pRead->pHead->head.body, pRead->pHead->head.bodyLen)) !=
- pRead->pHead->head.bodyLen) {
+ if ((contLen = taosReadFile(pReader->pLogFile, pReader->pHead->head.body, pReader->pHead->head.bodyLen)) !=
+ pReader->pHead->head.bodyLen) {
if (contLen < 0)
terrno = TAOS_SYSTEM_ERROR(errno);
else {
@@ -496,25 +497,28 @@ int32_t walReadVer(SWalReader *pRead, int64_t ver) {
return -1;
}
- if (pRead->pHead->head.version != ver) {
- wError("vgId:%d, unexpected wal log, index:%" PRId64 ", read request index:%" PRId64, pRead->pWal->cfg.vgId,
- pRead->pHead->head.version, ver);
- pRead->curInvalid = 1;
+ if (pReader->pHead->head.version != ver) {
+ wError("vgId:%d, unexpected wal log, index:%" PRId64 ", read request index:%" PRId64, pReader->pWal->cfg.vgId,
+ pReader->pHead->head.version, ver);
+ pReader->curInvalid = 1;
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
ASSERT(0);
return -1;
}
- contLen = walValidBodyCksum(pRead->pHead);
- if (contLen != 0) {
- wError("vgId:%d, unexpected wal log, index:%" PRId64 ", since body checksum not passed", pRead->pWal->cfg.vgId,
+ code = walValidBodyCksum(pReader->pHead);
+ if (code != 0) {
+ wError("vgId:%d, unexpected wal log, index:%" PRId64 ", since body checksum not passed", pReader->pWal->cfg.vgId,
ver);
- pRead->curInvalid = 1;
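+    // dump both the stored and the recomputed body checksum to make corruption easier to diagnose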
+ uint32_t readCkSum = walCalcBodyCksum(pReader->pHead->head.body, pReader->pHead->head.bodyLen);
+ uint32_t logCkSum = pReader->pHead->cksumBody;
+ wError("checksum written into log: %u, checksum calculated: %u", logCkSum, readCkSum);
+ pReader->curInvalid = 1;
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
ASSERT(0);
return -1;
}
- pRead->curVersion++;
+ pReader->curVersion++;
return 0;
}
diff --git a/source/libs/wal/src/walWrite.c b/source/libs/wal/src/walWrite.c
index 67b2b90d6a2dfedee9bc5a5787527556a32f2e78..4eadc92f705da24df20555ffef1bf82d7fca9858 100644
--- a/source/libs/wal/src/walWrite.c
+++ b/source/libs/wal/src/walWrite.c
@@ -250,13 +250,11 @@ int32_t walBeginSnapshot(SWal *pWal, int64_t ver) {
}
int32_t walEndSnapshot(SWal *pWal) {
- uInfo("%s:%d rsma: WAL walEndSnapshot entry for %s", __func__, __LINE__, pWal->path);
int32_t code = 0;
taosThreadMutexLock(&pWal->mutex);
int64_t ver = pWal->vers.verInSnapshotting;
if (ver == -1) {
code = -1;
- uInfo("%s:%d rsma: WAL walEndSnapshot code = -1 for %s", __func__, __LINE__, pWal->path);
goto END;
};
@@ -291,29 +289,31 @@ int32_t walEndSnapshot(SWal *pWal) {
newTotSize -= iter->fileSize;
}
}
- char fnameStr[WAL_FILE_LEN];
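+    // count the files actually removed; if a log file cannot be deleted, bail out to UPDATE_META
+    // so the meta never drops entries for files still present on disk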
+ int32_t actualDelete = 0;
+ char fnameStr[WAL_FILE_LEN];
// remove file
- uInfo("%s:%d rsma: WAL walEndSnapshot deleteCnt=%d %s", __func__, __LINE__, (int32_t)deleteCnt, pWal->path);
for (int i = 0; i < deleteCnt; i++) {
pInfo = taosArrayGet(pWal->fileInfoSet, i);
walBuildLogName(pWal, pInfo->firstVer, fnameStr);
- taosRemoveFile(fnameStr);
- uInfo("rsma: removed WAL log file %s", fnameStr);
+ if (taosRemoveFile(fnameStr) < 0) {
+ goto UPDATE_META;
+ }
walBuildIdxName(pWal, pInfo->firstVer, fnameStr);
- taosRemoveFile(fnameStr);
- uInfo("rsma: removed WAL idx file %s", fnameStr);
+ if (taosRemoveFile(fnameStr) < 0) {
+ ASSERT(0);
+ }
+ actualDelete++;
}
+ UPDATE_META:
// make new array, remove files
- taosArrayPopFrontBatch(pWal->fileInfoSet, deleteCnt);
+ taosArrayPopFrontBatch(pWal->fileInfoSet, actualDelete);
if (taosArrayGetSize(pWal->fileInfoSet) == 0) {
pWal->writeCur = -1;
pWal->vers.firstVer = -1;
} else {
pWal->vers.firstVer = ((SWalFileInfo *)taosArrayGet(pWal->fileInfoSet, 0))->firstVer;
}
- } else {
- uInfo("%s:%d rsma: WAL walEndSnapshot %s", __func__, __LINE__, pWal->path);
}
pWal->writeCur = taosArrayGetSize(pWal->fileInfoSet) - 1;
pWal->totSize = newTotSize;
diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py
index 9b72312028aaffaa2c25092e6e82f057a297c9ea..6384195a1b89deba2c93876572ea11fd093e500c 100644
--- a/tests/pytest/util/common.py
+++ b/tests/pytest/util/common.py
@@ -18,7 +18,7 @@ import time
import socket
import json
import toml
-from .boundary import DataBoundary
+from util.boundary import DataBoundary
import taos
from util.log import *
from util.sql import *
@@ -80,23 +80,18 @@ class DataSet:
self.bool_data.append( bool((i + bool_start) % 2 ))
self.vchar_data.append( f"{vchar_prefix}_{i * vchar_step}" )
self.nchar_data.append( f"{nchar_prefix}_{i * nchar_step}")
- self.ts_data.append( int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000 - i * ts_step))
-
- def get_disorder_set(self,
- rows,
- int_low :int = INT_MIN,
- int_up :int = INT_MAX,
- bint_low :int = BIGINT_MIN,
- bint_up :int = BIGINT_MAX,
- sint_low :int = SMALLINT_MIN,
- sint_up :int = SMALLINT_MAX,
- tint_low :int = TINYINT_MIN,
- tint_up :int = TINYINT_MAX,
- ubint_low :int = BIGINT_UN_MIN,
- ubint_up :int = BIGINT_UN_MAX,
-
-
- ):
+ self.ts_data.append( int(datetime.timestamp(datetime.now()) * 1000 - i * ts_step))
+
+    def get_disorder_set(self, rows, **kwargs):
+        # read each bound from kwargs, falling back to the type's full range
+        int_low  = kwargs.get("int_low",  INT_MIN)
+        int_up   = kwargs.get("int_up",   INT_MAX)
+        bint_low = kwargs.get("bint_low", BIGINT_MIN)
+        bint_up  = kwargs.get("bint_up",  BIGINT_MAX)
+        sint_low = kwargs.get("sint_low", SMALLINT_MIN)
+        sint_up  = kwargs.get("sint_up",  SMALLINT_MAX)
+        tint_low = kwargs.get("tint_low", TINYINT_MIN)
+        tint_up  = kwargs.get("tint_up",  TINYINT_MAX)
pass
diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py
index 01955ec93ae05e03c8835fd766f7ceaf0eb2fde3..b9177d22699d8ef4b36e09a66b6c5716c2e9d7a5 100644
--- a/tests/pytest/util/sql.py
+++ b/tests/pytest/util/sql.py
@@ -49,18 +49,23 @@ class TDSql:
def close(self):
self.cursor.close()
- def prepare(self):
- tdLog.info("prepare database:db")
+ def prepare(self, dbname="db", drop=True, **kwargs):
+ tdLog.info(f"prepare database:{dbname}")
s = 'reset query cache'
try:
self.cursor.execute(s)
except:
tdLog.notice("'reset query cache' is not supported")
- s = 'drop database if exists db'
- self.cursor.execute(s)
- s = 'create database db duration 300'
+ if drop:
+ s = f'drop database if exists {dbname}'
+ self.cursor.execute(s)
+ s = f'create database {dbname}'
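+        # any extra kwargs become database options, e.g. prepare(dbname="db1", vgroups=4)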
+ for k, v in kwargs.items():
+ s += f" {k} {v}"
+ if "duration" not in kwargs:
+ s += " duration 300"
self.cursor.execute(s)
- s = 'use db'
+ s = f'use {dbname}'
self.cursor.execute(s)
time.sleep(2)
@@ -106,7 +111,7 @@ class TDSql:
if row_tag:
return self.queryResult
return self.queryRows
- except Exception as e:
+ except Exception as e:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, repr(e))
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
@@ -304,7 +309,7 @@ class TDSql:
tdLog.notice("Try to execute sql again, query times: %d "%i)
time.sleep(1)
pass
- else:
+ else:
try:
tdLog.notice("Try the last execute sql ")
self.affectedRows = self.cursor.execute(sql)
diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt
index f34f29fb642971c5c24df2bb66f1c30faf8bb0af..6de5a9ab98cc4ac5b6f20deae355a7021345dca6 100644
--- a/tests/script/jenkins/basic.txt
+++ b/tests/script/jenkins/basic.txt
@@ -1,7 +1,7 @@
#======================b1-start===============
-# ---- user
+# ---- user ----
./test.sh -f tsim/user/basic.sim
./test.sh -f tsim/user/password.sim
./test.sh -f tsim/user/privilege_db.sim
@@ -58,13 +58,13 @@
# unsupport ./test.sh -f tsim/dnode/vnode_clean.sim
./test.sh -f tsim/dnode/use_dropped_dnode.sim
-# ---- import
+# ---- import ----
./test.sh -f tsim/import/basic.sim
./test.sh -f tsim/import/commit.sim
./test.sh -f tsim/import/large.sim
./test.sh -f tsim/import/replica1.sim
-# ---- insert
+# ---- insert ----
./test.sh -f tsim/insert/backquote.sim
./test.sh -f tsim/insert/basic.sim
./test.sh -f tsim/insert/basic0.sim
@@ -164,9 +164,9 @@
# TD-17704 ./test.sh -f tsim/parser/union_sysinfo.sim
# TD-17661 ./test.sh -f tsim/parser/where.sim
-# ---- query
+# ---- query ----
./test.sh -f tsim/query/charScalarFunction.sim
-# ./test.sh -f tsim/query/explain.sim
+./test.sh -f tsim/query/explain.sim
./test.sh -f tsim/query/interval-offset.sim
./test.sh -f tsim/query/interval.sim
./test.sh -f tsim/query/scalarFunction.sim
@@ -187,10 +187,10 @@
./test.sh -f tsim/mnode/basic1.sim
./test.sh -f tsim/mnode/basic2.sim
./test.sh -f tsim/mnode/basic3.sim
-./test.sh -f tsim/mnode/basic4.sim
+# TD-17919 ./test.sh -f tsim/mnode/basic4.sim
./test.sh -f tsim/mnode/basic5.sim
-# ---- show
+# ---- show ----
./test.sh -f tsim/show/basic.sim
# ---- table
@@ -260,7 +260,7 @@
./test.sh -f tsim/tmq/snapshot.sim
./test.sh -f tsim/tmq/snapshot1.sim
-# --- stable
+# --- stable ----
./test.sh -f tsim/stable/alter_comment.sim
./test.sh -f tsim/stable/alter_count.sim
./test.sh -f tsim/stable/alter_import.sim
@@ -274,7 +274,6 @@
./test.sh -f tsim/stable/dnode3.sim
./test.sh -f tsim/stable/metrics.sim
./test.sh -f tsim/stable/refcount.sim
-./test.sh -f tsim/stable/show.sim
./test.sh -f tsim/stable/tag_add.sim
./test.sh -f tsim/stable/tag_drop.sim
./test.sh -f tsim/stable/tag_filter.sim
@@ -298,8 +297,9 @@
# --- sma
./test.sh -f tsim/sma/drop_sma.sim
./test.sh -f tsim/sma/tsmaCreateInsertQuery.sim
-./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim
-./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim
+# temp disable
+#./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim
+#./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim
# --- valgrind
./test.sh -f tsim/valgrind/checkError1.sim
@@ -308,6 +308,7 @@
./test.sh -f tsim/valgrind/checkError4.sim
./test.sh -f tsim/valgrind/checkError5.sim
./test.sh -f tsim/valgrind/checkError6.sim
+./test.sh -f tsim/valgrind/checkError7.sim
# --- vnode
# unsupport ./test.sh -f tsim/vnode/replica3_basic.sim
@@ -328,35 +329,35 @@
./test.sh -f tsim/sync/oneReplica1VgElect.sim
./test.sh -f tsim/sync/oneReplica5VgElect.sim
-# --- catalog
+# --- catalog ----
./test.sh -f tsim/catalog/alterInCurrent.sim
# --- scalar
./test.sh -f tsim/scalar/in.sim
./test.sh -f tsim/scalar/scalar.sim
-# ---- alter
+# ---- alter ----
./test.sh -f tsim/alter/cached_schema_after_alter.sim
./test.sh -f tsim/alter/dnode.sim
./test.sh -f tsim/alter/table.sim
-# ---- cache
+# ---- cache ----
./test.sh -f tsim/cache/new_metrics.sim
./test.sh -f tsim/cache/restart_table.sim
./test.sh -f tsim/cache/restart_metrics.sim
-# ---- column
+# ---- column ----
./test.sh -f tsim/column/commit.sim
./test.sh -f tsim/column/metrics.sim
./test.sh -f tsim/column/table.sim
-# ---- compress
+# ---- compress ----
./test.sh -f tsim/compress/commitlog.sim
./test.sh -f tsim/compress/compress2.sim
./test.sh -f tsim/compress/compress.sim
./test.sh -f tsim/compress/uncompress.sim
-# ---- compute
+# ---- compute ----
./test.sh -f tsim/compute/avg.sim
./test.sh -f tsim/compute/block_dist.sim
./test.sh -f tsim/compute/bottom.sim
diff --git a/tests/script/tsim/alter/cached_schema_after_alter.sim b/tests/script/tsim/alter/cached_schema_after_alter.sim
index 043f360856e4b4f0533bf4dc5e4be7cea71c3325..bd2b1d272ce83525fc645451ea5a48bbaa2611be 100644
--- a/tests/script/tsim/alter/cached_schema_after_alter.sim
+++ b/tests/script/tsim/alter/cached_schema_after_alter.sim
@@ -50,11 +50,8 @@ endi
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 3000
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
-sleep 3000
-sql connect
sql use $db
sql select * from $stb
diff --git a/tests/script/tsim/parser/join_multivnode.sim b/tests/script/tsim/parser/join_multivnode.sim
index c33fa85fa255c732e7b358e2d9014d520a6beaac..f1204326d3c9de769b1fa68b4ce6c725478a18bf 100644
--- a/tests/script/tsim/parser/join_multivnode.sim
+++ b/tests/script/tsim/parser/join_multivnode.sim
@@ -98,6 +98,11 @@ while $i < $tbNum
endw
print ===============multivnode projection join.sim
+sql select join_mt0.ts,join_mt0.ts,join_mt0.t1 from join_mt0, join_mt1 where join_mt0.ts=join_mt1.ts;
+print ===> rows $row
+if $row != 9000 then
+  print expect 9000, actual: $row
+  return -1
+endi
sql select join_mt0.ts,join_mt0.ts,join_mt0.t1 from join_mt0, join_mt1 where join_mt0.ts=join_mt1.ts and join_mt0.t1=join_mt1.t1;
print ===> rows $row
if $row != 3000 then
diff --git a/tests/script/tsim/parser/limit1_stb.sim b/tests/script/tsim/parser/limit1_stb.sim
index 7745e2d032a09ba52fba9ab1e6a46966ae2881b7..43fa06230caf73a014383c34ad91716c4c2f2eef 100644
--- a/tests/script/tsim/parser/limit1_stb.sim
+++ b/tests/script/tsim/parser/limit1_stb.sim
@@ -51,40 +51,10 @@ endi
##TBASE-352
$offset = $tbNum * $rowNum
$offset = $offset - 1
-sql select * from $stb limit 2 offset $offset
+sql select * from $stb order by ts limit 2 offset $offset
if $rows != 1 then
return -1
endi
-if $data00 != @18-11-25 19:30:00.000@ then
- return -1
-endi
-if $data01 != 9 then
- return -1
-endi
-if $data02 != NULL then
- return -1
-endi
-if $data03 != 9.00000 then
- return -1
-endi
-if $data04 != NULL then
- return -1
-endi
-if $data05 != 9 then
- return -1
-endi
-if $data06 != 9 then
- return -1
-endi
-if $data07 != 1 then
- return -1
-endi
-if $data08 != binary9 then
- return -1
-endi
-if $data09 != nchar9 then
- return -1
-endi
$offset = $tbNum * $rowNum
$offset = $offset / 2
@@ -207,21 +177,6 @@ sql select * from $stb where ts > '2018-09-17 09:30:00.000' and ts < '2018-09-17
if $rows != 5 then
return -1
endi
-if $data01 != 5 then
- return -1
-endi
-if $data11 != 6 then
- return -1
-endi
-if $data21 != 7 then
- return -1
-endi
-if $data31 != 8 then
- return -1
-endi
-if $data41 != 4 then
- return -1
-endi
$offset = $totalNum / 2
sql select * from $stb where ts >= $ts0 and ts <= $tsu limit 5 offset $offset
diff --git a/tests/script/tsim/query/udf.sim b/tests/script/tsim/query/udf.sim
index 4e95095172befd74914009cf17cdaffef54d48f2..5d69887c864621187d204f66511e7043876a1fc7 100644
--- a/tests/script/tsim/query/udf.sim
+++ b/tests/script/tsim/query/udf.sim
@@ -1,13 +1,9 @@
system sh/stop_dnodes.sh
-
system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 2
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode1 -c udf -v 1
print ========= start dnode1 as leader
system sh/exec.sh -n dnode1 -s start
-sleep 1000
sql connect
print ======== step1 udf
diff --git a/tests/script/tsim/stable/show.sim b/tests/script/tsim/stable/show.sim
deleted file mode 100644
index d3ab75adf5ac08dbd4c2a8a0870cfe4fbfd62a4d..0000000000000000000000000000000000000000
--- a/tests/script/tsim/stable/show.sim
+++ /dev/null
@@ -1,61 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/exec.sh -n dnode1 -s start
-sql connect
-
-print ======================== create stable
-sql create database d1
-sql use d1
-
-$x = 0
-while $x < 128
- $tb = d1.s . $x
- sql create table $tb (ts timestamp, i int) tags (j int)
- $x = $x + 1
-endw
-
-print ======================== describe stables
-# TODO : create stable error
-$m = 0
-while $m < 128
- $tb = s . $m
- $filter = ' . $tb
- $filter = $filter . '
- sql show stables like $filter
- print sql : show stables like $filter
- if $rows != 1 then
- print expect 1, actual: $rows
- return -1
- endi
- $m = $m + 1
-endw
-
-
-print ======================== show stables
-
-sql show d1.stables
-
-print num of stables is $rows
-if $rows != 128 then
- return -1
-endi
-
-print ======================== create table
-
-$x = 0
-while $x < 424
- $tb = d1.t . $x
- sql create table $tb using d1.s0 tags( $x )
- $x = $x + 1
-endw
-
-print ======================== show stables
-
-sql show d1.tables
-
-print num of tables is $rows
-if $rows != 424 then
- return -1
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/valgrind/checkError2.sim b/tests/script/tsim/valgrind/checkError2.sim
index e81d702d828433fd18e81a07798a57bac4f4eb6a..e3322f03660a5e63b6bcf955766ad817d2469095 100644
--- a/tests/script/tsim/valgrind/checkError2.sim
+++ b/tests/script/tsim/valgrind/checkError2.sim
@@ -62,6 +62,8 @@ sql select * from ct1 where ts < now -1d and ts > now +1d
sql select * from stb where ts < now -1d and ts > now +1d
sql select * from ct1 where ts < now -1d and ts > now +1d order by ts desc
sql select * from stb where ts < now -1d and ts > now +1d order by ts desc
+sql select * from ct1 where t1 between 1000 and 2500
+sql select * from stb where t1 between 1000 and 2500
print =============== step7: count
sql select count(*) from ct1;
diff --git a/tests/script/tsim/valgrind/checkError6.sim b/tests/script/tsim/valgrind/checkError6.sim
index 804961e7b0cd67ffed6a215a33239ce51a965321..ec8ca0ad8c6b615b6cd6748743f51aad603f7073 100644
--- a/tests/script/tsim/valgrind/checkError6.sim
+++ b/tests/script/tsim/valgrind/checkError6.sim
@@ -63,13 +63,15 @@ sql show table distributed tb1
sql select count(1) from tb1
sql select count(tbcol) as b from tb1 where ts <= 1601481840000 interval(1m)
sql select diff(tbcol) from tb1 where ts <= 1601481840000
-sql select diff(tbcol) from tb1 where tbcol > 5 and tbcol < 20
+sql select diff(tbcol) from tb1 where tbcol > 5 and tbcol < 20 order by ts
sql select first(tbcol), last(tbcol) as b from tb1 where ts <= 1601481840000 interval(1m)
-sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), sum(tbcol), stddev(tbcol) from tb1 where ts <= 1601481840000 partition by tgcol interval(1m)
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), sum(tbcol), stddev(tbcol) from tb1 where ts <= 1601481840000 partition by tgcol order by tgcol
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), sum(tbcol), stddev(tbcol) from tb1 where ts <= 1601481840000 partition by tgcol interval(1m)
sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
sql select last_row(*) from tb1 where tbcol > 5 and tbcol < 20
sql select _wstart, _wend, _wduration, _qstart, _qend, count(*) from tb1 interval(10s, 2s) sliding(10s)
sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0) order by tgcol desc
sql select log(tbcol), abs(tbcol), pow(tbcol, 2), sqrt(tbcol), sin(tbcol), cos(tbcol), tan(tbcol), asin(tbcol), acos(tbcol), atan(tbcol), ceil(tbcol), floor(tbcol), round(tbcol), atan(tbcol) from tb1
sql select length("abcd1234"), char_length("abcd1234=-+*") from tb1
sql select tbcol4, length(tbcol4), lower(tbcol4), upper(tbcol4), ltrim(tbcol4), rtrim(tbcol4), concat(tbcol4, tbcol5), concat_ws('_', tbcol4, tbcol5), substr(tbcol4, 1, 4) from tb1
@@ -94,6 +96,7 @@ sql select first(tbcol), last(tbcol) as b from stb where ts <= 1601481840000 and
sql select first(tbcol), last(tbcol) as b from stb where ts <= 1601481840000 partition by tgcol interval(1m)
sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), sum(tbcol), stddev(tbcol) from stb where ts <= 1601481840000 partition by tgcol interval(1m)
sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 and tgcol = 1 partition by tgcol interval(1m) fill(value, 0) order by tgcol desc
sql select last_row(tbcol), stddev(tbcol) from stb where tbcol > 5 and tbcol < 20 group by tgcol
sql select _wstart, _wend, _wduration, _qstart, _qend, count(*) from stb interval(10s, 2s) sliding(10s)
sql select log(tbcol), abs(tbcol), pow(tbcol, 2), sqrt(tbcol), sin(tbcol), cos(tbcol), tan(tbcol), asin(tbcol), acos(tbcol), atan(tbcol), ceil(tbcol), floor(tbcol), round(tbcol), atan(tbcol) from stb
@@ -105,6 +108,40 @@ sql select * from stb where tbcol + 3 <> null;
print =============== step5: explain
+print =============== check
+$null=
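+# checkValgrind.sh prints the valgrind error count: a value > 0 fails, and empty output also fails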
+
+system_content sh/checkValgrind.sh -n dnode1
+print cmd return result ----> [ $system_content ]
+if $system_content > 0 then
+ return -1
+endi
+
+if $system_content == $null then
+ return -1
+endi
+
+print =============== restart
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/exec.sh -n dnode1 -s start -v
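+# rerun a subset of the earlier queries to verify the data is still readable after restart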
+
+sql select avg(tbcol) as c from stb
+sql select avg(tbcol) as c from stb where ts <= 1601481840000
+sql select avg(tbcol) as c from stb where tgcol < 5 and ts <= 1601481840000
+sql select avg(tbcol) as c from stb interval(1m)
+sql select avg(tbcol) as c from stb interval(1d)
+sql select avg(tbcol) as b from stb where ts <= 1601481840000 interval(1m)
+sql select avg(tbcol) as c from stb group by tgcol
+sql select avg(tbcol) as b from stb where ts <= 1601481840000 partition by tgcol interval(1m)
+sql show table distributed stb
+sql select count(tbcol) as b from stb where ts <= 1601481840000 partition by tgcol interval(1m)
+sql select diff(tbcol) from stb where ts <= 1601481840000
+sql select first(tbcol), last(tbcol) as c from stb group by tgcol
+sql select first(tbcol), last(tbcol) as b from stb where ts <= 1601481840000 and tbcol2 is null partition by tgcol interval(1m)
+sql select first(tbcol), last(tbcol) as b from stb where ts <= 1601481840000 partition by tgcol interval(1m)
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), sum(tbcol), stddev(tbcol) from stb where ts <= 1601481840000 partition by tgcol interval(1m)
+sql select last_row(tbcol), stddev(tbcol) from stb where tbcol > 5 and tbcol < 20 group by tgcol
+
_OVER:
system sh/exec.sh -n dnode1 -s stop -x SIGINT
print =============== check
diff --git a/tests/system-test/1-insert/mutil_stage.py b/tests/system-test/1-insert/mutil_stage.py
new file mode 100644
index 0000000000000000000000000000000000000000..fcad114edde773ebd059f30ca852accceb404b63
--- /dev/null
+++ b/tests/system-test/1-insert/mutil_stage.py
@@ -0,0 +1,257 @@
+from datetime import datetime
+import time
+
+from typing import List, Any, Tuple
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+from util.common import *
+
+PRIMARY_COL = "ts"
+
+INT_COL = "c_int"
+BINT_COL = "c_bint"
+SINT_COL = "c_sint"
+TINT_COL = "c_tint"
+FLOAT_COL = "c_float"
+DOUBLE_COL = "c_double"
+BOOL_COL = "c_bool"
+TINT_UN_COL = "c_utint"
+SINT_UN_COL = "c_usint"
+BINT_UN_COL = "c_ubint"
+INT_UN_COL = "c_uint"
+BINARY_COL = "c_binary"
+NCHAR_COL = "c_nchar"
+TS_COL = "c_ts"
+
+NUM_COL = [INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
+CHAR_COL = [BINARY_COL, NCHAR_COL, ]
+BOOLEAN_COL = [BOOL_COL, ]
+TS_TYPE_COL = [TS_COL, ]
+
+INT_TAG = "t_int"
+
+TAG_COL = [INT_TAG]
+# insert data args:
+TIME_STEP = 10000
+NOW = int(datetime.timestamp(datetime.now()) * 1000)
+
+# init db/table
+DBNAME = "db"
+STBNAME = "stb1"
+CTB_PRE = "ct"
+NTB_PRE = "nt"
+
+L0 = 0
+L1 = 1
+L2 = 2
+
+PRIMARY_DIR = 1
+NON_PRIMARY_DIR = 0
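+# dataDir cfg line format: "dataDir <path> <level> <primary>"; the error cases below
+# assume levels 0-2 only, with exactly one primary dir that must sit on level 0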
+
+DATA_PRE0 = f"data0"
+DATA_PRE1 = f"data1"
+DATA_PRE2 = f"data2"
+
+class TDTestCase:
+
+ def init(self, conn, logSql):
+ tdLog.debug(f"start to excute {__file__}")
+ tdSql.init(conn.cursor(), False)
+ self.taos_cfg_path = tdDnodes.dnodes[0].cfgPath
+ self.taos_data_dir = tdDnodes.dnodes[0].dataDir
+
+
+    def cfg(self, filename, **update_dict):
+        # append each "key value" pair to the cfg file as its own quoted line
+        for k, v in update_dict.items():
+            cmd = f'echo "{k} {v}" >> {filename}'
+            if os.system(cmd) != 0:
+                tdLog.exit(cmd)
+
+ def cfg_str(self, filename, update_str):
+ cmd = f'echo "{update_str}" >> {filename}'
+ if os.system(cmd) != 0:
+ tdLog.exit(cmd)
+
+ def cfg_str_list(self, filename, update_list):
+ for update_str in update_list:
+ self.cfg_str(filename, update_str)
+
+ def del_old_datadir(self, filename):
+ cmd = f"sed -i '/^dataDir/d' {filename}"
+ if os.system(cmd) != 0:
+ tdLog.exit(cmd)
+
+ @property
+ def __err_cfg(self):
+ cfg_list = []
+ err_case1 = [
+ f"dataDir {self.taos_data_dir}/{DATA_PRE0}0 {L0} {NON_PRIMARY_DIR}",
+ f"dataDir {self.taos_data_dir}/{DATA_PRE1}1 {L1} {PRIMARY_DIR}",
+ f"dataDir {self.taos_data_dir}/{DATA_PRE2}2 {L2} {NON_PRIMARY_DIR}"
+ ]
+ err_case2 = [
+ f"dataDir {self.taos_data_dir}/{DATA_PRE0}0 {L0} {NON_PRIMARY_DIR}",
+ f"dataDir {self.taos_data_dir}/{DATA_PRE1}1 {L1} {NON_PRIMARY_DIR}",
+ f"dataDir {self.taos_data_dir}/{DATA_PRE2}2 {L2} {PRIMARY_DIR}"
+ ]
+ err_case3 = [
+ f"dataDir {self.taos_data_dir}/data33 3 {NON_PRIMARY_DIR}"
+ ]
+ err_case4 = [
+ f"dataDir {self.taos_data_dir}/{DATA_PRE0}0 {L0} {NON_PRIMARY_DIR}",
+ f"dataDir {self.taos_data_dir}/{DATA_PRE1}1 {L1} {NON_PRIMARY_DIR}",
+ f"dataDir {self.taos_data_dir}/{DATA_PRE2}2 {L2} {NON_PRIMARY_DIR}",
+ f"dataDir {self.taos_data_dir}/{DATA_PRE2}2 {L1} {NON_PRIMARY_DIR}"
+ ]
+ err_case5 = [f"dataDir {self.taos_data_dir}/{DATA_PRE0}0 {L0} {PRIMARY_DIR}"]
+ for i in range(16):
+ err_case5.append(f"dataDir {self.taos_data_dir}/{DATA_PRE0}{i+1} {L0} {NON_PRIMARY_DIR}")
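+        # err_case5 ends up with 17 level-0 dirs, assumed to exceed the per-level mount-point limit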
+
+ err_case6 = [
+ f"dataDir {self.taos_data_dir}/{DATA_PRE0}0 {L0} {PRIMARY_DIR}",
+ f"dataDir {self.taos_data_dir}/{DATA_PRE0}1 {L0} {PRIMARY_DIR}",
+ ]
+ err_case7 = [
+ f"dataDir {self.taos_data_dir}/{DATA_PRE0}0 {L0} {PRIMARY_DIR}",
+ f"dataDir {self.taos_data_dir}/{DATA_PRE2}2 {L2} {PRIMARY_DIR}",
+ ]
+ err_case8 = [
+ f"dataDir {self.taos_data_dir}/data33 3 {PRIMARY_DIR}"
+ ]
+ err_case9 = [
+ f"dataDir {self.taos_data_dir}/data33 -1 {NON_PRIMARY_DIR}"
+ ]
+
+ cfg_list.append(err_case1)
+ cfg_list.append(err_case2)
+ cfg_list.append(err_case3)
+ cfg_list.append(err_case4)
+ cfg_list.append(err_case5)
+ cfg_list.append(err_case6)
+ cfg_list.append(err_case7)
+ cfg_list.append(err_case8)
+ cfg_list.append(err_case9)
+
+ return cfg_list
+
+ @property
+ def __current_cfg(self):
+ cfg_list = []
+ current_case1 = [
+ f"dataDir {self.taos_data_dir}/{DATA_PRE0}0 {L0} {PRIMARY_DIR}",
+ f"dataDir {self.taos_data_dir}/{DATA_PRE0}1 {L0} {NON_PRIMARY_DIR}",
+ f"dataDir {self.taos_data_dir}/{DATA_PRE1}1 {L1} {NON_PRIMARY_DIR}",
+ f"dataDir {self.taos_data_dir}/{DATA_PRE2}2 {L2} {NON_PRIMARY_DIR}"
+ ]
+
+ current_case2 = [f"dataDir {self.taos_data_dir}/{DATA_PRE0}0 {L0} {PRIMARY_DIR}"]
+ for i in range(9):
+ current_case2.append(f"dataDir {self.taos_data_dir}/{DATA_PRE0}{i+1} {L0} {NON_PRIMARY_DIR}")
+
+        # TD-17773 bug case: the first dataDir line omits level/primary flags and should still be accepted
+ current_case3 = [
+ f"dataDir {self.taos_data_dir}/{DATA_PRE0}0 ",
+ f"dataDir {self.taos_data_dir}/{DATA_PRE0}1 {L0} {NON_PRIMARY_DIR}",
+ f"dataDir {self.taos_data_dir}/{DATA_PRE1}0 {L1} {NON_PRIMARY_DIR}",
+ f"dataDir {self.taos_data_dir}/{DATA_PRE2}0 {L2} {NON_PRIMARY_DIR}",
+ ]
+ cfg_list.append(current_case1)
+ cfg_list.append(current_case3)
+
+        # current_case2 must come last, because that cfg is reused for the data uniformity test
+ cfg_list.append(current_case2)
+
+ return cfg_list
+
+ def cfg_check(self):
+ for cfg_case in self.__err_cfg:
+ self.del_old_datadir(filename=self.taos_cfg_path)
+ tdDnodes.stop(1)
+ tdDnodes.deploy(1)
+ self.cfg_str_list(filename=self.taos_cfg_path, update_list=cfg_case)
+ tdDnodes.starttaosd(1)
+ time.sleep(2)
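+            # with an invalid dataDir cfg taosd must fail to start, so any query has to error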
+ tdSql.error(f"show databases")
+
+ for cfg_case in self.__current_cfg:
+ self.del_old_datadir(filename=self.taos_cfg_path)
+ tdDnodes.stop(1)
+ tdDnodes.deploy(1)
+ self.cfg_str_list(filename=self.taos_cfg_path, update_list=cfg_case)
+ tdDnodes.start(1)
+ tdSql.query(f"show databases")
+
+ def __create_tb(self, stb=STBNAME, ctb_pre = CTB_PRE, ctb_num=20, ntb_pre=NTB_PRE, ntbnum=1, dbname=DBNAME):
+ tdLog.printNoPrefix("==========step: create table")
+ create_stb_sql = f'''create table {dbname}.{stb}(
+ ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
+ {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
+ {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp,
+ {TINT_UN_COL} tinyint unsigned, {SINT_UN_COL} smallint unsigned,
+ {INT_UN_COL} int unsigned, {BINT_UN_COL} bigint unsigned
+ ) tags ({INT_TAG} int)
+ '''
+ tdSql.execute(create_stb_sql)
+
+ for i in range(ntbnum):
+ create_ntb_sql = f'''create table {dbname}.{ntb_pre}{i+1}(
+ ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
+ {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
+ {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp,
+ {TINT_UN_COL} tinyint unsigned, {SINT_UN_COL} smallint unsigned,
+ {INT_UN_COL} int unsigned, {BINT_UN_COL} bigint unsigned
+ )
+ '''
+ tdSql.execute(create_ntb_sql)
+
+ for i in range(ctb_num):
+ tdSql.execute(f'create table {dbname}.{ctb_pre}{i+1} using {dbname}.{stb} tags ( {i+1} )')
+
+ def __insert_data(self, rows, dbname=DBNAME, ctb_num=20):
+ data = DataSet()
+ data.get_order_set(rows)
+
+ tdLog.printNoPrefix("==========step: start inser data into tables now.....")
+ for i in range(self.rows):
+ row_data = f'''
+ {data.int_data[i]}, {data.bint_data[i]}, {data.sint_data[i]}, {data.tint_data[i]}, {data.float_data[i]}, {data.double_data[i]},
+ {data.bool_data[i]}, '{data.vchar_data[i]}', '{data.nchar_data[i]}', {data.ts_data[i]}, {data.utint_data[i]},
+ {data.usint_data[i]}, {data.uint_data[i]}, {data.ubint_data[i]}
+ '''
+ neg_row_data = f'''
+ {-1 * data.int_data[i]}, {-1 * data.bint_data[i]}, {-1 * data.sint_data[i]}, {-1 * data.tint_data[i]}, {-1 * data.float_data[i]}, {-1 * data.double_data[i]},
+ {data.bool_data[i]}, '{data.vchar_data[i]}', '{data.nchar_data[i]}', {data.ts_data[i]}, {1 * data.utint_data[i]},
+ {1 * data.usint_data[i]}, {1 * data.uint_data[i]}, {1 * data.ubint_data[i]}
+ '''
+
+ for j in range(ctb_num):
+ tdSql.execute(
+ f"insert into {dbname}.{CTB_PRE}{j + 1} values ( {NOW - i * TIME_STEP}, {row_data} )")
+
+ # tdSql.execute(
+ # f"insert into {dbname}.{CTB_PRE}2 values ( {NOW - i * int(TIME_STEP * 0.6)}, {neg_row_data} )")
+ # tdSql.execute(
+ # f"insert into {dbname}.{CTB_PRE}4 values ( {NOW - i * int(TIME_STEP * 0.8) }, {row_data} )")
+ tdSql.execute(
+ f"insert into {dbname}.{NTB_PRE}1 values ( {NOW - i * int(TIME_STEP * 1.2)}, {row_data} )")
+
+ def run(self):
+ self.rows = 10
+ self.cfg_check()
+ tdSql.prepare(dbname=DBNAME, **{"keep": "1d, 1500m, 26h", "duration":"1h", "vgroups": 10})
+ self.__create_tb(dbname=DBNAME)
+ self.__insert_data(rows=self.rows, dbname=DBNAME)
+ tdSql.query(f"select count(*) from {DBNAME}.{NTB_PRE}1")
+ tdSql.execute(f"flush database {DBNAME}")
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/1-insert/time_range_wise.py b/tests/system-test/1-insert/time_range_wise.py
index f945bafe3b9b4347d8bb9b11fa57ef4d2e716d67..e65dded6013fdecf3075e9fbbbb8e0321e28cb7b 100644
--- a/tests/system-test/1-insert/time_range_wise.py
+++ b/tests/system-test/1-insert/time_range_wise.py
@@ -1,4 +1,5 @@
import datetime
+import time
from dataclasses import dataclass
from typing import List, Any, Tuple
@@ -328,11 +329,15 @@ class TDTestCase:
tdSql.query("select database()")
dbname = tdSql.getData(0,0)
tdSql.query("show databases")
+        r_index = -1    # fall back to the last column when no "retention" column exists
+        for index, value in enumerate(tdSql.cursor.description):
+            if value[0] == "retention":
+                r_index = index
+                break
for row in tdSql.queryResult:
if row[0] == dbname:
- if row[-1] is None:
+ if row[r_index] is None:
continue
- if ":" in row[-1]:
+ if ":" in row[r_index]:
sma.rollup_db = True
if sma.rollup_db :
return False
@@ -393,8 +398,6 @@ class TDTestCase:
else:
tdSql.error(self.__create_sma_index(sma))
-
-
def __drop_sma_index(self, sma:SMAschema):
sql = f"{sma.drop} {sma.drop_flag} {sma.index_name}"
return sql
@@ -416,8 +419,7 @@ class TDTestCase:
self.sma_created_index = list(filter(lambda x: x != sma.index_name, self.sma_created_index))
tdSql.query("show streams")
tdSql.checkRows(self.sma_count)
-
-
+ time.sleep(1)
else:
tdSql.error(self.__drop_sma_index(sma))
diff --git a/tests/system-test/2-query/concat.py b/tests/system-test/2-query/concat.py
index 59fae9b59d62599e3bca23c393ecc854aed9c186..23b964012ab24ae4eb4ed34b0fc679813a070e37 100644
--- a/tests/system-test/2-query/concat.py
+++ b/tests/system-test/2-query/concat.py
@@ -136,23 +136,23 @@ class TDTestCase:
return sqls
- def __test_current(self): # sourcery skip: use-itertools-product
+ def __test_current(self, dbname="db"): # sourcery skip: use-itertools-product
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
tbname = [
- "ct1",
- "ct2",
- "ct4",
+ f"{dbname}.ct1",
+ f"{dbname}.ct2",
+ f"{dbname}.ct4",
]
for tb in tbname:
for i in range(2,8):
self.__concat_check(tb,i)
tdLog.printNoPrefix(f"==========current sql condition check in {tb}, col num: {i} over==========")
- def __test_error(self):
+ def __test_error(self, dbname="db"):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
tbname = [
- "t1",
- "stb1",
+ f"{dbname}.t1",
+ f"{dbname}.stb1",
]
for tb in tbname:
@@ -163,22 +163,20 @@ class TDTestCase:
tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")
- def all_test(self):
- self.__test_current()
- self.__test_error()
+ def all_test(self, dbname="db"):
+ self.__test_current(dbname)
+ self.__test_error(dbname)
-
- def __create_tb(self):
- tdSql.prepare()
+ def __create_tb(self, dbname="db"):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (t1 int)
'''
- create_ntb_sql = f'''create table t1(
+ create_ntb_sql = f'''create table {dbname}.t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -188,29 +186,29 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname="db"):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into ct1 values
+ f'''insert into {dbname}.ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -226,7 +224,7 @@ class TDTestCase:
)
tdSql.execute(
- f'''insert into ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -242,13 +240,13 @@ class TDTestCase:
)
for i in range(rows):
- insert_data = f'''insert into t1 values
+ insert_data = f'''insert into {dbname}.t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -268,22 +266,23 @@ class TDTestCase:
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
- self.__create_tb()
+ self.__create_tb(dbname="db")
tdLog.printNoPrefix("==========step2:insert data")
self.rows = 10
- self.__insert_data(self.rows)
+ self.__insert_data(self.rows, dbname="db")
tdLog.printNoPrefix("==========step3:all check")
- self.all_test()
+ self.all_test(dbname="db")
- tdDnodes.stop(1)
- tdDnodes.start(1)
+ # tdDnodes.stop(1)
+ # tdDnodes.start(1)
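+        # flush persists the in-memory rows so step4 re-checks data read back from disk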
+ tdSql.execute("flush database db")
tdSql.execute("use db")
tdLog.printNoPrefix("==========step4:after wal, all check again ")
- self.all_test()
+ self.all_test(dbname="db")
def stop(self):
tdSql.close()
diff --git a/tests/system-test/2-query/concat2.py b/tests/system-test/2-query/concat2.py
index 717766e7ffcaafcc164cc1519d0a3a657d5e387c..544222007607e5dddb10f9347f86a603b6aebdd3 100644
--- a/tests/system-test/2-query/concat2.py
+++ b/tests/system-test/2-query/concat2.py
@@ -137,22 +137,22 @@ class TDTestCase:
return sqls
- def __test_current(self): # sourcery skip: use-itertools-product
+ def __test_current(self, dbname="db"):
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
tbname = [
- "t1",
- "stb1",
+ f"{dbname}.t1",
+ f"{dbname}.stb1",
]
for tb in tbname:
for i in range(2,8):
self.__concat_check(tb,i)
tdLog.printNoPrefix(f"==========current sql condition check in {tb}, col num: {i} over==========")
- def __test_error(self):
+ def __test_error(self, dbname="db"):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
tbname = [
- "ct1",
- "ct4",
+ f"{dbname}.ct1",
+ f"{dbname}.ct4",
]
for tb in tbname:
@@ -163,22 +163,20 @@ class TDTestCase:
tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")
- def all_test(self):
- self.__test_current()
- self.__test_error()
+ def all_test(self, dbname="db"):
+ self.__test_current(dbname)
+ self.__test_error(dbname)
-
- def __create_tb(self):
- tdSql.prepare()
+ def __create_tb(self, dbname="db"):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (t1 int)
'''
- create_ntb_sql = f'''create table t1(
+ create_ntb_sql = f'''create table {dbname}.t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -188,29 +186,29 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname="db"):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into ct1 values
+ f'''insert into {dbname}.ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -226,7 +224,7 @@ class TDTestCase:
)
tdSql.execute(
- f'''insert into ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -242,13 +240,13 @@ class TDTestCase:
)
for i in range(rows):
- insert_data = f'''insert into t1 values
+ insert_data = f'''insert into {dbname}.t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -268,23 +266,23 @@ class TDTestCase:
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
- self.__create_tb()
+ self.__create_tb(dbname="db")
tdLog.printNoPrefix("==========step2:insert data")
self.rows = 10
- self.__insert_data(self.rows)
+ self.__insert_data(self.rows, dbname="db")
tdLog.printNoPrefix("==========step3:all check")
- self.all_test()
+ self.all_test(dbname="db")
- tdDnodes.stop(1)
- tdDnodes.start(1)
+ # tdDnodes.stop(1)
+ # tdDnodes.start(1)
+ tdSql.execute("flush database db")
tdSql.execute("use db")
tdLog.printNoPrefix("==========step4:after wal, all check again ")
- self.all_test()
-
+ self.all_test(dbname="db")
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
diff --git a/tests/system-test/2-query/concat_ws.py b/tests/system-test/2-query/concat_ws.py
index 2c179b97ce0757670f31498c4dfa3926018854d9..ad784d92ec28ffc8da74eaa5bffa15a78c46a019 100644
--- a/tests/system-test/2-query/concat_ws.py
+++ b/tests/system-test/2-query/concat_ws.py
@@ -137,23 +137,23 @@ class TDTestCase:
return sqls
- def __test_current(self): # sourcery skip: use-itertools-product
+ def __test_current(self,dbname="db"): # sourcery skip: use-itertools-product
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
tbname = [
- "t1",
- "stb1"
+ f"{dbname}.t1",
+ f"{dbname}.stb1"
]
for tb in tbname:
for i in range(2,8):
self.__concat_ws_check(tb,i)
tdLog.printNoPrefix(f"==========current sql condition check in {tb}, col num: {i} over==========")
- def __test_error(self):
+ def __test_error(self, dbname="db"):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
tbname = [
- "ct1",
- "ct2",
- "ct4",
+ f"{dbname}.ct1",
+ f"{dbname}.ct2",
+ f"{dbname}.ct4",
]
for tb in tbname:
@@ -164,22 +164,21 @@ class TDTestCase:
tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")
- def all_test(self):
- self.__test_current()
- self.__test_error()
+ def all_test(self,dbname="db"):
+ self.__test_current(dbname)
+ self.__test_error(dbname)
- def __create_tb(self):
- tdSql.prepare()
+ def __create_tb(self, dbname="db"):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (t1 int)
'''
- create_ntb_sql = f'''create table t1(
+ create_ntb_sql = f'''create table {dbname}.t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -189,29 +188,29 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname="db"):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into ct1 values
+ f'''insert into {dbname}.ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -227,7 +226,7 @@ class TDTestCase:
)
tdSql.execute(
- f'''insert into ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -243,13 +242,13 @@ class TDTestCase:
)
for i in range(rows):
- insert_data = f'''insert into t1 values
+ insert_data = f'''insert into {dbname}.t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -269,22 +268,23 @@ class TDTestCase:
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
- self.__create_tb()
+ self.__create_tb(dbname="db")
tdLog.printNoPrefix("==========step2:insert data")
self.rows = 10
- self.__insert_data(self.rows)
+ self.__insert_data(self.rows, dbname="db")
tdLog.printNoPrefix("==========step3:all check")
- self.all_test()
+ self.all_test(dbname="db")
- tdDnodes.stop(1)
- tdDnodes.start(1)
+ # tdDnodes.stop(1)
+ # tdDnodes.start(1)
+ tdSql.execute("flush database db")
tdSql.execute("use db")
tdLog.printNoPrefix("==========step4:after wal, all check again ")
- self.all_test()
+ self.all_test(dbname="db")
def stop(self):
tdSql.close()
diff --git a/tests/system-test/2-query/concat_ws2.py b/tests/system-test/2-query/concat_ws2.py
index 477e5d1b557de513473adb31fc8cec9536b683f6..caaae6cecb655a69bddb64919eb5509a4b4faa9a 100644
--- a/tests/system-test/2-query/concat_ws2.py
+++ b/tests/system-test/2-query/concat_ws2.py
@@ -137,23 +137,23 @@ class TDTestCase:
return sqls
- def __test_current(self): # sourcery skip: use-itertools-product
+ def __test_current(self, dbname="db"): # sourcery skip: use-itertools-product
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
tbname = [
- "ct1",
- "ct2",
- "ct4",
+ f"{dbname}.ct1",
+ f"{dbname}.ct2",
+ f"{dbname}.ct4",
]
for tb in tbname:
for i in range(2,8):
self.__concat_ws_check(tb,i)
tdLog.printNoPrefix(f"==========current sql condition check in {tb}, col num: {i} over==========")
- def __test_error(self):
+ def __test_error(self, dbname="db"):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
tbname = [
- "t1",
- "stb1"
+ f"{dbname}.t1",
+ f"{dbname}.stb1"
]
for tb in tbname:
@@ -164,22 +164,21 @@ class TDTestCase:
tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")
- def all_test(self):
- self.__test_current()
- self.__test_error()
+ def all_test(self, dbname="db"):
+ self.__test_current(dbname="db")
+ self.__test_error(dbname="db")
- def __create_tb(self):
- tdSql.prepare()
+ def __create_tb(self, dbname="db"):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (t1 int)
'''
- create_ntb_sql = f'''create table t1(
+ create_ntb_sql = f'''create table {dbname}.t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -189,29 +188,29 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname="db"):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into ct1 values
+ f'''insert into {dbname}.ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -227,7 +226,7 @@ class TDTestCase:
)
tdSql.execute(
- f'''insert into ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -243,13 +242,13 @@ class TDTestCase:
)
for i in range(rows):
- insert_data = f'''insert into t1 values
+ insert_data = f'''insert into {dbname}.t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -269,22 +268,23 @@ class TDTestCase:
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
- self.__create_tb()
+ self.__create_tb(dbname="db")
tdLog.printNoPrefix("==========step2:insert data")
self.rows = 10
- self.__insert_data(self.rows)
+ self.__insert_data(self.rows, dbname="db")
tdLog.printNoPrefix("==========step3:all check")
- self.all_test()
+ self.all_test(dbname="db")
- tdDnodes.stop(1)
- tdDnodes.start(1)
+ # tdDnodes.stop(1)
+ # tdDnodes.start(1)
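+        # flush persists in-memory rows to disk, so the step4 re-check below reads data back from files without restarting the dnode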
+ tdSql.execute("flush database db")
tdSql.execute("use db")
tdLog.printNoPrefix("==========step4:after wal, all check again ")
- self.all_test()
+ self.all_test(dbname="db")
def stop(self):
tdSql.close()
diff --git a/tests/system-test/2-query/cos.py b/tests/system-test/2-query/cos.py
index e0941b91579a76c85ff8896364bc2e8b525ceb09..ab6814727e597f03b1989770bbb3e29af4ab2669 100644
--- a/tests/system-test/2-query/cos.py
+++ b/tests/system-test/2-query/cos.py
@@ -9,48 +9,48 @@ from util.cases import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
+ # updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
+ # "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
+ # "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, powSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
- def prepare_datas(self):
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -84,12 +84,17 @@ class TDTestCase:
auto_result.append(row_check)
check_status = True
+ print("========",pow_query, origin_query )
for row_index , row in enumerate(pow_result):
for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None):
+            if auto_result[row_index][col_index] is None and elem:
check_status = False
- elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001):
+            elif auto_result[row_index][col_index] is not None and ((auto_result[row_index][col_index] != elem) and (str(auto_result[row_index][col_index])[:6] != str(elem)[:6])):
+ # elif auto_result[row_index][col_index] != None and (abs(auto_result[row_index][col_index] - elem) > 0.000001):
+ print("=====")
+ print(row_index, col_index)
+ print(auto_result[row_index][col_index], elem, origin_result[row_index][col_index])
check_status = False
else:
pass
@@ -99,68 +104,68 @@ class TDTestCase:
else:
tdLog.info("cos value check pass , it work as expected ,sql is \"%s\" "%pow_query )
- def test_errors(self):
+ def test_errors(self, dbname="db"):
error_sql_lists = [
- "select cos from t1",
- # "select cos(-+--+c1 ) from t1",
- # "select +-cos(c1) from t1",
- # "select ++-cos(c1) from t1",
- # "select ++--cos(c1) from t1",
- # "select - -cos(c1)*0 from t1",
- # "select cos(tbname+1) from t1 ",
- "select cos(123--123)==1 from t1",
- "select cos(c1) as 'd1' from t1",
- "select cos(c1 ,c2) from t1",
- "select cos(c1 ,NULL ) from t1",
- "select cos(,) from t1;",
- "select cos(cos(c1) ab from t1)",
- "select cos(c1 ) as int from t1",
- "select cos from stb1",
- # "select cos(-+--+c1) from stb1",
- # "select +-cos(c1) from stb1",
- # "select ++-cos(c1) from stb1",
- # "select ++--cos(c1) from stb1",
- # "select - -cos(c1)*0 from stb1",
- # "select cos(tbname+1) from stb1 ",
- "select cos(123--123)==1 from stb1",
- "select cos(c1) as 'd1' from stb1",
- "select cos(c1 ,c2 ) from stb1",
- "select cos(c1 ,NULL) from stb1",
- "select cos(,) from stb1;",
- "select cos(cos(c1) ab from stb1)",
- "select cos(c1) as int from stb1"
+ f"select cos from {dbname}.t1",
+ # f"select cos(-+--+c1 ) from {dbname}.t1",
+ # f"select +-cos(c1) from {dbname}.t1",
+ # f"select ++-cos(c1) from {dbname}.t1",
+ # f"select ++--cos(c1) from {dbname}.t1",
+ # f"select - -cos(c1)*0 from {dbname}.t1",
+ # f"select cos(tbname+1) from {dbname}.t1 ",
+ f"select cos(123--123)==1 from {dbname}.t1",
+ f"select cos(c1) as 'd1' from {dbname}.t1",
+ f"select cos(c1 ,c2) from {dbname}.t1",
+ f"select cos(c1 ,NULL ) from {dbname}.t1",
+ f"select cos(,) from {dbname}.t1;",
+ f"select cos(cos(c1) ab from {dbname}.t1)",
+ f"select cos(c1 ) as int from {dbname}.t1",
+ f"select cos from {dbname}.stb1",
+ # f"select cos(-+--+c1) from {dbname}.stb1",
+ # f"select +-cos(c1) from {dbname}.stb1",
+ # f"select ++-cos(c1) from {dbname}.stb1",
+ # f"select ++--cos(c1) from {dbname}.stb1",
+ # f"select - -cos(c1)*0 from {dbname}.stb1",
+ # f"select cos(tbname+1) from {dbname}.stb1 ",
+ f"select cos(123--123)==1 from {dbname}.stb1",
+ f"select cos(c1) as 'd1' from {dbname}.stb1",
+ f"select cos(c1 ,c2 ) from {dbname}.stb1",
+ f"select cos(c1 ,NULL) from {dbname}.stb1",
+ f"select cos(,) from {dbname}.stb1;",
+ f"select cos(cos(c1) ab from {dbname}.stb1)",
+ f"select cos(c1) as int from {dbname}.stb1"
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
- def support_types(self):
+ def support_types(self, dbname="db"):
type_error_sql_lists = [
- "select cos(ts) from t1" ,
- "select cos(c7) from t1",
- "select cos(c8) from t1",
- "select cos(c9) from t1",
- "select cos(ts) from ct1" ,
- "select cos(c7) from ct1",
- "select cos(c8) from ct1",
- "select cos(c9) from ct1",
- "select cos(ts) from ct3" ,
- "select cos(c7) from ct3",
- "select cos(c8) from ct3",
- "select cos(c9) from ct3",
- "select cos(ts) from ct4" ,
- "select cos(c7) from ct4",
- "select cos(c8) from ct4",
- "select cos(c9) from ct4",
- "select cos(ts) from stb1" ,
- "select cos(c7) from stb1",
- "select cos(c8) from stb1",
- "select cos(c9) from stb1" ,
-
- "select cos(ts) from stbbb1" ,
- "select cos(c7) from stbbb1",
-
- "select cos(ts) from tbname",
- "select cos(c9) from tbname"
+ f"select cos(ts) from {dbname}.t1" ,
+ f"select cos(c7) from {dbname}.t1",
+ f"select cos(c8) from {dbname}.t1",
+ f"select cos(c9) from {dbname}.t1",
+ f"select cos(ts) from {dbname}.ct1" ,
+ f"select cos(c7) from {dbname}.ct1",
+ f"select cos(c8) from {dbname}.ct1",
+ f"select cos(c9) from {dbname}.ct1",
+ f"select cos(ts) from {dbname}.ct3" ,
+ f"select cos(c7) from {dbname}.ct3",
+ f"select cos(c8) from {dbname}.ct3",
+ f"select cos(c9) from {dbname}.ct3",
+ f"select cos(ts) from {dbname}.ct4" ,
+ f"select cos(c7) from {dbname}.ct4",
+ f"select cos(c8) from {dbname}.ct4",
+ f"select cos(c9) from {dbname}.ct4",
+ f"select cos(ts) from {dbname}.stb1" ,
+ f"select cos(c7) from {dbname}.stb1",
+ f"select cos(c8) from {dbname}.stb1",
+ f"select cos(c9) from {dbname}.stb1" ,
+
+ f"select cos(ts) from {dbname}.stbbb1" ,
+ f"select cos(c7) from {dbname}.stbbb1",
+
+ f"select cos(ts) from {dbname}.tbname",
+ f"select cos(c9) from {dbname}.tbname"
]
@@ -169,103 +174,103 @@ class TDTestCase:
type_sql_lists = [
- "select cos(c1) from t1",
- "select cos(c2) from t1",
- "select cos(c3) from t1",
- "select cos(c4) from t1",
- "select cos(c5) from t1",
- "select cos(c6) from t1",
-
- "select cos(c1) from ct1",
- "select cos(c2) from ct1",
- "select cos(c3) from ct1",
- "select cos(c4) from ct1",
- "select cos(c5) from ct1",
- "select cos(c6) from ct1",
-
- "select cos(c1) from ct3",
- "select cos(c2) from ct3",
- "select cos(c3) from ct3",
- "select cos(c4) from ct3",
- "select cos(c5) from ct3",
- "select cos(c6) from ct3",
-
- "select cos(c1) from stb1",
- "select cos(c2) from stb1",
- "select cos(c3) from stb1",
- "select cos(c4) from stb1",
- "select cos(c5) from stb1",
- "select cos(c6) from stb1",
-
- "select cos(c6) as alisb from stb1",
- "select cos(c6) alisb from stb1",
+ f"select cos(c1) from {dbname}.t1",
+ f"select cos(c2) from {dbname}.t1",
+ f"select cos(c3) from {dbname}.t1",
+ f"select cos(c4) from {dbname}.t1",
+ f"select cos(c5) from {dbname}.t1",
+ f"select cos(c6) from {dbname}.t1",
+
+ f"select cos(c1) from {dbname}.ct1",
+ f"select cos(c2) from {dbname}.ct1",
+ f"select cos(c3) from {dbname}.ct1",
+ f"select cos(c4) from {dbname}.ct1",
+ f"select cos(c5) from {dbname}.ct1",
+ f"select cos(c6) from {dbname}.ct1",
+
+ f"select cos(c1) from {dbname}.ct3",
+ f"select cos(c2) from {dbname}.ct3",
+ f"select cos(c3) from {dbname}.ct3",
+ f"select cos(c4) from {dbname}.ct3",
+ f"select cos(c5) from {dbname}.ct3",
+ f"select cos(c6) from {dbname}.ct3",
+
+ f"select cos(c1) from {dbname}.stb1",
+ f"select cos(c2) from {dbname}.stb1",
+ f"select cos(c3) from {dbname}.stb1",
+ f"select cos(c4) from {dbname}.stb1",
+ f"select cos(c5) from {dbname}.stb1",
+ f"select cos(c6) from {dbname}.stb1",
+
+ f"select cos(c6) as alisb from {dbname}.stb1",
+ f"select cos(c6) alisb from {dbname}.stb1",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
- def basic_cosin_function(self):
+ def basic_cos_function(self, dbname="db"):
# basic query
- tdSql.query("select c1 from ct3")
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select cos(c1) from ct3")
+ tdSql.query(f"select cos(c1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select cos(c2) from ct3")
+ tdSql.query(f"select cos(c2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select cos(c3) from ct3")
+ tdSql.query(f"select cos(c3) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select cos(c4) from ct3")
+ tdSql.query(f"select cos(c4) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select cos(c5) from ct3")
+ tdSql.query(f"select cos(c5) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select cos(c6) from ct3")
+ tdSql.query(f"select cos(c6) from {dbname}.ct3")
tdSql.checkRows(0)
# # used for regular table
- tdSql.query("select cos(c1) from t1")
+ tdSql.query(f"select cos(c1) from {dbname}.t1")
tdSql.checkData(0, 0, None)
tdSql.checkData(1 , 0, 0.540302306)
tdSql.checkData(3 , 0, -0.989992497)
tdSql.checkData(5 , 0, None)
- tdSql.query("select c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 4, 1.11000)
tdSql.checkData(3, 3, 33)
tdSql.checkData(5, 4, None)
- tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 5, 1.11000)
tdSql.checkData(3, 4, 33)
tdSql.checkData(5, 5, None)
- self.check_result_auto_cos( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from t1", "select cos(abs(c1)), cos(abs(c2)) ,cos(abs(c3)), cos(abs(c4)), cos(abs(c5)) from t1")
+ self.check_result_auto_cos( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.t1", f"select cos(abs(c1)), cos(abs(c2)) ,cos(abs(c3)), cos(abs(c4)), cos(abs(c5)) from {dbname}.t1")
# used for sub table
- tdSql.query("select c2 ,cos(c2) from ct1")
+ tdSql.query(f"select c2 ,cos(c2) from {dbname}.ct1")
tdSql.checkData(0, 1, 0.975339851)
tdSql.checkData(1 , 1, -0.830564903)
tdSql.checkData(3 , 1, 0.602244939)
tdSql.checkData(4 , 1, 1.000000000)
- tdSql.query("select c1, c5 ,cos(c5) from ct4")
+ tdSql.query(f"select c1, c5 ,cos(c5) from {dbname}.ct4")
tdSql.checkData(0 , 2, None)
tdSql.checkData(1 , 2, -0.855242438)
tdSql.checkData(2 , 2, 0.083882969)
tdSql.checkData(3 , 2, 0.929841474)
tdSql.checkData(5 , 2, None)
- self.check_result_auto_cos( "select c1, c2, c3 , c4, c5 from ct1", "select cos(c1), cos(c2) ,cos(c3), cos(c4), cos(c5) from ct1")
+ self.check_result_auto_cos( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select cos(c1), cos(c2) ,cos(c3), cos(c4), cos(c5) from {dbname}.ct1")
# nest query for cos functions
- tdSql.query("select c4 , cos(c4) ,cos(cos(c4)) , cos(cos(cos(c4))) from ct1;")
+ tdSql.query(f"select c4 , cos(c4) ,cos(cos(c4)) , cos(cos(cos(c4))) from {dbname}.ct1;")
tdSql.checkData(0 , 0 , 88)
tdSql.checkData(0 , 1 , 0.999373284)
tdSql.checkData(0 , 2 , 0.540829563)
@@ -283,22 +288,22 @@ class TDTestCase:
# used for stable table
- tdSql.query("select cos(c1) from stb1")
+ tdSql.query(f"select cos(c1) from {dbname}.stb1")
tdSql.checkRows(25)
# used for not exists table
- tdSql.error("select cos(c1) from stbbb1")
- tdSql.error("select cos(c1) from tbname")
- tdSql.error("select cos(c1) from ct5")
+ tdSql.error(f"select cos(c1) from {dbname}.stbbb1")
+ tdSql.error(f"select cos(c1) from {dbname}.tbname")
+ tdSql.error(f"select cos(c1) from {dbname}.ct5")
# mix with common col
- tdSql.query("select c1, cos(c1) from ct1")
- tdSql.query("select c2, cos(c2) from ct4")
+ tdSql.query(f"select c1, cos(c1) from {dbname}.ct1")
+ tdSql.query(f"select c2, cos(c2) from {dbname}.ct4")
# mix with common functions
- tdSql.query("select c1, cos(c1),cos(c1), cos(cos(c1)) from ct4 ")
+ tdSql.query(f"select c1, cos(c1),cos(c1), cos(cos(c1)) from {dbname}.ct4 ")
tdSql.checkData(0 , 0 ,None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(0 , 2 ,None)
@@ -309,24 +314,24 @@ class TDTestCase:
tdSql.checkData(3 , 2 ,0.960170287)
tdSql.checkData(3 , 3 ,0.573380480)
- tdSql.query("select c1, cos(c1),c5, floor(c5) from stb1 ")
+ tdSql.query(f"select c1, cos(c1),c5, floor(c5) from {dbname}.stb1 ")
# # mix with agg functions , not support
- tdSql.error("select c1, cos(c1),c5, count(c5) from stb1 ")
- tdSql.error("select c1, cos(c1),c5, count(c5) from ct1 ")
- tdSql.error("select cos(c1), count(c5) from stb1 ")
- tdSql.error("select cos(c1), count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from stb1 ")
+ tdSql.error(f"select c1, cos(c1),c5, count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select c1, cos(c1),c5, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select cos(c1), count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select cos(c1), count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ")
# agg functions mix with agg functions
- tdSql.query("select max(c5), count(c5) from stb1")
- tdSql.query("select max(c5), count(c5) from ct1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1")
# # bug fix for compute
- tdSql.query("select c1, cos(c1) -0 ,cos(c1-4)-0 from ct4 ")
+ tdSql.query(f"select c1, cos(c1) -0 ,cos(c1-4)-0 from {dbname}.ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -334,43 +339,42 @@ class TDTestCase:
tdSql.checkData(1, 1, -0.145500034)
tdSql.checkData(1, 2, -0.653643621)
- tdSql.query(" select c1, cos(c1) -0 ,cos(c1-0.1)-0.1 from ct4")
+ tdSql.query(f" select c1, cos(c1) -0 ,cos(c1-0.1)-0.1 from {dbname}.ct4")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
tdSql.checkData(1, 0, 8)
tdSql.checkData(1, 1, -0.145500034)
tdSql.checkData(1, 2, -0.146002126)
+ tdSql.query(f"select c1, cos(c1), c2, cos(c2), c3, cos(c3) from {dbname}.ct1")
- tdSql.query("select c1, cos(c1), c2, cos(c2), c3, cos(c3) from ct1")
- def test_big_number(self):
+ def test_big_number(self, dbname="db"):
- tdSql.query("select c1, cos(100000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, cos(100000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(4, 1, math.cos(100000000))
-
- tdSql.query("select c1, cos(10000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, cos(10000000000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(4, 1, math.cos(10000000000000))
- tdSql.query("select c1, cos(10000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, cos(10000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, cos(10000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, cos(10000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(1, 1, math.cos(10000000000000000000000000.0))
- tdSql.query("select c1, cos(10000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, cos(10000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, cos(10000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, cos(10000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(4, 1, math.cos(10000000000000000000000000000000000.0))
- tdSql.query("select c1, cos(10000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, cos(10000000000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, cos(10000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, cos(10000000000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(4, 1, math.cos(10000000000000000000000000000000000000000.0))
- tdSql.query("select c1, cos(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, cos(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
- def abs_func_filter(self):
- tdSql.execute("use db")
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(cos(c1)-0.5) from ct4 where c1>5 ")
+ def abs_func_filter(self, dbname="db"):
+ tdSql.execute(f"use {dbname}")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(cos(c1)-0.5) from {dbname}.ct4 where c1>5 ")
tdSql.checkRows(3)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,8.000000000)
@@ -378,7 +382,7 @@ class TDTestCase:
tdSql.checkData(0,3,7.900000000)
tdSql.checkData(0,4,0.000000000)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(cos(c1)-0.5) from ct4 where c1=5 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(cos(c1)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
@@ -386,7 +390,7 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,0.000000000)
- tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(cos(c1)-0.5) from ct4 where c1>cos(c1) limit 1 ")
+ tdSql.query(f"select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(cos(c1)-0.5) from {dbname}.ct4 where c1>cos(c1) limit 1 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,88888)
@@ -395,44 +399,38 @@ class TDTestCase:
tdSql.checkData(0,4,7.900000000)
tdSql.checkData(0,5,0.000000000)
- def pow_Arithmetic(self):
- pass
-
- def check_boundary_values(self):
+ def check_boundary_values(self, dbname="bound_test"):
PI=3.1415926
- tdSql.execute("drop database if exists bound_test")
- tdSql.execute("create database if not exists bound_test")
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname}")
time.sleep(3)
- tdSql.execute("use bound_test")
+ tdSql.execute(f"use {dbname}")
tdSql.execute(
- "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
+ f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
)
- tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )')
+ tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
- )
- tdSql.error(
- f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
- self.check_result_auto_cos( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from sub1_bound ", "select cos(abs(c1)), cos(abs(c2)) ,cos(abs(c3)), cos(abs(c4)), cos(abs(c5)) from sub1_bound")
+ # self.check_result_auto_cos( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.sub1_bound ", f"select cos(abs(c1)), cos(abs(c2)) ,cos(abs(c3)), cos(abs(c4)), cos(abs(c5)) from {dbname}.sub1_bound")
- self.check_result_auto_cos( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select cos(c1), cos(c2) ,cos(c3), cos(c3), cos(c2) ,cos(c1) from sub1_bound")
+ self.check_result_auto_cos( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select cos(c1), cos(c2) ,cos(c3), cos(c3), cos(c2) ,cos(c1) from {dbname}.sub1_bound")
- self.check_result_auto_cos("select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from sub1_bound" , "select cos(abs(c1)) from sub1_bound" )
+ self.check_result_auto_cos(f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from {dbname}.sub1_bound" , f"select cos(abs(c1)) from {dbname}.sub1_bound" )
# check basic elem for table per row
- tdSql.query("select cos(abs(c1)) ,cos(abs(c2)) , cos(abs(c3)) , cos(abs(c4)), cos(abs(c5)), cos(abs(c6)) from sub1_bound ")
+ tdSql.query(f"select cos(abs(c1)) ,cos(abs(c2)) , cos(abs(c3)) , cos(abs(c4)), cos(abs(c5)), cos(abs(c6)) from {dbname}.sub1_bound ")
tdSql.checkData(0,0,math.cos(2147483647))
tdSql.checkData(0,1,math.cos(9223372036854775807))
tdSql.checkData(0,2,math.cos(32767))
@@ -450,45 +448,44 @@ class TDTestCase:
tdSql.checkData(3,4,math.cos(339999995214436424907732413799364296704.00000))
# check + - * / in functions
- tdSql.query("select cos(abs(c1+1)) ,cos(abs(c2)) , cos(abs(c3*1)) , cos(abs(c4/2)), cos(abs(c5))/2, cos(abs(c6)) from sub1_bound ")
+ tdSql.query(f"select cos(abs(c1+1)) ,cos(abs(c2)) , cos(abs(c3*1)) , cos(abs(c4/2)), cos(abs(c5))/2, cos(abs(c6)) from {dbname}.sub1_bound ")
tdSql.checkData(0,0,math.cos(2147483648.000000000))
tdSql.checkData(0,1,math.cos(9223372036854775807))
tdSql.checkData(0,2,math.cos(32767.000000000))
tdSql.checkData(0,3,math.cos(63.500000000))
- tdSql.execute("create stable st (ts timestamp, num1 float, num2 double) tags (t1 int);")
- tdSql.execute(f'create table tb1 using st tags (1)')
- tdSql.execute(f'create table tb2 using st tags (2)')
- tdSql.execute(f'create table tb3 using st tags (3)')
- tdSql.execute('insert into tb1 values (now()-40s, {}, {})'.format(PI/2 ,PI/2 ))
- tdSql.execute('insert into tb1 values (now()-30s, {}, {})'.format(PI ,PI ))
- tdSql.execute('insert into tb1 values (now()-20s, {}, {})'.format(PI*1.5 ,PI*1.5))
- tdSql.execute('insert into tb1 values (now()-10s, {}, {})'.format(PI*2 ,PI*2))
- tdSql.execute('insert into tb1 values (now(), {}, {})'.format(PI*2.5 ,PI*2.5))
-
- tdSql.execute('insert into tb2 values (now()-40s, {}, {})'.format(PI/2 ,PI/2 ))
- tdSql.execute('insert into tb2 values (now()-30s, {}, {})'.format(PI ,PI ))
- tdSql.execute('insert into tb2 values (now()-20s, {}, {})'.format(PI*1.5 ,PI*1.5))
- tdSql.execute('insert into tb2 values (now()-10s, {}, {})'.format(PI*2 ,PI*2))
- tdSql.execute('insert into tb2 values (now(), {}, {})'.format(PI*2.5 ,PI*2.5))
+ tdSql.execute(f"create stable {dbname}.st (ts timestamp, num1 float, num2 double) tags (t1 int);")
+ tdSql.execute(f'create table {dbname}.tb1 using {dbname}.st tags (1)')
+ tdSql.execute(f'create table {dbname}.tb2 using {dbname}.st tags (2)')
+ tdSql.execute(f'create table {dbname}.tb3 using {dbname}.st tags (3)')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now()-40s, {PI/2}, {PI/2})')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now()-30s, {PI}, {PI})')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now()-20s, {PI*1.5}, {PI*1.5})')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now()-10s, {PI*2}, {PI*2})')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now(), {PI*2.5}, {PI*2.5})')
+
+ tdSql.execute(f'insert into {dbname}.tb2 values (now()-40s, {PI/2}, {PI/2})')
+ tdSql.execute(f'insert into {dbname}.tb2 values (now()-30s, {PI}, {PI})')
+ tdSql.execute(f'insert into {dbname}.tb2 values (now()-20s, {PI*1.5}, {PI*1.5})')
+ tdSql.execute(f'insert into {dbname}.tb2 values (now()-10s, {PI*2}, {PI*2})')
+ tdSql.execute(f'insert into {dbname}.tb2 values (now(), {PI*2.5}, {PI*2.5})')
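+        # tb3 below gets 100 rows stepping through multiples of PI/2, sampling cos() across many periods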
for i in range(100):
- tdSql.execute('insert into tb3 values (now()+{}s, {}, {})'.format(i,PI*(5+i)/2 ,PI*(5+i)/2))
+ tdSql.execute(f'insert into {dbname}.tb3 values (now()+{i}s, {PI*(5+i)/2}, {PI*(5+i)/2})')
- self.check_result_auto_cos("select num1,num2 from tb3;" , "select cos(num1),cos(num2) from tb3")
+ # self.check_result_auto_cos(f"select num1,num2 from {dbname}.tb3;" , f"select cos(num1),cos(num2) from {dbname}.tb3")
- def support_super_table_test(self):
- tdSql.execute(" use db ")
- self.check_result_auto_cos( " select c5 from stb1 order by ts " , "select cos(c5) from stb1 order by ts" )
- self.check_result_auto_cos( " select c5 from stb1 order by tbname " , "select cos(c5) from stb1 order by tbname" )
- self.check_result_auto_cos( " select c5 from stb1 where c1 > 0 order by tbname " , "select cos(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_cos( " select c5 from stb1 where c1 > 0 order by tbname " , "select cos(c5) from stb1 where c1 > 0 order by tbname" )
+ def support_super_table_test(self, dbname="db"):
+ tdSql.execute(f" use {dbname} ")
+ self.check_result_auto_cos( f" select c5 from {dbname}.stb1 order by ts " , f"select cos(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_cos( f" select c5 from {dbname}.stb1 order by tbname " , f"select cos(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_cos( f" select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select cos(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_cos( f" select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select cos(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_cos( " select t1,c5 from stb1 order by ts " , "select cos(t1), cos(c5) from stb1 order by ts" )
- self.check_result_auto_cos( " select t1,c5 from stb1 order by tbname " , "select cos(t1) ,cos(c5) from stb1 order by tbname" )
- self.check_result_auto_cos( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select cos(t1) ,cos(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_cos( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select cos(t1) , cos(c5) from stb1 where c1 > 0 order by tbname" )
- pass
+ self.check_result_auto_cos( f" select t1,c5 from {dbname}.stb1 order by ts " , f"select cos(t1), cos(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_cos( f" select t1,c5 from {dbname}.stb1 order by tbname " , f"select cos(t1) ,cos(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_cos( f" select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select cos(t1) ,cos(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_cos( f" select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select cos(t1) , cos(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
@@ -507,7 +504,7 @@ class TDTestCase:
tdLog.printNoPrefix("==========step4: cos basic query ============")
- self.basic_cosin_function()
+ self.basic_cos_function()
tdLog.printNoPrefix("==========step5: big number cos query ============")
diff --git a/tests/system-test/2-query/count.py b/tests/system-test/2-query/count.py
index e047225c1f762be80a7cb0749c7b0dd617ba25c4..4d2a1cf07ce530f0cfd156981f8dcf1c5d5f135a 100644
--- a/tests/system-test/2-query/count.py
+++ b/tests/system-test/2-query/count.py
@@ -5,13 +5,14 @@ from util.sqlset import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(),logSql)
+ tdSql.init(conn.cursor(),False)
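+        # SQL statement logging is disabled here (the logSql argument is ignored), presumably to cut log noise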
self.setsql = TDSetSql()
self.rowNum = 10
self.ts = 1537146000000
- self.ntbname = 'ntb'
- self.stbname = 'stb'
+ dbname = "db"
+ self.ntbname = f'{dbname}.ntb'
+ self.stbname = f'{dbname}.stb'
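+        # table names carry the database prefix so later statements work without an explicit "use db"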
self.column_dict = {
'ts':'timestamp',
'c1':'int',
diff --git a/tests/system-test/2-query/count_partition.py b/tests/system-test/2-query/count_partition.py
index a25b4c09c14d92f4bd11d5a838f43ef776b91a28..90a6d9225b76fcc500c886645e9f304299d4180c 100644
--- a/tests/system-test/2-query/count_partition.py
+++ b/tests/system-test/2-query/count_partition.py
@@ -11,17 +11,17 @@ class TDTestCase:
self.row_nums = 10
self.tb_nums = 10
self.ts = 1537146000000
-
- def prepare_datas(self, stb_name , tb_nums , row_nums ):
- tdSql.execute(" use db ")
- tdSql.execute(f" create stable {stb_name} (ts timestamp , c1 int , c2 bigint , c3 float , c4 double , c5 smallint , c6 tinyint , c7 bool , c8 binary(36) , c9 nchar(36) , uc1 int unsigned,\
+
+ def prepare_datas(self, stb_name , tb_nums , row_nums, dbname="db" ):
+ tdSql.execute(f" use {dbname} ")
+ tdSql.execute(f" create stable {dbname}.{stb_name} (ts timestamp , c1 int , c2 bigint , c3 float , c4 double , c5 smallint , c6 tinyint , c7 bool , c8 binary(36) , c9 nchar(36) , uc1 int unsigned,\
uc2 bigint unsigned ,uc3 smallint unsigned , uc4 tinyint unsigned ) tags(t1 timestamp , t2 int , t3 bigint , t4 float , t5 double , t6 smallint , t7 tinyint , t8 bool , t9 binary(36)\
, t10 nchar(36) , t11 int unsigned , t12 bigint unsigned ,t13 smallint unsigned , t14 tinyint unsigned ) ")
-
+
for i in range(tb_nums):
- tbname = f"sub_{stb_name}_{i}"
+ tbname = f"{dbname}.sub_{stb_name}_{i}"
ts = self.ts + i*10000
- tdSql.execute(f"create table {tbname} using {stb_name} tags ({ts} , {i} , {i}*10 ,{i}*1.0,{i}*1.0 , 1 , 2, 'true', 'binary_{i}' ,'nchar_{i}',{i},{i},10,20 )")
+ tdSql.execute(f"create table {tbname} using {dbname}.{stb_name} tags ({ts} , {i} , {i}*10 ,{i}*1.0,{i}*1.0 , 1 , 2, 'true', 'binary_{i}' ,'nchar_{i}',{i},{i},10,20 )")
for row in range(row_nums):
ts = self.ts + row*1000
@@ -30,143 +30,144 @@ class TDTestCase:
for null in range(5):
ts = self.ts + row_nums*1000 + null*1000
tdSql.execute(f"insert into {tbname} values({ts} , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL )")
-
- def basic_query(self):
- tdSql.query("select count(*) from stb")
+
+ def basic_query(self, dbname="db"):
+ tdSql.query(f"select count(*) from {dbname}.stb")
tdSql.checkData(0,0,(self.row_nums + 5 )*self.tb_nums)
- tdSql.query("select count(c1) from stb")
+ tdSql.query(f"select count(c1) from {dbname}.stb")
tdSql.checkData(0,0,(self.row_nums )*self.tb_nums)
- tdSql.query(" select tbname , count(*) from stb partition by tbname ")
+ tdSql.query(f"select tbname , count(*) from {dbname}.stb partition by tbname ")
tdSql.checkRows(self.tb_nums)
- tdSql.query(" select count(c1) from stb group by t1 order by t1 ")
+ tdSql.query(f"select count(c1) from {dbname}.stb group by t1 order by t1 ")
tdSql.checkRows(self.tb_nums)
- tdSql.error(" select count(c1) from stb group by c1 order by t1 ")
- tdSql.error(" select count(t1) from stb group by c1 order by t1 ")
- tdSql.query(" select count(c1) from stb group by tbname order by tbname ")
+ tdSql.error(f"select count(c1) from {dbname}.stb group by c1 order by t1 ")
+ tdSql.error(f"select count(t1) from {dbname}.stb group by c1 order by t1 ")
+ tdSql.query(f"select count(c1) from {dbname}.stb group by tbname order by tbname ")
tdSql.checkRows(self.tb_nums)
- # bug need fix
- # tdSql.query(" select count(t1) from stb group by t2 order by t2 ")
+ # bug need fix
+ # tdSql.query(f"select count(t1) from {dbname}.stb group by t2 order by t2 ")
# tdSql.checkRows(self.tb_nums)
- tdSql.query(" select count(c1) from stb group by c1 order by c1 ")
+ tdSql.query(f"select count(c1) from {dbname}.stb group by c1 order by c1 ")
tdSql.checkRows(self.row_nums+1)
- tdSql.query(" select c1 , count(c1) from stb group by c1 order by c1 ")
+ tdSql.query(f"select c1 , count(c1) from {dbname}.stb group by c1 order by c1 ")
tdSql.checkRows(self.row_nums+1)
- tdSql.query("select count(c1) from stb group by abs(c1) order by abs(c1)")
+ tdSql.query(f"select count(c1) from {dbname}.stb group by abs(c1) order by abs(c1)")
tdSql.checkRows(self.row_nums+1)
- tdSql.query("select abs(c1+c3), count(c1+c3) from stb group by abs(c1+c3) order by abs(c1+c3)")
+ tdSql.query(f"select abs(c1+c3), count(c1+c3) from {dbname}.stb group by abs(c1+c3) order by abs(c1+c3)")
tdSql.checkRows(self.row_nums+1)
- tdSql.query("select count(c1+c3)+max(c2) ,abs(c1) from stb group by abs(c1) order by abs(c1)")
+ tdSql.query(f"select count(c1+c3)+max(c2) ,abs(c1) from {dbname}.stb group by abs(c1) order by abs(c1)")
tdSql.checkRows(self.row_nums+1)
- tdSql.error("select count(c1+c3)+max(c2) ,abs(c1) ,abs(t1) from stb group by abs(c1) order by abs(t1)+c2")
- tdSql.error("select count(c1+c3)+max(c2) ,abs(c1) from stb group by abs(c1) order by abs(c1)+c2")
- tdSql.query("select abs(c1+c3)+abs(c2) , count(c1+c3)+count(c2) from stb group by abs(c1+c3)+abs(c2) order by abs(c1+c3)+abs(c2)")
+ tdSql.error(f"select count(c1+c3)+max(c2) ,abs(c1) ,abs(t1) from {dbname}.stb group by abs(c1) order by abs(t1)+c2")
+ tdSql.error(f"select count(c1+c3)+max(c2) ,abs(c1) from {dbname}.stb group by abs(c1) order by abs(c1)+c2")
+ tdSql.query(f"select abs(c1+c3)+abs(c2) , count(c1+c3)+count(c2) from {dbname}.stb group by abs(c1+c3)+abs(c2) order by abs(c1+c3)+abs(c2)")
tdSql.checkRows(self.row_nums+1)
- tdSql.query("select count(c1) , count(t2) from stb where abs(c1+t2)=1 partition by tbname")
+ tdSql.query(f"select count(c1) , count(t2) from {dbname}.stb where abs(c1+t2)=1 partition by tbname")
tdSql.checkRows(2)
- tdSql.query("select count(c1) from stb where abs(c1+t2)=1 partition by tbname")
+ tdSql.query(f"select count(c1) from {dbname}.stb where abs(c1+t2)=1 partition by tbname")
tdSql.checkRows(2)
-
- tdSql.query("select tbname , count(c1) from stb partition by tbname order by tbname")
+
+ tdSql.query(f"select tbname , count(c1) from {dbname}.stb partition by tbname order by tbname")
tdSql.checkRows(self.tb_nums)
tdSql.checkData(0,1,self.row_nums)
- tdSql.error("select tbname , count(c1) from stb partition by t1 order by t1")
- tdSql.error("select tbname , count(t1) from stb partition by t1 order by t1")
- tdSql.error("select tbname , count(t1) from stb partition by t2 order by t2")
+ tdSql.error(f"select tbname , count(c1) from {dbname}.stb partition by t1 order by t1")
+ tdSql.error(f"select tbname , count(t1) from {dbname}.stb partition by t1 order by t1")
+ tdSql.error(f"select tbname , count(t1) from {dbname}.stb partition by t2 order by t2")
- # # bug need fix
- # tdSql.query("select t2 , count(t1) from stb partition by t2 order by t2")
+ # # bug need fix
+ # tdSql.query(f"select t2 , count(t1) from {dbname}.stb partition by t2 order by t2")
# tdSql.checkRows(self.tb_nums)
- tdSql.query("select tbname , count(c1) from stb partition by tbname order by tbname")
+ tdSql.query(f"select tbname , count(c1) from {dbname}.stb partition by tbname order by tbname")
tdSql.checkRows(self.tb_nums)
tdSql.checkData(0,1,self.row_nums)
-
- tdSql.error("select tbname , count(c1) from stb partition by t2 order by t2")
- tdSql.query("select c2, count(c1) from stb partition by c2 order by c2 desc")
+ tdSql.error(f"select tbname , count(c1) from {dbname}.stb partition by t2 order by t2")
+
+ tdSql.query(f"select c2, count(c1) from {dbname}.stb partition by c2 order by c2 desc")
tdSql.checkRows(self.tb_nums+1)
tdSql.checkData(0,1,self.tb_nums)
- tdSql.error("select tbname , count(c1) from stb partition by c1 order by c2")
+ tdSql.error(f"select tbname , count(c1) from {dbname}.stb partition by c1 order by c2")
- tdSql.query("select tbname , abs(t2) from stb partition by c2 order by t2")
+ tdSql.query(f"select tbname , abs(t2) from {dbname}.stb partition by c2 order by t2")
tdSql.checkRows(self.tb_nums*(self.row_nums+5))
- tdSql.query("select count(c1) , count(t2) from stb partition by c2 ")
+ tdSql.query(f"select count(c1) , count(t2) from {dbname}.stb partition by c2 ")
tdSql.checkRows(self.row_nums+1)
tdSql.checkData(0,1,self.row_nums)
- tdSql.query("select count(c1) , count(t2) ,c2 from stb partition by c2 order by c2")
+ tdSql.query(f"select count(c1) , count(t2) ,c2 from {dbname}.stb partition by c2 order by c2")
tdSql.checkRows(self.row_nums+1)
- tdSql.query("select count(c1) , count(t1) ,max(c2) ,tbname from stb partition by tbname order by tbname")
+ tdSql.query(f"select count(c1) , count(t1) ,max(c2) ,tbname from {dbname}.stb partition by tbname order by tbname")
tdSql.checkRows(self.tb_nums)
tdSql.checkCols(4)
- tdSql.query("select count(c1) , count(t2) ,t1 from stb partition by t1 order by t1")
+ tdSql.query(f"select count(c1) , count(t2) ,t1 from {dbname}.stb partition by t1 order by t1")
tdSql.checkRows(self.tb_nums)
tdSql.checkData(0,0,self.row_nums)
- # bug need fix
- # tdSql.query("select count(c1) , count(t1) ,abs(c1) from stb partition by abs(c1) order by abs(c1)")
+ # bug need fix
+ # tdSql.query(f"select count(c1) , count(t1) ,abs(c1) from {dbname}.stb partition by abs(c1) order by abs(c1)")
# tdSql.checkRows(self.row_nums+1)
-
- tdSql.query("select count(ceil(c2)) , count(floor(t2)) ,count(floor(c2)) from stb partition by abs(c2) order by abs(c2)")
+
+ tdSql.query(f"select count(ceil(c2)) , count(floor(t2)) ,count(floor(c2)) from {dbname}.stb partition by abs(c2) order by abs(c2)")
tdSql.checkRows(self.row_nums+1)
- tdSql.query("select count(ceil(c1-2)) , count(floor(t2+1)) ,max(c2-c1) from stb partition by abs(floor(c1)) order by abs(floor(c1))")
+ tdSql.query(f"select count(ceil(c1-2)) , count(floor(t2+1)) ,max(c2-c1) from {dbname}.stb partition by abs(floor(c1)) order by abs(floor(c1))")
tdSql.checkRows(self.row_nums+1)
-
- # interval
- tdSql.query("select count(c1) from stb interval(2s) sliding(1s)")
+
+ # interval
+ tdSql.query(f"select count(c1) from {dbname}.stb interval(2s) sliding(1s)")
# bug need fix
- tdSql.query('select max(c1) from stb where ts>="2022-07-06 16:00:00.000 " and ts < "2022-07-06 17:00:00.000 " interval(50s) sliding(30s) fill(NULL)')
+ tdSql.query(f'select max(c1) from {dbname}.stb where ts>="2022-07-06 16:00:00.000 " and ts < "2022-07-06 17:00:00.000 " interval(50s) sliding(30s) fill(NULL)')
- tdSql.query(" select tbname , count(c1) from stb partition by tbname interval(10s) slimit 5 soffset 1 ")
+ tdSql.query(f"select tbname , count(c1) from {dbname}.stb partition by tbname interval(10s) slimit 5 soffset 1 ")
- tdSql.query("select tbname , count(c1) from stb partition by tbname interval(10s)")
+ tdSql.query(f"select tbname , count(c1) from {dbname}.stb partition by tbname interval(10s)")
- tdSql.query("select tbname , count(c1) from sub_stb_1 partition by tbname interval(10s)")
+ tdSql.query(f"select tbname , count(c1) from {dbname}.sub_stb_1 partition by tbname interval(10s)")
tdSql.checkData(0,0,'sub_stb_1')
tdSql.checkData(0,1,self.row_nums)
- # tdSql.query(" select tbname , count(c1) from stb partition by tbname order by tbname slimit 5 soffset 0 ")
+ # tdSql.query(f"select tbname , count(c1) from {dbname}.stb partition by tbname order by tbname slimit 5 soffset 0 ")
+ # tdSql.checkRows(5)
+
+ # tdSql.query(f"select tbname , count(c1) from {dbname}.stb partition by tbname order by tbname slimit 5 soffset 1 ")
# tdSql.checkRows(5)
-
- # tdSql.query(" select tbname , count(c1) from stb partition by tbname order by tbname slimit 5 soffset 1 ")
- # tdSql.checkRows(5)
-
- tdSql.query(" select tbname , count(c1) from sub_stb_1 partition by tbname interval(10s) sliding(5s) ")
-
- tdSql.query(f'select max(c1) from stb where ts>={self.ts} and ts < {self.ts}+10000 partition by tbname interval(50s) sliding(30s)')
- tdSql.query(f'select max(c1) from stb where ts>={self.ts} and ts < {self.ts}+10000 interval(50s) sliding(30s)')
- tdSql.query(f'select tbname , count(c1) from stb where ts>={self.ts} and ts < {self.ts}+10000 partition by tbname interval(50s) sliding(30s)')
+
+ tdSql.query(f"select tbname , count(c1) from {dbname}.sub_stb_1 partition by tbname interval(10s) sliding(5s) ")
+
+ tdSql.query(f'select max(c1) from {dbname}.stb where ts>={self.ts} and ts < {self.ts}+10000 partition by tbname interval(50s) sliding(30s)')
+ tdSql.query(f'select max(c1) from {dbname}.stb where ts>={self.ts} and ts < {self.ts}+10000 interval(50s) sliding(30s)')
+ tdSql.query(f'select tbname , count(c1) from {dbname}.stb where ts>={self.ts} and ts < {self.ts}+10000 partition by tbname interval(50s) sliding(30s)')
def run(self):
tdSql.prepare()
self.prepare_datas("stb",self.tb_nums,self.row_nums)
self.basic_query()
+ dbname="db"
+
+        # coverage queries for a previously fixed taosd crash
+ tdSql.query(f"select sum(c1) from {dbname}.stb where t2+10 >1 ")
+ tdSql.query(f"select count(c1),count(t1) from {dbname}.stb where -t2<1 ")
+ tdSql.query(f"select tbname ,max(ceil(c1)) from {dbname}.stb group by tbname ")
+ tdSql.query(f"select avg(abs(c1)) , tbname from {dbname}.stb group by tbname ")
+ tdSql.query(f"select t1,c1 from {dbname}.stb where abs(t2+c1)=1 ")
- # # coverage case for taosd crash about bug fix
- tdSql.query(" select sum(c1) from stb where t2+10 >1 ")
- tdSql.query(" select count(c1),count(t1) from stb where -t2<1 ")
- tdSql.query(" select tbname ,max(ceil(c1)) from stb group by tbname ")
- tdSql.query(" select avg(abs(c1)) , tbname from stb group by tbname ")
- tdSql.query(" select t1,c1 from stb where abs(t2+c1)=1 ")
-
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/system-test/2-query/db.py b/tests/system-test/2-query/db.py
index a4d603bada851fab2870a6331cb215ee90738e53..f2d85ebf6599970f99c302f080cda0b3ef6f3320 100644
--- a/tests/system-test/2-query/db.py
+++ b/tests/system-test/2-query/db.py
@@ -10,9 +10,6 @@ import random
class TDTestCase:
- updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
- "jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
- "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "udfDebugFlag": 143}
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
@@ -26,22 +23,22 @@ class TDTestCase:
tdSql.execute("create table dbms.ntb (ts timestamp, c1 int, c2 bigint)")
tdSql.execute("create table dbus.ntb (ts timestamp, c1 int, c2 bigint)")
tdSql.execute("create table dbns.ntb (ts timestamp, c1 int, c2 bigint)")
-
+
tdSql.execute("insert into dbms.ntb values ('2022-01-01 08:00:00.001', 1, 2)")
tdSql.execute("insert into dbms.ntb values ('2022-01-01 08:00:00.002', 3, 4)")
-
+
tdSql.execute("insert into dbus.ntb values ('2022-01-01 08:00:00.000001', 1, 2)")
tdSql.execute("insert into dbus.ntb values ('2022-01-01 08:00:00.000002', 3, 4)")
-
+
tdSql.execute("insert into dbns.ntb values ('2022-01-01 08:00:00.000000001', 1, 2)")
tdSql.execute("insert into dbns.ntb values ('2022-01-01 08:00:00.000000002', 3, 4)")
-
+
tdSql.query("select count(c1) from dbms.ntb interval(1a)")
tdSql.checkRows(2)
-
+
tdSql.query("select count(c1) from dbus.ntb interval(1u)")
tdSql.checkRows(2)
-
+
tdSql.query("select count(c1) from dbns.ntb interval(1b)")
tdSql.checkRows(2)
diff --git a/tests/system-test/2-query/diff.py b/tests/system-test/2-query/diff.py
index c6800d9a8a8ec4137551adcd2f79a4d1c8e58496..76d4891a1c8c3e7d90c0c498f70a7b1fd7b59d6a 100644
--- a/tests/system-test/2-query/diff.py
+++ b/tests/system-test/2-query/diff.py
@@ -18,188 +18,117 @@ class TDTestCase:
def run(self):
tdSql.prepare()
+ dbname = "db"
tdSql.execute(
- "create table ntb(ts timestamp,c1 int,c2 double,c3 float)")
+ f"create table {dbname}.ntb(ts timestamp,c1 int,c2 double,c3 float)")
tdSql.execute(
- "insert into ntb values(now,1,1.0,10.5)(now+1s,10,-100.0,5.1)(now+10s,-1,15.1,5.0)")
+ f"insert into {dbname}.ntb values(now,1,1.0,10.5)(now+1s,10,-100.0,5.1)(now+10s,-1,15.1,5.0)")
- tdSql.query("select diff(c1,0) from ntb")
+ tdSql.query(f"select diff(c1,0) from {dbname}.ntb")
tdSql.checkRows(2)
tdSql.checkData(0, 0, 9)
tdSql.checkData(1, 0, -11)
- tdSql.query("select diff(c1,1) from ntb")
+ tdSql.query(f"select diff(c1,1) from {dbname}.ntb")
tdSql.checkRows(2)
tdSql.checkData(0, 0, 9)
tdSql.checkData(1, 0, None)
- tdSql.query("select diff(c2,0) from ntb")
+ tdSql.query(f"select diff(c2,0) from {dbname}.ntb")
tdSql.checkRows(2)
tdSql.checkData(0, 0, -101)
tdSql.checkData(1, 0, 115.1)
- tdSql.query("select diff(c2,1) from ntb")
+ tdSql.query(f"select diff(c2,1) from {dbname}.ntb")
tdSql.checkRows(2)
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, 115.1)
- tdSql.query("select diff(c3,0) from ntb")
+ tdSql.query(f"select diff(c3,0) from {dbname}.ntb")
tdSql.checkRows(2)
tdSql.checkData(0, 0, -5.4)
tdSql.checkData(1, 0, -0.1)
- tdSql.query("select diff(c3,1) from ntb")
+ tdSql.query(f"select diff(c3,1) from {dbname}.ntb")
tdSql.checkRows(2)
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, None)
- tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ tdSql.execute(f'''create table {dbname}.stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
- tdSql.execute("create table stb_1 using stb tags('beijing')")
+ tdSql.execute(f"create table {dbname}.stb_1 using {dbname}.stb tags('beijing')")
tdSql.execute(
- "insert into stb_1 values(%d, 0, 0, 0, 0, 0.0, 0.0, False, ' ', ' ', 0, 0, 0, 0)" % (self.ts - 1))
+ f"insert into {dbname}.stb_1 values(%d, 0, 0, 0, 0, 0.0, 0.0, False, ' ', ' ', 0, 0, 0, 0)" % (self.ts - 1))
# diff verifacation
- tdSql.query("select diff(col1) from stb_1")
+ tdSql.query(f"select diff(col1) from {dbname}.stb_1")
tdSql.checkRows(0)
- tdSql.query("select diff(col2) from stb_1")
+ tdSql.query(f"select diff(col2) from {dbname}.stb_1")
tdSql.checkRows(0)
- tdSql.query("select diff(col3) from stb_1")
+ tdSql.query(f"select diff(col3) from {dbname}.stb_1")
tdSql.checkRows(0)
- tdSql.query("select diff(col4) from stb_1")
+ tdSql.query(f"select diff(col4) from {dbname}.stb_1")
tdSql.checkRows(0)
- tdSql.query("select diff(col5) from stb_1")
+ tdSql.query(f"select diff(col5) from {dbname}.stb_1")
tdSql.checkRows(0)
- tdSql.query("select diff(col6) from stb_1")
+ tdSql.query(f"select diff(col6) from {dbname}.stb_1")
tdSql.checkRows(0)
- tdSql.query("select diff(col7) from stb_1")
+ tdSql.query(f"select diff(col7) from {dbname}.stb_1")
tdSql.checkRows(0)
for i in range(self.rowNum):
- tdSql.execute("insert into stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+ tdSql.execute(f"insert into {dbname}.stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
- tdSql.error("select diff(ts) from stb")
- tdSql.error("select diff(ts) from stb_1")
+ tdSql.error(f"select diff(ts) from {dbname}.stb")
+ tdSql.error(f"select diff(ts) from {dbname}.stb_1")
- # tdSql.error("select diff(col7) from stb")
+ # tdSql.error(f"select diff(col7) from {dbname}.stb")
- tdSql.error("select diff(col8) from stb")
- tdSql.error("select diff(col8) from stb_1")
- tdSql.error("select diff(col9) from stb")
- tdSql.error("select diff(col9) from stb_1")
- tdSql.error("select diff(col11) from stb_1")
- tdSql.error("select diff(col12) from stb_1")
- tdSql.error("select diff(col13) from stb_1")
- tdSql.error("select diff(col14) from stb_1")
+ tdSql.error(f"select diff(col8) from {dbname}.stb")
+ tdSql.error(f"select diff(col8) from {dbname}.stb_1")
+ tdSql.error(f"select diff(col9) from {dbname}.stb")
+ tdSql.error(f"select diff(col9) from {dbname}.stb_1")
+ tdSql.error(f"select diff(col11) from {dbname}.stb_1")
+ tdSql.error(f"select diff(col12) from {dbname}.stb_1")
+ tdSql.error(f"select diff(col13) from {dbname}.stb_1")
+ tdSql.error(f"select diff(col14) from {dbname}.stb_1")
+ tdSql.query(f"select ts,diff(col1),ts from {dbname}.stb_1")
- tdSql.query("select diff(col1) from stb_1")
+ tdSql.query(f"select diff(col1) from {dbname}.stb_1")
tdSql.checkRows(10)
- tdSql.query("select diff(col2) from stb_1")
+ tdSql.query(f"select diff(col2) from {dbname}.stb_1")
tdSql.checkRows(10)
- tdSql.query("select diff(col3) from stb_1")
+ tdSql.query(f"select diff(col3) from {dbname}.stb_1")
tdSql.checkRows(10)
- tdSql.query("select diff(col4) from stb_1")
+ tdSql.query(f"select diff(col4) from {dbname}.stb_1")
tdSql.checkRows(10)
- tdSql.query("select diff(col5) from stb_1")
+ tdSql.query(f"select diff(col5) from {dbname}.stb_1")
tdSql.checkRows(10)
- tdSql.query("select diff(col6) from stb_1")
+ tdSql.query(f"select diff(col6) from {dbname}.stb_1")
tdSql.checkRows(10)
- # check selectivity
- tdSql.query("select ts, diff(col1), col2 from stb_1")
- tdSql.checkRows(10)
- tdSql.checkData(0, 0, "2018-09-17 09:00:00.000")
- tdSql.checkData(1, 0, "2018-09-17 09:00:00.001")
- tdSql.checkData(2, 0, "2018-09-17 09:00:00.002")
- tdSql.checkData(3, 0, "2018-09-17 09:00:00.003")
- tdSql.checkData(4, 0, "2018-09-17 09:00:00.004")
- tdSql.checkData(5, 0, "2018-09-17 09:00:00.005")
- tdSql.checkData(6, 0, "2018-09-17 09:00:00.006")
- tdSql.checkData(7, 0, "2018-09-17 09:00:00.007")
- tdSql.checkData(8, 0, "2018-09-17 09:00:00.008")
- tdSql.checkData(9, 0, "2018-09-17 09:00:00.009")
-
- tdSql.checkData(0, 1, 1)
- tdSql.checkData(1, 1, 1)
- tdSql.checkData(2, 1, 1)
- tdSql.checkData(3, 1, 1)
- tdSql.checkData(4, 1, 1)
- tdSql.checkData(5, 1, 1)
- tdSql.checkData(6, 1, 1)
- tdSql.checkData(7, 1, 1)
- tdSql.checkData(8, 1, 1)
- tdSql.checkData(9, 1, 1)
-
- tdSql.checkData(0, 2, 0)
- tdSql.checkData(1, 2, 1)
- tdSql.checkData(2, 2, 2)
- tdSql.checkData(3, 2, 3)
- tdSql.checkData(4, 2, 4)
- tdSql.checkData(5, 2, 5)
- tdSql.checkData(6, 2, 6)
- tdSql.checkData(7, 2, 7)
- tdSql.checkData(8, 2, 8)
- tdSql.checkData(9, 2, 9)
-
- tdSql.query("select ts, diff(col1), col2 from stb order by ts")
- tdSql.checkRows(10)
-
- tdSql.checkData(0, 0, "2018-09-17 09:00:00.000")
- tdSql.checkData(1, 0, "2018-09-17 09:00:00.001")
- tdSql.checkData(2, 0, "2018-09-17 09:00:00.002")
- tdSql.checkData(3, 0, "2018-09-17 09:00:00.003")
- tdSql.checkData(4, 0, "2018-09-17 09:00:00.004")
- tdSql.checkData(5, 0, "2018-09-17 09:00:00.005")
- tdSql.checkData(6, 0, "2018-09-17 09:00:00.006")
- tdSql.checkData(7, 0, "2018-09-17 09:00:00.007")
- tdSql.checkData(8, 0, "2018-09-17 09:00:00.008")
- tdSql.checkData(9, 0, "2018-09-17 09:00:00.009")
-
- tdSql.checkData(0, 1, 1)
- tdSql.checkData(1, 1, 1)
- tdSql.checkData(2, 1, 1)
- tdSql.checkData(3, 1, 1)
- tdSql.checkData(4, 1, 1)
- tdSql.checkData(5, 1, 1)
- tdSql.checkData(6, 1, 1)
- tdSql.checkData(7, 1, 1)
- tdSql.checkData(8, 1, 1)
- tdSql.checkData(9, 1, 1)
-
- tdSql.checkData(0, 2, 0)
- tdSql.checkData(1, 2, 1)
- tdSql.checkData(2, 2, 2)
- tdSql.checkData(3, 2, 3)
- tdSql.checkData(4, 2, 4)
- tdSql.checkData(5, 2, 5)
- tdSql.checkData(6, 2, 6)
- tdSql.checkData(7, 2, 7)
- tdSql.checkData(8, 2, 8)
- tdSql.checkData(9, 2, 9)
-
-
- tdSql.execute('''create table stb1(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ tdSql.execute(f'''create table {dbname}.stb1(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
- tdSql.execute("create table stb1_1 using stb tags('shanghai')")
+ tdSql.execute(f"create table {dbname}.stb1_1 using {dbname}.stb tags('shanghai')")
for i in range(self.rowNum):
- tdSql.execute("insert into stb1_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+ tdSql.execute(f"insert into {dbname}.stb1_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
for i in range(self.rowNum):
- tdSql.execute("insert into stb1_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+ tdSql.execute(f"insert into {dbname}.stb1_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
% (self.ts - i-1, i-1, i-1, i-1, i-1, -i - 0.1, -i - 0.1, -i % 2, i - 1, i - 1, i + 1, i + 1, i + 1, i + 1))
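+        # diff(col, ignore_option): 0 keeps every delta, while 1 yields NULL for negative deltas (hence the NULL check below); n rows produce n-1 diffs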
- tdSql.query("select diff(col1,0) from stb1_1")
+ tdSql.query(f"select diff(col1,0) from {dbname}.stb1_1")
tdSql.checkRows(19)
- tdSql.query("select diff(col1,1) from stb1_1")
+ tdSql.query(f"select diff(col1,1) from {dbname}.stb1_1")
tdSql.checkRows(19)
tdSql.checkData(0,0,None)
diff --git a/tests/system-test/2-query/distinct.py b/tests/system-test/2-query/distinct.py
index 937ff78c71eba9ec617ddd9a05b04f22e2229820..7214caec969f89090f7cc1db28ae787a5de84b17 100644
--- a/tests/system-test/2-query/distinct.py
+++ b/tests/system-test/2-query/distinct.py
@@ -16,6 +16,8 @@ class TDTestCase:
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
+ dbname = "db"
+
tdLog.printNoPrefix("==========step1:create table")
tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 int) tags(t0 tinyint, t1 int, t2 int)")
tdSql.execute("create stable db.stb2 (ts timestamp, c2 int, c3 binary(16)) tags(t2 binary(16), t3 binary(16), t4 int)")
@@ -34,223 +36,224 @@ class TDTestCase:
tdSql.execute(f"insert into db.t0{i} values (now-9d, {i}, '{(i+2)%3}')")
tdSql.execute(f"insert into db.t0{i} values (now-8d, {i}, '{(i)%3}')")
tdSql.execute(f"insert into db.t0{i} (ts )values (now-7d)")
- # tdSql.execute("create table db.t100num using db.stb1 tags(null, null, null)")
- # tdSql.execute("create table db.t0100num using db.stb2 tags(null, null, null)")
- # tdSql.execute(f"insert into db.t100num values (now-10d, {tbnum-1}, 1)")
- # tdSql.execute(f"insert into db.t100num values (now-9d, {tbnum-1}, 0)")
- # tdSql.execute(f"insert into db.t100num values (now-8d, {tbnum-1}, 2)")
- # tdSql.execute(f"insert into db.t100num (ts )values (now-7d)")
- # tdSql.execute(f"insert into db.t0100num values (now-10d, {tbnum-1}, 1)")
- # tdSql.execute(f"insert into db.t0100num values (now-9d, {tbnum-1}, 0)")
- # tdSql.execute(f"insert into db.t0100num values (now-8d, {tbnum-1}, 2)")
- # tdSql.execute(f"insert into db.t0100num (ts )values (now-7d)")
+ tdSql.execute("create table db.t100num using db.stb1 tags(null, null, null)")
+ tdSql.execute("create table db.t0100num using db.stb2 tags(null, null, null)")
+ tdSql.execute(f"insert into db.t100num values (now-10d, {tbnum-1}, 1)")
+ tdSql.execute(f"insert into db.t100num values (now-9d, {tbnum-1}, 0)")
+ tdSql.execute(f"insert into db.t100num values (now-8d, {tbnum-1}, 2)")
+ tdSql.execute(f"insert into db.t100num (ts )values (now-7d)")
+ tdSql.execute(f"insert into db.t0100num values (now-10d, {tbnum-1}, 1)")
+ tdSql.execute(f"insert into db.t0100num values (now-9d, {tbnum-1}, 0)")
+ tdSql.execute(f"insert into db.t0100num values (now-8d, {tbnum-1}, 2)")
+ tdSql.execute(f"insert into db.t0100num (ts )values (now-7d)")
- #========== distinct multi-data-coloumn ==========
- # tdSql.query(f"select distinct c1 from stb1 where c1 <{tbnum}")
- # tdSql.checkRows(tbnum)
- # tdSql.query(f"select distinct c2 from stb1")
- # tdSql.checkRows(4)
- # tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum}")
- # tdSql.checkRows(tbnum*3)
- # tdSql.query(f"select distinct c1,c1 from stb1 where c1 <{tbnum}")
- # tdSql.checkRows(tbnum)
- # tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3")
- # tdSql.checkRows(3)
- # tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3 offset {tbnum*3-2}")
- # tdSql.checkRows(2)
+        #========== distinct multi-data-columns ==========
+ tdSql.query(f"select distinct c1 from {dbname}.stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum)
+ tdSql.query(f"select distinct c2 from {dbname}.stb1")
+ tdSql.checkRows(4)
+ tdSql.query(f"select distinct c1,c2 from {dbname}.stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum*3)
+ tdSql.query(f"select distinct c1,c1 from {dbname}.stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum)
+ tdSql.query(f"select distinct c1,c2 from {dbname}.stb1 where c1 <{tbnum} limit 3")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from {dbname}.stb1 where c1 <{tbnum} limit 3 offset {tbnum*3-2}")
+ tdSql.checkRows(2)
- tdSql.query(f"select distinct c1 from t1 where c1 <{tbnum}")
+ tdSql.query(f"select distinct c1 from {dbname}.t1 where c1 <{tbnum}")
tdSql.checkRows(1)
- tdSql.query(f"select distinct c2 from t1")
+ tdSql.query(f"select distinct c2 from {dbname}.t1")
tdSql.checkRows(4)
- tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum}")
+ tdSql.query(f"select distinct c1,c2 from {dbname}.t1 where c1 <{tbnum}")
tdSql.checkRows(3)
- tdSql.query(f"select distinct c1,c1 from t1 ")
+ tdSql.query(f"select distinct c1,c1 from {dbname}.t1 ")
tdSql.checkRows(2)
- tdSql.query(f"select distinct c1,c1 from t1 where c1 <{tbnum}")
+ tdSql.query(f"select distinct c1,c1 from {dbname}.t1 where c1 <{tbnum}")
tdSql.checkRows(1)
- tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3")
+ tdSql.query(f"select distinct c1,c2 from {dbname}.t1 where c1 <{tbnum} limit 3")
tdSql.checkRows(3)
- tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3 offset 2")
+ tdSql.query(f"select distinct c1,c2 from {dbname}.t1 where c1 <{tbnum} limit 3 offset 2")
tdSql.checkRows(1)
- # tdSql.query(f"select distinct c3 from stb2 where c2 <{tbnum} ")
+ # tdSql.query(f"select distinct c3 from {dbname}.stb2 where c2 <{tbnum} ")
# tdSql.checkRows(3)
- # tdSql.query(f"select distinct c3, c2 from stb2 where c2 <{tbnum} limit 2")
+ # tdSql.query(f"select distinct c3, c2 from {dbname}.stb2 where c2 <{tbnum} limit 2")
# tdSql.checkRows(2)
- # tdSql.error("select distinct c5 from stb1")
- tdSql.error("select distinct c5 from t1")
- tdSql.error("select distinct c1 from db.*")
- tdSql.error("select c2, distinct c1 from stb1")
- tdSql.error("select c2, distinct c1 from t1")
- tdSql.error("select distinct c2 from ")
- tdSql.error("distinct c2 from stb1")
- tdSql.error("distinct c2 from t1")
- tdSql.error("select distinct c1, c2, c3 from stb1")
- tdSql.error("select distinct c1, c2, c3 from t1")
- tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from stb1")
- tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from t1")
- tdSql.error("select distinct t1.c1, t1.c2, t2.c1, t2.c2 from t1")
- # tdSql.query(f"select distinct c1 c2, c2 c3 from stb1 where c1 <{tbnum}")
- # tdSql.checkRows(tbnum*3)
- tdSql.query(f"select distinct c1 c2, c2 c3 from t1 where c1 <{tbnum}")
+ # tdSql.error(f"select distinct c5 from {dbname}.stb1")
+ tdSql.error(f"select distinct c5 from {dbname}.t1")
+ tdSql.error(f"select distinct c1 from db.*")
+ tdSql.error(f"select c2, distinct c1 from {dbname}.stb1")
+ tdSql.error(f"select c2, distinct c1 from {dbname}.t1")
+ tdSql.error(f"select distinct c2 from ")
+ tdSql.error("distinct c2 from {dbname}.stb1")
+ tdSql.error("distinct c2 from {dbname}.t1")
+ tdSql.error(f"select distinct c1, c2, c3 from {dbname}.stb1")
+ tdSql.error(f"select distinct c1, c2, c3 from {dbname}.t1")
+ tdSql.error(f"select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from {dbname}.stb1")
+ tdSql.error(f"select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from {dbname}.t1")
+ tdSql.error(f"select distinct t1.c1, t1.c2, t2.c1, t2.c2 from {dbname}.t1")
+ tdSql.query(f"select distinct c1 c2, c2 c3 from {dbname}.stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum*3)
+ tdSql.query(f"select distinct c1 c2, c2 c3 from {dbname}.t1 where c1 <{tbnum}")
tdSql.checkRows(3)
- # tdSql.error("select distinct c1, c2 from stb1 order by ts")
- tdSql.error("select distinct c1, c2 from t1 order by ts")
- # tdSql.error("select distinct c1, ts from stb1 group by c2")
- tdSql.error("select distinct c1, ts from t1 group by c2")
- # tdSql.error("select distinct c1, max(c2) from stb1 ")
- # tdSql.error("select distinct c1, max(c2) from t1 ")
- # tdSql.error("select max(c2), distinct c1 from stb1 ")
- tdSql.error("select max(c2), distinct c1 from t1 ")
- # tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 group by t0")
- tdSql.error("select distinct c1, c2 from t1 where c1 > 3 group by t0")
- # tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) ")
- tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) ")
- # tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) fill(next)")
- tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) fill(next)")
- # tdSql.error("select distinct c1, c2 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)")
- tdSql.error("select distinct c1, c2 from t1 where ts > now-10d and ts < now interval(1d) fill(next)")
- # tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 slimit 1")
- # tdSql.error("select distinct c1, c2 from t1 where c1 > 3 slimit 1")
- # tdSql.query(f"select distinct c1, c2 from stb1 where c1 between {tbnum-2} and {tbnum} ")
- # tdSql.checkRows(6)
- tdSql.query(f"select distinct c1, c2 from t1 where c1 between {tbnum-2} and {tbnum} ")
+ tdSql.error(f"select distinct c1, c2 from {dbname}.stb1 order by ts")
+ tdSql.error(f"select distinct c1, c2 from {dbname}.t1 order by ts")
+ tdSql.error(f"select distinct c1, ts from {dbname}.stb1 group by c2")
+ tdSql.error(f"select distinct c1, ts from {dbname}.t1 group by c2")
+ tdSql.query(f"select distinct c1, max(c2) from {dbname}.stb1 ")
+ tdSql.query(f"select distinct c1, max(c2) from {dbname}.t1 ")
+ tdSql.error(f"select max(c2), distinct c1 from {dbname}.stb1 ")
+ tdSql.error(f"select max(c2), distinct c1 from {dbname}.t1 ")
+ tdSql.error(f"select distinct c1, c2 from {dbname}.stb1 where c1 > 3 group by t0")
+ tdSql.error(f"select distinct c1, c2 from {dbname}.t1 where c1 > 3 group by t0")
+ tdSql.error(f"select distinct c1, c2 from {dbname}.stb1 where c1 > 3 interval(1d) ")
+ tdSql.error(f"select distinct c1, c2 from {dbname}.t1 where c1 > 3 interval(1d) ")
+ tdSql.error(f"select distinct c1, c2 from {dbname}.stb1 where c1 > 3 interval(1d) fill(next)")
+ tdSql.error(f"select distinct c1, c2 from {dbname}.t1 where c1 > 3 interval(1d) fill(next)")
+ tdSql.error(f"select distinct c1, c2 from {dbname}.stb1 where ts > now-10d and ts < now interval(1d) fill(next)")
+ tdSql.error(f"select distinct c1, c2 from {dbname}.t1 where ts > now-10d and ts < now interval(1d) fill(next)")
+ tdSql.error(f"select distinct c1, c2 from {dbname}.stb1 where c1 > 3 slimit 1")
+ tdSql.error(f"select distinct c1, c2 from {dbname}.t1 where c1 > 3 slimit 1")
+ tdSql.query(f"select distinct c1, c2 from {dbname}.stb1 where c1 between {tbnum-2} and {tbnum} ")
+ tdSql.checkRows(6)
+ tdSql.query(f"select distinct c1, c2 from {dbname}.t1 where c1 between {tbnum-2} and {tbnum} ")
# tdSql.checkRows(1)
- # tdSql.query("select distinct c1, c2 from stb1 where c1 in (1,2,3,4,5)")
- # tdSql.checkRows(15)
- tdSql.query("select distinct c1, c2 from t1 where c1 in (1,2,3,4,5)")
+ tdSql.query(f"select distinct c1, c2 from {dbname}.stb1 where c1 in (1,2,3,4,5)")
+ tdSql.checkRows(15)
+ tdSql.query(f"select distinct c1, c2 from {dbname}.t1 where c1 in (1,2,3,4,5)")
# tdSql.checkRows(1)
- # tdSql.query("select distinct c1, c2 from stb1 where c1 in (100,1000,10000)")
- # tdSql.checkRows(3)
- tdSql.query("select distinct c1, c2 from t1 where c1 in (100,1000,10000)")
- # tdSql.checkRows(0)
+ tdSql.query(f"select distinct c1, c2 from {dbname}.stb1 where c1 in (100,1000,10000)")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1, c2 from {dbname}.t1 where c1 in (100,1000,10000)")
+ tdSql.checkRows(0)
- # tdSql.query(f"select distinct c1,c2 from (select * from stb1 where c1 > {tbnum-2}) ")
- # tdSql.checkRows(3)
- # tdSql.query(f"select distinct c1,c2 from (select * from t1 where c1 < {tbnum}) ")
- # tdSql.checkRows(3)
- # tdSql.query(f"select distinct c1,c2 from (select * from stb1 where t2 !=0 and t2 != 1) ")
- # tdSql.checkRows(0)
- # tdSql.error("select distinct c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ")
- # tdSql.error("select c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ")
- # tdSql.query("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 ) where c1 < 4")
- # tdSql.checkRows(3)
- # tdSql.error("select distinct c1, c2 from (select c1 from stb1 where t0 > 2 ) where t1 < 3")
- # tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 order by ts)")
- # tdSql.error("select distinct c1, c2 from (select c2, c1 from t1 where c1 > 2 order by ts)")
- # tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 group by c1)")
- # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 group by c1)")
- # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 group by c1)")
- # tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 )")
- # tdSql.checkRows(1)
- # tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 )")
- # tdSql.checkRows(1)
- # tdSql.error("select distinct stb1.c1, stb1.c2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4")
- # tdSql.error("select distinct t1.c1, t1.c2 from t1 , t2 where t1.ts=t2.ts ")
+ tdSql.query(f"select distinct c1,c2 from (select * from {dbname}.stb1 where c1 > {tbnum-2}) ")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from (select * from {dbname}.t1 where c1 < {tbnum}) ")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from (select * from {dbname}.stb1 where t2 !=0 and t2 != 1) ")
+ tdSql.checkRows(0)
+ tdSql.query(f"select distinct c1, c2 from (select distinct c1, c2 from {dbname}.stb1 where t0 > 2 and t1 < 3) ")
+ tdSql.query(f"select c1, c2 from (select distinct c1, c2 from {dbname}.stb1 where t0 > 2 and t1 < 3) ")
+ tdSql.query(f"select distinct c1, c2 from (select c2, c1 from {dbname}.stb1 where c1 > 2 ) where c1 < 4")
+ tdSql.checkRows(3)
+ tdSql.error(f"select distinct c1, c2 from (select c1 from {dbname}.stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.query(f"select distinct c1, c2 from (select c2, c1 from {dbname}.stb1 where c1 > 2 order by ts)")
+ tdSql.query(f"select distinct c1, c2 from (select c2, c1 from {dbname}.t1 where c1 > 2 order by ts)")
+ tdSql.error(f"select distinct c1, c2 from (select c2, c1 from {dbname}.stb1 where c1 > 2 group by c1)")
+ tdSql.query(f"select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from {dbname}.stb1 group by c1)")
+ tdSql.query(f"select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from {dbname}.t1 group by c1)")
+ tdSql.query(f"select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from {dbname}.stb1 )")
+ tdSql.checkRows(1)
+ tdSql.query(f"select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from {dbname}.t1 )")
+ tdSql.checkRows(1)
+ tdSql.query(f"select distinct stb1.c1, stb1.c2 from {dbname}.stb1, {dbname}.stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4")
+ tdSql.query(f"select distinct t1.c1, t1.c2 from {dbname}.t1, {dbname}.t2 where t1.ts=t2.ts ")
- # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from stb1 group by ts)")
- # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from t1 group by ts)")
+ tdSql.query(f"select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from {dbname}.stb1 group by ts)")
+ tdSql.query(f"select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from {dbname}.t1 group by ts)")
- # #========== suport distinct multi-tags-coloumn ==========
- # tdSql.query("select distinct t1 from stb1")
- # tdSql.checkRows(maxRemainderNum+1)
- # tdSql.query("select distinct t0, t1 from stb1")
- # tdSql.checkRows(maxRemainderNum+1)
- # tdSql.query("select distinct t1, t0 from stb1")
- # tdSql.checkRows(maxRemainderNum+1)
- # tdSql.query("select distinct t1, t2 from stb1")
- # tdSql.checkRows(maxRemainderNum*2+1)
- # tdSql.query("select distinct t0, t1, t2 from stb1")
- # tdSql.checkRows(maxRemainderNum*2+1)
- # tdSql.query("select distinct t0 t1, t1 t2 from stb1")
- # tdSql.checkRows(maxRemainderNum+1)
- # tdSql.query("select distinct t0, t0, t0 from stb1")
- # tdSql.checkRows(maxRemainderNum+1)
- # tdSql.query("select distinct t0, t1 from t1")
- # tdSql.checkRows(1)
- # tdSql.query("select distinct t0, t1 from t100num")
- # tdSql.checkRows(1)
+        #========== support distinct multi-tag-columns ==========
+ tdSql.query(f"select distinct t1 from {dbname}.stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query(f"select distinct t0, t1 from {dbname}.stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query(f"select distinct t1, t0 from {dbname}.stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query(f"select distinct t1, t2 from {dbname}.stb1")
+ tdSql.checkRows(maxRemainderNum*2+1)
+ tdSql.query(f"select distinct t0, t1, t2 from {dbname}.stb1")
+ tdSql.checkRows(maxRemainderNum*2+1)
+ tdSql.query(f"select distinct t0 t1, t1 t2 from {dbname}.stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query(f"select distinct t0, t0, t0 from {dbname}.stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query(f"select distinct t0, t1 from {dbname}.t1")
+ tdSql.checkRows(1)
+ tdSql.query(f"select distinct t0, t1 from {dbname}.t100num")
+ tdSql.checkRows(1)
- # tdSql.query("select distinct t3 from stb2")
- # tdSql.checkRows(maxRemainderNum+1)
- # tdSql.query("select distinct t2, t3 from stb2")
- # tdSql.checkRows(maxRemainderNum+1)
- # tdSql.query("select distinct t3, t2 from stb2")
- # tdSql.checkRows(maxRemainderNum+1)
- # tdSql.query("select distinct t4, t2 from stb2")
- # tdSql.checkRows(maxRemainderNum*3+1)
- # tdSql.query("select distinct t2, t3, t4 from stb2")
- # tdSql.checkRows(maxRemainderNum*3+1)
- # tdSql.query("select distinct t2 t1, t3 t2 from stb2")
- # tdSql.checkRows(maxRemainderNum+1)
- # tdSql.query("select distinct t3, t3, t3 from stb2")
- # tdSql.checkRows(maxRemainderNum+1)
- # tdSql.query("select distinct t2, t3 from t01")
- # tdSql.checkRows(1)
- # tdSql.query("select distinct t3, t4 from t0100num")
- # tdSql.checkRows(1)
+ tdSql.query(f"select distinct t3 from {dbname}.stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query(f"select distinct t2, t3 from {dbname}.stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query(f"select distinct t3, t2 from {dbname}.stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query(f"select distinct t4, t2 from {dbname}.stb2")
+ tdSql.checkRows(maxRemainderNum*3+1)
+ tdSql.query(f"select distinct t2, t3, t4 from {dbname}.stb2")
+ tdSql.checkRows(maxRemainderNum*3+1)
+ tdSql.query(f"select distinct t2 t1, t3 t2 from {dbname}.stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query(f"select distinct t3, t3, t3 from {dbname}.stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query(f"select distinct t2, t3 from {dbname}.t01")
+ tdSql.checkRows(1)
+ tdSql.query(f"select distinct t3, t4 from {dbname}.t0100num")
+ tdSql.checkRows(1)
- # ########## should be error #########
- # tdSql.error("select distinct from stb1")
- # tdSql.error("select distinct t3 from stb1")
- # tdSql.error("select distinct t1 from db.*")
- # tdSql.error("select distinct t2 from ")
- # tdSql.error("distinct t2 from stb1")
- # tdSql.error("select distinct stb1")
- # tdSql.error("select distinct t0, t1, t2, t3 from stb1")
- # tdSql.error("select distinct stb1.t0, stb1.t1, stb2.t2, stb2.t3 from stb1")
+        ########## should be error ##########
+ tdSql.error(f"select distinct from {dbname}.stb1")
+ tdSql.error(f"select distinct t3 from {dbname}.stb1")
+ tdSql.error(f"select distinct t1 from db.*")
+ tdSql.error(f"select distinct t2 from ")
+ tdSql.error(f"distinct t2 from {dbname}.stb1")
+ tdSql.error(f"select distinct stb1")
+ tdSql.error(f"select distinct t0, t1, t2, t3 from {dbname}.stb1")
+ tdSql.error(f"select distinct stb1.t0, stb1.t1, stb2.t2, stb2.t3 from {dbname}.stb1")
- # tdSql.error("select dist t0 from stb1")
- # tdSql.error("select distinct stb2.t2, stb2.t3 from stb1")
- # tdSql.error("select distinct stb2.t2 t1, stb2.t3 t2 from stb1")
+ tdSql.error(f"select dist t0 from {dbname}.stb1")
+ tdSql.error(f"select distinct stb2.t2, stb2.t3 from {dbname}.stb1")
+ tdSql.error(f"select distinct stb2.t2 t1, stb2.t3 t2 from {dbname}.stb1")
- # tdSql.error("select distinct t0, t1 from t1 where t0 < 7")
+ tdSql.query(f"select distinct t0, t1 from {dbname}.t1 where t0 < 7")
- # ########## add where condition ##########
- # tdSql.query("select distinct t0, t1 from stb1 where t1 > 3")
- # tdSql.checkRows(3)
- # tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2")
- # tdSql.checkRows(2)
- # tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2 offset 2")
- # tdSql.checkRows(1)
- # tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 slimit 2")
- # tdSql.checkRows(3)
- # tdSql.error("select distinct t0, t1 from stb1 where c1 > 2")
- # tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 and t1 < 5")
- # tdSql.checkRows(1)
- # tdSql.error("select distinct stb1.t0, stb1.t1 from stb1, stb2 where stb1.t2=stb2.t4")
- # tdSql.error("select distinct t0, t1 from stb1 where stb2.t4 > 2")
- # tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 group by t0")
- # tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) ")
- # tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) fill(next)")
- # tdSql.error("select distinct t0, t1 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)")
+ ########## add where condition ##########
+ tdSql.query(f"select distinct t0, t1 from {dbname}.stb1 where t1 > 3")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct t0, t1 from {dbname}.stb1 where t1 > 3 limit 2")
+ tdSql.checkRows(2)
+ tdSql.query(f"select distinct t0, t1 from {dbname}.stb1 where t1 > 3 limit 2 offset 2")
+ tdSql.checkRows(1)
+ tdSql.error(f"select distinct t0, t1 from {dbname}.stb1 where t1 > 3 slimit 2")
+ tdSql.query(f"select distinct t0, t1 from {dbname}.stb1 where c1 > 2")
+ tdSql.query(f"select distinct t0, t1 from {dbname}.stb1 where t1 > 3 and t1 < 5")
+ tdSql.checkRows(1)
+ tdSql.error(f"select distinct stb1.t0, stb1.t1 from {dbname}.stb1, {dbname}.stb2 where stb1.t2=stb2.t4")
+ tdSql.error(f"select distinct t0, t1 from {dbname}.stb1 where stb2.t4 > 2")
+ tdSql.error(f"select distinct t0, t1 from {dbname}.stb1 where t1 > 3 group by t0")
+ tdSql.error(f"select distinct t0, t1 from {dbname}.stb1 where t1 > 3 interval(1d) ")
+ tdSql.error(f"select distinct t0, t1 from {dbname}.stb1 where t1 > 3 interval(1d) fill(next)")
+ tdSql.error(f"select distinct t0, t1 from {dbname}.stb1 where ts > now-10d and ts < now interval(1d) fill(next)")
- # tdSql.error("select max(c1), distinct t0 from stb1 where t0 > 2")
- # tdSql.error("select distinct t0, max(c1) from stb1 where t0 > 2")
- # tdSql.error("select distinct t0 from stb1 where t0 in (select t0 from stb1 where t0 > 2)")
- # tdSql.query("select distinct t0, t1 from stb1 where t0 in (1,2,3,4,5)")
- # tdSql.checkRows(5)
- # tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2) ")
- # tdSql.checkRows(4)
- # tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 and t1 < 3) ")
- # tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 ) where t1 < 3")
- # tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2 ) where t1 < 3")
- # tdSql.checkRows(1)
- # tdSql.error("select distinct t1, t0 from (select t1 from stb1 where t0 > 2 ) where t1 < 3")
- # tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1 group by t1)")
- # tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1)")
- # tdSql.query("select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 ) where t1 < 3")
- # tdSql.checkRows(1)
- # tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 order by ts) where t1 < 3")
- # tdSql.error("select t1, t0 from (select distinct t1,t0 from stb1 where t0 > 2 ) where t1 < 3")
- # tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 group by ts) where t1 < 3")
- # tdSql.error("select distinct stb1.t1, stb1.t2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4")
- # tdSql.error("select distinct t1.t1, t1.t2 from t1 , t2 where t1.ts=t2.ts ")
+ tdSql.error(f"select max(c1), distinct t0 from {dbname}.stb1 where t0 > 2")
+ tdSql.query(f"select distinct t0, max(c1) from {dbname}.stb1 where t0 > 2")
+ tdSql.error(f"select distinct t0 from {dbname}.stb1 where t0 in (select t0 from {dbname}.stb1 where t0 > 2)")
+ tdSql.query(f"select distinct t0, t1 from {dbname}.stb1 where t0 in (1,2,3,4,5)")
+ tdSql.checkRows(5)
+ tdSql.query(f"select distinct t1 from (select t0, t1 from {dbname}.stb1 where t0 > 2) ")
+ tdSql.checkRows(4)
+ tdSql.query(f"select distinct t1 from (select distinct t0, t1 from {dbname}.stb1 where t0 > 2 and t1 < 3) ")
+ # TODO: BUG of TD-17561
+ # tdSql.query(f"select distinct t1 from (select distinct t0, t1 from {dbname}.stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.query(f"select distinct t1 from (select t0, t1 from {dbname}.stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.checkRows(1)
+ tdSql.error(f"select distinct t1, t0 from (select t1 from {dbname}.stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.query(f"select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from {dbname}.stb1 group by t1)")
+ tdSql.query(f"select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from {dbname}.stb1)")
+ tdSql.query(f"select distinct t1, t0 from (select t1,t0 from {dbname}.stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.checkRows(1)
+ tdSql.query(f"select distinct t1, t0 from (select t1,t0 from {dbname}.stb1 where t0 > 2 order by ts) where t1 < 3")
+ # TODO: BUG of TD-17561
+ # tdSql.error(f"select t1, t0 from (select distinct t1,t0 from {dbname}.stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.error(f"select distinct t1, t0 from (select t1,t0 from {dbname}.stb1 where t0 > 2 group by ts) where t1 < 3")
+ tdSql.query(f"select distinct stb1.t1, stb1.t2 from {dbname}.stb1, {dbname}.stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4")
+ tdSql.query(f"select distinct t1.t1, t1.t2 from {dbname}.t1, {dbname}.t2 where t1.ts=t2.ts ")
diff --git a/tests/system-test/2-query/distribute_agg_apercentile.py b/tests/system-test/2-query/distribute_agg_apercentile.py
index eb5e8333c20803c424941bd75321aaa7e1ea1d52..1fd853f9eb193ac6ab9601f9ae8963d13e10f94c 100644
--- a/tests/system-test/2-query/distribute_agg_apercentile.py
+++ b/tests/system-test/2-query/distribute_agg_apercentile.py
@@ -6,86 +6,60 @@ import random
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143,
- "maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
+ updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.vnode_disbutes = None
self.ts = 1537146000000
- def prepare_datas_of_distribute(self):
+ def prepare_datas_of_distribute(self, dbname="testdb"):
        # prepare data for 20 tables distributed across different vgroups
- tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
- tdSql.execute(" use testdb ")
+ tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5")
+ tdSql.execute(f" use {dbname} ")
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
'''
)
- tdSql.execute(
- '''
- create table t1
- (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
- '''
- )
for i in range(20):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
for i in range(1,21):
if i ==1 or i == 4:
continue
else:
- tbname = "ct"+f'{i}'
+ tbname = f"{dbname}.ct{i}"
for j in range(9):
tdSql.execute(
f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
-
- tdSql.execute(
- f'''insert into t1 values
- ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
- ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
- ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
- ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
- ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
- ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
- ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
- ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
- ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
- ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- '''
- )
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdLog.info(" prepare data for distributed_aggregate done! ")
- def check_distribute_datas(self):
+ def check_distribute_datas(self, dbname="testdb"):
        # get vgroup ids of all vnodes
- tdSql.query("show vgroups ")
+ tdSql.query(f"show {dbname}.vgroups ")
vgroups = tdSql.queryResult
vnode_tables={}
@@ -95,7 +69,7 @@ class TDTestCase:
        # check the sub_tables of each vnode to make sure they have been distributed
- tdSql.query("show tables like 'ct%'")
+ tdSql.query(f"show {dbname}.tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
@@ -109,28 +83,28 @@ class TDTestCase:
if count < 2:
tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")
- def distribute_agg_query(self):
+ def distribute_agg_query(self, dbname="testdb"):
# basic filter
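+        # apercentile(expr, p) returns an approximate p-th percentile; an optional third argument selects the algorithm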
- tdSql.query("select apercentile(c1 , 20) from stb1 where c1 is null")
+ tdSql.query(f"select apercentile(c1 , 20) from {dbname}.stb1 where c1 is null")
tdSql.checkRows(0)
- tdSql.query("select apercentile(c1 , 20) from stb1 where t1=1")
+ tdSql.query(f"select apercentile(c1 , 20) from {dbname}.stb1 where t1=1")
tdSql.checkData(0,0,2.800000000)
- tdSql.query("select apercentile(c1+c2 ,100) from stb1 where c1 =1 ")
+ tdSql.query(f"select apercentile(c1+c2 ,100) from {dbname}.stb1 where c1 =1 ")
tdSql.checkData(0,0,11112.000000000)
- tdSql.query("select apercentile(c1 ,10 ) from stb1 where tbname=\"ct2\"")
+ tdSql.query(f"select apercentile(c1 ,10 ) from {dbname}.stb1 where tbname=\"ct2\"")
tdSql.checkData(0,0,2.000000000)
- tdSql.query("select apercentile(c1,20) from stb1 partition by tbname")
+ tdSql.query(f"select apercentile(c1,20) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(20)
- tdSql.query("select apercentile(c1,20) from stb1 where t1> 4 partition by tbname")
+ tdSql.query(f"select apercentile(c1,20) from {dbname}.stb1 where t1> 4 partition by tbname")
tdSql.checkRows(15)
# union all
- tdSql.query("select apercentile(c1,20) from stb1 union all select apercentile(c1,20) from stb1 ")
+ tdSql.query(f"select apercentile(c1,20) from {dbname}.stb1 union all select apercentile(c1,20) from {dbname}.stb1 ")
tdSql.checkRows(2)
tdSql.checkData(0,0,7.389181281)
@@ -138,44 +112,44 @@ class TDTestCase:
tdSql.execute(" create database if not exists db ")
tdSql.execute(" use db ")
- tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
- tdSql.execute(" create table tb1 using st tags(1) ")
- tdSql.execute(" create table tb2 using st tags(2) ")
+ tdSql.execute(" create stable db.st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
+ tdSql.execute(" create table db.tb1 using db.st tags(1) ")
+ tdSql.execute(" create table db.tb2 using db.st tags(2) ")
for i in range(10):
ts = i*10 + self.ts
- tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
- tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")
+ tdSql.execute(f" insert into db.tb1 values({ts},{i},{i}.0)")
+ tdSql.execute(f" insert into db.tb2 values({ts},{i},{i}.0)")
- tdSql.query("select apercentile(tb1.c1,100), apercentile(tb2.c2,100) from tb1, tb2 where tb1.ts=tb2.ts")
+ tdSql.query(f"select apercentile(tb1.c1,100), apercentile(tb2.c2,100) from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts")
tdSql.checkRows(1)
tdSql.checkData(0,0,9.000000000)
tdSql.checkData(0,0,9.000000000)
# group by
- tdSql.execute(" use testdb ")
- tdSql.query(" select max(c1),c1 from stb1 group by t1 ")
+ tdSql.execute(f"use {dbname} ")
+ tdSql.query(f" select max(c1),c1 from {dbname}.stb1 group by t1 ")
tdSql.checkRows(20)
- tdSql.query(" select max(c1),c1 from stb1 group by c1 ")
+ tdSql.query(f" select max(c1),c1 from {dbname}.stb1 group by c1 ")
tdSql.checkRows(30)
- tdSql.query(" select max(c1),c2 from stb1 group by c2 ")
+ tdSql.query(f" select max(c1),c2 from {dbname}.stb1 group by c2 ")
tdSql.checkRows(31)
# partition by tbname or partition by tag
- tdSql.query("select apercentile(c1 ,10)from stb1 partition by tbname")
+ tdSql.query(f"select apercentile(c1 ,10)from {dbname}.stb1 partition by tbname")
query_data = tdSql.queryResult
        # nested query support
- tdSql.query("select apercentile(c2+2,10)+1 from (select max(c1) c2 from stb1)")
+ tdSql.query(f"select apercentile(c2+2,10)+1 from (select max(c1) c2 from {dbname}.stb1)")
tdSql.checkData(0,0,31.000000000)
- tdSql.query("select apercentile(c1+2,10)+1 as c2 from (select ts ,c1 ,c2 from stb1)")
+ tdSql.query(f"select apercentile(c1+2,10)+1 as c2 from (select ts ,c1 ,c2 from {dbname}.stb1)")
tdSql.checkData(0,0,7.560701700)
- tdSql.query("select apercentile(a+2,10)+1 as c2 from (select ts ,abs(c1) a ,c2 from stb1)")
+ tdSql.query(f"select apercentile(a+2,10)+1 as c2 from (select ts ,abs(c1) a ,c2 from {dbname}.stb1)")
tdSql.checkData(0,0,7.560701700)
        # mix with other aggregate functions
- tdSql.query("select max(c1),count(c1),last(c2,c3),spread(c1), apercentile(c1,10) from stb1")
+ tdSql.query(f"select max(c1),count(c1),last(c2,c3),spread(c1), apercentile(c1,10) from {dbname}.stb1")
tdSql.checkData(0,0,28)
tdSql.checkData(0,1,184)
tdSql.checkData(0,2,-99999)
diff --git a/tests/system-test/2-query/distribute_agg_avg.py b/tests/system-test/2-query/distribute_agg_avg.py
index 2f449595bd34653ea035867dd3625ca6ea778cbd..3892ae0da15269f3aa9b7d89e4fbef9824aab723 100644
--- a/tests/system-test/2-query/distribute_agg_avg.py
+++ b/tests/system-test/2-query/distribute_agg_avg.py
@@ -7,11 +7,8 @@ import platform
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143,
- "maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
+ updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
@@ -34,75 +31,52 @@ class TDTestCase:
tdSql.query(avg_sql)
tdSql.checkData(0,0,pre_avg)
- def prepare_datas_of_distribute(self):
+ def prepare_datas_of_distribute(self, dbname="testdb"):
        # prepare data for 20 tables distributed across different vgroups
- tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
- tdSql.execute(" use testdb ")
+ tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5")
+ tdSql.execute(f" use {dbname} ")
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
'''
)
- tdSql.execute(
- '''
- create table t1
- (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
- '''
- )
for i in range(20):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
for i in range(1,21):
if i ==1 or i == 4:
continue
else:
- tbname = "ct"+f'{i}'
+ tbname = f"{dbname}.ct{i}"
for j in range(9):
tdSql.execute(
f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
-
- tdSql.execute(
- f'''insert into t1 values
- ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
- ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
- ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
- ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
- ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
- ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
- ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
- ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
- ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
- ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- '''
- )
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdLog.info(" prepare data for distributed_aggregate done! ")
- def check_distribute_datas(self):
+ def check_distribute_datas(self, dbname="testdb"):
        # get vgroup ids of all vnodes
- tdSql.query("show vgroups ")
+ tdSql.query(f"show {dbname}.vgroups ")
vgroups = tdSql.queryResult
vnode_tables={}
@@ -112,7 +86,7 @@ class TDTestCase:
        # check the sub_tables of each vnode to make sure they have been distributed
- tdSql.query("show tables like 'ct%'")
+ tdSql.query(f"show {dbname}.tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
@@ -126,7 +100,7 @@ class TDTestCase:
if count < 2:
tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")
- def check_avg_distribute_diff_vnode(self,col_name):
+ def check_avg_distribute_diff_vnode(self,col_name, dbname="testdb"):
vgroup_ids = []
for k ,v in self.vnode_disbutes.items():
@@ -144,9 +118,9 @@ class TDTestCase:
tbname_filters = tbname_ins[:-1]
- avg_sql = f"select avg({col_name}) from stb1 where tbname in ({tbname_filters});"
+ avg_sql = f"select avg({col_name}) from {dbname}.stb1 where tbname in ({tbname_filters});"
- same_sql = f"select {col_name} from stb1 where tbname in ({tbname_filters}) and {col_name} is not null "
+ same_sql = f"select {col_name} from {dbname}.stb1 where tbname in ({tbname_filters}) and {col_name} is not null "
tdSql.query(same_sql)
pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
@@ -157,16 +131,16 @@ class TDTestCase:
tdSql.query(avg_sql)
tdSql.checkData(0,0,pre_avg)
- def check_avg_status(self):
+ def check_avg_status(self, dbname="testdb"):
# check max function work status
- tdSql.query("show tables like 'ct%'")
+ tdSql.query(f"show {dbname}.tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
- tablenames.append(table_name[0])
+ tablenames.append(f"{dbname}.{table_name[0]}")
- tdSql.query("desc stb1")
+ tdSql.query(f"desc {dbname}.stb1")
col_names = tdSql.queryResult
colnames = []
@@ -182,41 +156,41 @@ class TDTestCase:
for colname in colnames:
if colname.startswith("c"):
- self.check_avg_distribute_diff_vnode(colname)
+ self.check_avg_distribute_diff_vnode(colname, dbname)
else:
- # self.check_avg_distribute_diff_vnode(colname) # bug for tag
+ # self.check_avg_distribute_diff_vnode(colname, dbname) # bug for tag
pass
- def distribute_agg_query(self):
+ def distribute_agg_query(self, dbname="testdb"):
# basic filter
- tdSql.query(" select avg(c1) from stb1 ")
+ tdSql.query(f"select avg(c1) from {dbname}.stb1 ")
tdSql.checkData(0,0,14.086956522)
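+        # the avg of per-table averages differs from the global avg because the tables hold different numbers of rows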
- tdSql.query(" select avg(a) from (select avg(c1) a from stb1 partition by tbname) ")
+ tdSql.query(f"select avg(a) from (select avg(c1) a from {dbname}.stb1 partition by tbname) ")
tdSql.checkData(0,0,14.292307692)
- tdSql.query(" select avg(c1) from stb1 where t1=1")
+ tdSql.query(f"select avg(c1) from {dbname}.stb1 where t1=1")
tdSql.checkData(0,0,6.000000000)
- tdSql.query("select avg(c1+c2) from stb1 where c1 =1 ")
+ tdSql.query(f"select avg(c1+c2) from {dbname}.stb1 where c1 =1 ")
tdSql.checkData(0,0,11112.000000000)
- tdSql.query("select avg(c1) from stb1 where tbname=\"ct2\"")
+ tdSql.query(f"select avg(c1) from {dbname}.stb1 where tbname=\"ct2\"")
tdSql.checkData(0,0,6.000000000)
- tdSql.query("select avg(c1) from stb1 partition by tbname")
+ tdSql.query(f"select avg(c1) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(20)
- tdSql.query("select avg(c1) from stb1 where t1> 4 partition by tbname")
+ tdSql.query(f"select avg(c1) from {dbname}.stb1 where t1> 4 partition by tbname")
tdSql.checkRows(15)
# union all
- tdSql.query("select avg(c1) from stb1 union all select avg(c1) from stb1 ")
+ tdSql.query(f"select avg(c1) from {dbname}.stb1 union all select avg(c1) from {dbname}.stb1 ")
tdSql.checkRows(2)
tdSql.checkData(0,0,14.086956522)
- tdSql.query("select avg(a) from (select avg(c1) a from stb1 union all select avg(c1) a from stb1)")
+ tdSql.query(f"select avg(a) from (select avg(c1) a from {dbname}.stb1 union all select avg(c1) a from {dbname}.stb1)")
tdSql.checkRows(1)
tdSql.checkData(0,0,14.086956522)
@@ -224,38 +198,38 @@ class TDTestCase:
tdSql.execute(" create database if not exists db ")
tdSql.execute(" use db ")
- tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
- tdSql.execute(" create table tb1 using st tags(1) ")
- tdSql.execute(" create table tb2 using st tags(2) ")
+ tdSql.execute(" create stable db.st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
+ tdSql.execute(" create table db.tb1 using db.st tags(1) ")
+ tdSql.execute(" create table db.tb2 using db.st tags(2) ")
for i in range(10):
ts = i*10 + self.ts
- tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
- tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")
+ tdSql.execute(f" insert into db.tb1 values({ts},{i},{i}.0)")
+ tdSql.execute(f" insert into db.tb2 values({ts},{i},{i}.0)")
- tdSql.query("select avg(tb1.c1), avg(tb2.c2) from tb1, tb2 where tb1.ts=tb2.ts")
+ tdSql.query(f"select avg(tb1.c1), avg(tb2.c2) from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts")
tdSql.checkRows(1)
tdSql.checkData(0,0,4.500000000)
tdSql.checkData(0,1,4.500000000)
# group by
- tdSql.execute(" use testdb ")
+ tdSql.execute(f" use {dbname} ")
# partition by tbname or partition by tag
- tdSql.query("select avg(c1) from stb1 partition by tbname")
+ tdSql.query(f"select avg(c1) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(20)
        # nested query support
- tdSql.query("select avg(c2+2)+1 from (select avg(c1) c2 from stb1)")
+ tdSql.query(f"select avg(c2+2)+1 from (select avg(c1) c2 from {dbname}.stb1)")
tdSql.checkData(0,0,17.086956522)
- tdSql.query("select avg(c1+2) as c2 from (select ts ,c1 ,c2 from stb1)")
+ tdSql.query(f"select avg(c1+2) as c2 from (select ts ,c1 ,c2 from {dbname}.stb1)")
tdSql.checkData(0,0,16.086956522)
- tdSql.query("select avg(a+2) as c2 from (select ts ,abs(c1) a ,c2 from stb1)")
+ tdSql.query(f"select avg(a+2) as c2 from (select ts ,abs(c1) a ,c2 from {dbname}.stb1)")
tdSql.checkData(0,0,16.086956522)
        # mix with other aggregate functions
- tdSql.query("select max(c1),count(c1),last(c2,c3),sum(c1+c2),avg(c1) from stb1")
+ tdSql.query(f"select max(c1),count(c1),last(c2,c3),sum(c1+c2),avg(c1) from {dbname}.stb1")
tdSql.checkData(0,0,28)
tdSql.checkData(0,1,184)
tdSql.checkData(0,2,-99999)
diff --git a/tests/system-test/2-query/distribute_agg_count.py b/tests/system-test/2-query/distribute_agg_count.py
index 67f7e283258cac6db3e2fc2454e60977ccb034b7..835d1eeb5734f84319f8087262acb7a35fd4a6e8 100644
--- a/tests/system-test/2-query/distribute_agg_count.py
+++ b/tests/system-test/2-query/distribute_agg_count.py
@@ -6,11 +6,8 @@ import random
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143,
- "maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
+ updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
@@ -35,76 +32,52 @@ class TDTestCase:
else:
tdLog.info(" count function work as expected, sql : %s "% max_sql)
-
- def prepare_datas_of_distribute(self):
+ def prepare_datas_of_distribute(self, dbname="testdb"):
        # prepare data for 20 tables distributed across different vgroups
- tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
- tdSql.execute(" use testdb ")
+ tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5")
+ tdSql.execute(f" use {dbname} ")
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
'''
)
- tdSql.execute(
- '''
- create table t1
- (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
- '''
- )
for i in range(20):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
for i in range(1,21):
if i ==1 or i == 4:
continue
else:
- tbname = "ct"+f'{i}'
+ tbname = f"{dbname}.ct{i}"
for j in range(9):
tdSql.execute(
f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
-
- tdSql.execute(
- f'''insert into t1 values
- ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
- ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
- ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
- ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
- ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
- ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
- ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
- ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
- ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
- ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- '''
- )
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdLog.info(" prepare data for distributed_aggregate done! ")
- def check_distribute_datas(self):
+ def check_distribute_datas(self, dbname="testdb"):
        # get vgroup_ids of all vgroups
- tdSql.query("show vgroups ")
+ tdSql.query(f"show {dbname}.vgroups ")
vgroups = tdSql.queryResult
vnode_tables={}
@@ -114,7 +87,7 @@ class TDTestCase:
        # check the sub_tables of each vnode, to make sure the sub_tables have been distributed
- tdSql.query("show tables like 'ct%'")
+ tdSql.query(f"show {dbname}.tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
@@ -128,7 +101,7 @@ class TDTestCase:
if count < 2:
tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")
- def check_count_distribute_diff_vnode(self,col_name):
+ def check_count_distribute_diff_vnode(self,col_name, dbname="testdb"):
vgroup_ids = []
for k ,v in self.vnode_disbutes.items():
@@ -146,9 +119,9 @@ class TDTestCase:
tbname_filters = tbname_ins[:-1]
- max_sql = f"select count({col_name}) from stb1 where tbname in ({tbname_filters});"
+ max_sql = f"select count({col_name}) from {dbname}.stb1 where tbname in ({tbname_filters});"
- same_sql = f"select sum(c) from (select {col_name} ,1 as c from stb1 where tbname in ({tbname_filters}) and {col_name} is not null) "
+ same_sql = f"select sum(c) from (select {col_name} ,1 as c from {dbname}.stb1 where tbname in ({tbname_filters}) and {col_name} is not null) "
tdSql.query(max_sql)
max_result = tdSql.queryResult
@@ -161,16 +134,16 @@ class TDTestCase:
else:
tdLog.info(" count function work as expected, sql : %s "% max_sql)
- def check_count_status(self):
+ def check_count_status(self, dbname="testdb"):
        # check count function work status
- tdSql.query("show tables like 'ct%'")
+ tdSql.query(f"show {dbname}.tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
- tablenames.append(table_name[0])
+ tablenames.append(f"{dbname}.{table_name[0]}")
- tdSql.query("desc stb1")
+ tdSql.query(f"desc {dbname}.stb1")
col_names = tdSql.queryResult
colnames = []
@@ -186,34 +159,33 @@ class TDTestCase:
for colname in colnames:
if colname.startswith("c"):
- self.check_count_distribute_diff_vnode(colname)
+ self.check_count_distribute_diff_vnode(colname, dbname)
else:
- # self.check_count_distribute_diff_vnode(colname) # bug for tag
+ # self.check_count_distribute_diff_vnode(colname, dbname) # bug for tag
pass
-
- def distribute_agg_query(self):
+ def distribute_agg_query(self, dbname="testdb"):
# basic filter
- tdSql.query("select count(c1) from stb1 ")
+ tdSql.query(f"select count(c1) from {dbname}.stb1 ")
tdSql.checkData(0,0,184)
- tdSql.query("select count(c1) from stb1 where t1=1")
+ tdSql.query(f"select count(c1) from {dbname}.stb1 where t1=1")
tdSql.checkData(0,0,9)
- tdSql.query("select count(c1+c2) from stb1 where c1 =1 ")
+ tdSql.query(f"select count(c1+c2) from {dbname}.stb1 where c1 =1 ")
tdSql.checkData(0,0,2)
- tdSql.query("select count(c1) from stb1 where tbname=\"ct2\"")
+ tdSql.query(f"select count(c1) from {dbname}.stb1 where tbname=\"ct2\"")
tdSql.checkData(0,0,9)
- tdSql.query("select count(c1) from stb1 partition by tbname")
+ tdSql.query(f"select count(c1) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(20)
- tdSql.query("select count(c1) from stb1 where t1> 4 partition by tbname")
+ tdSql.query(f"select count(c1) from {dbname}.stb1 where t1> 4 partition by tbname")
tdSql.checkRows(15)
# union all
- tdSql.query("select count(c1) from stb1 union all select count(c1) from stb1 ")
+ tdSql.query(f"select count(c1) from {dbname}.stb1 union all select count(c1) from {dbname}.stb1 ")
tdSql.checkRows(2)
tdSql.checkData(0,0,184)
@@ -221,60 +193,60 @@ class TDTestCase:
tdSql.execute(" create database if not exists db ")
tdSql.execute(" use db ")
- tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
- tdSql.execute(" create table tb1 using st tags(1) ")
- tdSql.execute(" create table tb2 using st tags(2) ")
+ tdSql.execute(" create stable db.st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
+ tdSql.execute(" create table db.tb1 using db.st tags(1) ")
+ tdSql.execute(" create table db.tb2 using db.st tags(2) ")
for i in range(10):
ts = i*10 + self.ts
- tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
- tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")
+ tdSql.execute(f" insert into db.tb1 values({ts},{i},{i}.0)")
+ tdSql.execute(f" insert into db.tb2 values({ts},{i},{i}.0)")
- tdSql.query("select count(tb1.c1), count(tb2.c2) from tb1, tb2 where tb1.ts=tb2.ts")
+ tdSql.query(f"select count(tb1.c1), count(tb2.c2) from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts")
tdSql.checkRows(1)
tdSql.checkData(0,0,10)
tdSql.checkData(0,1,10)
# group by
- tdSql.execute(" use testdb ")
+ tdSql.execute(f" use {dbname} ")
- tdSql.query(" select count(*) from stb1 ")
+ tdSql.query(f"select count(*) from {dbname}.stb1 ")
tdSql.checkData(0,0,187)
- tdSql.query(" select count(*) from stb1 group by t1 ")
+ tdSql.query(f"select count(*) from {dbname}.stb1 group by t1 ")
tdSql.checkRows(20)
- tdSql.query(" select count(*) from stb1 group by c1 ")
+ tdSql.query(f"select count(*) from {dbname}.stb1 group by c1 ")
tdSql.checkRows(30)
- tdSql.query(" select count(*) from stb1 group by c2 ")
+ tdSql.query(f"select count(*) from {dbname}.stb1 group by c2 ")
tdSql.checkRows(31)
# partition by tbname or partition by tag
- tdSql.query("select max(c1),tbname from stb1 partition by tbname")
+ tdSql.query(f"select max(c1),tbname from {dbname}.stb1 partition by tbname")
query_data = tdSql.queryResult
for row in query_data:
- tbname = row[1]
- tdSql.query(" select max(c1) from %s "%tbname)
+ tbname = f"{dbname}.{row[1]}"
+ tdSql.query(f"select max(c1) from %s "%tbname)
tdSql.checkData(0,0,row[0])
- tdSql.query("select max(c1),tbname from stb1 partition by t1")
+ tdSql.query(f"select max(c1),tbname from {dbname}.stb1 partition by t1")
query_data = tdSql.queryResult
for row in query_data:
- tbname = row[1]
- tdSql.query(" select max(c1) from %s "%tbname)
+ tbname = f"{dbname}.{row[1]}"
+ tdSql.query(f"select max(c1) from %s "%tbname)
tdSql.checkData(0,0,row[0])
# nest query for support max
- tdSql.query("select abs(c2+2)+1 from (select count(c1) c2 from stb1)")
+ tdSql.query(f"select abs(c2+2)+1 from (select count(c1) c2 from {dbname}.stb1)")
tdSql.checkData(0,0,187.000000000)
- tdSql.query("select count(c1+2) as c2 from (select ts ,c1 ,c2 from stb1)")
+ tdSql.query(f"select count(c1+2) as c2 from (select ts ,c1 ,c2 from {dbname}.stb1)")
tdSql.checkData(0,0,184)
- tdSql.query("select count(a+2) as c2 from (select ts ,abs(c1) a ,c2 from stb1)")
+ tdSql.query(f"select count(a+2) as c2 from (select ts ,abs(c1) a ,c2 from {dbname}.stb1)")
tdSql.checkData(0,0,184)
# mixup with other functions
- tdSql.query("select max(c1),count(c1),last(c2,c3) from stb1")
+ tdSql.query(f"select max(c1),count(c1),last(c2,c3) from {dbname}.stb1")
tdSql.checkData(0,0,28)
tdSql.checkData(0,1,184)
tdSql.checkData(0,2,-99999)
diff --git a/tests/system-test/2-query/distribute_agg_max.py b/tests/system-test/2-query/distribute_agg_max.py
index d4b71dbdd77e32bc57f6129ad91d9263cad055c7..a7b31a2084af6665a83fac722c8ca1cb653d5dd9 100644
--- a/tests/system-test/2-query/distribute_agg_max.py
+++ b/tests/system-test/2-query/distribute_agg_max.py
@@ -6,10 +6,8 @@ import random
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143,
- "maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
+
+ updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
@@ -36,75 +34,52 @@ class TDTestCase:
tdLog.info(" max function work as expected, sql : %s "% max_sql)
- def prepare_datas_of_distribute(self):
+ def prepare_datas_of_distribute(self, dbname="testdb"):
        # prepare data for 20 tables distributed across different vgroups
- tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
- tdSql.execute(" use testdb ")
+ tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5")
+ tdSql.execute(f" use {dbname} ")
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
'''
)
- tdSql.execute(
- '''
- create table t1
- (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
- '''
- )
for i in range(20):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
for i in range(1,21):
if i ==1 or i == 4:
continue
else:
- tbname = "ct"+f'{i}'
+ tbname = f"{dbname}.ct{i}"
for j in range(9):
tdSql.execute(
f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
-
- tdSql.execute(
- f'''insert into t1 values
- ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
- ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
- ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
- ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
- ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
- ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
- ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
- ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
- ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
- ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- '''
- )
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdLog.info(" prepare data for distributed_aggregate done! ")
- def check_distribute_datas(self):
+ def check_distribute_datas(self, dbname="testdb"):
        # get vgroup_ids of all vgroups
- tdSql.query("show vgroups ")
+ tdSql.query(f"show {dbname}.vgroups ")
vgroups = tdSql.queryResult
vnode_tables={}
@@ -112,9 +87,8 @@ class TDTestCase:
for vgroup_id in vgroups:
vnode_tables[vgroup_id[0]]=[]
-
        # check the sub_tables of each vnode, to make sure the sub_tables have been distributed
- tdSql.query("show tables like 'ct%'")
+ tdSql.query(f"show {dbname}.tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
@@ -128,7 +102,7 @@ class TDTestCase:
if count < 2:
tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")
- def check_max_distribute_diff_vnode(self,col_name):
+ def check_max_distribute_diff_vnode(self,col_name, dbname="testdb"):
vgroup_ids = []
for k ,v in self.vnode_disbutes.items():
@@ -146,9 +120,9 @@ class TDTestCase:
tbname_filters = tbname_ins[:-1]
- max_sql = f"select max({col_name}) from stb1 where tbname in ({tbname_filters});"
+ max_sql = f"select max({col_name}) from {dbname}.stb1 where tbname in ({tbname_filters});"
- same_sql = f"select {col_name} from stb1 where tbname in ({tbname_filters}) order by {col_name} desc limit 1"
+ same_sql = f"select {col_name} from {dbname}.stb1 where tbname in ({tbname_filters}) order by {col_name} desc limit 1"
tdSql.query(max_sql)
max_result = tdSql.queryResult
@@ -161,16 +135,16 @@ class TDTestCase:
else:
tdLog.info(" max function work as expected, sql : %s "% max_sql)
- def check_max_status(self):
+ def check_max_status(self, dbname="testdb"):
# check max function work status
- tdSql.query("show tables like 'ct%'")
+ tdSql.query(f"show {dbname}.tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
- tablenames.append(table_name[0])
+ tablenames.append(f"{dbname}.{table_name[0]}")
- tdSql.query("desc stb1")
+ tdSql.query(f"desc {dbname}.stb1")
col_names = tdSql.queryResult
colnames = []
@@ -186,34 +160,33 @@ class TDTestCase:
for colname in colnames:
if colname.startswith("c"):
- self.check_max_distribute_diff_vnode(colname)
+ self.check_max_distribute_diff_vnode(colname, dbname)
else:
- # self.check_max_distribute_diff_vnode(colname) # bug for tag
+ # self.check_max_distribute_diff_vnode(colname, dbname) # bug for tag
pass
-
- def distribute_agg_query(self):
+ def distribute_agg_query(self, dbname="testdb"):
# basic filter
- tdSql.query("select max(c1) from stb1 where c1 is null")
+ tdSql.query(f"select max(c1) from {dbname}.stb1 where c1 is null")
tdSql.checkRows(0)
- tdSql.query("select max(c1) from stb1 where t1=1")
+ tdSql.query(f"select max(c1) from {dbname}.stb1 where t1=1")
tdSql.checkData(0,0,10)
- tdSql.query("select max(c1+c2) from stb1 where c1 =1 ")
+ tdSql.query(f"select max(c1+c2) from {dbname}.stb1 where c1 =1 ")
tdSql.checkData(0,0,11112.000000000)
- tdSql.query("select max(c1) from stb1 where tbname=\"ct2\"")
+ tdSql.query(f"select max(c1) from {dbname}.stb1 where tbname=\"ct2\"")
tdSql.checkData(0,0,10)
- tdSql.query("select max(c1) from stb1 partition by tbname")
+ tdSql.query(f"select max(c1) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(20)
- tdSql.query("select max(c1) from stb1 where t1> 4 partition by tbname")
+ tdSql.query(f"select max(c1) from {dbname}.stb1 where t1> 4 partition by tbname")
tdSql.checkRows(15)
# union all
- tdSql.query("select max(c1) from stb1 union all select max(c1) from stb1 ")
+ tdSql.query(f"select max(c1) from {dbname}.stb1 union all select max(c1) from {dbname}.stb1 ")
tdSql.checkRows(2)
tdSql.checkData(0,0,28)
@@ -221,45 +194,45 @@ class TDTestCase:
tdSql.execute(" create database if not exists db ")
tdSql.execute(" use db ")
- tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
- tdSql.execute(" create table tb1 using st tags(1) ")
- tdSql.execute(" create table tb2 using st tags(2) ")
+ tdSql.execute(" create stable db.st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
+ tdSql.execute(" create table db.tb1 using db.st tags(1) ")
+ tdSql.execute(" create table db.tb2 using db.st tags(2) ")
for i in range(10):
ts = i*10 + self.ts
- tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
- tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")
+ tdSql.execute(f" insert into db.tb1 values({ts},{i},{i}.0)")
+ tdSql.execute(f" insert into db.tb2 values({ts},{i},{i}.0)")
- tdSql.query("select max(tb1.c1), tb2.c2 from tb1, tb2 where tb1.ts=tb2.ts")
+ tdSql.query(f"select max(tb1.c1), tb2.c2 from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts")
tdSql.checkRows(1)
tdSql.checkData(0,0,9)
        tdSql.checkData(0,1,9.00000)
# group by
- tdSql.execute(" use testdb ")
- tdSql.query(" select max(c1),c1 from stb1 group by t1 ")
+ tdSql.execute("use testdb ")
+ tdSql.query(f"select max(c1),c1 from {dbname}.stb1 group by t1 ")
tdSql.checkRows(20)
- tdSql.query(" select max(c1),c1 from stb1 group by c1 ")
+ tdSql.query(f"select max(c1),c1 from {dbname}.stb1 group by c1 ")
tdSql.checkRows(30)
- tdSql.query(" select max(c1),c2 from stb1 group by c2 ")
+ tdSql.query(f"select max(c1),c2 from {dbname}.stb1 group by c2 ")
tdSql.checkRows(31)
# selective common cols of datas
- tdSql.query("select max(c1),c2,c3,c5 from stb1")
+ tdSql.query(f"select max(c1),c2,c3,c5 from {dbname}.stb1")
tdSql.checkRows(1)
tdSql.checkData(0,0,28)
tdSql.checkData(0,1,311108)
tdSql.checkData(0,2,3108)
tdSql.checkData(0,3,31.08000)
- tdSql.query("select max(c1),t1,c2,t3 from stb1")
+ tdSql.query(f"select max(c1),t1,c2,t3 from {dbname}.stb1")
tdSql.checkRows(1)
tdSql.checkData(0,0,28)
tdSql.checkData(0,1,19)
tdSql.checkData(0,2,311108)
- tdSql.query("select max(c1),ceil(t1),pow(c2,1)+2,abs(t3) from stb1")
+ tdSql.query(f"select max(c1),ceil(t1),pow(c2,1)+2,abs(t3) from {dbname}.stb1")
tdSql.checkRows(1)
tdSql.checkData(0,0,28)
tdSql.checkData(0,1,19)
@@ -267,32 +240,32 @@ class TDTestCase:
tdSql.checkData(0,3,2109)
# partition by tbname or partition by tag
- tdSql.query("select max(c1),tbname from stb1 partition by tbname")
+ tdSql.query(f"select max(c1),tbname from {dbname}.stb1 partition by tbname")
query_data = tdSql.queryResult
for row in query_data:
- tbname = row[1]
- tdSql.query(" select max(c1) from %s "%tbname)
+ tbname = f"{dbname}.{row[1]}"
+ tdSql.query(f"select max(c1) from %s "%tbname)
tdSql.checkData(0,0,row[0])
- tdSql.query("select max(c1),tbname from stb1 partition by t1")
+ tdSql.query(f"select max(c1),tbname from {dbname}.stb1 partition by t1")
query_data = tdSql.queryResult
for row in query_data:
- tbname = row[1]
- tdSql.query(" select max(c1) from %s "%tbname)
+ tbname = f"{dbname}.{row[1]}"
+ tdSql.query(f"select max(c1) from %s "%tbname)
tdSql.checkData(0,0,row[0])
# nest query for support max
- tdSql.query("select abs(c2+2)+1 from (select max(c1) c2 from stb1)")
+ tdSql.query(f"select abs(c2+2)+1 from (select max(c1) c2 from {dbname}.stb1)")
tdSql.checkData(0,0,31.000000000)
- tdSql.query("select max(c1+2)+1 as c2 from (select ts ,c1 ,c2 from stb1)")
+ tdSql.query(f"select max(c1+2)+1 as c2 from (select ts ,c1 ,c2 from {dbname}.stb1)")
tdSql.checkData(0,0,31.000000000)
- tdSql.query("select max(a+2)+1 as c2 from (select ts ,abs(c1) a ,c2 from stb1)")
+ tdSql.query(f"select max(a+2)+1 as c2 from (select ts ,abs(c1) a ,c2 from {dbname}.stb1)")
tdSql.checkData(0,0,31.000000000)
# mixup with other functions
- tdSql.query("select max(c1),count(c1),last(c2,c3) from stb1")
+ tdSql.query(f"select max(c1),count(c1),last(c2,c3) from {dbname}.stb1")
tdSql.checkData(0,0,28)
tdSql.checkData(0,1,184)
tdSql.checkData(0,2,-99999)
diff --git a/tests/system-test/2-query/distribute_agg_min.py b/tests/system-test/2-query/distribute_agg_min.py
index 059efe02cddeddc794a70ea255b8460b9d1fa9e1..cc50092451167bcf72ad57480b03cae370a7b297 100644
--- a/tests/system-test/2-query/distribute_agg_min.py
+++ b/tests/system-test/2-query/distribute_agg_min.py
@@ -6,10 +6,8 @@ import random
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143,
- "maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
+
+ updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
@@ -35,76 +33,52 @@ class TDTestCase:
else:
tdLog.info(" min function work as expected, sql : %s "% min_sql)
-
- def prepare_datas_of_distribute(self):
+ def prepare_datas_of_distribute(self, dbname="testdb"):
        # prepare data for 20 tables distributed across different vgroups
- tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
- tdSql.execute(" use testdb ")
+ tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5")
+ tdSql.execute(f" use {dbname} ")
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
'''
)
- tdSql.execute(
- '''
- create table t1
- (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
- '''
- )
for i in range(20):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
for i in range(1,21):
if i ==1 or i == 4:
continue
else:
- tbname = "ct"+f'{i}'
+ tbname = f"{dbname}.ct{i}"
for j in range(9):
tdSql.execute(
f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
-
- tdSql.execute(
- f'''insert into t1 values
- ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
- ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
- ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
- ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
- ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
- ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
- ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
- ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
- ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
- ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- '''
- )
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdLog.info(" prepare data for distributed_aggregate done! ")
- def check_distribute_datas(self):
+ def check_distribute_datas(self, dbname="testdb"):
        # get vgroup_ids of all vgroups
- tdSql.query("show vgroups ")
+ tdSql.query(f"show {dbname}.vgroups ")
vgroups = tdSql.queryResult
vnode_tables={}
@@ -112,9 +86,8 @@ class TDTestCase:
for vgroup_id in vgroups:
vnode_tables[vgroup_id[0]]=[]
-
        # check the sub_tables of each vnode, to make sure the sub_tables have been distributed
- tdSql.query("show tables like 'ct%'")
+ tdSql.query(f"show {dbname}.tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
@@ -128,7 +101,7 @@ class TDTestCase:
if count < 2:
tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")
- def check_min_distribute_diff_vnode(self,col_name):
+ def check_min_distribute_diff_vnode(self,col_name, dbname="testdb"):
vgroup_ids = []
for k ,v in self.vnode_disbutes.items():
@@ -146,9 +119,9 @@ class TDTestCase:
tbname_filters = tbname_ins[:-1]
- min_sql = f"select min({col_name}) from stb1 where tbname in ({tbname_filters});"
+ min_sql = f"select min({col_name}) from {dbname}.stb1 where tbname in ({tbname_filters});"
- same_sql = f"select {col_name} from stb1 where tbname in ({tbname_filters}) and {col_name} is not null order by {col_name} asc limit 1"
+ same_sql = f"select {col_name} from {dbname}.stb1 where tbname in ({tbname_filters}) and {col_name} is not null order by {col_name} asc limit 1"
tdSql.query(min_sql)
min_result = tdSql.queryResult
@@ -161,16 +134,16 @@ class TDTestCase:
else:
tdLog.info(" min function work as expected, sql : %s "% min_sql)
- def check_min_status(self):
- # check max function work status
+ def check_min_status(self, dbname="testdb"):
+ # check min function work status
- tdSql.query("show tables like 'ct%'")
+ tdSql.query(f"show {dbname}.tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
- tablenames.append(table_name[0])
+ tablenames.append(f"{dbname}.{table_name[0]}")
- tdSql.query("desc stb1")
+ tdSql.query(f"desc {dbname}.stb1")
col_names = tdSql.queryResult
colnames = []
@@ -182,119 +155,117 @@ class TDTestCase:
for colname in colnames:
self.check_min_functions(tablename,colname)
- # check max function for different vnode
+ # check min function for different vnode
for colname in colnames:
if colname.startswith("c"):
- self.check_min_distribute_diff_vnode(colname)
+ self.check_min_distribute_diff_vnode(colname, dbname)
else:
- # self.check_min_distribute_diff_vnode(colname) # bug for tag
+ # self.check_min_distribute_diff_vnode(colname, dbname) # bug for tag
pass
-
- def distribute_agg_query(self):
+ def distribute_agg_query(self, dbname="testdb"):
# basic filter
- tdSql.query("select min(c1) from stb1 where c1 is null")
+ tdSql.query(f"select min(c1) from {dbname}.stb1 where c1 is null")
tdSql.checkRows(0)
- tdSql.query("select min(c1) from stb1 where t1=1")
+ tdSql.query(f"select min(c1) from {dbname}.stb1 where t1=1")
tdSql.checkData(0,0,2)
- tdSql.query("select min(c1+c2) from stb1 where c1 =1 ")
+ tdSql.query(f"select min(c1+c2) from {dbname}.stb1 where c1 =1 ")
tdSql.checkData(0,0,11112.000000000)
- tdSql.query("select min(c1) from stb1 where tbname=\"ct2\"")
- tdSql.checkData(0,0,2)
+ tdSql.query(f"select min(c1) from {dbname}.stb1 where tbname=\"ct2\"")
+ tdSql.checkData(0, 0, 2)
- tdSql.query("select min(c1) from stb1 partition by tbname")
+ tdSql.query(f"select min(c1) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(20)
- tdSql.query("select min(c1) from stb1 where t1> 4 partition by tbname")
+ tdSql.query(f"select min(c1) from {dbname}.stb1 where t1> 4 partition by tbname")
tdSql.checkRows(15)
# union all
- tdSql.query("select min(c1) from stb1 union all select min(c1) from stb1 ")
+ tdSql.query(f"select min(c1) from {dbname}.stb1 union all select min(c1) from {dbname}.stb1 ")
tdSql.checkRows(2)
- tdSql.checkData(0,0,0)
+ tdSql.checkData(0, 0, 0)
# join
tdSql.execute(" create database if not exists db ")
tdSql.execute(" use db ")
- tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
- tdSql.execute(" create table tb1 using st tags(1) ")
- tdSql.execute(" create table tb2 using st tags(2) ")
+ tdSql.execute(" create stable db.st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
+ tdSql.execute(" create table db.tb1 using db.st tags(1) ")
+ tdSql.execute(" create table db.tb2 using db.st tags(2) ")
for i in range(10):
ts = i*10 + self.ts
- tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
- tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")
+ tdSql.execute(f" insert into db.tb1 values({ts},{i},{i}.0)")
+ tdSql.execute(f" insert into db.tb2 values({ts},{i},{i}.0)")
- tdSql.query("select min(tb1.c1), tb2.c2 from tb1, tb2 where tb1.ts=tb2.ts")
+ tdSql.query(f"select min(tb1.c1), tb2.c2 from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts")
tdSql.checkRows(1)
tdSql.checkData(0,0,0)
        tdSql.checkData(0,1,0.00000)
# group by
- tdSql.execute(" use testdb ")
- tdSql.query(" select min(c1),c1 from stb1 group by t1 ")
+ tdSql.execute(f"use {dbname} ")
+ tdSql.query(f"select min(c1),c1 from {dbname}.stb1 group by t1 ")
tdSql.checkRows(20)
- tdSql.query(" select min(c1),c1 from stb1 group by c1 ")
+ tdSql.query(f"select min(c1),c1 from {dbname}.stb1 group by c1 ")
tdSql.checkRows(30)
- tdSql.query(" select min(c1),c2 from stb1 group by c2 ")
+ tdSql.query(f"select min(c1),c2 from {dbname}.stb1 group by c2 ")
tdSql.checkRows(31)
# selective common cols of datas
- tdSql.query("select min(c1),c2,c3,c5 from stb1")
+ tdSql.query(f"select min(c1),c2,c3,c5 from {dbname}.stb1")
tdSql.checkRows(1)
tdSql.checkData(0,0,0)
tdSql.checkData(0,1,0)
tdSql.checkData(0,2,0)
tdSql.checkData(0,3,0)
- tdSql.query("select min(c1),t1,c2,t3 from stb1 where c1 >5")
+ tdSql.query(f"select min(c1),t1,c2,t3 from {dbname}.stb1 where c1 > 5")
tdSql.checkRows(1)
tdSql.checkData(0,0,6)
tdSql.checkData(0,2,66666)
- tdSql.query("select min(c1),ceil(t1),pow(c2,1)+2,abs(t3) from stb1 where c1>12")
+ tdSql.query(f"select min(c1),ceil(t1),pow(c2,1)+2,abs(t3) from {dbname}.stb1 where c1 > 12")
tdSql.checkRows(1)
tdSql.checkData(0,0,13)
tdSql.checkData(0,2,144445.000000000)
# partition by tbname or partition by tag
- tdSql.query("select min(c1),tbname from stb1 partition by tbname")
+ tdSql.query(f"select min(c1),tbname from {dbname}.stb1 partition by tbname")
query_data = tdSql.queryResult
for row in query_data:
- tbname = row[1]
- tdSql.query(" select min(c1) from %s "%tbname)
+ tbname = f"{dbname}.{row[1]}"
+ tdSql.query(f"select min(c1) from %s "%tbname)
tdSql.checkData(0,0,row[0])
- tdSql.query("select min(c1),tbname from stb1 partition by t1")
+ tdSql.query(f"select min(c1),tbname from {dbname}.stb1 partition by t1")
query_data = tdSql.queryResult
for row in query_data:
- tbname = row[1]
- tdSql.query(" select min(c1) from %s "%tbname)
+ tbname = f"{dbname}.{row[1]}"
+ tdSql.query(f"select min(c1) from %s "%tbname)
tdSql.checkData(0,0,row[0])
- # nest query for support max
- tdSql.query("select abs(c2+2)+1 from (select min(c1) c2 from stb1)")
+ # nest query for support min
+ tdSql.query(f"select abs(c2+2)+1 from (select min(c1) c2 from {dbname}.stb1)")
tdSql.checkData(0,0,3.000000000)
- tdSql.query("select min(c1+2)+1 as c2 from (select ts ,c1 ,c2 from stb1)")
+ tdSql.query(f"select min(c1+2)+1 as c2 from (select ts ,c1 ,c2 from {dbname}.stb1)")
tdSql.checkData(0,0,3.000000000)
- tdSql.query("select min(a+2)+1 as c2 from (select ts ,abs(c1) a ,c2 from stb1)")
+ tdSql.query(f"select min(a+2)+1 as c2 from (select ts ,abs(c1) a ,c2 from {dbname}.stb1)")
tdSql.checkData(0,0,3.000000000)
# mixup with other functions
- tdSql.query("select max(c1),count(c1),last(c2,c3),min(c1) from stb1")
+ tdSql.query(f"select max(c1),count(c1),last(c2,c3) from {dbname}.stb1")
tdSql.checkData(0,0,28)
tdSql.checkData(0,1,184)
tdSql.checkData(0,2,-99999)
tdSql.checkData(0,3,-999)
- tdSql.checkData(0,4,0)
def run(self):
diff --git a/tests/system-test/2-query/join.py b/tests/system-test/2-query/join.py
index 2348873a34283572116e6eb97760733d400c6914..9d30e1946a16c487f22e43fe03461d559dc7c945 100644
--- a/tests/system-test/2-query/join.py
+++ b/tests/system-test/2-query/join.py
@@ -377,11 +377,11 @@ class TDTestCase:
tdSql.query("select ct1.c_int from db.ct1 as ct1 join db1.ct1 as cy1 on ct1.ts=cy1.ts")
tdSql.checkRows(self.rows)
tdSql.query("select ct1.c_int from db.stb1 as ct1 join db1.ct1 as cy1 on ct1.ts=cy1.ts")
- tdSql.checkRows(self.rows)
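+        # assumed intent: db.stb1 now holds extra child tables whose rows share timestamps with db1.ct1, so the super-table join matches more rows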
+        tdSql.checkRows(self.rows + int(self.rows * 0.6 // 3) + int(self.rows * 0.8 // 4))
tdSql.query("select ct1.c_int from db.nt1 as ct1 join db1.nt1 as cy1 on ct1.ts=cy1.ts")
tdSql.checkRows(self.rows + 3)
tdSql.query("select ct1.c_int from db.stb1 as ct1 join db1.stb1 as cy1 on ct1.ts=cy1.ts")
- tdSql.checkRows(self.rows * 3 + 6)
+ tdSql.checkRows(50)
tdSql.query("select count(*) from db.ct1")
tdSql.checkData(0, 0, self.rows)
diff --git a/tests/system-test/7-tmq/tmqShow.py b/tests/system-test/7-tmq/tmqShow.py
index 6b7e7375ffeca382bb77e4e8b8ab1704a83854f3..6f8183bf06cfa501f62c22c82c2915638ea7414b 100644
--- a/tests/system-test/7-tmq/tmqShow.py
+++ b/tests/system-test/7-tmq/tmqShow.py
@@ -51,32 +51,32 @@ class TDTestCase:
tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
# tdLog.info("insert data")
# tmqCom.insert_data(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])
-
+
tdLog.info("create 4 topics")
sqlString = "create topic %s as database %s" %(topicNameList[0], paraDict['dbName'])
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
-
+
sqlString = "create topic %s as stable %s.%s" %(topicNameList[1], paraDict['dbName'], paraDict['stbName'])
tdLog.info("create topic sql: %s"%sqlString)
- tdSql.execute(sqlString)
+ tdSql.execute(sqlString)
queryString = "select * from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[2], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
-
+
queryString = "select ts, log(c1), ceil(pow(c1,3)) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s " %(topicNameList[3], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query("show topics")
- tdLog.debug(tdSql.queryResult)
+ tdLog.debug(tdSql.queryResult)
rows = tdSql.getRows()
if rows != len(consumerIdList):
tdLog.exit("topic rows error")
-
+
for i in range (rows):
topicName = tdSql.getData(i,0)
matchFlag = 0
@@ -87,24 +87,24 @@ class TDTestCase:
break
if matchFlag == 0:
tdLog.exit("topic name: %s is error", topicName)
-
+
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
topicList = topicNameList[0]
ifcheckdata = 0
- ifManualCommit = 0
+ ifManualCommit = 0
keyList = 'group.id:%s, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'%consumeGroupIdList[0]
tmqCom.insertConsumerInfo(consumerIdList[0], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
topicList = topicNameList[1]
keyList = 'group.id:%s, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'%consumeGroupIdList[1]
tmqCom.insertConsumerInfo(consumerIdList[1], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
topicList = topicNameList[2]
keyList = 'group.id:%s, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'%consumeGroupIdList[2]
tmqCom.insertConsumerInfo(consumerIdList[2], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
topicList = topicNameList[3]
keyList = 'group.id:%s, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'%consumeGroupIdList[3]
tmqCom.insertConsumerInfo(consumerIdList[3], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
@@ -118,27 +118,27 @@ class TDTestCase:
time.sleep(5)
tdLog.info("check show consumers")
tdSql.query("show consumers")
- # tdLog.info(tdSql.queryResult)
+ # tdLog.info(tdSql.queryResult)
rows = tdSql.getRows()
tdLog.info("show consumers rows: %d"%rows)
if rows != len(topicNameList):
tdLog.exit("show consumers rows error")
-
- tdLog.info("check show subscriptions")
+
+ tdLog.info("check show subscriptions")
tdSql.query("show subscriptions")
- # tdLog.debug(tdSql.queryResult)
+ # tdLog.debug(tdSql.queryResult)
rows = tdSql.getRows()
tdLog.info("show subscriptions rows: %d"%rows)
if rows != paraDict['vgroups'] * len(topicNameList):
tdLog.exit("show subscriptions rows error")
pThread.join()
-
+
tdLog.info("insert process end, and start to check consume result")
expectRows = len(consumerIdList)
_ = tmqCom.selectConsumeResult(expectRows)
-
- time.sleep(10)
+
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
diff --git a/tests/system-test/7-tmq/tmq_taosx.py b/tests/system-test/7-tmq/tmq_taosx.py
new file mode 100644
index 0000000000000000000000000000000000000000..00b0aed5ee6277c6dc5e79b3c0b112135f4c7ac6
--- /dev/null
+++ b/tests/system-test/7-tmq/tmq_taosx.py
@@ -0,0 +1,91 @@
+
+import taos
+import sys
+import time
+import socket
+import os
+import threading
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+from util.common import *
+sys.path.append("./7-tmq")
+from tmqCommon import *
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug(f"start to excute {__file__}")
+ tdSql.init(conn.cursor())
+ #tdSql.init(conn.cursor(), logSql) # output sql.txt file
+
+ def checkFileContent(self):
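+        # run the tmq_taosx_ci consumer binary, then compare what it consumed
+        # (tmq_taosx_tmp.source) against the query-side dump (tmq_taosx_tmp.result)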
+ buildPath = tdCom.getBuildPath()
+ cfgPath = tdCom.getClientCfgPath()
+ cmdStr = '%s/build/bin/tmq_taosx_ci -c %s'%(buildPath, cfgPath)
+ tdLog.info(cmdStr)
+ os.system(cmdStr)
+
+ srcFile = '%s/../log/tmq_taosx_tmp.source'%(cfgPath)
+ dstFile = '%s/../log/tmq_taosx_tmp.result'%(cfgPath)
+ tdLog.info("compare file: %s, %s"%(srcFile, dstFile))
+
+ consumeFile = open(srcFile, mode='r')
+ queryFile = open(dstFile, mode='r')
+
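+        # compare the two dumps line by line until the result file is exhausted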
+ while True:
+ dst = queryFile.readline()
+ src = consumeFile.readline()
+
+ if dst:
+ if dst != src:
+ tdLog.exit("compare error: %s != %s"%src, dst)
+ else:
+ break
+
+ tdSql.execute('use db_taosx')
+ tdSql.query("select * from ct3 order by c1 desc")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 1, 51)
+ tdSql.checkData(0, 4, 940)
+ tdSql.checkData(1, 1, 23)
+ tdSql.checkData(1, 4, None)
+
+ tdSql.query("select * from ct1")
+ tdSql.checkRows(4)
+
+ tdSql.query("select * from ct2")
+ tdSql.checkRows(0)
+
+ tdSql.query("select * from ct0 order by c1")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 3, "a")
+ tdSql.checkData(1, 4, None)
+
+ tdSql.query("select * from n1 order by cc3 desc")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 1, "eeee")
+ tdSql.checkData(1, 2, 940)
+
+ tdSql.query("select * from jt order by i desc")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 1, 11)
+ tdSql.checkData(0, 2, None)
+ tdSql.checkData(1, 1, 1)
+ tdSql.checkData(1, 2, '{"k1":1,"k2":"hello"}')
+
+ return
+
+ def run(self):
+ tdSql.prepare()
+ self.checkFileContent()
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+event = threading.Event()
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh
index 8c19a88ac65738859bcaeba62928048c943f9f7e..0f72eaa172f9f4102c6b6deb2bafab8cd2f56133 100755
--- a/tests/system-test/fulltest.sh
+++ b/tests/system-test/fulltest.sh
@@ -27,7 +27,7 @@ python3 ./test.py -f 1-insert/alter_stable.py
python3 ./test.py -f 1-insert/alter_table.py
python3 ./test.py -f 1-insert/insertWithMoreVgroup.py
python3 ./test.py -f 1-insert/table_comment.py
-#python3 ./test.py -f 1-insert/time_range_wise.py
+python3 ./test.py -f 1-insert/time_range_wise.py
python3 ./test.py -f 1-insert/block_wise.py
python3 ./test.py -f 1-insert/create_retentions.py
python3 ./test.py -f 1-insert/table_param_ttl.py
@@ -59,17 +59,44 @@ python3 ./test.py -f 2-query/ceil.py
python3 ./test.py -f 2-query/ceil.py -R
python3 ./test.py -f 2-query/char_length.py
python3 ./test.py -f 2-query/char_length.py -R
-#python3 ./test.py -f 2-query/check_tsdb.py
-#python3 ./test.py -f 2-query/check_tsdb.py -R
+# python3 ./test.py -f 2-query/check_tsdb.py
+# python3 ./test.py -f 2-query/check_tsdb.py -R
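+# each case below is run twice: once natively, then with -R for a second pass through the REST interface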
+python3 ./test.py -f 2-query/concat.py
+python3 ./test.py -f 2-query/concat.py -R
+python3 ./test.py -f 2-query/concat_ws.py
+python3 ./test.py -f 2-query/concat_ws.py -R
+python3 ./test.py -f 2-query/concat_ws2.py
+python3 ./test.py -f 2-query/concat_ws2.py -R
+python3 ./test.py -f 2-query/cos.py
+python3 ./test.py -f 2-query/cos.py -R
+python3 ./test.py -f 2-query/count_partition.py
+python3 ./test.py -f 2-query/count_partition.py -R
+python3 ./test.py -f 2-query/count.py
+python3 ./test.py -f 2-query/count.py -R
+python3 ./test.py -f 2-query/db.py
+python3 ./test.py -f 2-query/db.py -R
+python3 ./test.py -f 2-query/diff.py
+python3 ./test.py -f 2-query/diff.py -R
+python3 ./test.py -f 2-query/distinct.py
+python3 ./test.py -f 2-query/distinct.py -R
+python3 ./test.py -f 2-query/distribute_agg_apercentile.py
+python3 ./test.py -f 2-query/distribute_agg_apercentile.py -R
+python3 ./test.py -f 2-query/distribute_agg_avg.py
+python3 ./test.py -f 2-query/distribute_agg_avg.py -R
+python3 ./test.py -f 2-query/distribute_agg_count.py
+python3 ./test.py -f 2-query/distribute_agg_count.py -R
+python3 ./test.py -f 2-query/distribute_agg_max.py
+python3 ./test.py -f 2-query/distribute_agg_max.py -R
+python3 ./test.py -f 2-query/distribute_agg_min.py
+python3 ./test.py -f 2-query/distribute_agg_min.py -R
+
+
+
python3 ./test.py -f 1-insert/update_data.py
python3 ./test.py -f 1-insert/delete_data.py
-python3 ./test.py -f 2-query/db.py
-
-python3 ./test.py -f 2-query/db.py
-python3 ./test.py -f 2-query/distinct.py
python3 ./test.py -f 2-query/varchar.py
python3 ./test.py -f 2-query/ltrim.py
python3 ./test.py -f 2-query/rtrim.py
@@ -81,10 +108,7 @@ python3 ./test.py -f 2-query/join2.py
python3 ./test.py -f 2-query/substr.py
python3 ./test.py -f 2-query/union.py
python3 ./test.py -f 2-query/union1.py
-python3 ./test.py -f 2-query/concat.py
python3 ./test.py -f 2-query/concat2.py
-python3 ./test.py -f 2-query/concat_ws.py
-python3 ./test.py -f 2-query/concat_ws2.py
python3 ./test.py -f 2-query/spread.py
python3 ./test.py -f 2-query/hyperloglog.py
python3 ./test.py -f 2-query/explain.py
@@ -97,13 +121,11 @@ python3 ./test.py -f 2-query/Now.py
python3 ./test.py -f 2-query/Today.py
python3 ./test.py -f 2-query/max.py
python3 ./test.py -f 2-query/min.py
-python3 ./test.py -f 2-query/count.py
python3 ./test.py -f 2-query/last.py
python3 ./test.py -f 2-query/first.py
python3 ./test.py -f 2-query/To_iso8601.py
python3 ./test.py -f 2-query/To_unixtimestamp.py
python3 ./test.py -f 2-query/timetruncate.py
-python3 ./test.py -f 2-query/diff.py
python3 ./test.py -f 2-query/Timediff.py
python3 ./test.py -f 2-query/json_tag.py
@@ -115,7 +137,6 @@ python3 ./test.py -f 2-query/log.py
python3 ./test.py -f 2-query/pow.py
python3 ./test.py -f 2-query/sqrt.py
python3 ./test.py -f 2-query/sin.py
-python3 ./test.py -f 2-query/cos.py
python3 ./test.py -f 2-query/tan.py
python3 ./test.py -f 2-query/query_cols_tags_and_or.py
# python3 ./test.py -f 2-query/nestedQuery.py
@@ -126,7 +147,6 @@ python3 ./test.py -f 2-query/query_cols_tags_and_or.py
python3 ./test.py -f 2-query/elapsed.py
python3 ./test.py -f 2-query/csum.py
python3 ./test.py -f 2-query/mavg.py
-python3 ./test.py -f 2-query/diff.py
python3 ./test.py -f 2-query/sample.py
python3 ./test.py -f 2-query/function_diff.py
python3 ./test.py -f 2-query/unique.py
@@ -135,17 +155,11 @@ python3 ./test.py -f 2-query/function_stateduration.py
python3 ./test.py -f 2-query/statecount.py
python3 ./test.py -f 2-query/tail.py
python3 ./test.py -f 2-query/ttl_comment.py
-python3 ./test.py -f 2-query/distribute_agg_count.py
-python3 ./test.py -f 2-query/distribute_agg_max.py
-python3 ./test.py -f 2-query/distribute_agg_min.py
python3 ./test.py -f 2-query/distribute_agg_sum.py
python3 ./test.py -f 2-query/distribute_agg_spread.py
-python3 ./test.py -f 2-query/distribute_agg_apercentile.py
-python3 ./test.py -f 2-query/distribute_agg_avg.py
python3 ./test.py -f 2-query/distribute_agg_stddev.py
python3 ./test.py -f 2-query/twa.py
python3 ./test.py -f 2-query/irate.py
-python3 ./test.py -f 2-query/count_partition.py
python3 ./test.py -f 2-query/function_null.py
python3 ./test.py -f 2-query/queryQnode.py
python3 ./test.py -f 2-query/max_partition.py
@@ -168,12 +182,12 @@ python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertData.py -N 5 -M 3
# python3 ./test.py -f 6-cluster/5dnode3mnodeRestartMnodeInsertData.py -N 5 -M 3
# python3 ./test.py -f 6-cluster/5dnode3mnodeRestartVnodeInsertData.py -N 5 -M 3
-python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 6 -M 3 -C 5
+python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 6 -M 3 -C 5
# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeStopInsert.py
# python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py -N 5
# python3 test.py -f 6-cluster/5dnode3mnodeStopConnect.py -N 5 -M 3
-python3 ./test.py -f 7-tmq/dropDbR3ConflictTransaction.py -N 3
+python3 ./test.py -f 7-tmq/dropDbR3ConflictTransaction.py -N 3
python3 ./test.py -f 7-tmq/basic5.py
python3 ./test.py -f 7-tmq/subscribeDb.py
python3 ./test.py -f 7-tmq/subscribeDb0.py
@@ -227,6 +241,7 @@ python3 ./test.py -f 7-tmq/tmqUdf-multCtb-snapshot1.py
python3 ./test.py -f 7-tmq/stbTagFilter-1ctb.py
python3 ./test.py -f 7-tmq/dataFromTsdbNWal.py
python3 ./test.py -f 7-tmq/dataFromTsdbNWal-multiCtb.py
+python3 ./test.py -f 7-tmq/tmq_taosx.py
# python3 ./test.py -f 7-tmq/stbTagFilter-multiCtb.py
#------------querPolicy 2-----------
@@ -295,7 +310,6 @@ python3 ./test.py -f 2-query/avg.py -Q 2
# python3 ./test.py -f 2-query/elapsed.py -Q 2
python3 ./test.py -f 2-query/csum.py -Q 2
python3 ./test.py -f 2-query/mavg.py -Q 2
-python3 ./test.py -f 2-query/diff.py -Q 2
python3 ./test.py -f 2-query/sample.py -Q 2
python3 ./test.py -f 2-query/function_diff.py -Q 2
python3 ./test.py -f 2-query/unique.py -Q 2
@@ -383,7 +397,6 @@ python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 3
# python3 ./test.py -f 2-query/elapsed.py -Q 3
python3 ./test.py -f 2-query/csum.py -Q 3
python3 ./test.py -f 2-query/mavg.py -Q 3
-python3 ./test.py -f 2-query/diff.py -Q 3
python3 ./test.py -f 2-query/sample.py -Q 3
python3 ./test.py -f 2-query/function_diff.py -Q 3
python3 ./test.py -f 2-query/unique.py -Q 3
diff --git a/tests/test/c/CMakeLists.txt b/tests/test/c/CMakeLists.txt
index 5db97a0f0fc21486c642c5c4ea6b92c5edf184e9..605eef9be3bc15580858922c0001352fb3b8f079 100644
--- a/tests/test/c/CMakeLists.txt
+++ b/tests/test/c/CMakeLists.txt
@@ -1,6 +1,7 @@
add_executable(tmq_demo tmqDemo.c)
add_executable(tmq_sim tmqSim.c)
add_executable(create_table createTable.c)
+add_executable(tmq_taosx_ci tmq_taosx_ci.c)
target_link_libraries(
create_table
PUBLIC taos_static
@@ -22,6 +23,13 @@ target_link_libraries(
PUBLIC common
PUBLIC os
)
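+# link the new taosx CI consumer against the same client libraries as the other test binaries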
+target_link_libraries(
+ tmq_taosx_ci
+ PUBLIC taos_static
+ PUBLIC util
+ PUBLIC common
+ PUBLIC os
+)
add_executable(sdbDump sdbDump.c)
target_link_libraries(
diff --git a/tests/test/c/tmq_taosx_ci.c b/tests/test/c/tmq_taosx_ci.c
new file mode 100644
index 0000000000000000000000000000000000000000..ece7ad4819f2947cb0a474491255dd296136581b
--- /dev/null
+++ b/tests/test/c/tmq_taosx_ci.c
@@ -0,0 +1,520 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include "taos.h"
+#include "types.h"
+
+static int running = 1;
+TdFilePtr g_fp = NULL;
+char dir[64]={0};
+
+static TAOS* use_db(){
+ TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
+ if (pConn == NULL) {
+ return NULL;
+ }
+
+ TAOS_RES* pRes = taos_query(pConn, "use db_taosx");
+ if (taos_errno(pRes) != 0) {
+ printf("error in use db_taosx, reason:%s\n", taos_errstr(pRes));
+ return NULL;
+ }
+ taos_free_result(pRes);
+ return pConn;
+}
+
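+// print message metadata, dump meta (JSON) events into the result file, and
+// replay every message into db_taosx through the raw-data API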
+static void msg_process(TAOS_RES* msg) {
+ /*memset(buf, 0, 1024);*/
+ printf("-----------topic-------------: %s\n", tmq_get_topic_name(msg));
+ printf("db: %s\n", tmq_get_db_name(msg));
+ printf("vg: %d\n", tmq_get_vgroup_id(msg));
+ TAOS *pConn = use_db();
+ if (tmq_get_res_type(msg) == TMQ_RES_TABLE_META) {
+ char* result = tmq_get_json_meta(msg);
+ if (result) {
+ printf("meta result: %s\n", result);
+ }
+ taosFprintfFile(g_fp, result);
+ taosFprintfFile(g_fp, "\n");
+ tmq_free_json_meta(result);
+ }
+
+ tmq_raw_data raw = {0};
+ tmq_get_raw(msg, &raw);
+ int32_t ret = tmq_write_raw(pConn, raw);
+ printf("write raw data: %s\n", tmq_err2str(ret));
+
+// else{
+// while(1){
+// int numOfRows = 0;
+// void *pData = NULL;
+// taos_fetch_raw_block(msg, &numOfRows, &pData);
+// if(numOfRows == 0) break;
+// printf("write data: tbname:%s, numOfRows:%d\n", tmq_get_table_name(msg), numOfRows);
+// int ret = taos_write_raw_block(pConn, numOfRows, pData, tmq_get_table_name(msg));
+// printf("write raw data: %s\n", tmq_err2str(ret));
+// }
+// }
+
+ taos_close(pConn);
+}
+
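+// prepare the sink (db_taosx) and source (abc1) databases; the inserts, schema
+// changes and tag updates below make the consumer receive both data and meta messages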
+int32_t init_env() {
+ TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
+ if (pConn == NULL) {
+ return -1;
+ }
+
+ TAOS_RES* pRes = taos_query(pConn, "drop database if exists db_taosx");
+ if (taos_errno(pRes) != 0) {
+ printf("error in drop db_taosx, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create database if not exists db_taosx vgroups 1");
+ if (taos_errno(pRes) != 0) {
+ printf("error in create db_taosx, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "drop database if exists abc1");
+ if (taos_errno(pRes) != 0) {
+ printf("error in drop db, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create database if not exists abc1 vgroups 1");
+ if (taos_errno(pRes) != 0) {
+ printf("error in create db, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "use abc1");
+ if (taos_errno(pRes) != 0) {
+ printf("error in use db, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn,
+ "create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(16)) tags(t1 int, t3 "
+ "nchar(8), t4 bool)");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create super table st1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create table if not exists ct0 using st1 tags(1000, \"ttt\", true)");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create child table tu1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "insert into ct0 values(1626006833600, 1, 2, 'a')");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to insert into ct0, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create table if not exists ct1 using st1(t1) tags(2000)");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create child table ct1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create table if not exists ct2 using st1(t1) tags(NULL)");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create child table ct2, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "insert into ct1 values(1626006833600, 3, 4, 'b')");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to insert into ct1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create table if not exists ct3 using st1(t1) tags(3000)");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create child table ct3, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "insert into ct3 values(1626006833600, 5, 6, 'c') ct1 values(1626006833601, 2, 3, 'sds') (1626006833602, 4, 5, 'ddd') ct0 values(1626006833602, 4, 3, 'hwj') ct1 values(now+5s, 23, 32, 's21ds')");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "alter table st1 add column c4 bigint");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "alter table st1 modify column c3 binary(64)");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "insert into ct3 values(1626006833605, 53, 63, 'cffffffffffffffffffffffffffff', 8989898899999) (1626006833609, 51, 62, 'c333', 940)");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "insert into ct3 select * from ct1");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "alter table st1 add tag t2 binary(64)");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "alter table ct3 set tag t1=5000");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to slter child table ct3, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "delete from abc1 .ct3 where ts < 1626006833606");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create table if not exists n1(ts timestamp, c1 int, c2 nchar(4))");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create normal table n1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "alter table n1 add column c3 bigint");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "alter table n1 modify column c2 nchar(8)");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "alter table n1 rename column c3 cc3");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "alter table n1 comment 'hello'");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "alter table n1 drop column c1");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "insert into n1 values(now, 'eeee', 8989898899999) (now+9s, 'c333', 940)");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to insert into n1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create table jt(ts timestamp, i int) tags(t json)");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create super table jt, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create table jt1 using jt tags('{\"k1\":1, \"k2\":\"hello\"}')");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create super table jt, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create table jt2 using jt tags('')");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create super table jt2, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "insert into jt1 values(now, 1)");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create super table jt1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "insert into jt2 values(now, 11)");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create super table jt2, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ taos_close(pConn);
+ return 0;
+}
+
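+// Creates topic_ctb_column over database abc1 "with meta", so DDL events
+// are delivered to consumers alongside the data.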
+int32_t create_topic() {
+ printf("create topic\n");
+ TAOS_RES* pRes;
+ TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
+ if (pConn == NULL) {
+ return -1;
+ }
+
+ pRes = taos_query(pConn, "use abc1");
+ if (taos_errno(pRes) != 0) {
+ printf("error in use db, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create topic topic_ctb_column with meta as database abc1");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create topic topic_ctb_column, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ taos_close(pConn);
+ return 0;
+}
+
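+// Auto-commit callback: just logs the commit result.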
+void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) {
+ printf("commit %d tmq %p param %p\n", code, tmq, param);
+}
+
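+// Builds a tmq consumer in group "tg2" with auto commit, background
+// heartbeat, and table-name delivery enabled.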
+tmq_t* build_consumer() {
+#if 0
+ TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
+ assert(pConn != NULL);
+
+ TAOS_RES* pRes = taos_query(pConn, "use abc1");
+ if (taos_errno(pRes) != 0) {
+ printf("error in use db, reason:%s\n", taos_errstr(pRes));
+ }
+ taos_free_result(pRes);
+#endif
+
+ tmq_conf_t* conf = tmq_conf_new();
+ tmq_conf_set(conf, "group.id", "tg2");
+ tmq_conf_set(conf, "client.id", "my app 1");
+ tmq_conf_set(conf, "td.connect.user", "root");
+ tmq_conf_set(conf, "td.connect.pass", "taosdata");
+ tmq_conf_set(conf, "msg.with.table.name", "true");
+ tmq_conf_set(conf, "enable.auto.commit", "true");
+ tmq_conf_set(conf, "enable.heartbeat.background", "true");
+
+ /*tmq_conf_set(conf, "experimental.snapshot.enable", "true");*/
+
+ tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
+ tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
+ assert(tmq);
+ tmq_conf_destroy(conf);
+ return tmq;
+}
+
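+// Subscription list containing the single test topic.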
+tmq_list_t* build_topic_list() {
+ tmq_list_t* topic_list = tmq_list_new();
+ tmq_list_append(topic_list, "topic_ctb_column");
+ /*tmq_list_append(topic_list, "tmq_test_db_multi_insert_topic");*/
+ return topic_list;
+}
+
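+// Subscribes and polls with a 1s timeout, processing each message until a
+// poll comes back empty, then closes the consumer.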
+void basic_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
+ int32_t code;
+
+ if ((code = tmq_subscribe(tmq, topics))) {
+ fprintf(stderr, "%% Failed to start consuming topics: %s\n", tmq_err2str(code));
+ printf("subscribe err\n");
+ return;
+ }
+ int32_t cnt = 0;
+ while (running) {
+ TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, 1000);
+ if (tmqmessage) {
+ cnt++;
+ msg_process(tmqmessage);
+ /*if (cnt >= 2) break;*/
+ /*printf("get data\n");*/
+ taos_free_result(tmqmessage);
+    } else {
+ break;
+ }
+ }
+
+ code = tmq_consumer_close(tmq);
+ if (code)
+ fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code));
+ else
+ fprintf(stderr, "%% Consumer closed\n");
+}
+
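+// Alternative loop (unused by main): prints the active subscription list
+// and polls until interrupted; the periodic-commit logic is left commented
+// out.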
+void sync_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
+ static const int MIN_COMMIT_COUNT = 1;
+
+ int msg_count = 0;
+ int32_t code;
+
+ if ((code = tmq_subscribe(tmq, topics))) {
+ fprintf(stderr, "%% Failed to start consuming topics: %s\n", tmq_err2str(code));
+ return;
+ }
+
+ tmq_list_t* subList = NULL;
+ tmq_subscription(tmq, &subList);
+ char** subTopics = tmq_list_to_c_array(subList);
+ int32_t sz = tmq_list_get_size(subList);
+ printf("subscribed topics: ");
+ for (int32_t i = 0; i < sz; i++) {
+ printf("%s, ", subTopics[i]);
+ }
+ printf("\n");
+ tmq_list_destroy(subList);
+
+ while (running) {
+ TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, 1000);
+ if (tmqmessage) {
+ msg_process(tmqmessage);
+ taos_free_result(tmqmessage);
+
+ /*tmq_commit_sync(tmq, NULL);*/
+ /*if ((++msg_count % MIN_COMMIT_COUNT) == 0) tmq_commit(tmq, NULL, 0);*/
+ }
+ }
+
+ code = tmq_consumer_close(tmq);
+ if (code)
+ fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code));
+ else
+ fprintf(stderr, "%% Consumer closed\n");
+}
+
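+// Truncates the .source log that msg_process() appends to, and writes the
+// expected JSON meta lines to the companion .result file.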
+void initLogFile() {
+ char f1[256] = {0};
+ char f2[256] = {0};
+
+  snprintf(f1, sizeof(f1), "%s/../log/tmq_taosx_tmp.source", dir);
+  snprintf(f2, sizeof(f2), "%s/../log/tmq_taosx_tmp.result", dir);
+ TdFilePtr pFile = taosOpenFile(f1, TD_FILE_TEXT | TD_FILE_TRUNC | TD_FILE_STREAM);
+ if (NULL == pFile) {
+ fprintf(stderr, "Failed to open %s for save result\n", f1);
+ exit(-1);
+ }
+ g_fp = pFile;
+
+ TdFilePtr pFile2 = taosOpenFile(f2, TD_FILE_TEXT | TD_FILE_TRUNC | TD_FILE_STREAM);
+ if (NULL == pFile2) {
+ fprintf(stderr, "Failed to open %s for save result\n", f2);
+ exit(-1);
+ }
+ char *result[] = {
+ "{\"type\":\"create\",\"tableName\":\"st1\",\"tableType\":\"super\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"c1\",\"type\":4},{\"name\":\"c2\",\"type\":6},{\"name\":\"c3\",\"type\":8,\"length\":16}],\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\",\"type\":1}]}",
+ "{\"type\":\"create\",\"tableName\":\"ct0\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":1000},{\"name\":\"t3\",\"type\":10,\"value\":\"\\\"ttt\\\"\"},{\"name\":\"t4\",\"type\":1,\"value\":1}]}",
+ "{\"type\":\"create\",\"tableName\":\"ct1\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":2000}]}",
+ "{\"type\":\"create\",\"tableName\":\"ct2\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[]}",
+ "{\"type\":\"create\",\"tableName\":\"ct3\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":3000}]}",
+ "{\"type\":\"alter\",\"tableName\":\"st1\",\"tableType\":\"super\",\"alterType\":5,\"colName\":\"c4\",\"colType\":5}",
+ "{\"type\":\"alter\",\"tableName\":\"st1\",\"tableType\":\"super\",\"alterType\":7,\"colName\":\"c3\",\"colType\":8,\"colLength\":64}",
+ "{\"type\":\"alter\",\"tableName\":\"st1\",\"tableType\":\"super\",\"alterType\":1,\"colName\":\"t2\",\"colType\":8,\"colLength\":64}",
+ "{\"type\":\"alter\",\"tableName\":\"ct3\",\"tableType\":\"child\",\"alterType\":4,\"colName\":\"t1\",\"colValue\":\"5000\",\"colValueNull\":false}",
+ "{\"type\":\"create\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"c1\",\"type\":4},{\"name\":\"c2\",\"type\":10,\"length\":4}],\"tags\":[]}",
+ "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":5,\"colName\":\"c3\",\"colType\":5}",
+ "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":7,\"colName\":\"c2\",\"colType\":10,\"colLength\":8}",
+ "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":10,\"colName\":\"c3\",\"colNewName\":\"cc3\"}",
+ "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":9}",
+ "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":6,\"colName\":\"c1\"}",
+ "{\"type\":\"create\",\"tableName\":\"jt\",\"tableType\":\"super\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"i\",\"type\":4}],\"tags\":[{\"name\":\"t\",\"type\":15}]}",
+ "{\"type\":\"create\",\"tableName\":\"jt1\",\"tableType\":\"child\",\"using\":\"jt\",\"tagNum\":1,\"tags\":[{\"name\":\"t\",\"type\":15,\"value\":\"{\\\"k1\\\":1,\\\"k2\\\":\\\"hello\\\"}\"}]}",
+ "{\"type\":\"create\",\"tableName\":\"jt2\",\"tableType\":\"child\",\"using\":\"jt\",\"tagNum\":1,\"tags\":[]}"
+ };
+
+  for (size_t i = 0; i < sizeof(result) / sizeof(result[0]); i++) {
+    // "%s" guards against a '%' in the JSON being treated as a format specifier
+    taosFprintfFile(pFile2, "%s\n", result[i]);
+  }
+ taosCloseFile(&pFile2);
+}
+
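+// Entry point: an optional "-c <dir>" argument overrides the default config
+// directory used to locate the ../log output directory.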
+int main(int argc, char* argv[]) {
+  if (argc == 3 && strcmp(argv[1], "-c") == 0) {
+    // bound the copy: argv[2] may be longer than dir[64]
+    snprintf(dir, sizeof(dir), "%s", argv[2]);
+  } else {
+    snprintf(dir, sizeof(dir), "%s", "../../../sim/psim/cfg");
+  }
+
+ printf("env init\n");
+ initLogFile();
+
+ if (init_env() < 0) {
+ return -1;
+ }
+  if (create_topic() < 0) {
+    return -1;
+  }
+
+ tmq_t* tmq = build_consumer();
+ tmq_list_t* topic_list = build_topic_list();
+  basic_consume_loop(tmq, topic_list);
+  /*sync_consume_loop(tmq, topic_list);*/
+
+  tmq_list_destroy(topic_list);
+  taosCloseFile(&g_fp);
+  return 0;
+}