Commit ad958177, authored by dapan1121

Merge branch '3.0' into fix/TD-17147

@@ -443,7 +443,7 @@ pipeline {
          }
        }
      }
-      catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
+      /*catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
        timeout(time: 15, unit: 'MINUTES'){
          script {
            sh '''
@@ -455,7 +455,7 @@ pipeline {
            '''
          }
        }
-      }
+      }*/
    }
  }
}
......
@@ -23,7 +23,7 @@ A single line of text is used in OpenTSDB line protocol to represent one row of
- `metric` will be used as the STable name.
- `timestamp` is the timestamp of current row of data. The time precision will be determined automatically based on the length of the timestamp. Second and millisecond time precision are supported.
- - `value` is a metric which must be a numeric value, the corresponding column name is "value".
+ - `value` is a metric which must be a numeric value, the corresponding column name is "_value".
- The last part is the tag set separated by spaces, all tags will be converted to nchar type automatically.

For example:
@@ -74,7 +74,7 @@ taos> show STables;
Query OK, 2 row(s) in set (0.002544s)

taos> select tbname, * from `meters.current`;
-              tbname              |            ts             |    value     | groupid |        location         |
+              tbname              |           _ts             |   _value     | groupid |        location         |
==================================================================================================================================
 t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.249 | 10.800000000 | 3 | California.LosAngeles |
 t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.250 | 11.300000000 | 3 | California.LosAngeles |
......
@@ -91,7 +91,7 @@ taos> show STables;
Query OK, 2 row(s) in set (0.001954s)

taos> select * from `meters.current`;
-           ts            |    value     |   groupid    |        location         |
+          _ts            |   _value     |   groupid    |        location         |
===================================================================================================================
 2022-03-28 09:56:51.249 | 10.300000000 | 2.000000000 | California.SanFrancisco |
 2022-03-28 09:56:51.250 | 12.600000000 | 2.000000000 | California.SanFrancisco |
......
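The bullets above describe how a telnet-style OpenTSDB line maps onto TDengine: `metric` becomes the STable name, the numeric measurement lands in the (now renamed) `_value` column, and the trailing key=value pairs become nchar tags. Below is a minimal C sketch of feeding such lines through the schemaless interface; the connection parameters are placeholders and the exact enum spellings (`TSDB_SML_TELNET_PROTOCOL`, `TSDB_SML_TIMESTAMP_NOT_CONFIGURED`) are assumptions taken from the TDengine C client headers rather than from this diff.

```c
// Sketch: ingest OpenTSDB telnet lines via the C schemaless API (assumed taos.h signatures).
#include <stdio.h>
#include "taos.h"

int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  if (conn == NULL) {
    fprintf(stderr, "connect failed: %s\n", taos_errstr(NULL));
    return 1;
  }

  // Schemaless writes need a current database.
  taos_free_result(taos_query(conn, "CREATE DATABASE IF NOT EXISTS test"));
  taos_select_db(conn, "test");

  // metric timestamp value tag1=v1 tag2=v2 ...
  // 10-digit timestamps are read as seconds, 13-digit as milliseconds.
  char *lines[] = {
      "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2",
      "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3",
  };

  TAOS_RES *res = taos_schemaless_insert(conn, lines, 2, TSDB_SML_TELNET_PROTOCOL,
                                         TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
  if (taos_errno(res) != 0) {
    fprintf(stderr, "schemaless insert failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
  taos_close(conn);
  return 0;
}
```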
@@ -23,7 +23,7 @@ OpenTSDB 行协议同样采用一行字符串来表示一行数据。OpenTSDB
- metric 将作为超级表名。
- timestamp 本行数据对应的时间戳。根据时间戳的长度自动识别时间精度。支持秒和毫秒两种时间精度
- - value 度量值,必须为一个数值。对应的列名也是 “value”。
+ - value 度量值,必须为一个数值。对应的列名是 “_value”。
- 最后一部分是标签集, 用空格分隔不同标签, 所有标签自动转化为 nchar 数据类型;

例如:
@@ -74,7 +74,7 @@ taos> show stables;
Query OK, 2 row(s) in set (0.002544s)

taos> select tbname, * from `meters.current`;
-              tbname              |            ts             |    value     | groupid |        location         |
+              tbname              |           _ts             |   _value     | groupid |        location         |
==================================================================================================================================
 t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.249 | 10.800000000 | 3 | California.LosAngeles |
 t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.250 | 11.300000000 | 3 | California.LosAngeles |
......
@@ -91,7 +91,7 @@ taos> show stables;
Query OK, 2 row(s) in set (0.001954s)

taos> select * from `meters.current`;
-           ts            |    value     |   groupid    |        location         |
+          _ts            |   _value     |   groupid    |        location         |
===================================================================================================================
 2022-03-28 09:56:51.249 | 10.300000000 | 2.000000000 | California.SanFrancisco |
 2022-03-28 09:56:51.250 | 12.600000000 | 2.000000000 | California.SanFrancisco |
......
---
title: 配置参数
-description: 'TDengine 客户端和服务配置列表'
+description: "TDengine 客户端和服务配置列表"
---

## 为服务端指定配置文件
@@ -21,8 +21,6 @@ taosd -C
TDengine 系统的前台交互客户端应用程序为 taos,以及应用驱动,它可以与 taosd 共享同一个配置文件 taos.cfg,也可以使用单独指定配置文件。运行 taos 时,使用参数-c 指定配置文件目录,如 taos -c /home/cfg,表示使用/home/cfg/目录下的 taos.cfg 配置文件中的参数,缺省目录是/etc/taos。更多 taos 的使用方法请见帮助信息 `taos --help`

-**2.0.10.0 之后版本支持命令行以下参数显示当前客户端参数的配置**
-
```bash
taos -C
```
@@ -47,19 +45,19 @@ taos --dump-config

### firstEp

| 属性 | 说明 |
| -------- | -------------------------------------------------------------- |
| 适用范围 | 服务端和客户端均适用 |
| 含义 | taosd 或者 taos 启动时,主动连接的集群中首个 dnode 的 endpoint |
| 缺省值 | localhost:6030 |

### secondEp

| 属性 | 说明 |
| -------- | ------------------------------------------------------------------------------------- |
| 适用范围 | 服务端和客户端均适用 |
| 含义 | taosd 或者 taos 启动时,如果 firstEp 连接不上,尝试连接集群中第二个 dnode 的 endpoint |
| 缺省值 | 无 |
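firstEp 和 secondEp 只在客户端没有显式指定连接地址时生效。下面是一个简短的 C 端示意(假定 `taos_options`/`taos_connect` 的标准行为:host 传 NULL、port 传 0 时驱动回退到 taos.cfg 中配置的 firstEp / secondEp;显式传入地址则覆盖它们):

```c
// Sketch: how a C client falls back to firstEp/secondEp (assumed driver behavior, placeholder paths).
#include <stdio.h>
#include "taos.h"

int main(void) {
  // Point the driver at the directory holding taos.cfg, where firstEp/secondEp are read from.
  taos_options(TSDB_OPTION_CONFIGDIR, "/etc/taos");

  // host == NULL and port == 0: the driver tries firstEp, then secondEp.
  TAOS *conn = taos_connect(NULL, "root", "taosdata", NULL, 0);
  if (conn == NULL) {
    fprintf(stderr, "unable to reach firstEp/secondEp: %s\n", taos_errstr(NULL));
    return 1;
  }

  // An explicit endpoint bypasses firstEp entirely, e.g.:
  // TAOS *direct = taos_connect("dnode2.example.com", "root", "taosdata", NULL, 6030);

  taos_close(conn);
  return 0;
}
```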
### fqdn
@@ -77,7 +75,6 @@ taos --dump-config
| 适用范围 | 仅服务端适用 |
| 含义 | taosd 启动后,对外服务的端口号 |
| 缺省值 | 6030 |
-| 补充说明 | RESTful 服务在 2.4.0.0 之前(不含)由 taosd 提供,默认端口为 6041; 在 2.4.0.0 及后续版本由 taosAdapter,默认端口为 6041 |

:::note
确保集群中所有主机在端口 6030 上的 TCP 协议能够互通。(详细的端口情况请参见下表)
@@ -87,8 +84,8 @@ taos --dump-config
| TCP | 6030 | 客户端与服务端之间通讯,多节点集群的节点间通讯。 | 由配置文件设置 serverPort 决定。 |
| TCP | 6041 | 客户端与服务端之间的 RESTful 通讯。 | 随 serverPort 端口变化。注意 taosAdapter 配置或有不同,请参考相应[文档](/reference/taosadapter/)。 |
| TCP | 6043 | TaosKeeper 监控服务端口。 | 随 TaosKeeper 启动参数设置变化。 |
-| TCP | 6044 | 支持 StatsD 的数据接入端口。 | 随 taosAdapter 启动参数设置变化(2.3.0.1+以上版本)。 |
+| TCP | 6044 | 支持 StatsD 的数据接入端口。 | 随 taosAdapter 启动参数设置变化 |
-| UDP | 6045 | 支持 collectd 数据接入端口。 | 随 taosAdapter 启动参数设置变化(2.3.0.1+以上版本)。 |
+| UDP | 6045 | 支持 collectd 数据接入端口。 | 随 taosAdapter 启动参数设置变化 |
| TCP | 6060 | 企业版内 Monitor 服务的网络端口。 | |

### maxShellConns
@@ -104,28 +101,28 @@ taos --dump-config

### monitor

| 属性 | 说明 |
| -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| 适用范围 | 仅服务端适用 |
| 含义 | 服务器内部的系统监控开关。监控主要负责收集物理节点的负载状况,包括 CPU、内存、硬盘、网络带宽的监控记录,监控信息将通过 HTTP 协议发送给由 `monitorFqdn` 和 `monitorPort` 指定的 TaosKeeper 监控服务 |
| 取值范围 | 0:关闭监控服务, 1:激活监控服务。 |
| 缺省值 | 1 |

### monitorFqdn

| 属性 | 说明 |
| -------- | -------------------------- |
| 适用范围 | 仅服务端适用 |
| 含义 | TaosKeeper 监控服务的 FQDN |
| 缺省值 | 无 |

### monitorPort

| 属性 | 说明 |
| -------- | --------------------------- |
| 适用范围 | 仅服务端适用 |
| 含义 | TaosKeeper 监控服务的端口号 |
| 缺省值 | 6043 |

### monitorInterval
@@ -134,10 +131,9 @@ taos --dump-config
| 适用范围 | 仅服务端适用 |
| 含义 | 监控数据库记录系统参数(CPU/内存)的时间间隔 |
| 单位 | 秒 |
| 取值范围 | 1-200000 |
| 缺省值 | 30 |

### telemetryReporting

| 属性 | 说明 |
@@ -149,25 +145,43 @@ taos --dump-config
## 查询相关

-### queryBufferSize
+### queryPolicy

| 属性 | 说明 |
| -------- | ----------------------------- |
| 适用范围 | 仅客户端适用 |
| 含义 | 查询语句的执行策略 |
| 单位 | 无 |
| 缺省值 | 1 |
| 补充说明 | 1: 只使用 vnode,不使用 qnode <br/> 2: 没有扫描算子的子任务在 qnode 执行,带扫描算子的子任务在 vnode 执行 <br/> 3: vnode 只运行扫描算子,其余算子均在 qnode 执行 |

### querySmaOptimize

| 属性 | 说明 |
| -------- | -------------------- |
| 适用范围 | 仅客户端适用 |
| 含义 | sma index 的优化策略 |
| 单位 | 无 |
| 缺省值 | 0 |
| 补充说明 | 0: 表示不使用 sma index,永远从原始数据进行查询 <br/> 1: 表示使用 sma index,对符合的语句,直接从预计算的结果进行查询 |

-| 属性 | 说明 |
-| -------- | ------------------------------------------------------------------------------------------------------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 为所有并发查询占用保留的内存大小。 |
-| 单位 | MB |
-| 缺省值 | 无 |
-| 补充说明 | 计算规则可以根据实际应用可能的最大并发数和表的数字相乘,再乘 170 。<br/>(2.0.15 以前的版本中,此参数的单位是字节) |

### maxNumOfDistinctRes

| 属性 | 说明 |
| -------- | -------------------------------- |
| 适用范围 | 仅服务端适用 |
| 含义 | 允许返回的 distinct 结果最大行数 |
| 取值范围 | 默认值为 10 万,最大值 1 亿 |
| 缺省值 | 10 万 |
-| 补充说明 | 2.3 版本新增。 |
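queryPolicy 与 querySmaOptimize 都是客户端参数,除了写入 taos.cfg,也可以按连接调整。下面的 C 端示意假定 `ALTER LOCAL` 语句接受这两个选项名;该语句文本并未出现在本次提交中,仅作为假设示例:

```c
// Sketch: switching client-side query settings at runtime (statement text is an assumption).
#include <stdio.h>
#include "taos.h"

static void run(TAOS *conn, const char *sql) {
  TAOS_RES *res = taos_query(conn, sql);
  if (taos_errno(res) != 0) {
    fprintf(stderr, "'%s' failed: %s\n", sql, taos_errstr(res));
  }
  taos_free_result(res);
}

int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  if (conn == NULL) return 1;

  // queryPolicy: 1 = vnode only; 2 = non-scan subtasks on qnode; 3 = vnode runs only scan operators.
  run(conn, "ALTER LOCAL 'queryPolicy' '2'");

  // querySmaOptimize: 1 = answer eligible queries from pre-computed SMA results.
  run(conn, "ALTER LOCAL 'querySmaOptimize' '1'");

  taos_close(conn);
  return 0;
}
```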
## 区域相关
@@ -306,12 +320,12 @@ charset 的有效值是 UTF-8。

### supportVnodes

| 属性 | 说明 |
| -------- | --------------------------- |
| 适用范围 | 仅服务端适用 |
| 含义 | dnode 支持的最大 vnode 数目 |
| 取值范围 | 0-4096 |
| 缺省值 | 256 |

## 时间相关
@@ -366,7 +380,6 @@ charset 的有效值是 UTF-8。
| 单位 | bytes |
| 取值范围 | 0: 对所有查询结果均进行压缩 >0: 查询结果中任意列大小超过该值的消息才进行压缩 -1: 不压缩 |
| 缺省值 | -1 |
-| 补充说明 | 2.3.0.0 版本新增。 |
## 日志相关
@@ -464,7 +477,7 @@ charset 的有效值是 UTF-8。
| 属性 | 说明 |
| -------- | -------------------- |
| 适用范围 | 服务端和客户端均适用 |
| 含义 | query 模块的日志开关 |
| 取值范围 | 同上 |
| 缺省值 | |
@@ -481,7 +494,7 @@ charset 的有效值是 UTF-8。
| 属性 | 说明 |
| -------- | -------------------- |
| 适用范围 | 仅服务端适用 |
| 含义 | dnode 模块的日志开关 |
| 取值范围 | 同上 |
| 缺省值 | 135 |
@@ -490,29 +503,29 @@ charset 的有效值是 UTF-8。
| 属性 | 说明 |
| -------- | -------------------- |
| 适用范围 | 仅服务端适用 |
| 含义 | vnode 模块的日志开关 |
| 取值范围 | 同上 |
| 缺省值 | |

### mDebugFlag

| 属性 | 说明 |
| -------- | -------------------- |
| 适用范围 | 仅服务端适用 |
| 含义 | mnode 模块的日志开关 |
| 取值范围 | 同上 |
| 缺省值 | 135 |

### wDebugFlag

| 属性 | 说明 |
| -------- | ------------------ |
| 适用范围 | 仅服务端适用 |
| 含义 | wal 模块的日志开关 |
| 取值范围 | 同上 |
| 缺省值 | 135 |
### sDebugFlag

| 属性 | 说明 |
@@ -533,57 +546,86 @@ charset 的有效值是 UTF-8。

### tqDebugFlag

| 属性 | 说明 |
| -------- | ----------------- |
| 适用范围 | 仅服务端适用 |
| 含义 | tq 模块的日志开关 |
| 取值范围 | 同上 |
| 缺省值 | |

### fsDebugFlag

| 属性 | 说明 |
| -------- | ----------------- |
| 适用范围 | 仅服务端适用 |
| 含义 | fs 模块的日志开关 |
| 取值范围 | 同上 |
| 缺省值 | |

### udfDebugFlag

| 属性 | 说明 |
| -------- | ------------------ |
| 适用范围 | 仅服务端适用 |
| 含义 | UDF 模块的日志开关 |
| 取值范围 | 同上 |
| 缺省值 | |

### smaDebugFlag

| 属性 | 说明 |
| -------- | ------------------ |
| 适用范围 | 仅服务端适用 |
| 含义 | sma 模块的日志开关 |
| 取值范围 | 同上 |
| 缺省值 | |

### idxDebugFlag

| 属性 | 说明 |
| -------- | -------------------- |
| 适用范围 | 仅服务端适用 |
| 含义 | index 模块的日志开关 |
| 取值范围 | 同上 |
| 缺省值 | |

### tdbDebugFlag

| 属性 | 说明 |
| -------- | ------------------ |
| 适用范围 | 仅服务端适用 |
| 含义 | tdb 模块的日志开关 |
| 取值范围 | 同上 |
| 缺省值 | |
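这些模块日志开关都可以在运行中的 dnode 上动态调整,无需重启;本次提交末尾的 sim 脚本就是用 `alter dnode 1 'tdbDebugFlag 131'` 这样的语句做的。下面是等价操作的 C 端示意(连接参数为占位符):

```c
// Sketch: raising a module log flag on dnode 1 at runtime, mirroring the sim test in this commit.
#include <stdio.h>
#include "taos.h"

int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  if (conn == NULL) return 1;

  // 135 turns on debug-level logs for the tdb module on dnode 1; 131 keeps only warning/error output.
  TAOS_RES *res = taos_query(conn, "ALTER DNODE 1 'tdbDebugFlag 135'");
  if (taos_errno(res) != 0) {
    fprintf(stderr, "alter dnode failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);

  taos_close(conn);
  return 0;
}
```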
## Schemaless 相关
### smlChildTableName
| 属性 | 说明 |
| -------- | ------------------------- |
| 适用范围 | 仅客户端适用 |
| 含义 | schemaless 自定义的子表名 |
| 类型 | 字符串 |
| 缺省值 | 无 |
### smlTagName
| 属性 | 说明 |
| -------- | ------------------------------------ |
| 适用范围 | 仅客户端适用 |
| 含义 | schemaless tag 为空时默认的 tag 名字 |
| 类型 | 字符串 |
| 缺省值 | _tag_null |
### smlDataFormat
| 属性 | 说明 |
| -------- | ----------------------------- |
| 适用范围 | 仅客户端适用 |
| 含义 | schemaless 列数据是否顺序一致 |
| 值域 | 0:不一致;1: 一致 |
| 缺省值 | 1 |
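上面三个选项决定 schemaless 写入在客户端如何生成子表名、空标签名以及如何解析列。下面用 InfluxDB 行协议示意 smlDataFormat 的含义:默认值 1 假定同一批次中每一行的字段集合与顺序一致,字段顺序或字段集不一致的批次应将 smlDataFormat 设为 0(或拆分批次)。示例中的 API 名称取自 C 客户端头文件,连接参数为占位符,并非出自本次提交:

```c
// Sketch: why smlDataFormat matters for InfluxDB line protocol batches (assumed client behavior).
#include <stdio.h>
#include "taos.h"

int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", "test", 6030);
  if (conn == NULL) return 1;

  // Every line carries the same fields in the same order: fine with the default smlDataFormat = 1.
  char *ordered[] = {
      "meters,location=SanFrancisco,groupid=2 current=10.3,voltage=219 1648432611249",
      "meters,location=SanFrancisco,groupid=2 current=12.6,voltage=218 1648432611250",
  };

  // Field order and field set differ between lines: needs smlDataFormat = 0 in taos.cfg.
  char *unordered[] = {
      "meters,location=LosAngeles,groupid=3 voltage=221,current=11.3 1648432611249",
      "meters,location=LosAngeles,groupid=3 current=11.8 1648432611250",
  };

  TAOS_RES *res = taos_schemaless_insert(conn, ordered, 2, TSDB_SML_LINE_PROTOCOL,
                                         TSDB_SML_TIMESTAMP_MILLI_SECONDS);
  taos_free_result(res);

  res = taos_schemaless_insert(conn, unordered, 2, TSDB_SML_LINE_PROTOCOL,
                               TSDB_SML_TIMESTAMP_MILLI_SECONDS);
  if (taos_errno(res) != 0) {
    fprintf(stderr, "expected failure with smlDataFormat=1: %s\n", taos_errstr(res));
  }
  taos_free_result(res);

  taos_close(conn);
  return 0;
}
```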
## 其他
@@ -596,3 +638,12 @@ charset 的有效值是 UTF-8。
| 取值范围 | 0:否,1:是 |
| 缺省值 | 1 |
| 补充说明 | 不同的启动方式,生成 core 文件的目录如下:1、systemctl start taosd 启动:生成的 core 在根目录下 <br/> 2、手动启动,就在 taosd 执行目录下。 |
### udf
| 属性 | 说明 |
| -------- | ------------------ |
| 适用范围 | 仅服务端适用 |
| 含义 | 是否启动 udf 服务 |
| 取值范围 | 0: 不启动;1:启动 |
| 缺省值 | 1 |
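udf 保持缺省值 1 时,taosd 会一并拉起 udfd 服务,之后才能注册并调用用户自定义函数。下面是一个带有假设成分的 C 端示意(SQL 语句与 .so 路径均为示例,并非出自本次提交):

```c
// Sketch: registering and calling a scalar UDF once the udf service is enabled (illustrative SQL, placeholder paths).
#include <stdio.h>
#include "taos.h"

static void run(TAOS *conn, const char *sql) {
  TAOS_RES *res = taos_query(conn, sql);
  if (taos_errno(res) != 0) {
    fprintf(stderr, "'%s' failed: %s\n", sql, taos_errstr(res));
  }
  taos_free_result(res);
}

int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", "test", 6030);
  if (conn == NULL) return 1;

  // The library path and function name are placeholders for a UDF built against the TDengine UDF headers.
  run(conn, "CREATE FUNCTION bit_and AS '/usr/local/taos/lib/libbitand.so' OUTPUTTYPE INT");
  run(conn, "SELECT bit_and(c1, c2) FROM meters");

  taos_close(conn);
  return 0;
}
```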
@@ -89,7 +89,6 @@ extern uint16_t tsTelemPort;

// query buffer management
extern int32_t tsQueryBufferSize;       // maximum allowed usage buffer size in MB for each data node during query processing
extern int64_t tsQueryBufferSizeBytes;  // maximum allowed usage buffer size in byte for each data node
-extern bool    tsRetrieveBlockingModel;  // retrieve threads will be blocked

// query client
extern int32_t tsQueryPolicy;
......
@@ -225,9 +225,9 @@ typedef struct SRequestObj {
  SArray*              targetTableList;
  SQueryExecMetric     metric;
  SRequestSendRecvBody body;
  bool                 syncQuery;     // todo refactor: async query object
  bool                 stableQuery;   // todo refactor
  bool                 validateOnly;  // todo refactor
  bool                 killed;
  uint32_t             prevCode;  // previous error code: todo refactor, add update flag for catalog
  uint32_t             retry;
......
@@ -591,7 +591,7 @@ int32_t buildAsyncExecNodeList(SRequestObj* pRequest, SArray** pNodeList, SArray
  return code;
}

-void freeVgList(void *list) {
+void freeVgList(void* list) {
  SArray* pList = *(SArray**)list;
  taosArrayDestroy(pList);
}
@@ -1278,8 +1278,8 @@ int32_t doProcessMsgFromServer(void* param) {
    char tbuf[40] = {0};
    TRACE_TO_STR(trace, tbuf);

-    tscDebug("processMsgFromServer handle %p, message: %s, code: %s, gtid: %s", pMsg->info.handle, TMSG_INFO(pMsg->msgType), tstrerror(pMsg->code),
-             tbuf);
+    tscDebug("processMsgFromServer handle %p, message: %s, code: %s, gtid: %s", pMsg->info.handle,
+             TMSG_INFO(pMsg->msgType), tstrerror(pMsg->code), tbuf);

  if (pSendInfo->requestObjRefId != 0) {
    SRequestObj* pRequest = (SRequestObj*)taosAcquireRef(clientReqRefPool, pSendInfo->requestObjRefId);
@@ -2114,7 +2114,7 @@ TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly) {
    return NULL;
  }

-  TAOS_RES* pRes = execQuery(connId, sql, sqlLen, validateOnly);
+  TAOS_RES* pRes = execQuery(*(int64_t*)taos, sql, sqlLen, validateOnly);
  return pRes;
#endif
}
} }
@@ -124,9 +124,6 @@ int32_t tsMinIntervalTime = 1;
int32_t tsQueryBufferSize = -1;
int64_t tsQueryBufferSizeBytes = -1;

-// in retrieve blocking model, the retrieve threads will wait for the completion of the query processing.
-bool tsRetrieveBlockingModel = false;

// tsdb config
// For backward compatibility
bool tsdbForceKeepFile = false;
@@ -296,6 +293,7 @@ static int32_t taosAddServerLogCfg(SConfig *pCfg) {
  if (cfgAddInt32(pCfg, "smaDebugFlag", smaDebugFlag, 0, 255, 0) != 0) return -1;
  if (cfgAddInt32(pCfg, "idxDebugFlag", idxDebugFlag, 0, 255, 0) != 0) return -1;
  if (cfgAddInt32(pCfg, "tdbDebugFlag", tdbDebugFlag, 0, 255, 0) != 0) return -1;
+  if (cfgAddInt32(pCfg, "metaDebugFlag", metaDebugFlag, 0, 255, 0) != 0) return -1;
  return 0;
}
@@ -362,7 +360,6 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
  if (cfgAddInt32(pCfg, "maxNumOfDistinctRes", tsMaxNumOfDistinctResults, 10 * 10000, 10000 * 10000, 0) != 0) return -1;
  if (cfgAddInt32(pCfg, "countAlwaysReturnValue", tsCountAlwaysReturnValue, 0, 1, 0) != 0) return -1;
  if (cfgAddInt32(pCfg, "queryBufferSize", tsQueryBufferSize, -1, 500000000000, 0) != 0) return -1;
-  if (cfgAddBool(pCfg, "retrieveBlockingModel", tsRetrieveBlockingModel, 0) != 0) return -1;
  if (cfgAddBool(pCfg, "printAuth", tsPrintAuth, 0) != 0) return -1;
  if (cfgAddInt32(pCfg, "multiProcess", tsMultiProcess, 0, 2, 0) != 0) return -1;
@@ -476,6 +473,7 @@ static void taosSetServerLogCfg(SConfig *pCfg) {
  smaDebugFlag = cfgGetItem(pCfg, "smaDebugFlag")->i32;
  idxDebugFlag = cfgGetItem(pCfg, "idxDebugFlag")->i32;
  tdbDebugFlag = cfgGetItem(pCfg, "tdbDebugFlag")->i32;
+  metaDebugFlag = cfgGetItem(pCfg, "metaDebugFlag")->i32;
}

static int32_t taosSetClientCfg(SConfig *pCfg) {
@@ -547,7 +545,6 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
  tsMaxNumOfDistinctResults = cfgGetItem(pCfg, "maxNumOfDistinctRes")->i32;
  tsCountAlwaysReturnValue = cfgGetItem(pCfg, "countAlwaysReturnValue")->i32;
  tsQueryBufferSize = cfgGetItem(pCfg, "queryBufferSize")->i32;
-  tsRetrieveBlockingModel = cfgGetItem(pCfg, "retrieveBlockingModel")->bval;
  tsPrintAuth = cfgGetItem(pCfg, "printAuth")->bval;
  tsMultiProcess = cfgGetItem(pCfg, "multiProcess")->bval;
@@ -832,9 +829,7 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
      break;
    }
    case 'r': {
-      if (strcasecmp("retrieveBlockingModel", name) == 0) {
-        tsRetrieveBlockingModel = cfgGetItem(pCfg, "retrieveBlockingModel")->bval;
-      } else if (strcasecmp("rpcQueueMemoryAllowed", name) == 0) {
+      if (strcasecmp("rpcQueueMemoryAllowed", name) == 0) {
        tsRpcQueueMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64;
      } else if (strcasecmp("rpcDebugFlag", name) == 0) {
        rpcDebugFlag = cfgGetItem(pCfg, "rpcDebugFlag")->i32;
@@ -1100,12 +1095,12 @@ void taosCfgDynamicOptions(const char *option, const char *value) {
  const char *options[] = {
      "dDebugFlag",   "vDebugFlag",  "mDebugFlag",   "wDebugFlag",   "sDebugFlag",   "tsdbDebugFlag",
      "tqDebugFlag",  "fsDebugFlag", "udfDebugFlag", "smaDebugFlag", "idxDebugFlag", "tdbDebugFlag",
-      "tmrDebugFlag", "uDebugFlag",  "smaDebugFlag", "rpcDebugFlag", "qDebugFlag",
+      "tmrDebugFlag", "uDebugFlag",  "smaDebugFlag", "rpcDebugFlag", "qDebugFlag",   "metaDebugFlag",
  };
  int32_t *optionVars[] = {
      &dDebugFlag,   &vDebugFlag,  &mDebugFlag,   &wDebugFlag,   &sDebugFlag,   &tsdbDebugFlag,
      &tqDebugFlag,  &fsDebugFlag, &udfDebugFlag, &smaDebugFlag, &idxDebugFlag, &tdbDebugFlag,
-      &tmrDebugFlag, &uDebugFlag,  &smaDebugFlag, &rpcDebugFlag, &qDebugFlag,
+      &tmrDebugFlag, &uDebugFlag,  &smaDebugFlag, &rpcDebugFlag, &qDebugFlag,   &metaDebugFlag,
  };
  int32_t optionSize = tListLen(options);
@@ -1152,5 +1147,6 @@ void taosSetAllDebugFlag(int32_t flag) {
  taosSetDebugFlag(&smaDebugFlag, "smaDebugFlag", flag);
  taosSetDebugFlag(&idxDebugFlag, "idxDebugFlag", flag);
  taosSetDebugFlag(&tdbDebugFlag, "tdbDebugFlag", flag);
+  taosSetDebugFlag(&metaDebugFlag, "metaDebugFlag", flag);
  uInfo("all debug flag are set to %d", flag);
}
@@ -788,9 +788,9 @@ _OVER:
static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
  SMnode     *pMnode = pReq->info.node;
  const char *options[] = {
-      "debugFlag",     "dDebugFlag",   "vDebugFlag",  "mDebugFlag",   "wDebugFlag",   "sDebugFlag",
-      "tsdbDebugFlag", "tqDebugFlag",  "fsDebugFlag", "udfDebugFlag", "smaDebugFlag", "idxDebugFlag",
-      "tdbDebugFlag",  "tmrDebugFlag", "uDebugFlag",  "smaDebugFlag", "rpcDebugFlag", "qDebugFlag",
+      "debugFlag",   "dDebugFlag",   "vDebugFlag",   "mDebugFlag",   "wDebugFlag",   "sDebugFlag",   "tsdbDebugFlag",
+      "tqDebugFlag", "fsDebugFlag",  "udfDebugFlag", "smaDebugFlag", "idxDebugFlag", "tdbDebugFlag", "tmrDebugFlag",
+      "uDebugFlag",  "smaDebugFlag", "rpcDebugFlag", "qDebugFlag",   "metaDebugFlag",
  };
  int32_t optionSize = tListLen(options);
......
@@ -145,6 +145,8 @@ int32_t metaSnapWriterOpen(SMeta* pMeta, int64_t sver, int64_t ever, SMetaSnapWr
  pWriter->sver = sver;
  pWriter->ever = ever;

+  metaBegin(pMeta);
+
  *ppWriter = pWriter;
  return code;
......
@@ -175,7 +175,7 @@ int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, ST
    cacheRow = (STSRow *)taosLRUCacheValue(pCache, h);
    if (row->ts >= cacheRow->ts) {
      if (row->ts == cacheRow->ts) {
-        STSRow    *mergedRow;
+        STSRow    *mergedRow = NULL;
        SRowMerger merger = {0};
        STSchema  *pTSchema = metaGetTbTSchema(pTsdb->pVnode->pMeta, uid, -1);
......
@@ -374,11 +374,12 @@ static int32_t tsdbGetOrCreateTbData(SMemTable *pMemTable, tb_uid_t suid, tb_uid
  p = taosArrayInsert(pMemTable->aTbData, idx, &pTbData);
  taosWUnLockLatch(&pMemTable->latch);

+  tsdbDebug("vgId:%d add table data %p at idx:%d", TD_VID(pMemTable->pTsdb->pVnode), pTbData, idx);
+
  if (p == NULL) {
    code = TSDB_CODE_OUT_OF_MEMORY;
    goto _err;
  }

_exit:
  *ppTbData = pTbData;
  return code;
......
@@ -694,8 +694,8 @@ static int32_t tsdbSnapWriteTableDataImpl(STsdbSnapWriter* pWriter) {
    if (pWriter->bDataW.nRow < pWriter->maxRow * 4 / 5) continue;

  _write_block:
-    code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataW, NULL, NULL, pWriter->pBlockIdx, &pWriter->blockW,
-                              pWriter->cmprAlg);
+    code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataW, NULL, NULL, pWriter->pBlockIdxW,
+                              &pWriter->blockW, pWriter->cmprAlg);
    if (code) goto _err;

    code = tMapDataPutItem(&pWriter->mBlockW, &pWriter->blockW, tPutBlock);
@@ -756,7 +756,7 @@ static int32_t tsdbSnapWriteTableData(STsdbSnapWriter* pWriter, TABLEID id) {
    if (pWriter->iBlockIdx < taosArrayGetSize(pWriter->aBlockIdx)) {
      ASSERT(pWriter->pDataFReader);

-      SBlockIdx* pBlockIdx = (SBlockIdx*)taosArrayGet(pWriter->aBlockIdx, pWriter->iBlock);
+      SBlockIdx* pBlockIdx = (SBlockIdx*)taosArrayGet(pWriter->aBlockIdx, pWriter->iBlockIdx);
      int32_t    c = tTABLEIDCmprFn(pBlockIdx, &id);

      ASSERT(c >= 0);
@@ -833,7 +833,7 @@ static int32_t tsdbSnapWriteDataEnd(STsdbSnapWriter* pWriter) {
  }

_exit:
-  tsdbError("vgId:%d vnode snapshot tsdb writer data end", TD_VID(pTsdb->pVnode));
+  tsdbInfo("vgId:%d vnode snapshot tsdb writer data end", TD_VID(pTsdb->pVnode));
  return code;

_err:
......
@@ -288,7 +288,7 @@ int vnodeCommit(SVnode *pVnode) {
  // apply the commit (TODO)
  walEndSnapshot(pVnode->pWal);

-  vInfo("vgId:%d, commit over", TD_VID(pVnode));
+  vInfo("vgId:%d, commit end", TD_VID(pVnode));

  return 0;
}
......
@@ -180,6 +180,7 @@ struct SVSnapWriter {
  SVnode *pVnode;
  int64_t sver;
  int64_t ever;
+  int64_t commitID;
  int64_t index;

  // meta
  SMetaSnapWriter *pMetaSnapWriter;
@@ -201,7 +202,16 @@ int32_t vnodeSnapWriterOpen(SVnode *pVnode, int64_t sver, int64_t ever, SVSnapWr
  pWriter->sver = sver;
  pWriter->ever = ever;

-  vInfo("vgId:%d vnode snapshot writer opened", TD_VID(pVnode));
+  // commit it
+  code = vnodeCommit(pVnode);
+  if (code) goto _err;
+
+  // inc commit ID
+  pVnode->state.commitID++;
+  pWriter->commitID = pVnode->state.commitID;
+
+  vInfo("vgId:%d vnode snapshot writer opened, sver:%" PRId64 " ever:%" PRId64 " commit id:%" PRId64, TD_VID(pVnode),
+        sver, ever, pWriter->commitID);
  *ppWriter = pWriter;
  return code;
@@ -244,6 +254,8 @@ int32_t vnodeSnapWriterClose(SVSnapWriter *pWriter, int8_t rollback, SSnapshot *
    code = vnodeCommitInfo(dir, &info);
    if (code) goto _err;

+    vnodeBegin(pVnode);
+
  } else {
    ASSERT(0);
  }
......
@@ -251,15 +251,29 @@ int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bo
      }
    }

+    bool assignUid = false;
+    if (LIST_LENGTH(pScanInfo->pGroupTags) > 0) {
+      SNode* p = nodesListGetNode(pScanInfo->pGroupTags, 0);
+      if (p->type == QUERY_NODE_FUNCTION) {
+        // partition by tbname/group by tbname
+        assignUid = (strcmp(((struct SFunctionNode*)p)->functionName, "tbname") == 0);
+      }
+    }
+
    for (int32_t i = 0; i < taosArrayGetSize(qa); ++i) {
      uint64_t*     uid = taosArrayGet(qa, i);
      STableKeyInfo keyInfo = {.uid = *uid, .groupId = 0};

      if (bufLen > 0) {
-        code = getGroupIdFromTagsVal(pScanInfo->readHandle.meta, keyInfo.uid, pScanInfo->pGroupTags, keyBuf,
-                                     &keyInfo.groupId);
-        if (code != TSDB_CODE_SUCCESS) {
-          return code;
-        }
+        if (assignUid) {
+          keyInfo.groupId = keyInfo.uid;
+        } else {
+          code = getGroupIdFromTagsVal(pScanInfo->readHandle.meta, keyInfo.uid, pScanInfo->pGroupTags, keyBuf,
+                                       &keyInfo.groupId);
+          if (code != TSDB_CODE_SUCCESS) {
+            return code;
+          }
+        }
      }
    }
......
@@ -3140,6 +3140,26 @@ void destroyStreamAggSupporter(SStreamAggSupporter* pSup) {
  blockDataDestroy(pSup->pScanBlock);
}
void destroyStateWinInfo(void* ptr) {
if (ptr == NULL) {
return;
}
SStateWindowInfo* pWin = (SStateWindowInfo*) ptr;
taosMemoryFreeClear(pWin->stateKey.pData);
}
void destroyStateStreamAggSupporter(SStreamAggSupporter* pSup) {
taosMemoryFreeClear(pSup->pKeyBuf);
void** pIte = NULL;
while ((pIte = taosHashIterate(pSup->pResultRows, pIte)) != NULL) {
SArray* pWins = (SArray*)(*pIte);
taosArrayDestroyEx(pWins, (FDelete)destroyStateWinInfo);
}
taosHashCleanup(pSup->pResultRows);
destroyDiskbasedBuf(pSup->pResultBuf);
blockDataDestroy(pSup->pScanBlock);
}
void destroyStreamSessionAggOperatorInfo(void* param, int32_t numOfOutput) {
  SStreamSessionAggOperatorInfo* pInfo = (SStreamSessionAggOperatorInfo*)param;
  cleanupBasicInfo(&pInfo->binfo);
@@ -3607,12 +3627,17 @@ static void doStreamSessionAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSData
    }
  }

-void deleteWindow(SArray* pWinInfos, int32_t index) {
+void deleteWindow(SArray* pWinInfos, int32_t index, FDelete fp) {
  ASSERT(index >= 0 && index < taosArrayGetSize(pWinInfos));
if (fp) {
void* ptr = taosArrayGet(pWinInfos, index);
fp(ptr);
}
  taosArrayRemove(pWinInfos, index);
}

-static void doDeleteTimeWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBlock, int64_t gap, SArray* result) {
+static void doDeleteTimeWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBlock, int64_t gap,
+                                SArray* result, FDelete fp) {
  SColumnInfoData* pStartTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
  TSKEY*           startDatas = (TSKEY*)pStartTsCol->pData;
  SColumnInfoData* pEndTsCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
@@ -3626,7 +3651,7 @@ static void doDeleteTimeWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBloc
    if (!pCurWin) {
      break;
    }
-    deleteWindow(pAggSup->pCurWins, winIndex);
+    deleteWindow(pAggSup->pCurWins, winIndex, fp);
    if (result) {
      taosArrayPush(result, pCurWin);
    }
@@ -3751,7 +3776,7 @@ SResultWindowInfo* getResWinForSession(void* pData) { return (SResultWindowInfo*
SResultWindowInfo* getResWinForState(void* pData) { return &((SStateWindowInfo*)pData)->winInfo; }

-int32_t closeSessionWindow(SHashObj* pHashMap, STimeWindowAggSupp* pTwSup, SArray* pClosed, __get_win_info_ fn,
-                           bool delete) {
+int32_t closeSessionWindow(SHashObj* pHashMap, STimeWindowAggSupp* pTwSup, SArray* pClosed, __get_win_info_ fn,
+                           bool delete, FDelete fp) {
  // Todo(liuyao) save window to tdb
  void** pIte = NULL;
  size_t keyLen = 0;
@@ -3773,7 +3798,7 @@ int32_t closeSessionWindow(SHashObj* pHashMap, STimeWindowAggSupp* pTwSup, SArra
        pSeWin->isOutput = true;
      }
      if (delete) {
-        deleteWindow(pWins, i);
+        deleteWindow(pWins, i, fp);
        i--;
        size = taosArrayGetSize(pWins);
      }
@@ -3786,13 +3811,13 @@ int32_t closeSessionWindow(SHashObj* pHashMap, STimeWindowAggSupp* pTwSup, SArra
  return TSDB_CODE_SUCCESS;
}

-static void closeChildSessionWindow(SArray* pChildren, TSKEY maxTs, bool delete) {
+static void closeChildSessionWindow(SArray* pChildren, TSKEY maxTs, bool delete, FDelete fp) {
  int32_t size = taosArrayGetSize(pChildren);
  for (int32_t i = 0; i < size; i++) {
    SOperatorInfo*                 pChildOp = taosArrayGetP(pChildren, i);
    SStreamSessionAggOperatorInfo* pChInfo = pChildOp->info;
    pChInfo->twAggSup.maxTs = TMAX(pChInfo->twAggSup.maxTs, maxTs);
-    closeSessionWindow(pChInfo->streamAggSup.pResultRows, &pChInfo->twAggSup, NULL, getResWinForSession, delete);
+    closeSessionWindow(pChInfo->streamAggSup.pResultRows, &pChInfo->twAggSup, NULL, getResWinForSession, delete, fp);
  }
}
@@ -3870,13 +3895,13 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) {
    } else if (pBlock->info.type == STREAM_DELETE_DATA || pBlock->info.type == STREAM_DELETE_RESULT) {
      SArray* pWins = taosArrayInit(16, sizeof(SResultWindowInfo));
      // gap must be 0
-      doDeleteTimeWindows(&pInfo->streamAggSup, pBlock, 0, pWins);
+      doDeleteTimeWindows(&pInfo->streamAggSup, pBlock, 0, pWins, NULL);
      if (IS_FINAL_OP(pInfo)) {
        int32_t                        childIndex = getChildIndex(pBlock);
        SOperatorInfo*                 pChildOp = taosArrayGetP(pInfo->pChildren, childIndex);
        SStreamSessionAggOperatorInfo* pChildInfo = pChildOp->info;
        // gap must be 0
-        doDeleteTimeWindows(&pChildInfo->streamAggSup, pBlock, 0, NULL);
+        doDeleteTimeWindows(&pChildInfo->streamAggSup, pBlock, 0, NULL, NULL);
        rebuildTimeWindow(pInfo, pWins, pBlock->info.groupId, pOperator->exprSupp.numOfExprs, pOperator);
      }
      copyDeleteWindowInfo(pWins, pInfo->pStDeleted);
@@ -3918,8 +3943,8 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) {
  pOperator->status = OP_RES_TO_RETURN;

-  closeSessionWindow(pInfo->streamAggSup.pResultRows, &pInfo->twAggSup, pUpdated, getResWinForSession,
-                     pInfo->ignoreExpiredData);
-  closeChildSessionWindow(pInfo->pChildren, pInfo->twAggSup.maxTs, pInfo->ignoreExpiredData);
+  closeSessionWindow(pInfo->streamAggSup.pResultRows, &pInfo->twAggSup, pUpdated, getResWinForSession,
+                     pInfo->ignoreExpiredData, NULL);
+  closeChildSessionWindow(pInfo->pChildren, pInfo->twAggSup.maxTs, pInfo->ignoreExpiredData, NULL);
  copyUpdateResult(pStUpdated, pUpdated);
  taosHashCleanup(pStUpdated);
@@ -4014,7 +4039,7 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) {
      break;
    } else if (pBlock->info.type == STREAM_DELETE_DATA || pBlock->info.type == STREAM_DELETE_RESULT) {
      // gap must be 0
-      doDeleteTimeWindows(&pInfo->streamAggSup, pBlock, 0, NULL);
+      doDeleteTimeWindows(&pInfo->streamAggSup, pBlock, 0, NULL, NULL);
      copyDataBlock(pInfo->pDelRes, pBlock);
      pInfo->pDelRes->info.type = STREAM_DELETE_RESULT;
      break;
@@ -4120,7 +4145,7 @@ _error:
void destroyStreamStateOperatorInfo(void* param, int32_t numOfOutput) {
  SStreamStateAggOperatorInfo* pInfo = (SStreamStateAggOperatorInfo*)param;
  cleanupBasicInfo(&pInfo->binfo);
-  destroyStreamAggSupporter(&pInfo->streamAggSup);
+  destroyStateStreamAggSupporter(&pInfo->streamAggSup);
  cleanupGroupResInfo(&pInfo->groupResInfo);
  if (pInfo->pChildren != NULL) {
    int32_t size = taosArrayGetSize(pInfo->pChildren);
@@ -4132,6 +4157,10 @@ void destroyStreamStateOperatorInfo(void* param, int32_t numOfOutput) {
      taosMemoryFreeClear(pChInfo);
    }
  }
colDataDestroy(&pInfo->twAggSup.timeWindowData);
blockDataDestroy(pInfo->pDelRes);
taosHashCleanup(pInfo->pSeDeleted);
destroySqlFunctionCtx(pInfo->pDummyCtx, 0);
  taosMemoryFreeClear(param);
}
@@ -4314,7 +4343,7 @@ static void doClearStateWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBloc
                    pSeDeleted);
    ASSERT(isTsInWindow(pCurWin, tsCol[i]) || isEqualStateKey(pCurWin, pKeyData));
    taosHashRemove(pSeUpdated, &pCurWin->winInfo.pos, sizeof(SResultRowPosition));
-    deleteWindow(pAggSup->pCurWins, winIndex);
+    deleteWindow(pAggSup->pCurWins, winIndex, destroyStateWinInfo);
  }
}
@@ -4357,7 +4386,7 @@ static void doStreamStateAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl
      appendOneRow(pAggSup->pScanBlock, &pCurWin->winInfo.win.skey, &pCurWin->winInfo.win.ekey,
                   &pSDataBlock->info.groupId);
      taosHashRemove(pSeUpdated, &pCurWin->winInfo.pos, sizeof(SResultRowPosition));
-      deleteWindow(pAggSup->pCurWins, winIndex);
+      deleteWindow(pAggSup->pCurWins, winIndex, destroyStateWinInfo);
      continue;
    }
    code = doOneStateWindowAgg(pInfo, pSDataBlock, &pCurWin->winInfo, &pResult, i, winRows, numOfOutput, pOperator);
@@ -4415,7 +4444,7 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) {
      continue;
    } else if (pBlock->info.type == STREAM_DELETE_DATA) {
      SArray* pWins = taosArrayInit(16, sizeof(SResultWindowInfo));
-      doDeleteTimeWindows(&pInfo->streamAggSup, pBlock, 0, pWins);
+      doDeleteTimeWindows(&pInfo->streamAggSup, pBlock, 0, pWins, destroyStateWinInfo);
      copyDeleteWindowInfo(pWins, pInfo->pSeDeleted);
      taosArrayDestroy(pWins);
      continue;
@@ -4437,8 +4466,8 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) {
  pOperator->status = OP_RES_TO_RETURN;

-  closeSessionWindow(pInfo->streamAggSup.pResultRows, &pInfo->twAggSup, pUpdated, getResWinForState,
-                     pInfo->ignoreExpiredData);
-  closeChildSessionWindow(pInfo->pChildren, pInfo->twAggSup.maxTs, pInfo->ignoreExpiredData);
+  closeSessionWindow(pInfo->streamAggSup.pResultRows, &pInfo->twAggSup, pUpdated, getResWinForState,
+                     pInfo->ignoreExpiredData, destroyStateWinInfo);
+  // closeChildSessionWindow(pInfo->pChildren, pInfo->twAggSup.maxTs, pInfo->ignoreExpiredData, destroyStateWinInfo);
  copyUpdateResult(pSeUpdated, pUpdated);
  taosHashCleanup(pSeUpdated);
......
@@ -2068,7 +2068,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
  {
    .name = "apercentile",
    .type = FUNCTION_TYPE_APERCENTILE,
-    .classification = FUNC_MGT_AGG_FUNC,
+    .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC,
    .translateFunc = translateApercentile,
    .getEnvFunc   = getApercentileFuncEnv,
    .initFunc     = apercentileFunctionSetup,
@@ -2083,7 +2083,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
  },
  {
    .name = "_apercentile_partial",
-    .type = FUNCTION_TYPE_APERCENTILE_PARTIAL,
+    .type = FUNCTION_TYPE_APERCENTILE_PARTIAL | FUNC_MGT_TIMELINE_FUNC,
    .classification = FUNC_MGT_AGG_FUNC,
    .translateFunc = translateApercentilePartial,
    .getEnvFunc   = getApercentileFuncEnv,
@@ -2096,7 +2096,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
  {
    .name = "_apercentile_merge",
    .type = FUNCTION_TYPE_APERCENTILE_MERGE,
-    .classification = FUNC_MGT_AGG_FUNC,
+    .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC,
    .translateFunc = translateApercentileMerge,
    .getEnvFunc   = getApercentileFuncEnv,
    .initFunc     = apercentileFunctionSetup,
@@ -2358,7 +2358,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
  {
    .name = "histogram",
    .type = FUNCTION_TYPE_HISTOGRAM,
-    .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC,
+    .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC,
    .translateFunc = translateHistogram,
    .getEnvFunc   = getHistogramFuncEnv,
    .initFunc     = histogramFunctionSetup,
@@ -2373,7 +2373,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
  {
    .name = "_histogram_partial",
    .type = FUNCTION_TYPE_HISTOGRAM_PARTIAL,
-    .classification = FUNC_MGT_AGG_FUNC,
+    .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC,
    .translateFunc = translateHistogramPartial,
    .getEnvFunc   = getHistogramFuncEnv,
    .initFunc     = histogramFunctionSetup,
@@ -2385,7 +2385,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
  {
    .name = "_histogram_merge",
    .type = FUNCTION_TYPE_HISTOGRAM_MERGE,
-    .classification = FUNC_MGT_AGG_FUNC,
+    .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC,
    .translateFunc = translateHistogramMerge,
    .getEnvFunc   = getHistogramFuncEnv,
    .initFunc     = functionSetup,
......
@@ -34,6 +34,7 @@ typedef struct STranslateContext {
  SMsgBuf      msgBuf;
  SArray*      pNsLevel;  // element is SArray*, the element of this subarray is STableNode*
  int32_t      currLevel;
+  int32_t      levelNo;
  ESqlClause   currClause;
  SNode*       pCurrStmt;
  SCmdMsgInfo* pCmdMsg;
@@ -354,6 +355,7 @@ static int32_t initTranslateContext(SParseContext* pParseCxt, SParseMetaCache* p
  pCxt->msgBuf.len = pParseCxt->msgLen;
  pCxt->pNsLevel = taosArrayInit(TARRAY_MIN_SIZE, POINTER_BYTES);
  pCxt->currLevel = 0;
+  pCxt->levelNo = 0;
  pCxt->currClause = 0;
  pCxt->pMetaCache = pMetaCache;
  pCxt->pDbs = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
@@ -4960,13 +4962,14 @@ static int32_t translateQuery(STranslateContext* pCxt, SNode* pNode) {
}

static int32_t translateSubquery(STranslateContext* pCxt, SNode* pNode) {
-  ++(pCxt->currLevel);
  ESqlClause currClause = pCxt->currClause;
  SNode*     pCurrStmt = pCxt->pCurrStmt;
+  int32_t    currLevel = pCxt->currLevel;
+  pCxt->currLevel = ++(pCxt->levelNo);
  int32_t    code = translateQuery(pCxt, pNode);
-  --(pCxt->currLevel);
  pCxt->currClause = currClause;
  pCxt->pCurrStmt = pCurrStmt;
+  pCxt->currLevel = currLevel;
  return code;
}
......
@@ -513,7 +513,7 @@ int32_t schGenerateCallBackInfo(SSchJob *pJob, SSchTask *pTask, void *msg, uint3
    SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
  }

  msgSendInfo->paramFreeFp = taosMemoryFree;
  SCH_ERR_JRET(schMakeCallbackParam(pJob, pTask, msgType, isHb, trans, &msgSendInfo->param));

  SCH_ERR_JRET(schGetCallbackFp(msgType, &msgSendInfo->fp));
@@ -541,7 +541,7 @@ _return:
  }

  taosMemoryFree(msg);

  SCH_RET(code);
}
@@ -681,7 +681,7 @@ int32_t schMakeHbRpcCtx(SSchJob *pJob, SSchTask *pTask, SRpcCtx *pCtx) {
  param->pTrans = pJob->conn.pTrans;

  pMsgSendInfo->param = param;
  pMsgSendInfo->paramFreeFp = taosMemoryFree;
  pMsgSendInfo->fp = fp;

  SRpcCtxVal ctxVal = {.val = pMsgSendInfo, .clone = schCloneSMsgSendInfo};
@@ -801,7 +801,7 @@ int32_t schCloneSMsgSendInfo(void *src, void **dst) {
  pDst->param = NULL;

  SCH_ERR_JRET(schCloneCallbackParam(pSrc->param, (SSchCallbackParamHeader **)&pDst->param));
  pDst->paramFreeFp = taosMemoryFree;

  *dst = pDst;
@@ -1094,14 +1094,29 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr,
      break;
  }

+#if 1
  SSchTrans trans = {.pTrans = pJob->conn.pTrans, .pHandle = SCH_GET_TASK_HANDLE(pTask)};
  schAsyncSendMsg(pJob, pTask, &trans, addr, msgType, msg, msgSize, persistHandle, (rpcCtx.args ? &rpcCtx : NULL));
  msg = NULL;
  SCH_ERR_JRET(code);

  if (msgType == TDMT_SCH_QUERY || msgType == TDMT_SCH_MERGE_QUERY) {
    SCH_ERR_RET(schAppendTaskExecNode(pJob, pTask, addr, pTask->execId));
  }
+#else
+  if (TDMT_VND_SUBMIT != msgType) {
+    SSchTrans trans = {.pTrans = pJob->conn.pTrans, .pHandle = SCH_GET_TASK_HANDLE(pTask)};
+    schAsyncSendMsg(pJob, pTask, &trans, addr, msgType, msg, msgSize, persistHandle, (rpcCtx.args ? &rpcCtx : NULL));
+    msg = NULL;
+    SCH_ERR_JRET(code);
+
+    if (msgType == TDMT_SCH_QUERY || msgType == TDMT_SCH_MERGE_QUERY) {
+      SCH_ERR_RET(schAppendTaskExecNode(pJob, pTask, addr, pTask->execId));
+    }
+  } else {
+    SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask));
+  }
+#endif

  return TSDB_CODE_SUCCESS;
......
...@@ -82,7 +82,7 @@ int64_t tsNumOfTraceLogs = 0; ...@@ -82,7 +82,7 @@ int64_t tsNumOfTraceLogs = 0;
// log // log
int32_t dDebugFlag = 135; int32_t dDebugFlag = 135;
int32_t vDebugFlag = 135; int32_t vDebugFlag = 135;
int32_t mDebugFlag = 131; int32_t mDebugFlag = 135;
int32_t cDebugFlag = 131; int32_t cDebugFlag = 131;
int32_t jniDebugFlag = 131; int32_t jniDebugFlag = 131;
int32_t tmrDebugFlag = 131; int32_t tmrDebugFlag = 131;
......
...@@ -489,7 +489,7 @@ class TDDnode: ...@@ -489,7 +489,7 @@ class TDDnode:
onlyKillOnceWindows = 0 onlyKillOnceWindows = 0
while(processID): while(processID):
if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'): if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'):
killCmd = "kill -4 %s > /dev/null 2>&1" % processID killCmd = "kill -INT %s > /dev/null 2>&1" % processID
os.system(killCmd) os.system(killCmd)
onlyKillOnceWindows = 1 onlyKillOnceWindows = 1
time.sleep(1) time.sleep(1)
...@@ -503,7 +503,7 @@ class TDDnode: ...@@ -503,7 +503,7 @@ class TDDnode:
time.sleep(2) time.sleep(2)
self.running = 0 self.running = 0
tdLog.debug("dnode:%d is stopped by kill -4" % (self.index)) tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))
def stoptaosd(self): def stoptaosd(self):
...@@ -527,7 +527,7 @@ class TDDnode: ...@@ -527,7 +527,7 @@ class TDDnode:
onlyKillOnceWindows = 0 onlyKillOnceWindows = 0
while(processID): while(processID):
if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'): if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'):
killCmd = "kill -4 %s > /dev/null 2>&1" % processID killCmd = "kill -INT %s > /dev/null 2>&1" % processID
os.system(killCmd) os.system(killCmd)
onlyKillOnceWindows = 1 onlyKillOnceWindows = 1
time.sleep(1) time.sleep(1)
...@@ -537,7 +537,7 @@ class TDDnode: ...@@ -537,7 +537,7 @@ class TDDnode:
time.sleep(2) time.sleep(2)
self.running = 0 self.running = 0
tdLog.debug("dnode:%d is stopped by kill -4" % (self.index)) tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))
def forcestop(self): def forcestop(self):
if (not self.remoteIP == ""): if (not self.remoteIP == ""):
......
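Note on the stop-path change above: kill -4 sends SIGILL (signal 4), which terminates taosd abruptly, while kill -INT sends SIGINT (signal 2), which the server traps for an orderly shutdown; this also matches the -x SIGINT used when the sim scripts stop dnodes. A minimal sketch of the same idea without shelling out, assuming the target PID is already known (hypothetical helper, not part of the test framework):

import os
import signal

def stop_taosd(pid: int) -> None:
    # Equivalent of "kill -INT <pid>": ask taosd to shut down gracefully.
    # The old "kill -4" (SIGILL) would have terminated it abnormally instead.
    try:
        os.kill(pid, signal.SIGINT)
    except ProcessLookupError:
        pass  # the process has already exited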
...@@ -97,7 +97,7 @@ ...@@ -97,7 +97,7 @@
# TD-17738 ./test.sh -f tsim/parser/col_arithmetic_operation.sim # TD-17738 ./test.sh -f tsim/parser/col_arithmetic_operation.sim
# TD-17661 ./test.sh -f tsim/parser/columnValue.sim # TD-17661 ./test.sh -f tsim/parser/columnValue.sim
./test.sh -f tsim/parser/commit.sim ./test.sh -f tsim/parser/commit.sim
# TD-17661 ./test.sh -f tsim/parser/condition.sim ./test.sh -f tsim/parser/condition.sim
./test.sh -f tsim/parser/constCol.sim ./test.sh -f tsim/parser/constCol.sim
#./test.sh -f tsim/parser/create_db.sim #./test.sh -f tsim/parser/create_db.sim
./test.sh -f tsim/parser/create_mt.sim ./test.sh -f tsim/parser/create_mt.sim
......
...@@ -24,14 +24,20 @@ sql alter dnode 1 'fsDebugFlag 131' ...@@ -24,14 +24,20 @@ sql alter dnode 1 'fsDebugFlag 131'
sql alter dnode 1 'udfDebugFlag 131' sql alter dnode 1 'udfDebugFlag 131'
sql alter dnode 1 'smaDebugFlag 131' sql alter dnode 1 'smaDebugFlag 131'
sql alter dnode 1 'idxDebugFlag 131' sql alter dnode 1 'idxDebugFlag 131'
sql alter dnode 1 'tdbDebugFlag 131'
sql alter dnode 1 'tmrDebugFlag 131' sql alter dnode 1 'tmrDebugFlag 131'
sql alter dnode 1 'uDebugFlag 131' sql alter dnode 1 'uDebugFlag 131'
sql alter dnode 1 'smaDebugFlag 131' sql alter dnode 1 'smaDebugFlag 131'
sql alter dnode 1 'rpcDebugFlag 131' sql alter dnode 1 'rpcDebugFlag 131'
sql alter dnode 1 'qDebugFlag 131' sql alter dnode 1 'qDebugFlag 131'
sql alter dnode 1 'metaDebugFlag 131'
sql_error alter dnode 2 'wDebugFlag 135' sql_error alter dnode 2 'wDebugFlag 135'
sql_error alter dnode 2 'tmrDebugFlag 135' sql_error alter dnode 2 'tmrDebugFlag 135'
sql_error alter dnode 1 'monDebugFlag 131'
sql_error alter dnode 1 'cqDebugFlag 131'
sql_error alter dnode 1 'httpDebugFlag 131'
sql_error alter dnode 1 'mqttDebugFlag 131'
print ======== step3 print ======== step3
sql_error alter $hostname1 debugFlag 135 sql_error alter $hostname1 debugFlag 135
......
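For context on the flag values exercised above: in TDengine a debugFlag of 131 typically keeps a module at error/warning level, while 135 adds debug output (the globalcfg hunk earlier raises the default mDebugFlag from 131 to 135). A hedged sketch of issuing the same ALTER DNODE statements from the Python connector, assuming a locally reachable taosd with default credentials:

import taos

conn = taos.connect(host="localhost", user="root", password="taosdata", port=6030)
cur = conn.cursor()
# Raise mnode logging on dnode 1 to debug level; keep the query module at warning level.
cur.execute("alter dnode 1 'mDebugFlag 135'")
cur.execute("alter dnode 1 'qDebugFlag 131'")
cur.close()
conn.close()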
...@@ -135,6 +135,7 @@ print ===================> nest query interval ...@@ -135,6 +135,7 @@ print ===================> nest query interval
sql_error select ts, avg(c1) from (select ts, c1 from nest_tb0); sql_error select ts, avg(c1) from (select ts, c1 from nest_tb0);
sql select _wstart, avg(c1) from (select * from nest_tb0) interval(3d) sql select _wstart, avg(c1) from (select * from nest_tb0) interval(3d)
print $data00 $data01 $data10 $data11 $data20 $data21
if $rows != 3 then if $rows != 3 then
return -1 return -1
endi endi
...@@ -147,19 +148,19 @@ endi ...@@ -147,19 +148,19 @@ endi
if $data10 != @20-09-17 00:00:00.000@ then if $data10 != @20-09-17 00:00:00.000@ then
return -1 return -1
endi endi
if $data11 != 49.581325301 then if $data11 != 49.685185185 then
return -1 return -1
endi endi
if $data20 != @20-09-20 00:00:00.000@ then if $data20 != @20-09-20 00:00:00.000@ then
return -1 return -1
endi endi
if $data21 != 49.703539823 then if $data21 != 49.500000000 then
return -1 return -1
endi endi
sql select stddev(c1) from (select c1 from nest_tb0); sql select stddev(c1) from (select c1 from nest_tb0);
sql_error select percentile(c1, 20) from (select * from nest_tb0); sql_error select percentile(c1, 20) from (select * from nest_tb0);
sql_error select interp(c1) from (select * from nest_tb0); sql select interp(c1) from (select * from nest_tb0);
sql_error select derivative(val, 1s, 0) from (select c1 val from nest_tb0); sql_error select derivative(val, 1s, 0) from (select c1 val from nest_tb0);
sql_error select twa(c1) from (select c1 from nest_tb0); sql_error select twa(c1) from (select c1 from nest_tb0);
sql_error select irate(c1) from (select c1 from nest_tb0); sql_error select irate(c1) from (select c1 from nest_tb0);
...@@ -217,19 +218,14 @@ if $data00 != 0.016666667 then ...@@ -217,19 +218,14 @@ if $data00 != 0.016666667 then
endi endi
sql select derivative(c1, 1s, 0) from (select * from nest_tb0); sql select derivative(c1, 1s, 0) from (select * from nest_tb0);
print $rows $data00 $data10
if $rows != 9999 then if $rows != 9999 then
return -1 return -1
endi endi
if $data00 != @20-09-15 00:01:00.000@ then if $data00 != 0.016666667 then
return -1
endi
if $data01 != 0.016666667 then
return -1
endi
if $data10 != @20-09-15 00:02:00.000@ then
return -1 return -1
endi endi
if $data11 != 0.016666667 then if $data10 != 0.016666667 then
return -1 return -1
endi endi
...@@ -238,7 +234,7 @@ if $rows != 9999 then ...@@ -238,7 +234,7 @@ if $rows != 9999 then
return -1 return -1
endi endi
sql select avg(c1),sum(c2), max(c3), min(c4), count(*), first(c7), last(c7),spread(c6) from (select * from nest_tb0) interval(1d); sql select _wstart, avg(c1),sum(c2), max(c3), min(c4), count(*), first(c7), last(c7),spread(c6) from (select * from nest_tb0) interval(1d);
if $rows != 7 then if $rows != 7 then
return -1 return -1
endi endi
...@@ -291,7 +287,7 @@ print ===================> group by + having ...@@ -291,7 +287,7 @@ print ===================> group by + having
print =========================> ascending order/descending order print =========================> ascending order/descending order
print =========================> nest query join print =========================> nest query join
sql select a.ts,a.k,b.ts from (select count(*) k from nest_tb0 interval(30a)) a, (select count(*) f from nest_tb1 interval(30a)) b where a.ts = b.ts ; sql select a.ts,a.k,b.ts from (select _wstart ts, count(*) k from nest_tb0 interval(30a)) a, (select _wstart ts, count(*) f from nest_tb1 interval(30a)) b where a.ts = b.ts ;
if $rows != 10000 then if $rows != 10000 then
return -1 return -1
endi endi
...@@ -314,7 +310,7 @@ if $data12 != @20-09-15 00:01:00.000@ then ...@@ -314,7 +310,7 @@ if $data12 != @20-09-15 00:01:00.000@ then
return -1 return -1
endi endi
sql select sum(a.k), sum(b.f) from (select count(*) k from nest_tb0 interval(30a)) a, (select count(*) f from nest_tb1 interval(30a)) b where a.ts = b.ts ; sql select sum(a.k), sum(b.f) from (select _wstart ts, count(*) k from nest_tb0 interval(30a)) a, (select _wstart ts, count(*) f from nest_tb1 interval(30a)) b where a.ts = b.ts ;
if $rows != 1 then if $rows != 1 then
return -1 return -1
endi endi
...@@ -325,7 +321,7 @@ if $data01 != 10000 then ...@@ -325,7 +321,7 @@ if $data01 != 10000 then
return -1 return -1
endi endi
sql select a.ts,a.k,b.ts,c.ts,c.ts,c.x from (select count(*) k from nest_tb0 interval(30a)) a, (select count(*) f from nest_tb1 interval(30a)) b, (select count(*) x from nest_tb2 interval(30a)) c where a.ts = b.ts and a.ts = c.ts sql select a.ts,a.k,b.ts,c.ts,c.ts,c.x from (select _wstart ts, count(*) k from nest_tb0 interval(30a)) a, (select _wstart ts, count(*) f from nest_tb1 interval(30a)) b, (select _wstart ts, count(*) x from nest_tb2 interval(30a)) c where a.ts = b.ts and a.ts = c.ts
if $rows != 10000 then if $rows != 10000 then
return -1 return -1
endi endi
...@@ -346,10 +342,7 @@ sql select diff(val) from (select c1 val from nest_tb0); ...@@ -346,10 +342,7 @@ sql select diff(val) from (select c1 val from nest_tb0);
if $rows != 9999 then if $rows != 9999 then
return -1 return -1
endi endi
if $data00 != @70-01-01 08:00:00.000@ then if $data00 != 1 then
return -1
endi
if $data01 != 1 then
return -1 return -1
endi endi
...@@ -376,7 +369,7 @@ if $data11 != 1 then ...@@ -376,7 +369,7 @@ if $data11 != 1 then
endi endi
print =====================>TD-5157 print =====================>TD-5157
sql select twa(c1) from nest_tb1 interval(19a); sql select _wstart, twa(c1) from nest_tb1 interval(19a);
if $rows != 10000 then if $rows != 10000 then
return -1 return -1
endi endi
...@@ -388,29 +381,14 @@ if $data01 != 0.000083333 then ...@@ -388,29 +381,14 @@ if $data01 != 0.000083333 then
endi endi
print ======================>TD-5271 print ======================>TD-5271
sql select min(val),max(val),first(val),last(val),count(val),sum(val),avg(val) from (select count(*) val from nest_mt0 group by tbname) sql_error select min(val),max(val),first(val),last(val),count(val),sum(val),avg(val) from (select count(*) val from nest_mt0 group by tbname)
if $rows != 1 then
return -1
endi
if $data00 != 10000 then
return -1
endi
if $data01 != 10000 then
return -1
endi
if $data04 != 10 then
return -1
endi
if $data05 != 100000 then
return -1
endi
print =================>us database interval query, TD-5039 print =================>us database interval query, TD-5039
sql create database test precision 'us'; sql create database test precision 'us';
sql use test; sql use test;
sql create table t1(ts timestamp, k int); sql create table t1(ts timestamp, k int);
sql insert into t1 values('2020-01-01 01:01:01.000', 1) ('2020-01-01 01:02:00.000', 2); sql insert into t1 values('2020-01-01 01:01:01.000', 1) ('2020-01-01 01:02:00.000', 2);
sql select avg(k) from (select avg(k) k from t1 interval(1s)) interval(1m); sql select _wstart, avg(k) from (select _wstart, avg(k) k from t1 interval(1s)) interval(1m);
if $rows != 2 then if $rows != 2 then
return -1 return -1
endi endi
...@@ -427,4 +405,4 @@ if $data11 != 2.000000000 then ...@@ -427,4 +405,4 @@ if $data11 != 2.000000000 then
return -1 return -1
endi endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
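The edits in this script track the 3.0 behavior that interval() no longer returns the window start implicitly, so queries must select the _wstart pseudocolumn themselves, and several expected values change with the reworked windows. A small sketch of the adjusted query shape from the Python connector, assuming the nest_tb0 table and the database created earlier in this suite:

import taos

conn = taos.connect(host="localhost", user="root", password="taosdata")
cur = conn.cursor()
cur.execute("use nestquery")  # database name assumed; use whatever this suite created
# Select the window start explicitly; interval() alone no longer yields a timestamp column.
cur.execute("select _wstart, avg(c1) from (select * from nest_tb0) interval(3d)")
for wstart, avg_c1 in cur.fetchall():
    print(wstart, avg_c1)
cur.close()
conn.close()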
...@@ -498,4 +498,7 @@ if $data15 != 3 then ...@@ -498,4 +498,7 @@ if $data15 != 3 then
goto loop5 goto loop5
endi endi
sql drop database test;
sql drop database test1;
system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
system sh/cfg.sh -n dnode1 -c supportVnodes -v 0
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
system sh/exec.sh -n dnode4 -s start
sql connect
sql create dnode $hostname port 7200
sql create dnode $hostname port 7300
sql create dnode $hostname port 7400
$x = 0
step1:
$x = $x + 1
sleep 1000
if $x == 10 then
print ====> dnode not ready!
return -1
endi
sql show dnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
print ===> $data20 $data21 $data22 $data23 $data24 $data25
print ===> $data30 $data31 $data32 $data33 $data34 $data35
if $rows != 4 then
return -1
endi
if $data(1)[4] != ready then
goto step1
endi
if $data(2)[4] != ready then
goto step1
endi
if $data(3)[4] != ready then
goto step1
endi
if $data(4)[4] != ready then
goto step1
endi
$replica = 3
$vgroups = 1
$retentions = 5s:7d,15s:21d
print ============= create database
sql create database db replica $replica vgroups $vgroups retentions $retentions
$loop_cnt = 0
check_db_ready:
$loop_cnt = $loop_cnt + 1
sleep 200
if $loop_cnt == 100 then
print ====> db not ready!
return -1
endi
sql show databases
print ===> rows: $rows
print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] $data[2][7] $data[2][8] $data[2][9] $data[2][10] $data[2][11] $data[2][12] $data[2][13] $data[2][14] $data[2][15] $data[2][16] $data[2][17] $data[2][18] $data[2][19]
if $rows != 3 then
return -1
endi
if $data[2][15] != ready then
goto check_db_ready
endi
sql use db
$loop_cnt = 0
check_vg_ready:
$loop_cnt = $loop_cnt + 1
sleep 200
if $loop_cnt == 300 then
print ====> vgroups not ready!
return -1
endi
sql show vgroups
print ===> rows: $rows
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11]
if $rows != $vgroups then
return -1
endi
if $data[0][4] == leader then
if $data[0][6] == follower then
if $data[0][8] == follower then
print ---- vgroup $data[0][0] leader is located on dnode $data[0][3]
endi
endi
elif $data[0][6] == leader then
if $data[0][4] == follower then
if $data[0][8] == follower then
print ---- vgroup $data[0][0] leader is located on dnode $data[0][5]
endi
endi
elif $data[0][8] == leader then
if $data[0][4] == follower then
if $data[0][6] == follower then
print ---- vgroup $data[0][0] leader is located on dnode $data[0][7]
endi
endi
else
goto check_vg_ready
endi
vg_ready:
print ====> create stable/child table
sql create table stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int) rollup(sum)
sql show stables
if $rows != 1 then
return -1
endi
sql create table ct1 using stb tags(1000)
print ===> stop dnode4
system sh/exec.sh -n dnode4 -s stop -x SIGINT
sleep 3000
print ===> write 100 records
$N = 100
$count = 0
while $count < $N
$ms = 1659000000000 + $count
sql insert into ct1 values( $ms , $count , 2.1, 3.1)
$count = $count + 1
endw
#sql flush database db;
sleep 3000
print ===> stop dnode1 dnode2 dnode3
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT
sleep 10000
########################################################
print ===> start dnode1 dnode2 dnode3 dnode4
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
system sh/exec.sh -n dnode4 -s start
sleep 3000
print =============== query data
sql connect
sql use db
sql select * from ct1
print rows: $rows
print $data00 $data01 $data02
if $rows != 100 then
return -1
endi
\ No newline at end of file
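The new script above exercises a 3-replica vgroup: 100 rows are written while dnode4 is down, every dnode is then restarted, and the case passes only if the replica catches up so the child table still returns all 100 rows. A rough Python equivalent of that final verification step, assuming the db database and ct1 table created by the sim script:

import taos

conn = taos.connect(host="localhost", user="root", password="taosdata")
cur = conn.cursor()
cur.execute("use db")
cur.execute("select * from ct1")
rows = cur.fetchall()
# The sim script fails unless exactly 100 rows survive the restart and resync.
assert len(rows) == 100, f"expected 100 rows after recovery, got {len(rows)}"
cur.close()
conn.close()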
...@@ -50,9 +50,9 @@ class TDTestCase: ...@@ -50,9 +50,9 @@ class TDTestCase:
'col11': 'bool', 'col11': 'bool',
'col12': f'binary({self.str_length})', 'col12': f'binary({self.str_length})',
'col13': f'nchar({self.str_length})', 'col13': f'nchar({self.str_length})',
} }
self.tinyint_val = random.randint(constant.TINYINT_MIN,constant.TINYINT_MAX) self.tinyint_val = random.randint(constant.TINYINT_MIN,constant.TINYINT_MAX)
self.smallint_val = random.randint(constant.SMALLINT_MIN,constant.SMALLINT_MAX) self.smallint_val = random.randint(constant.SMALLINT_MIN,constant.SMALLINT_MAX)
self.int_val = random.randint(constant.INT_MIN,constant.INT_MAX) self.int_val = random.randint(constant.INT_MIN,constant.INT_MAX)
...@@ -100,15 +100,15 @@ class TDTestCase: ...@@ -100,15 +100,15 @@ class TDTestCase:
elif col_type.lower() == 'bigint unsigned': elif col_type.lower() == 'bigint unsigned':
tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["bigint unsigned"]})') tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["bigint unsigned"]})')
elif col_type.lower() == 'bool': elif col_type.lower() == 'bool':
tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["bool"]})') tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["bool"]})')
elif col_type.lower() == 'float': elif col_type.lower() == 'float':
tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["float"]})') tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["float"]})')
elif col_type.lower() == 'double': elif col_type.lower() == 'double':
tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["double"]})') tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["double"]})')
elif 'binary' in col_type.lower(): elif 'binary' in col_type.lower():
tdSql.execute(f'''insert into {tbname} values({self.ts+i},"{base_data['binary']}")''') tdSql.execute(f'''insert into {tbname} values({self.ts+i},"{base_data['binary']}")''')
elif 'nchar' in col_type.lower(): elif 'nchar' in col_type.lower():
tdSql.execute(f'''insert into {tbname} values({self.ts+i},"{base_data['nchar']}")''') tdSql.execute(f'''insert into {tbname} values({self.ts+i},"{base_data['nchar']}")''')
def delete_all_data(self,tbname,col_type,row_num,base_data,dbname,tb_type,tb_num=1): def delete_all_data(self,tbname,col_type,row_num,base_data,dbname,tb_type,tb_num=1):
tdSql.execute(f'delete from {tbname}') tdSql.execute(f'delete from {tbname}')
tdSql.execute(f'flush database {dbname}') tdSql.execute(f'flush database {dbname}')
...@@ -164,7 +164,7 @@ class TDTestCase: ...@@ -164,7 +164,7 @@ class TDTestCase:
elif 'nchar' in column_type.lower(): elif 'nchar' in column_type.lower():
tdSql.checkEqual(tdSql.queryResult[0][0],base_data['nchar']) tdSql.checkEqual(tdSql.queryResult[0][0],base_data['nchar'])
else: else:
tdSql.checkEqual(tdSql.queryResult[0][0],base_data[column_type]) tdSql.checkEqual(tdSql.queryResult[0][0],base_data[column_type])
def delete_rows(self,dbname,tbname,col_name,col_type,base_data,row_num,tb_type,tb_num=1): def delete_rows(self,dbname,tbname,col_name,col_type,base_data,row_num,tb_type,tb_num=1):
for i in range(row_num): for i in range(row_num):
tdSql.execute(f'delete from {tbname} where ts>{self.ts+i}') tdSql.execute(f'delete from {tbname} where ts>{self.ts+i}')
...@@ -189,7 +189,7 @@ class TDTestCase: ...@@ -189,7 +189,7 @@ class TDTestCase:
elif tb_type == 'stb': elif tb_type == 'stb':
tdSql.checkRows(i*tb_num) tdSql.checkRows(i*tb_num)
for j in range(tb_num): for j in range(tb_num):
self.insert_base_data(col_type,f'{tbname}_{j}',row_num,base_data) self.insert_base_data(col_type,f'{tbname}_{j}',row_num,base_data)
for i in range(row_num): for i in range(row_num):
tdSql.execute(f'delete from {tbname} where ts<={self.ts+i}') tdSql.execute(f'delete from {tbname} where ts<={self.ts+i}')
tdSql.execute(f'flush database {dbname}') tdSql.execute(f'flush database {dbname}')
...@@ -240,7 +240,7 @@ class TDTestCase: ...@@ -240,7 +240,7 @@ class TDTestCase:
tdSql.error(f'''delete from {tbname} where {error_list} {column_name} ="{base_data['nchar']}"''') tdSql.error(f'''delete from {tbname} where {error_list} {column_name} ="{base_data['nchar']}"''')
else: else:
tdSql.error(f'delete from {tbname} where {error_list} {column_name} = {base_data[column_type]}') tdSql.error(f'delete from {tbname} where {error_list} {column_name} = {base_data[column_type]}')
def delete_data_ntb(self): def delete_data_ntb(self):
tdSql.execute(f'create database if not exists {self.dbname}') tdSql.execute(f'create database if not exists {self.dbname}')
tdSql.execute(f'use {self.dbname}') tdSql.execute(f'use {self.dbname}')
...@@ -295,4 +295,4 @@ class TDTestCase: ...@@ -295,4 +295,4 @@ class TDTestCase:
tdLog.success("%s successfully executed" % __file__) tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase()) tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase()) tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
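The cleanups above sit in a test that repeatedly deletes rows by timestamp and flushes the database before re-checking counts. A hedged standalone sketch of that delete-then-flush pattern, with the database and table names (deltest, t1) invented for illustration:

import taos

conn = taos.connect(host="localhost", user="root", password="taosdata")
cur = conn.cursor()
cur.execute("create database if not exists deltest")
cur.execute("use deltest")
cur.execute("create table if not exists t1 (ts timestamp, c1 int)")
cur.execute("insert into t1 values (now, 1) (now + 1s, 2)")
# Delete by timestamp predicate, then flush so the deletion is persisted before verification.
cur.execute("delete from t1 where ts <= now")
cur.execute("flush database deltest")
cur.execute("select count(*) from t1")
print(cur.fetchall())
cur.close()
conn.close()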
...@@ -26,13 +26,13 @@ from util.common import tdCom ...@@ -26,13 +26,13 @@ from util.common import tdCom
import platform import platform
import io import io
if platform.system().lower() == 'windows': if platform.system().lower() == 'windows':
sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf8') sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf8')
class TDTestCase: class TDTestCase:
def init(self, conn, logSql): def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__) tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql) tdSql.init(conn.cursor(), logSql)
self._conn = conn self._conn = conn
def createDb(self, name="test", db_update_tag=0): def createDb(self, name="test", db_update_tag=0):
if db_update_tag == 0: if db_update_tag == 0:
...@@ -67,7 +67,7 @@ class TDTestCase: ...@@ -67,7 +67,7 @@ class TDTestCase:
td_ts = time.strftime("%Y-%m-%d %H:%M:%S.{}".format(ulsec), time.localtime(ts)) td_ts = time.strftime("%Y-%m-%d %H:%M:%S.{}".format(ulsec), time.localtime(ts))
return td_ts return td_ts
#return repr(datetime.datetime.strptime(td_ts, "%Y-%m-%d %H:%M:%S.%f")) #return repr(datetime.datetime.strptime(td_ts, "%Y-%m-%d %H:%M:%S.%f"))
def dateToTs(self, datetime_input): def dateToTs(self, datetime_input):
return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f"))) return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f")))
...@@ -274,7 +274,7 @@ class TDTestCase: ...@@ -274,7 +274,7 @@ class TDTestCase:
input_sql = self.gen_influxdb_line(stb_name, tb_name, id, t0, t1, t2, t3, t4, t5, t6, t7, t8, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, ts, input_sql = self.gen_influxdb_line(stb_name, tb_name, id, t0, t1, t2, t3, t4, t5, t6, t7, t8, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, ts,
id_noexist_tag, id_change_tag, id_double_tag, ct_add_tag, ct_am_tag, ct_ma_tag, ct_min_tag, c_multi_tag, t_multi_tag, c_blank_tag, t_blank_tag, chinese_tag) id_noexist_tag, id_change_tag, id_double_tag, ct_add_tag, ct_am_tag, ct_ma_tag, ct_min_tag, c_multi_tag, t_multi_tag, c_blank_tag, t_blank_tag, chinese_tag)
return input_sql, stb_name return input_sql, stb_name
def genMulTagColStr(self, gen_type, count): def genMulTagColStr(self, gen_type, count):
""" """
gen_type must be "tag"/"col" gen_type must be "tag"/"col"
...@@ -370,10 +370,10 @@ class TDTestCase: ...@@ -370,10 +370,10 @@ class TDTestCase:
for t_type in full_type_list: for t_type in full_type_list:
input_sql, stb_name = self.genFullTypeSql(c0=t_type, t0=t_type) input_sql, stb_name = self.genFullTypeSql(c0=t_type, t0=t_type)
self.resCmp(input_sql, stb_name) self.resCmp(input_sql, stb_name)
def symbolsCheckCase(self): def symbolsCheckCase(self):
""" """
check symbols = `~!@#$%^&*()_-+={[}]\|:;'\",<.>/? check symbols = `~!@#$%^&*()_-+={[}]\|:;'\",<.>/?
""" """
''' '''
please test : please test :
...@@ -395,7 +395,7 @@ class TDTestCase: ...@@ -395,7 +395,7 @@ class TDTestCase:
for ts in ts_list: for ts in ts_list:
input_sql, stb_name = self.genFullTypeSql(ts=ts) input_sql, stb_name = self.genFullTypeSql(ts=ts)
self.resCmp(input_sql, stb_name, ts=ts) self.resCmp(input_sql, stb_name, ts=ts)
def idSeqCheckCase(self): def idSeqCheckCase(self):
""" """
check id.index in tags check id.index in tags
...@@ -404,7 +404,7 @@ class TDTestCase: ...@@ -404,7 +404,7 @@ class TDTestCase:
tdCom.cleanTb() tdCom.cleanTb()
input_sql, stb_name = self.genFullTypeSql(id_change_tag=True) input_sql, stb_name = self.genFullTypeSql(id_change_tag=True)
self.resCmp(input_sql, stb_name) self.resCmp(input_sql, stb_name)
def idUpperCheckCase(self): def idUpperCheckCase(self):
""" """
check id param check id param
...@@ -444,7 +444,7 @@ class TDTestCase: ...@@ -444,7 +444,7 @@ class TDTestCase:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
except SchemalessError as err: except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0) tdSql.checkNotEqual(err.errno, 0)
def idIllegalNameCheckCase(self): def idIllegalNameCheckCase(self):
""" """
test illegal id name test illegal id name
...@@ -490,7 +490,7 @@ class TDTestCase: ...@@ -490,7 +490,7 @@ class TDTestCase:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
except SchemalessError as err: except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0) tdSql.checkNotEqual(err.errno, 0)
def illegalTsCheckCase(self): def illegalTsCheckCase(self):
""" """
check ts format like 16260068336390us19 check ts format like 16260068336390us19
...@@ -575,11 +575,11 @@ class TDTestCase: ...@@ -575,11 +575,11 @@ class TDTestCase:
except SchemalessError as err: except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0) tdSql.checkNotEqual(err.errno, 0)
# binary # binary
stb_name = tdCom.getLongName(7, "letters") stb_name = tdCom.getLongName(7, "letters")
input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(16374, "letters")}" c0=f 1626006833639000000' input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(16374, "letters")}" c0=f 1626006833639000000'
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(16375, "letters")}" c0=f 1626006833639000000' input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(16375, "letters")}" c0=f 1626006833639000000'
try: try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
...@@ -647,7 +647,7 @@ class TDTestCase: ...@@ -647,7 +647,7 @@ class TDTestCase:
except SchemalessError as err: except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0) tdSql.checkNotEqual(err.errno, 0)
# f32 # f32
for c5 in [f"{-3.4028234663852885981170418348451692544*(10**38)}f32", f"{3.4028234663852885981170418348451692544*(10**38)}f32"]: for c5 in [f"{-3.4028234663852885981170418348451692544*(10**38)}f32", f"{3.4028234663852885981170418348451692544*(10**38)}f32"]:
input_sql, stb_name = self.genFullTypeSql(c5=c5) input_sql, stb_name = self.genFullTypeSql(c5=c5)
self.resCmp(input_sql, stb_name) self.resCmp(input_sql, stb_name)
...@@ -671,11 +671,11 @@ class TDTestCase: ...@@ -671,11 +671,11 @@ class TDTestCase:
except SchemalessError as err: except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0) tdSql.checkNotEqual(err.errno, 0)
# # # binary # # # binary
# stb_name = tdCom.getLongName(7, "letters") # stb_name = tdCom.getLongName(7, "letters")
# input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(16374, "letters")}" 1626006833639000000' # input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(16374, "letters")}" 1626006833639000000'
# self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
# input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(16375, "letters")}" 1626006833639000000' # input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(16375, "letters")}" 1626006833639000000'
# try: # try:
# self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
...@@ -715,13 +715,13 @@ class TDTestCase: ...@@ -715,13 +715,13 @@ class TDTestCase:
# i8 i16 i32 i64 f32 f64 # i8 i16 i32 i64 f32 f64
for input_sql in [ for input_sql in [
self.genFullTypeSql(t1="1s2i8")[0], self.genFullTypeSql(t1="1s2i8")[0],
self.genFullTypeSql(t2="1s2i16")[0], self.genFullTypeSql(t2="1s2i16")[0],
self.genFullTypeSql(t3="1s2i32")[0], self.genFullTypeSql(t3="1s2i32")[0],
self.genFullTypeSql(t4="1s2i64")[0], self.genFullTypeSql(t4="1s2i64")[0],
self.genFullTypeSql(t5="11.1s45f32")[0], self.genFullTypeSql(t5="11.1s45f32")[0],
self.genFullTypeSql(t6="11.1s45f64")[0], self.genFullTypeSql(t6="11.1s45f64")[0],
self.genFullTypeSql(c1="1s2i8")[0], self.genFullTypeSql(c1="1s2i8")[0],
self.genFullTypeSql(c2="1s2i16")[0], self.genFullTypeSql(c2="1s2i16")[0],
self.genFullTypeSql(c3="1s2i32")[0], self.genFullTypeSql(c3="1s2i32")[0],
self.genFullTypeSql(c4="1s2i64")[0], self.genFullTypeSql(c4="1s2i64")[0],
...@@ -746,14 +746,14 @@ class TDTestCase: ...@@ -746,14 +746,14 @@ class TDTestCase:
except SchemalessError as err: except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0) tdSql.checkNotEqual(err.errno, 0)
# check accepted binary and nchar symbols # check accepted binary and nchar symbols
# # * ~!@#$¥%^&*()-+={}|[]、「」:; # # * ~!@#$¥%^&*()-+={}|[]、「」:;
for symbol in list('~!@#$¥%^&*()-+={}|[]、「」:;'): for symbol in list('~!@#$¥%^&*()-+={}|[]、「」:;'):
input_sql1 = f'{stb_name},t0=t c0=f,c1="abc{symbol}aaa" 1626006833639000000' input_sql1 = f'{stb_name},t0=t c0=f,c1="abc{symbol}aaa" 1626006833639000000'
input_sql2 = f'{stb_name},t0=t,t1="abc{symbol}aaa" c0=f 1626006833639000000' input_sql2 = f'{stb_name},t0=t,t1="abc{symbol}aaa" c0=f 1626006833639000000'
self._conn.schemaless_insert([input_sql1], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) self._conn.schemaless_insert([input_sql1], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
# self._conn.schemaless_insert([input_sql2], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) # self._conn.schemaless_insert([input_sql2], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
def duplicateIdTagColInsertCheckCase(self): def duplicateIdTagColInsertCheckCase(self):
""" """
check duplicate Id Tag Col check duplicate Id Tag Col
...@@ -810,7 +810,7 @@ class TDTestCase: ...@@ -810,7 +810,7 @@ class TDTestCase:
self.resCmp(input_sql, stb_name) self.resCmp(input_sql, stb_name)
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
self.resCmp(input_sql, stb_name) self.resCmp(input_sql, stb_name)
@tdCom.smlPass @tdCom.smlPass
def tagColBinaryNcharLengthCheckCase(self): def tagColBinaryNcharLengthCheckCase(self):
""" """
...@@ -829,7 +829,7 @@ class TDTestCase: ...@@ -829,7 +829,7 @@ class TDTestCase:
check column and tag count add, stb and tb duplicate check column and tag count add, stb and tb duplicate
* tag: alter table ... * tag: alter table ...
* col: when update==0 and ts is same, unchange * col: when update==0 and ts is same, unchange
* so this case tag&&value will be added, * so this case tag&&value will be added,
* col is added without value when update==0 * col is added without value when update==0
* col is added with value when update==1 * col is added with value when update==1
""" """
...@@ -897,7 +897,7 @@ class TDTestCase: ...@@ -897,7 +897,7 @@ class TDTestCase:
# * every binary and nchar must be length+2, so here is two tag, max length could not larger than 16384-2*2 # * every binary and nchar must be length+2, so here is two tag, max length could not larger than 16384-2*2
input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(16374, "letters")}",t2="{tdCom.getLongName(5, "letters")}" c0=f 1626006833639000000' input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(16374, "letters")}",t2="{tdCom.getLongName(5, "letters")}" c0=f 1626006833639000000'
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
tdSql.query(f"select * from {stb_name}") tdSql.query(f"select * from {stb_name}")
tdSql.checkRows(2) tdSql.checkRows(2)
input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(16374, "letters")}",t2="{tdCom.getLongName(6, "letters")}" c0=f 1626006833639000000' input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(16374, "letters")}",t2="{tdCom.getLongName(6, "letters")}" c0=f 1626006833639000000'
...@@ -922,7 +922,7 @@ class TDTestCase: ...@@ -922,7 +922,7 @@ class TDTestCase:
tdSql.checkNotEqual(err.errno, 0) tdSql.checkNotEqual(err.errno, 0)
tdSql.query(f"select * from {stb_name}") tdSql.query(f"select * from {stb_name}")
tdSql.checkRows(3) tdSql.checkRows(3)
# * tag nchar max is 16374/4, col+ts nchar max 49151 # * tag nchar max is 16374/4, col+ts nchar max 49151
def tagColNcharMaxLengthCheckCase(self): def tagColNcharMaxLengthCheckCase(self):
""" """
...@@ -977,7 +977,7 @@ class TDTestCase: ...@@ -977,7 +977,7 @@ class TDTestCase:
"st123456,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin_stf\",c2=false,c5=5f64,c6=7u64 1626006933641000000" "st123456,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin_stf\",c2=false,c5=5f64,c6=7u64 1626006933641000000"
] ]
self._conn.schemaless_insert(lines, TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) self._conn.schemaless_insert(lines, TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
def multiInsertCheckCase(self, count): def multiInsertCheckCase(self, count):
""" """
test multi insert test multi insert
...@@ -1073,7 +1073,7 @@ class TDTestCase: ...@@ -1073,7 +1073,7 @@ class TDTestCase:
self.multiThreadRun(self.genMultiThreadSeq(input_sql)) self.multiThreadRun(self.genMultiThreadSeq(input_sql))
tdSql.query(f"show tables;") tdSql.query(f"show tables;")
tdSql.checkRows(5) tdSql.checkRows(5)
def sStbStbDdataInsertMultiThreadCheckCase(self): def sStbStbDdataInsertMultiThreadCheckCase(self):
""" """
thread input same stb tb, different data, result keep first data thread input same stb tb, different data, result keep first data
...@@ -1107,7 +1107,7 @@ class TDTestCase: ...@@ -1107,7 +1107,7 @@ class TDTestCase:
tdSql.checkEqual(tb_name, expected_tb_name) tdSql.checkEqual(tb_name, expected_tb_name)
tdSql.query(f"select * from {stb_name};") tdSql.query(f"select * from {stb_name};")
tdSql.checkRows(1) tdSql.checkRows(1)
def sStbStbDdataMtcInsertMultiThreadCheckCase(self): def sStbStbDdataMtcInsertMultiThreadCheckCase(self):
""" """
thread input same stb tb, different data, minus columns and tags, result keep first data thread input same stb tb, different data, minus columns and tags, result keep first data
...@@ -1217,7 +1217,7 @@ class TDTestCase: ...@@ -1217,7 +1217,7 @@ class TDTestCase:
tdSql.checkRows(6) tdSql.checkRows(6)
for c in ["c7", "c8", "c9"]: for c in ["c7", "c8", "c9"]:
tdSql.query(f"select * from {stb_name} where {c} is NULL") tdSql.query(f"select * from {stb_name} where {c} is NULL")
tdSql.checkRows(5) tdSql.checkRows(5)
for t in ["t10", "t11"]: for t in ["t10", "t11"]:
tdSql.query(f"select * from {stb_name} where {t} is not NULL;") tdSql.query(f"select * from {stb_name} where {t} is not NULL;")
tdSql.checkRows(6) tdSql.checkRows(6)
......
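All of the cases above funnel into the same connector call: schemaless_insert(lines, protocol, precision) with InfluxDB line protocol and nanosecond timestamps. A minimal sketch, assuming the TDSmlProtocolType and TDSmlTimestampType enums come from this repo's util.types helpers (as in the surrounding tests) and that the schemaless database created by createDb is current:

import taos
from util.types import TDSmlProtocolType, TDSmlTimestampType  # assumed helper module from this test framework

conn = taos.connect(host="localhost", user="root", password="taosdata")
conn.select_db("test")  # the schemaless database name used by createDb in this suite
line = 'st123456,t1=3i64,t2=4f64 c1=3i64,c3=L"pass",c2=false 1626006833639000000'
# One line of InfluxDB line protocol becomes one row in the auto-created super table st123456.
conn.schemaless_insert([line], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
conn.close()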
...@@ -43,7 +43,7 @@ class TDTestCase: ...@@ -43,7 +43,7 @@ class TDTestCase:
case1: limit offset base function test case1: limit offset base function test
case2: offset return valid case2: offset return valid
''' '''
return return
def getBuildPath(self): def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__)) selfPath = os.path.dirname(os.path.realpath(__file__))
...@@ -69,7 +69,7 @@ class TDTestCase: ...@@ -69,7 +69,7 @@ class TDTestCase:
# self.create_tables(); # self.create_tables();
self.ts = 1500000000000 self.ts = 1500000000000
# stop # stop
def stop(self): def stop(self):
tdSql.close() tdSql.close()
tdLog.success("%s successfully executed" % __file__) tdLog.success("%s successfully executed" % __file__)
...@@ -80,7 +80,7 @@ class TDTestCase: ...@@ -80,7 +80,7 @@ class TDTestCase:
def newcur(self,host,cfg): def newcur(self,host,cfg):
user = "root" user = "root"
password = "taosdata" password = "taosdata"
port =6030 port =6030
con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port)
cur=con.cursor() cur=con.cursor()
print(cur) print(cur)
...@@ -90,7 +90,7 @@ class TDTestCase: ...@@ -90,7 +90,7 @@ class TDTestCase:
def create_tables(self,host,dbname,stbname,count): def create_tables(self,host,dbname,stbname,count):
buildPath = self.getBuildPath() buildPath = self.getBuildPath()
config = buildPath+ "../sim/dnode1/cfg/" config = buildPath+ "../sim/dnode1/cfg/"
tsql=self.newcur(host,config) tsql=self.newcur(host,config)
tsql.execute("use %s" %dbname) tsql.execute("use %s" %dbname)
...@@ -109,7 +109,7 @@ class TDTestCase: ...@@ -109,7 +109,7 @@ class TDTestCase:
tsql.execute(sql) tsql.execute(sql)
sql = pre_create sql = pre_create
# print(time.time()) # print(time.time())
# end sql # end sql
if sql != pre_create: if sql != pre_create:
# print(sql) # print(sql)
tsql.execute(sql) tsql.execute(sql)
...@@ -122,7 +122,7 @@ class TDTestCase: ...@@ -122,7 +122,7 @@ class TDTestCase:
def mutiThread_create_tables(self,host,dbname,stbname,vgroups,threadNumbers,childcount): def mutiThread_create_tables(self,host,dbname,stbname,vgroups,threadNumbers,childcount):
buildPath = self.getBuildPath() buildPath = self.getBuildPath()
config = buildPath+ "../sim/dnode1/cfg/" config = buildPath+ "../sim/dnode1/cfg/"
tsql=self.newcur(host,config) tsql=self.newcur(host,config)
tdLog.debug("create database %s"%dbname) tdLog.debug("create database %s"%dbname)
tsql.execute("drop database if exists %s"%dbname) tsql.execute("drop database if exists %s"%dbname)
...@@ -132,7 +132,7 @@ class TDTestCase: ...@@ -132,7 +132,7 @@ class TDTestCase:
threads = [] threads = []
for i in range(threadNumbers): for i in range(threadNumbers):
tsql.execute("create stable %s%d(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)"%(stbname,i)) tsql.execute("create stable %s%d(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)"%(stbname,i))
threads.append(thd.Thread(target=self.create_tables, args=(host, dbname, stbname+"%d"%i, count,))) threads.append(thd.Thread(target=self.create_tables, args=(host, dbname, stbname+"%d"%i, count,)))
start_time = time.time() start_time = time.time()
for tr in threads: for tr in threads:
tr.start() tr.start()
...@@ -142,7 +142,7 @@ class TDTestCase: ...@@ -142,7 +142,7 @@ class TDTestCase:
spendTime=end_time-start_time spendTime=end_time-start_time
speedCreate=threadNumbers*count/spendTime speedCreate=threadNumbers*count/spendTime
tdLog.debug("spent %.2fs to create %d stable and %d table, create speed is %.2f table/s... [OK]"% (spendTime,threadNumbers,threadNumbers*count,speedCreate)) tdLog.debug("spent %.2fs to create %d stable and %d table, create speed is %.2f table/s... [OK]"% (spendTime,threadNumbers,threadNumbers*count,speedCreate))
return return
# def create_tables(self,host,dbname,stbname,vgroups,tcountStart,tcountStop): # def create_tables(self,host,dbname,stbname,vgroups,tcountStart,tcountStop):
...@@ -169,7 +169,7 @@ class TDTestCase: ...@@ -169,7 +169,7 @@ class TDTestCase:
# print(sql) # print(sql)
tsql.execute(sql) tsql.execute(sql)
sql = "insert into %s_%d values " %(stbname,i) sql = "insert into %s_%d values " %(stbname,i)
# end sql # end sql
if sql != pre_insert: if sql != pre_insert:
# print(sql) # print(sql)
print(len(sql)) print(len(sql))
...@@ -184,7 +184,7 @@ class TDTestCase: ...@@ -184,7 +184,7 @@ class TDTestCase:
def mutiThread_insert_data(self, host, dbname, stbname, threadNumbers, chilCount, ts_start, childrowcount): def mutiThread_insert_data(self, host, dbname, stbname, threadNumbers, chilCount, ts_start, childrowcount):
buildPath = self.getBuildPath() buildPath = self.getBuildPath()
config = buildPath+ "../sim/dnode1/cfg/" config = buildPath+ "../sim/dnode1/cfg/"
tsql=self.newcur(host,config) tsql=self.newcur(host,config)
tdLog.debug("ready to insert data") tdLog.debug("ready to insert data")
...@@ -193,7 +193,7 @@ class TDTestCase: ...@@ -193,7 +193,7 @@ class TDTestCase:
threads = [] threads = []
for i in range(threadNumbers): for i in range(threadNumbers):
# tsql.execute("create stable %s%d(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)"%(stbname,i)) # tsql.execute("create stable %s%d(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)"%(stbname,i))
threads.append(thd.Thread(target=self.insert_data, args=(host, dbname, stbname+"%d"%i, chilCount, ts_start, childrowcount,))) threads.append(thd.Thread(target=self.insert_data, args=(host, dbname, stbname+"%d"%i, chilCount, ts_start, childrowcount,)))
start_time = time.time() start_time = time.time()
for tr in threads: for tr in threads:
tr.start() tr.start()
...@@ -224,10 +224,10 @@ class TDTestCase: ...@@ -224,10 +224,10 @@ class TDTestCase:
tdLog.info("taosd found in %s" % buildPath) tdLog.info("taosd found in %s" % buildPath)
taosBenchbin = buildPath+ "/build/bin/taosBenchmark" taosBenchbin = buildPath+ "/build/bin/taosBenchmark"
os.system("%s -f %s -y " %(taosBenchbin,jsonFile)) os.system("%s -f %s -y " %(taosBenchbin,jsonFile))
return return
def taosBenchCreate(self,host,dropdb,dbname,stbname,vgroups,processNumbers,count): def taosBenchCreate(self,host,dropdb,dbname,stbname,vgroups,processNumbers,count):
# count=50000 # count=50000
buildPath = self.getBuildPath() buildPath = self.getBuildPath()
config = buildPath+ "../sim/dnode1/cfg/" config = buildPath+ "../sim/dnode1/cfg/"
...@@ -241,7 +241,7 @@ class TDTestCase: ...@@ -241,7 +241,7 @@ class TDTestCase:
# tsql.getResult("show databases") # tsql.getResult("show databases")
# print(tdSql.queryResult) # print(tdSql.queryResult)
tsql.execute("use %s" %dbname) tsql.execute("use %s" %dbname)
threads = [] threads = []
for i in range(processNumbers): for i in range(processNumbers):
jsonfile="1-insert/Vgroups%d%d.json"%(vgroups,i) jsonfile="1-insert/Vgroups%d%d.json"%(vgroups,i)
...@@ -252,7 +252,7 @@ class TDTestCase: ...@@ -252,7 +252,7 @@ class TDTestCase:
os.system("sed -i 's/\"childtable_count\": 10000,/\"childtable_count\": %d,/g' %s "%(count,jsonfile)) os.system("sed -i 's/\"childtable_count\": 10000,/\"childtable_count\": %d,/g' %s "%(count,jsonfile))
os.system("sed -i 's/\"name\": \"stb1\",/\"name\": \"%s%d\",/g' %s "%(stbname,i,jsonfile)) os.system("sed -i 's/\"name\": \"stb1\",/\"name\": \"%s%d\",/g' %s "%(stbname,i,jsonfile))
os.system("sed -i 's/\"childtable_prefix\": \"stb1_\",/\"childtable_prefix\": \"%s%d_\",/g' %s "%(stbname,i,jsonfile)) os.system("sed -i 's/\"childtable_prefix\": \"stb1_\",/\"childtable_prefix\": \"%s%d_\",/g' %s "%(stbname,i,jsonfile))
threads.append(mp.Process(target=self.taosBench, args=("%s"%jsonfile,))) threads.append(mp.Process(target=self.taosBench, args=("%s"%jsonfile,)))
start_time = time.time() start_time = time.time()
for tr in threads: for tr in threads:
tr.start() tr.start()
...@@ -274,10 +274,10 @@ class TDTestCase: ...@@ -274,10 +274,10 @@ class TDTestCase:
for i in range(stableCount): for i in range(stableCount):
tdSql.query("select count(*) from %s%d"%(stbname,i)) tdSql.query("select count(*) from %s%d"%(stbname,i))
tdSql.checkData(0,0,rowsPerSTable) tdSql.checkData(0,0,rowsPerSTable)
return return
# test case1 base # test case1 base
def test_case1(self): def test_case1(self):
#stableCount=threadNumbersCtb #stableCount=threadNumbersCtb
parameterDict = {'vgroups': 1, \ parameterDict = {'vgroups': 1, \
...@@ -290,22 +290,22 @@ class TDTestCase: ...@@ -290,22 +290,22 @@ class TDTestCase:
'stbname': 'stb', \ 'stbname': 'stb', \
'host': 'localhost', \ 'host': 'localhost', \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000 'startTs': 1640966400000} # 2022-01-01 00:00:00.000
tdLog.debug("-----create database and multi-thread create tables test------- ") tdLog.debug("-----create database and multi-thread create tables test------- ")
#host,dbname,stbname,vgroups,threadNumbers,tcountStart,tcountStop #host,dbname,stbname,vgroups,threadNumbers,tcountStart,tcountStop
#host, dbname, stbname, threadNumbers, chilCount, ts_start, childrowcount #host, dbname, stbname, threadNumbers, chilCount, ts_start, childrowcount
self.mutiThread_create_tables( self.mutiThread_create_tables(
host=parameterDict['host'], host=parameterDict['host'],
dbname=parameterDict['dbname'], dbname=parameterDict['dbname'],
stbname=parameterDict['stbname'], stbname=parameterDict['stbname'],
vgroups=parameterDict['vgroups'], vgroups=parameterDict['vgroups'],
threadNumbers=parameterDict['threadNumbersCtb'], threadNumbers=parameterDict['threadNumbersCtb'],
childcount=parameterDict['tablesPerStb']) childcount=parameterDict['tablesPerStb'])
self.mutiThread_insert_data( self.mutiThread_insert_data(
host=parameterDict['host'], host=parameterDict['host'],
dbname=parameterDict['dbname'], dbname=parameterDict['dbname'],
stbname=parameterDict['stbname'], stbname=parameterDict['stbname'],
threadNumbers=parameterDict['threadNumbersIda'], threadNumbers=parameterDict['threadNumbersIda'],
chilCount=parameterDict['tablesPerStb'], chilCount=parameterDict['tablesPerStb'],
ts_start=parameterDict['startTs'], ts_start=parameterDict['startTs'],
...@@ -315,7 +315,7 @@ class TDTestCase: ...@@ -315,7 +315,7 @@ class TDTestCase:
rowsPerStable=parameterDict['rowsPerTable']*parameterDict['tablesPerStb'] rowsPerStable=parameterDict['rowsPerTable']*parameterDict['tablesPerStb']
self.checkData(dbname=parameterDict['dbname'],stbname=parameterDict['stbname'], stableCount=parameterDict['threadNumbersCtb'],CtableCount=tableCount,rowsPerSTable=rowsPerStable) self.checkData(dbname=parameterDict['dbname'],stbname=parameterDict['stbname'], stableCount=parameterDict['threadNumbersCtb'],CtableCount=tableCount,rowsPerSTable=rowsPerStable)
def test_case3(self): def test_case3(self):
#stableCount=threadNumbersCtb #stableCount=threadNumbersCtb
parameterDict = {'vgroups': 1, \ parameterDict = {'vgroups': 1, \
...@@ -327,21 +327,21 @@ class TDTestCase: ...@@ -327,21 +327,21 @@ class TDTestCase:
'stbname': 'stb1', \ 'stbname': 'stb1', \
'host': 'localhost', \ 'host': 'localhost', \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000 'startTs': 1640966400000} # 2022-01-01 00:00:00.000
self.taosBenchCreate( self.taosBenchCreate(
parameterDict['host'], parameterDict['host'],
"no", "no",
parameterDict['dbname'], parameterDict['dbname'],
parameterDict['stbname'], parameterDict['stbname'],
parameterDict['vgroups'], parameterDict['vgroups'],
parameterDict['threadNumbersCtb'], parameterDict['threadNumbersCtb'],
parameterDict['tablesPerStb']) parameterDict['tablesPerStb'])
tableCount=parameterDict['threadNumbersCtb']*parameterDict['tablesPerStb'] tableCount=parameterDict['threadNumbersCtb']*parameterDict['tablesPerStb']
rowsPerStable=parameterDict['rowsPerTable']*parameterDict['tablesPerStb'] rowsPerStable=parameterDict['rowsPerTable']*parameterDict['tablesPerStb']
self.checkData( self.checkData(
dbname=parameterDict['dbname'], dbname=parameterDict['dbname'],
stbname=parameterDict['stbname'], stbname=parameterDict['stbname'],
stableCount=parameterDict['threadNumbersCtb'], stableCount=parameterDict['threadNumbersCtb'],
CtableCount=tableCount, CtableCount=tableCount,
rowsPerSTable=rowsPerStable) rowsPerSTable=rowsPerStable)
...@@ -353,9 +353,9 @@ class TDTestCase: ...@@ -353,9 +353,9 @@ class TDTestCase:
# self.taosBenchCreate("db1", "stb1", 4, 5, 100*10000) # self.taosBenchCreate("db1", "stb1", 4, 5, 100*10000)
# self.taosBenchCreate("db1", "stb1", 1, 5, 100*10000) # self.taosBenchCreate("db1", "stb1", 1, 5, 100*10000)
return return
# run case # run case
def run(self): def run(self):
# create database and tables. # create database and tables.
...@@ -368,7 +368,7 @@ class TDTestCase: ...@@ -368,7 +368,7 @@ class TDTestCase:
return return
# #
# add case with filename # add case with filename
# #
......
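The mutiThread_create_tables and mutiThread_insert_data helpers above fan the work out over threading.Thread workers and time the batch to report a tables-per-second figure. A compact sketch of the same fan-out pattern, with illustrative names (testdb, stb0..stb3) and the assumption that the super tables were created beforehand as in the test:

import threading
import time
import taos

def create_child_tables(stb: str, count: int) -> None:
    # Each worker opens its own connection and creates child tables under its own super table.
    conn = taos.connect(host="localhost", user="root", password="taosdata")
    cur = conn.cursor()
    cur.execute("use testdb")  # database assumed to exist, with stb0..stb3 created like "create stable ... tags(t1 int)"
    for i in range(count):
        cur.execute(f"create table if not exists {stb}_{i} using {stb} tags({i})")
    conn.close()

threads = [threading.Thread(target=create_child_tables, args=(f"stb{i}", 1000)) for i in range(4)]
start = time.time()
for t in threads:
    t.start()
for t in threads:
    t.join()
elapsed = time.time() - start
print(f"created {4 * 1000} tables in {elapsed:.2f}s ({4 * 1000 / elapsed:.2f} tables/s)")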
...@@ -38,7 +38,7 @@ class TDTestCase: ...@@ -38,7 +38,7 @@ class TDTestCase:
tlist = self.genMultiThreadSeq(sql_list) tlist = self.genMultiThreadSeq(sql_list)
self.multiThreadRun(tlist) self.multiThreadRun(tlist)
tdSql.query(f'show databases') tdSql.query(f'show databases')
def stop(self): def stop(self):
tdSql.close() tdSql.close()
......
...@@ -34,14 +34,14 @@ class TDTestCase: ...@@ -34,14 +34,14 @@ class TDTestCase:
# #
# --------------- main frame ------------------- # --------------- main frame -------------------
# #
def caseDescription(self): def caseDescription(self):
''' '''
limit and offset keyword function test cases; limit and offset keyword function test cases;
case1: limit offset base function test case1: limit offset base function test
case2: offset return valid case2: offset return valid
''' '''
return return
def getBuildPath(self): def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__)) selfPath = os.path.dirname(os.path.realpath(__file__))
...@@ -68,7 +68,7 @@ class TDTestCase: ...@@ -68,7 +68,7 @@ class TDTestCase:
self.ts = 1500000000000 self.ts = 1500000000000
# run case # run case
def run(self): def run(self):
# test base case # test base case
...@@ -79,7 +79,7 @@ class TDTestCase: ...@@ -79,7 +79,7 @@ class TDTestCase:
# self.test_case2() # self.test_case2()
# tdLog.debug(" LIMIT test_case2 ............ [OK]") # tdLog.debug(" LIMIT test_case2 ............ [OK]")
# stop # stop
def stop(self): def stop(self):
tdSql.close() tdSql.close()
tdLog.success("%s successfully executed" % __file__) tdLog.success("%s successfully executed" % __file__)
...@@ -101,7 +101,7 @@ class TDTestCase: ...@@ -101,7 +101,7 @@ class TDTestCase:
tdSql.execute(sql) tdSql.execute(sql)
sql = pre_create sql = pre_create
# print(time.time()) # print(time.time())
# end sql # end sql
if sql != pre_create: if sql != pre_create:
tdSql.execute(sql) tdSql.execute(sql)
exeEndTime=time.time() exeEndTime=time.time()
...@@ -113,7 +113,7 @@ class TDTestCase: ...@@ -113,7 +113,7 @@ class TDTestCase:
def newcur(self,host,cfg): def newcur(self,host,cfg):
user = "root" user = "root"
password = "taosdata" password = "taosdata"
port =6030 port =6030
con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port)
cur=con.cursor() cur=con.cursor()
print(cur) print(cur)
...@@ -123,7 +123,7 @@ class TDTestCase: ...@@ -123,7 +123,7 @@ class TDTestCase:
host = "127.0.0.1" host = "127.0.0.1"
buildPath = self.getBuildPath() buildPath = self.getBuildPath()
config = buildPath+ "../sim/dnode1/cfg/" config = buildPath+ "../sim/dnode1/cfg/"
tsql=self.newcur(host,config) tsql=self.newcur(host,config)
tsql.execute("drop database if exists %s" %(dbname)) tsql.execute("drop database if exists %s" %(dbname))
tsql.execute("create database if not exists %s vgroups %d"%(dbname,vgroups)) tsql.execute("create database if not exists %s vgroups %d"%(dbname,vgroups))
...@@ -147,7 +147,7 @@ class TDTestCase: ...@@ -147,7 +147,7 @@ class TDTestCase:
tsql.execute(sql) tsql.execute(sql)
sql = pre_create sql = pre_create
# print(time.time()) # print(time.time())
# end sql # end sql
if sql != pre_create: if sql != pre_create:
# print(sql) # print(sql)
tsql.execute(sql) tsql.execute(sql)
...@@ -176,7 +176,7 @@ class TDTestCase: ...@@ -176,7 +176,7 @@ class TDTestCase:
# print(sql) # print(sql)
tdSql.execute(sql) tdSql.execute(sql)
sql = "insert into %s_%d values " %(stbname,i) sql = "insert into %s_%d values " %(stbname,i)
# end sql # end sql
if sql != pre_insert: if sql != pre_insert:
# print(sql) # print(sql)
tdSql.execute(sql) tdSql.execute(sql)
...@@ -189,7 +189,7 @@ class TDTestCase: ...@@ -189,7 +189,7 @@ class TDTestCase:
return return
# test case1 base # test case1 base
def test_case1(self): def test_case1(self):
tdLog.debug("-----create database and tables test------- ") tdLog.debug("-----create database and tables test------- ")
# tdSql.execute("drop database if exists db1") # tdSql.execute("drop database if exists db1")
...@@ -220,7 +220,7 @@ class TDTestCase: ...@@ -220,7 +220,7 @@ class TDTestCase:
threads = [] threads = []
threadNumbers=2 threadNumbers=2
for i in range(threadNumbers): for i in range(threadNumbers):
threads.append(mp.Process(target=self.new_create_tables, args=("db1%d"%i, vgroups, "stb1", 0,count,))) threads.append(mp.Process(target=self.new_create_tables, args=("db1%d"%i, vgroups, "stb1", 0,count,)))
start_time = time.time() start_time = time.time()
for tr in threads: for tr in threads:
tr.start() tr.start()
...@@ -247,7 +247,7 @@ class TDTestCase: ...@@ -247,7 +247,7 @@ class TDTestCase:
# tdSql.execute("create database db16 vgroups 16") # tdSql.execute("create database db16 vgroups 16")
# self.create_tables("db16", "stb16", 30*10000) # self.create_tables("db16", "stb16", 30*10000)
return return
# test case2 base:insert data # test case2 base:insert data
def test_case2(self): def test_case2(self):
...@@ -266,7 +266,7 @@ class TDTestCase: ...@@ -266,7 +266,7 @@ class TDTestCase:
tdSql.execute("create database db1 vgroups 1") tdSql.execute("create database db1 vgroups 1")
self.create_tables("db1", "stb1", 1*100) self.create_tables("db1", "stb1", 1*100)
self.insert_data("db1", "stb1", self.ts, 1*50,1*10000) self.insert_data("db1", "stb1", self.ts, 1*50,1*10000)
tdSql.execute("create database db4 vgroups 4") tdSql.execute("create database db4 vgroups 4")
self.create_tables("db4", "stb4", 1*100) self.create_tables("db4", "stb4", 1*100)
...@@ -287,7 +287,7 @@ class TDTestCase: ...@@ -287,7 +287,7 @@ class TDTestCase:
tdSql.execute("create database db16 vgroups 16") tdSql.execute("create database db16 vgroups 16")
self.create_tables("db16", "stb16", 1*100) self.create_tables("db16", "stb16", 1*100)
self.insert_data("db16", "stb16", self.ts, 1*100,1*10000) self.insert_data("db16", "stb16", self.ts, 1*100,1*10000)
return return
# #
...@@ -296,4 +296,4 @@ class TDTestCase: ...@@ -296,4 +296,4 @@ class TDTestCase:
# tdCases.addWindows(__file__, TDTestCase()) # tdCases.addWindows(__file__, TDTestCase())
# tdCases.addLinux(__file__, TDTestCase()) # tdCases.addLinux(__file__, TDTestCase())
case=TDTestCase() case=TDTestCase()
case.test_case1() case.test_case1()
\ No newline at end of file
...@@ -25,19 +25,19 @@ import threading ...@@ -25,19 +25,19 @@ import threading
import platform import platform
import io import io
if platform.system().lower() == 'windows': if platform.system().lower() == 'windows':
sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf8') sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf8')
class TDTestCase: class TDTestCase:
def init(self, conn, logSql): def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__) tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql) tdSql.init(conn.cursor(), logSql)
self._conn = conn self._conn = conn
self.smlChildTableName_value = "id" self.smlChildTableName_value = "id"
def createDb(self, name="test", db_update_tag=0, protocol=None): def createDb(self, name="test", db_update_tag=0, protocol=None):
if protocol == "telnet-tcp": if protocol == "telnet-tcp":
name = "opentsdb_telnet" name = "opentsdb_telnet"
if db_update_tag == 0: if db_update_tag == 0:
tdSql.execute(f"drop database if exists {name}") tdSql.execute(f"drop database if exists {name}")
tdSql.execute(f"create database if not exists {name} precision 'us' schemaless 1") tdSql.execute(f"create database if not exists {name} precision 'us' schemaless 1")
...@@ -66,7 +66,7 @@ class TDTestCase: ...@@ -66,7 +66,7 @@ class TDTestCase:
td_ts = time.strftime("%Y-%m-%d %H:%M:%S.{}".format(ulsec), time.localtime(ts)) td_ts = time.strftime("%Y-%m-%d %H:%M:%S.{}".format(ulsec), time.localtime(ts))
return td_ts return td_ts
#return repr(datetime.datetime.strptime(td_ts, "%Y-%m-%d %H:%M:%S.%f")) #return repr(datetime.datetime.strptime(td_ts, "%Y-%m-%d %H:%M:%S.%f"))
def dateToTs(self, datetime_input): def dateToTs(self, datetime_input):
return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f"))) return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f")))
...@@ -191,7 +191,7 @@ class TDTestCase: ...@@ -191,7 +191,7 @@ class TDTestCase:
tb_name = "" tb_name = ""
td_tag_value_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[1]) td_tag_value_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[1])
td_tag_type_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[0]) td_tag_type_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[0])
col_name_list.append('_value') col_name_list.append('_value')
col_value_list.append(stb_col_value) col_value_list.append(stb_col_value)
...@@ -218,7 +218,7 @@ class TDTestCase: ...@@ -218,7 +218,7 @@ class TDTestCase:
t4="9223372036854775807i64", t5="11.12345f32", t6="22.123456789f64", t7="\"binaryTagValue\"", t4="9223372036854775807i64", t5="11.12345f32", t6="22.123456789f64", t7="\"binaryTagValue\"",
t8="L\"ncharTagValue\"", ts="1626006833641", t8="L\"ncharTagValue\"", ts="1626006833641",
id_noexist_tag=None, id_change_tag=None, id_upper_tag=None, id_mixul_tag=None, id_double_tag=None, id_noexist_tag=None, id_change_tag=None, id_upper_tag=None, id_mixul_tag=None, id_double_tag=None,
t_add_tag=None, t_mul_tag=None, c_multi_tag=None, c_blank_tag=None, t_blank_tag=None, t_add_tag=None, t_mul_tag=None, c_multi_tag=None, c_blank_tag=None, t_blank_tag=None,
chinese_tag=None, multi_field_tag=None, point_trans_tag=None, protocol=None, tcp_keyword_tag=None): chinese_tag=None, multi_field_tag=None, point_trans_tag=None, protocol=None, tcp_keyword_tag=None):
if stb_name == "": if stb_name == "":
stb_name = tdCom.getLongName(len=6, mode="letters") stb_name = tdCom.getLongName(len=6, mode="letters")
...@@ -268,7 +268,7 @@ class TDTestCase: ...@@ -268,7 +268,7 @@ class TDTestCase:
if protocol == "telnet-tcp": if protocol == "telnet-tcp":
sql_seq = 'put ' + sql_seq + '\n' sql_seq = 'put ' + sql_seq + '\n'
return sql_seq, stb_name return sql_seq, stb_name
def genMulTagColStr(self, genType, count=1): def genMulTagColStr(self, genType, count=1):
""" """
genType must be tag/col genType must be tag/col
...@@ -365,10 +365,10 @@ class TDTestCase: ...@@ -365,10 +365,10 @@ class TDTestCase:
for t_type in full_type_list: for t_type in full_type_list:
input_sql, stb_name = self.genFullTypeSql(t0=t_type, protocol=protocol) input_sql, stb_name = self.genFullTypeSql(t0=t_type, protocol=protocol)
self.resCmp(input_sql, stb_name, protocol=protocol) self.resCmp(input_sql, stb_name, protocol=protocol)
def symbolsCheckCase(self, protocol=None): def symbolsCheckCase(self, protocol=None):
""" """
check symbols = `~!@#$%^&*()_-+={[}]\|:;'\",<.>/? check symbols = `~!@#$%^&*()_-+={[}]\|:;'\",<.>/?
""" """
''' '''
please test : please test :
...@@ -424,7 +424,7 @@ class TDTestCase: ...@@ -424,7 +424,7 @@ class TDTestCase:
raise Exception("should not reach here") raise Exception("should not reach here")
except SchemalessError as err: except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0) tdSql.checkNotEqual(err.errno, 0)
def idSeqCheckCase(self, protocol=None): def idSeqCheckCase(self, protocol=None):
""" """
check id.index in tags check id.index in tags
...@@ -434,7 +434,7 @@ class TDTestCase: ...@@ -434,7 +434,7 @@ class TDTestCase:
tdCom.cleanTb() tdCom.cleanTb()
input_sql, stb_name = self.genFullTypeSql(id_change_tag=True, protocol=protocol) input_sql, stb_name = self.genFullTypeSql(id_change_tag=True, protocol=protocol)
self.resCmp(input_sql, stb_name, protocol=protocol) self.resCmp(input_sql, stb_name, protocol=protocol)
def idLetterCheckCase(self, protocol=None): def idLetterCheckCase(self, protocol=None):
""" """
check id param check id param
...@@ -527,7 +527,7 @@ class TDTestCase: ...@@ -527,7 +527,7 @@ class TDTestCase:
raise Exception("should not reach here") raise Exception("should not reach here")
except SchemalessError as err: except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0) tdSql.checkNotEqual(err.errno, 0)
def illegalTsCheckCase(self): def illegalTsCheckCase(self):
""" """
check ts format like 16260068336390us19 check ts format like 16260068336390us19
...@@ -592,7 +592,7 @@ class TDTestCase: ...@@ -592,7 +592,7 @@ class TDTestCase:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
raise Exception("should not reach here") raise Exception("should not reach here")
except SchemalessError as err: except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0) tdSql.checkNotEqual(err.errno, 0)
def tagValueLengthCheckCase(self): def tagValueLengthCheckCase(self):
""" """
...@@ -673,7 +673,7 @@ class TDTestCase: ...@@ -673,7 +673,7 @@ class TDTestCase:
except SchemalessError as err: except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0) tdSql.checkNotEqual(err.errno, 0)
# f32 # f32
tdCom.cleanTb() tdCom.cleanTb()
for value in [f"{-3.4028234663852885981170418348451692544*(10**38)}f32", f"{3.4028234663852885981170418348451692544*(10**38)}f32"]: for value in [f"{-3.4028234663852885981170418348451692544*(10**38)}f32", f"{3.4028234663852885981170418348451692544*(10**38)}f32"]:
input_sql, stb_name = self.genFullTypeSql(value=value) input_sql, stb_name = self.genFullTypeSql(value=value)
...@@ -703,12 +703,12 @@ class TDTestCase: ...@@ -703,12 +703,12 @@ class TDTestCase:
# except SchemalessError as err: # except SchemalessError as err:
# tdSql.checkNotEqual(err.errno, 0) # tdSql.checkNotEqual(err.errno, 0)
# # # binary # # # binary
# tdCom.cleanTb() # tdCom.cleanTb()
# stb_name = tdCom.getLongName(7, "letters") # stb_name = tdCom.getLongName(7, "letters")
# input_sql = f'{stb_name} 1626006833640 "{tdCom.getLongName(16374, "letters")}" t0=t' # input_sql = f'{stb_name} 1626006833640 "{tdCom.getLongName(16374, "letters")}" t0=t'
# self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
# tdCom.cleanTb() # tdCom.cleanTb()
# input_sql = f'{stb_name} 1626006833640 "{tdCom.getLongName(16375, "letters")}" t0=t' # input_sql = f'{stb_name} 1626006833640 "{tdCom.getLongName(16375, "letters")}" t0=t'
# try: # try:
...@@ -748,12 +748,12 @@ class TDTestCase: ...@@ -748,12 +748,12 @@ class TDTestCase:
# i8 i16 i32 i64 f32 f64 # i8 i16 i32 i64 f32 f64
for input_sql in [ for input_sql in [
self.genFullTypeSql(value="1s2i8")[0], self.genFullTypeSql(value="1s2i8")[0],
self.genFullTypeSql(value="1s2i16")[0], self.genFullTypeSql(value="1s2i16")[0],
self.genFullTypeSql(value="1s2i32")[0], self.genFullTypeSql(value="1s2i32")[0],
self.genFullTypeSql(value="1s2i64")[0], self.genFullTypeSql(value="1s2i64")[0],
self.genFullTypeSql(value="11.1s45f32")[0], self.genFullTypeSql(value="11.1s45f32")[0],
self.genFullTypeSql(value="11.1s45f64")[0], self.genFullTypeSql(value="11.1s45f64")[0],
]: ]:
try: try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
...@@ -761,14 +761,14 @@ class TDTestCase: ...@@ -761,14 +761,14 @@ class TDTestCase:
except SchemalessError as err: except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0) tdSql.checkNotEqual(err.errno, 0)
# check accepted binary and nchar symbols # check accepted binary and nchar symbols
# # * ~!@#$¥%^&*()-+={}|[]、「」:; # # * ~!@#$¥%^&*()-+={}|[]、「」:;
for symbol in list('~!@#$¥%^&*()-+={}|[]、「」:;'): for symbol in list('~!@#$¥%^&*()-+={}|[]、「」:;'):
input_sql1 = f'{tdCom.getLongName(7, "letters")} 1626006833640 "abc{symbol}aaa" t0=t' input_sql1 = f'{tdCom.getLongName(7, "letters")} 1626006833640 "abc{symbol}aaa" t0=t'
input_sql2 = f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0=t t1="abc{symbol}aaa"' input_sql2 = f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0=t t1="abc{symbol}aaa"'
self._conn.schemaless_insert([input_sql1], TDSmlProtocolType.TELNET.value, None) self._conn.schemaless_insert([input_sql1], TDSmlProtocolType.TELNET.value, None)
# self._conn.schemaless_insert([input_sql2], TDSmlProtocolType.TELNET.value, None) # self._conn.schemaless_insert([input_sql2], TDSmlProtocolType.TELNET.value, None)
def blankCheckCase(self): def blankCheckCase(self):
''' '''
check blank case check blank case
...@@ -853,7 +853,7 @@ class TDTestCase: ...@@ -853,7 +853,7 @@ class TDTestCase:
check tag count add, stb and tb duplicate check tag count add, stb and tb duplicate
* tag: alter table ... * tag: alter table ...
* col: when update==0 and ts is same, unchanged * col: when update==0 and ts is same, unchanged
* so this case tag&&value will be added, * so this case tag&&value will be added,
* col is added without value when update==0 * col is added without value when update==0
* col is added with value when update==1 * col is added with value when update==1
""" """
...@@ -869,14 +869,14 @@ class TDTestCase: ...@@ -869,14 +869,14 @@ class TDTestCase:
if db_update_tag == 1 : if db_update_tag == 1 :
self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"', none_check_tag=True) self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"', none_check_tag=True)
tdSql.query(f'select * from {stb_name} where tbname like "{tb_name}"') tdSql.query(f'select * from {stb_name} where tbname like "{tb_name}"')
tdSql.checkData(0, 11, None) tdSql.checkData(0, 11, None)
tdSql.checkData(0, 12, None) tdSql.checkData(0, 12, None)
else: else:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
tdSql.query(f'select * from {stb_name} where tbname like "{tb_name}"') tdSql.query(f'select * from {stb_name} where tbname like "{tb_name}"')
tdSql.checkData(0, 1, True) tdSql.checkData(0, 1, True)
tdSql.checkData(0, 11, None) tdSql.checkData(0, 11, None)
tdSql.checkData(0, 12, None) tdSql.checkData(0, 12, None)
self.createDb() self.createDb()
@tdCom.smlPass @tdCom.smlPass
...@@ -952,7 +952,7 @@ class TDTestCase: ...@@ -952,7 +952,7 @@ class TDTestCase:
tdCom.cleanTb() tdCom.cleanTb()
stb_name = tdCom.getLongName(8, "letters") stb_name = tdCom.getLongName(8, "letters")
tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)') tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)')
lines = ["st123456 1626006833640 1i64 t1=3i64 t2=4f64 t3=\"t3\"", lines = ["st123456 1626006833640 1i64 t1=3i64 t2=4f64 t3=\"t3\"",
"st123456 1626006833641 2i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64", "st123456 1626006833641 2i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64",
f'{stb_name} 1626006833642 3i64 t2=5f64 t3=L\"ste\"', f'{stb_name} 1626006833642 3i64 t2=5f64 t3=L\"ste\"',
...@@ -970,7 +970,7 @@ class TDTestCase: ...@@ -970,7 +970,7 @@ class TDTestCase:
tdSql.checkRows(6) tdSql.checkRows(6)
tdSql.query('select * from st123456') tdSql.query('select * from st123456')
tdSql.checkRows(5) tdSql.checkRows(5)
def multiInsertCheckCase(self, count): def multiInsertCheckCase(self, count):
""" """
test multi insert test multi insert
...@@ -1014,7 +1014,7 @@ class TDTestCase: ...@@ -1014,7 +1014,7 @@ class TDTestCase:
raise Exception("should not reach here") raise Exception("should not reach here")
except SchemalessError as err: except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0) tdSql.checkNotEqual(err.errno, 0)
def blankColInsertCheckCase(self): def blankColInsertCheckCase(self):
""" """
test blank col insert test blank col insert
...@@ -1040,7 +1040,7 @@ class TDTestCase: ...@@ -1040,7 +1040,7 @@ class TDTestCase:
raise Exception("should not reach here") raise Exception("should not reach here")
except SchemalessError as err: except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0) tdSql.checkNotEqual(err.errno, 0)
def chineseCheckCase(self): def chineseCheckCase(self):
""" """
check nchar ---> chinese check nchar ---> chinese
...@@ -1210,7 +1210,7 @@ class TDTestCase: ...@@ -1210,7 +1210,7 @@ class TDTestCase:
self.multiThreadRun(self.genMultiThreadSeq(input_sql)) self.multiThreadRun(self.genMultiThreadSeq(input_sql))
tdSql.query(f"show tables;") tdSql.query(f"show tables;")
tdSql.checkRows(5) tdSql.checkRows(5)
def sStbStbDdataInsertMultiThreadCheckCase(self): def sStbStbDdataInsertMultiThreadCheckCase(self):
""" """
thread input same stb tb, different data, result keep first data thread input same stb tb, different data, result keep first data
...@@ -1248,7 +1248,7 @@ class TDTestCase: ...@@ -1248,7 +1248,7 @@ class TDTestCase:
tdSql.checkEqual(tb_name, expected_tb_name) tdSql.checkEqual(tb_name, expected_tb_name)
tdSql.query(f"select * from {stb_name};") tdSql.query(f"select * from {stb_name};")
tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6) tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6)
def sStbStbDdataMtInsertMultiThreadCheckCase(self): def sStbStbDdataMtInsertMultiThreadCheckCase(self):
""" """
thread input same stb tb, different data, minus columns and tags, result keep first data thread input same stb tb, different data, minus columns and tags, result keep first data
...@@ -1466,7 +1466,7 @@ class TDTestCase: ...@@ -1466,7 +1466,7 @@ class TDTestCase:
def run(self): def run(self):
print("running {}".format(__file__)) print("running {}".format(__file__))
try: try:
self.createDb() self.createDb()
self.runAll() self.runAll()
......
...@@ -42,7 +42,7 @@ class TDTestCase: ...@@ -42,7 +42,7 @@ class TDTestCase:
self.comment_flag_list = [True,False] self.comment_flag_list = [True,False]
def __set_and_alter_comment(self,tb_type='',comment_flag= False): def __set_and_alter_comment(self,tb_type='',comment_flag= False):
column_sql = '' column_sql = ''
tag_sql = '' tag_sql = ''
for k,v in self.column_dict.items(): for k,v in self.column_dict.items():
...@@ -78,7 +78,7 @@ class TDTestCase: ...@@ -78,7 +78,7 @@ class TDTestCase:
tdSql.execute(f'create {operation} {self.stbname} ({column_sql[:-1]}) tags({tag_sql[:-1]}) comment "{comment_info}"') tdSql.execute(f'create {operation} {self.stbname} ({column_sql[:-1]}) tags({tag_sql[:-1]}) comment "{comment_info}"')
self.check_comment_info(comment_info,'stable') self.check_comment_info(comment_info,'stable')
self.alter_comment(self.stbname,'stable') self.alter_comment(self.stbname,'stable')
tdSql.execute(f'drop table {self.stbname}') tdSql.execute(f'drop table {self.stbname}')
elif tb_type == 'child_table': elif tb_type == 'child_table':
tdSql.execute(f'create table if not exists {self.stbname} ({column_sql[:-1]}) tags({tag_sql[:-1]})') tdSql.execute(f'create table if not exists {self.stbname} ({column_sql[:-1]}) tags({tag_sql[:-1]})')
if comment_flag == False: if comment_flag == False:
...@@ -122,7 +122,7 @@ class TDTestCase: ...@@ -122,7 +122,7 @@ class TDTestCase:
for flag in comment_flag: for flag in comment_flag:
self.__set_and_alter_comment(tb,flag) self.__set_and_alter_comment(tb,flag)
tdSql.execute('drop database db') tdSql.execute('drop database db')
def run(self): def run(self):
self.comment_check_case(self.table_type_list,self.comment_flag_list) self.comment_check_case(self.table_type_list,self.comment_flag_list)
...@@ -131,4 +131,4 @@ class TDTestCase: ...@@ -131,4 +131,4 @@ class TDTestCase:
tdLog.success("%s successfully executed" % __file__) tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase()) tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase()) tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
...@@ -20,7 +20,7 @@ class TDTestCase: ...@@ -20,7 +20,7 @@ class TDTestCase:
updatecfgDict = {'ttlUnit':5,'ttlPushInterval':3} updatecfgDict = {'ttlUnit':5,'ttlPushInterval':3}
def init(self, conn, logSql): def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__) tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor()) tdSql.init(conn.cursor())
self.ntbname = 'ntb' self.ntbname = 'ntb'
self.stbname = 'stb' self.stbname = 'stb'
self.tbnum = 10 self.tbnum = 10
......
...@@ -38,7 +38,7 @@ class TDTestCase: ...@@ -38,7 +38,7 @@ class TDTestCase:
case1: limit offset base function test case1: limit offset base function test
case2: offset return valid case2: offset return valid
''' '''
return return
def getBuildPath(self): def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__)) selfPath = os.path.dirname(os.path.realpath(__file__))
...@@ -64,7 +64,7 @@ class TDTestCase: ...@@ -64,7 +64,7 @@ class TDTestCase:
# self.create_tables(); # self.create_tables();
self.ts = 1500000000000 self.ts = 1500000000000
# stop # stop
def stop(self): def stop(self):
tdSql.close() tdSql.close()
tdLog.success("%s successfully executed" % __file__) tdLog.success("%s successfully executed" % __file__)
...@@ -76,7 +76,7 @@ class TDTestCase: ...@@ -76,7 +76,7 @@ class TDTestCase:
def newcon(self,host,cfg): def newcon(self,host,cfg):
user = "root" user = "root"
password = "taosdata" password = "taosdata"
port =6030 port =6030
con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port)
print(con) print(con)
return con return con
...@@ -126,14 +126,14 @@ class TDTestCase: ...@@ -126,14 +126,14 @@ class TDTestCase:
end = datetime.now() end = datetime.now()
print("elapsed time: ", end - start) print("elapsed time: ", end - start)
assert stmt.affected_rows == 3 assert stmt.affected_rows == 3
#query 1 #query 1
querystmt=conn.statement("select ?,bu from stb1") querystmt=conn.statement("select ?,bu from stb1")
queryparam=new_bind_params(1) queryparam=new_bind_params(1)
print(type(queryparam)) print(type(queryparam))
queryparam[0].binary("ts") queryparam[0].binary("ts")
querystmt.bind_param(queryparam) querystmt.bind_param(queryparam)
querystmt.execute() querystmt.execute()
result=querystmt.use_result() result=querystmt.use_result()
# rows=result.fetch_all() # rows=result.fetch_all()
# print( querystmt.use_result()) # print( querystmt.use_result())
...@@ -152,7 +152,7 @@ class TDTestCase: ...@@ -152,7 +152,7 @@ class TDTestCase:
print(type(queryparam1)) print(type(queryparam1))
queryparam1[0].int(4) queryparam1[0].int(4)
querystmt1.bind_param(queryparam1) querystmt1.bind_param(queryparam1)
querystmt1.execute() querystmt1.execute()
result1=querystmt1.use_result() result1=querystmt1.use_result()
rows1=result1.fetch_all() rows1=result1.fetch_all()
print(rows1) print(rows1)
...@@ -176,10 +176,10 @@ class TDTestCase: ...@@ -176,10 +176,10 @@ class TDTestCase:
host="localhost" host="localhost"
connectstmt=self.newcon(host,config) connectstmt=self.newcon(host,config)
self.test_stmt_insert_multi(connectstmt) self.test_stmt_insert_multi(connectstmt)
return return
# add case with filename # add case with filename
# #
tdCases.addWindows(__file__, TDTestCase()) tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase()) tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
...@@ -38,7 +38,7 @@ class TDTestCase: ...@@ -38,7 +38,7 @@ class TDTestCase:
case1: limit offset base function test case1: limit offset base function test
case2: offset return valid case2: offset return valid
''' '''
return return
def getBuildPath(self): def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__)) selfPath = os.path.dirname(os.path.realpath(__file__))
...@@ -64,7 +64,7 @@ class TDTestCase: ...@@ -64,7 +64,7 @@ class TDTestCase:
# self.create_tables(); # self.create_tables();
self.ts = 1500000000000 self.ts = 1500000000000
# stop # stop
def stop(self): def stop(self):
tdSql.close() tdSql.close()
tdLog.success("%s successfully executed" % __file__) tdLog.success("%s successfully executed" % __file__)
...@@ -76,7 +76,7 @@ class TDTestCase: ...@@ -76,7 +76,7 @@ class TDTestCase:
def newcon(self,host,cfg): def newcon(self,host,cfg):
user = "root" user = "root"
password = "taosdata" password = "taosdata"
port =6030 port =6030
con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port)
tdLog.debug(con) tdLog.debug(con)
return con return con
...@@ -84,7 +84,7 @@ class TDTestCase: ...@@ -84,7 +84,7 @@ class TDTestCase:
def stmtExe(self,conn,sql,bindStat): def stmtExe(self,conn,sql,bindStat):
queryStat=conn.statement("%s"%sql) queryStat=conn.statement("%s"%sql)
queryStat.bind_param(bindStat) queryStat.bind_param(bindStat)
queryStat.execute() queryStat.execute()
result=queryStat.use_result() result=queryStat.use_result()
rows=result.fetch_all() rows=result.fetch_all()
return rows return rows
...@@ -101,7 +101,7 @@ class TDTestCase: ...@@ -101,7 +101,7 @@ class TDTestCase:
ff float, dd double, bb binary(100), nn nchar(100), tt timestamp , vc varchar(100)) tags (t1 timestamp, t2 bool,\ ff float, dd double, bb binary(100), nn nchar(100), tt timestamp , vc varchar(100)) tags (t1 timestamp, t2 bool,\
t3 tinyint, t4 tinyint, t5 smallint, t6 int, t7 bigint, t8 tinyint unsigned, t9 smallint unsigned, \ t3 tinyint, t4 tinyint, t5 smallint, t6 int, t7 bigint, t8 tinyint unsigned, t9 smallint unsigned, \
t10 int unsigned, t11 bigint unsigned, t12 float, t13 double, t14 binary(100), t15 nchar(100), t16 timestamp)"%stablename) t10 int unsigned, t11 bigint unsigned, t12 float, t13 double, t14 binary(100), t15 nchar(100), t16 timestamp)"%stablename)
stmt = conn.statement("insert into ? using log tags (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?) \ stmt = conn.statement("insert into ? using log tags (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?) \
values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)") values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
tags = new_bind_params(16) tags = new_bind_params(16)
...@@ -140,13 +140,13 @@ class TDTestCase: ...@@ -140,13 +140,13 @@ class TDTestCase:
params[14].nchar(["涛思数据", None, "a long string with 中文?字符"]) params[14].nchar(["涛思数据", None, "a long string with 中文?字符"])
params[15].timestamp([None, None, 1626861392591]) params[15].timestamp([None, None, 1626861392591])
params[16].binary(["涛思数据16", None, None]) params[16].binary(["涛思数据16", None, None])
stmt.bind_param_batch(params) stmt.bind_param_batch(params)
stmt.execute() stmt.execute()
assert stmt.affected_rows == 3 assert stmt.affected_rows == 3
#query all #query all
queryparam=new_bind_params(1) queryparam=new_bind_params(1)
queryparam[0].int(10) queryparam[0].int(10)
rows=self.stmtExe(conn,"select * from log where bu < ?",queryparam) rows=self.stmtExe(conn,"select * from log where bu < ?",queryparam)
...@@ -189,7 +189,7 @@ class TDTestCase: ...@@ -189,7 +189,7 @@ class TDTestCase:
#query: conversion Functions #query: conversion Functions
queryparam=new_bind_params(1) queryparam=new_bind_params(1)
queryparam[0].binary('1232a') queryparam[0].binary('1232a')
rows=self.stmtExe(conn,"select cast( ? as bigint) from log",queryparam) rows=self.stmtExe(conn,"select cast( ? as bigint) from log",queryparam)
tdLog.debug("assert 5th case %s"%rows) tdLog.debug("assert 5th case %s"%rows)
assert rows[0][0] == 1232, '5th.1 case is failed' assert rows[0][0] == 1232, '5th.1 case is failed'
...@@ -210,7 +210,7 @@ class TDTestCase: ...@@ -210,7 +210,7 @@ class TDTestCase:
tdLog.debug("assert 7th case %s"%rows) tdLog.debug("assert 7th case %s"%rows)
assert rows[0][0] == 1, '7th case is failed' assert rows[0][0] == 1, '7th case is failed'
assert rows[1][0] == 1, '7th case is failed' assert rows[1][0] == 1, '7th case is failed'
#query: aggregate Functions #query: aggregate Functions
queryparam=new_bind_params(1) queryparam=new_bind_params(1)
queryparam[0].int(123) queryparam[0].int(123)
...@@ -238,7 +238,7 @@ class TDTestCase: ...@@ -238,7 +238,7 @@ class TDTestCase:
# conn.execute("drop database if exists %s" % dbname) # conn.execute("drop database if exists %s" % dbname)
conn.close() conn.close()
except Exception as err: except Exception as err:
# conn.execute("drop database if exists %s" % dbname) # conn.execute("drop database if exists %s" % dbname)
conn.close() conn.close()
...@@ -251,10 +251,10 @@ class TDTestCase: ...@@ -251,10 +251,10 @@ class TDTestCase:
connectstmt=self.newcon(host,config) connectstmt=self.newcon(host,config)
self.test_stmt_set_tbname_tag(connectstmt) self.test_stmt_set_tbname_tag(connectstmt)
return return
# add case with filename # add case with filename
# #
tdCases.addWindows(__file__, TDTestCase()) tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase()) tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
...@@ -47,7 +47,7 @@ class TDTestCase: ...@@ -47,7 +47,7 @@ class TDTestCase:
'col13': f'nchar({self.str_length})', 'col13': f'nchar({self.str_length})',
'col_ts' : 'timestamp' 'col_ts' : 'timestamp'
} }
def data_check(self,tbname,col_name,col_type,value): def data_check(self,tbname,col_name,col_type,value):
tdSql.query(f'select {col_name} from {tbname}') tdSql.query(f'select {col_name} from {tbname}')
if col_type.lower() == 'float' or col_type.lower() == 'double': if col_type.lower() == 'float' or col_type.lower() == 'double':
...@@ -121,9 +121,9 @@ class TDTestCase: ...@@ -121,9 +121,9 @@ class TDTestCase:
tdSql.error(f'insert into {stbname} values({self.ts},{error_value})') tdSql.error(f'insert into {stbname} values({self.ts},{error_value})')
elif col_type.lower() == 'tinyint unsigned': elif col_type.lower() == 'tinyint unsigned':
for error_value in [constant.TINYINT_UN_MIN-1,constant.TINYINT_UN_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]: for error_value in [constant.TINYINT_UN_MIN-1,constant.TINYINT_UN_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]:
tdSql.error(f'insert into {tbname} values({self.ts},{error_value})') tdSql.error(f'insert into {tbname} values({self.ts},{error_value})')
if tb_type == 'ctb': if tb_type == 'ctb':
tdSql.error(f'insert into {stbname} values({self.ts},{error_value})') tdSql.error(f'insert into {stbname} values({self.ts},{error_value})')
elif col_type.lower() == 'smallint unsigned': elif col_type.lower() == 'smallint unsigned':
for error_value in [constant.SMALLINT_UN_MIN-1,constant.SMALLINT_UN_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]: for error_value in [constant.SMALLINT_UN_MIN-1,constant.SMALLINT_UN_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]:
tdSql.error(f'insert into {tbname} values({self.ts},{error_value})') tdSql.error(f'insert into {tbname} values({self.ts},{error_value})')
...@@ -136,9 +136,9 @@ class TDTestCase: ...@@ -136,9 +136,9 @@ class TDTestCase:
tdSql.error(f'insert into {stbname} values({self.ts},{error_value})') tdSql.error(f'insert into {stbname} values({self.ts},{error_value})')
elif col_type.lower() == 'bigint unsigned': elif col_type.lower() == 'bigint unsigned':
for error_value in [constant.BIGINT_UN_MIN-1,constant.BIGINT_UN_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]: for error_value in [constant.BIGINT_UN_MIN-1,constant.BIGINT_UN_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]:
tdSql.error(f'insert into {tbname} values({self.ts},{error_value})') tdSql.error(f'insert into {tbname} values({self.ts},{error_value})')
if tb_type == 'ctb': if tb_type == 'ctb':
tdSql.error(f'insert into {stbname} values({self.ts},{error_value})') tdSql.error(f'insert into {stbname} values({self.ts},{error_value})')
tdSql.execute(f'drop table {tbname}') tdSql.execute(f'drop table {tbname}')
if tb_type == 'ctb': if tb_type == 'ctb':
tdSql.execute(f'drop table {stbname}') tdSql.execute(f'drop table {stbname}')
...@@ -182,9 +182,9 @@ class TDTestCase: ...@@ -182,9 +182,9 @@ class TDTestCase:
elif col_type.lower() == 'bigint unsigned': elif col_type.lower() == 'bigint unsigned':
self.update_and_check_data(tbname,col_name,col_type,up_unbigint,dbname) self.update_and_check_data(tbname,col_name,col_type,up_unbigint,dbname)
elif col_type.lower() == 'bool': elif col_type.lower() == 'bool':
self.update_and_check_data(tbname,col_name,col_type,up_bool,dbname) self.update_and_check_data(tbname,col_name,col_type,up_bool,dbname)
elif col_type.lower() == 'float': elif col_type.lower() == 'float':
self.update_and_check_data(tbname,col_name,col_type,up_float,dbname) self.update_and_check_data(tbname,col_name,col_type,up_float,dbname)
elif col_type.lower() == 'double': elif col_type.lower() == 'double':
self.update_and_check_data(tbname,col_name,col_type,up_double,dbname) self.update_and_check_data(tbname,col_name,col_type,up_double,dbname)
elif 'binary' in col_type.lower(): elif 'binary' in col_type.lower():
...@@ -248,10 +248,10 @@ class TDTestCase: ...@@ -248,10 +248,10 @@ class TDTestCase:
self.update_check() self.update_check()
self.update_check_error() self.update_check_error()
# i+=1 # i+=1
def stop(self): def stop(self):
tdSql.close() tdSql.close()
tdLog.success("%s successfully executed" % __file__) tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase()) tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase()) tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
...@@ -87,7 +87,7 @@ class TDTestCase: ...@@ -87,7 +87,7 @@ class TDTestCase:
sql += f'({self.ts+i},{values})' sql += f'({self.ts+i},{values})'
sql += ' ' sql += ' '
tdSql.execute(sql) tdSql.execute(sql)
def insert_data(self,col_type,tbname,rows,data): def insert_data(self,col_type,tbname,rows,data):
for i in range(rows): for i in range(rows):
if col_type.lower() == 'tinyint': if col_type.lower() == 'tinyint':
...@@ -107,16 +107,16 @@ class TDTestCase: ...@@ -107,16 +107,16 @@ class TDTestCase:
elif col_type.lower() == 'bigint unsigned': elif col_type.lower() == 'bigint unsigned':
tdSql.execute(f'insert into {tbname} values({self.ts+i},{data["bigint unsigned"]})') tdSql.execute(f'insert into {tbname} values({self.ts+i},{data["bigint unsigned"]})')
elif col_type.lower() == 'bool': elif col_type.lower() == 'bool':
tdSql.execute(f'insert into {tbname} values({self.ts+i},{data["bool"]})') tdSql.execute(f'insert into {tbname} values({self.ts+i},{data["bool"]})')
elif col_type.lower() == 'float': elif col_type.lower() == 'float':
tdSql.execute(f'insert into {tbname} values({self.ts+i},{data["float"]})') tdSql.execute(f'insert into {tbname} values({self.ts+i},{data["float"]})')
elif col_type.lower() == 'double': elif col_type.lower() == 'double':
tdSql.execute(f'insert into {tbname} values({self.ts+i},{data["double"]})') tdSql.execute(f'insert into {tbname} values({self.ts+i},{data["double"]})')
elif 'binary' in col_type.lower(): elif 'binary' in col_type.lower():
tdSql.execute(f'''insert into {tbname} values({self.ts+i},"{data['binary']}")''') tdSql.execute(f'''insert into {tbname} values({self.ts+i},"{data['binary']}")''')
elif 'nchar' in col_type.lower(): elif 'nchar' in col_type.lower():
tdSql.execute(f'''insert into {tbname} values({self.ts+i},"{data['nchar']}")''') tdSql.execute(f'''insert into {tbname} values({self.ts+i},"{data['nchar']}")''')
def data_check(self,dbname,tbname,tbnum,rownum,data,col_name,col_type): def data_check(self,dbname,tbname,tbnum,rownum,data,col_name,col_type):
if 'binary' in col_type.lower(): if 'binary' in col_type.lower():
self.update_data(dbname,f'{tbname}',tbnum,rownum,data['binary'],col_type) self.update_data(dbname,f'{tbname}',tbnum,rownum,data['binary'],col_type)
...@@ -170,10 +170,10 @@ class TDTestCase: ...@@ -170,10 +170,10 @@ class TDTestCase:
self.update_data_ntb() self.update_data_ntb()
self.update_data_ctb() self.update_data_ctb()
def stop(self): def stop(self):
tdSql.close() tdSql.close()
tdLog.success("%s successfully executed" % __file__) tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase()) tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase()) tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
...@@ -141,8 +141,8 @@ class TDTestCase: ...@@ -141,8 +141,8 @@ class TDTestCase:
query_data = tdSql.queryResult query_data = tdSql.queryResult
# nest query for support max # nest query for support max
tdSql.query(f"select apercentile(c2+2,10)+1 from (select max(c1) c2 from {dbname}.stb1)") #tdSql.query(f"select apercentile(c2+2,10)+1 from (select max(c1) c2 from {dbname}.stb1)")
tdSql.checkData(0,0,31.000000000) #tdSql.checkData(0,0,31.000000000)
tdSql.query(f"select apercentile(c1+2,10)+1 as c2 from (select ts ,c1 ,c2 from {dbname}.stb1)") tdSql.query(f"select apercentile(c1+2,10)+1 as c2 from (select ts ,c1 ,c2 from {dbname}.stb1)")
tdSql.checkData(0,0,7.560701700) tdSql.checkData(0,0,7.560701700)
tdSql.query(f"select apercentile(a+2,10)+1 as c2 from (select ts ,abs(c1) a ,c2 from {dbname}.stb1)") tdSql.query(f"select apercentile(a+2,10)+1 as c2 from (select ts ,abs(c1) a ,c2 from {dbname}.stb1)")
......
...@@ -175,7 +175,7 @@ python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py -N 5 -M ...@@ -175,7 +175,7 @@ python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py -N 5 -M
# python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py -N 5 -M 3 # python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py -N 5 -M 3 python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py -N 5 -M 3
# python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py -N 5 -M 3 python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py -N 5 -M 3 python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py -N 5 -M 3 python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py -N 5 -M 3
......