diff --git a/.gitmodules b/.gitmodules index 7c84eac8a4ee7529005855bc836387561c49ae2d..156226d54486c17e64b9c514e47e3a7dc3fe6942 100644 --- a/.gitmodules +++ b/.gitmodules @@ -4,3 +4,9 @@ [submodule "src/connector/grafanaplugin"] path = src/connector/grafanaplugin url = https://github.com/taosdata/grafanaplugin +[submodule "tests/examples/rust"] + path = tests/examples/rust + url = https://github.com/songtianyi/tdengine-rust-bindings.git +[submodule "src/connector/hivemq-tdengine-extension"] + path = src/connector/hivemq-tdengine-extension + url = https://github.com/huskar-t/hivemq-tdengine-extension.git \ No newline at end of file diff --git a/Jenkinsfile b/Jenkinsfile index ea50d6ef5a13fed2114868f8a36cd0acace78dd5..dc7836c3daacaa457f721f9278687b99770fc394 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -27,6 +27,7 @@ pipeline { cd debug cmake .. > /dev/null make > /dev/null + make install > /dev/null cd ${WKC}/tests #./test-all.sh smoke ./test-all.sh pytest @@ -79,7 +80,20 @@ pipeline { cmake .. > /dev/null make > /dev/null cd ${WKC}/tests/pytest - ./crash_gen.sh -a -p -t 4 -s 2000 + ''' + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + cd ${WKC}/tests/pytest + ./crash_gen.sh -a -p -t 4 -s 2000 + ''' + } + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + cd ${WKC}/tests/pytest + ./handle_crash_gen_val_log.sh + ''' + } + sh ''' date cd ${WKC}/tests ./test-all.sh b2 @@ -124,14 +138,33 @@ pipeline { sh''' cd ${WORKSPACE} git checkout develop - cd tests/gotest - bash batchtest.sh - cd ${WORKSPACE}/tests/examples/JDBC/JDBCDemo/ - mvn clean package assembly:single >/dev/null - java -jar target/jdbcChecker-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1 - cd ${WORKSPACE}/tests/examples/python/PYTHONConnectorChecker - python3 PythonChecker.py ''' + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + cd ${WORKSPACE}/tests/gotest + bash batchtest.sh + ''' + } + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + cd ${WORKSPACE}/tests/examples/python/PYTHONConnectorChecker + python3 PythonChecker.py + ''' + } + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + cd ${WORKSPACE}/tests/examples/JDBC/JDBCDemo/ + mvn clean package assembly:single >/dev/null + java -jar target/jdbcChecker-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1 + ''' + } + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + cd ${JENKINS_HOME}/workspace/C#NET/src/CheckC# + dotnet run + ''' + } + } } @@ -139,5 +172,82 @@ pipeline { } } - + post { + success { + emailext ( + subject: "SUCCESSFUL: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'", + body: ''' + + + + + + + + + + + + +

+                        构建信息
+                          • 构建名称>>分支:${PROJECT_NAME}
+                          • 构建结果: Successful
+                          • 构建编号:${BUILD_NUMBER}
+                          • 触发用户:${CAUSE}
+                          • 变更概要:${CHANGES}
+                          • 构建地址:${BUILD_URL}
+                          • 构建日志:${BUILD_URL}console
+                          • 变更集:${JELLY_SCRIPT}
+ + ''', + to: "yqliu@taosdata.com,pxiao@taosdata.com", + from: "support@taosdata.com" + ) + } + failure { + emailext ( + subject: "FAILED: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'", + body: ''' + + + + + + + + + + + + +

+                        构建信息
+                          • 构建名称>>分支:${PROJECT_NAME}
+                          • 构建结果: Failed
+                          • 构建编号:${BUILD_NUMBER}
+                          • 触发用户:${CAUSE}
+                          • 变更概要:${CHANGES}
+                          • 构建地址:${BUILD_URL}
+                          • 构建日志:${BUILD_URL}console
+                          • 变更集:${JELLY_SCRIPT}
+ + ''', + to: "yqliu@taosdata.com,pxiao@taosdata.com", + from: "support@taosdata.com" + ) + } + } } \ No newline at end of file diff --git a/alert/README.md b/alert/README.md index 547f3a0381a74714b1f6c8c74b861678b3805619..b8b8c92a2797bf58ca9f80b1781fc1f5d7555f6b 100644 --- a/alert/README.md +++ b/alert/README.md @@ -61,7 +61,7 @@ The use of each configuration item is: * **port**: This is the `http` service port which enables other application to manage rules by `restful API`. * **database**: rules are stored in a `sqlite` database, this is the path of the database file (if the file does not exist, the alert application creates it automatically). -* **tdengine**: connection string of `TDEngine` server, note the database name should be put in the `sql` field of a rule in most cases, thus it should NOT be included in the string. +* **tdengine**: connection string of `TDEngine` server (please refer the documentation of GO connector for the detailed format of this string), note the database name should be put in the `sql` field of a rule in most cases, thus it should NOT be included in the string. * **log > level**: log level, could be `production` or `debug`. * **log > path**: log output file path. * **receivers > alertManager**: the alert application pushes alerts to `AlertManager` at this URL. diff --git a/alert/README_cn.md b/alert/README_cn.md index 938b23a58406f5d6f279191a47dc957c446911ce..f659e997e3ea8ec8acef8cc3eb6e41a4c692a476 100644 --- a/alert/README_cn.md +++ b/alert/README_cn.md @@ -58,7 +58,7 @@ $ go build * **port**:报警监测程序支持使用 `restful API` 对规则进行管理,这个参数用于配置 `http` 服务的侦听端口。 * **database**:报警监测程序将规则保存到了一个 `sqlite` 数据库中,这个参数用于指定数据库文件的路径(不需要提前创建这个文件,如果它不存在,程序会自动创建它)。 -* **tdengine**:`TDEngine` 的连接字符串,一般来说,数据库名应该在报警规则的 `sql` 语句中指定,所以这个字符串中 **不** 应包含数据库名。 +* **tdengine**:`TDEngine` 的连接字符串(这个字符串的详细格式说明请见 GO 连接器的文档),一般来说,数据库名应该在报警规则的 `sql` 语句中指定,所以这个字符串中 **不** 应包含数据库名。 * **log > level**:日志的记录级别,可选 `production` 或 `debug`。 * **log > path**:日志文件的路径。 * **receivers > alertManager**:报警监测程序会将报警推送到 `AlertManager`,在这里指定 `AlertManager` 的接收地址。 diff --git a/alert/app/rule.go b/alert/app/rule.go index 44596ca26d7da0ddec55dfabc2930c7166dc3547..236e5bd75562d90cac8bb5e20608dc08e1298f56 100644 --- a/alert/app/rule.go +++ b/alert/app/rule.go @@ -84,6 +84,7 @@ func (alert *Alert) doRefresh(firing bool, rule *Rule) bool { case firing && (alert.State == AlertStateWaiting): alert.StartsAt = time.Now() + alert.EndsAt = time.Time{} if rule.For.Nanoseconds() > 0 { alert.State = AlertStatePending return false @@ -95,6 +96,7 @@ func (alert *Alert) doRefresh(firing bool, rule *Rule) bool { return false } alert.StartsAt = alert.StartsAt.Add(rule.For.Duration) + alert.EndsAt = time.Time{} alert.State = AlertStateFiring case firing && (alert.State == AlertStateFiring): diff --git a/cmake/install.inc b/cmake/install.inc index dfca758b9362c96bec0ce45aa385d54a4e75a9e5..746e493a17cd09db6f8bffa4e3e4ac925fac2510 100755 --- a/cmake/install.inc +++ b/cmake/install.inc @@ -9,22 +9,23 @@ ELSEIF (TD_WINDOWS) ELSE () SET(CMAKE_INSTALL_PREFIX C:/TDengine) ENDIF () - + INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/go DESTINATION connector) INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/nodejs DESTINATION connector) INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/python DESTINATION connector) + INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/C\# DESTINATION connector) INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/tests/examples DESTINATION .) INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/packaging/cfg DESTINATION .) 
INSTALL(FILES ${TD_COMMUNITY_DIR}/src/inc/taos.h DESTINATION include) INSTALL(FILES ${TD_COMMUNITY_DIR}/src/inc/taoserror.h DESTINATION include) INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.lib DESTINATION driver) INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.exp DESTINATION driver) - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.dll DESTINATION driver) - + INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.dll DESTINATION driver) + IF (TD_POWER) INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/power.exe DESTINATION .) - ELSE () - INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .) + ELSE () + INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .) INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taosdemo.exe DESTINATION .) ENDIF () diff --git a/cmake/version.inc b/cmake/version.inc index a248f76f48ede5f1f483943c08fce6604756b6d4..2f0ec81aea2dcc932845537bcb37f5ab64d9bcb8 100644 --- a/cmake/version.inc +++ b/cmake/version.inc @@ -4,7 +4,7 @@ PROJECT(TDengine) IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "2.0.6.0") + SET(TD_VER_NUMBER "2.0.8.0") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/documentation/webdocs/markdowndocs/administrator-ch.md b/documentation/webdocs/markdowndocs/administrator-ch.md index 44b3ad46712019870be6fefb5234611f55f6e03e..79388a2edb9404a0f7b31b9182eb5ce2cb0d52be 100644 --- a/documentation/webdocs/markdowndocs/administrator-ch.md +++ b/documentation/webdocs/markdowndocs/administrator-ch.md @@ -87,6 +87,7 @@ TDengine系统后台服务由taosd提供,可以在配置文件taos.cfg里修 - httpPort: RESTful服务使用的端口号,所有的HTTP请求(TCP)都需要向该接口发起查询/写入请求。 - dataDir: 数据文件目录,所有的数据文件都将写入该目录。默认值:/var/lib/taos。 - logDir:日志文件目录,客户端和服务器的运行日志文件将写入该目录。默认值:/var/log/taos。 +- tempDir:临时文件目录,客户端和服务器的临时文件(主要是查询时用于保存中间结果的问题)将写入该目录。 默认值:Linux下为 /tmp/,Windows下为环境变量 tmp 或 temp 指向的目录。 - arbitrator:系统中裁决器的end point, 缺省值为空。 - role:dnode的可选角色。0-any; 既可作为mnode,也可分配vnode;1-mgmt;只能作为mnode,不能分配vnode;2-dnode;不能作为mnode,只能分配vnode - debugFlag:运行日志开关。131(输出错误和警告日志),135( 输出错误、警告和调试日志),143( 输出错误、警告、调试和跟踪日志)。默认值:131或135(不同模块有不同的默认值)。 diff --git a/documentation20/webdocs/markdowndocs/Documentation-ch.md b/documentation20/webdocs/markdowndocs/Documentation-ch.md index 077a0431383da8067b6bd33c4b7218627b7adbd0..f1f2d58f0562b831395e5177eb34ec3534aaa715 100644 --- a/documentation20/webdocs/markdowndocs/Documentation-ch.md +++ b/documentation20/webdocs/markdowndocs/Documentation-ch.md @@ -34,7 +34,8 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专 - [SQL写入](https://www.taosdata.com/cn/documentation20/insert/#SQL写入):使用SQL insert命令向一张或多张表写入单条或多条记录 - [Telegraf写入](https://www.taosdata.com/cn/documentation20/insert/#Telegraf直接写入):配置Telegraf, 不用任何代码,将采集数据直接写入 - [Prometheus写入](https://www.taosdata.com/cn/documentation20/insert/#Prometheus直接写入):配置Prometheus, 不用任何代码,将数据直接写入 -- [EMQ X Broker](https://www.taosdata.com/cn/documentation20/insert/#EMQ-X-Broker直接写入):配置EMQ X,不用任何代码,就可将MQTT数据直接写入 +- [EMQ X Broker](https://www.taosdata.com/cn/documentation20/insert/#EMQ-X-Broker直接写入):配置EMQ X,不用任何代码,就可将 MQTT 数据直接写入 +- [HiveMQ Broker](https://www.taosdata.com/cn/documentation20/insert/#HiveMQ-Broker直接写入):通过 HiveMQ Extension,不用任何代码,就可将 MQTT 数据直接写入 ## [高效查询数据](https://www.taosdata.com/cn/documentation20/queries) diff --git a/documentation20/webdocs/markdowndocs/Evaluation-ch.md b/documentation20/webdocs/markdowndocs/Evaluation-ch.md index 9e7e0ec6aa8b60bfa9dbe603a45a265a1d1eba00..a92f97a8d9dfc9a47d5554daa076f4d2f0774c92 100644 --- a/documentation20/webdocs/markdowndocs/Evaluation-ch.md +++ b/documentation20/webdocs/markdowndocs/Evaluation-ch.md @@ -10,7 +10,7 @@ 
TDengine的模块之一是时序数据库。但除此之外,为减少研发的 * __硬件或云服务成本降至1/5__:由于超强性能,计算资源不到通用大数据方案的1/5;通过列式存储和先进的压缩算法,存储空间不到通用数据库的1/10。 * __全栈时序数据处理引擎__:将数据库、消息队列、缓存、流式计算等功能融为一体,应用无需再集成Kafka/Redis/HBase/Spark/HDFS等软件,大幅降低应用开发和维护的复杂度成本。 * __强大的分析功能__:无论是十年前还是一秒钟前的数据,指定时间范围即可查询。数据可在时间轴上或多个设备上进行聚合。即席查询可通过Shell, Python, R, Matlab随时进行。 -* __与第三方工具无缝连接__:不用一行代码,即可与Telegraf, Grafana, EMQ, Prometheus, Matlab, R等集成。后续将支持OPC, Hadoop, Spark等, BI工具也将无缝连接。 +* __与第三方工具无缝连接__:不用一行代码,即可与Telegraf, Grafana, EMQ, HiveMQ, Prometheus, Matlab, R等集成。后续将支持OPC, Hadoop, Spark等, BI工具也将无缝连接。 * __零运维成本、零学习成本__:安装集群简单快捷,无需分库分表,实时备份。类似标准SQL,支持RESTful, 支持Python/Java/C/C++/C#/Go/Node.js, 与MySQL相似,零学习成本。 采用TDengine,可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。但需要指出的是,因充分利用了物联网时序数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM等通用型数据。 diff --git a/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md b/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md index 4082d72f112e8ff62d2e8b2c8b15391dd6f39d8a..760ebae4fc3b7ddd609d1bfb5689f51b05fc7cb7 100644 --- a/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md +++ b/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md @@ -90,7 +90,7 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic ```mysql ALTER DATABASE db_name REPLICA 2; ``` - REPLICA参数是指修改数据库副本数,取值范围[1, 3]。在集群中使用,副本数必须小于dnode的数目。 + REPLICA参数是指修改数据库副本数,取值范围[1, 3]。在集群中使用,副本数必须小于或等于dnode的数目。 ```mysql ALTER DATABASE db_name KEEP 365; @@ -844,7 +844,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 - **PERCENTILE** ```mysql - SELECT PERCENTILE(field_name, P) FROM { tb_name | stb_name } [WHERE clause]; + SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause]; ``` 功能说明:统计表中某列的值百分比分位数。 返回结果数据类型: 双精度浮点数Double。 @@ -1016,9 +1016,9 @@ SELECT AVG(current),MAX(current),LEASTSQUARES(current, start_val, step_val), PER ``` ## TAOS SQL 边界限制 -- 数据库名最大长度为33 -- 表名最大长度为193,每行数据最大长度16k个字符 -- 列名最大长度为65,最多允许1024列,最少需要2列,第一列必须是时间戳 +- 数据库名最大长度为32 +- 表名最大长度为192,每行数据最大长度16k个字符 +- 列名最大长度为64,最多允许1024列,最少需要2列,第一列必须是时间戳 - 标签最多允许128个,可以0个,标签总长度不超过16k个字符 - SQL语句最大长度65480个字符,但可通过系统配置参数maxSQLLength修改,最长可配置为1M - 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制 diff --git a/documentation20/webdocs/markdowndocs/administrator-ch.md b/documentation20/webdocs/markdowndocs/administrator-ch.md index 4b274e05e6e392879fc887ec1137968270a8e4b8..36466d2b7ea29307b5c17ad21b0db47a98598fa4 100644 --- a/documentation20/webdocs/markdowndocs/administrator-ch.md +++ b/documentation20/webdocs/markdowndocs/administrator-ch.md @@ -35,7 +35,7 @@ TDengine相对于通用数据库,有超高的压缩比,在绝大多数场景 Raw DataSize = numOfTables * rowSizePerTable * rowsPerTable ``` -示例:1000万台智能电表,每台电表每15分钟采集一次数据,每次采集的数据128字节,那么一年的原始数据量是:10000000\*128\*24\*60/15*365 = 44851T。TDengine大概需要消耗44851/5=8970T, 8.9P空间。 +示例:1000万台智能电表,每台电表每15分钟采集一次数据,每次采集的数据128字节,那么一年的原始数据量是:10000000\*128\*24\*60/15*365 = 44.8512T。TDengine大概需要消耗44.851/5=8.97024T空间。 用户可以通过参数keep,设置数据在磁盘中的最大保存时长。为进一步减少存储成本,TDengine还提供多级存储,最冷的数据可以存放在最廉价的存储介质上,应用的访问不用做任何调整,只是读取速度降低了。 @@ -253,7 +253,7 @@ ALTER USER PASS <'password'>; 修改用户密码, 为避免被转换为小写,密码需要用单引号引用,单引号为英文半角 ``` -ALTER USER PRIVILEDGE ; +ALTER USER PRIVILEGE ; ``` 修改用户权限为:super/write/read,不需要添加单引号 diff --git a/documentation20/webdocs/markdowndocs/architecture-ch.md b/documentation20/webdocs/markdowndocs/architecture-ch.md index d4705ccb05c092d8da38072368a167466bd78968..c9bfa30830fe7b2f3cd1364b589326255560ad83 100644 --- a/documentation20/webdocs/markdowndocs/architecture-ch.md +++ b/documentation20/webdocs/markdowndocs/architecture-ch.md @@ -4,16 +4,99 @@ ### 物联网典型场景 
在典型的物联网、车联网、运维监测场景中,往往有多种不同类型的数据采集设备,采集一个到多个不同的物理量。而同一种采集设备类型,往往又有多个具体的采集设备分布在不同的地点。大数据处理系统就是要将各种采集的数据汇总,然后进行计算和分析。对于同一类设备,其采集的数据都是很规则的。以智能电表为例,假设每个智能电表采集电流、电压、相位三个量,其采集的数据类似如下的表格: -| Device ID | Time Stamp | current | voltage | phase | location | groupId | -| :-------: | :-----------: | :-----: | :-----: | :---: | :--------------: | :-----: | -| d1001 | 1538548685000 | 10.3 | 219 | 0.31 | Beijing.Chaoyang | 2 | -| d1002 | 1538548684000 | 10.2 | 220 | 0.23 | Beijing.Chaoyang | 3 | -| d1003 | 1538548686500 | 11.5 | 221 | 0.35 | Beijing.Haidian | 3 | -| d1004 | 1538548685500 | 13.4 | 223 | 0.29 | Beijing.Haidian | 2 | -| d1001 | 1538548695000 | 12.6 | 218 | 0.33 | Beijing.Chaoyang | 2 | -| d1004 | 1538548696600 | 11.8 | 221 | 0.28 | Beijing.Haidian | 2 | -| d1002 | 1538548696650 | 10.3 | 218 | 0.25 | Beijing.Chaoyang | 3 | -| d1001 | 1538548696800 | 12.3 | 221 | 0.31 | Beijing.Chaoyang | 2 | +
+<table>
+    <tr>
+        <th>设备ID</th><th>时间戳</th><th colspan="3">采集量</th><th colspan="2">标签</th>
+    </tr>
+    <tr>
+        <th>Device ID</th><th>Time Stamp</th><th>current</th><th>voltage</th><th>phase</th><th>location</th><th>groupId</th>
+    </tr>
+    <tr><td>d1001</td><td>1538548685000</td><td>10.3</td><td>219</td><td>0.31</td><td>Beijing.Chaoyang</td><td>2</td></tr>
+    <tr><td>d1002</td><td>1538548684000</td><td>10.2</td><td>220</td><td>0.23</td><td>Beijing.Chaoyang</td><td>3</td></tr>
+    <tr><td>d1003</td><td>1538548686500</td><td>11.5</td><td>221</td><td>0.35</td><td>Beijing.Haidian</td><td>3</td></tr>
+    <tr><td>d1004</td><td>1538548685500</td><td>13.4</td><td>223</td><td>0.29</td><td>Beijing.Haidian</td><td>2</td></tr>
+    <tr><td>d1001</td><td>1538548695000</td><td>12.6</td><td>218</td><td>0.33</td><td>Beijing.Chaoyang</td><td>2</td></tr>
+    <tr><td>d1004</td><td>1538548696600</td><td>11.8</td><td>221</td><td>0.28</td><td>Beijing.Haidian</td><td>2</td></tr>
+    <tr><td>d1002</td><td>1538548696650</td><td>10.3</td><td>218</td><td>0.25</td><td>Beijing.Chaoyang</td><td>3</td></tr>
+    <tr><td>d1001</td><td>1538548696800</td><td>12.3</td><td>221</td><td>0.31</td><td>Beijing.Chaoyang</td><td>2</td></tr>
+    <tr><td colspan="7">表1:智能电表数据示例</td></tr>
+</table>
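
A minimal TAOS SQL sketch of how the schema in 表1 is typically modeled in TDengine: one super table per device type (measured quantities as columns, static attributes as tags), one sub-table per smart meter created from it, and plain inserts for each collected row. The names `meters` and `d1001` below are illustrative only and are not part of this change:

```mysql
CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT)
  TAGS (location BINARY(64), groupId INT);

CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);

INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31);
```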
@@ -221,7 +304,7 @@ TDengine采用时间驱动缓存管理策略(First-In-First-Out,FIFO), TDengine通过查询函数向用户提供毫秒级的数据获取能力。直接将最近到达的数据保存在缓存中,可以更加快速地响应用户针对最近一条或一批数据的查询分析,整体上提供更快的数据库查询响应能力。从这个意义上来说,**可通过设置合适的配置参数将TDengine作为数据缓存来使用,而不需要再部署Redis或其他额外的缓存系统**,可有效地简化系统架构,降低运维的成本。需要注意的是,TDengine重启以后系统的缓存将被清空,之前缓存的数据均会被批量写入磁盘,缓存的数据将不会像专门的Key-value缓存系统再将之前缓存的数据重新加载到缓存中。 -每个vnode有自己独立的内存,而且由多个固定大小的内存块组成,不同vnode之间完全隔离。数据写入时,类似于日志的写法,数据被顺序追加写入内存,但每个vnode维护有自己的skip list,便于迅速查找。当一半以上的内存块写满时,启动落盘操作,而且后续写的操作在新的内存块进行。这样,一个vnode里有一半内存块是保留有最近的数据的,以达到缓存、快速查找的目的。一个vnode的内存块的个数由配置参数blocks决定,内存块的大小由配置参数cache决定。 +每个vnode有自己独立的内存,而且由多个固定大小的内存块组成,不同vnode之间完全隔离。数据写入时,类似于日志的写法,数据被顺序追加写入内存,但每个vnode维护有自己的skip list,便于迅速查找。当三分之一以上的内存块写满时,启动落盘操作,而且后续写的操作在新的内存块进行。这样,一个vnode里有三分之一内存块是保留有最近的数据的,以达到缓存、快速查找的目的。一个vnode的内存块的个数由配置参数blocks决定,内存块的大小由配置参数cache决定。 ### 持久化存储 TDengine采用数据驱动的方式让缓存中的数据写入硬盘进行持久化存储。当vnode中缓存的数据达到一定规模时,为了不阻塞后续数据的写入,TDengine也会拉起落盘线程将缓存的数据写入持久化存储。TDengine在数据落盘时会打开新的数据库日志文件,在落盘成功后则会删除老的数据库日志文件,避免日志文件无限制的增长。 diff --git a/documentation20/webdocs/markdowndocs/connector-ch.md b/documentation20/webdocs/markdowndocs/connector-ch.md index 0e29b324871e61d184fde17710c863f06b60bdcc..c5a955f43fcc30e70d8aac3919433c2e767f76ba 100644 --- a/documentation20/webdocs/markdowndocs/connector-ch.md +++ b/documentation20/webdocs/markdowndocs/connector-ch.md @@ -616,6 +616,43 @@ HTTP请求URL采用`sqlutc`时,返回结果集的时间戳将采用UTC时间 - httpEnableCompress: 是否支持压缩,默认不支持,目前TDengine仅支持gzip压缩格式 - httpDebugFlag: 日志开关,131:仅错误和报警信息,135:调试信息,143:非常详细的调试信息,默认131 +## CSharp Connector + +在Windows系统上,C#应用程序可以使用TDengine的原生C接口来执行所有数据库操作,后续版本将提供ORM(dapper)框架驱动。 + +#### 安装TDengine客户端 + +C#连接器需要使用`libtaos.so`和`taos.h`。因此,在使用C#连接器之前,需在程序运行的Windows环境安装TDengine的Windows客户端,以便获得相关驱动文件。 + +安装完成后,在文件夹`C:/TDengine/examples/C#`中,将会看到两个文件 + +- TDengineDriver.cs 调用taos.dll文件的Native C方法 +- TDengineTest.cs 参考程序示例 + +在文件夹`C:\Windows\System32`,将会看到`taos.dll`文件 + +#### 使用方法 + +- 将C#接口文件TDengineDriver.cs加入到应用程序所在.NET项目中 +- 参考TDengineTest.cs来定义数据库连接参数,及执行数据插入、查询等操作的方法 +- 因为C#接口需要用到`taos.dll`文件,用户可以将`taos.dll`文件加入.NET解决方案中 + +#### 注意事项 + +- `taos.dll`文件使用x64平台编译,所以.NET项目在生成.exe文件时,“解决方案”/“项目”的“平台”请均选择“x64”。 +- 此.NET接口目前已经在Visual Studio 2013/2015/2017中验证过,其它VS版本尚待验证。 + +#### 第三方驱动 + +Maikebing.Data.Taos是一个TDengine的ADO.Net提供器,支持linux,windows。该开发包由热心贡献者`麦壳饼@@maikebing`提供,具体请参考 + +``` +//接口下载 +https://github.com/maikebing/Maikebing.EntityFrameworkCore.Taos +//用法说明 +https://www.taosdata.com/blog/2020/11/02/1901.html +``` + ## Go Connector diff --git a/documentation20/webdocs/markdowndocs/faq-ch.md b/documentation20/webdocs/markdowndocs/faq-ch.md index 80deb889ef0eee8e9b47f86b5e58a76c6c070d5b..757b6d9929b68a90e7bc558b553233f24b09ba17 100644 --- a/documentation20/webdocs/markdowndocs/faq-ch.md +++ b/documentation20/webdocs/markdowndocs/faq-ch.md @@ -38,9 +38,9 @@ 6. 检查防火墙设置,确认TCP/UDP 端口6030-6042 是打开的 -7. 对于Linux上的JDBC(ODBC, Python, Go等接口类似)连接, 确保*libtaos.so*在目录*/usr/local/lib/taos*里, 并且*/usr/local/lib/taos*在系统库函数搜索路径*LD_LIBRARY_PATH*里 +7. 对于Linux上的JDBC(ODBC, Python, Go等接口类似)连接, 确保*libtaos.so*在目录*/usr/local/taos/driver*里, 并且*/usr/local/taos/driver*在系统库函数搜索路径*LD_LIBRARY_PATH*里 -8. 对于windows上的JDBC, ODBC, Python, Go等连接,确保*driver/c/taos.dll*在你的系统搜索目录里 (建议*taos.dll*放在目录 *C:\Windows\System32*) +8. 对于windows上的JDBC, ODBC, Python, Go等连接,确保*C:\TDengine\driver\taos.dll*在你的系统库函数搜索目录里 (建议*taos.dll*放在目录 *C:\Windows\System32*) 9. 
如果仍不能排除连接故障,请使用命令行工具nc来分别判断指定端口的TCP和UDP连接是否通畅 检查UDP端口连接是否工作:`nc -vuz {hostIP} {port} ` diff --git a/documentation20/webdocs/markdowndocs/insert-ch.md b/documentation20/webdocs/markdowndocs/insert-ch.md index fa53cbd62b17169c0f54877a62da8c48ac21edcf..77ba596d4ec6de76a39b878e57ea255b081fea45 100644 --- a/documentation20/webdocs/markdowndocs/insert-ch.md +++ b/documentation20/webdocs/markdowndocs/insert-ch.md @@ -1,6 +1,6 @@ # 高效写入数据 -TDengine支持多种接口写入数据,包括SQL, Prometheus, Telegraf, EMQ MQTT Broker, CSV文件等,后续还将提供Kafka, OPC等接口。数据可以单条插入,也可以批量插入,可以插入一个数据采集点的数据,也可以同时插入多个数据采集点的数据。支持多线程插入,支持时间乱序数据插入,也支持历史数据插入。 +TDengine支持多种接口写入数据,包括SQL, Prometheus, Telegraf, EMQ MQTT Broker, HiveMQ Broker, CSV文件等,后续还将提供Kafka, OPC等接口。数据可以单条插入,也可以批量插入,可以插入一个数据采集点的数据,也可以同时插入多个数据采集点的数据。支持多线程插入,支持时间乱序数据插入,也支持历史数据插入。 ## SQL写入 @@ -218,7 +218,15 @@ use telegraf; select * from cpu; ``` -## EMQ X Broker直接写入 -MQTT是一流行的物联网数据传输协议,[EMQ](https://github.com/emqx/emqx)是一开源的MQTT Broker软件,无需任何代码,只需要在EMQ Dashboard里使用“规则”做简单配置,即可将MQTT的数据直接写入TDengine。EMQ X 支持通过 发送到 Web 服务 的方式保存数据到 TDEngine,也在企业版上提供原生的 TDEngine 驱动实现直接保存。详细使用方法请参考 [EMQ 官方文档](https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine)。 + +MQTT是一流行的物联网数据传输协议,TDengine 可以很方便的接入 MQTT Broker 接受的数据并写入到 TDengine。 + +## EMQ Broker 直接写入 + +[EMQ](https://github.com/emqx/emqx)是一开源的MQTT Broker软件,无需任何代码,只需要在EMQ Dashboard里使用“规则”做简单配置,即可将MQTT的数据直接写入TDengine。EMQ X 支持通过 发送到 Web 服务 的方式保存数据到 TDengine,也在企业版上提供原生的 TDEngine 驱动实现直接保存。详细使用方法请参考 [EMQ 官方文档](https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine)。 + +## HiveMQ Broker 直接写入 + +[HiveMQ](https://www.hivemq.com/) 是一个提供免费个人版和企业版的 MQTT 代理,主要用于企业和新兴的机器到机器M2M通讯和内部传输,满足可伸缩性、易管理和安全特性。HiveMQ 提供了开源的插件开发包。可以通过 HiveMQ extension - TDengine 保存数据到 TDengine。详细使用方法请参考 [HiveMQ extension - TDengine 说明文档](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README.md)。 diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg index 974b2b05c12f9fd5a482ef0072a9dacc2d1f09dc..ca88bca3c863dcfd4a95497fc572499dc93f31f9 100644 --- a/packaging/cfg/taos.cfg +++ b/packaging/cfg/taos.cfg @@ -20,6 +20,9 @@ # data file's directory # dataDir /var/lib/taos +# temporary file's directory +# tempDir /tmp/ + # the arbitrator's fully qualified domain name (FQDN) for TDengine system, for cluster only # arbitrator arbitrator_hostname:6042 @@ -256,3 +259,5 @@ # maximum display width of binary and nchar fields in the shell. 
The parts exceeding this limit will be hidden # maxBinaryDisplayWidth 30 +# enable/disable telemetry reporting +# telemetryReporting 1 \ No newline at end of file diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh index 450c6a8f551fe7dd8dff5e53f5dad639aec4656e..edc7de96923266729d1985cb43abfa3003f9178e 100755 --- a/packaging/deb/makedeb.sh +++ b/packaging/deb/makedeb.sh @@ -48,6 +48,7 @@ cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_pat cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script cp ${compile_dir}/../packaging/tools/preun.sh ${pkg_dir}${install_home_path}/script cp ${compile_dir}/build/bin/taosdemo ${pkg_dir}${install_home_path}/bin +cp ${compile_dir}/build/bin/taosdump ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver @@ -58,7 +59,7 @@ cp -r ${top_dir}/src/connector/grafanaplugin ${pkg_dir}${install_home_pat cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector -cp ${compile_dir}/build/lib/taos-jdbcdriver*dist.* ${pkg_dir}${install_home_path}/connector +cp ${compile_dir}/build/lib/taos-jdbcdriver*dist.* ${pkg_dir}${install_home_path}/connector ||: cp -r ${compile_dir}/../packaging/deb/DEBIAN ${pkg_dir}/ chmod 755 ${pkg_dir}/DEBIAN/* diff --git a/packaging/release.sh b/packaging/release.sh index 7542a5b4cafb69d5cee16bddfc9a5651eb717b92..68f947ccab3ef18a1b351b91a58db64a8f465c8e 100755 --- a/packaging/release.sh +++ b/packaging/release.sh @@ -156,9 +156,15 @@ build_time=$(date +"%F %R") # get commint id from git gitinfo=$(git rev-parse --verify HEAD) -enterprise_dir="${top_dir}/../enterprise" -cd ${enterprise_dir} -gitinfoOfInternal=$(git rev-parse --verify HEAD) + +if [[ "$verMode" == "cluster" ]]; then + enterprise_dir="${top_dir}/../enterprise" + cd ${enterprise_dir} + gitinfoOfInternal=$(git rev-parse --verify HEAD) +else + gitinfoOfInternal=NULL +fi + cd ${curr_dir} # 2. cmake executable file @@ -193,23 +199,35 @@ cd ${curr_dir} # 3. Call the corresponding script for packaging if [ "$osType" != "Darwin" ]; then if [[ "$verMode" != "cluster" ]] && [[ "$cpuType" == "x64" ]] && [[ "$dbName" == "taos" ]]; then - echo "====do deb package for the ubuntu system====" - output_dir="${top_dir}/debs" - if [ -d ${output_dir} ]; then - ${csudo} rm -rf ${output_dir} + ret='0' + command -v dpkg >/dev/null 2>&1 || { ret='1'; } + if [ "$ret" -eq 0 ]; then + echo "====do deb package for the ubuntu system====" + output_dir="${top_dir}/debs" + if [ -d ${output_dir} ]; then + ${csudo} rm -rf ${output_dir} + fi + ${csudo} mkdir -p ${output_dir} + cd ${script_dir}/deb + ${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType} + else + echo "==========dpkg command not exist, so not release deb package!!!" 
fi - ${csudo} mkdir -p ${output_dir} - cd ${script_dir}/deb - ${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType} - - echo "====do rpm package for the centos system====" - output_dir="${top_dir}/rpms" - if [ -d ${output_dir} ]; then - ${csudo} rm -rf ${output_dir} + + ret='0' + command -v rpmbuild >/dev/null 2>&1 || { ret='1'; } + if [ "$ret" -eq 0 ]; then + echo "====do rpm package for the centos system====" + output_dir="${top_dir}/rpms" + if [ -d ${output_dir} ]; then + ${csudo} rm -rf ${output_dir} + fi + ${csudo} mkdir -p ${output_dir} + cd ${script_dir}/rpm + ${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType} + else + echo "==========rpmbuild command not exist, so not release rpm package!!!" fi - ${csudo} mkdir -p ${output_dir} - cd ${script_dir}/rpm - ${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType} fi echo "====do tar.gz package for all systems====" diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec index 4e40263dc4ebaf6b566d20890ecd97f64e160340..8c23ab802d54088dafc4b4c6a5d1b6241c881f2e 100644 --- a/packaging/rpm/tdengine.spec +++ b/packaging/rpm/tdengine.spec @@ -2,7 +2,7 @@ %define cfg_install_dir /etc/taos %define __strip /bin/true -Name: TDengine +Name: tdengine Version: %{_version} Release: 3%{?dist} Summary: tdengine from taosdata @@ -58,6 +58,7 @@ cp %{_compiledir}/../packaging/tools/preun.sh %{buildroot}%{homepath}/scri cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin cp %{_compiledir}/build/bin/taosdemo %{buildroot}%{homepath}/bin +cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include @@ -65,7 +66,7 @@ cp -r %{_compiledir}/../src/connector/grafanaplugin %{buildroot}%{homepath}/conn cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector -cp %{_compiledir}/build/lib/taos-jdbcdriver*dist.* %{buildroot}%{homepath}/connector +cp %{_compiledir}/build/lib/taos-jdbcdriver*dist.* %{buildroot}%{homepath}/connector ||: cp -r %{_compiledir}/../tests/examples/* %{buildroot}%{homepath}/examples #Scripts executed before installation @@ -134,6 +135,7 @@ if [ $1 -eq 0 ];then ${csudo} rm -f ${bin_link_dir}/taos || : ${csudo} rm -f ${bin_link_dir}/taosd || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : + #${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${cfg_link_dir}/* || : ${csudo} rm -f ${inc_link_dir}/taos.h || : ${csudo} rm -f ${inc_link_dir}/taoserror.h || : diff --git a/packaging/tools/get_client.sh b/packaging/tools/get_client.sh new file mode 100755 index 0000000000000000000000000000000000000000..0d34ecb311fb4b941d6f6773d1c3c921a9bd9886 --- /dev/null +++ b/packaging/tools/get_client.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# + +log_dir=$1 +result_file=$2 + +if [ ! -n "$1" ];then + echo "Pleas input the director of taosdlog." + echo "usage: ./get_client.sh " + exit 1 +else + log_dir=$1 +fi + +if [ ! 
-n "$2" ];then + result_file=clientInfo.txt +else + result_file=$2 +fi + +grep "new TCP connection" ${log_dir}/taosdlog.* | sed -e "s/0x.* from / /"|sed -e "s/,.*$//"|sed -e "s/:[0-9]*$//"|sort -r|uniq -f 2|sort -k 3 -r|uniq -f 2 > ${result_file} diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index aedfb0a6834afbf3c0c8c822b056ea5705f8d2ff..ddf7114f08387c2277df47e9174948e479d432ba 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -312,7 +312,7 @@ function install_data() { } function install_connector() { - ${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector + ${csudo} cp -rf ${script_dir}/connector/ ${install_main_dir}/ } function install_examples() { diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh index 24586d3390fe5c4f38b6292442fed2936d5ea7a6..34a9bfaecb4d0366008ea9b78a60434297ea9f51 100755 --- a/packaging/tools/install_client.sh +++ b/packaging/tools/install_client.sh @@ -163,7 +163,7 @@ function install_log() { } function install_connector() { - ${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector + ${csudo} cp -rf ${script_dir}/connector/ ${install_main_dir}/ } function install_examples() { diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index eff70d8035af0291f6dc7040ec13632fec4fa3be..831012851ad70d05080bfae161c0b925d5215ae9 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -278,11 +278,11 @@ function install_service_on_sysvinit() { # Install taosd service if ((${os_type}==1)); then - ${csudo} cp -f ${script_dir}/../deb/init.d/taosd ${install_main_dir}/init.d - ${csudo} cp ${script_dir}/../deb/init.d/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd + ${csudo} cp -f ${script_dir}/../deb/taosd ${install_main_dir}/init.d + ${csudo} cp ${script_dir}/../deb/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd elif ((${os_type}==2)); then - ${csudo} cp -f ${script_dir}/../rpm/init.d/taosd ${install_main_dir}/init.d - ${csudo} cp ${script_dir}/../rpm/init.d/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd + ${csudo} cp -f ${script_dir}/../rpm/taosd ${install_main_dir}/init.d + ${csudo} cp ${script_dir}/../rpm/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd fi #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh index e17c678f263cb6b7a0ccbc32250265b9bc5cbd0e..ee79a560407c650de8c511964f611cf8cacfd2d5 100755 --- a/packaging/tools/makeclient.sh +++ b/packaging/tools/makeclient.sh @@ -45,7 +45,7 @@ if [ "$osType" != "Darwin" ]; then strip ${build_dir}/bin/taos bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh" else - bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${script_dir}/remove_client.sh ${script_dir}/set_core.sh" + bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${script_dir}/remove_client.sh ${script_dir}/set_core.sh ${script_dir}/get_client.sh" fi lib_files="${build_dir}/lib/libtaos.so.${version}" else @@ -110,7 +110,7 @@ mkdir -p ${install_dir}/connector if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then if [ "$osType" != "Darwin" ]; then - cp ${build_dir}/lib/*.jar ${install_dir}/connector + cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: fi cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/ cp -r 
${connector_dir}/python ${install_dir}/connector/ diff --git a/packaging/tools/makeclient_power.sh b/packaging/tools/makeclient_power.sh index faa5a03f52b4c9b56981a6b1c0918e543262b3bb..fdb3e0e5cc8c7add9c8ea8cdc9224964b3c80153 100755 --- a/packaging/tools/makeclient_power.sh +++ b/packaging/tools/makeclient_power.sh @@ -76,8 +76,10 @@ if [ "$osType" != "Darwin" ]; then else cp ${build_dir}/bin/taos ${install_dir}/bin/power cp ${script_dir}/remove_power.sh ${install_dir}/bin - cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo + cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo + cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump cp ${script_dir}/set_core.sh ${install_dir}/bin + cp ${script_dir}/get_client.sh ${install_dir}/bin fi else cp ${bin_files} ${install_dir}/bin @@ -135,7 +137,7 @@ mkdir -p ${install_dir}/connector if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then if [ "$osType" != "Darwin" ]; then - cp ${build_dir}/lib/*.jar ${install_dir}/connector + cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: fi cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/ cp -r ${connector_dir}/python ${install_dir}/connector/ diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index 75b45b544e0a4abbf709cc4c5b3a3b55dc315f0f..5ae5cbbcdc00c1fd862501a6eb0dccd3ac0fa007 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -36,7 +36,7 @@ if [ "$pagMode" == "lite" ]; then strip ${build_dir}/bin/taos bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh" else - bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove.sh ${script_dir}/set_core.sh" + bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove.sh ${script_dir}/set_core.sh ${script_dir}/get_client.sh" fi lib_files="${build_dir}/lib/libtaos.so.${version}" @@ -124,7 +124,7 @@ cp ${lib_files} ${install_dir}/driver connector_dir="${code_dir}/connector" mkdir -p ${install_dir}/connector if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then - cp ${build_dir}/lib/*.jar ${install_dir}/connector + cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/ cp -r ${connector_dir}/python ${install_dir}/connector/ cp -r ${connector_dir}/go ${install_dir}/connector diff --git a/packaging/tools/makepkg_power.sh b/packaging/tools/makepkg_power.sh index 2c02b99787c6d5ad6234de2319bf78b0b09d7e8a..13849484695f98a5fc206d88ec6a5de0cc930a9b 100755 --- a/packaging/tools/makepkg_power.sh +++ b/packaging/tools/makepkg_power.sh @@ -77,8 +77,10 @@ else cp ${build_dir}/bin/taosd ${install_dir}/bin/powerd cp ${script_dir}/remove_power.sh ${install_dir}/bin cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo + cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump cp ${build_dir}/bin/tarbitrator ${install_dir}/bin cp ${script_dir}/set_core.sh ${install_dir}/bin + cp ${script_dir}/get_client.sh ${install_dir}/bin fi chmod a+x ${install_dir}/bin/* || : @@ -156,7 +158,7 @@ cp ${lib_files} ${install_dir}/driver connector_dir="${code_dir}/connector" mkdir -p ${install_dir}/connector if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then - cp ${build_dir}/lib/*.jar ${install_dir}/connector + cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: cp -r ${connector_dir}/grafanaplugin 
${install_dir}/connector/ cp -r ${connector_dir}/python ${install_dir}/connector/ cp -r ${connector_dir}/go ${install_dir}/connector diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh index d91daaa5c44488e34dea7ec2ddec0863699446f2..00705fad778c065eddbd7cb65a2f9c5583a9997b 100755 --- a/packaging/tools/post.sh +++ b/packaging/tools/post.sh @@ -81,8 +81,10 @@ function install_lib() { ${csudo} ln -s ${lib_dir}/libtaos.* ${lib_link_dir}/libtaos.so.1 ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so - ${csudo} ln -s ${lib_dir}/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : - ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : + if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then + ${csudo} ln -s ${lib_dir}/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : + ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : + fi } function install_bin() { @@ -121,8 +123,11 @@ function install_config() { echo -e -n "${GREEN}Enter FQDN:port (like h1.taosdata.com:6030) of an existing TDengine cluster node to join${NC}" echo echo -e -n "${GREEN}OR leave it blank to build one${NC}:" - read firstEp - while true; do + #read firstEp + if exec < /dev/tty; then + read firstEp; + fi + while true; do if [ ! -z "$firstEp" ]; then # check the format of the firstEp #if [[ $firstEp == $FQDN_PATTERN ]]; then diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index 233b7a15b4a9383ee65dac8c9c0dc107fb66dd0a..059c0650c2bbba99694253a73b59d77cd16f06bf 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -1,6 +1,6 @@ name: tdengine base: core18 -version: '2.0.6.0' +version: '2.0.8.0' icon: snap/gui/t-dengine.svg summary: an open-source big data platform designed and optimized for IoT. 
description: | @@ -72,7 +72,7 @@ parts: - usr/bin/taosd - usr/bin/taos - usr/bin/taosdemo - - usr/lib/libtaos.so.2.0.6.0 + - usr/lib/libtaos.so.2.0.8.0 - usr/lib/libtaos.so.1 - usr/lib/libtaos.so diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index f619edd221c005a8d8e707afa5271072b032f74a..a2600785c35a80d040b9f91137c630d7761cef7e 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -20,6 +20,6 @@ ADD_SUBDIRECTORY(tsdb) ADD_SUBDIRECTORY(wal) ADD_SUBDIRECTORY(cq) ADD_SUBDIRECTORY(dnode) -ADD_SUBDIRECTORY(connector/odbc) +#ADD_SUBDIRECTORY(connector/odbc) ADD_SUBDIRECTORY(connector/jdbc) diff --git a/src/balance/src/balance.c b/src/balance/src/balance.c index 0e9bb85b25defd169fea8711d3e0b40304500de4..df78f4fe270d34d17e436e80322f6c81b68d0d2c 100644 --- a/src/balance/src/balance.c +++ b/src/balance/src/balance.c @@ -490,7 +490,7 @@ static bool balanceMontiorDropping() { if (pDnode->status == TAOS_DN_STATUS_OFFLINE) { if (pDnode->lastAccess + tsOfflineThreshold > tsAccessSquence) continue; - if (strcmp(pDnode->dnodeEp, dnodeGetMnodeMasterEp()) == 0) continue; + if (dnodeIsMasterEp(pDnode->dnodeEp)) continue; if (mnodeGetDnodesNum() <= 1) continue; mLInfo("dnode:%d, set to removing state for it offline:%d seconds", pDnode->dnodeId, @@ -571,8 +571,8 @@ static void balanceCheckDnodeAccess() { if (pDnode->status != TAOS_DN_STATUS_DROPPING && pDnode->status != TAOS_DN_STATUS_OFFLINE) { pDnode->status = TAOS_DN_STATUS_OFFLINE; pDnode->offlineReason = TAOS_DN_OFF_STATUS_MSG_TIMEOUT; - mInfo("dnode:%d, set to offline state, access seq:%d, last seq:%d", pDnode->dnodeId, tsAccessSquence, - pDnode->lastAccess); + mInfo("dnode:%d, set to offline state, access seq:%d last seq:%d laststat:%d", pDnode->dnodeId, tsAccessSquence, + pDnode->lastAccess, pDnode->status); balanceSetVgroupOffline(pDnode); } } diff --git a/src/client/inc/tscLocalMerge.h b/src/client/inc/tscLocalMerge.h index 5baa66a9e0229f35c431cea7a0d2dbb9e2ffb0e2..2c7c2f51d02ea1b9943004fc62f8e48f2034d0fe 100644 --- a/src/client/inc/tscLocalMerge.h +++ b/src/client/inc/tscLocalMerge.h @@ -62,11 +62,11 @@ typedef struct SLocalReducer { bool hasUnprocessedRow; tOrderDescriptor * pDesc; SColumnModel * resColModel; + SColumnModel* finalModel; tExtMemBuffer ** pExtMemBuffer; // disk-based buffer SFillInfo* pFillInfo; // interpolation support structure - char * pFinalRes; // result data after interpo - tFilePage * discardData; - SResultInfo * pResInfo; + char* pFinalRes; // result data after interpo + tFilePage* discardData; bool discard; int32_t offset; // limit offset value bool orderPrjOnSTable; // projection query on stable @@ -75,7 +75,8 @@ typedef struct SLocalReducer { typedef struct SRetrieveSupport { tExtMemBuffer ** pExtMemBuffer; // for build loser tree tOrderDescriptor *pOrderDescriptor; - SColumnModel * pFinalColModel; // colModel for final result + SColumnModel* pFinalColModel; // colModel for final result + SColumnModel* pFFColModel; int32_t subqueryIndex; // index of current vnode in vnode list SSqlObj * pParentSql; tFilePage * localBuffer; // temp buffer, there is a buffer for each vnode to @@ -83,9 +84,9 @@ typedef struct SRetrieveSupport { } SRetrieveSupport; int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOrderDescriptor **pDesc, - SColumnModel **pFinalModel, uint32_t nBufferSize); + SColumnModel **pFinalModel, SColumnModel** pFFModel, uint32_t nBufferSize); -void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, SColumnModel *pFinalModel, +void 
tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, SColumnModel *pFinalModel, SColumnModel* pFFModel, int32_t numOfVnodes); int32_t saveToBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePage *pPage, void *data, @@ -97,7 +98,7 @@ int32_t tscFlushTmpBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tF * create local reducer to launch the second-stage reduce process at client site */ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc, - SColumnModel *finalModel, SSqlObj* pSql); + SColumnModel *finalModel, SColumnModel *pFFModel, SSqlObj* pSql); void tscDestroyLocalReducer(SSqlObj *pSql); diff --git a/src/client/inc/tscSubquery.h b/src/client/inc/tscSubquery.h index bc01de110345e4c90cf5c15d3d7f6b010cb7308d..f7832c9818226d19ba1e20cb47fa17c5bfb0f611 100644 --- a/src/client/inc/tscSubquery.h +++ b/src/client/inc/tscSubquery.h @@ -23,7 +23,7 @@ extern "C" { #include "tscUtil.h" #include "tsclient.h" -void tscFetchDatablockFromSubquery(SSqlObj* pSql); +void tscFetchDatablockForSubquery(SSqlObj* pSql); void tscSetupOutputColumnIndex(SSqlObj* pSql); void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code); @@ -39,7 +39,9 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql); int32_t tscHandleInsertRetry(SSqlObj* pSql); void tscBuildResFromSubqueries(SSqlObj *pSql); -TAOS_ROW doSetResultRowData(SSqlObj *pSql, bool finalResult); +TAOS_ROW doSetResultRowData(SSqlObj *pSql); + +char *getArithmeticInputSrc(void *param, const char *name, int32_t colId); #ifdef __cplusplus } diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index 3df493349e387c1dd33a945886389218f7f7cf0f..bde27d2932a5bacd09864c76ee81faa6adef04a7 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -75,6 +75,7 @@ typedef struct SJoinSupporter { SArray* exprList; SFieldInfo fieldsInfo; STagCond tagCond; + SSqlGroupbyExpr groupInfo; // group by info struct STSBuf* pTSBuf; // the TSBuf struct that holds the compressed timestamp array FILE* f; // temporary file in order to create TSBuf char path[PATH_MAX]; // temporary file path, todo dynamic allocate memory @@ -82,11 +83,12 @@ typedef struct SJoinSupporter { char* pIdTagList; // result of first stage tags int32_t totalLen; int32_t num; + SArray* pVgroupTables; } SJoinSupporter; typedef struct SVgroupTableInfo { - SCMVgroupInfo vgInfo; - SArray* itemList; //SArray + SVgroupInfo vgInfo; + SArray* itemList; //SArray } SVgroupTableInfo; static FORCE_INLINE SQueryInfo* tscGetQueryInfoDetail(SSqlCmd* pCmd, int32_t subClauseIndex) { @@ -123,6 +125,7 @@ int32_t tscGetDataBlockFromList(void* pHashList, SArray* pDataBlockList, int64_t */ bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo); bool tscIsTWAQuery(SQueryInfo* pQueryInfo); +bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo); bool tscNonOrderedProjectionQueryOnSTable(SQueryInfo *pQueryInfo, int32_t tableIndex); bool tscOrderedProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex); @@ -156,7 +159,7 @@ SInternalField* tscFieldInfoGetInternalField(SFieldInfo* pFieldInfo, int32_t ind TAOS_FIELD* tscFieldInfoGetField(SFieldInfo* pFieldInfo, int32_t index); void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo); -void tscFieldInfoUpdateOffsetForInterResult(SQueryInfo* pQueryInfo); +void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo); int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index); void tscFieldInfoClear(SFieldInfo* pFieldInfo); @@ -165,15 +168,15 @@ static FORCE_INLINE int32_t 
tscNumOfFields(SQueryInfo* pQueryInfo) { return pQue int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2); -void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes, int16_t tableIndex); +void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes); int32_t tscGetResRowLength(SArray* pExprList); SSqlExpr* tscSqlExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type, - int16_t size, int16_t interSize, bool isTagCol); + int16_t size, int16_t resColId, int16_t interSize, bool isTagCol); SSqlExpr* tscSqlExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type, - int16_t size, int16_t interSize, bool isTagCol); + int16_t size, int16_t resColId, int16_t interSize, bool isTagCol); SSqlExpr* tscSqlExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, int16_t srcColumnIndex, int16_t type, int16_t size); @@ -215,7 +218,7 @@ SQueryInfo *tscGetQueryInfoDetailSafely(SSqlCmd *pCmd, int32_t subClauseIndex); void tscClearTableMetaInfo(STableMetaInfo* pTableMetaInfo, bool removeFromCache); STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, STableMeta* pTableMeta, - SVgroupsInfo* vgroupList, SArray* pTagCols); + SVgroupsInfo* vgroupList, SArray* pTagCols, SArray* pVgroupTables); STableMetaInfo* tscAddEmptyMetaInfo(SQueryInfo *pQueryInfo); int32_t tscAddSubqueryInfo(SSqlCmd *pCmd); @@ -224,6 +227,9 @@ void tscInitQueryInfo(SQueryInfo* pQueryInfo); void tscClearSubqueryInfo(SSqlCmd* pCmd); void tscFreeVgroupTableInfo(SArray* pVgroupTables); +SArray* tscVgroupTableInfoClone(SArray* pVgroupTables); +void tscRemoveVgroupTableGroup(SArray* pVgroupTable, int32_t index); +void tscVgroupTableCopy(SVgroupTableInfo* info, SVgroupTableInfo* pInfo); int tscGetSTableVgroupInfo(SSqlObj* pSql, int32_t clauseIndex); int tscGetTableMeta(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo); @@ -234,7 +240,7 @@ void tscDoQuery(SSqlObj* pSql); SVgroupsInfo* tscVgroupInfoClone(SVgroupsInfo *pInfo); void* tscVgroupInfoClear(SVgroupsInfo *pInfo); -void tscSCMVgroupInfoCopy(SCMVgroupInfo* dst, const SCMVgroupInfo* src); +void tscSVgroupInfoCopy(SVgroupInfo* dst, const SVgroupInfo* src); /** * The create object function must be successful expect for the out of memory issue. 
* @@ -262,6 +268,7 @@ void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t sub void doAddGroupColumnForSubquery(SQueryInfo* pQueryInfo, int32_t tagIndex); int16_t tscGetJoinTagColIdByUid(STagCond* pTagCond, uint64_t uid); +int16_t tscGetTagColIndexById(STableMeta* pTableMeta, int16_t colId); void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex); @@ -275,6 +282,7 @@ int tscSetMgmtEpSetFromCfg(const char *first, const char *second); bool tscSetSqlOwner(SSqlObj* pSql); void tscClearSqlOwner(SSqlObj* pSql); +int32_t doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize); void* malloc_throw(size_t size); void* calloc_throw(size_t nmemb, size_t size); diff --git a/src/client/inc/tschemautil.h b/src/client/inc/tschemautil.h index 67942ad42a0942756efe18e44eff711df59ba1d9..f6dc45398f35c38598f3f3132b2f6f5601a4ed68 100644 --- a/src/client/inc/tschemautil.h +++ b/src/client/inc/tschemautil.h @@ -77,7 +77,7 @@ SSchema *tscGetTableColumnSchema(const STableMeta *pMeta, int32_t colIndex); * @param colId * @return */ -SSchema* tscGetTableColumnSchemaById(STableMeta* pTableMeta, int16_t colId); +SSchema* tscGetColumnSchemaById(STableMeta* pTableMeta, int16_t colId); /** * check if the schema is valid or not, including following aspects: @@ -107,9 +107,6 @@ SSchema tscGetTbnameColumnSchema(); */ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size); -//todo tags value as well as the table id structure needs refactor -char *tsGetTagsValue(STableMeta *pMeta); - #ifdef __cplusplus } #endif diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index fa215db2702f63aff4c704f8393255a94987d681..ff36cf0f5ac31ecf3baa4abae5697e08a1c8bc0b 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -30,6 +30,7 @@ extern "C" { #include "tsqlfunction.h" #include "tutil.h" #include "tcache.h" +#include "tref.h" #include "qExecutor.h" #include "qSqlparser.h" @@ -89,12 +90,12 @@ typedef struct STableComInfo { int32_t rowSize; } STableComInfo; -typedef struct SCMCorVgroupInfo { - int32_t version; - int8_t inUse; - int8_t numOfEps; - SEpAddr1 epAddr[TSDB_MAX_REPLICA]; -} SCMCorVgroupInfo; +typedef struct SCorVgroupInfo { + int32_t version; + int8_t inUse; + int8_t numOfEps; + SEpAddr1 epAddr[TSDB_MAX_REPLICA]; +} SCorVgroupInfo; typedef struct STableMeta { STableComInfo tableInfo; @@ -102,8 +103,8 @@ typedef struct STableMeta { int16_t sversion; int16_t tversion; char sTableId[TSDB_TABLE_FNAME_LEN]; - SCMVgroupInfo vgroupInfo; - SCMCorVgroupInfo corVgroupInfo; + SVgroupInfo vgroupInfo; + SCorVgroupInfo corVgroupInfo; STableId id; SSchema schema[]; // if the table is TSDB_CHILD_TABLE, schema is acquired by super table meta info } STableMeta; @@ -127,7 +128,7 @@ typedef struct STableMetaInfo { typedef struct SSqlExpr { char aliasName[TSDB_COL_NAME_LEN]; // as aliasName SColIndex colInfo; - int64_t uid; // refactor use the pointer + uint64_t uid; // refactor use the pointer int16_t functionId; // function id in aAgg array int16_t resType; // return value type int16_t resBytes; // length of return value @@ -135,6 +136,7 @@ typedef struct SSqlExpr { int16_t numOfParams; // argument value of each function tVariant param[3]; // parameters are not more than 3 int32_t offset; // sub result column value of arithmetic expression. 
+ int16_t resColId; // result column id } SSqlExpr; typedef struct SColumnIndex { @@ -250,6 +252,7 @@ typedef struct SQueryInfo { int64_t clauseLimit; // limit for current sub clause int64_t prjOffset; // offset value in the original sql expression, only applied at client side int32_t udColumnId; // current user-defined constant output field column id, monotonically decreases from TSDB_UD_COLUMN_INDEX + int16_t resColumnId; // result column id } SQueryInfo; typedef struct { @@ -290,32 +293,32 @@ typedef struct SResRec { } SResRec; typedef struct { - int64_t numOfRows; // num of results in current retrieved - int64_t numOfRowsGroup; // num of results of current group - int64_t numOfTotal; // num of total results - int64_t numOfClauseTotal; // num of total result in current subclause - char * pRsp; - int32_t rspType; - int32_t rspLen; - uint64_t qhandle; - int64_t uid; - int64_t useconds; - int64_t offset; // offset value from vnode during projection query of stable - int32_t row; - int16_t numOfCols; - int16_t precision; - bool completed; - int32_t code; - int32_t numOfGroups; - SResRec * pGroupRec; - char * data; - TAOS_ROW tsrow; - int32_t* length; // length for each field for current row - char ** buffer; // Buffer used to put multibytes encoded using unicode (wchar_t) - SColumnIndex * pColumnIndex; + int32_t numOfRows; // num of results in current retrieval + int64_t numOfRowsGroup; // num of results of current group + int64_t numOfTotal; // num of total results + int64_t numOfClauseTotal; // num of total result in current subclause + char * pRsp; + int32_t rspType; + int32_t rspLen; + uint64_t qhandle; + int64_t useconds; + int64_t offset; // offset value from vnode during projection query of stable + int32_t row; + int16_t numOfCols; + int16_t precision; + bool completed; + int32_t code; + int32_t numOfGroups; + SResRec * pGroupRec; + char * data; + TAOS_ROW tsrow; + TAOS_ROW urow; + int32_t* length; // length for each field for current row + char ** buffer; // Buffer used to put multibytes encoded using unicode (wchar_t) + SColumnIndex* pColumnIndex; + SArithmeticSupport* pArithSup; // support the arithmetic expression calculation on agg functions - - struct SLocalReducer *pLocalReducer; + struct SLocalReducer* pLocalReducer; } SSqlRes; typedef struct STscObj { @@ -329,6 +332,7 @@ typedef struct STscObj { char writeAuth : 1; char superAuth : 1; uint32_t connId; + uint64_t rid; // ref ID returned by taosAddRef struct SSqlObj * pHb; struct SSqlObj * sqlList; struct SSqlStream *streamList; @@ -338,16 +342,16 @@ typedef struct STscObj { } STscObj; typedef struct SSubqueryState { - int32_t numOfRemain; // the number of remain unfinished subquery - int32_t numOfSub; // the number of total sub-queries - uint64_t numOfRetrievedRows; // total number of points in this query + int32_t numOfRemain; // the number of remain unfinished subquery + int32_t numOfSub; // the number of total sub-queries + uint64_t numOfRetrievedRows; // total number of points in this query } SSubqueryState; typedef struct SSqlObj { void *signature; pthread_t owner; // owner of sql object, by which it is executed STscObj *pTscObj; - void *pRpcCtx; + int64_t rpcRid; void (*fp)(); void (*fetchFp)(); void *param; @@ -421,6 +425,7 @@ int32_t tscTansformSQLFuncForSTableQuery(SQueryInfo *pQueryInfo); void tscRestoreSQLFuncForSTableQuery(SQueryInfo *pQueryInfo); int32_t tscCreateResPointerInfo(SSqlRes *pRes, SQueryInfo *pQueryInfo); +void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo); void tscResetSqlCmdObj(SSqlCmd 
*pCmd, bool removeFromCache); @@ -430,14 +435,6 @@ void tscResetSqlCmdObj(SSqlCmd *pCmd, bool removeFromCache); */ void tscFreeSqlResult(SSqlObj *pSql); -/** - * only free part of resources allocated during query. - * TODO remove it later - * Note: this function is multi-thread safe. - * @param pObj - */ -void tscPartiallyFreeSqlObj(SSqlObj *pSql); - /** * free sql object, release allocated resource * @param pObj @@ -446,7 +443,7 @@ void tscFreeSqlObj(SSqlObj *pSql); void tscFreeRegisteredSqlObj(void *pSql); void tscFreeTableMetaHelper(void *pTableMeta); -void tscCloseTscObj(STscObj *pObj); +void tscCloseTscObj(void *pObj); // todo move to taos? or create a new file: taos_internal.h TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int), @@ -468,17 +465,17 @@ int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* s int32_t tscToSQLCmd(SSqlObj *pSql, struct SSqlInfo *pInfo); -static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex) { +static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex, int32_t offset) { SInternalField* pInfo = (SInternalField*) TARRAY_GET_ELEM(pFieldInfo->internalField, columnIndex); - assert(pInfo->pSqlExpr != NULL); - int32_t type = pInfo->pSqlExpr->resType; - int32_t bytes = pInfo->pSqlExpr->resBytes; + int32_t type = pInfo->field.type; + int32_t bytes = pInfo->field.bytes; - char* pData = pRes->data + (int32_t)(pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row); + char* pData = pRes->data + (int32_t)(offset * pRes->numOfRows + bytes * pRes->row); + UNUSED(pData); - // user defined constant value output columns - if (TSDB_COL_IS_UD_COL(pInfo->pSqlExpr->colInfo.flag)) { +// user defined constant value output columns + if (pInfo->pSqlExpr != NULL && TSDB_COL_IS_UD_COL(pInfo->pSqlExpr->colInfo.flag)) { if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) { pData = pInfo->pSqlExpr->param[1].pz; pRes->length[columnIndex] = pInfo->pSqlExpr->param[1].nLen; @@ -516,13 +513,14 @@ extern void * tscQhandle; extern int tscKeepConn[]; extern int tsInsertHeadSize; extern int tscNumOfThreads; +extern int tscRefId; extern SRpcCorEpSet tscMgmtEpSet; extern int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo); -int32_t tscCompareTidTags(const void* p1, const void* p2); void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArray* tables); +int16_t getNewResColId(SQueryInfo* pQueryInfo); #ifdef __cplusplus } diff --git a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h index eaea91d1bfb0197a36fe67d78a09ab190e7e3aeb..582bd6bac03ce0049c7c22fe58fa4fa6eb8c69fb 100644 --- a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h +++ b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h @@ -129,6 +129,14 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getSchemaMetaData JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp (JNIEnv *, jobject, jlong, jlong, jobject); +/* + * Class: com_taosdata_jdbc_TSDBJNIConnector + * Method: fetchBlockImp + * Signature: (JJLcom/taosdata/jdbc/TSDBResultSetBlockData;)I + */ +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchBlockImp + (JNIEnv *, jobject, jlong, jlong, jobject); + /* * Class: com_taosdata_jdbc_TSDBJNIConnector * Method: closeConnectionImp diff --git a/src/client/src/TSDBJNIConnector.c 
b/src/client/src/TSDBJNIConnector.c index 4e2272eb0566569754df5ef6201f3d190f2789c1..a8829499a324605036beefad62d83eccf4d1c65b 100644 --- a/src/client/src/TSDBJNIConnector.c +++ b/src/client/src/TSDBJNIConnector.c @@ -17,7 +17,6 @@ #include "taos.h" #include "tlog.h" #include "tscUtil.h" -#include "tsclient.h" #include "com_taosdata_jdbc_TSDBJNIConnector.h" @@ -57,6 +56,10 @@ jmethodID g_rowdataSetStringFp; jmethodID g_rowdataSetTimestampFp; jmethodID g_rowdataSetByteArrayFp; +jmethodID g_blockdataSetByteArrayFp; +jmethodID g_blockdataSetNumOfRowsFp; +jmethodID g_blockdataSetNumOfColsFp; + #define JNI_SUCCESS 0 #define JNI_TDENGINE_ERROR -1 #define JNI_CONNECTION_NULL -2 @@ -66,7 +69,7 @@ jmethodID g_rowdataSetByteArrayFp; #define JNI_FETCH_END -6 #define JNI_OUT_OF_MEMORY -7 -void jniGetGlobalMethod(JNIEnv *env) { +static void jniGetGlobalMethod(JNIEnv *env) { // make sure init function executed once switch (atomic_val_compare_exchange_32(&__init, 0, 1)) { case 0: @@ -114,10 +117,31 @@ void jniGetGlobalMethod(JNIEnv *env) { g_rowdataSetByteArrayFp = (*env)->GetMethodID(env, g_rowdataClass, "setByteArray", "(I[B)V"); (*env)->DeleteLocalRef(env, rowdataClass); + jclass blockdataClass = (*env)->FindClass(env, "com/taosdata/jdbc/TSDBResultSetBlockData"); + jclass g_blockdataClass = (*env)->NewGlobalRef(env, blockdataClass); + g_blockdataSetByteArrayFp = (*env)->GetMethodID(env, g_blockdataClass, "setByteArray", "(II[B)V"); + g_blockdataSetNumOfRowsFp = (*env)->GetMethodID(env, g_blockdataClass, "setNumOfRows", "(I)V"); + g_blockdataSetNumOfColsFp = (*env)->GetMethodID(env, g_blockdataClass, "setNumOfCols", "(I)V"); + (*env)->DeleteLocalRef(env, blockdataClass); + atomic_store_32(&__init, 2); jniDebug("native method register finished"); } +static int32_t check_for_params(jobject jobj, jlong conn, jlong res) { + if ((TAOS*) conn == NULL) { + jniError("jobj:%p, connection is closed", jobj); + return JNI_CONNECTION_NULL; + } + + if ((TAOS_RES *) res == NULL) { + jniError("jobj:%p, conn:%p, res is null", jobj, (TAOS*) conn); + return JNI_RESULT_SET_NULL; + } + + return JNI_SUCCESS; +} + JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setAllocModeImp(JNIEnv *env, jobject jobj, jint jMode, jstring jPath, jboolean jAutoDump) { if (jPath != NULL) { @@ -192,39 +216,37 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setOptions(JNIEnv JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_connectImp(JNIEnv *env, jobject jobj, jstring jhost, jint jport, jstring jdbName, jstring juser, jstring jpass) { - jlong ret = 0; + jlong ret = 0; const char *host = NULL; - const char *dbname = NULL; const char *user = NULL; const char *pass = NULL; + const char *dbname = NULL; if (jhost != NULL) { host = (*env)->GetStringUTFChars(env, jhost, NULL); } + if (jdbName != NULL) { dbname = (*env)->GetStringUTFChars(env, jdbName, NULL); } + if (juser != NULL) { user = (*env)->GetStringUTFChars(env, juser, NULL); } + if (jpass != NULL) { pass = (*env)->GetStringUTFChars(env, jpass, NULL); } if (user == NULL) { - jniDebug("jobj:%p, user is null, use default user %s", jobj, TSDB_DEFAULT_USER); + jniDebug("jobj:%p, user not specified, use default user %s", jobj, TSDB_DEFAULT_USER); } + if (pass == NULL) { - jniDebug("jobj:%p, pass is null, use default password", jobj); + jniDebug("jobj:%p, pass not specified, use default password", jobj); } - /* - * set numOfThreadsPerCore = 0 - * means only one thread for client side scheduler - */ - tsNumOfThreadsPerCore = 0.0; - - ret = 
(jlong)taos_connect((char *)host, (char *)user, (char *)pass, (char *)dbname, (uint16_t)jport); + ret = (jlong) taos_connect((char *)host, (char *)user, (char *)pass, (char *)dbname, (uint16_t)jport); if (ret == 0) { jniError("jobj:%p, conn:%p, connect to database failed, host=%s, user=%s, dbname=%s, port=%d", jobj, (void *)ret, (char *)host, (char *)user, (char *)dbname, (int32_t)jport); @@ -233,10 +255,21 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_connectImp(JNIEn (char *)host, (char *)user, (char *)dbname, (int32_t)jport); } - if (host != NULL) (*env)->ReleaseStringUTFChars(env, jhost, host); - if (dbname != NULL) (*env)->ReleaseStringUTFChars(env, jdbName, dbname); - if (user != NULL) (*env)->ReleaseStringUTFChars(env, juser, user); - if (pass != NULL) (*env)->ReleaseStringUTFChars(env, jpass, pass); + if (host != NULL) { + (*env)->ReleaseStringUTFChars(env, jhost, host); + } + + if (dbname != NULL) { + (*env)->ReleaseStringUTFChars(env, jdbName, dbname); + } + + if (user != NULL) { + (*env)->ReleaseStringUTFChars(env, juser, user); + } + + if (pass != NULL) { + (*env)->ReleaseStringUTFChars(env, jpass, pass); + } return ret; } @@ -245,64 +278,53 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp( jbyteArray jsql, jlong con) { TAOS *tscon = (TAOS *)con; if (tscon == NULL) { - jniError("jobj:%p, connection is already closed", jobj); + jniError("jobj:%p, connection already closed", jobj); return JNI_CONNECTION_NULL; } if (jsql == NULL) { - jniError("jobj:%p, conn:%p, sql is null", jobj, tscon); + jniError("jobj:%p, conn:%p, empty sql string", jobj, tscon); return JNI_SQL_NULL; } jsize len = (*env)->GetArrayLength(env, jsql); - char *dst = (char *)calloc(1, sizeof(char) * (len + 1)); - if (dst == NULL) { - jniError("jobj:%p, conn:%p, can not alloc memory", jobj, tscon); + char *str = (char *) calloc(1, sizeof(char) * (len + 1)); + if (str == NULL) { + jniError("jobj:%p, conn:%p, alloc memory failed", jobj, tscon); return JNI_OUT_OF_MEMORY; } - (*env)->GetByteArrayRegion(env, jsql, 0, len, (jbyte *)dst); + (*env)->GetByteArrayRegion(env, jsql, 0, len, (jbyte *)str); if ((*env)->ExceptionCheck(env)) { // todo handle error } - jniDebug("jobj:%p, conn:%p, sql:%s", jobj, tscon, dst); - - SSqlObj *pSql = taos_query(tscon, dst); + SSqlObj *pSql = taos_query(tscon, str); int32_t code = taos_errno(pSql); if (code != TSDB_CODE_SUCCESS) { jniError("jobj:%p, conn:%p, code:%s, msg:%s", jobj, tscon, tstrerror(code), taos_errstr(pSql)); } else { - int32_t affectRows = 0; if (pSql->cmd.command == TSDB_SQL_INSERT) { - affectRows = taos_affected_rows(pSql); + int32_t affectRows = taos_affected_rows(pSql); jniDebug("jobj:%p, conn:%p, code:%s, affect rows:%d", jobj, tscon, tstrerror(code), affectRows); } else { jniDebug("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code)); } } - free(dst); - return (jlong)pSql; + free(str); + return (jlong) pSql; } JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrCodeImp(JNIEnv *env, jobject jobj, jlong con, jlong tres) { - TAOS *tscon = (TAOS *)con; - if (tscon == NULL) { - jniError("jobj:%p, connection is closed", jobj); - return (jint)TSDB_CODE_TSC_INVALID_CONNECTION; + int32_t code = check_for_params(jobj, con, tres); + if (code != JNI_SUCCESS) { + return code; } - if ((void *)tres == NULL) { - jniError("jobj:%p, conn:%p, resultset is null", jobj, tscon); - return JNI_RESULT_SET_NULL; - } - - TAOS_RES *pSql = (TAOS_RES *)tres; - - return (jint)taos_errno(pSql); + return 
(jint)taos_errno((TAOS_RES*) tres); } JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrMsgImp(JNIEnv *env, jobject jobj, jlong tres) { @@ -313,23 +335,16 @@ JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrMsgImp(J JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultSetImp(JNIEnv *env, jobject jobj, jlong con, jlong tres) { TAOS *tscon = (TAOS *)con; - if (tscon == NULL) { - jniError("jobj:%p, connection is closed", jobj); - return JNI_CONNECTION_NULL; - } - - if ((void *)tres == NULL) { - jniError("jobj:%p, conn:%p, resultset is null", jobj, tscon); - return JNI_RESULT_SET_NULL; + int32_t code = check_for_params(jobj, con, tres); + if (code != JNI_SUCCESS) { + return code; } SSqlObj *pSql = (TAOS_RES *)tres; - STscObj *pObj = pSql->pTscObj; - if (tscIsUpdateQuery(pSql)) { - jniDebug("jobj:%p, conn:%p, update query, no resultset, %p", jobj, pObj, (void *)tres); + jniDebug("jobj:%p, conn:%p, update query, no resultset, %p", jobj, tscon, (void *)tres); } else { - jniDebug("jobj:%p, conn:%p, get resultset, %p", jobj, pObj, (void *)tres); + jniDebug("jobj:%p, conn:%p, get resultset, %p", jobj, tscon, (void *)tres); } return tres; @@ -337,15 +352,9 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultSetImp( JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_isUpdateQueryImp(JNIEnv *env, jobject jobj, jlong con, jlong tres) { - TAOS *tscon = (TAOS *)con; - if (tscon == NULL) { - jniError("jobj:%p, connection is closed", jobj); - return JNI_CONNECTION_NULL; - } - - if ((void *)tres == NULL) { - jniError("jobj:%p, conn:%p, resultset is null", jobj, tscon); - return JNI_RESULT_SET_NULL; + int32_t code = check_for_params(jobj, con, tres); + if (code != JNI_SUCCESS) { + return code; } SSqlObj *pSql = (TAOS_RES *)tres; @@ -355,37 +364,27 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_isUpdateQueryImp( JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_freeResultSetImp(JNIEnv *env, jobject jobj, jlong con, jlong res) { - TAOS *tscon = (TAOS *)con; - if (tscon == NULL) { - jniError("jobj:%p, connection is closed", jobj); - return JNI_CONNECTION_NULL; - } - - if ((void *)res == NULL) { - jniError("jobj:%p, conn:%p, resultset is null", jobj, tscon); - return JNI_RESULT_SET_NULL; + int32_t code = check_for_params(jobj, con, res); + if (code != JNI_SUCCESS) { + return code; } taos_free_result((void *)res); - jniDebug("jobj:%p, conn:%p, free resultset:%p", jobj, tscon, (void *)res); + jniDebug("jobj:%p, conn:%p, free resultset:%p", jobj, (TAOS*) con, (void *)res); + return JNI_SUCCESS; } JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getAffectedRowsImp(JNIEnv *env, jobject jobj, jlong con, jlong res) { TAOS *tscon = (TAOS *)con; - if (tscon == NULL) { - jniError("jobj:%p, connection is closed", jobj); - return JNI_CONNECTION_NULL; - } - - if ((void *)res == NULL) { - jniError("jobj:%p, conn:%p, resultset is null", jobj, tscon); - return JNI_RESULT_SET_NULL; + int32_t code = check_for_params(jobj, con, res); + if (code != JNI_SUCCESS) { + return code; } jint ret = taos_affected_rows((SSqlObj *)res); - jniDebug("jobj:%p, conn:%p, sql:%p, res: %p, affect rows:%d", jobj, tscon, (void *)con, (void *)res, (int32_t)ret); + jniDebug("jobj:%p, conn:%p, sql:%p, res: %p, affect rows:%d", jobj, tscon, (TAOS *)con, (TAOS_RES *)res, (int32_t)ret); return ret; } @@ -394,27 +393,20 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getSchemaMetaData jlong con, 
jlong res, jobject arrayListObj) { TAOS *tscon = (TAOS *)con; - if (tscon == NULL) { - jniError("jobj:%p, connection is closed", jobj); - return JNI_CONNECTION_NULL; - } - - TAOS_RES *result = (TAOS_RES *)res; - if (result == NULL) { - jniError("jobj:%p, conn:%p, resultset is null", jobj, tscon); - return JNI_RESULT_SET_NULL; + int32_t code = check_for_params(jobj, con, res); + if (code != JNI_SUCCESS) { + return code; } - TAOS_FIELD *fields = taos_fetch_fields(result); - int num_fields = taos_num_fields(result); - - // jobject arrayListObj = (*env)->NewObject(env, g_arrayListClass, g_arrayListConstructFp, ""); + TAOS_RES* tres = (TAOS_RES*) res; + TAOS_FIELD *fields = taos_fetch_fields(tres); + int32_t num_fields = taos_num_fields(tres); if (num_fields == 0) { - jniError("jobj:%p, conn:%p, resultset:%p, fields size is %d", jobj, tscon, (void *)res, num_fields); + jniError("jobj:%p, conn:%p, resultset:%p, fields size is %d", jobj, tscon, tres, num_fields); return JNI_NUM_OF_FIELDS_0; } else { - jniDebug("jobj:%p, conn:%p, resultset:%p, fields size is %d", jobj, tscon, (void *)res, num_fields); + jniDebug("jobj:%p, conn:%p, resultset:%p, fields size is %d", jobj, tscon, tres, num_fields); for (int i = 0; i < num_fields; ++i) { jobject metadataObj = (*env)->NewObject(env, g_metadataClass, g_metadataConstructFp); (*env)->SetIntField(env, metadataObj, g_metadataColtypeField, fields[i].type); @@ -457,21 +449,21 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn } TAOS_FIELD *fields = taos_fetch_fields(result); - int num_fields = taos_num_fields(result); - if (num_fields == 0) { - jniError("jobj:%p, conn:%p, resultset:%p, fields size is %d", jobj, tscon, (void*)res, num_fields); + int32_t numOfFields = taos_num_fields(result); + if (numOfFields == 0) { + jniError("jobj:%p, conn:%p, resultset:%p, fields size %d", jobj, tscon, (void*)res, numOfFields); return JNI_NUM_OF_FIELDS_0; } TAOS_ROW row = taos_fetch_row(result); if (row == NULL) { - int tserrno = taos_errno(result); - if (tserrno == 0) { - jniDebug("jobj:%p, conn:%p, resultset:%p, fields size is %d, fetch row to the end", jobj, tscon, (void*)res, num_fields); + int code = taos_errno(result); + if (code == TSDB_CODE_SUCCESS) { + jniDebug("jobj:%p, conn:%p, resultset:%p, fields size is %d, fetch row to the end", jobj, tscon, (void*)res, numOfFields); return JNI_FETCH_END; } else { - jniDebug("jobj:%p, conn:%p, interruptted query", jobj, tscon); + jniDebug("jobj:%p, conn:%p, interrupted query", jobj, tscon); return JNI_RESULT_SET_NULL; } } @@ -480,7 +472,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn char tmp[TSDB_MAX_BYTES_PER_ROW] = {0}; - for (int i = 0; i < num_fields; i++) { + for (int i = 0; i < numOfFields; i++) { if (row[i] == NULL) { continue; } @@ -534,6 +526,45 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn return JNI_SUCCESS; } +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchBlockImp(JNIEnv *env, jobject jobj, jlong con, + jlong res, jobject rowobj) { + TAOS * tscon = (TAOS *)con; + int32_t code = check_for_params(jobj, con, res); + if (code != JNI_SUCCESS) { + return code; + } + + TAOS_RES * tres = (TAOS_RES *)res; + TAOS_FIELD *fields = taos_fetch_fields(tres); + + int32_t numOfFields = taos_num_fields(tres); + assert(numOfFields > 0); + + TAOS_ROW row = NULL; + int32_t numOfRows = taos_fetch_block(tres, &row); + if (numOfRows == 0) { + code = taos_errno(tres); + if (code == JNI_SUCCESS) { + 
jniDebug("jobj:%p, conn:%p, resultset:%p, numOfFields:%d, no data to retrieve", jobj, tscon, (void *)res, + numOfFields); + return JNI_FETCH_END; + } else { + jniDebug("jobj:%p, conn:%p, query interrupted", jobj, tscon); + return JNI_RESULT_SET_NULL; + } + } + + (*env)->CallVoidMethod(env, rowobj, g_blockdataSetNumOfRowsFp, (jint)numOfRows); + (*env)->CallVoidMethod(env, rowobj, g_blockdataSetNumOfColsFp, (jint)numOfFields); + + for (int i = 0; i < numOfFields; i++) { + (*env)->CallVoidMethod(env, rowobj, g_blockdataSetByteArrayFp, i, fields[i].bytes * numOfRows, + jniFromNCharToByteArray(env, (char *)row[i], fields[i].bytes * numOfRows)); + } + + return JNI_SUCCESS; +} + JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeConnectionImp(JNIEnv *env, jobject jobj, jlong con) { TAOS *tscon = (TAOS *)con; @@ -589,7 +620,6 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNIEn jniGetGlobalMethod(env); TAOS_SUB *tsub = (TAOS_SUB *)sub; - TAOS_RES *res = taos_consume(tsub); if (res == NULL) { @@ -621,16 +651,16 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_validateCreateTab jsize len = (*env)->GetArrayLength(env, jsql); - char *dst = (char *)calloc(1, sizeof(char) * (len + 1)); - (*env)->GetByteArrayRegion(env, jsql, 0, len, (jbyte *)dst); + char *str = (char *)calloc(1, sizeof(char) * (len + 1)); + (*env)->GetByteArrayRegion(env, jsql, 0, len, (jbyte *)str); if ((*env)->ExceptionCheck(env)) { // todo handle error } - int code = taos_validate_sql(tscon, dst); + int code = taos_validate_sql(tscon, str); jniDebug("jobj:%p, conn:%p, code is %d", jobj, tscon, code); - free(dst); + free(str); return code; } diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index c996bb2a76ff505fca4e09cd8763b324e5c4cb8d..3ff8a68d8f07f776dbb03b4b55b830b7e37dfdff 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -91,8 +91,8 @@ void taos_query_a(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *pa int32_t sqlLen = (int32_t)strlen(sqlstr); if (sqlLen > tsMaxSQLStringLen) { tscError("sql string exceeds max length:%d", tsMaxSQLStringLen); - terrno = TSDB_CODE_TSC_INVALID_SQL; - tscQueueAsyncError(fp, param, TSDB_CODE_TSC_INVALID_SQL); + terrno = TSDB_CODE_TSC_EXCEED_SQL_LIMIT; + tscQueueAsyncError(fp, param, terrno); return; } @@ -176,7 +176,7 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo } if (pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE) { - tscFetchDatablockFromSubquery(pSql); + tscFetchDatablockForSubquery(pSql); } else { tscProcessSql(pSql); } @@ -226,7 +226,7 @@ void taos_fetch_rows_a(TAOS_RES *taosa, __async_cb_func_t fp, void *param) { // handle the sub queries of join query if (pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE) { - tscFetchDatablockFromSubquery(pSql); + tscFetchDatablockForSubquery(pSql); } else if (pRes->completed) { if(pCmd->command == TSDB_SQL_FETCH || (pCmd->command >= TSDB_SQL_SERV_STATUS && pCmd->command <= TSDB_SQL_CURRENT_USER)) { if (hasMoreVnodesToTry(pSql)) { // sequentially retrieve data from remain vnodes. 
@@ -351,7 +351,7 @@ void tscProcessFetchRow(SSchedMsg *pMsg) { SInternalField* pSup = taosArrayGet(pQueryInfo->fieldsInfo.internalField, i); if (pSup->pSqlExpr != NULL) { - tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i); + tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i, 0); } else { // todo add } @@ -405,7 +405,8 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { SSqlRes *pRes = &pSql->res; pRes->code = code; - const char* msg = (pCmd->command == TSDB_SQL_STABLEVGROUP)? "vgroup-list":"table-meta"; + SSqlObj *sub = (SSqlObj*) res; + const char* msg = (sub->cmd.command == TSDB_SQL_STABLEVGROUP)? "vgroup-list":"table-meta"; if (code != TSDB_CODE_SUCCESS) { tscError("%p get %s failed, code:%s", pSql, msg, tstrerror(code)); goto _error; @@ -427,8 +428,12 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { } else { assert(code == TSDB_CODE_SUCCESS); } - - assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0) && pSql->param != NULL); + + // param already freed by other routine and pSql in tscCache when ctrl + c + if (atomic_load_ptr(&pSql->param) == NULL) { + return; + } + assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0)); SRetrieveSupport *trs = (SRetrieveSupport *)pSql->param; SSqlObj * pParObj = trs->pParentSql; @@ -437,6 +442,20 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { assert(pParObj->signature == pParObj && trs->subqueryIndex == pTableMetaInfo->vgroupIndex && pTableMetaInfo->vgroupIndex >= 0 && pTableMetaInfo->vgroupList != NULL); + // tscProcessSql can add error into async res + tscProcessSql(pSql); + return; + } else if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY)) { + tscDebug("%p update table meta in local cache, continue to process sql and send corresponding tid_tag query", pSql); + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); + code = tscGetTableMeta(pSql, pTableMetaInfo); + if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { + return; + } else { + assert(code == TSDB_CODE_SUCCESS); + } + + assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0)); // tscProcessSql can add error into async res tscProcessSql(pSql); return; @@ -461,7 +480,6 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { tscResetSqlCmdObj(pCmd, false); code = tsParseSql(pSql, true); - if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; } else if (code != TSDB_CODE_SUCCESS) { diff --git a/src/client/src/tscFunctionImpl.c b/src/client/src/tscFunctionImpl.c index 1b4f92d3fc9fc78951e23eaec4c438d7522bf7f1..d39b833374b4891358053788a1afbf9246dfb0dd 100644 --- a/src/client/src/tscFunctionImpl.c +++ b/src/client/src/tscFunctionImpl.c @@ -28,6 +28,7 @@ #include "tscompression.h" #include "tsqlfunction.h" #include "tutil.h" +#include "ttype.h" #define GET_INPUT_CHAR(x) (((char *)((x)->aInputElemBuf)) + ((x)->startOffset) * ((x)->inputBytes)) #define GET_INPUT_CHAR_INDEX(x, y) (GET_INPUT_CHAR(x) + (y) * (x)->inputBytes) @@ -99,7 +100,7 @@ typedef struct SSumInfo { // the attribute of hasResult is not needed since the num attribute would server as this purpose typedef struct SAvgInfo { double sum; - int64_t num; // num servers as the hasResult attribute in other struct + int64_t num; } SAvgInfo; typedef struct SStddevInfo { @@ -129,11 +130,11 @@ typedef struct STopBotInfo { } STopBotInfo; // leastsquares do not apply to super table -typedef struct SLeastsquareInfo { +typedef struct SLeastsquaresInfo { double mat[2][3]; double startVal; int64_t num; -} SLeastsquareInfo; +} 
SLeastsquaresInfo; typedef struct SAPercentileInfo { SHistogramInfo *pHisto; @@ -167,7 +168,13 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_INTERP) { *type = (int16_t)dataType; *bytes = (int16_t)dataBytes; - *interBytes = *bytes + sizeof(SResultInfo); + + if (functionId == TSDB_FUNC_INTERP) { + *interBytes = sizeof(SInterpInfoDetail); + } else { + *interBytes = 0; + } + return TSDB_CODE_SUCCESS; } @@ -175,21 +182,21 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI if (functionId == TSDB_FUNC_TID_TAG) { // todo use struct *type = TSDB_DATA_TYPE_BINARY; *bytes = (int16_t)(dataBytes + sizeof(int16_t) + sizeof(int64_t) + sizeof(int32_t) + sizeof(int32_t) + VARSTR_HEADER_SIZE); - *interBytes = *bytes; + *interBytes = 0; return TSDB_CODE_SUCCESS; } if (functionId == TSDB_FUNC_COUNT) { *type = TSDB_DATA_TYPE_BIGINT; *bytes = sizeof(int64_t); - *interBytes = *bytes; + *interBytes = 0; return TSDB_CODE_SUCCESS; } if (functionId == TSDB_FUNC_ARITHM) { *type = TSDB_DATA_TYPE_DOUBLE; *bytes = sizeof(double); - *interBytes = *bytes; + *interBytes = 0; return TSDB_CODE_SUCCESS; } @@ -298,7 +305,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI } else if (functionId == TSDB_FUNC_FIRST || functionId == TSDB_FUNC_LAST) { *type = (int16_t)dataType; *bytes = (int16_t)dataBytes; - *interBytes = dataBytes + sizeof(SResultInfo); + *interBytes = dataBytes; } else if (functionId == TSDB_FUNC_SPREAD) { *type = (int16_t)TSDB_DATA_TYPE_DOUBLE; *bytes = sizeof(double); @@ -309,8 +316,8 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI *interBytes = (int16_t)sizeof(SPercentileInfo); } else if (functionId == TSDB_FUNC_LEASTSQR) { *type = TSDB_DATA_TYPE_BINARY; - *bytes = TSDB_AVG_FUNCTION_INTER_BUFFER_SIZE; // string - *interBytes = *bytes + sizeof(SResultInfo); + *bytes = MAX(TSDB_AVG_FUNCTION_INTER_BUFFER_SIZE, sizeof(SLeastsquaresInfo)); // string + *interBytes = *bytes; } else if (functionId == TSDB_FUNC_FIRST_DST || functionId == TSDB_FUNC_LAST_DST) { *type = TSDB_DATA_TYPE_BINARY; *bytes = (int16_t)(dataBytes + sizeof(SFirstLastInfo)); @@ -334,28 +341,20 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI return TSDB_CODE_SUCCESS; } -void setResultInfoBuf(SResultInfo *pResInfo, int32_t size, bool superTable, char* buf) { - assert(pResInfo->interResultBuf == NULL); - - pResInfo->bufLen = size; - pResInfo->superTableQ = superTable; - pResInfo->interResultBuf = buf; -} - // set the query flag to denote that query is completed static void no_next_step(SQLFunctionCtx *pCtx) { - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); pResInfo->complete = true; } static bool function_setup(SQLFunctionCtx *pCtx) { - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); if (pResInfo->initialized) { return false; } memset(pCtx->aOutputBuf, 0, (size_t)pCtx->outputBytes); - initResultInfo(pResInfo); + initResultInfo(pResInfo, pCtx->interBufBytes); return true; } @@ -367,7 +366,7 @@ static bool function_setup(SQLFunctionCtx *pCtx) { * @param pCtx */ static void function_finalizer(SQLFunctionCtx *pCtx) { - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); if (pResInfo->hasResult != DATA_SET_FLAG) { if (pCtx->outputType == TSDB_DATA_TYPE_BINARY || pCtx->outputType == 
TSDB_DATA_TYPE_NCHAR) { setVardataNull(pCtx->aOutputBuf, pCtx->outputType); @@ -431,7 +430,7 @@ static void count_function_f(SQLFunctionCtx *pCtx, int32_t index) { *((int64_t *)pCtx->aOutputBuf) += 1; // do not need it actually - SResultInfo *pInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pInfo = GET_RES_INFO(pCtx); pInfo->hasResult = DATA_SET_FLAG; } @@ -592,8 +591,8 @@ static void sum_function(SQLFunctionCtx *pCtx) { do_sum(pCtx); // keep the result data in output buffer, not in the intermediate buffer - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - if (pResInfo->hasResult == DATA_SET_FLAG && pResInfo->superTableQ) { + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + if (pResInfo->hasResult == DATA_SET_FLAG && pCtx->stableQuery) { // set the flag for super table query SSumInfo *pSum = (SSumInfo *)pCtx->aOutputBuf; pSum->hasResult = DATA_SET_FLAG; @@ -604,8 +603,8 @@ static void sum_function_f(SQLFunctionCtx *pCtx, int32_t index) { do_sum_f(pCtx, index); // keep the result data in output buffer, not in the intermediate buffer - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - if (pResInfo->hasResult == DATA_SET_FLAG && pResInfo->superTableQ) { + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + if (pResInfo->hasResult == DATA_SET_FLAG && pCtx->stableQuery) { SSumInfo *pSum = (SSumInfo *)pCtx->aOutputBuf; pSum->hasResult = DATA_SET_FLAG; } @@ -615,8 +614,7 @@ static int32_t sum_merge_impl(const SQLFunctionCtx *pCtx) { int32_t notNullElems = 0; GET_TRUE_DATA_TYPE(); - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - assert(pResInfo->superTableQ); + assert(pCtx->stableQuery); for (int32_t i = 0; i < pCtx->size; ++i) { char * input = GET_INPUT_CHAR_INDEX(pCtx, i); @@ -661,7 +659,7 @@ static void sum_func_second_merge(SQLFunctionCtx *pCtx) { int32_t notNullElems = sum_merge_impl(pCtx); SET_VAL(pCtx, notNullElems, 1); - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); if (notNullElems > 0) { pResInfo->hasResult = DATA_SET_FLAG; @@ -683,7 +681,7 @@ static int32_t firstFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, i } // no result for first query, data block is required - if (GET_RES_INFO(pCtx)->numOfRes <= 0) { + if (GET_RES_INFO(pCtx) == NULL || GET_RES_INFO(pCtx)->numOfRes <= 0) { return BLK_DATA_ALL_NEEDED; } else { return BLK_DATA_NO_NEEDED; @@ -695,7 +693,7 @@ static int32_t lastFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, in return BLK_DATA_NO_NEEDED; } - if (GET_RES_INFO(pCtx)->numOfRes <= 0) { + if (GET_RES_INFO(pCtx) == NULL || GET_RES_INFO(pCtx)->numOfRes <= 0) { return BLK_DATA_ALL_NEEDED; } else { return BLK_DATA_NO_NEEDED; @@ -755,9 +753,9 @@ static void avg_function(SQLFunctionCtx *pCtx) { int32_t notNullElems = 0; // NOTE: keep the intermediate result into the interResultBuf - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - SAvgInfo *pAvgInfo = (SAvgInfo *)pResInfo->interResultBuf; + SAvgInfo *pAvgInfo = (SAvgInfo *)GET_ROWCELL_INTERBUF(pResInfo); double * pVal = &pAvgInfo->sum; if (pCtx->preAggVals.isSet) { @@ -800,8 +798,8 @@ static void avg_function(SQLFunctionCtx *pCtx) { } // keep the data into the final output buffer for super table query since this execution may be the last one - if (pResInfo->superTableQ) { - memcpy(pCtx->aOutputBuf, pResInfo->interResultBuf, sizeof(SAvgInfo)); + if (pCtx->stableQuery) { + memcpy(pCtx->aOutputBuf, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SAvgInfo)); } } @@ -814,9 +812,9 @@ static void avg_function_f(SQLFunctionCtx 
*pCtx, int32_t index) { SET_VAL(pCtx, 1, 1); // NOTE: keep the intermediate result into the interResultBuf - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - SAvgInfo *pAvgInfo = (SAvgInfo *)pResInfo->interResultBuf; + SAvgInfo *pAvgInfo = (SAvgInfo *)GET_ROWCELL_INTERBUF(pResInfo); if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { pAvgInfo->sum += GET_INT8_VAL(pData); @@ -839,16 +837,16 @@ static void avg_function_f(SQLFunctionCtx *pCtx, int32_t index) { pResInfo->hasResult = DATA_SET_FLAG; // keep the data into the final output buffer for super table query since this execution may be the last one - if (pResInfo->superTableQ) { - memcpy(pCtx->aOutputBuf, pResInfo->interResultBuf, sizeof(SAvgInfo)); + if (pCtx->stableQuery) { + memcpy(pCtx->aOutputBuf, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SAvgInfo)); } } static void avg_func_merge(SQLFunctionCtx *pCtx) { - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - assert(pResInfo->superTableQ); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + assert(pCtx->stableQuery); - SAvgInfo *pAvgInfo = (SAvgInfo *)pResInfo->interResultBuf; + SAvgInfo *pAvgInfo = (SAvgInfo *)GET_ROWCELL_INTERBUF(pResInfo); char * input = GET_INPUT_CHAR(pCtx); for (int32_t i = 0; i < pCtx->size; ++i, input += pCtx->inputBytes) { @@ -864,12 +862,12 @@ static void avg_func_merge(SQLFunctionCtx *pCtx) { // if the data set hasResult is not set, the result is null if (pAvgInfo->num > 0) { pResInfo->hasResult = DATA_SET_FLAG; - memcpy(pCtx->aOutputBuf, pResInfo->interResultBuf, sizeof(SAvgInfo)); + memcpy(pCtx->aOutputBuf, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SAvgInfo)); } } static void avg_func_second_merge(SQLFunctionCtx *pCtx) { - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); double *sum = (double*) pCtx->aOutputBuf; char * input = GET_INPUT_CHAR(pCtx); @@ -883,7 +881,7 @@ static void avg_func_second_merge(SQLFunctionCtx *pCtx) { *sum += pInput->sum; // keep the number of data into the temp buffer - *(int64_t *)pResInfo->interResultBuf += pInput->num; + *(int64_t *)GET_ROWCELL_INTERBUF(pResInfo) += pInput->num; } } @@ -891,21 +889,21 @@ static void avg_func_second_merge(SQLFunctionCtx *pCtx) { * the average value is calculated in finalize routine, since current routine does not know the exact number of points */ static void avg_finalizer(SQLFunctionCtx *pCtx) { - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); if (pCtx->currentStage == SECONDARY_STAGE_MERGE) { assert(pCtx->inputType == TSDB_DATA_TYPE_BINARY); - if (GET_INT64_VAL(pResInfo->interResultBuf) <= 0) { + if (GET_INT64_VAL(GET_ROWCELL_INTERBUF(pResInfo)) <= 0) { setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); return; // empty table } - *(double *)pCtx->aOutputBuf = (*(double *)pCtx->aOutputBuf) / *(int64_t *)pResInfo->interResultBuf; + *(double *)pCtx->aOutputBuf = (*(double *)pCtx->aOutputBuf) / *(int64_t *)GET_ROWCELL_INTERBUF(pResInfo); } else { // this is the secondary merge, only in the secondary merge, the input type is TSDB_DATA_TYPE_BINARY assert(pCtx->inputType >= TSDB_DATA_TYPE_TINYINT && pCtx->inputType <= TSDB_DATA_TYPE_DOUBLE); - SAvgInfo *pAvgInfo = (SAvgInfo *)pResInfo->interResultBuf; + SAvgInfo *pAvgInfo = (SAvgInfo *)GET_ROWCELL_INTERBUF(pResInfo); if (pAvgInfo->num == 0) { // all data are NULL or empty table setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); @@ -1116,11 +1114,11 @@ static void min_function(SQLFunctionCtx 
*pCtx) { SET_VAL(pCtx, notNullElems, 1); if (notNullElems > 0) { - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); pResInfo->hasResult = DATA_SET_FLAG; // set the flag for super table query - if (pResInfo->superTableQ) { + if (pCtx->stableQuery) { *(pCtx->aOutputBuf + pCtx->inputBytes) = DATA_SET_FLAG; } } @@ -1133,11 +1131,11 @@ static void max_function(SQLFunctionCtx *pCtx) { SET_VAL(pCtx, notNullElems, 1); if (notNullElems > 0) { - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); pResInfo->hasResult = DATA_SET_FLAG; // set the flag for super table query - if (pResInfo->superTableQ) { + if (pCtx->stableQuery) { *(pCtx->aOutputBuf + pCtx->inputBytes) = DATA_SET_FLAG; } } @@ -1148,8 +1146,7 @@ static int32_t minmax_merge_impl(SQLFunctionCtx *pCtx, int32_t bytes, char *outp GET_TRUE_DATA_TYPE(); - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - assert(pResInfo->superTableQ); + assert(pCtx->stableQuery); for (int32_t i = 0; i < pCtx->size; ++i) { char *input = GET_INPUT_CHAR_INDEX(pCtx, i); @@ -1210,7 +1207,7 @@ static void min_func_merge(SQLFunctionCtx *pCtx) { SET_VAL(pCtx, notNullElems, 1); - if (notNullElems > 0) { // for super table query, SResultInfo is not used + if (notNullElems > 0) { // for super table query, SResultRowCellInfo is not used char *flag = pCtx->aOutputBuf + pCtx->inputBytes; *flag = DATA_SET_FLAG; } @@ -1221,7 +1218,7 @@ static void min_func_second_merge(SQLFunctionCtx *pCtx) { SET_VAL(pCtx, notNullElems, 1); - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); if (notNullElems > 0) { pResInfo->hasResult = DATA_SET_FLAG; } @@ -1242,7 +1239,7 @@ static void max_func_second_merge(SQLFunctionCtx *pCtx) { SET_VAL(pCtx, numOfElem, 1); - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); if (numOfElem > 0) { pResInfo->hasResult = DATA_SET_FLAG; } @@ -1297,8 +1294,8 @@ static void max_function_f(SQLFunctionCtx *pCtx, int32_t index) { SET_VAL(pCtx, 1, 1); minMax_function_f(pCtx, index, 0); - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - if (pResInfo->hasResult == DATA_SET_FLAG && pResInfo->superTableQ) { + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + if (pResInfo->hasResult == DATA_SET_FLAG && pCtx->stableQuery) { char *flag = pCtx->aOutputBuf + pCtx->inputBytes; *flag = DATA_SET_FLAG; } @@ -1313,8 +1310,8 @@ static void min_function_f(SQLFunctionCtx *pCtx, int32_t index) { SET_VAL(pCtx, 1, 1); minMax_function_f(pCtx, index, 1); - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - if (pResInfo->hasResult == DATA_SET_FLAG && pResInfo->superTableQ) { + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + if (pResInfo->hasResult == DATA_SET_FLAG && pCtx->stableQuery) { char *flag = pCtx->aOutputBuf + pCtx->inputBytes; *flag = DATA_SET_FLAG; } @@ -1330,7 +1327,7 @@ static void min_function_f(SQLFunctionCtx *pCtx, int32_t index) { static void stddev_function(SQLFunctionCtx *pCtx) { // the second stage to calculate standard deviation - SStddevInfo *pStd = GET_RES_INFO(pCtx)->interResultBuf; + SStddevInfo *pStd = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); if (pStd->stage == 0) { // the first stage is to calculate average value avg_function(pCtx); @@ -1381,8 +1378,8 @@ static void stddev_function(SQLFunctionCtx *pCtx) { static void stddev_function_f(SQLFunctionCtx *pCtx, int32_t index) { // the second stage to calculate standard deviation - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - SStddevInfo 
*pStd = pResInfo->interResultBuf; + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + SStddevInfo *pStd = GET_ROWCELL_INTERBUF(pResInfo); /* the first stage is to calculate average value */ if (pStd->stage == 0) { @@ -1433,8 +1430,8 @@ static void stddev_next_step(SQLFunctionCtx *pCtx) { * the stddevInfo and the average info struct share the same buffer area * And the position of each element in their struct is exactly the same matched */ - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - SStddevInfo *pStd = pResInfo->interResultBuf; + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + SStddevInfo *pStd = GET_ROWCELL_INTERBUF(pResInfo); if (pStd->stage == 0) { /* @@ -1449,7 +1446,7 @@ static void stddev_next_step(SQLFunctionCtx *pCtx) { pResInfo->initialized = true; // set it initialized to avoid re-initialization // save average value into tmpBuf, for second stage scan - SAvgInfo *pAvg = pResInfo->interResultBuf; + SAvgInfo *pAvg = GET_ROWCELL_INTERBUF(pResInfo); pStd->avg = GET_DOUBLE_VAL(pCtx->aOutputBuf); assert((isnan(pAvg->sum) && pAvg->num == 0) || (pStd->num == pAvg->num && pStd->avg == pAvg->sum)); @@ -1459,7 +1456,7 @@ static void stddev_next_step(SQLFunctionCtx *pCtx) { } static void stddev_finalizer(SQLFunctionCtx *pCtx) { - SStddevInfo *pStd = (SStddevInfo *)GET_RES_INFO(pCtx)->interResultBuf; + SStddevInfo *pStd = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); if (pStd->num <= 0) { setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); @@ -1505,7 +1502,7 @@ static void first_function(SQLFunctionCtx *pCtx) { TSKEY k = pCtx->ptsList[i]; DO_UPDATE_TAG_COLUMNS(pCtx, k); - SResultInfo *pInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pInfo = GET_RES_INFO(pCtx); pInfo->hasResult = DATA_SET_FLAG; pInfo->complete = true; @@ -1532,7 +1529,7 @@ static void first_function_f(SQLFunctionCtx *pCtx, int32_t index) { TSKEY ts = pCtx->ptsList[index]; DO_UPDATE_TAG_COLUMNS(pCtx, ts); - SResultInfo *pInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pInfo = GET_RES_INFO(pCtx); pInfo->hasResult = DATA_SET_FLAG; pInfo->complete = true; // get the first not-null data, completed } @@ -1576,7 +1573,7 @@ static void first_dist_function(SQLFunctionCtx *pCtx) { first_data_assign_impl(pCtx, data, i); - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); pResInfo->hasResult = DATA_SET_FLAG; notNullElems++; @@ -1604,8 +1601,7 @@ static void first_dist_function_f(SQLFunctionCtx *pCtx, int32_t index) { static void first_dist_func_merge(SQLFunctionCtx *pCtx) { char *pData = GET_INPUT_CHAR(pCtx); - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - assert(pCtx->size == 1 && pResInfo->superTableQ); + assert(pCtx->size == 1 && pCtx->stableQuery); SFirstLastInfo *pInput = (SFirstLastInfo *)(pData + pCtx->inputBytes); if (pInput->hasResult != DATA_SET_FLAG) { @@ -1620,8 +1616,8 @@ static void first_dist_func_merge(SQLFunctionCtx *pCtx) { } static void first_dist_func_second_merge(SQLFunctionCtx *pCtx) { - assert(pCtx->resultInfo->superTableQ); - + assert(pCtx->stableQuery); + char * pData = GET_INPUT_CHAR(pCtx); SFirstLastInfo *pInput = (SFirstLastInfo*) (pData + pCtx->outputBytes); if (pInput->hasResult != DATA_SET_FLAG) { @@ -1668,7 +1664,7 @@ static void last_function(SQLFunctionCtx *pCtx) { TSKEY ts = pCtx->ptsList[i]; DO_UPDATE_TAG_COLUMNS(pCtx, ts); - SResultInfo *pInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pInfo = GET_RES_INFO(pCtx); pInfo->hasResult = DATA_SET_FLAG; pInfo->complete = true; // set query completed on this column @@ -1691,7 +1687,7 @@ 
static void last_function_f(SQLFunctionCtx *pCtx, int32_t index) { TSKEY ts = pCtx->ptsList[index]; DO_UPDATE_TAG_COLUMNS(pCtx, ts); - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); pResInfo->hasResult = DATA_SET_FLAG; pResInfo->complete = true; // set query completed } @@ -1740,7 +1736,7 @@ static void last_dist_function(SQLFunctionCtx *pCtx) { last_data_assign_impl(pCtx, data, i); - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); pResInfo->hasResult = DATA_SET_FLAG; notNullElems++; @@ -1776,8 +1772,7 @@ static void last_dist_function_f(SQLFunctionCtx *pCtx, int32_t index) { static void last_dist_func_merge(SQLFunctionCtx *pCtx) { char *pData = GET_INPUT_CHAR(pCtx); - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - assert(pCtx->size == 1 && pResInfo->superTableQ); + assert(pCtx->size == 1 && pCtx->stableQuery); // the input data is null SFirstLastInfo *pInput = (SFirstLastInfo *)(pData + pCtx->inputBytes); @@ -1833,11 +1828,11 @@ static void last_row_function(SQLFunctionCtx *pCtx) { // assign the last element in current data block assignVal(pCtx->aOutputBuf, pData + (pCtx->size - 1) * pCtx->inputBytes, pCtx->inputBytes, pCtx->inputType); - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); pResInfo->hasResult = DATA_SET_FLAG; // set the result to final result buffer in case of super table query - if (pResInfo->superTableQ) { + if (pCtx->stableQuery) { SLastrowInfo *pInfo1 = (SLastrowInfo *)(pCtx->aOutputBuf + pCtx->inputBytes); pInfo1->ts = pCtx->ptsList[pCtx->size - 1]; pInfo1->hasResult = DATA_SET_FLAG; @@ -1852,7 +1847,7 @@ static void last_row_function(SQLFunctionCtx *pCtx) { static void last_row_finalizer(SQLFunctionCtx *pCtx) { // do nothing at the first stage - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); if (pResInfo->hasResult != DATA_SET_FLAG) { if (pCtx->outputType == TSDB_DATA_TYPE_BINARY || pCtx->outputType == TSDB_DATA_TYPE_NCHAR) { setVardataNull(pCtx->aOutputBuf, pCtx->outputType); @@ -2044,8 +2039,8 @@ static int32_t resDataAscComparFn(const void *pLeft, const void *pRight) { static int32_t resDataDescComparFn(const void *pLeft, const void *pRight) { return -resDataAscComparFn(pLeft, pRight); } static void copyTopBotRes(SQLFunctionCtx *pCtx, int32_t type) { - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - STopBotInfo *pRes = pResInfo->interResultBuf; + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + STopBotInfo *pRes = GET_ROWCELL_INTERBUF(pResInfo); tValuePair **tvp = pRes->res; @@ -2123,7 +2118,7 @@ static void copyTopBotRes(SQLFunctionCtx *pCtx, int32_t type) { } } - taosTFree(pData); + tfree(pData); } /* @@ -2135,18 +2130,18 @@ static void copyTopBotRes(SQLFunctionCtx *pCtx, int32_t type) { * top/bottom use the intermediate result buffer to keep the intermediate result */ static STopBotInfo *getTopBotOutputInfo(SQLFunctionCtx *pCtx) { - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); // only the first_stage_merge is directly written data into final output buffer - if (pResInfo->superTableQ && pCtx->currentStage != SECONDARY_STAGE_MERGE) { + if (pCtx->stableQuery && pCtx->currentStage != SECONDARY_STAGE_MERGE) { return (STopBotInfo*) pCtx->aOutputBuf; } else { // during normal table query and super table at the secondary_stage, result is written to intermediate buffer - return pResInfo->interResultBuf; + return 
GET_ROWCELL_INTERBUF(pResInfo); } } bool topbot_datablock_filter(SQLFunctionCtx *pCtx, int32_t functionId, const char *minval, const char *maxval) { - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); if (pResInfo == NULL) { return true; } @@ -2252,7 +2247,7 @@ static void top_function(SQLFunctionCtx *pCtx) { SET_VAL(pCtx, notNullElems, 1); if (notNullElems > 0) { - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); pResInfo->hasResult = DATA_SET_FLAG; } } @@ -2270,7 +2265,7 @@ static void top_function_f(SQLFunctionCtx *pCtx, int32_t index) { do_top_function_add(pRes, (int32_t)pCtx->param[0].i64Key, pData, pCtx->ptsList[index], pCtx->inputType, &pCtx->tagInfo, NULL, 0); - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); pResInfo->hasResult = DATA_SET_FLAG; } @@ -2285,8 +2280,7 @@ static void top_func_merge(SQLFunctionCtx *pCtx) { // remmap the input buffer may cause the struct pointer invalid, so rebuild the STopBotInfo is necessary buildTopBotStruct(pInput, pCtx); - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - assert(pResInfo->superTableQ && pCtx->outputType == TSDB_DATA_TYPE_BINARY && pCtx->size == 1); + assert(pCtx->stableQuery && pCtx->outputType == TSDB_DATA_TYPE_BINARY && pCtx->size == 1); STopBotInfo *pOutput = getTopBotOutputInfo(pCtx); @@ -2314,7 +2308,7 @@ static void top_func_second_merge(SQLFunctionCtx *pCtx) { SET_VAL(pCtx, pInput->num, pOutput->num); if (pOutput->num > 0) { - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); pResInfo->hasResult = DATA_SET_FLAG; } } @@ -2343,7 +2337,7 @@ static void bottom_function(SQLFunctionCtx *pCtx) { SET_VAL(pCtx, notNullElems, 1); if (notNullElems > 0) { - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); pResInfo->hasResult = DATA_SET_FLAG; } } @@ -2359,7 +2353,7 @@ static void bottom_function_f(SQLFunctionCtx *pCtx, int32_t index) { do_bottom_function_add(pRes, (int32_t)pCtx->param[0].i64Key, pData, pCtx->ptsList[index], pCtx->inputType, &pCtx->tagInfo, NULL, 0); - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); pResInfo->hasResult = DATA_SET_FLAG; } @@ -2374,8 +2368,7 @@ static void bottom_func_merge(SQLFunctionCtx *pCtx) { // remmap the input buffer may cause the struct pointer invalid, so rebuild the STopBotInfo is necessary buildTopBotStruct(pInput, pCtx); - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - assert(pResInfo->superTableQ && pCtx->outputType == TSDB_DATA_TYPE_BINARY && pCtx->size == 1); + assert(pCtx->stableQuery && pCtx->outputType == TSDB_DATA_TYPE_BINARY && pCtx->size == 1); STopBotInfo *pOutput = getTopBotOutputInfo(pCtx); @@ -2403,16 +2396,16 @@ static void bottom_func_second_merge(SQLFunctionCtx *pCtx) { SET_VAL(pCtx, pInput->num, pOutput->num); if (pOutput->num > 0) { - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); pResInfo->hasResult = DATA_SET_FLAG; } } static void top_bottom_func_finalizer(SQLFunctionCtx *pCtx) { - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); // data in temporary list is less than the required number of results, not enough qualified number of results - STopBotInfo *pRes = pResInfo->interResultBuf; + STopBotInfo *pRes = GET_ROWCELL_INTERBUF(pResInfo); if (pRes->num == 0) { // no result assert(pResInfo->hasResult != 
DATA_SET_FLAG); // TODO: @@ -2443,8 +2436,8 @@ static bool percentile_function_setup(SQLFunctionCtx *pCtx) { } // in the first round, get the min-max value of all involved data - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - SPercentileInfo *pInfo = pResInfo->interResultBuf; + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + SPercentileInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); SET_DOUBLE_VAL(&pInfo->minval, DBL_MAX); SET_DOUBLE_VAL(&pInfo->maxval, -DBL_MAX); pInfo->numOfElems = 0; @@ -2455,18 +2448,28 @@ static bool percentile_function_setup(SQLFunctionCtx *pCtx) { static void percentile_function(SQLFunctionCtx *pCtx) { int32_t notNullElems = 0; - SResultInfo * pResInfo = GET_RES_INFO(pCtx); - SPercentileInfo *pInfo = pResInfo->interResultBuf; + SResultRowCellInfo * pResInfo = GET_RES_INFO(pCtx); + SPercentileInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); // the first stage, only acquire the min/max value if (pInfo->stage == 0) { if (pCtx->preAggVals.isSet) { - if (GET_DOUBLE_VAL(&pInfo->minval) > pCtx->preAggVals.statis.min) { - SET_DOUBLE_VAL(&pInfo->minval, (double)pCtx->preAggVals.statis.min); + double tmin = 0.0, tmax = 0.0; + if (pCtx->inputType >= TSDB_DATA_TYPE_TINYINT && pCtx->inputType <= TSDB_DATA_TYPE_BIGINT) { + tmin = (double)GET_INT64_VAL(&pCtx->preAggVals.statis.min); + tmax = (double)GET_INT64_VAL(&pCtx->preAggVals.statis.max); + } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE || pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { + tmin = GET_DOUBLE_VAL(&pCtx->preAggVals.statis.min); + tmax = GET_DOUBLE_VAL(&pCtx->preAggVals.statis.max); + } else { + assert(true); + } + if (GET_DOUBLE_VAL(&pInfo->minval) > tmin) { + SET_DOUBLE_VAL(&pInfo->minval, tmin); } - if (GET_DOUBLE_VAL(&pInfo->maxval) < pCtx->preAggVals.statis.max) { - SET_DOUBLE_VAL(&pInfo->maxval, (double)pCtx->preAggVals.statis.max); + if (GET_DOUBLE_VAL(&pInfo->maxval) < tmax) { + SET_DOUBLE_VAL(&pInfo->maxval, tmax); } pInfo->numOfElems += (pCtx->size - pCtx->preAggVals.statis.numOfNull); @@ -2477,28 +2480,8 @@ static void percentile_function(SQLFunctionCtx *pCtx) { continue; } - // TODO extract functions double v = 0; - switch (pCtx->inputType) { - case TSDB_DATA_TYPE_TINYINT: - v = GET_INT8_VAL(data); - break; - case TSDB_DATA_TYPE_SMALLINT: - v = GET_INT16_VAL(data); - break; - case TSDB_DATA_TYPE_BIGINT: - v = (double)(GET_INT64_VAL(data)); - break; - case TSDB_DATA_TYPE_FLOAT: - v = GET_FLOAT_VAL(data); - break; - case TSDB_DATA_TYPE_DOUBLE: - v = GET_DOUBLE_VAL(data); - break; - default: - v = GET_INT32_VAL(data); - break; - } + GET_TYPED_DATA(v, double, pCtx->inputType, data); if (v < GET_DOUBLE_VAL(&pInfo->minval)) { SET_DOUBLE_VAL(&pInfo->minval, v); @@ -2536,33 +2519,13 @@ static void percentile_function_f(SQLFunctionCtx *pCtx, int32_t index) { return; } - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - - SPercentileInfo *pInfo = (SPercentileInfo *)pResInfo->interResultBuf; + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + SPercentileInfo *pInfo = (SPercentileInfo *)GET_ROWCELL_INTERBUF(pResInfo); if (pInfo->stage == 0) { - // TODO extract functions + double v = 0; - switch (pCtx->inputType) { - case TSDB_DATA_TYPE_TINYINT: - v = GET_INT8_VAL(pData); - break; - case TSDB_DATA_TYPE_SMALLINT: - v = GET_INT16_VAL(pData); - break; - case TSDB_DATA_TYPE_BIGINT: - v = (double)(GET_INT64_VAL(pData)); - break; - case TSDB_DATA_TYPE_FLOAT: - v = GET_FLOAT_VAL(pData); - break; - case TSDB_DATA_TYPE_DOUBLE: - v = GET_DOUBLE_VAL(pData); - break; - default: - v = GET_INT32_VAL(pData); - break; - } + 
GET_TYPED_DATA(v, double, pCtx->inputType, pData); if (v < GET_DOUBLE_VAL(&pInfo->minval)) { SET_DOUBLE_VAL(&pInfo->minval, v); @@ -2585,8 +2548,8 @@ static void percentile_function_f(SQLFunctionCtx *pCtx, int32_t index) { static void percentile_finalizer(SQLFunctionCtx *pCtx) { double v = pCtx->param[0].nType == TSDB_DATA_TYPE_INT ? pCtx->param[0].i64Key : pCtx->param[0].dKey; - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - tMemBucket * pMemBucket = ((SPercentileInfo *)pResInfo->interResultBuf)->pMemBucket; + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + tMemBucket * pMemBucket = ((SPercentileInfo *)GET_ROWCELL_INTERBUF(pResInfo))->pMemBucket; if (pMemBucket->total > 0) { // check for null *(double *)pCtx->aOutputBuf = getPercentile(pMemBucket, v); @@ -2599,8 +2562,8 @@ static void percentile_finalizer(SQLFunctionCtx *pCtx) { } static void percentile_next_step(SQLFunctionCtx *pCtx) { - SResultInfo * pResInfo = GET_RES_INFO(pCtx); - SPercentileInfo *pInfo = pResInfo->interResultBuf; + SResultRowCellInfo * pResInfo = GET_RES_INFO(pCtx); + SPercentileInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); if (pInfo->stage == 0) { // all data are null, set it completed @@ -2617,12 +2580,12 @@ static void percentile_next_step(SQLFunctionCtx *pCtx) { ////////////////////////////////////////////////////////////////////////////////// static SAPercentileInfo *getAPerctInfo(SQLFunctionCtx *pCtx) { - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - if (pResInfo->superTableQ && pCtx->currentStage != SECONDARY_STAGE_MERGE) { + if (pCtx->stableQuery && pCtx->currentStage != SECONDARY_STAGE_MERGE) { return (SAPercentileInfo*) pCtx->aOutputBuf; } else { - return pResInfo->interResultBuf; + return GET_ROWCELL_INTERBUF(pResInfo); } } @@ -2641,7 +2604,7 @@ static bool apercentile_function_setup(SQLFunctionCtx *pCtx) { static void apercentile_function(SQLFunctionCtx *pCtx) { int32_t notNullElems = 0; - SResultInfo * pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo * pResInfo = GET_RES_INFO(pCtx); SAPercentileInfo *pInfo = getAPerctInfo(pCtx); for (int32_t i = 0; i < pCtx->size; ++i) { @@ -2651,29 +2614,9 @@ static void apercentile_function(SQLFunctionCtx *pCtx) { } notNullElems += 1; + double v = 0; - - switch (pCtx->inputType) { - case TSDB_DATA_TYPE_TINYINT: - v = GET_INT8_VAL(data); - break; - case TSDB_DATA_TYPE_SMALLINT: - v = GET_INT16_VAL(data); - break; - case TSDB_DATA_TYPE_BIGINT: - v = (double)(GET_INT64_VAL(data)); - break; - case TSDB_DATA_TYPE_FLOAT: - v = GET_FLOAT_VAL(data); - break; - case TSDB_DATA_TYPE_DOUBLE: - v = GET_DOUBLE_VAL(data); - break; - default: - v = GET_INT32_VAL(data); - break; - } - + GET_TYPED_DATA(v, double, pCtx->inputType, data); tHistogramAdd(&pInfo->pHisto, v); } @@ -2694,30 +2637,11 @@ static void apercentile_function_f(SQLFunctionCtx *pCtx, int32_t index) { return; } - SResultInfo * pResInfo = GET_RES_INFO(pCtx); - SAPercentileInfo *pInfo = getAPerctInfo(pCtx); // pResInfo->interResultBuf; + SResultRowCellInfo * pResInfo = GET_RES_INFO(pCtx); + SAPercentileInfo *pInfo = getAPerctInfo(pCtx); double v = 0; - switch (pCtx->inputType) { - case TSDB_DATA_TYPE_TINYINT: - v = GET_INT8_VAL(pData); - break; - case TSDB_DATA_TYPE_SMALLINT: - v = GET_INT16_VAL(pData); - break; - case TSDB_DATA_TYPE_BIGINT: - v = (double)(GET_INT64_VAL(pData)); - break; - case TSDB_DATA_TYPE_FLOAT: - v = GET_FLOAT_VAL(pData); - break; - case TSDB_DATA_TYPE_DOUBLE: - v = GET_DOUBLE_VAL(pData); - break; - default: - v = GET_INT32_VAL(pData); - 
break; - } + GET_TYPED_DATA(v, double, pCtx->inputType, pData); tHistogramAdd(&pInfo->pHisto, v); @@ -2726,8 +2650,8 @@ static void apercentile_function_f(SQLFunctionCtx *pCtx, int32_t index) { } static void apercentile_func_merge(SQLFunctionCtx *pCtx) { - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - assert(pResInfo->superTableQ); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + assert(pCtx->stableQuery); SAPercentileInfo *pInput = (SAPercentileInfo *)GET_INPUT_CHAR(pCtx); @@ -2771,20 +2695,21 @@ static void apercentile_func_second_merge(SQLFunctionCtx *pCtx) { } SAPercentileInfo *pOutput = getAPerctInfo(pCtx); - SHistogramInfo * pHisto = pOutput->pHisto; + SHistogramInfo *pHisto = pOutput->pHisto; if (pHisto->numOfElems <= 0) { memcpy(pHisto, pInput->pHisto, sizeof(SHistogramInfo) + sizeof(SHistBin) * (MAX_HISTOGRAM_BIN + 1)); pHisto->elems = (SHistBin*) ((char *)pHisto + sizeof(SHistogramInfo)); } else { + //TODO(dengyihao): avoid memcpy pHisto->elems = (SHistBin*) ((char *)pHisto + sizeof(SHistogramInfo)); - SHistogramInfo *pRes = tHistogramMerge(pHisto, pInput->pHisto, MAX_HISTOGRAM_BIN); - tHistogramDestroy(&pOutput->pHisto); - pOutput->pHisto = pRes; + memcpy(pHisto, pRes, sizeof(SHistogramInfo) + sizeof(SHistBin) * MAX_HISTOGRAM_BIN); + pHisto->elems = (SHistBin*) ((char *)pHisto + sizeof(SHistogramInfo)); + tHistogramDestroy(&pRes); } - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); pResInfo->hasResult = DATA_SET_FLAG; SET_VAL(pCtx, 1, 1); } @@ -2792,8 +2717,8 @@ static void apercentile_func_second_merge(SQLFunctionCtx *pCtx) { static void apercentile_finalizer(SQLFunctionCtx *pCtx) { double v = (pCtx->param[0].nType == TSDB_DATA_TYPE_INT) ? pCtx->param[0].i64Key : pCtx->param[0].dKey; - SResultInfo * pResInfo = GET_RES_INFO(pCtx); - SAPercentileInfo *pOutput = pResInfo->interResultBuf; + SResultRowCellInfo * pResInfo = GET_RES_INFO(pCtx); + SAPercentileInfo *pOutput = GET_ROWCELL_INTERBUF(pResInfo); if (pCtx->currentStage == SECONDARY_STAGE_MERGE) { if (pResInfo->hasResult == DATA_SET_FLAG) { // check for null @@ -2830,8 +2755,8 @@ static bool leastsquares_function_setup(SQLFunctionCtx *pCtx) { return false; } - SResultInfo * pResInfo = GET_RES_INFO(pCtx); - SLeastsquareInfo *pInfo = pResInfo->interResultBuf; + SResultRowCellInfo * pResInfo = GET_RES_INFO(pCtx); + SLeastsquaresInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); // 2*3 matrix pInfo->startVal = pCtx->param[0].dKey; @@ -2857,8 +2782,8 @@ static bool leastsquares_function_setup(SQLFunctionCtx *pCtx) { } static void leastsquares_function(SQLFunctionCtx *pCtx) { - SResultInfo * pResInfo = GET_RES_INFO(pCtx); - SLeastsquareInfo *pInfo = pResInfo->interResultBuf; + SResultRowCellInfo * pResInfo = GET_RES_INFO(pCtx); + SLeastsquaresInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); double(*param)[3] = pInfo->mat; double x = pInfo->startVal; @@ -2928,40 +2853,40 @@ static void leastsquares_function_f(SQLFunctionCtx *pCtx, int32_t index) { return; } - SResultInfo * pResInfo = GET_RES_INFO(pCtx); - SLeastsquareInfo *pInfo = pResInfo->interResultBuf; + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + SLeastsquaresInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); double(*param)[3] = pInfo->mat; switch (pCtx->inputType) { case TSDB_DATA_TYPE_INT: { int32_t *p = pData; - LEASTSQR_CAL(param, pInfo->startVal, p, index, pCtx->param[1].dKey); + LEASTSQR_CAL(param, pInfo->startVal, p, 0, pCtx->param[1].dKey); break; }; case TSDB_DATA_TYPE_TINYINT: { int8_t *p = pData; - LEASTSQR_CAL(param, 
pInfo->startVal, p, index, pCtx->param[1].dKey); + LEASTSQR_CAL(param, pInfo->startVal, p, 0, pCtx->param[1].dKey); break; } case TSDB_DATA_TYPE_SMALLINT: { int16_t *p = pData; - LEASTSQR_CAL(param, pInfo->startVal, p, index, pCtx->param[1].dKey); + LEASTSQR_CAL(param, pInfo->startVal, p, 0, pCtx->param[1].dKey); break; } case TSDB_DATA_TYPE_BIGINT: { int64_t *p = pData; - LEASTSQR_CAL(param, pInfo->startVal, p, index, pCtx->param[1].dKey); + LEASTSQR_CAL(param, pInfo->startVal, p, 0, pCtx->param[1].dKey); break; } case TSDB_DATA_TYPE_FLOAT: { float *p = pData; - LEASTSQR_CAL(param, pInfo->startVal, p, index, pCtx->param[1].dKey); + LEASTSQR_CAL(param, pInfo->startVal, p, 0, pCtx->param[1].dKey); break; } case TSDB_DATA_TYPE_DOUBLE: { double *p = pData; - LEASTSQR_CAL(param, pInfo->startVal, p, index, pCtx->param[1].dKey); + LEASTSQR_CAL(param, pInfo->startVal, p, 0, pCtx->param[1].dKey); break; } default: @@ -2978,16 +2903,11 @@ static void leastsquares_function_f(SQLFunctionCtx *pCtx, int32_t index) { static void leastsquares_finalizer(SQLFunctionCtx *pCtx) { // no data in query - SResultInfo * pResInfo = GET_RES_INFO(pCtx); - SLeastsquareInfo *pInfo = pResInfo->interResultBuf; + SResultRowCellInfo * pResInfo = GET_RES_INFO(pCtx); + SLeastsquaresInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); if (pInfo->num == 0) { - if (pCtx->outputType == TSDB_DATA_TYPE_BINARY || pCtx->outputType == TSDB_DATA_TYPE_NCHAR) { - setVardataNull(pCtx->aOutputBuf, pCtx->outputType); - } else { - setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); - } - + setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); return; } @@ -3044,7 +2964,7 @@ static void col_project_function(SQLFunctionCtx *pCtx) { } static void col_project_function_f(SQLFunctionCtx *pCtx, int32_t index) { - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); if (pCtx->numOfParams == 2) { // the number of output rows should not affect the final number of rows, so set it to be 0 return; } @@ -3476,7 +3396,7 @@ static bool spread_function_setup(SQLFunctionCtx *pCtx) { return false; } - SSpreadInfo *pInfo = GET_RES_INFO(pCtx)->interResultBuf; + SSpreadInfo *pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); // this is the server-side setup function in client-side, the secondary merge do not need this procedure if (pCtx->currentStage == SECONDARY_STAGE_MERGE) { @@ -3491,8 +3411,8 @@ static bool spread_function_setup(SQLFunctionCtx *pCtx) { } static void spread_function(SQLFunctionCtx *pCtx) { - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - SSpreadInfo *pInfo = pResInfo->interResultBuf; + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + SSpreadInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); int32_t numOfElems = 0; @@ -3558,8 +3478,8 @@ static void spread_function(SQLFunctionCtx *pCtx) { } // keep the data into the final output buffer for super table query since this execution may be the last one - if (pResInfo->superTableQ) { - memcpy(pCtx->aOutputBuf, pResInfo->interResultBuf, sizeof(SSpreadInfo)); + if (pCtx->stableQuery) { + memcpy(pCtx->aOutputBuf, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SSpreadInfo)); } } @@ -3571,8 +3491,8 @@ static void spread_function_f(SQLFunctionCtx *pCtx, int32_t index) { SET_VAL(pCtx, 1, 1); - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - SSpreadInfo *pInfo = pResInfo->interResultBuf; + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + SSpreadInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); double val = 0.0; if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { @@ 
-3601,16 +3521,16 @@ static void spread_function_f(SQLFunctionCtx *pCtx, int32_t index) { pResInfo->hasResult = DATA_SET_FLAG; pInfo->hasResult = DATA_SET_FLAG; - if (pResInfo->superTableQ) { - memcpy(pCtx->aOutputBuf, pResInfo->interResultBuf, sizeof(SSpreadInfo)); + if (pCtx->stableQuery) { + memcpy(pCtx->aOutputBuf, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SSpreadInfo)); } } void spread_func_merge(SQLFunctionCtx *pCtx) { - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - assert(pResInfo->superTableQ); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + assert(pCtx->stableQuery); - SSpreadInfo *pResData = pResInfo->interResultBuf; + SSpreadInfo *pResData = GET_ROWCELL_INTERBUF(pResInfo); int32_t notNullElems = 0; for (int32_t i = 0; i < pCtx->size; ++i) { @@ -3634,7 +3554,7 @@ void spread_func_merge(SQLFunctionCtx *pCtx) { } if (notNullElems > 0) { - memcpy(pCtx->aOutputBuf, pResInfo->interResultBuf, sizeof(SSpreadInfo)); + memcpy(pCtx->aOutputBuf, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SSpreadInfo)); pResInfo->hasResult = DATA_SET_FLAG; } } @@ -3665,7 +3585,7 @@ void spread_function_finalizer(SQLFunctionCtx *pCtx) { * here we do not check the input data types, because in case of metric query, * the type of intermediate data is binary */ - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); if (pCtx->currentStage == SECONDARY_STAGE_MERGE) { assert(pCtx->inputType == TSDB_DATA_TYPE_BINARY); @@ -3680,7 +3600,7 @@ void spread_function_finalizer(SQLFunctionCtx *pCtx) { assert((pCtx->inputType >= TSDB_DATA_TYPE_TINYINT && pCtx->inputType <= TSDB_DATA_TYPE_DOUBLE) || (pCtx->inputType == TSDB_DATA_TYPE_TIMESTAMP)); - SSpreadInfo *pInfo = GET_RES_INFO(pCtx)->interResultBuf; + SSpreadInfo *pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); if (pInfo->hasResult != DATA_SET_FLAG) { setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); return; @@ -3704,8 +3624,8 @@ static bool twa_function_setup(SQLFunctionCtx *pCtx) { return false; } - SResultInfo *pResInfo = GET_RES_INFO(pCtx); //->aOutputBuf + pCtx->outputBytes; - STwaInfo * pInfo = pResInfo->interResultBuf; + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); //->aOutputBuf + pCtx->outputBytes; + STwaInfo * pInfo = GET_ROWCELL_INTERBUF(pResInfo); pInfo->lastKey = INT64_MIN; pInfo->type = pCtx->inputType; @@ -3744,8 +3664,8 @@ static void twa_function(SQLFunctionCtx *pCtx) { int32_t notNullElems = 0; - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - STwaInfo * pInfo = pResInfo->interResultBuf; + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + STwaInfo * pInfo = GET_ROWCELL_INTERBUF(pResInfo); int32_t i = 0; @@ -3798,7 +3718,7 @@ static void twa_function(SQLFunctionCtx *pCtx) { pResInfo->hasResult = DATA_SET_FLAG; } - if (pResInfo->superTableQ) { + if (pCtx->stableQuery) { memcpy(pCtx->aOutputBuf, pInfo, sizeof(STwaInfo)); } @@ -3815,8 +3735,8 @@ static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) { TSKEY *primaryKey = pCtx->ptsList; - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - STwaInfo *pInfo = pResInfo->interResultBuf; + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + STwaInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); if (pInfo->lastKey == INT64_MIN) { pInfo->lastKey = pCtx->nStartQueryTimestamp; @@ -3838,14 +3758,13 @@ static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) { // pCtx->numOfIteratedElems += 1; pResInfo->hasResult = DATA_SET_FLAG; - if (pResInfo->superTableQ) { - memcpy(pCtx->aOutputBuf, pResInfo->interResultBuf, sizeof(STwaInfo)); + if 
(pCtx->stableQuery) { + memcpy(pCtx->aOutputBuf, GET_ROWCELL_INTERBUF(pResInfo), sizeof(STwaInfo)); } } static void twa_func_merge(SQLFunctionCtx *pCtx) { - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - assert(pResInfo->superTableQ); + assert(pCtx->stableQuery); STwaInfo *pBuf = (STwaInfo *)pCtx->aOutputBuf; char * indicator = pCtx->aInputElemBuf; @@ -3885,16 +3804,16 @@ static void twa_func_merge(SQLFunctionCtx *pCtx) { */ void twa_function_copy(SQLFunctionCtx *pCtx) { assert(pCtx->inputType == TSDB_DATA_TYPE_BINARY); - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - memcpy(pResInfo->interResultBuf, pCtx->aInputElemBuf, (size_t)pCtx->inputBytes); + memcpy(GET_ROWCELL_INTERBUF(pResInfo), pCtx->aInputElemBuf, (size_t)pCtx->inputBytes); pResInfo->hasResult = ((STwaInfo *)pCtx->aInputElemBuf)->hasResult; } void twa_function_finalizer(SQLFunctionCtx *pCtx) { - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - STwaInfo *pInfo = (STwaInfo *)pResInfo->interResultBuf; + STwaInfo *pInfo = (STwaInfo *)GET_ROWCELL_INTERBUF(pResInfo); assert(pInfo->EKey >= pInfo->lastKey && pInfo->hasResult == pResInfo->hasResult); if (pInfo->hasResult != DATA_SET_FLAG) { @@ -3922,8 +3841,8 @@ void twa_function_finalizer(SQLFunctionCtx *pCtx) { */ static void interp_function(SQLFunctionCtx *pCtx) { // at this point, the value is existed, return directly - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - SInterpInfoDetail* pInfo = pResInfo->interResultBuf; + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + SInterpInfoDetail* pInfo = GET_ROWCELL_INTERBUF(pResInfo); if (pCtx->size == 1) { char *pData = GET_INPUT_CHAR(pCtx); @@ -3977,7 +3896,7 @@ static void interp_function(SQLFunctionCtx *pCtx) { if (isNull(data1, srcType) || isNull(data2, srcType)) { setNull(pCtx->aOutputBuf, srcType, pCtx->inputBytes); } else { - taosDoLinearInterpolation(pCtx->outputType, &point1, &point2, &point); + taosGetLinearInterpolationVal(pCtx->outputType, &point1, &point2, &point); } } else if (srcType == TSDB_DATA_TYPE_FLOAT) { point1.val = data1; @@ -3986,7 +3905,7 @@ static void interp_function(SQLFunctionCtx *pCtx) { if (isNull(data1, srcType) || isNull(data2, srcType)) { setNull(pCtx->aOutputBuf, srcType, pCtx->inputBytes); } else { - taosDoLinearInterpolation(pCtx->outputType, &point1, &point2, &point); + taosGetLinearInterpolationVal(pCtx->outputType, &point1, &point2, &point); } } else { @@ -3998,7 +3917,6 @@ static void interp_function(SQLFunctionCtx *pCtx) { } } } - } SET_VAL(pCtx, pCtx->size, 1); @@ -4009,8 +3927,8 @@ static bool ts_comp_function_setup(SQLFunctionCtx *pCtx) { return false; // not initialized since it has been initialized } - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - STSCompInfo *pInfo = pResInfo->interResultBuf; + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + STSCompInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); pInfo->pTSBuf = tsBufCreate(false, pCtx->order); pInfo->pTSBuf->tsOrder = pCtx->order; @@ -4018,18 +3936,18 @@ static bool ts_comp_function_setup(SQLFunctionCtx *pCtx) { } static void ts_comp_function(SQLFunctionCtx *pCtx) { - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - STSBuf * pTSbuf = ((STSCompInfo *)(pResInfo->interResultBuf))->pTSBuf; + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + STSBuf * pTSbuf = ((STSCompInfo *)(GET_ROWCELL_INTERBUF(pResInfo)))->pTSBuf; const char *input = GET_INPUT_CHAR(pCtx); // primary ts must be existed, so no need to check its existance if (pCtx->order == 
TSDB_ORDER_ASC) { - tsBufAppend(pTSbuf, 0, &pCtx->tag, input, pCtx->size * TSDB_KEYSIZE); + tsBufAppend(pTSbuf, (int32_t)pCtx->param[0].i64Key, &pCtx->tag, input, pCtx->size * TSDB_KEYSIZE); } else { for (int32_t i = pCtx->size - 1; i >= 0; --i) { char *d = GET_INPUT_CHAR_INDEX(pCtx, i); - tsBufAppend(pTSbuf, 0, &pCtx->tag, d, TSDB_KEYSIZE); + tsBufAppend(pTSbuf, (int32_t)pCtx->param[0].i64Key, &pCtx->tag, d, (int32_t)TSDB_KEYSIZE); } } @@ -4043,21 +3961,21 @@ static void ts_comp_function_f(SQLFunctionCtx *pCtx, int32_t index) { return; } - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - STSCompInfo *pInfo = pResInfo->interResultBuf; + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + STSCompInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); STSBuf *pTSbuf = pInfo->pTSBuf; - tsBufAppend(pTSbuf, 0, &pCtx->tag, pData, TSDB_KEYSIZE); + tsBufAppend(pTSbuf, (int32_t)pCtx->param[0].i64Key, &pCtx->tag, pData, TSDB_KEYSIZE); SET_VAL(pCtx, pCtx->size, 1); pResInfo->hasResult = DATA_SET_FLAG; } static void ts_comp_finalize(SQLFunctionCtx *pCtx) { - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - STSCompInfo *pInfo = pResInfo->interResultBuf; + STSCompInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); STSBuf * pTSbuf = pInfo->pTSBuf; tsBufFlush(pTSbuf); @@ -4106,8 +4024,8 @@ static bool rate_function_setup(SQLFunctionCtx *pCtx) { return false; } - SResultInfo *pResInfo = GET_RES_INFO(pCtx); //->aOutputBuf + pCtx->outputBytes; - SRateInfo * pInfo = pResInfo->interResultBuf; + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); //->aOutputBuf + pCtx->outputBytes; + SRateInfo * pInfo = GET_ROWCELL_INTERBUF(pResInfo); pInfo->CorrectionValue = 0; pInfo->firstKey = INT64_MIN; @@ -4126,8 +4044,8 @@ static bool rate_function_setup(SQLFunctionCtx *pCtx) { static void rate_function(SQLFunctionCtx *pCtx) { int32_t notNullElems = 0; - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - SRateInfo *pRateInfo = (SRateInfo *)pResInfo->interResultBuf; + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + SRateInfo *pRateInfo = (SRateInfo *)GET_ROWCELL_INTERBUF(pResInfo); TSKEY *primaryKey = pCtx->ptsList; tscDebug("%p rate_function() size:%d, hasNull:%d", pCtx, pCtx->size, pCtx->hasNull); @@ -4142,22 +4060,7 @@ static void rate_function(SQLFunctionCtx *pCtx) { notNullElems++; int64_t v = 0; - switch (pCtx->inputType) { - case TSDB_DATA_TYPE_TINYINT: - v = (int64_t)GET_INT8_VAL(pData); - break; - case TSDB_DATA_TYPE_SMALLINT: - v = (int64_t)GET_INT16_VAL(pData); - break; - case TSDB_DATA_TYPE_INT: - v = (int64_t)GET_INT32_VAL(pData); - break; - case TSDB_DATA_TYPE_BIGINT: - v = (int64_t)GET_INT64_VAL(pData); - break; - default: - assert(0); - } + GET_TYPED_DATA(v, int64_t, pCtx->inputType, pData); if ((INT64_MIN == pRateInfo->firstValue) || (INT64_MIN == pRateInfo->firstKey)) { pRateInfo->firstValue = v; @@ -4190,8 +4093,8 @@ static void rate_function(SQLFunctionCtx *pCtx) { } // keep the data into the final output buffer for super table query since this execution may be the last one - if (pResInfo->superTableQ) { - memcpy(pCtx->aOutputBuf, pResInfo->interResultBuf, sizeof(SRateInfo)); + if (pCtx->stableQuery) { + memcpy(pCtx->aOutputBuf, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SRateInfo)); } } @@ -4202,27 +4105,12 @@ static void rate_function_f(SQLFunctionCtx *pCtx, int32_t index) { } // NOTE: keep the intermediate result into the interResultBuf - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - SRateInfo *pRateInfo = (SRateInfo *)pResInfo->interResultBuf; + SResultRowCellInfo 
*pResInfo = GET_RES_INFO(pCtx); + SRateInfo *pRateInfo = (SRateInfo *)GET_ROWCELL_INTERBUF(pResInfo); TSKEY *primaryKey = pCtx->ptsList; int64_t v = 0; - switch (pCtx->inputType) { - case TSDB_DATA_TYPE_TINYINT: - v = (int64_t)GET_INT8_VAL(pData); - break; - case TSDB_DATA_TYPE_SMALLINT: - v = (int64_t)GET_INT16_VAL(pData); - break; - case TSDB_DATA_TYPE_INT: - v = (int64_t)GET_INT32_VAL(pData); - break; - case TSDB_DATA_TYPE_BIGINT: - v = (int64_t)GET_INT64_VAL(pData); - break; - default: - assert(0); - } + GET_TYPED_DATA(v, int64_t, pCtx->inputType, pData); if ((INT64_MIN == pRateInfo->firstValue) || (INT64_MIN == pRateInfo->firstKey)) { pRateInfo->firstValue = v; @@ -4247,20 +4135,18 @@ static void rate_function_f(SQLFunctionCtx *pCtx, int32_t index) { pResInfo->hasResult = DATA_SET_FLAG; // keep the data into the final output buffer for super table query since this execution may be the last one - if (pResInfo->superTableQ) { - memcpy(pCtx->aOutputBuf, pResInfo->interResultBuf, sizeof(SRateInfo)); + if (pCtx->stableQuery) { + memcpy(pCtx->aOutputBuf, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SRateInfo)); } } static void rate_func_merge(SQLFunctionCtx *pCtx) { - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - assert(pResInfo->superTableQ); + assert(pCtx->stableQuery); tscDebug("rate_func_merge() size:%d", pCtx->size); - //SRateInfo *pRateInfo = (SRateInfo *)pResInfo->interResultBuf; SRateInfo *pBuf = (SRateInfo *)pCtx->aOutputBuf; char *indicator = pCtx->aInputElemBuf; @@ -4293,8 +4179,8 @@ static void rate_func_merge(SQLFunctionCtx *pCtx) { static void rate_func_copy(SQLFunctionCtx *pCtx) { assert(pCtx->inputType == TSDB_DATA_TYPE_BINARY); - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - memcpy(pResInfo->interResultBuf, pCtx->aInputElemBuf, (size_t)pCtx->inputBytes); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + memcpy(GET_ROWCELL_INTERBUF(pResInfo), pCtx->aInputElemBuf, (size_t)pCtx->inputBytes); pResInfo->hasResult = ((SRateInfo*)pCtx->aInputElemBuf)->hasResult; SRateInfo* pRateInfo = (SRateInfo*)pCtx->aInputElemBuf; @@ -4305,8 +4191,8 @@ static void rate_func_copy(SQLFunctionCtx *pCtx) { static void rate_finalizer(SQLFunctionCtx *pCtx) { - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - SRateInfo *pRateInfo = (SRateInfo *)pResInfo->interResultBuf; + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + SRateInfo *pRateInfo = (SRateInfo *)GET_ROWCELL_INTERBUF(pResInfo); tscDebug("%p isIRate:%d firstKey:%" PRId64 " lastKey:%" PRId64 " firstValue:%" PRId64 " lastValue:%" PRId64 " CorrectionValue:%" PRId64 " hasResult:%d", pCtx, pRateInfo->isIRate, pRateInfo->firstKey, pRateInfo->lastKey, pRateInfo->firstValue, pRateInfo->lastValue, pRateInfo->CorrectionValue, pRateInfo->hasResult); @@ -4331,8 +4217,8 @@ static void rate_finalizer(SQLFunctionCtx *pCtx) { static void irate_function(SQLFunctionCtx *pCtx) { int32_t notNullElems = 0; - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - SRateInfo *pRateInfo = (SRateInfo *)pResInfo->interResultBuf; + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + SRateInfo *pRateInfo = (SRateInfo *)GET_ROWCELL_INTERBUF(pResInfo); TSKEY *primaryKey = pCtx->ptsList; tscDebug("%p irate_function() size:%d, hasNull:%d", pCtx, pCtx->size, pCtx->hasNull); @@ -4351,22 +4237,7 @@ static void irate_function(SQLFunctionCtx *pCtx) { notNullElems++; int64_t v = 0; - switch (pCtx->inputType) { - case TSDB_DATA_TYPE_TINYINT: - v = (int64_t)GET_INT8_VAL(pData); - break; - case TSDB_DATA_TYPE_SMALLINT: - v = (int64_t)GET_INT16_VAL(pData); - break; - case TSDB_DATA_TYPE_INT: - 
v = (int64_t)GET_INT32_VAL(pData); - break; - case TSDB_DATA_TYPE_BIGINT: - v = (int64_t)GET_INT64_VAL(pData); - break; - default: - assert(0); - } + GET_TYPED_DATA(v, int64_t, pCtx->inputType, pData); // TODO: calc once if only call this function once ???? if ((INT64_MIN == pRateInfo->lastKey) || (INT64_MIN == pRateInfo->lastValue)) { @@ -4394,8 +4265,8 @@ static void irate_function(SQLFunctionCtx *pCtx) { } // keep the data into the final output buffer for super table query since this execution may be the last one - if (pResInfo->superTableQ) { - memcpy(pCtx->aOutputBuf, pResInfo->interResultBuf, sizeof(SRateInfo)); + if (pCtx->stableQuery) { + memcpy(pCtx->aOutputBuf, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SRateInfo)); } } @@ -4406,28 +4277,13 @@ static void irate_function_f(SQLFunctionCtx *pCtx, int32_t index) { } // NOTE: keep the intermediate result into the interResultBuf - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - SRateInfo *pRateInfo = (SRateInfo *)pResInfo->interResultBuf; + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + SRateInfo *pRateInfo = (SRateInfo *)GET_ROWCELL_INTERBUF(pResInfo); TSKEY *primaryKey = pCtx->ptsList; int64_t v = 0; - switch (pCtx->inputType) { - case TSDB_DATA_TYPE_TINYINT: - v = (int64_t)GET_INT8_VAL(pData); - break; - case TSDB_DATA_TYPE_SMALLINT: - v = (int64_t)GET_INT16_VAL(pData); - break; - case TSDB_DATA_TYPE_INT: - v = (int64_t)GET_INT32_VAL(pData); - break; - case TSDB_DATA_TYPE_BIGINT: - v = (int64_t)GET_INT64_VAL(pData); - break; - default: - assert(0); - } - + GET_TYPED_DATA(v, int64_t, pCtx->inputType, pData); + pRateInfo->firstKey = pRateInfo->lastKey; pRateInfo->firstValue = pRateInfo->lastValue; @@ -4443,16 +4299,16 @@ static void irate_function_f(SQLFunctionCtx *pCtx, int32_t index) { pResInfo->hasResult = DATA_SET_FLAG; // keep the data into the final output buffer for super table query since this execution may be the last one - if (pResInfo->superTableQ) { - memcpy(pCtx->aOutputBuf, pResInfo->interResultBuf, sizeof(SRateInfo)); + if (pCtx->stableQuery) { + memcpy(pCtx->aOutputBuf, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SRateInfo)); } } static void do_sumrate_merge(SQLFunctionCtx *pCtx) { - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - assert(pResInfo->superTableQ); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + assert(pCtx->stableQuery); - SRateInfo *pRateInfo = (SRateInfo *)pResInfo->interResultBuf; + SRateInfo *pRateInfo = (SRateInfo *)GET_ROWCELL_INTERBUF(pResInfo); char * input = GET_INPUT_CHAR(pCtx); for (int32_t i = 0; i < pCtx->size; ++i, input += pCtx->inputBytes) { @@ -4476,7 +4332,7 @@ static void do_sumrate_merge(SQLFunctionCtx *pCtx) { if (DATA_SET_FLAG == pRateInfo->hasResult) { pResInfo->hasResult = DATA_SET_FLAG; SET_VAL(pCtx, pRateInfo->num, 1); - memcpy(pCtx->aOutputBuf, pResInfo->interResultBuf, sizeof(SRateInfo)); + memcpy(pCtx->aOutputBuf, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SRateInfo)); } } @@ -4491,10 +4347,10 @@ static void sumrate_func_second_merge(SQLFunctionCtx *pCtx) { } static void sumrate_finalizer(SQLFunctionCtx *pCtx) { - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - SRateInfo *pRateInfo = (SRateInfo *)pResInfo->interResultBuf; + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + SRateInfo *pRateInfo = (SRateInfo *)GET_ROWCELL_INTERBUF(pResInfo); - tscDebug("%p sumrate_finalizer() superTableQ:%d num:%" PRId64 " sum:%f hasResult:%d", pCtx, pResInfo->superTableQ, pRateInfo->num, pRateInfo->sum, pRateInfo->hasResult); + tscDebug("%p sumrate_finalizer() superTableQ:%d num:%" PRId64 " sum:%f 
hasResult:%d", pCtx, pCtx->stableQuery, pRateInfo->num, pRateInfo->sum, pRateInfo->hasResult); if (pRateInfo->hasResult != DATA_SET_FLAG) { setNull(pCtx->aOutputBuf, TSDB_DATA_TYPE_DOUBLE, sizeof(double)); diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c index b4c3f3549b1576b3ff83300d4f10d9e07778530d..538e652f3c6577098363565a4e76fa637b60709c 100644 --- a/src/client/src/tscLocal.c +++ b/src/client/src/tscLocal.c @@ -49,82 +49,6 @@ typedef struct SCreateBuilder { } SCreateBuilder; static void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnName, int16_t type, size_t valueLength); -static int32_t getToStringLength(const char *pData, int32_t length, int32_t type) { - char buf[512] = {0}; - - int32_t len = 0; - int32_t MAX_BOOL_TYPE_LENGTH = 5; // max(strlen("true"), strlen("false")); - switch (type) { - case TSDB_DATA_TYPE_BINARY: - return length; - case TSDB_DATA_TYPE_NCHAR: - return length; - case TSDB_DATA_TYPE_DOUBLE: { - double dv = 0; - dv = GET_DOUBLE_VAL(pData); - len = sprintf(buf, "%lf", dv); - if (strncasecmp("nan", buf, 3) == 0) { - len = 4; - } - } break; - case TSDB_DATA_TYPE_FLOAT: { - float fv = 0; - fv = GET_FLOAT_VAL(pData); - len = sprintf(buf, "%f", fv); - if (strncasecmp("nan", buf, 3) == 0) { - len = 4; - } - } break; - case TSDB_DATA_TYPE_TIMESTAMP: - case TSDB_DATA_TYPE_BIGINT: - len = sprintf(buf, "%" PRId64, *(int64_t *)pData); - break; - case TSDB_DATA_TYPE_BOOL: - len = MAX_BOOL_TYPE_LENGTH; - break; - default: - len = sprintf(buf, "%d", *(int32_t *)pData); - break; - }; - return len; -} - -/* - * we need to convert all data into string, so we need to sprintf all kinds of - * non-string data into string, and record its length to get the right - * maximum length. The length may be less or greater than its original binary length: - * For example: - * length((short) 1) == 1, less than sizeof(short) - * length((uint64_t) 123456789011) > 12, greater than sizsof(uint64_t) - */ -static int32_t tscMaxLengthOfTagsFields(SSqlObj *pSql) { - STableMeta *pMeta = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0)->pTableMeta; - - if (pMeta->tableType == TSDB_SUPER_TABLE || pMeta->tableType == TSDB_NORMAL_TABLE || - pMeta->tableType == TSDB_STREAM_TABLE) { - return 0; - } - - char * pTagValue = tsGetTagsValue(pMeta); - SSchema *pTagsSchema = tscGetTableTagSchema(pMeta); - - int32_t len = getToStringLength(pTagValue, pTagsSchema[0].bytes, pTagsSchema[0].type); - - pTagValue += pTagsSchema[0].bytes; - int32_t numOfTags = tscGetNumOfTags(pMeta); - - for (int32_t i = 1; i < numOfTags; ++i) { - int32_t tLen = getToStringLength(pTagValue, pTagsSchema[i].bytes, pTagsSchema[i].type); - if (len < tLen) { - len = tLen; - } - - pTagValue += pTagsSchema[i].bytes; - } - - return len; -} - static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) { SSqlRes *pRes = &pSql->res; @@ -186,8 +110,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) { return 0; } - // the following is handle display tags value for meters created according to metric - char *pTagValue = tsGetTagsValue(pMeta); + // the following is handle display tags for table created according to super table for (int32_t i = numOfRows; i < totalNumOfRows; ++i) { // field name TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 0); @@ -219,8 +142,6 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) { char *target = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 3) * totalNumOfRows + pField->bytes * i; const char *src = "TAG"; 
STR_WITH_MAXSIZE_TO_VARSTR(target, src, pField->bytes); - - pTagValue += pSchema[i].bytes; } return 0; @@ -241,7 +162,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, SInternalField* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, - (TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE, (TSDB_COL_NAME_LEN - 1), false); + (TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE, -1000, (TSDB_COL_NAME_LEN - 1), false); rowLen += ((TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE); @@ -251,7 +172,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(typeColLength + VARSTR_HEADER_SIZE), - typeColLength, false); + -1000, typeColLength, false); rowLen += typeColLength + VARSTR_HEADER_SIZE; @@ -261,7 +182,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_INT, sizeof(int32_t), - sizeof(int32_t), false); + -1000, sizeof(int32_t), false); rowLen += sizeof(int32_t); @@ -271,7 +192,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(noteColLength + VARSTR_HEADER_SIZE), - noteColLength, false); + -1000, noteColLength, false); rowLen += noteColLength + VARSTR_HEADER_SIZE; return rowLen; @@ -286,10 +207,10 @@ static int32_t tscProcessDescribeTable(SSqlObj *pSql) { const int32_t TYPE_COLUMN_LENGTH = 16; const int32_t NOTE_COLUMN_MIN_LENGTH = 8; - int32_t noteFieldLen = tscMaxLengthOfTagsFields(pSql); - if (noteFieldLen == 0) { - noteFieldLen = NOTE_COLUMN_MIN_LENGTH; - } + int32_t noteFieldLen = NOTE_COLUMN_MIN_LENGTH;//tscMaxLengthOfTagsFields(pSql); +// if (noteFieldLen == 0) { +// noteFieldLen = NOTE_COLUMN_MIN_LENGTH; +// } int32_t rowLen = tscBuildTableSchemaResultFields(pSql, NUM_OF_DESC_TABLE_COLUMNS, TYPE_COLUMN_LENGTH, noteFieldLen); tscFieldInfoUpdateOffset(pQueryInfo); @@ -420,7 +341,7 @@ TAOS_ROW tscFetchRow(void *param) { return NULL; } - void* data = doSetResultRowData(pSql, true); + void* data = doSetResultRowData(pSql); tscClearSqlOwner(pSql); return data; @@ -486,8 +407,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const } SInternalField* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); - pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, - f.bytes, f.bytes - VARSTR_HEADER_SIZE, false); + pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, f.bytes, -1000, f.bytes - VARSTR_HEADER_SIZE, false); rowLen += f.bytes; @@ -501,7 +421,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, - (int16_t)(ddlLen + VARSTR_HEADER_SIZE), ddlLen, false); + (int16_t)(ddlLen + VARSTR_HEADER_SIZE), -1000, ddlLen, false); rowLen += ddlLen + VARSTR_HEADER_SIZE; @@ -698,7 +618,11 @@ static int32_t 
tscRebuildDDLForNormalTable(SSqlObj *pSql, const char *tableName, for (int32_t i = 0; i < numOfRows; ++i) { uint8_t type = pSchema[i].type; if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { - snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName,pSchema->bytes); + int32_t bytes = pSchema[i].bytes - VARSTR_HEADER_SIZE; + if (type == TSDB_DATA_TYPE_NCHAR) { + bytes = bytes/TSDB_NCHAR_SIZE; + } + snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name, tDataTypeDesc[pSchema[i].type].aName, bytes); } else { snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[pSchema[i].type].aName); } @@ -721,7 +645,11 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName, for (int32_t i = 0; i < numOfRows; ++i) { uint8_t type = pSchema[i].type; if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { - snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result),"%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName,pSchema->bytes); + int32_t bytes = pSchema[i].bytes - VARSTR_HEADER_SIZE; + if (type == TSDB_DATA_TYPE_NCHAR) { + bytes = bytes/TSDB_NCHAR_SIZE; + } + snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result),"%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName, bytes); } else { snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[type].aName); } @@ -731,7 +659,11 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName, for (int32_t i = numOfRows; i < totalRows; i++) { uint8_t type = pSchema[i].type; if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { - snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName,pSchema->bytes); + int32_t bytes = pSchema[i].bytes - VARSTR_HEADER_SIZE; + if (type == TSDB_DATA_TYPE_NCHAR) { + bytes = bytes/TSDB_NCHAR_SIZE; + } + snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName, bytes); } else { snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[type].aName); } diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index 44ccb2471aa980d789c8557aca7981b6981df02f..3c7d46f914dd2632ac97cbcb728898ff9a657e82 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -13,14 +13,15 @@ * along with this program. If not, see . 
*/ +#include "tscLocalMerge.h" +#include "tscSubquery.h" #include "os.h" +#include "qAst.h" #include "tlosertree.h" +#include "tscLog.h" #include "tscUtil.h" #include "tschemautil.h" #include "tsclient.h" -#include "tutil.h" -#include "tscLog.h" -#include "tscLocalMerge.h" typedef struct SCompareParam { SLocalDataSource **pLocalData; @@ -97,14 +98,14 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalReducer *pReducer, tOrderDesc pCtx->param[2].i64Key = pQueryInfo->order.order; pCtx->param[2].nType = TSDB_DATA_TYPE_BIGINT; pCtx->param[1].i64Key = pQueryInfo->order.orderColId; + } else if (functionId == TSDB_FUNC_APERCT) { + pCtx->param[0].i64Key = pExpr->param[0].i64Key; + pCtx->param[0].nType = pExpr->param[0].nType; } - SResultInfo *pResInfo = &pReducer->pResInfo[i]; - pResInfo->bufLen = pExpr->interBytes; - pResInfo->interResultBuf = calloc(1, (size_t) pResInfo->bufLen); - - pCtx->resultInfo = &pReducer->pResInfo[i]; - pCtx->resultInfo->superTableQ = true; + pCtx->interBufBytes = pExpr->interBytes; + pCtx->resultInfo = calloc(1, pCtx->interBufBytes + sizeof(SResultRowCellInfo)); + pCtx->stableQuery = true; } int16_t n = 0; @@ -132,40 +133,53 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalReducer *pReducer, tOrderDesc } static SFillColInfo* createFillColInfo(SQueryInfo* pQueryInfo) { - int32_t numOfCols = (int32_t)tscSqlExprNumOfExprs(pQueryInfo); + int32_t numOfCols = (int32_t)tscNumOfFields(pQueryInfo); int32_t offset = 0; SFillColInfo* pFillCol = calloc(numOfCols, sizeof(SFillColInfo)); for(int32_t i = 0; i < numOfCols; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); - - pFillCol[i].col.bytes = pExpr->resBytes; - pFillCol[i].col.type = (int8_t)pExpr->resType; - pFillCol[i].col.colId = pExpr->colInfo.colId; - pFillCol[i].flag = pExpr->colInfo.flag; - pFillCol[i].col.offset = offset; - pFillCol[i].functionId = pExpr->functionId; - pFillCol[i].fillVal.i = pQueryInfo->fillVal[i]; - offset += pExpr->resBytes; + SInternalField* pIField = taosArrayGet(pQueryInfo->fieldsInfo.internalField, i); + + if (pIField->pArithExprInfo == NULL) { + SSqlExpr* pExpr = pIField->pSqlExpr; + + pFillCol[i].col.bytes = pExpr->resBytes; + pFillCol[i].col.type = (int8_t)pExpr->resType; + pFillCol[i].col.colId = pExpr->colInfo.colId; + pFillCol[i].flag = pExpr->colInfo.flag; + pFillCol[i].col.offset = offset; + pFillCol[i].functionId = pExpr->functionId; + pFillCol[i].fillVal.i = pQueryInfo->fillVal[i]; + } else { + pFillCol[i].col.bytes = pIField->field.bytes; + pFillCol[i].col.type = (int8_t)pIField->field.type; + pFillCol[i].col.colId = -100; + pFillCol[i].flag = TSDB_COL_NORMAL; + pFillCol[i].col.offset = offset; + pFillCol[i].functionId = -1; + pFillCol[i].fillVal.i = pQueryInfo->fillVal[i]; + } + + offset += pFillCol[i].col.bytes; } return pFillCol; } void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc, - SColumnModel *finalmodel, SSqlObj* pSql) { + SColumnModel *finalmodel, SColumnModel *pFFModel, SSqlObj* pSql) { SSqlCmd* pCmd = &pSql->cmd; SSqlRes* pRes = &pSql->res; if (pMemBuffer == NULL) { - tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer); + tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer); tscError("%p pMemBuffer is NULL", pMemBuffer); pRes->code = TSDB_CODE_TSC_APP_ERROR; return; } if (pDesc->pColumnModel == NULL) { - tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer); + tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer); tscError("%p 
no local buffer or intermediate result format model", pSql); pRes->code = TSDB_CODE_TSC_APP_ERROR; return; @@ -183,7 +197,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd } if (numOfFlush == 0 || numOfBuffer == 0) { - tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer); + tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer); tscDebug("%p retrieved no data", pSql); return; } @@ -192,7 +206,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd tscError("%p Invalid value of buffer capacity %d and page size %d ", pSql, pDesc->pColumnModel->capacity, pMemBuffer[0]->pageSize); - tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer); + tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer); pRes->code = TSDB_CODE_TSC_APP_ERROR; return; } @@ -203,7 +217,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd if (pReducer == NULL) { tscError("%p failed to create local merge structure, out of memory", pSql); - tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer); + tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer); pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; return; } @@ -227,7 +241,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd if (ds == NULL) { tscError("%p failed to create merge structure", pSql); pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; - taosTFree(pReducer); + tfree(pReducer); return; } @@ -254,7 +268,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd if (ds->filePage.num == 0) { // no data in this flush, the index does not increase tscDebug("%p flush data is empty, ignore %d flush record", pSql, idx); - taosTFree(ds); + tfree(ds); continue; } @@ -264,7 +278,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd // no data actually, no need to merge result. 
if (idx == 0) { - taosTFree(pReducer); + tfree(pReducer); return; } @@ -272,7 +286,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd SCompareParam *param = malloc(sizeof(SCompareParam)); if (param == NULL) { - taosTFree(pReducer); + tfree(pReducer); return; } @@ -286,8 +300,8 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd pRes->code = tLoserTreeCreate(&pReducer->pLoserTree, pReducer->numOfBuffer, param, treeComparator); if (pReducer->pLoserTree == NULL || pRes->code != 0) { - taosTFree(param); - taosTFree(pReducer); + tfree(param); + tfree(pReducer); return; } @@ -320,6 +334,8 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd pReducer->resColModel = finalmodel; pReducer->resColModel->capacity = pReducer->nResultBufSize; + pReducer->finalModel = pFFModel; + assert(pReducer->finalRowSize > 0); if (pReducer->finalRowSize > 0) { pReducer->resColModel->capacity /= pReducer->finalRowSize; @@ -330,22 +346,19 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd if (pReducer->pTempBuffer == NULL || pReducer->discardData == NULL || pReducer->pResultBuf == NULL || /*pReducer->pBufForInterpo == NULL || */pReducer->pFinalRes == NULL || pReducer->prevRowOfInput == NULL) { - taosTFree(pReducer->pTempBuffer); - taosTFree(pReducer->discardData); - taosTFree(pReducer->pResultBuf); - taosTFree(pReducer->pFinalRes); - taosTFree(pReducer->prevRowOfInput); - taosTFree(pReducer->pLoserTree); - taosTFree(param); - taosTFree(pReducer); + tfree(pReducer->pTempBuffer); + tfree(pReducer->discardData); + tfree(pReducer->pResultBuf); + tfree(pReducer->pFinalRes); + tfree(pReducer->prevRowOfInput); + tfree(pReducer->pLoserTree); + tfree(param); + tfree(pReducer); pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; return; } - size_t numOfCols = tscSqlExprNumOfExprs(pQueryInfo); - pReducer->pTempBuffer->num = 0; - pReducer->pResInfo = calloc(numOfCols, sizeof(SResultInfo)); tscCreateResPointerInfo(pRes, pQueryInfo); tscInitSqlContext(pCmd, pReducer, pDesc); @@ -373,8 +386,8 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd if (pQueryInfo->fillType != TSDB_FILL_NONE) { SFillColInfo* pFillCol = createFillColInfo(pQueryInfo); pReducer->pFillInfo = taosInitFillInfo(pQueryInfo->order.order, revisedSTime, pQueryInfo->groupbyExpr.numOfGroupCols, - 4096, (int32_t)numOfCols, pQueryInfo->interval.sliding, pQueryInfo->interval.slidingUnit, - tinfo.precision, pQueryInfo->fillType, pFillCol); + 4096, (int32_t)pQueryInfo->fieldsInfo.numOfOutput, pQueryInfo->interval.sliding, pQueryInfo->interval.slidingUnit, + tinfo.precision, pQueryInfo->fillType, pFillCol, pSql); } } @@ -489,47 +502,41 @@ void tscDestroyLocalReducer(SSqlObj *pSql) { tscDebug("%p waiting for delete procedure, status: %d", pSql, status); } - pLocalReducer->pFillInfo = taosDestoryFillInfo(pLocalReducer->pFillInfo); + pLocalReducer->pFillInfo = taosDestroyFillInfo(pLocalReducer->pFillInfo); if (pLocalReducer->pCtx != NULL) { - for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { + int32_t numOfExprs = (int32_t) tscSqlExprNumOfExprs(pQueryInfo); + for (int32_t i = 0; i < numOfExprs; ++i) { SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[i]; tVariantDestroy(&pCtx->tag); + tfree(pCtx->resultInfo); + if (pCtx->tagInfo.pTagCtxList != NULL) { - taosTFree(pCtx->tagInfo.pTagCtxList); + tfree(pCtx->tagInfo.pTagCtxList); } } - taosTFree(pLocalReducer->pCtx); + tfree(pLocalReducer->pCtx); } - 
taosTFree(pLocalReducer->prevRowOfInput); - - taosTFree(pLocalReducer->pTempBuffer); - taosTFree(pLocalReducer->pResultBuf); + tfree(pLocalReducer->prevRowOfInput); - if (pLocalReducer->pResInfo != NULL) { - size_t num = tscSqlExprNumOfExprs(pQueryInfo); - for (int32_t i = 0; i < num; ++i) { - taosTFree(pLocalReducer->pResInfo[i].interResultBuf); - } - - taosTFree(pLocalReducer->pResInfo); - } + tfree(pLocalReducer->pTempBuffer); + tfree(pLocalReducer->pResultBuf); if (pLocalReducer->pLoserTree) { - taosTFree(pLocalReducer->pLoserTree->param); - taosTFree(pLocalReducer->pLoserTree); + tfree(pLocalReducer->pLoserTree->param); + tfree(pLocalReducer->pLoserTree); } - taosTFree(pLocalReducer->pFinalRes); - taosTFree(pLocalReducer->discardData); + tfree(pLocalReducer->pFinalRes); + tfree(pLocalReducer->discardData); - tscLocalReducerEnvDestroy(pLocalReducer->pExtMemBuffer, pLocalReducer->pDesc, pLocalReducer->resColModel, + tscLocalReducerEnvDestroy(pLocalReducer->pExtMemBuffer, pLocalReducer->pDesc, pLocalReducer->resColModel, pLocalReducer->finalModel, pLocalReducer->numOfVnode); for (int32_t i = 0; i < pLocalReducer->numOfBuffer; ++i) { - taosTFree(pLocalReducer->pLocalDataSrc[i]); + tfree(pLocalReducer->pLocalDataSrc[i]); } pLocalReducer->numOfBuffer = 0; @@ -563,7 +570,8 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm if (numOfGroupByCols > 0) { if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) { - int32_t startCols = pQueryInfo->fieldsInfo.numOfOutput - pQueryInfo->groupbyExpr.numOfGroupCols; + int32_t numOfInternalOutput = (int32_t) tscSqlExprNumOfExprs(pQueryInfo); + int32_t startCols = numOfInternalOutput - pQueryInfo->groupbyExpr.numOfGroupCols; // the last "pQueryInfo->groupbyExpr.numOfGroupCols" columns are order-by columns for (int32_t i = 0; i < pQueryInfo->groupbyExpr.numOfGroupCols; ++i) { @@ -596,7 +604,7 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm } *pOrderDesc = tOrderDesCreate(orderColIndexList, numOfGroupByCols, pModel, pQueryInfo->order.order); - taosTFree(orderColIndexList); + tfree(orderColIndexList); if (*pOrderDesc == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY; @@ -649,7 +657,7 @@ bool isSameGroup(SSqlCmd *pCmd, SLocalReducer *pReducer, char *pPrev, tFilePage } int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOrderDescriptor **pOrderDesc, - SColumnModel **pFinalModel, uint32_t nBufferSizes) { + SColumnModel **pFinalModel, SColumnModel** pFFModel, uint32_t nBufferSizes) { SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; @@ -682,6 +690,8 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr pSchema[i].bytes = pExpr->resBytes; pSchema[i].type = (int8_t)pExpr->resType; + tstrncpy(pSchema[i].name, pExpr->aliasName, tListLen(pSchema[i].name)); + rlen += pExpr->resBytes; } @@ -698,7 +708,8 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr pg *= 2; } - size_t numOfSubs = pTableMetaInfo->vgroupList->numOfVgroups; + size_t numOfSubs = pSql->subState.numOfSub; + assert(numOfSubs <= pTableMetaInfo->vgroupList->numOfVgroups); for (int32_t i = 0; i < numOfSubs; ++i) { (*pMemBuffer)[i] = createExtMemBuffer(nBufferSizes, rlen, pg, pModel); (*pMemBuffer)[i]->flushModel = MULTIPLE_APPEND_MODEL; @@ -706,7 +717,7 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr if (createOrderDescriptor(pOrderDesc, pCmd, pModel) != TSDB_CODE_SUCCESS) { pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; - 
taosTFree(pSchema); + tfree(pSchema); return pRes->code; } @@ -743,8 +754,20 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr } *pFinalModel = createColumnModel(pSchema, (int32_t)size, capacity); - taosTFree(pSchema); + memset(pSchema, 0, sizeof(SSchema) * size); + size = tscNumOfFields(pQueryInfo); + + for(int32_t i = 0; i < size; ++i) { + SInternalField* pField = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i); + pSchema[i].bytes = pField->field.bytes; + pSchema[i].type = pField->field.type; + tstrncpy(pSchema[i].name, pField->field.name, tListLen(pSchema[i].name)); + } + + *pFFModel = createColumnModel(pSchema, (int32_t) size, capacity); + + tfree(pSchema); return TSDB_CODE_SUCCESS; } @@ -754,16 +777,18 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr * @param pFinalModel * @param numOfVnodes */ -void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, SColumnModel *pFinalModel, +void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, SColumnModel *pFinalModel, SColumnModel *pFFModel, int32_t numOfVnodes) { destroyColumnModel(pFinalModel); + destroyColumnModel(pFFModel); + tOrderDescDestroy(pDesc); for (int32_t i = 0; i < numOfVnodes; ++i) { pMemBuffer[i] = destoryExtMemBuffer(pMemBuffer[i]); } - taosTFree(pMemBuffer); + tfree(pMemBuffer); } /** @@ -859,17 +884,17 @@ static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer, tFilePage * pBeforeFillData = pLocalReducer->pResultBuf; pRes->data = pLocalReducer->pFinalRes; - pRes->numOfRows = pBeforeFillData->num; + pRes->numOfRows = (int32_t) pBeforeFillData->num; if (pQueryInfo->limit.offset > 0) { if (pQueryInfo->limit.offset < pRes->numOfRows) { - int32_t prevSize = (int32_t)pBeforeFillData->num; - tColModelErase(pLocalReducer->resColModel, pBeforeFillData, prevSize, 0, (int32_t)pQueryInfo->limit.offset - 1); + int32_t prevSize = (int32_t) pBeforeFillData->num; + tColModelErase(pLocalReducer->finalModel, pBeforeFillData, prevSize, 0, (int32_t)pQueryInfo->limit.offset - 1); /* remove the hole in column model */ - tColModelCompact(pLocalReducer->resColModel, pBeforeFillData, prevSize); + tColModelCompact(pLocalReducer->finalModel, pBeforeFillData, prevSize); - pRes->numOfRows -= pQueryInfo->limit.offset; + pRes->numOfRows -= (int32_t) pQueryInfo->limit.offset; pQueryInfo->limit.offset = 0; } else { pQueryInfo->limit.offset -= pRes->numOfRows; @@ -889,7 +914,7 @@ static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer, pRes->numOfRows -= overflow; pBeforeFillData->num -= overflow; - tColModelCompact(pLocalReducer->resColModel, pBeforeFillData, prevSize); + tColModelCompact(pLocalReducer->finalModel, pBeforeFillData, prevSize); // set remain data to be discarded, and reset the interpolation information savePrevRecordAndSetupFillInfo(pLocalReducer, pQueryInfo, pLocalReducer->pFillInfo); @@ -923,7 +948,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO } while (1) { - int64_t newRows = taosGenerateDataBlock(pFillInfo, pResPages, pLocalReducer->resColModel->capacity); + int64_t newRows = taosFillResultDataBlock(pFillInfo, pResPages, pLocalReducer->resColModel->capacity); if (pQueryInfo->limit.offset < newRows) { newRows -= pQueryInfo->limit.offset; @@ -937,7 +962,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO } pRes->data = pLocalReducer->pFinalRes; - pRes->numOfRows = newRows; + pRes->numOfRows 
= (int32_t) newRows; pQueryInfo->limit.offset = 0; break; @@ -952,7 +977,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO } // all output in current group are completed - int32_t totalRemainRows = (int32_t)getFilledNumOfRes(pFillInfo, actualETime, pLocalReducer->resColModel->capacity); + int32_t totalRemainRows = (int32_t)getNumOfResWithFill(pFillInfo, actualETime, pLocalReducer->resColModel->capacity); if (totalRemainRows <= 0) { break; } @@ -973,10 +998,11 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO savePrevRecordAndSetupFillInfo(pLocalReducer, pQueryInfo, pFillInfo); } + int32_t offset = 0; for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); - int16_t offset = getColumnModelOffset(pLocalReducer->resColModel, i); memcpy(pRes->data + offset * pRes->numOfRows, pResPages[i]->data, (size_t)(pField->bytes * pRes->numOfRows)); + offset += pField->bytes; } pRes->numOfRowsGroup += pRes->numOfRows; @@ -985,10 +1011,10 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO pBeforeFillData->num = 0; for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { - taosTFree(pResPages[i]); + tfree(pResPages[i]); } - taosTFree(pResPages); + tfree(pResPages); } static void savePreviousRow(SLocalReducer *pLocalReducer, tFilePage *tmpBuffer) { @@ -1071,7 +1097,7 @@ static int64_t getNumOfResultLocal(SQueryInfo *pQueryInfo, SQLFunctionCtx *pCtx) continue; } - SResultInfo* pResInfo = GET_RES_INFO(&pCtx[j]); + SResultRowCellInfo* pResInfo = GET_RES_INFO(&pCtx[j]); if (maxOutput < pResInfo->numOfRes) { maxOutput = pResInfo->numOfRes; } @@ -1229,6 +1255,10 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur tColModelCompact(pModel, pResBuf, pModel->capacity); + if (tscIsSecondStageQuery(pQueryInfo)) { + pLocalReducer->finalRowSize = doArithmeticCalculate(pQueryInfo, pResBuf, pModel->rowSize, pLocalReducer->finalRowSize); + } + #ifdef _DEBUG_VIEW printf("final result before interpo:\n"); // tColModelDisplay(pLocalReducer->resColModel, pLocalReducer->pBufForInterpo, pResBuf->num, pResBuf->num); @@ -1252,10 +1282,11 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur return true; } -void resetOutputBuf(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) { // reset output buffer to the beginning - for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { - pLocalReducer->pCtx[i].aOutputBuf = - pLocalReducer->pResultBuf->data + tscFieldInfoGetOffset(pQueryInfo, i) * pLocalReducer->resColModel->capacity; +void resetOutputBuf(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) {// reset output buffer to the beginning + size_t t = tscSqlExprNumOfExprs(pQueryInfo); + for (int32_t i = 0; i < t; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); + pLocalReducer->pCtx[i].aOutputBuf = pLocalReducer->pResultBuf->data + pExpr->offset * pLocalReducer->resColModel->capacity; } memset(pLocalReducer->pResultBuf, 0, pLocalReducer->nResultBufSize + sizeof(tFilePage)); @@ -1300,7 +1331,7 @@ static bool doBuildFilledResultForGroup(SSqlObj *pSql) { int64_t etime = *(int64_t *)(pFinalDataBuf->data + TSDB_KEYSIZE * (pFillInfo->numOfRows - 1)); // the first column must be the timestamp column - int32_t rows = (int32_t)getFilledNumOfRes(pFillInfo, etime, pLocalReducer->resColModel->capacity); + int32_t rows = (int32_t) getNumOfResWithFill(pFillInfo, etime, 
pLocalReducer->resColModel->capacity); if (rows > 0) { // do fill gap doFillResult(pSql, pLocalReducer, false); } @@ -1329,7 +1360,7 @@ static bool doHandleLastRemainData(SSqlObj *pSql) { ((pRes->numOfRowsGroup < pQueryInfo->limit.limit && pQueryInfo->limit.limit > 0) || (pQueryInfo->limit.limit < 0))) { int64_t etime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.ekey : pQueryInfo->window.skey; - int32_t rows = (int32_t)getFilledNumOfRes(pFillInfo, etime, pLocalReducer->resColModel->capacity); + int32_t rows = (int32_t)getNumOfResWithFill(pFillInfo, etime, pLocalReducer->resColModel->capacity); if (rows > 0) { doFillResult(pSql, pLocalReducer, true); } @@ -1500,8 +1531,7 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) { if (pLocalReducer->discard && sameGroup) { pLocalReducer->hasUnprocessedRow = false; tmpBuffer->num = 0; - } else { - // current row does not belongs to the previous group, so it is not be handled yet. + } else { // current row does not belongs to the previous group, so it is not be handled yet. pLocalReducer->hasUnprocessedRow = true; } @@ -1595,3 +1625,46 @@ void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen) pRes->pLocalReducer->pResultBuf->num = numOfRes; pRes->data = pRes->pLocalReducer->pResultBuf->data; } + +int32_t doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize) { + char* pbuf = calloc(1, pOutput->num * rowSize); + + size_t size = tscNumOfFields(pQueryInfo); + SArithmeticSupport arithSup = {0}; + + // todo refactor + arithSup.offset = 0; + arithSup.numOfCols = (int32_t) tscSqlExprNumOfExprs(pQueryInfo); + arithSup.exprList = pQueryInfo->exprList; + arithSup.data = calloc(arithSup.numOfCols, POINTER_BYTES); + + for(int32_t k = 0; k < arithSup.numOfCols; ++k) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, k); + arithSup.data[k] = (pOutput->data + pOutput->num* pExpr->offset); + } + + int32_t offset = 0; + + for (int i = 0; i < size; ++i) { + SInternalField* pSup = TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i); + + // calculate the result from several other columns + if (pSup->pArithExprInfo != NULL) { + arithSup.pArithExpr = pSup->pArithExprInfo; + tExprTreeCalcTraverse(arithSup.pArithExpr->pExpr, (int32_t) pOutput->num, pbuf + pOutput->num*offset, &arithSup, TSDB_ORDER_ASC, getArithmeticInputSrc); + } else { + SSqlExpr* pExpr = pSup->pSqlExpr; + memcpy(pbuf + pOutput->num * offset, pExpr->offset * pOutput->num + pOutput->data, pExpr->resBytes * pOutput->num); + } + + offset += pSup->field.bytes; + } + + assert(finalRowSize <= rowSize); + memcpy(pOutput->data, pbuf, pOutput->num * offset); + + tfree(pbuf); + tfree(arithSup.data); + + return offset; +} \ No newline at end of file diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index ac34d26d2fe747773a550dd76ac7e967a592199e..a44a158f93095c752f6aa203eb35e58e1d7ba2a0 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -702,7 +702,7 @@ static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableList, char **st } int32_t code = TSDB_CODE_TSC_INVALID_SQL; - char * tmpTokenBuf = calloc(1, 4096); // used for deleting Escape character: \\, \', \" + char * tmpTokenBuf = calloc(1, 16*1024); // used for deleting Escape character: \\, \', \" if (NULL == tmpTokenBuf) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -1148,6 +1148,10 @@ int tsParseInsertSql(SSqlObj *pSql) { index = 0; sToken = tStrGetToken(str, &index, false, 0, NULL); + if (sToken.type != 
TK_STRING && sToken.type != TK_ID) { + code = tscInvalidSQLErrMsg(pCmd->payload, "file path is required following keyword FILE", sToken.z); + goto _error; + } str += index; if (sToken.n == 0) { code = tscInvalidSQLErrMsg(pCmd->payload, "file path is required following keyword FILE", sToken.z); @@ -1309,7 +1313,7 @@ int tsParseSql(SSqlObj *pSql, bool initial) { if ((!pCmd->parseFinished) && (!initial)) { tscDebug("%p resume to parse sql: %s", pSql, pCmd->curSql); } - + ret = tscAllocPayload(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE); if (TSDB_CODE_SUCCESS != ret) { return ret; @@ -1406,7 +1410,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) { assert(taos_errno(pSql) == code); taos_free_result(pSql); - taosTFree(pSupporter); + tfree(pSupporter); fclose(fp); pParentSql->res.code = code; @@ -1445,7 +1449,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) { char *tokenBuf = calloc(1, 4096); - while ((readLen = taosGetline(&line, &n, fp)) != -1) { + while ((readLen = tgetline(&line, &n, fp)) != -1) { if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { line[--readLen] = 0; } @@ -1470,7 +1474,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) { } } - taosTFree(tokenBuf); + tfree(tokenBuf); free(line); if (count > 0) { @@ -1483,7 +1487,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) { } else { taos_free_result(pSql); - taosTFree(pSupporter); + tfree(pSupporter); fclose(fp); pParentSql->fp = pParentSql->fetchFp; @@ -1513,7 +1517,7 @@ void tscProcessMultiVnodesImportFromFile(SSqlObj *pSql) { pSql->res.code = TAOS_SYSTEM_ERROR(errno); tscError("%p failed to open file %s to load data from file, code:%s", pSql, pCmd->payload, tstrerror(pSql->res.code)); - taosTFree(pSupporter) + tfree(pSupporter) tscQueueAsyncRes(pSql); return; diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 1739e4348ca2d1e90e4cdc292a14c7dcc5dde2da..68f2ecbf0e194bc13362b0af965959367a9f86b8 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -268,7 +268,6 @@ static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) { if (1) { // allow user bind param data with different type - short size = 0; union { int8_t v1; int16_t v2; @@ -600,7 +599,7 @@ static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) { if ((*bind->length) > (uintptr_t)param->bytes) { return TSDB_CODE_TSC_INVALID_VALUE; } - size = (short)*bind->length; + short size = (short)*bind->length; STR_WITH_SIZE_TO_VARSTR(data + param->offset, bind->buffer, size); return TSDB_CODE_SUCCESS; } break; diff --git a/src/client/src/tscProfile.c b/src/client/src/tscProfile.c index eb6843b0e4fd844c9d13da7f55dca47008f62a79..acc5acd786bfe00036e539a104da04af1485a263 100644 --- a/src/client/src/tscProfile.c +++ b/src/client/src/tscProfile.c @@ -222,7 +222,7 @@ void tscKillStream(STscObj *pObj, uint32_t killId) { } int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) { - SCMHeartBeatMsg *pHeartbeat = pMsg; + SHeartBeatMsg *pHeartbeat = pMsg; int allocedQueriesNum = pHeartbeat->numOfQueries; int allocedStreamsNum = pHeartbeat->numOfStreams; @@ -277,7 +277,7 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) { } int32_t msgLen = pHeartbeat->numOfQueries * sizeof(SQueryDesc) + pHeartbeat->numOfStreams * sizeof(SStreamDesc) + - sizeof(SCMHeartBeatMsg); + sizeof(SHeartBeatMsg); pHeartbeat->connId = htonl(pObj->connId); pHeartbeat->numOfQueries = htonl(pHeartbeat->numOfQueries); 
pHeartbeat->numOfStreams = htonl(pHeartbeat->numOfStreams); diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index e2573f7e19303966e9f17a2731444bf9fca7ab87..15d2647c5117d0bffe6dd301fb503f7e9d54235f 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -16,6 +16,7 @@ #define _BSD_SOURCE #define _XOPEN_SOURCE 500 #define _DEFAULT_SOURCE +#define _GNU_SOURCE #include "os.h" #include "qAst.h" @@ -51,26 +52,29 @@ typedef struct SConvertFunc { int32_t originFuncId; int32_t execFuncId; } SConvertFunc; -static SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t outputIndex, int32_t colIndex, int32_t tableIndex); + +static SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t colIndex, int32_t tableIndex); static int32_t setShowInfo(SSqlObj* pSql, SSqlInfo* pInfo); static char* getAccountId(SSqlObj* pSql); -static bool has(tFieldList* pFieldList, int32_t startIdx, const char* name); +static bool has(SArray* pFieldList, int32_t startIdx, const char* name); static void getCurrentDBName(SSqlObj* pSql, SStrToken* pDBToken); static bool hasSpecifyDB(SStrToken* pTableName); -static bool validateTableColumnInfo(tFieldList* pFieldList, SSqlCmd* pCmd); -static bool validateTagParams(tFieldList* pTagsList, tFieldList* pFieldList, SSqlCmd* pCmd); +static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd); +static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pCmd); static int32_t setObjFullName(char* fullName, const char* account, SStrToken* pDB, SStrToken* tableName, int32_t* len); static void getColumnName(tSQLExprItem* pItem, char* resultFieldName, int32_t nameLength); -static void getRevisedName(char* resultFieldName, int32_t functionId, int32_t maxLen, char* columnName); static int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExprItem* pItem, bool finalResult); static int32_t insertResultField(SQueryInfo* pQueryInfo, int32_t outputIndex, SColumnList* pIdList, int16_t bytes, int8_t type, char* fieldName, SSqlExpr* pSqlExpr); -static int32_t changeFunctionID(int32_t optr, int16_t* functionId); + +static int32_t convertFunctionId(int32_t optr, int16_t* functionId); +static uint8_t convertOptr(SStrToken *pToken); + static int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSelection, bool isSTable, bool joinQuery); static bool validateIpAddress(const char* ip, size_t size); @@ -78,11 +82,11 @@ static bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQuer static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery); static void setColumnOffsetValueInResultset(SQueryInfo* pQueryInfo); -static int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd* pCmd); +static int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd); -static int32_t parseIntervalClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql); +static int32_t parseIntervalClause(SSqlObj* pSql, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql); static int32_t parseOffsetClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql); -static int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql); +static int32_t parseSlidingClause(SSqlObj* pSql, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql); static int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExprItem* pItem); @@ -114,7 +118,7 @@ static int32_t optrToString(tSQLExpr* 
pExpr, char** exprString); static int32_t getTableIndexImpl(SStrToken* pTableToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex); static int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo); static int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql); -static int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCMCreateDbMsg* pCreate); +static int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCreateDbMsg* pCreate); static SColumnList getColumnList(int32_t num, int16_t tableIndex, int32_t columnIndex); @@ -124,6 +128,49 @@ static int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo); static int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index); static int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSQLExpr* pSqlExpr, SQueryInfo* pQueryInfo, SArray* pCols, int64_t *uid); +int16_t getNewResColId(SQueryInfo* pQueryInfo) { + return pQueryInfo->resColumnId--; +} + +static uint8_t convertOptr(SStrToken *pToken) { + switch (pToken->type) { + case TK_LT: + return TSDB_RELATION_LESS; + case TK_LE: + return TSDB_RELATION_LESS_EQUAL; + case TK_GT: + return TSDB_RELATION_GREATER; + case TK_GE: + return TSDB_RELATION_GREATER_EQUAL; + case TK_NE: + return TSDB_RELATION_NOT_EQUAL; + case TK_AND: + return TSDB_RELATION_AND; + case TK_OR: + return TSDB_RELATION_OR; + case TK_EQ: + return TSDB_RELATION_EQUAL; + case TK_PLUS: + return TSDB_BINARY_OP_ADD; + case TK_MINUS: + return TSDB_BINARY_OP_SUBTRACT; + case TK_STAR: + return TSDB_BINARY_OP_MULTIPLY; + case TK_SLASH: + case TK_DIVIDE: + return TSDB_BINARY_OP_DIVIDE; + case TK_REM: + return TSDB_BINARY_OP_REMAINDER; + case TK_LIKE: + return TSDB_RELATION_LIKE; + case TK_ISNULL: + return TSDB_RELATION_ISNULL; + case TK_NOTNULL: + return TSDB_RELATION_NOTNULL; + default: { return 0; } + } +} + /* * Used during parsing query sql. Since the query sql usually small in length, error position * is not needed in the final error message. 
@@ -417,7 +464,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { char* pMsg = pCmd->payload; - SCMCfgDnodeMsg* pCfg = (SCMCfgDnodeMsg*)pMsg; + SCfgDnodeMsg* pCfg = (SCfgDnodeMsg*)pMsg; pDCL->a[0].n = strdequote(pDCL->a[0].z); strncpy(pCfg->ep, pDCL->a[0].z, pDCL->a[0].n); @@ -616,14 +663,20 @@ static bool isTopBottomQuery(SQueryInfo* pQueryInfo) { return false; } -int32_t parseIntervalClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) { +int32_t parseIntervalClause(SSqlObj* pSql, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) { const char* msg1 = "invalid query expression"; const char* msg2 = "interval cannot be less than 10 ms"; + const char* msg3 = "sliding cannot be used without interval"; + + SSqlCmd* pCmd = &pSql->cmd; STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta); if (pQuerySql->interval.type == 0 || pQuerySql->interval.n == 0) { + if (pQuerySql->sliding.n > 0) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } return TSDB_CODE_SUCCESS; } @@ -656,7 +709,7 @@ int32_t parseIntervalClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ return TSDB_CODE_TSC_INVALID_SQL; } - if (parseSlidingClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) { + if (parseSlidingClause(pSql, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } @@ -709,7 +762,7 @@ int32_t parseIntervalClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ return TSDB_CODE_TSC_INVALID_SQL; } - if (parseSlidingClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) { + if (parseSlidingClause(pSql, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } @@ -767,13 +820,15 @@ int32_t parseOffsetClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQue return TSDB_CODE_SUCCESS; } -int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) { +int32_t parseSlidingClause(SSqlObj* pSql, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) { const char* msg0 = "sliding value too small"; const char* msg1 = "sliding value no larger than the interval value"; const char* msg2 = "sliding value can not less than 1% of interval value"; - const char* msg3 = "does not support sliding when interval is natual month/year"; + const char* msg3 = "does not support sliding when interval is natural month/year"; +// const char* msg4 = "sliding not support yet in ordinary query"; const static int32_t INTERVAL_SLIDING_FACTOR = 100; + SSqlCmd* pCmd = &pSql->cmd; STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta); @@ -806,6 +861,10 @@ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } +// if (pQueryInfo->interval.sliding != pQueryInfo->interval.interval && pSql->pStream == NULL) { +// return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); +// } + return TSDB_CODE_SUCCESS; } @@ -852,7 +911,7 @@ int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pzTableNa return TSDB_CODE_SUCCESS; } -static bool validateTableColumnInfo(tFieldList* pFieldList, SSqlCmd* pCmd) { +static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd) { assert(pFieldList != NULL); const char* msg = "illegal number of columns"; @@ -864,35 +923,28 @@ static bool validateTableColumnInfo(tFieldList* pFieldList, SSqlCmd* pCmd) { const char* msg6 = "invalid 
column name"; // number of fields no less than 2 - if (pFieldList->nField <= 1 || pFieldList->nField > TSDB_MAX_COLUMNS) { + size_t numOfCols = taosArrayGetSize(pFieldList); + if (numOfCols <= 1 || numOfCols > TSDB_MAX_COLUMNS) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); return false; } // first column must be timestamp - if (pFieldList->p[0].type != TSDB_DATA_TYPE_TIMESTAMP) { + TAOS_FIELD* pField = taosArrayGet(pFieldList, 0); + if (pField->type != TSDB_DATA_TYPE_TIMESTAMP) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); return false; } int32_t nLen = 0; - for (int32_t i = 0; i < pFieldList->nField; ++i) { - if (pFieldList->p[i].bytes == 0) { + for (int32_t i = 0; i < numOfCols; ++i) { + pField = taosArrayGet(pFieldList, i); + + if (pField->bytes == 0) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); return false; } - nLen += pFieldList->p[i].bytes; - } - // max row length must be less than TSDB_MAX_BYTES_PER_ROW - if (nLen > TSDB_MAX_BYTES_PER_ROW) { - invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); - return false; - } - - // field name must be unique - for (int32_t i = 0; i < pFieldList->nField; ++i) { - TAOS_FIELD* pField = &pFieldList->p[i]; if (pField->type < TSDB_DATA_TYPE_BOOL || pField->type > TSDB_DATA_TYPE_NCHAR) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); return false; @@ -909,16 +961,25 @@ static bool validateTableColumnInfo(tFieldList* pFieldList, SSqlCmd* pCmd) { return false; } - if (has(pFieldList, i + 1, pFieldList->p[i].name) == true) { + // field name must be unique + if (has(pFieldList, i + 1, pField->name) == true) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); return false; } + + nLen += pField->bytes; + } + + // max row length must be less than TSDB_MAX_BYTES_PER_ROW + if (nLen > TSDB_MAX_BYTES_PER_ROW) { + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); + return false; } return true; } -static bool validateTagParams(tFieldList* pTagsList, tFieldList* pFieldList, SSqlCmd* pCmd) { +static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pCmd) { assert(pTagsList != NULL); const char* msg1 = "invalid number of tag columns"; @@ -930,18 +991,21 @@ static bool validateTagParams(tFieldList* pTagsList, tFieldList* pFieldList, SSq const char* msg7 = "invalid binary/nchar tag length"; // number of fields at least 1 - if (pTagsList->nField < 1 || pTagsList->nField > TSDB_MAX_TAGS) { + size_t numOfTags = taosArrayGetSize(pTagsList); + if (numOfTags < 1 || numOfTags > TSDB_MAX_TAGS) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); return false; } int32_t nLen = 0; - for (int32_t i = 0; i < pTagsList->nField; ++i) { - if (pTagsList->p[i].bytes == 0) { + for (int32_t i = 0; i < numOfTags; ++i) { + TAOS_FIELD* p = taosArrayGet(pTagsList, i); + if (p->bytes == 0) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7); return false; } - nLen += pTagsList->p[i].bytes; + + nLen += p->bytes; } // max tag row length must be less than TSDB_MAX_TAGS_LEN @@ -951,37 +1015,41 @@ static bool validateTagParams(tFieldList* pTagsList, tFieldList* pFieldList, SSq } // field name must be unique - for (int32_t i = 0; i < pTagsList->nField; ++i) { - if (has(pFieldList, 0, pTagsList->p[i].name) == true) { + for (int32_t i = 0; i < numOfTags; ++i) { + TAOS_FIELD* p = taosArrayGet(pTagsList, i); + + if (has(pFieldList, 0, p->name) == true) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); return false; } } /* timestamp in tag is not allowed */ - for (int32_t i = 0; i < pTagsList->nField; ++i) { - if 
(pTagsList->p[i].type == TSDB_DATA_TYPE_TIMESTAMP) { + for (int32_t i = 0; i < numOfTags; ++i) { + TAOS_FIELD* p = taosArrayGet(pTagsList, i); + + if (p->type == TSDB_DATA_TYPE_TIMESTAMP) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); return false; } - if (pTagsList->p[i].type < TSDB_DATA_TYPE_BOOL || pTagsList->p[i].type > TSDB_DATA_TYPE_NCHAR) { + if (p->type < TSDB_DATA_TYPE_BOOL || p->type > TSDB_DATA_TYPE_NCHAR) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); return false; } - if ((pTagsList->p[i].type == TSDB_DATA_TYPE_BINARY && pTagsList->p[i].bytes <= 0) || - (pTagsList->p[i].type == TSDB_DATA_TYPE_NCHAR && pTagsList->p[i].bytes <= 0)) { + if ((p->type == TSDB_DATA_TYPE_BINARY && p->bytes <= 0) || + (p->type == TSDB_DATA_TYPE_NCHAR && p->bytes <= 0)) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7); return false; } - if (validateColumnName(pTagsList->p[i].name) != TSDB_CODE_SUCCESS) { + if (validateColumnName(p->name) != TSDB_CODE_SUCCESS) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6); return false; } - if (has(pTagsList, i + 1, pTagsList->p[i].name) == true) { + if (has(pTagsList, i + 1, p->name) == true) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); return false; } @@ -1128,9 +1196,10 @@ bool validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) { } /* is contained in pFieldList or not */ -static bool has(tFieldList* pFieldList, int32_t startIdx, const char* name) { - for (int32_t j = startIdx; j < pFieldList->nField; ++j) { - TAOS_FIELD* field = pFieldList->p + j; +static bool has(SArray* pFieldList, int32_t startIdx, const char* name) { + size_t numOfCols = taosArrayGetSize(pFieldList); + for (int32_t j = startIdx; j < numOfCols; ++j) { + TAOS_FIELD* field = taosArrayGet(pFieldList, j); if (strncasecmp(name, field->name, sizeof(field->name) - 1) == 0) return true; } @@ -1210,8 +1279,9 @@ static void tscInsertPrimaryTSSourceColumn(SQueryInfo* pQueryInfo, SColumnIndex* SColumnIndex tsCol = {.tableIndex = pIndex->tableIndex, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX}; tscColumnListInsert(pQueryInfo->colList, &tsCol); } + static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t exprIndex, tSQLExprItem* pItem) { - const char* msg1 = "invalid column name, or illegal column type"; + const char* msg1 = "invalid column name, illegal column type, or columns in arithmetic expression from two tables"; const char* msg2 = "invalid arithmetic expression in select clause"; const char* msg3 = "tag columns can not be used in arithmetic expression"; const char* msg4 = "columns from different table mixed up in arithmetic expression"; @@ -1241,7 +1311,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t SColumnIndex index = {.tableIndex = tableIndex}; SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_ARITHM, &index, TSDB_DATA_TYPE_DOUBLE, sizeof(double), - sizeof(double), false); + -1000, sizeof(double), false); char* name = (pItem->aliasName != NULL)? 
pItem->aliasName:pItem->pNode->token.z; size_t len = MIN(sizeof(pExpr->aliasName), pItem->pNode->token.n + 1); @@ -1257,6 +1327,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } + // check for if there is a tag in the arithmetic express size_t numOfNode = taosArrayGetSize(colList); for(int32_t k = 0; k < numOfNode; ++k) { SColIndex* pIndex = taosArrayGet(colList, k); @@ -1282,9 +1353,9 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t char* c = tbufGetData(&bw, false); // set the serialized binary string as the parameter of arithmetic expression - addExprParams(pExpr, c, TSDB_DATA_TYPE_BINARY, (int32_t)len, index.tableIndex); - + addExprParams(pExpr, c, TSDB_DATA_TYPE_BINARY, (int32_t)len); insertResultField(pQueryInfo, exprIndex, &columnList, sizeof(double), TSDB_DATA_TYPE_DOUBLE, pExpr->aliasName, pExpr); + // add ts column tscInsertPrimaryTSSourceColumn(pQueryInfo, &index); @@ -1316,6 +1387,10 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t pArithExprInfo->interBytes = sizeof(double); pArithExprInfo->type = TSDB_DATA_TYPE_DOUBLE; + pArithExprInfo->base.functionId = TSDB_FUNC_ARITHM; + pArithExprInfo->base.numOfParams = 1; + pArithExprInfo->base.resColId = getNewResColId(pQueryInfo); + int32_t ret = exprTreeFromSqlExpr(pCmd, &pArithExprInfo->pExpr, pItem->pNode, pQueryInfo, NULL, &pArithExprInfo->uid); if (ret != TSDB_CODE_SUCCESS) { tExprTreeDestroy(&pArithExprInfo->pExpr, NULL); @@ -1324,14 +1399,30 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t pInfo->pArithExprInfo = pArithExprInfo; } + + SBufferWriter bw = tbufInitWriter(NULL, false); + + TRY(0) { + exprTreeToBinary(&bw, pInfo->pArithExprInfo->pExpr); + } CATCH(code) { + tbufCloseWriter(&bw); + UNUSED(code); + // TODO: other error handling + } END_TRY + + SSqlFuncMsg* pFuncMsg = &pInfo->pArithExprInfo->base; + pFuncMsg->arg[0].argBytes = (int16_t) tbufTell(&bw); + pFuncMsg->arg[0].argValue.pz = tbufGetData(&bw, true); + pFuncMsg->arg[0].argType = TSDB_DATA_TYPE_BINARY; + +// tbufCloseWriter(&bw); // TODO there is a memory leak } return TSDB_CODE_SUCCESS; } - static void addProjectQueryCol(SQueryInfo* pQueryInfo, int32_t startPos, SColumnIndex* pIndex, tSQLExprItem* pItem) { - SSqlExpr* pExpr = doAddProjectCol(pQueryInfo, startPos, pIndex->columnIndex, pIndex->tableIndex); + SSqlExpr* pExpr = doAddProjectCol(pQueryInfo, pIndex->columnIndex, pIndex->tableIndex); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pIndex->tableIndex); STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; @@ -1363,13 +1454,13 @@ static void addPrimaryTsColIntoResult(SQueryInfo* pQueryInfo) { } } - SColumnIndex index = {0}; // set the constant column value always attached to first table. 
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, PRIMARYKEY_TIMESTAMP_COL_INDEX); // add the timestamp column into the output columns + SColumnIndex index = {0}; // primary timestamp column info int32_t numOfCols = (int32_t)tscSqlExprNumOfExprs(pQueryInfo); tscAddSpecialColumnForSelect(pQueryInfo, numOfCols, TSDB_FUNC_PRJ, &index, pSchema, TSDB_COL_NORMAL); @@ -1476,7 +1567,7 @@ int32_t insertResultField(SQueryInfo* pQueryInfo, int32_t outputIndex, SColumnLi return TSDB_CODE_SUCCESS; } -SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t outputIndex, int32_t colIndex, int32_t tableIndex) { +SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t colIndex, int32_t tableIndex) { STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, tableIndex); STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; int32_t numOfCols = tscGetNumOfColumns(pTableMeta); @@ -1488,20 +1579,22 @@ SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t outputIndex, int32_t c if (functionId == TSDB_FUNC_TAGPRJ) { index.columnIndex = colIndex - tscGetNumOfColumns(pTableMeta); - tscColumnListInsert(pTableMetaInfo->tagColList, &index); } else { index.columnIndex = colIndex; } - - return tscSqlExprAppend(pQueryInfo, functionId, &index, pSchema->type, pSchema->bytes, - pSchema->bytes, functionId == TSDB_FUNC_TAGPRJ); + + int16_t colId = getNewResColId(pQueryInfo); + return tscSqlExprAppend(pQueryInfo, functionId, &index, pSchema->type, pSchema->bytes, colId, pSchema->bytes, + (functionId == TSDB_FUNC_TAGPRJ)); } SSqlExpr* tscAddSpecialColumnForSelect(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId, SColumnIndex* pIndex, SSchema* pColSchema, int16_t flag) { + int16_t colId = getNewResColId(pQueryInfo); + SSqlExpr* pExpr = tscSqlExprInsert(pQueryInfo, outputColIndex, functionId, pIndex, pColSchema->type, - pColSchema->bytes, pColSchema->bytes, TSDB_COL_IS_TAG(flag)); + pColSchema->bytes, colId, pColSchema->bytes, TSDB_COL_IS_TAG(flag)); tstrncpy(pExpr->aliasName, pColSchema->name, sizeof(pExpr->aliasName)); SColumnList ids = getColumnList(1, pIndex->tableIndex, pIndex->columnIndex); @@ -1537,7 +1630,7 @@ static int32_t doAddProjectionExprAndResultFields(SQueryInfo* pQueryInfo, SColum } for (int32_t j = 0; j < numOfTotalColumns; ++j) { - SSqlExpr* pExpr = doAddProjectCol(pQueryInfo, startPos + j, j, pIndex->tableIndex); + SSqlExpr* pExpr = doAddProjectCol(pQueryInfo, j, pIndex->tableIndex); tstrncpy(pExpr->aliasName, pSchema[j].name, sizeof(pExpr->aliasName)); pIndex->columnIndex = j; @@ -1625,16 +1718,15 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t } static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSchema* pSchema, SConvertFunc cvtFunc, - char* aliasName, int32_t resColIdx, SColumnIndex* pColIndex, bool finalResult) { + const char* name, int32_t resColIdx, SColumnIndex* pColIndex, bool finalResult) { const char* msg1 = "not support column types"; int16_t type = 0; int16_t bytes = 0; - char columnName[TSDB_COL_NAME_LEN] = {0}; int32_t functionID = cvtFunc.execFuncId; if (functionID == TSDB_FUNC_SPREAD) { - int32_t t1 = pSchema[pColIndex->columnIndex].type; + int32_t t1 = pSchema->type; if (t1 == TSDB_DATA_TYPE_BINARY || t1 == TSDB_DATA_TYPE_NCHAR || t1 == TSDB_DATA_TYPE_BOOL) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); return -1; @@ -1643,18 +1735,12 @@ static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, 
SS bytes = tDataTypeDesc[type].nSize; } } else { - type = pSchema[pColIndex->columnIndex].type; - bytes = pSchema[pColIndex->columnIndex].bytes; + type = pSchema->type; + bytes = pSchema->bytes; } - if (aliasName != NULL) { - tstrncpy(columnName, aliasName, sizeof(columnName)); - } else { - getRevisedName(columnName, cvtFunc.originFuncId, sizeof(columnName) - 1, pSchema[pColIndex->columnIndex].name); - } - - SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, functionID, pColIndex, type, bytes, bytes, false); - tstrncpy(pExpr->aliasName, columnName, sizeof(pExpr->aliasName)); + SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, functionID, pColIndex, type, bytes, getNewResColId(pQueryInfo), bytes, false); + tstrncpy(pExpr->aliasName, name, tListLen(pExpr->aliasName)); if (cvtFunc.originFuncId == TSDB_FUNC_LAST_ROW && cvtFunc.originFuncId != functionID) { pExpr->colInfo.flag |= TSDB_COL_NULL; @@ -1674,7 +1760,7 @@ static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS // if it is not in the final result, do not add it SColumnList ids = getColumnList(1, pColIndex->tableIndex, pColIndex->columnIndex); if (finalResult) { - insertResultField(pQueryInfo, resColIdx, &ids, bytes, (int8_t)type, columnName, pExpr); + insertResultField(pQueryInfo, resColIdx, &ids, bytes, (int8_t)type, pExpr->aliasName, pExpr); } else { tscColumnListInsert(pQueryInfo->colList, &(ids.ids[0])); } @@ -1682,6 +1768,23 @@ static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS return TSDB_CODE_SUCCESS; } +void setResultColName(char* name, tSQLExprItem* pItem, int32_t functionId, SStrToken* pToken) { + if (pItem->aliasName != NULL) { + tstrncpy(name, pItem->aliasName, TSDB_COL_NAME_LEN); + } else { + char uname[TSDB_COL_NAME_LEN] = {0}; + int32_t len = MIN(pToken->n + 1, TSDB_COL_NAME_LEN); + tstrncpy(uname, pToken->z, len); + + int32_t size = TSDB_COL_NAME_LEN + tListLen(aAggs[functionId].aName) + 2 + 1; + char tmp[TSDB_COL_NAME_LEN + tListLen(aAggs[functionId].aName) + 2 + 1] = {0}; + + snprintf(tmp, size, "%s(%s)", aAggs[functionId].aName, uname); + + tstrncpy(name, tmp, TSDB_COL_NAME_LEN); + } +} + int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExprItem* pItem, bool finalResult) { STableMetaInfo* pTableMetaInfo = NULL; int32_t optr = pItem->pNode->nSQLOptr; @@ -1704,7 +1807,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col } int16_t functionID = 0; - if (changeFunctionID(optr, &functionID) != TSDB_CODE_SUCCESS) { + if (convertFunctionId(optr, &functionID) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } @@ -1730,7 +1833,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize; - pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size, false); + pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pQueryInfo), size, false); } else if (sqlOptr == TK_INTEGER) { // select count(1) from table1 char buf[8] = {0}; int64_t val = -1; @@ -1742,7 +1845,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col if (val == 1) { index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize; - pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size, false); + pExpr 
= tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pQueryInfo), size, false); } else { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } @@ -1762,12 +1865,12 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col } int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize; - pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size, isTag); + pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pQueryInfo), size, isTag); } } else { // count(*) is equalled to count(primary_timestamp_key) index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize; - pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size, false); + pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pQueryInfo), size, false); } pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); @@ -1840,7 +1943,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col int32_t intermediateResSize = 0; int16_t functionID = 0; - if (changeFunctionID(optr, &functionID) != TSDB_CODE_SUCCESS) { + if (convertFunctionId(optr, &functionID) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } @@ -1854,7 +1957,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col colIndex += 1; SColumnIndex indexTS = {.tableIndex = index.tableIndex, .columnIndex = 0}; SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &indexTS, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, - TSDB_KEYSIZE, false); + getNewResColId(pQueryInfo), TSDB_KEYSIZE, false); SColumnList ids = getColumnList(1, 0, 0); insertResultField(pQueryInfo, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS_DUMMY].aName, pExpr); @@ -1865,7 +1968,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6); } - SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, resultType, resultSize, resultSize, false); + SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, resultType, resultSize, getNewResColId(pQueryInfo), resultSize, false); if (optr == TK_LEASTSQUARES) { /* set the leastsquares parameters */ @@ -1874,14 +1977,14 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return TSDB_CODE_TSC_INVALID_SQL; } - addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, DOUBLE_BYTES, 0); + addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, DOUBLE_BYTES); memset(val, 0, tListLen(val)); if (tVariantDump(&pParamElem[2].pNode->val, val, TSDB_DATA_TYPE_DOUBLE, true) < 0) { return TSDB_CODE_TSC_INVALID_SQL; } - addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double), 0); + addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double)); } SColumnList ids = {0}; @@ -1911,7 +2014,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col bool requireAllFields = (pItem->pNode->pParam == NULL); int16_t functionID = 0; - if (changeFunctionID(optr, &functionID) != TSDB_CODE_SUCCESS) { + if (convertFunctionId(optr, &functionID) != TSDB_CODE_SUCCESS) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9); } @@ -1939,8 +2042,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col SColumnIndex index = 
COLUMN_INDEX_INITIALIZER; - if (pParamElem->pNode->nSQLOptr == TK_ALL) { - // select table.* + if (pParamElem->pNode->nSQLOptr == TK_ALL) { // select table.* SStrToken tmpToken = pParamElem->pNode->colInfo; if (getTableIndexByName(&tmpToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { @@ -1950,9 +2052,13 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); SSchema* pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta); + char name[TSDB_COL_NAME_LEN] = {0}; for (int32_t j = 0; j < tscGetNumOfColumns(pTableMetaInfo->pTableMeta); ++j) { index.columnIndex = j; - if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, cvtFunc, pItem->aliasName, colIndex++, &index, finalResult) != 0) { + SStrToken t = {.z = pSchema[j].name, .n = (uint32_t)strnlen(pSchema[j].name, TSDB_COL_NAME_LEN)}; + setResultColName(name, pItem, cvtFunc.originFuncId, &t); + + if (setExprInfoForFunctions(pCmd, pQueryInfo, &pSchema[j], cvtFunc, name, colIndex++, &index, finalResult) != 0) { return TSDB_CODE_TSC_INVALID_SQL; } } @@ -1963,14 +2069,18 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col } pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); - SSchema* pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta); // functions can not be applied to tags if ((index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) || (index.columnIndex < 0)) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6); } - if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, cvtFunc, pItem->aliasName, colIndex + i, &index, finalResult) != 0) { + char name[TSDB_COL_NAME_LEN] = {0}; + + SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex); + setResultColName(name, pItem, cvtFunc.originFuncId, &pParamElem->pNode->colInfo); + + if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, cvtFunc, name, colIndex + i, &index, finalResult) != 0) { return TSDB_CODE_TSC_INVALID_SQL; } @@ -2007,7 +2117,12 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col for (int32_t i = 0; i < tscGetNumOfColumns(pTableMetaInfo->pTableMeta); ++i) { SColumnIndex index = {.tableIndex = j, .columnIndex = i}; - if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, cvtFunc, pItem->aliasName, colIndex, &index, finalResult) != 0) { + + char name[TSDB_COL_NAME_LEN] = {0}; + SStrToken t = {.z = pSchema[i].name, .n = (uint32_t)strnlen(pSchema[i].name, TSDB_COL_NAME_LEN)}; + setResultColName(name, pItem, cvtFunc.originFuncId, &t); + + if (setExprInfoForFunctions(pCmd, pQueryInfo, &pSchema[index.columnIndex], cvtFunc, name, colIndex, &index, finalResult) != 0) { return TSDB_CODE_TSC_INVALID_SQL; } @@ -2088,14 +2203,14 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col * for dp = 100, it is max, */ int16_t functionId = 0; - if (changeFunctionID(optr, &functionId) != TSDB_CODE_SUCCESS) { + if (convertFunctionId(optr, &functionId) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } tscInsertPrimaryTSSourceColumn(pQueryInfo, &index); colIndex += 1; // the first column is ts - pExpr = tscSqlExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, resultSize, false); - addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double), 0); + pExpr = tscSqlExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pQueryInfo), resultSize, false); + addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, 
sizeof(double)); } else { tVariantDump(pVariant, val, TSDB_DATA_TYPE_BIGINT, true); @@ -2105,15 +2220,15 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col } int16_t functionId = 0; - if (changeFunctionID(optr, &functionId) != TSDB_CODE_SUCCESS) { + if (convertFunctionId(optr, &functionId) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } // todo REFACTOR // set the first column ts for top/bottom query SColumnIndex index1 = {0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; - pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, - TSDB_KEYSIZE, false); + pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, getNewResColId(pQueryInfo), + TSDB_KEYSIZE, false); tstrncpy(pExpr->aliasName, aAggs[TSDB_FUNC_TS].aName, sizeof(pExpr->aliasName)); const int32_t TS_COLUMN_INDEX = PRIMARYKEY_TIMESTAMP_COL_INDEX; @@ -2123,8 +2238,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col colIndex += 1; // the first column is ts - pExpr = tscSqlExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, resultSize, false); - addExprParams(pExpr, val, TSDB_DATA_TYPE_BIGINT, sizeof(int64_t), 0); + pExpr = tscSqlExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pQueryInfo), resultSize, false); + addExprParams(pExpr, val, TSDB_DATA_TYPE_BIGINT, sizeof(int64_t)); } memset(pExpr->aliasName, 0, tListLen(pExpr->aliasName)); @@ -2240,10 +2355,6 @@ void getColumnName(tSQLExprItem* pItem, char* resultFieldName, int32_t nameLengt } } -void getRevisedName(char* resultFieldName, int32_t functionId, int32_t maxLen, char* columnName) { - snprintf(resultFieldName, maxLen, "%s(%s)", aAggs[functionId].aName, columnName); -} - static bool isTablenameToken(SStrToken* token) { SStrToken tmpToken = *token; SStrToken tableToken = {0}; @@ -2321,6 +2432,8 @@ int32_t getTableIndexImpl(SStrToken* pTableToken, SQueryInfo* pQueryInfo, SColum if (pTableToken->n == 0) { // only one table and no table name prefix in column name if (pQueryInfo->numOfTables == 1) { pIndex->tableIndex = 0; + } else { + pIndex->tableIndex = COLUMN_INDEX_INITIAL_VAL; } return TSDB_CODE_SUCCESS; @@ -2368,7 +2481,7 @@ int32_t getColumnIndexByName(SSqlCmd* pCmd, const SStrToken* pToken, SQueryInfo* return doGetColumnIndexByName(pCmd, &tmpToken, pQueryInfo, pIndex); } -int32_t changeFunctionID(int32_t optr, int16_t* functionId) { +int32_t convertFunctionId(int32_t optr, int16_t* functionId) { switch (optr) { case TK_COUNT: *functionId = TSDB_FUNC_COUNT; @@ -2612,7 +2725,7 @@ int32_t tscTansformSQLFuncForSTableQuery(SQueryInfo* pQueryInfo) { } } - tscFieldInfoUpdateOffsetForInterResult(pQueryInfo); + tscFieldInfoUpdateOffset(pQueryInfo); return TSDB_CODE_SUCCESS; } @@ -2686,7 +2799,10 @@ bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery) { int32_t startIdx = 0; - + + size_t numOfExpr = tscSqlExprNumOfExprs(pQueryInfo); + assert(numOfExpr > 0); + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, startIdx); int32_t functionID = pExpr->functionId; @@ -2725,10 +2841,11 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery) { return true; } -int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd* pCmd) { +int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd) { const char* msg1 = "too many columns in group by 
clause"; const char* msg2 = "invalid column name in group by clause"; -// const char* msg3 = "group by columns must belong to one table"; + const char* msg3 = "columns from one table allowed as group by columns"; + const char* msg4 = "join query does not support group by"; const char* msg7 = "not support group by expression"; const char* msg8 = "not allowed column type for group by"; const char* msg9 = "tags not allowed for table query"; @@ -2744,19 +2861,26 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd* pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES); } - pQueryInfo->groupbyExpr.numOfGroupCols = pList->nExpr; - if (pList->nExpr > TSDB_MAX_TAGS) { + pQueryInfo->groupbyExpr.numOfGroupCols = (int16_t)taosArrayGetSize(pList); + if (pQueryInfo->groupbyExpr.numOfGroupCols > TSDB_MAX_TAGS) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } + if (pQueryInfo->numOfTables > 1) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); + } + STableMeta* pTableMeta = NULL; SSchema* pSchema = NULL; SSchema s = tscGetTbnameColumnSchema(); int32_t tableIndex = COLUMN_INDEX_INITIAL_VAL; - - for (int32_t i = 0; i < pList->nExpr; ++i) { - tVariant* pVar = &pList->a[i].pVar; + + size_t num = taosArrayGetSize(pList); + for (int32_t i = 0; i < num; ++i) { + tVariantListItem * pItem = taosArrayGet(pList, i); + tVariant* pVar = &pItem->pVar; + SStrToken token = {pVar->nLen, pVar->nType, pVar->pz}; SColumnIndex index = COLUMN_INDEX_INITIALIZER; @@ -2764,7 +2888,11 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd* return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } - tableIndex = index.tableIndex; + if (tableIndex == COLUMN_INDEX_INITIAL_VAL) { + tableIndex = index.tableIndex; + } else if (tableIndex != index.tableIndex) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); pTableMeta = pTableMetaInfo->pTableMeta; @@ -2803,7 +2931,7 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd* tscColumnListInsert(pTableMetaInfo->tagColList, &index); } else { // check if the column type is valid, here only support the bool/tinyint/smallint/bigint group by - if (pSchema->type > TSDB_DATA_TYPE_BINARY) { + if (pSchema->type == TSDB_DATA_TYPE_TIMESTAMP || pSchema->type == TSDB_DATA_TYPE_FLOAT || pSchema->type == TSDB_DATA_TYPE_DOUBLE) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg8); } @@ -2813,7 +2941,7 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd* taosArrayPush(pGroupExpr->columnInfo, &colIndex); pQueryInfo->groupbyExpr.orderType = TSDB_ORDER_ASC; - if (i == 0 && pList->nExpr > 1) { + if (i == 0 && num > 1) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7); } } @@ -2825,7 +2953,7 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd* void setColumnOffsetValueInResultset(SQueryInfo* pQueryInfo) { if (QUERY_IS_STABLE_QUERY(pQueryInfo->type)) { - tscFieldInfoUpdateOffsetForInterResult(pQueryInfo); + tscFieldInfoUpdateOffset(pQueryInfo); } else { tscFieldInfoUpdateOffset(pQueryInfo); } @@ -3349,7 +3477,8 @@ int32_t doArithmeticExprToString(tSQLExpr* pExpr, char** exprString) { return TSDB_CODE_SUCCESS; } -static int32_t validateSQLExpr(SSqlCmd* pCmd, tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, int32_t* type) { +static int32_t validateSQLExpr(SSqlCmd* pCmd, tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* 
pList, + int32_t* type, uint64_t* uid) { if (pExpr->nSQLOptr == TK_ID) { if (*type == NON_ARITHMEIC_EXPR) { *type = NORMAL_ARITHMETIC; @@ -3398,13 +3527,22 @@ static int32_t validateSQLExpr(SSqlCmd* pCmd, tSQLExpr* pExpr, SQueryInfo* pQuer } // Not supported data type in arithmetic expression + uint64_t id = -1; for(int32_t i = 0; i < inc; ++i) { SSqlExpr* p1 = tscSqlExprGet(pQueryInfo, i + outputIndex); int16_t t = p1->resType; if (t == TSDB_DATA_TYPE_BINARY || t == TSDB_DATA_TYPE_NCHAR || t == TSDB_DATA_TYPE_BOOL || t == TSDB_DATA_TYPE_TIMESTAMP) { return TSDB_CODE_TSC_INVALID_SQL; } + + if (i == 0) { + id = p1->uid; + } else if (id != p1->uid){ + return TSDB_CODE_TSC_INVALID_SQL; + } } + + *uid = id; } return TSDB_CODE_SUCCESS; @@ -3416,13 +3554,16 @@ static int32_t validateArithmeticSQLExpr(SSqlCmd* pCmd, tSQLExpr* pExpr, SQueryI } tSQLExpr* pLeft = pExpr->pLeft; + uint64_t uidLeft = 0; + uint64_t uidRight = 0; + if (pLeft->nSQLOptr >= TK_PLUS && pLeft->nSQLOptr <= TK_REM) { int32_t ret = validateArithmeticSQLExpr(pCmd, pLeft, pQueryInfo, pList, type); if (ret != TSDB_CODE_SUCCESS) { return ret; } } else { - int32_t ret = validateSQLExpr(pCmd, pLeft, pQueryInfo, pList, type); + int32_t ret = validateSQLExpr(pCmd, pLeft, pQueryInfo, pList, type, &uidLeft); if (ret != TSDB_CODE_SUCCESS) { return ret; } @@ -3435,10 +3576,15 @@ static int32_t validateArithmeticSQLExpr(SSqlCmd* pCmd, tSQLExpr* pExpr, SQueryI return ret; } } else { - int32_t ret = validateSQLExpr(pCmd, pRight, pQueryInfo, pList, type); + int32_t ret = validateSQLExpr(pCmd, pRight, pQueryInfo, pList, type, &uidRight); if (ret != TSDB_CODE_SUCCESS) { return ret; } + + // the expression not from the same table, return error + if (uidLeft != uidRight && uidLeft != 0 && uidRight != 0) { + return TSDB_CODE_TSC_INVALID_SQL; + } } return TSDB_CODE_SUCCESS; @@ -3806,9 +3952,6 @@ static void doExtractExprForSTable(SSqlCmd* pCmd, tSQLExpr** pExpr, SQueryInfo* return; } - SStrToken t = {0}; - extractTableNameFromToken(&pLeft->colInfo, &t); - *pOut = *pExpr; (*pExpr) = NULL; @@ -3900,7 +4043,7 @@ static int32_t setTableCondForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t ret = setObjFullName(idBuf, account, &dbToken, &t, &xlen); if (ret != TSDB_CODE_SUCCESS) { taosStringBuilderDestroy(&sb1); - taosTFree(segments); + tfree(segments); invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); return ret; @@ -3913,7 +4056,7 @@ static int32_t setTableCondForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, pQueryInfo->tagCond.tbnameCond.cond = strdup(str); taosStringBuilderDestroy(&sb1); - taosTFree(segments); + tfree(segments); return TSDB_CODE_SUCCESS; } @@ -4043,7 +4186,7 @@ static void cleanQueryExpr(SCondExpr* pCondExpr) { static void doAddJoinTagsColumnsIntoTagList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondExpr* pCondExpr) { STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); if (QUERY_IS_JOIN_QUERY(pQueryInfo->type) && UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { - SColumnIndex index = {0}; + SColumnIndex index = COLUMN_INDEX_INITIALIZER; if (getColumnIndexByName(pCmd, &pCondExpr->pJoinExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { tscError("%p: invalid column name (left)", pQueryInfo); @@ -4309,8 +4452,8 @@ int32_t tsRewriteFieldNameIfNecessary(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { } int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySQL) { - tVariantList* pFillToken = pQuerySQL->fillType; - tVariantListItem* pItem = &pFillToken->a[0]; + SArray* pFillToken = 
pQuerySQL->fillType; + tVariantListItem* pItem = taosArrayGet(pFillToken, 0); const int32_t START_INTERPO_COL_IDX = 1; @@ -4322,7 +4465,7 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } - size_t size = tscSqlExprNumOfExprs(pQueryInfo); + size_t size = tscNumOfFields(pQueryInfo); if (pQueryInfo->fillVal == NULL) { pQueryInfo->fillVal = calloc(size, sizeof(int64_t)); @@ -4336,12 +4479,8 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery } else if (strncasecmp(pItem->pVar.pz, "null", 4) == 0 && pItem->pVar.nLen == 4) { pQueryInfo->fillType = TSDB_FILL_NULL; for (int32_t i = START_INTERPO_COL_IDX; i < size; ++i) { - TAOS_FIELD* pFields = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); - if (pFields->type == TSDB_DATA_TYPE_BINARY || pFields->type == TSDB_DATA_TYPE_NCHAR) { - setVardataNull((char*) &pQueryInfo->fillVal[i], pFields->type); - } else { - setNull((char*)&pQueryInfo->fillVal[i], pFields->type, pFields->bytes); - }; + TAOS_FIELD* pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); + setNull((char*)&pQueryInfo->fillVal[i], pField->type, pField->bytes); } } else if (strncasecmp(pItem->pVar.pz, "prev", 4) == 0 && pItem->pVar.nLen == 4) { pQueryInfo->fillType = TSDB_FILL_PREV; @@ -4350,12 +4489,13 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery } else if (strncasecmp(pItem->pVar.pz, "value", 5) == 0 && pItem->pVar.nLen == 5) { pQueryInfo->fillType = TSDB_FILL_SET_VALUE; - if (pFillToken->nExpr == 1) { + size_t num = taosArrayGetSize(pFillToken); + if (num == 1) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } int32_t startPos = 1; - int32_t numOfFillVal = pFillToken->nExpr - 1; + int32_t numOfFillVal = (int32_t)(num - 1); /* for point interpolation query, we do not have the timestamp column */ if (tscIsPointInterpQuery(pQueryInfo)) { @@ -4365,35 +4505,36 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery numOfFillVal = (int32_t)size; } } else { - numOfFillVal = (pFillToken->nExpr > (int32_t)size) ? (int32_t)size : pFillToken->nExpr; + numOfFillVal = (int16_t)((num > (int32_t)size) ? 
(int32_t)size : num); } int32_t j = 1; for (int32_t i = startPos; i < numOfFillVal; ++i, ++j) { - TAOS_FIELD* pFields = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); + TAOS_FIELD* pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); - if (pFields->type == TSDB_DATA_TYPE_BINARY || pFields->type == TSDB_DATA_TYPE_NCHAR) { - setVardataNull((char*) &pQueryInfo->fillVal[i], pFields->type); + if (pField->type == TSDB_DATA_TYPE_BINARY || pField->type == TSDB_DATA_TYPE_NCHAR) { + setVardataNull((char*) &pQueryInfo->fillVal[i], pField->type); continue; } - int32_t ret = tVariantDump(&pFillToken->a[j].pVar, (char*)&pQueryInfo->fillVal[i], pFields->type, true); + tVariant* p = taosArrayGet(pFillToken, j); + int32_t ret = tVariantDump(p, (char*)&pQueryInfo->fillVal[i], pField->type, true); if (ret != TSDB_CODE_SUCCESS) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); } } - if ((pFillToken->nExpr < size) || ((pFillToken->nExpr - 1 < size) && (tscIsPointInterpQuery(pQueryInfo)))) { - tVariantListItem* lastItem = &pFillToken->a[pFillToken->nExpr - 1]; + if ((num < size) || ((num - 1 < size) && (tscIsPointInterpQuery(pQueryInfo)))) { + tVariantListItem* lastItem = taosArrayGetLast(pFillToken); for (int32_t i = numOfFillVal; i < size; ++i) { - TAOS_FIELD* pFields = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); + TAOS_FIELD* pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); - if (pFields->type == TSDB_DATA_TYPE_BINARY || pFields->type == TSDB_DATA_TYPE_NCHAR) { - setVardataNull((char*) &pQueryInfo->fillVal[i], pFields->type); + if (pField->type == TSDB_DATA_TYPE_BINARY || pField->type == TSDB_DATA_TYPE_NCHAR) { + setVardataNull((char*) &pQueryInfo->fillVal[i], pField->type); } else { - tVariantDump(&lastItem->pVar, (char*)&pQueryInfo->fillVal[i], pFields->type, true); + tVariantDump(&lastItem->pVar, (char*)&pQueryInfo->fillVal[i], pField->type, true); } } } @@ -4424,8 +4565,8 @@ static void setDefaultOrderInfo(SQueryInfo* pQueryInfo) { int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema* pSchema) { const char* msg0 = "only support order by primary timestamp"; const char* msg1 = "invalid column name"; - const char* msg2 = "only support order by primary timestamp and queried column"; - const char* msg3 = "only support order by primary timestamp and first tag in groupby clause"; + const char* msg2 = "only support order by primary timestamp or queried column"; + const char* msg3 = "only support order by primary timestamp or first tag in groupby clause"; setDefaultOrderInfo(pQueryInfo); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); @@ -4434,7 +4575,7 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu return TSDB_CODE_SUCCESS; } - tVariantList* pSortorder = pQuerySql->pSortOrder; + SArray* pSortorder = pQuerySql->pSortOrder; /* * for table query, there is only one or none order option is allowed, which is the @@ -4442,18 +4583,19 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu * * for super table query, the order option must be less than 3. 
*/ + size_t size = taosArrayGetSize(pSortorder); if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) { - if (pSortorder->nExpr > 1) { + if (size > 1) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0); } } else { - if (pSortorder->nExpr > 2) { + if (size > 2) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } } // handle the first part of order by - tVariant* pVar = &pSortorder->a[0].pVar; + tVariant* pVar = taosArrayGet(pSortorder, 0); // e.g., order by 1 asc, return directly with out further check. if (pVar->nType >= TSDB_DATA_TYPE_TINYINT && pVar->nType <= TSDB_DATA_TYPE_BIGINT) { @@ -4461,7 +4603,7 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu } SStrToken columnName = {pVar->nLen, pVar->nType, pVar->pz}; - SColumnIndex index = {0}; + SColumnIndex index = COLUMN_INDEX_INITIALIZER; if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { // super table query if (getColumnIndexByName(pCmd, &columnName, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { @@ -4496,10 +4638,13 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu assert(!(orderByTags && orderByTS)); } - if (pSortorder->nExpr == 1) { + size_t s = taosArrayGetSize(pSortorder); + if (s == 1) { if (orderByTags) { pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); - pQueryInfo->groupbyExpr.orderType = pQuerySql->pSortOrder->a[0].sortOrder; + + tVariantListItem* p1 = taosArrayGet(pQuerySql->pSortOrder, 0); + pQueryInfo->groupbyExpr.orderType = p1->sortOrder; } else if (isTopBottomQuery(pQueryInfo)) { /* order of top/bottom query in interval is not valid */ SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0); @@ -4510,11 +4655,14 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } - pQueryInfo->order.order = pQuerySql->pSortOrder->a[0].sortOrder; + tVariantListItem* p1 = taosArrayGet(pQuerySql->pSortOrder, 0); + pQueryInfo->order.order = p1->sortOrder; pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; return TSDB_CODE_SUCCESS; } else { - pQueryInfo->order.order = pSortorder->a[0].sortOrder; + tVariantListItem* p1 = taosArrayGet(pQuerySql->pSortOrder, 0); + + pQueryInfo->order.order = p1->sortOrder; pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; // orderby ts query on super table @@ -4524,16 +4672,18 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu } } - if (pSortorder->nExpr == 2) { + if (s == 2) { + tVariantListItem *pItem = taosArrayGet(pQuerySql->pSortOrder, 0); if (orderByTags) { pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); - pQueryInfo->groupbyExpr.orderType = pQuerySql->pSortOrder->a[0].sortOrder; + pQueryInfo->groupbyExpr.orderType = pItem->sortOrder; } else { - pQueryInfo->order.order = pSortorder->a[0].sortOrder; + pQueryInfo->order.order = pItem->sortOrder; pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; } - tVariant* pVar2 = &pSortorder->a[1].pVar; + pItem = taosArrayGet(pQuerySql->pSortOrder, 1); + tVariant* pVar2 = &pItem->pVar; SStrToken cname = {pVar2->nLen, pVar2->nType, pVar2->pz}; if (getColumnIndexByName(pCmd, &cname, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); @@ -4542,7 +4692,8 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu if (index.columnIndex != 
PRIMARYKEY_TIMESTAMP_COL_INDEX) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } else { - pQueryInfo->order.order = pSortorder->a[1].sortOrder; + tVariantListItem* p1 = taosArrayGet(pSortorder, 1); + pQueryInfo->order.order = p1->sortOrder; pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; } } @@ -4566,12 +4717,14 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } - pQueryInfo->order.order = pQuerySql->pSortOrder->a[0].sortOrder; + tVariantListItem* pItem = taosArrayGet(pQuerySql->pSortOrder, 0); + pQueryInfo->order.order = pItem->sortOrder; pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; return TSDB_CODE_SUCCESS; } - pQueryInfo->order.order = pQuerySql->pSortOrder->a[0].sortOrder; + tVariantListItem* pItem = taosArrayGet(pQuerySql->pSortOrder, 0); + pQueryInfo->order.order = pItem->sortOrder; } return TSDB_CODE_SUCCESS; @@ -4639,27 +4792,28 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { } if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN) { - tFieldList* pFieldList = pAlterSQL->pAddColumns; - if (pFieldList->nField > 1) { + SArray* pFieldList = pAlterSQL->pAddColumns; + if (taosArrayGetSize(pFieldList) > 1) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); } - if (!validateOneTags(pCmd, &pFieldList->p[0])) { + TAOS_FIELD* p = taosArrayGet(pFieldList, 0); + if (!validateOneTags(pCmd, p)) { return TSDB_CODE_TSC_INVALID_SQL; } - tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &pFieldList->p[0]); + tscFieldInfoAppend(&pQueryInfo->fieldsInfo, p); } else if (pAlterSQL->type == TSDB_ALTER_TABLE_DROP_TAG_COLUMN) { if (tscGetNumOfTags(pTableMeta) == 1) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7); } // numOfTags == 1 - if (pAlterSQL->varList->nExpr > 1) { + if (taosArrayGetSize(pAlterSQL->varList) > 1) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg8); } - tVariantListItem* pItem = &pAlterSQL->varList->a[0]; + tVariantListItem* pItem = taosArrayGet(pAlterSQL->varList, 0); if (pItem->pVar.nLen >= TSDB_COL_NAME_LEN) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9); } @@ -4684,13 +4838,13 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name1, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); } else if (pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN) { - tVariantList* pVarList = pAlterSQL->varList; - if (pVarList->nExpr > 2) { + SArray* pVarList = pAlterSQL->varList; + if (taosArrayGetSize(pVarList) > 2) { return TSDB_CODE_TSC_INVALID_SQL; } - tVariantListItem* pSrcItem = &pAlterSQL->varList->a[0]; - tVariantListItem* pDstItem = &pAlterSQL->varList->a[1]; + tVariantListItem* pSrcItem = taosArrayGet(pAlterSQL->varList, 0); + tVariantListItem* pDstItem = taosArrayGet(pAlterSQL->varList, 1); if (pSrcItem->pVar.nLen >= TSDB_COL_NAME_LEN || pDstItem->pVar.nLen >= TSDB_COL_NAME_LEN) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9); @@ -4713,13 +4867,17 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg19); } + tVariantListItem* pItem = taosArrayGet(pVarList, 0); + char name[TSDB_COL_NAME_LEN] = {0}; - strncpy(name, pVarList->a[0].pVar.pz, pVarList->a[0].pVar.nLen); + strncpy(name, pItem->pVar.pz, pItem->pVar.nLen); TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name, 
tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); + pItem = taosArrayGet(pVarList, 1); memset(name, 0, tListLen(name)); - strncpy(name, pVarList->a[1].pVar.pz, pVarList->a[1].pVar.nLen); + + strncpy(name, pItem->pVar.pz, pItem->pVar.nLen); f = tscCreateField(TSDB_DATA_TYPE_INT, name, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); } else if (pAlterSQL->type == TSDB_ALTER_TABLE_UPDATE_TAG_VAL) { @@ -4727,12 +4885,12 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { // the following is used to handle tags value for table created according to super table pCmd->command = TSDB_SQL_UPDATE_TAGS_VAL; - tVariantList* pVarList = pAlterSQL->varList; - tVariant* pTagName = &pVarList->a[0].pVar; + SArray* pVarList = pAlterSQL->varList; + tVariantListItem* item = taosArrayGet(pVarList, 0); int16_t numOfTags = tscGetNumOfTags(pTableMeta); SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER; - SStrToken name = {.type = TK_STRING, .z = pTagName->pz, .n = pTagName->nLen}; + SStrToken name = {.type = TK_STRING, .z = item->pVar.pz, .n = item->pVar.nLen}; if (getColumnIndexByName(pCmd, &name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } @@ -4741,8 +4899,9 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg12); } + tVariantListItem* pItem = taosArrayGet(pVarList, 1); SSchema* pTagsSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex.columnIndex); - if (tVariantDump(&pVarList->a[1].pVar, pAlterSQL->tagData.data, pTagsSchema->type, true) != TSDB_CODE_SUCCESS) { + if (tVariantDump(&pItem->pVar, pAlterSQL->tagData.data, pTagsSchema->type, true) != TSDB_CODE_SUCCESS) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg13); } @@ -4787,7 +4946,8 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { } // copy the tag value to msg body - tVariantDump(&pVarList->a[1].pVar, pUpdateMsg->data + schemaLen, pTagsSchema->type, true); + pItem = taosArrayGet(pVarList, 1); + tVariantDump(&pItem->pVar, pUpdateMsg->data + schemaLen, pTagsSchema->type, true); int32_t len = 0; if (pTagsSchema->type != TSDB_DATA_TYPE_BINARY && pTagsSchema->type != TSDB_DATA_TYPE_NCHAR) { @@ -4802,27 +4962,29 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { pUpdateMsg->head.contLen = htonl(total); } else if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_COLUMN) { - tFieldList* pFieldList = pAlterSQL->pAddColumns; - if (pFieldList->nField > 1) { + SArray* pFieldList = pAlterSQL->pAddColumns; + if (taosArrayGetSize(pFieldList) > 1) { const char* msg = "only support add one column"; return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); } - if (!validateOneColumn(pCmd, &pFieldList->p[0])) { + TAOS_FIELD* p = taosArrayGet(pFieldList, 0); + if (!validateOneColumn(pCmd, p)) { return TSDB_CODE_TSC_INVALID_SQL; } - tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &pFieldList->p[0]); + tscFieldInfoAppend(&pQueryInfo->fieldsInfo, p); } else if (pAlterSQL->type == TSDB_ALTER_TABLE_DROP_COLUMN) { if (tscGetNumOfColumns(pTableMeta) == TSDB_MIN_COLUMNS) { // return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg15); } - if (pAlterSQL->varList->nExpr > 1) { + size_t size = taosArrayGetSize(pAlterSQL->varList); + if (size > 1) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg16); } - tVariantListItem* pItem = &pAlterSQL->varList->a[0]; + tVariantListItem* pItem = 
taosArrayGet(pAlterSQL->varList, 0); SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER; SStrToken name = {.type = TK_STRING, .z = pItem->pVar.pz, .n = pItem->pVar.nLen}; @@ -5186,28 +5348,35 @@ int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t clauseIn return TSDB_CODE_SUCCESS; } -static int32_t setKeepOption(SSqlCmd* pCmd, SCMCreateDbMsg* pMsg, SCreateDBInfo* pCreateDb) { +static int32_t setKeepOption(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDBInfo* pCreateDb) { const char* msg = "invalid number of options"; pMsg->daysToKeep = htonl(-1); pMsg->daysToKeep1 = htonl(-1); pMsg->daysToKeep2 = htonl(-1); - tVariantList* pKeep = pCreateDb->keep; + SArray* pKeep = pCreateDb->keep; if (pKeep != NULL) { - switch (pKeep->nExpr) { - case 1: - pMsg->daysToKeep = htonl((int32_t)pKeep->a[0].pVar.i64Key); + size_t s = taosArrayGetSize(pKeep); + tVariantListItem* p0 = taosArrayGet(pKeep, 0); + switch (s) { + case 1: { + pMsg->daysToKeep = htonl((int32_t)p0->pVar.i64Key); + } break; case 2: { - pMsg->daysToKeep = htonl((int32_t)pKeep->a[0].pVar.i64Key); - pMsg->daysToKeep1 = htonl((int32_t)pKeep->a[1].pVar.i64Key); + tVariantListItem* p1 = taosArrayGet(pKeep, 1); + pMsg->daysToKeep = htonl((int32_t)p0->pVar.i64Key); + pMsg->daysToKeep1 = htonl((int32_t)p1->pVar.i64Key); break; } case 3: { - pMsg->daysToKeep = htonl((int32_t)pKeep->a[0].pVar.i64Key); - pMsg->daysToKeep1 = htonl((int32_t)pKeep->a[1].pVar.i64Key); - pMsg->daysToKeep2 = htonl((int32_t)pKeep->a[2].pVar.i64Key); + tVariantListItem* p1 = taosArrayGet(pKeep, 1); + tVariantListItem* p2 = taosArrayGet(pKeep, 2); + + pMsg->daysToKeep = htonl((int32_t)p0->pVar.i64Key); + pMsg->daysToKeep1 = htonl((int32_t)p1->pVar.i64Key); + pMsg->daysToKeep2 = htonl((int32_t)p2->pVar.i64Key); break; } default: { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); } @@ -5217,7 +5386,7 @@ static int32_t setKeepOption(SSqlCmd* pCmd, SCMCreateDbMsg* pMsg, SCreateDBInfo* return TSDB_CODE_SUCCESS; } -static int32_t setTimePrecision(SSqlCmd* pCmd, SCMCreateDbMsg* pMsg, SCreateDBInfo* pCreateDbInfo) { +static int32_t setTimePrecision(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDBInfo* pCreateDbInfo) { const char* msg = "invalid time precision"; pMsg->precision = TSDB_TIME_PRECISION_MILLI; // millisecond by default @@ -5241,7 +5410,7 @@ static int32_t setTimePrecision(SSqlCmd* pCmd, SCMCreateDbMsg* pMsg, SCreateDBIn return TSDB_CODE_SUCCESS; } -static void setCreateDBOption(SCMCreateDbMsg* pMsg, SCreateDBInfo* pCreateDb) { +static void setCreateDBOption(SCreateDbMsg* pMsg, SCreateDBInfo* pCreateDb) { pMsg->maxTables = htonl(-1); // max tables can not be set anymore pMsg->cacheBlockSize = htonl(pCreateDb->cacheBlockSize); pMsg->totalBlocks = htonl(pCreateDb->numOfBlocks); @@ -5255,10 +5424,11 @@ static void setCreateDBOption(SCMCreateDbMsg* pMsg, SCreateDBInfo* pCreateDb) { pMsg->replications = pCreateDb->replica; pMsg->quorum = pCreateDb->quorum; pMsg->ignoreExist = pCreateDb->ignoreExists; + pMsg->update = pCreateDb->update; } int32_t parseCreateDBOptions(SSqlCmd* pCmd, SCreateDBInfo* pCreateDbSql) { - SCMCreateDbMsg* pMsg = (SCMCreateDbMsg*)(pCmd->payload); + SCreateDbMsg* pMsg = (SCreateDbMsg *)(pCmd->payload); setCreateDBOption(pMsg, pCreateDbSql); if (setKeepOption(pCmd, pMsg, pCreateDbSql) != TSDB_CODE_SUCCESS) { @@ -5281,21 +5451,27 @@ void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t subClau if (pParentQueryInfo->groupbyExpr.numOfGroupCols > 0) { SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 
subClauseIndex); + SSqlExpr* pExpr = NULL; + size_t size = taosArrayGetSize(pQueryInfo->exprList); - - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, (int32_t)size - 1); + if (size > 0) { + pExpr = tscSqlExprGet(pQueryInfo, (int32_t)size - 1); + } - if (pExpr->functionId != TSDB_FUNC_TAG) { - STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, tableIndex); - int16_t columnInfo = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->id.uid); - SColumnIndex index = {.tableIndex = 0, .columnIndex = columnInfo}; - SSchema* pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta); + if (pExpr == NULL || pExpr->functionId != TSDB_FUNC_TAG) { + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pParentQueryInfo, tableIndex); - int16_t type = pSchema[index.columnIndex].type; - int16_t bytes = pSchema[index.columnIndex].bytes; - char* name = pSchema[index.columnIndex].name; - - pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TAG, &index, type, bytes, bytes, true); + int16_t colId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->id.uid); + + SSchema* pTagSchema = tscGetColumnSchemaById(pTableMetaInfo->pTableMeta, colId); + int16_t colIndex = tscGetTagColIndexById(pTableMetaInfo->pTableMeta, colId); + SColumnIndex index = {.tableIndex = 0, .columnIndex = colIndex}; + + char* name = pTagSchema->name; + int16_t type = pTagSchema->type; + int16_t bytes = pTagSchema->bytes; + + pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TAG, &index, type, bytes, getNewResColId(pQueryInfo), bytes, true); pExpr->colInfo.flag = TSDB_COL_TAG; // NOTE: tag column does not add to source column list @@ -5332,7 +5508,9 @@ void doAddGroupColumnForSubquery(SQueryInfo* pQueryInfo, int32_t tagIndex) { tscAddSpecialColumnForSelect(pQueryInfo, (int32_t)size, TSDB_FUNC_PRJ, &colIndex, pSchema, TSDB_COL_NORMAL); - SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, (int32_t)size); + int32_t numOfFields = tscNumOfFields(pQueryInfo); + SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, numOfFields - 1); + doLimitOutputNormalColOfGroupby(pInfo->pSqlExpr); pInfo->visible = false; } @@ -5598,7 +5776,7 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo if (TSDB_COL_IS_TAG(pColIndex->flag)) { SColumnIndex index = {.tableIndex = pQueryInfo->groupbyExpr.tableIndex, .columnIndex = colIndex}; - SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TAG, &index, type, bytes, bytes, true); + SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TAG, &index, type, bytes, getNewResColId(pQueryInfo), bytes, true); memset(pExpr->aliasName, 0, sizeof(pExpr->aliasName)); tstrncpy(pExpr->aliasName, name, sizeof(pExpr->aliasName)); @@ -5761,7 +5939,7 @@ int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ SColumnIndex ind = {0}; SSqlExpr* pExpr1 = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TAG_DUMMY, &ind, TSDB_DATA_TYPE_INT, - tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize, false); + tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize, getNewResColId(pQueryInfo), tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize, false); const char* name = (pExprList->a[0].aliasName != NULL)? 
pExprList->a[0].aliasName:functionsInfo[index].name; tstrncpy(pExpr1->aliasName, name, tListLen(pExpr1->aliasName)); @@ -5770,7 +5948,7 @@ int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ } // can only perform the parameters based on the macro definitation -int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCMCreateDbMsg* pCreate) { +int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCreateDbMsg* pCreate) { char msg[512] = {0}; if (pCreate->walLevel != -1 && (pCreate->walLevel < TSDB_MIN_WAL_LEVEL || pCreate->walLevel > TSDB_MAX_WAL_LEVEL)) { @@ -5887,8 +6065,8 @@ int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSqlInfo* p SCreateTableSQL* pCreateTable = pInfo->pCreateTableInfo; - tFieldList* pFieldList = pCreateTable->colInfo.pColumns; - tFieldList* pTagList = pCreateTable->colInfo.pTagColumns; + SArray* pFieldList = pCreateTable->colInfo.pColumns; + SArray* pTagList = pCreateTable->colInfo.pTagColumns; assert(pFieldList != NULL); @@ -5910,18 +6088,23 @@ int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSqlInfo* p } int32_t col = 0; - for (; col < pFieldList->nField; ++col) { - tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &pFieldList->p[col]); + size_t numOfFields = taosArrayGetSize(pFieldList); + + for (; col < numOfFields; ++col) { + TAOS_FIELD* p = taosArrayGet(pFieldList, col); + tscFieldInfoAppend(&pQueryInfo->fieldsInfo, p); } - pCmd->numOfCols = (int16_t)pFieldList->nField; + pCmd->numOfCols = (int16_t)numOfFields; if (pTagList != NULL) { // create super table[optional] - for (int32_t i = 0; i < pTagList->nField; ++i) { - tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &pTagList->p[i]); + size_t numOfTags = taosArrayGetSize(pTagList); + for (int32_t i = 0; i < numOfTags; ++i) { + TAOS_FIELD* p = taosArrayGet(pTagList, i); + tscFieldInfoAppend(&pQueryInfo->fieldsInfo, p); } - pCmd->count = pTagList->nField; + pCmd->count =(int32_t) numOfTags; } return TSDB_CODE_SUCCESS; @@ -5962,14 +6145,15 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) { // get meter meta from mnode tstrncpy(pCreateTable->usingInfo.tagdata.name, pStableMeterMetaInfo->name, sizeof(pCreateTable->usingInfo.tagdata.name)); - tVariantList* pList = pInfo->pCreateTableInfo->usingInfo.pTagVals; + SArray* pList = pInfo->pCreateTableInfo->usingInfo.pTagVals; code = tscGetTableMeta(pSql, pStableMeterMetaInfo); if (code != TSDB_CODE_SUCCESS) { return code; } - if (tscGetNumOfTags(pStableMeterMetaInfo->pTableMeta) != pList->nExpr) { + size_t size = taosArrayGetSize(pList); + if (tscGetNumOfTags(pStableMeterMetaInfo->pTableMeta) != size) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); } @@ -5983,18 +6167,19 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) { } int32_t ret = TSDB_CODE_SUCCESS; - for (int32_t i = 0; i < pList->nExpr; ++i) { + for (int32_t i = 0; i < size; ++i) { SSchema* pSchema = &pTagSchema[i]; - + tVariantListItem* pItem = taosArrayGet(pList, i); + char tagVal[TSDB_MAX_TAGS_LEN]; if (pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) { - if (pList->a[i].pVar.nLen > pSchema->bytes) { + if (pItem->pVar.nLen > pSchema->bytes) { tdDestroyKVRowBuilder(&kvRowBuilder); return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } } - ret = tVariantDump(&(pList->a[i].pVar), tagVal, pSchema->type, true); + ret = tVariantDump(&(pItem->pVar), tagVal, pSchema->type, true); // check again after the convert since it may be converted from binary to nchar. 
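For the tag binding in doCheckForCreateFromStable above, a binary or nchar literal is first checked against the schema-defined slot size, converted with tVariantDump(), and then, as the comment notes, checked once more because the conversion may turn a binary literal into nchar. A minimal sketch of the pre-conversion check, assuming only the tVariantListItem and SSchema fields that the patch itself touches (the helper name is hypothetical):

// Hypothetical helper, for illustration only: returns 0 if the literal fits the tag slot.
static int32_t checkTagValueLen(const tVariantListItem* pItem, const SSchema* pSchema) {
  if (pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) {
    // variable-length tag: the literal must fit into the slot declared by the super table
    return (pItem->pVar.nLen > pSchema->bytes) ? -1 : 0;
  }
  return 0;  // fixed-length types are handled by the tVariantDump() conversion itself
}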
if (pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) { @@ -6061,13 +6246,13 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - tVariantList* pSrcMeterName = pInfo->pCreateTableInfo->pSelect->from; - if (pSrcMeterName == NULL || pSrcMeterName->nExpr == 0) { + SArray* pSrcMeterName = pInfo->pCreateTableInfo->pSelect->from; + if (pSrcMeterName == NULL || taosArrayGetSize(pSrcMeterName) == 0) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6); } - tVariant* pVar = &pSrcMeterName->a[0].pVar; - SStrToken srcToken = {.z = pVar->pz, .n = pVar->nLen, .type = TK_STRING}; + tVariantListItem* p1 = taosArrayGet(pSrcMeterName, 0); + SStrToken srcToken = {.z = p1->pVar.pz, .n = p1->pVar.nLen, .type = TK_STRING}; if (tscValidateName(&srcToken) != TSDB_CODE_SUCCESS) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } @@ -6094,7 +6279,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) { } // set interval value - if (parseIntervalClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) { + if (parseIntervalClause(pSql, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } else { if ((pQueryInfo->interval.interval > 0) && @@ -6132,7 +6317,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } - tVariantListItem* pItem = &pQuerySql->fillType->a[0]; + tVariantListItem* pItem = taosArrayGet(pQuerySql->fillType, 0); if (pItem->pVar.nType == TSDB_DATA_TYPE_BINARY) { if (!((strncmp(pItem->pVar.pz, "none", 4) == 0 && pItem->pVar.nLen == 4) || (strncmp(pItem->pVar.pz, "null", 4) == 0 && pItem->pVar.nLen == 4))) { @@ -6147,7 +6332,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) { } int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { - assert(pQuerySql != NULL && (pQuerySql->from == NULL || pQuerySql->from->nExpr > 0)); + assert(pQuerySql != NULL && (pQuerySql->from == NULL || taosArrayGetSize(pQuerySql->from) > 0)); const char* msg0 = "invalid table name"; const char* msg2 = "point interpolation query needs timestamp"; @@ -6189,19 +6374,21 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { return doLocalQueryProcess(pCmd, pQueryInfo, pQuerySql); } - if (pQuerySql->from->nExpr > TSDB_MAX_JOIN_TABLE_NUM * 2) { + size_t fromSize = taosArrayGetSize(pQuerySql->from); + if (fromSize > TSDB_MAX_JOIN_TABLE_NUM * 2) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7); } pQueryInfo->command = TSDB_SQL_SELECT; - if (pQuerySql->from->nExpr > 4) { + if (fromSize > 4) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg10); } // set all query tables, which are maybe more than one. 
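The loop that follows walks the FROM list two entries at a time: the parser stores every table name next to its alias, which is why the size is compared against TSDB_MAX_JOIN_TABLE_NUM * 2 above and why the later assert divides the array size by two. A minimal sketch of that pairing, assuming the same SArray helpers the patch already uses (the function name is hypothetical):

// Hypothetical walk over the FROM list: entries come in (table name, alias) pairs.
static void walkFromList(SArray* pFrom) {
  size_t n = taosArrayGetSize(pFrom);                       // expected to be even
  for (size_t i = 0; i + 1 < n; i += 2) {
    tVariantListItem* pName  = taosArrayGet(pFrom, i);      // table name token
    tVariantListItem* pAlias = taosArrayGet(pFrom, i + 1);  // alias token, may repeat the name
    (void)pName; (void)pAlias;                              // resolve table meta, record alias, ...
  }
}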
- for (int32_t i = 0; i < pQuerySql->from->nExpr; ) { - tVariant* pTableItem = &pQuerySql->from->a[i].pVar; + for (int32_t i = 0; i < fromSize; ) { + tVariantListItem* item = taosArrayGet(pQuerySql->from, i); + tVariant* pTableItem = &item->pVar; if (pTableItem->nType != TSDB_DATA_TYPE_BINARY) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0); @@ -6226,21 +6413,21 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { return code; } - tVariant* pTableItem1 = &pQuerySql->from->a[i + 1].pVar; - if (pTableItem1->nType != TSDB_DATA_TYPE_BINARY) { + tVariantListItem* p1 = taosArrayGet(pQuerySql->from, i + 1); + if (p1->pVar.nType != TSDB_DATA_TYPE_BINARY) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg11); } - SStrToken aliasName = {.z = pTableItem1->pz, .n = pTableItem1->nLen, .type = TK_STRING}; + SStrToken aliasName = {.z = p1->pVar.pz, .n = p1->pVar.nLen, .type = TK_STRING}; if (tscValidateName(&aliasName) != TSDB_CODE_SUCCESS) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg11); } // has no table alias name - if (memcmp(pTableItem->pz, pTableItem1->pz, pTableItem1->nLen) == 0) { + if (memcmp(pTableItem->pz, p1->pVar.pz, p1->pVar.nLen) == 0) { extractTableName(pTableMetaInfo1->name, pTableMetaInfo1->aliasName); } else { - tstrncpy(pTableMetaInfo1->aliasName, pTableItem1->pz, sizeof(pTableMetaInfo1->aliasName)); } code = tscGetTableMeta(pSql, pTableMetaInfo1); @@ -6251,7 +6438,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { i += 2; } - assert(pQueryInfo->numOfTables == pQuerySql->from->nExpr / 2); + assert(pQueryInfo->numOfTables == taosArrayGetSize(pQuerySql->from) / 2); bool isSTable = false; if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { @@ -6285,12 +6472,12 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { pQueryInfo->window.ekey = pQueryInfo->window.ekey / 1000; } } else { // set the time rang - if (pQuerySql->from->nExpr > 2) { // it is a join query, no wher clause is not allowed. + if (taosArrayGetSize(pQuerySql->from) > 2) { // it is a join query; a where clause is required.
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "condition missing for join query "); } } - int32_t joinQuery = (pQuerySql->from != NULL && pQuerySql->from->nExpr > 2); + int32_t joinQuery = (pQuerySql->from != NULL && taosArrayGetSize(pQuerySql->from) > 2); if (parseSelectClause(pCmd, index, pQuerySql->pSelection, isSTable, joinQuery) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; @@ -6302,7 +6489,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { } // set interval value - if (parseIntervalClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) { + if (parseIntervalClause(pSql, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } else { if ((pQueryInfo->interval.interval > 0) && @@ -6424,6 +6611,7 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSQLExpr* pS if (strcmp((*pExpr)->pSchema->name, p1->aliasName) == 0) { (*pExpr)->pSchema->type = (uint8_t)p1->resType; (*pExpr)->pSchema->bytes = p1->resBytes; + (*pExpr)->pSchema->colId = p1->resColId; if (uid != NULL) { *uid = p1->uid; @@ -6433,7 +6621,7 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSQLExpr* pS } } } else if (pSqlExpr->nSQLOptr == TK_ID) { // column name, normal column arithmetic expression - SColumnIndex index = {0}; + SColumnIndex index = COLUMN_INDEX_INITIALIZER; int32_t ret = getColumnIndexByName(pCmd, &pSqlExpr->colInfo, pQueryInfo, &index); if (ret != TSDB_CODE_SUCCESS) { return ret; @@ -6473,7 +6661,7 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSQLExpr* pS (*pExpr)->_node.pRight = pRight; SStrToken t = {.type = pSqlExpr->nSQLOptr}; - (*pExpr)->_node.optr = getBinaryExprOptr(&t); + (*pExpr)->_node.optr = convertOptr(&t); assert((*pExpr)->_node.optr != 0); diff --git a/src/client/src/tscSchemaUtil.c b/src/client/src/tscSchemaUtil.c index dfd707344c5ab3ec42b49859937f604fa477fe10..fcc93ffadc4f7dc62b79fffe245947799a770b40 100644 --- a/src/client/src/tscSchemaUtil.c +++ b/src/client/src/tscSchemaUtil.c @@ -118,7 +118,7 @@ SSchema* tscGetTableColumnSchema(const STableMeta* pTableMeta, int32_t colIndex) } // TODO for large number of columns, employ the binary search method -SSchema* tscGetTableColumnSchemaById(STableMeta* pTableMeta, int16_t colId) { +SSchema* tscGetColumnSchemaById(STableMeta* pTableMeta, int16_t colId) { STableComInfo tinfo = tscGetTableInfo(pTableMeta); for(int32_t i = 0; i < tinfo.numOfColumns + tinfo.numOfTags; ++i) { @@ -130,17 +130,7 @@ SSchema* tscGetTableColumnSchemaById(STableMeta* pTableMeta, int16_t colId) { return NULL; } -struct SSchema tscGetTbnameColumnSchema() { - struct SSchema s = { - .colId = TSDB_TBNAME_COLUMN_INDEX, - .type = TSDB_DATA_TYPE_BINARY, - .bytes = TSDB_TABLE_NAME_LEN - }; - - strcpy(s.name, TSQL_TBNAME_L); - return s; -} -static void tscInitCorVgroupInfo(SCMCorVgroupInfo *corVgroupInfo, SCMVgroupInfo *vgroupInfo) { +static void tscInitCorVgroupInfo(SCorVgroupInfo *corVgroupInfo, SVgroupInfo *vgroupInfo) { corVgroupInfo->version = 0; corVgroupInfo->inUse = 0; corVgroupInfo->numOfEps = vgroupInfo->numOfEps; @@ -166,7 +156,7 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size pTableMeta->id.tid = pTableMetaMsg->tid; pTableMeta->id.uid = pTableMetaMsg->uid; - SCMVgroupInfo* pVgroupInfo = &pTableMeta->vgroupInfo; + SVgroupInfo* pVgroupInfo = &pTableMeta->vgroupInfo; pVgroupInfo->numOfEps = pTableMetaMsg->vgroup.numOfEps; pVgroupInfo->vgId = pTableMetaMsg->vgroup.vgId; @@ -177,7 +167,7 @@ STableMeta* 
tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size pVgroupInfo->epAddr[i].port = pEpMsg->port; } - tscInitCorVgroupInfo(&pTableMeta->corVgroupInfo, &pTableMeta->vgroupInfo); + tscInitCorVgroupInfo(&pTableMeta->corVgroupInfo, pVgroupInfo); pTableMeta->sversion = pTableMetaMsg->sversion; pTableMeta->tversion = pTableMetaMsg->tversion; @@ -197,28 +187,6 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size return pTableMeta; } -/** - * the TableMeta data format in memory is as follows: - * - * +--------------------+ - * |STableMeta Body data| sizeof(STableMeta) - * +--------------------+ - * |Schema data | numOfTotalColumns * sizeof(SSchema) - * +--------------------+ - * |Tags data | tag_col_1.bytes + tag_col_2.bytes + .... - * +--------------------+ - * - * @param pTableMeta - * @return - */ -char* tsGetTagsValue(STableMeta* pTableMeta) { - int32_t offset = 0; -// int32_t numOfTotalCols = pTableMeta->numOfColumns + pTableMeta->numOfTags; -// uint32_t offset = sizeof(STableMeta) + numOfTotalCols * sizeof(SSchema); - - return ((char*)pTableMeta + offset); -} - // todo refactor UNUSED_FUNC static FORCE_INLINE char* skipSegments(char* input, char delim, int32_t num) { for (int32_t i = 0; i < num; ++i) { diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index a0841fa2344682facb4a6dc8666716e38a31ea1d..cbc5604a279fdb4fd0eab8e9cdb902e3c42a4832 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -48,7 +48,7 @@ static int32_t getWaitingTimeInterval(int32_t count) { return initial * (2<<(count - 2)); } -static void tscSetDnodeEpSet(SSqlObj* pSql, SCMVgroupInfo* pVgroupInfo) { +static void tscSetDnodeEpSet(SSqlObj* pSql, SVgroupInfo* pVgroupInfo) { assert(pSql != NULL && pVgroupInfo != NULL && pVgroupInfo->numOfEps > 0); SRpcEpSet* pEpSet = &pSql->epSet; @@ -100,7 +100,7 @@ void tscUpdateMgmtEpSet(SRpcEpSet *pEpSet) { tscMgmtEpSet.epSet = *pEpSet; taosCorEndWrite(&tscMgmtEpSet.version); } -static void tscDumpEpSetFromVgroupInfo(SCMCorVgroupInfo *pVgroupInfo, SRpcEpSet *pEpSet) { +static void tscDumpEpSetFromVgroupInfo(SCorVgroupInfo *pVgroupInfo, SRpcEpSet *pEpSet) { if (pVgroupInfo == NULL) { return;} taosCorBeginRead(&pVgroupInfo->version); int8_t inUse = pVgroupInfo->inUse; @@ -117,14 +117,14 @@ static void tscUpdateVgroupInfo(SSqlObj *pObj, SRpcEpSet *pEpSet) { SSqlCmd *pCmd = &pObj->cmd; STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0); if (pTableMetaInfo == NULL || pTableMetaInfo->pTableMeta == NULL) { return;} - SCMCorVgroupInfo *pVgroupInfo = &pTableMetaInfo->pTableMeta->corVgroupInfo; + SCorVgroupInfo *pVgroupInfo = &pTableMetaInfo->pTableMeta->corVgroupInfo; taosCorBeginWrite(&pVgroupInfo->version); tscDebug("before: Endpoint in use: %d", pVgroupInfo->inUse); pVgroupInfo->inUse = pEpSet->inUse; pVgroupInfo->numOfEps = pEpSet->numOfEps; for (int32_t i = 0; i < pVgroupInfo->numOfEps; i++) { - taosTFree(pVgroupInfo->epAddr[i].fqdn); + tfree(pVgroupInfo->epAddr[i].fqdn); pVgroupInfo->epAddr[i].fqdn = strndup(pEpSet->fqdn[i], tListLen(pEpSet->fqdn[i])); pVgroupInfo->epAddr[i].port = pEpSet->port[i]; } @@ -150,7 +150,7 @@ void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) { if (pObj == NULL) return; if (pObj != pObj->signature) { - tscError("heart beat msg, pObj:%p, signature:%p invalid", pObj, pObj->signature); + tscError("heartbeat msg, pObj:%p, signature:%p invalid", pObj, pObj->signature); return; } @@ -158,12 +158,12 @@ void 
tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) { SSqlRes *pRes = &pSql->res; if (code == 0) { - SCMHeartBeatRsp *pRsp = (SCMHeartBeatRsp *)pRes->pRsp; - SRpcEpSet * epSet = &pRsp->epSet; + SHeartBeatRsp *pRsp = (SHeartBeatRsp *)pRes->pRsp; + SRpcEpSet * epSet = &pRsp->epSet; if (epSet->numOfEps > 0) { tscEpSetHtons(epSet); tscUpdateMgmtEpSet(epSet); - } + } pSql->pTscObj->connId = htonl(pRsp->connId); @@ -175,44 +175,44 @@ void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) { if (pRsp->streamId) tscKillStream(pObj, htonl(pRsp->streamId)); } } else { - tscDebug("heartbeat failed, code:%s", tstrerror(code)); + tscDebug("%p heartbeat failed, code:%s", pObj->pHb, tstrerror(code)); } if (pObj->pHb != NULL) { int32_t waitingDuring = tsShellActivityTimer * 500; - tscDebug("%p start heartbeat in %dms", pSql, waitingDuring); + tscDebug("%p send heartbeat in %dms", pSql, waitingDuring); - taosTmrReset(tscProcessActivityTimer, waitingDuring, pObj, tscTmr, &pObj->pTimer); + taosTmrReset(tscProcessActivityTimer, waitingDuring, (void *)pObj->rid, tscTmr, &pObj->pTimer); } else { tscDebug("%p start to close tscObj:%p, not send heartbeat again", pSql, pObj); } } void tscProcessActivityTimer(void *handle, void *tmrId) { - STscObj *pObj = (STscObj *)handle; - if (pObj == NULL || pObj->signature != pObj) { - return; - } + int64_t rid = (int64_t) handle; + STscObj *pObj = taosAcquireRef(tscRefId, rid); + if (pObj == NULL) return; SSqlObj* pHB = pObj->pHb; - if (pObj->pTimer != tmrId || pHB == NULL) { - return; - } void** p = taosCacheAcquireByKey(tscObjCache, &pHB, sizeof(TSDB_CACHE_PTR_TYPE)); if (p == NULL) { tscWarn("%p HB object has been released already", pHB); + taosReleaseRef(tscRefId, pObj->rid); return; } assert(*pHB->self == pHB); + pHB->retry = 0; int32_t code = tscProcessSql(pHB); taosCacheRelease(tscObjCache, (void**) &p, false); if (code != TSDB_CODE_SUCCESS) { tscError("%p failed to sent HB to server, reason:%s", pHB, tstrerror(code)); } + + taosReleaseRef(tscRefId, rid); } int tscSendMsgToServer(SSqlObj *pSql) { @@ -237,15 +237,11 @@ int tscSendMsgToServer(SSqlObj *pSql) { .pCont = pMsg, .contLen = pSql->cmd.payloadLen, .ahandle = pSql, - .handle = &pSql->pRpcCtx, + .handle = NULL, .code = 0 }; - // NOTE: the rpc context should be acquired before sending data to server. - // Otherwise, the pSql object may have been released already during the response function, which is - // processMsgFromServer function. In the meanwhile, the assignment of the rpc context to sql object will absolutely - // cause crash. 
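The removed NOTE above described why storing a raw rpc context pointer in the SSqlObj was fragile: the object could already have been released in the response path before the context was even assigned, causing a crash. The change just below switches to an rpc request id (rpcRid) filled in by rpcSendRequest(), and the heartbeat timer now carries the connection's reference id instead of an STscObj pointer, so a late timer or a cancel can validate the id before touching memory. A minimal sketch of that acquire/use/release pattern, using only the taosAcquireRef()/taosReleaseRef() calls that appear in this patch (the callback body is illustrative):

// Illustrative timer callback: the handle is an id, never a raw object pointer.
static void onConnTimer(void *handle, void *tmrId) {
  (void)tmrId;
  int64_t rid = (int64_t)handle;               // reference id registered with taosAddRef()
  STscObj *pObj = taosAcquireRef(tscRefId, rid);
  if (pObj == NULL) {                          // object already released, nothing to do
    return;
  }
  // ... safe to use pObj here, e.g. to (re)send the heartbeat ...
  taosReleaseRef(tscRefId, rid);               // drop the reference taken above
}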
- rpcSendRequest(pObj->pDnodeConn, &pSql->epSet, &rpcMsg); + rpcSendRequest(pObj->pDnodeConn, &pSql->epSet, &rpcMsg, &pSql->rpcRid); return TSDB_CODE_SUCCESS; } @@ -265,7 +261,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) { SSqlCmd *pCmd = &pSql->cmd; assert(*pSql->self == pSql); - pSql->pRpcCtx = NULL; + pSql->rpcRid = -1; if (pObj->signature != pObj) { tscDebug("%p DB connection is closed, cmd:%d pObj:%p signature:%p", pSql, pCmd->command, pObj, pObj->signature); @@ -361,7 +357,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) { memcpy(pRes->pRsp, rpcMsg->pCont, pRes->rspLen); } } else { - pRes->pRsp = NULL; + tfree(pRes->pRsp); } /* @@ -481,14 +477,25 @@ int tscBuildFetchMsg(SSqlObj *pSql, SSqlInfo *pInfo) { if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { int32_t vgIndex = pTableMetaInfo->vgroupIndex; - - SVgroupsInfo* pVgroupInfo = pTableMetaInfo->vgroupList; - assert(pVgroupInfo->vgroups[vgIndex].vgId > 0 && vgIndex < pTableMetaInfo->vgroupList->numOfVgroups); + if (pTableMetaInfo->pVgroupTables == NULL) { + SVgroupsInfo *pVgroupInfo = pTableMetaInfo->vgroupList; + assert(pVgroupInfo->vgroups[vgIndex].vgId > 0 && vgIndex < pTableMetaInfo->vgroupList->numOfVgroups); + + pRetrieveMsg->header.vgId = htonl(pVgroupInfo->vgroups[vgIndex].vgId); + tscDebug("%p build fetch msg from vgId:%d, vgIndex:%d", pSql, pVgroupInfo->vgroups[vgIndex].vgId, vgIndex); + } else { + int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables); + assert(vgIndex >= 0 && vgIndex < numOfVgroups); + + SVgroupTableInfo* pTableIdList = taosArrayGet(pTableMetaInfo->pVgroupTables, vgIndex); - pRetrieveMsg->header.vgId = htonl(pVgroupInfo->vgroups[vgIndex].vgId); + pRetrieveMsg->header.vgId = htonl(pTableIdList->vgInfo.vgId); + tscDebug("%p build fetch msg from vgId:%d, vgIndex:%d", pSql, pTableIdList->vgInfo.vgId, vgIndex); + } } else { STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; pRetrieveMsg->header.vgId = htonl(pTableMeta->vgroupInfo.vgId); + tscDebug("%p build fetch msg from only one vgroup, vgId:%d", pSql, pTableMeta->vgroupInfo.vgId); } pSql->cmd.payloadLen = sizeof(SRetrieveTableMsg); @@ -538,11 +545,28 @@ static int32_t tscEstimateQueryMsgSize(SSqlCmd *pCmd, int32_t clauseIndex) { SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex); int32_t srcColListSize = (int32_t)(taosArrayGetSize(pQueryInfo->colList) * sizeof(SColumnInfo)); - - size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo); - int32_t exprSize = (int32_t)(sizeof(SSqlFuncMsg) * numOfExprs); - - return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + exprSize + 4096; + + size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo); + int32_t exprSize = (int32_t)(sizeof(SSqlFuncMsg) * numOfExprs * 2); + + int32_t tsBufSize = (pQueryInfo->tsBuf != NULL) ? 
pQueryInfo->tsBuf->fileSize : 0; + + int32_t tableSerialize = 0; + STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); + if (pTableMetaInfo->pVgroupTables != NULL) { + size_t numOfGroups = taosArrayGetSize(pTableMetaInfo->pVgroupTables); + + int32_t totalTables = 0; + for (int32_t i = 0; i < numOfGroups; ++i) { + SVgroupTableInfo *pTableInfo = taosArrayGet(pTableMetaInfo->pVgroupTables, i); + totalTables += (int32_t) taosArrayGetSize(pTableInfo->itemList); + } + + tableSerialize = totalTables * sizeof(STableIdInfo); + } + + return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + exprSize + tsBufSize + + tableSerialize + 4096; } static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char *pMsg) { @@ -552,7 +576,7 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char STableMeta * pTableMeta = pTableMetaInfo->pTableMeta; if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo) || pTableMetaInfo->pVgroupTables == NULL) { - SCMVgroupInfo* pVgroupInfo = NULL; + SVgroupInfo* pVgroupInfo = NULL; if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { int32_t index = pTableMetaInfo->vgroupIndex; assert(index >= 0); @@ -662,19 +686,19 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pQueryMsg->limit = htobe64(pQueryInfo->limit.limit); pQueryMsg->offset = htobe64(pQueryInfo->limit.offset); pQueryMsg->numOfCols = htons((int16_t)taosArrayGetSize(pQueryInfo->colList)); - pQueryMsg->interval.interval = htobe64(pQueryInfo->interval.interval); - pQueryMsg->interval.sliding = htobe64(pQueryInfo->interval.sliding); + pQueryMsg->interval.interval = htobe64(pQueryInfo->interval.interval); + pQueryMsg->interval.sliding = htobe64(pQueryInfo->interval.sliding); pQueryMsg->interval.offset = htobe64(pQueryInfo->interval.offset); pQueryMsg->interval.intervalUnit = pQueryInfo->interval.intervalUnit; - pQueryMsg->interval.slidingUnit = pQueryInfo->interval.slidingUnit; - pQueryMsg->interval.offsetUnit = pQueryInfo->interval.offsetUnit; + pQueryMsg->interval.slidingUnit = pQueryInfo->interval.slidingUnit; + pQueryMsg->interval.offsetUnit = pQueryInfo->interval.offsetUnit; pQueryMsg->numOfGroupCols = htons(pQueryInfo->groupbyExpr.numOfGroupCols); pQueryMsg->numOfTags = htonl(numOfTags); pQueryMsg->tagNameRelType = htons(pQueryInfo->tagCond.relType); pQueryMsg->queryType = htonl(pQueryInfo->type); size_t numOfOutput = tscSqlExprNumOfExprs(pQueryInfo); - pQueryMsg->numOfOutput = htons((int16_t)numOfOutput); + pQueryMsg->numOfOutput = htons((int16_t)numOfOutput); // this is the stage one output column number // set column list ids size_t numOfCols = taosArrayGetSize(pQueryInfo->colList); @@ -736,12 +760,15 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { return TSDB_CODE_TSC_INVALID_SQL; } + assert(pExpr->resColId < 0); + pSqlFuncExpr->colInfo.colId = htons(pExpr->colInfo.colId); pSqlFuncExpr->colInfo.colIndex = htons(pExpr->colInfo.colIndex); pSqlFuncExpr->colInfo.flag = htons(pExpr->colInfo.flag); pSqlFuncExpr->functionId = htons(pExpr->functionId); pSqlFuncExpr->numOfParams = htons(pExpr->numOfParams); + pSqlFuncExpr->resColId = htons(pExpr->resColId); pMsg += sizeof(SSqlFuncMsg); for (int32_t j = 0; j < pExpr->numOfParams; ++j) { @@ -759,7 +786,74 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pSqlFuncExpr = (SSqlFuncMsg *)pMsg; } - + + size_t output = tscNumOfFields(pQueryInfo); + + if (tscIsSecondStageQuery(pQueryInfo)) { + pQueryMsg->secondStageOutput = htonl((int32_t) output); + + SSqlFuncMsg *pSqlFuncExpr1 = 
(SSqlFuncMsg *)pMsg; + + for (int32_t i = 0; i < output; ++i) { + SInternalField* pField = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i); + SSqlExpr *pExpr = pField->pSqlExpr; + if (pExpr != NULL) { + if (!tscValidateColumnId(pTableMetaInfo, pExpr->colInfo.colId, pExpr->numOfParams)) { + tscError("%p table schema is not matched with parsed sql", pSql); + return TSDB_CODE_TSC_INVALID_SQL; + } + + pSqlFuncExpr1->colInfo.colId = htons(pExpr->colInfo.colId); + pSqlFuncExpr1->colInfo.colIndex = htons(pExpr->colInfo.colIndex); + pSqlFuncExpr1->colInfo.flag = htons(pExpr->colInfo.flag); + + pSqlFuncExpr1->functionId = htons(pExpr->functionId); + pSqlFuncExpr1->numOfParams = htons(pExpr->numOfParams); + pMsg += sizeof(SSqlFuncMsg); + + for (int32_t j = 0; j < pExpr->numOfParams; ++j) { + // todo add log + pSqlFuncExpr1->arg[j].argType = htons((uint16_t)pExpr->param[j].nType); + pSqlFuncExpr1->arg[j].argBytes = htons(pExpr->param[j].nLen); + + if (pExpr->param[j].nType == TSDB_DATA_TYPE_BINARY) { + memcpy(pMsg, pExpr->param[j].pz, pExpr->param[j].nLen); + pMsg += pExpr->param[j].nLen; + } else { + pSqlFuncExpr1->arg[j].argValue.i64 = htobe64(pExpr->param[j].i64Key); + } + } + + pSqlFuncExpr1 = (SSqlFuncMsg *)pMsg; + } else { + assert(pField->pArithExprInfo != NULL); + SExprInfo* pExprInfo = pField->pArithExprInfo; + + pSqlFuncExpr1->colInfo.colId = htons(pExprInfo->base.colInfo.colId); + pSqlFuncExpr1->functionId = htons(pExprInfo->base.functionId); + pSqlFuncExpr1->numOfParams = htons(pExprInfo->base.numOfParams); + pMsg += sizeof(SSqlFuncMsg); + + for (int32_t j = 0; j < pExprInfo->base.numOfParams; ++j) { + // todo add log + pSqlFuncExpr1->arg[j].argType = htons((uint16_t)pExprInfo->base.arg[j].argType); + pSqlFuncExpr1->arg[j].argBytes = htons(pExprInfo->base.arg[j].argBytes); + + if (pExprInfo->base.arg[j].argType == TSDB_DATA_TYPE_BINARY) { + memcpy(pMsg, pExprInfo->base.arg[j].argValue.pz, pExprInfo->base.arg[j].argBytes); + pMsg += pExprInfo->base.arg[j].argBytes; + } else { + pSqlFuncExpr1->arg[j].argValue.i64 = htobe64(pExprInfo->base.arg[j].argValue.i64); + } + } + + pSqlFuncExpr1 = (SSqlFuncMsg *)pMsg; + } + } + } else { + pQueryMsg->secondStageOutput = 0; + } + // serialize the table info (sid, uid, tags) pMsg = doSerializeTableInfo(pQueryMsg, pSql, pMsg); @@ -786,7 +880,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { } if (pQueryInfo->fillType != TSDB_FILL_NONE) { - for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { + for (int32_t i = 0; i < tscSqlExprNumOfExprs(pQueryInfo); ++i) { *((int64_t *)pMsg) = htobe64(pQueryInfo->fillVal[i]); pMsg += sizeof(pQueryInfo->fillVal[0]); } @@ -846,41 +940,25 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { // compressed ts block pQueryMsg->tsOffset = htonl((int32_t)(pMsg - pCmd->payload)); - int32_t tsLen = 0; - int32_t numOfBlocks = 0; if (pQueryInfo->tsBuf != NULL) { - STSVnodeBlockInfo *pBlockInfo = tsBufGetVnodeBlockInfo(pQueryInfo->tsBuf, pTableMetaInfo->vgroupIndex); - assert(QUERY_IS_JOIN_QUERY(pQueryInfo->type) && pBlockInfo != NULL); // this query should not be sent - - // todo refactor - if (fseek(pQueryInfo->tsBuf->f, pBlockInfo->offset, SEEK_SET) != 0) { - int code = TAOS_SYSTEM_ERROR(ferror(pQueryInfo->tsBuf->f)); - tscError("%p: fseek failed: %s", pSql, tstrerror(code)); - return code; - } - - size_t s = fread(pMsg, 1, pBlockInfo->compLen, pQueryInfo->tsBuf->f); - if (s != pBlockInfo->compLen) { - int code = TAOS_SYSTEM_ERROR(ferror(pQueryInfo->tsBuf->f)); - tscError("%p: fread didn't 
return expected data: %s", pSql, tstrerror(code)); + // note: here used the index instead of actual vnode id. + int32_t vnodeIndex = pTableMetaInfo->vgroupIndex; + int32_t code = dumpFileBlockByGroupId(pQueryInfo->tsBuf, vnodeIndex, pMsg, &pQueryMsg->tsLen, &pQueryMsg->tsNumOfBlocks); + if (code != TSDB_CODE_SUCCESS) { return code; } - pMsg += pBlockInfo->compLen; - tsLen = pBlockInfo->compLen; - numOfBlocks = pBlockInfo->numOfBlocks; - } + pMsg += pQueryMsg->tsLen; - pQueryMsg->tsLen = htonl(tsLen); - pQueryMsg->tsNumOfBlocks = htonl(numOfBlocks); - if (pQueryInfo->tsBuf != NULL) { pQueryMsg->tsOrder = htonl(pQueryInfo->tsBuf->tsOrder); + pQueryMsg->tsLen = htonl(pQueryMsg->tsLen); + pQueryMsg->tsNumOfBlocks = htonl(pQueryMsg->tsNumOfBlocks); } int32_t msgLen = (int32_t)(pMsg - pCmd->payload); - tscDebug("%p msg built success,len:%d bytes", pSql, msgLen); + tscDebug("%p msg built success, len:%d bytes", pSql, msgLen); pCmd->payloadLen = msgLen; pSql->cmd.msgType = TSDB_MSG_TYPE_QUERY; @@ -892,10 +970,10 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { int32_t tscBuildCreateDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SSqlCmd *pCmd = &pSql->cmd; - pCmd->payloadLen = sizeof(SCMCreateDbMsg); + pCmd->payloadLen = sizeof(SCreateDbMsg); pCmd->msgType = TSDB_MSG_TYPE_CM_CREATE_DB; - SCMCreateDbMsg *pCreateDbMsg = (SCMCreateDbMsg*)pCmd->payload; + SCreateDbMsg *pCreateDbMsg = (SCreateDbMsg *)pCmd->payload; assert(pCmd->numOfClause == 1); STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0); @@ -906,13 +984,13 @@ int32_t tscBuildCreateDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) { int32_t tscBuildCreateDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SSqlCmd *pCmd = &pSql->cmd; - pCmd->payloadLen = sizeof(SCMCreateDnodeMsg); + pCmd->payloadLen = sizeof(SCreateDnodeMsg); if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) { tscError("%p failed to malloc for query msg", pSql); return TSDB_CODE_TSC_OUT_OF_MEMORY; } - SCMCreateDnodeMsg *pCreate = (SCMCreateDnodeMsg *)pCmd->payload; + SCreateDnodeMsg *pCreate = (SCreateDnodeMsg *)pCmd->payload; strncpy(pCreate->ep, pInfo->pDCLInfo->a[0].z, pInfo->pDCLInfo->a[0].n); pCmd->msgType = TSDB_MSG_TYPE_CM_CREATE_DNODE; @@ -922,13 +1000,13 @@ int32_t tscBuildCreateDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) { int32_t tscBuildAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SSqlCmd *pCmd = &pSql->cmd; - pCmd->payloadLen = sizeof(SCMCreateAcctMsg); + pCmd->payloadLen = sizeof(SCreateAcctMsg); if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) { tscError("%p failed to malloc for query msg", pSql); return TSDB_CODE_TSC_OUT_OF_MEMORY; } - SCMCreateAcctMsg *pAlterMsg = (SCMCreateAcctMsg *)pCmd->payload; + SCreateAcctMsg *pAlterMsg = (SCreateAcctMsg *)pCmd->payload; SStrToken *pName = &pInfo->pDCLInfo->user.user; SStrToken *pPwd = &pInfo->pDCLInfo->user.passwd; @@ -967,14 +1045,14 @@ int32_t tscBuildAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) { int32_t tscBuildUserMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SSqlCmd *pCmd = &pSql->cmd; - pCmd->payloadLen = sizeof(SCMCreateUserMsg); + pCmd->payloadLen = sizeof(SCreateUserMsg); if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) { tscError("%p failed to malloc for query msg", pSql); return TSDB_CODE_TSC_OUT_OF_MEMORY; } - SCMCreateUserMsg *pAlterMsg = (SCMCreateUserMsg*)pCmd->payload; + SCreateUserMsg *pAlterMsg = (SCreateUserMsg *)pCmd->payload; SUserInfo *pUser = &pInfo->pDCLInfo->user; strncpy(pAlterMsg->user, pUser->user.z, pUser->user.n); @@ -999,21 +1077,21 @@ 
int32_t tscBuildUserMsg(SSqlObj *pSql, SSqlInfo *pInfo) { int32_t tscBuildCfgDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SSqlCmd *pCmd = &pSql->cmd; - pCmd->payloadLen = sizeof(SCMCfgDnodeMsg); + pCmd->payloadLen = sizeof(SCfgDnodeMsg); pCmd->msgType = TSDB_MSG_TYPE_CM_CONFIG_DNODE; return TSDB_CODE_SUCCESS; } int32_t tscBuildDropDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SSqlCmd *pCmd = &pSql->cmd; - pCmd->payloadLen = sizeof(SCMDropDbMsg); + pCmd->payloadLen = sizeof(SDropDbMsg); if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) { tscError("%p failed to malloc for query msg", pSql); return TSDB_CODE_TSC_OUT_OF_MEMORY; } - SCMDropDbMsg *pDropDbMsg = (SCMDropDbMsg*)pCmd->payload; + SDropDbMsg *pDropDbMsg = (SDropDbMsg*)pCmd->payload; STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0); tstrncpy(pDropDbMsg->db, pTableMetaInfo->name, sizeof(pDropDbMsg->db)); @@ -1043,13 +1121,13 @@ int32_t tscBuildDropTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) { int32_t tscBuildDropDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SSqlCmd *pCmd = &pSql->cmd; - pCmd->payloadLen = sizeof(SCMDropDnodeMsg); + pCmd->payloadLen = sizeof(SDropDnodeMsg); if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) { tscError("%p failed to malloc for query msg", pSql); return TSDB_CODE_TSC_OUT_OF_MEMORY; } - SCMDropDnodeMsg *pDrop = (SCMDropDnodeMsg *)pCmd->payload; + SDropDnodeMsg * pDrop = (SDropDnodeMsg *)pCmd->payload; STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0); tstrncpy(pDrop->ep, pTableMetaInfo->name, sizeof(pDrop->ep)); pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_DNODE; @@ -1059,7 +1137,7 @@ int32_t tscBuildDropDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) { int32_t tscBuildDropUserMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SSqlCmd *pCmd = &pSql->cmd; - pCmd->payloadLen = sizeof(SCMDropUserMsg); + pCmd->payloadLen = sizeof(SDropUserMsg); pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_USER; if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) { @@ -1067,7 +1145,7 @@ int32_t tscBuildDropUserMsg(SSqlObj *pSql, SSqlInfo *pInfo) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } - SCMDropUserMsg *pDropMsg = (SCMDropUserMsg*)pCmd->payload; + SDropUserMsg * pDropMsg = (SDropUserMsg *)pCmd->payload; STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0); tstrncpy(pDropMsg->user, pTableMetaInfo->name, sizeof(pDropMsg->user)); @@ -1076,7 +1154,7 @@ int32_t tscBuildDropUserMsg(SSqlObj *pSql, SSqlInfo *pInfo) { int32_t tscBuildDropAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SSqlCmd *pCmd = &pSql->cmd; - pCmd->payloadLen = sizeof(SCMDropUserMsg); + pCmd->payloadLen = sizeof(SDropUserMsg); pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_ACCT; if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) { @@ -1084,7 +1162,7 @@ int32_t tscBuildDropAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } - SCMDropUserMsg *pDropMsg = (SCMDropUserMsg*)pCmd->payload; + SDropUserMsg * pDropMsg = (SDropUserMsg *)pCmd->payload; STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0); tstrncpy(pDropMsg->user, pTableMetaInfo->name, sizeof(pDropMsg->user)); @@ -1093,14 +1171,14 @@ int32_t tscBuildDropAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) { int32_t tscBuildUseDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SSqlCmd *pCmd = &pSql->cmd; - pCmd->payloadLen = sizeof(SCMUseDbMsg); + pCmd->payloadLen = sizeof(SUseDbMsg); if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) { tscError("%p 
failed to malloc for query msg", pSql); return TSDB_CODE_TSC_OUT_OF_MEMORY; } - SCMUseDbMsg *pUseDbMsg = (SCMUseDbMsg*)pCmd->payload; + SUseDbMsg *pUseDbMsg = (SUseDbMsg *)pCmd->payload; STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0); strcpy(pUseDbMsg->db, pTableMetaInfo->name); pCmd->msgType = TSDB_MSG_TYPE_CM_USE_DB; @@ -1112,14 +1190,14 @@ int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) { STscObj *pObj = pSql->pTscObj; SSqlCmd *pCmd = &pSql->cmd; pCmd->msgType = TSDB_MSG_TYPE_CM_SHOW; - pCmd->payloadLen = sizeof(SCMShowMsg) + 100; + pCmd->payloadLen = sizeof(SShowMsg) + 100; if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) { tscError("%p failed to malloc for query msg", pSql); return TSDB_CODE_TSC_OUT_OF_MEMORY; } - SCMShowMsg *pShowMsg = (SCMShowMsg*)pCmd->payload; + SShowMsg *pShowMsg = (SShowMsg *)pCmd->payload; STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0); size_t nameLen = strlen(pTableMetaInfo->name); @@ -1146,13 +1224,13 @@ int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pShowMsg->payloadLen = htons(pEpAddr->n); } - pCmd->payloadLen = sizeof(SCMShowMsg) + pShowMsg->payloadLen; + pCmd->payloadLen = sizeof(SShowMsg) + pShowMsg->payloadLen; return TSDB_CODE_SUCCESS; } int32_t tscBuildKillMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SSqlCmd *pCmd = &pSql->cmd; - pCmd->payloadLen = sizeof(SCMKillQueryMsg); + pCmd->payloadLen = sizeof(SKillQueryMsg); switch (pCmd->command) { case TSDB_SQL_KILL_QUERY: @@ -1264,8 +1342,7 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) { int tscEstimateAlterTableMsgLength(SSqlCmd *pCmd) { SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); - return minMsgSize() + sizeof(SCMAlterTableMsg) + sizeof(SSchema) * tscNumOfFields(pQueryInfo) + - TSDB_EXTRA_PAYLOAD_SIZE; + return minMsgSize() + sizeof(SAlterTableMsg) + sizeof(SSchema) * tscNumOfFields(pQueryInfo) + TSDB_EXTRA_PAYLOAD_SIZE; } int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) { @@ -1284,7 +1361,7 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } - SCMAlterTableMsg *pAlterTableMsg = (SCMAlterTableMsg *)pCmd->payload; + SAlterTableMsg *pAlterTableMsg = (SAlterTableMsg *)pCmd->payload; tscGetDBInfoFromTableFullName(pTableMetaInfo->name, pAlterTableMsg->db); strcpy(pAlterTableMsg->tableId, pTableMetaInfo->name); @@ -1333,10 +1410,10 @@ int tscBuildUpdateTagMsg(SSqlObj* pSql, SSqlInfo *pInfo) { int tscAlterDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SSqlCmd *pCmd = &pSql->cmd; - pCmd->payloadLen = sizeof(SCMAlterDbMsg); + pCmd->payloadLen = sizeof(SAlterDbMsg); pCmd->msgType = TSDB_MSG_TYPE_CM_ALTER_DB; - SCMAlterDbMsg *pAlterDbMsg = (SCMAlterDbMsg*)pCmd->payload; + SAlterDbMsg *pAlterDbMsg = (SAlterDbMsg* )pCmd->payload; STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0); tstrncpy(pAlterDbMsg->db, pTableMetaInfo->name, sizeof(pAlterDbMsg->db)); @@ -1361,19 +1438,6 @@ int tscBuildRetrieveFromMgmtMsg(SSqlObj *pSql, SSqlInfo *pInfo) { return TSDB_CODE_SUCCESS; } -static int tscSetResultPointer(SQueryInfo *pQueryInfo, SSqlRes *pRes) { - if (tscCreateResPointerInfo(pRes, pQueryInfo) != TSDB_CODE_SUCCESS) { - return pRes->code; - } - - for (int i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { - int16_t offset = tscFieldInfoGetOffset(pQueryInfo, i); - pRes->tsrow[i] = (unsigned char*)((char*) pRes->data + offset * pRes->numOfRows); - } - - return 0; -} - /* * this function can only be 
called once. * by using pRes->rspType to denote its status @@ -1384,15 +1448,18 @@ static int tscLocalResultCommonBuilder(SSqlObj *pSql, int32_t numOfRes) { SSqlRes *pRes = &pSql->res; SSqlCmd *pCmd = &pSql->cmd; - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); - pRes->code = TSDB_CODE_SUCCESS; if (pRes->rspType == 0) { pRes->numOfRows = numOfRes; pRes->row = 0; pRes->rspType = 1; - tscSetResultPointer(pQueryInfo, pRes); + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + if (tscCreateResPointerInfo(pRes, pQueryInfo) != TSDB_CODE_SUCCESS) { + return pRes->code; + } + + tscSetResRawPtr(pRes, pQueryInfo); } else { tscResetForNextRetrieve(pRes); } @@ -1436,10 +1503,11 @@ int tscProcessRetrieveLocalMergeRsp(SSqlObj *pSql) { } pRes->code = tscDoLocalMerge(pSql); - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); if (pRes->code == TSDB_CODE_SUCCESS && pRes->numOfRows > 0) { + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); tscCreateResPointerInfo(pRes, pQueryInfo); + tscSetResRawPtr(pRes, pQueryInfo); } pRes->row = 0; @@ -1461,14 +1529,14 @@ int tscBuildConnectMsg(SSqlObj *pSql, SSqlInfo *pInfo) { STscObj *pObj = pSql->pTscObj; SSqlCmd *pCmd = &pSql->cmd; pCmd->msgType = TSDB_MSG_TYPE_CM_CONNECT; - pCmd->payloadLen = sizeof(SCMConnectMsg); + pCmd->payloadLen = sizeof(SConnectMsg); if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) { tscError("%p failed to malloc for query msg", pSql); return TSDB_CODE_TSC_OUT_OF_MEMORY; } - SCMConnectMsg *pConnect = (SCMConnectMsg*)pCmd->payload; + SConnectMsg *pConnect = (SConnectMsg*)pCmd->payload; // TODO refactor full_name char *db; // ugly code to move the space @@ -1490,11 +1558,11 @@ int tscBuildTableMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) { STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - SCMTableInfoMsg* pInfoMsg = (SCMTableInfoMsg *)pCmd->payload; + STableInfoMsg *pInfoMsg = (STableInfoMsg *)pCmd->payload; strcpy(pInfoMsg->tableId, pTableMetaInfo->name); pInfoMsg->createFlag = htons(pSql->cmd.autoCreated ? 1 : 0); - char* pMsg = (char*)pInfoMsg + sizeof(SCMTableInfoMsg); + char *pMsg = (char *)pInfoMsg + sizeof(STableInfoMsg); size_t len = htonl(pCmd->tagData.dataLen); if (pSql->cmd.autoCreated) { @@ -1513,7 +1581,7 @@ int tscBuildTableMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) { /** * multi table meta req pkg format: - * | SMgmtHead | SCMMultiTableInfoMsg | tableId0 | tableId1 | tableId2 | ...... + * | SMgmtHead | SMultiTableInfoMsg | tableId0 | tableId1 | tableId2 | ...... 
* no used 4B **/ int tscBuildMultiMeterMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) { @@ -1531,16 +1599,16 @@ int tscBuildMultiMeterMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SMgmtHead *pMgmt = (SMgmtHead *)(pCmd->payload + tsRpcHeadSize); memset(pMgmt->db, 0, TSDB_TABLE_FNAME_LEN); // server don't need the db - SCMMultiTableInfoMsg *pInfoMsg = (SCMMultiTableInfoMsg *)(pCmd->payload + tsRpcHeadSize + sizeof(SMgmtHead)); + SMultiTableInfoMsg *pInfoMsg = (SMultiTableInfoMsg *)(pCmd->payload + tsRpcHeadSize + sizeof(SMgmtHead)); pInfoMsg->numOfTables = htonl((int32_t)pCmd->count); if (pCmd->payloadLen > 0) { memcpy(pInfoMsg->tableIds, tmpData, pCmd->payloadLen); } - taosTFree(tmpData); + tfree(tmpData); - pCmd->payloadLen += sizeof(SMgmtHead) + sizeof(SCMMultiTableInfoMsg); + pCmd->payloadLen += sizeof(SMgmtHead) + sizeof(SMultiTableInfoMsg); pCmd->msgType = TSDB_MSG_TYPE_CM_TABLES_META; assert(pCmd->payloadLen + minMsgSize() <= pCmd->allocSize); @@ -1585,12 +1653,12 @@ int tscBuildSTableVgroupMsg(SSqlObj *pSql, SSqlInfo *pInfo) { char* pMsg = pCmd->payload; SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); - - SCMSTableVgroupMsg *pStableVgroupMsg = (SCMSTableVgroupMsg *) pMsg; + + SSTableVgroupMsg *pStableVgroupMsg = (SSTableVgroupMsg *)pMsg; pStableVgroupMsg->numOfTables = htonl(pQueryInfo->numOfTables); - pMsg += sizeof(SCMSTableVgroupMsg); - - for(int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { + pMsg += sizeof(SSTableVgroupMsg); + + for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, i); size_t size = sizeof(pTableMetaInfo->name); tstrncpy(pMsg, pTableMetaInfo->name, size); @@ -1623,14 +1691,17 @@ int tscBuildHeartBeatMsg(SSqlObj *pSql, SSqlInfo *pInfo) { numOfStreams++; } - int size = numOfQueries * sizeof(SQueryDesc) + numOfStreams * sizeof(SStreamDesc) + sizeof(SCMHeartBeatMsg) + 100; + int size = numOfQueries * sizeof(SQueryDesc) + numOfStreams * sizeof(SStreamDesc) + sizeof(SHeartBeatMsg) + 100; if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) { pthread_mutex_unlock(&pObj->mutex); - tscError("%p failed to malloc for heartbeat msg", pSql); + tscError("%p failed to create heartbeat msg", pSql); return TSDB_CODE_TSC_OUT_OF_MEMORY; } - SCMHeartBeatMsg *pHeartbeat = (SCMHeartBeatMsg *)pCmd->payload; + // TODO the expired hb and client can not be identified by server till now. + SHeartBeatMsg *pHeartbeat = (SHeartBeatMsg *)pCmd->payload; + tstrncpy(pHeartbeat->clientVer, version, tListLen(pHeartbeat->clientVer)); + pHeartbeat->numOfQueries = numOfQueries; pHeartbeat->numOfStreams = numOfStreams; @@ -1704,8 +1775,7 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) { pTableMetaInfo->pTableMeta = (STableMeta *) taosCachePut(tscMetaCache, pTableMetaInfo->name, strlen(pTableMetaInfo->name), pTableMeta, size, tsTableMetaKeepTimer * 1000); - - // todo handle out of memory case + if (pTableMetaInfo->pTableMeta == NULL) { free(pTableMeta); return TSDB_CODE_TSC_OUT_OF_MEMORY; @@ -1719,7 +1789,7 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) { /** * multi table meta rsp pkg format: - * | STaosRsp | ieType | SCMMultiTableInfoMsg | SMeterMeta0 | SSchema0 | SMeterMeta1 | SSchema1 | SMeterMeta2 | SSchema2 + * | STaosRsp | ieType | SMultiTableInfoMsg | SMeterMeta0 | SSchema0 | SMeterMeta1 | SSchema1 | SMeterMeta2 | SSchema2 * |...... 
1B 1B 4B **/ int tscProcessMultiMeterMetaRsp(SSqlObj *pSql) { @@ -1736,9 +1806,9 @@ int tscProcessMultiMeterMetaRsp(SSqlObj *pSql) { rsp++; - SCMMultiTableInfoMsg *pInfo = (SCMMultiTableInfoMsg *)rsp; + SMultiTableInfoMsg *pInfo = (SMultiTableInfoMsg *)rsp; totalNum = htonl(pInfo->numOfTables); - rsp += sizeof(SCMMultiTableInfoMsg); + rsp += sizeof(SMultiTableInfoMsg); for (i = 0; i < totalNum; i++) { SMultiTableMeta *pMultiMeta = (SMultiTableMeta *)rsp; @@ -1830,10 +1900,10 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) { SSqlRes* pRes = &pSql->res; // NOTE: the order of several table must be preserved. - SCMSTableVgroupRspMsg *pStableVgroup = (SCMSTableVgroupRspMsg *)pRes->pRsp; + SSTableVgroupRspMsg *pStableVgroup = (SSTableVgroupRspMsg *)pRes->pRsp; pStableVgroup->numOfTables = htonl(pStableVgroup->numOfTables); - char* pMsg = pRes->pRsp + sizeof(SCMSTableVgroupRspMsg); - + char *pMsg = pRes->pRsp + sizeof(SSTableVgroupRspMsg); + // master sqlObj locates in param SSqlObj* parent = pSql->param; assert(parent != NULL); @@ -1845,18 +1915,18 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) { SVgroupsMsg * pVgroupMsg = (SVgroupsMsg *) pMsg; pVgroupMsg->numOfVgroups = htonl(pVgroupMsg->numOfVgroups); - size_t size = sizeof(SCMVgroupMsg) * pVgroupMsg->numOfVgroups + sizeof(SVgroupsMsg); + size_t size = sizeof(SVgroupMsg) * pVgroupMsg->numOfVgroups + sizeof(SVgroupsMsg); - size_t vgroupsz = sizeof(SCMVgroupInfo) * pVgroupMsg->numOfVgroups + sizeof(SVgroupsInfo); + size_t vgroupsz = sizeof(SVgroupInfo) * pVgroupMsg->numOfVgroups + sizeof(SVgroupsInfo); pInfo->vgroupList = calloc(1, vgroupsz); assert(pInfo->vgroupList != NULL); pInfo->vgroupList->numOfVgroups = pVgroupMsg->numOfVgroups; for (int32_t j = 0; j < pInfo->vgroupList->numOfVgroups; ++j) { //just init, no need to lock - SCMVgroupInfo *pVgroups = &pInfo->vgroupList->vgroups[j]; + SVgroupInfo *pVgroups = &pInfo->vgroupList->vgroups[j]; - SCMVgroupMsg *vmsg = &pVgroupMsg->vgroups[j]; + SVgroupMsg *vmsg = &pVgroupMsg->vgroups[j]; pVgroups->vgId = htonl(vmsg->vgId); pVgroups->numOfEps = vmsg->numOfEps; @@ -1878,10 +1948,10 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) { * current process do not use the cache at all */ int tscProcessShowRsp(SSqlObj *pSql) { - STableMetaMsg * pMetaMsg; - SCMShowRsp *pShow; - SSchema * pSchema; - char key[20]; + STableMetaMsg *pMetaMsg; + SShowRsp * pShow; + SSchema * pSchema; + char key[20]; SSqlRes *pRes = &pSql->res; SSqlCmd *pCmd = &pSql->cmd; @@ -1890,7 +1960,7 @@ int tscProcessShowRsp(SSqlObj *pSql) { STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - pShow = (SCMShowRsp *)pRes->pRsp; + pShow = (SShowRsp *)pRes->pRsp; pShow->qhandle = htobe64(pShow->qhandle); pRes->qhandle = pShow->qhandle; @@ -1937,13 +2007,13 @@ int tscProcessShowRsp(SSqlObj *pSql) { SInternalField* pInfo = tscFieldInfoAppend(pFieldInfo, &f); pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, - pTableSchema[i].type, pTableSchema[i].bytes, pTableSchema[i].bytes, false); + pTableSchema[i].type, pTableSchema[i].bytes, getNewResColId(pQueryInfo), pTableSchema[i].bytes, false); } pCmd->numOfCols = pQueryInfo->fieldsInfo.numOfOutput; tscFieldInfoUpdateOffset(pQueryInfo); - taosTFree(pTableMeta); + tfree(pTableMeta); return 0; } @@ -1968,7 +2038,7 @@ static void createHBObj(STscObj* pObj) { pSql->cmd.command = pQueryInfo->command; if (TSDB_CODE_SUCCESS != tscAllocPayload(&(pSql->cmd), TSDB_DEFAULT_PAYLOAD_SIZE)) { - taosTFree(pSql); + tfree(pSql); return; } @@ -1983,11 +2053,12 @@ static void 
createHBObj(STscObj* pObj) { } int tscProcessConnectRsp(SSqlObj *pSql) { - char temp[TSDB_TABLE_FNAME_LEN * 2]; STscObj *pObj = pSql->pTscObj; SSqlRes *pRes = &pSql->res; - SCMConnectRsp *pConnect = (SCMConnectRsp *)pRes->pRsp; + char temp[TSDB_TABLE_FNAME_LEN * 2] = {0}; + + SConnectRsp *pConnect = (SConnectRsp *)pRes->pRsp; tstrncpy(pObj->acctId, pConnect->acctId, sizeof(pObj->acctId)); // copy acctId from response int32_t len = sprintf(temp, "%s%s%s", pObj->acctId, TS_PATH_DELIMITER, pObj->db); @@ -2005,7 +2076,9 @@ int tscProcessConnectRsp(SSqlObj *pSql) { pObj->connId = htonl(pConnect->connId); createHBObj(pObj); - taosTmrReset(tscProcessActivityTimer, tsShellActivityTimer * 500, pObj, tscTmr, &pObj->pTimer); + + //launch a timer to send heartbeat to maintain the connection and send status to mnode + taosTmrReset(tscProcessActivityTimer, tsShellActivityTimer * 500, (void *)pObj->rid, tscTmr, &pObj->pTimer); return 0; } @@ -2114,7 +2187,16 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) { if (tscCreateResPointerInfo(pRes, pQueryInfo) != TSDB_CODE_SUCCESS) { return pRes->code; } - + + STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); + if (pCmd->command == TSDB_SQL_RETRIEVE) { + tscSetResRawPtr(pRes, pQueryInfo); + } else if ((UTIL_TABLE_IS_CHILD_TABLE(pTableMetaInfo) || UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) && !TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_SUBQUERY)) { + tscSetResRawPtr(pRes, pQueryInfo); + } else if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0) && !TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_QUERY) && !TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE)) { + tscSetResRawPtr(pRes, pQueryInfo); + } + if (pSql->pSubscription != NULL) { int32_t numOfCols = pQueryInfo->fieldsInfo.numOfOutput; @@ -2136,7 +2218,7 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) { } pRes->row = 0; - tscDebug("%p numOfRows:%" PRId64 ", offset:%" PRId64 ", complete:%d", pSql, pRes->numOfRows, pRes->offset, pRes->completed); + tscDebug("%p numOfRows:%d, offset:%" PRId64 ", complete:%d", pSql, pRes->numOfRows, pRes->offset, pRes->completed); return 0; } @@ -2271,7 +2353,7 @@ int tscGetSTableVgroupInfo(SSqlObj *pSql, int32_t clauseIndex) { for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { STableMetaInfo *pMInfo = tscGetMetaInfo(pQueryInfo, i); STableMeta *pTableMeta = taosCacheAcquireByData(tscMetaCache, pMInfo->pTableMeta); - tscAddTableMetaInfo(pNewQueryInfo, pMInfo->name, pTableMeta, NULL, pMInfo->tagColList); + tscAddTableMetaInfo(pNewQueryInfo, pMInfo->name, pTableMeta, NULL, pMInfo->tagColList, pMInfo->pVgroupTables); } if ((code = tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE)) != TSDB_CODE_SUCCESS) { diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 8cac9b3398a0bc4569b52acc7858bbc1e50de542..de0177647234d0b63fa21a3b0fe2ec5763f40da3 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -28,7 +28,6 @@ #include "tutil.h" #include "ttimer.h" #include "tscProfile.h" -#include "ttimer.h" static bool validImpl(const char* str, size_t maxsize) { if (str == NULL) { @@ -161,6 +160,7 @@ static SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pa registerSqlObj(pSql); tsInsertHeadSize = sizeof(SMsgDesc) + sizeof(SSubmitMsg); + pObj->rid = taosAddRef(tscRefId, pObj); return pSql; } @@ -278,9 +278,9 @@ void taos_close(TAOS *taos) { SSqlObj* pHb = pObj->pHb; if (pHb != NULL && atomic_val_compare_exchange_ptr(&pObj->pHb, pHb, 0) == pHb) { - if (pHb->pRpcCtx != 
NULL) { // wait for rsp from dnode - rpcCancelRequest(pHb->pRpcCtx); - pHb->pRpcCtx = NULL; + if (pHb->rpcRid > 0) { // wait for rsp from dnode + rpcCancelRequest(pHb->rpcRid); + pHb->rpcRid = -1; } tscDebug("%p HB is freed", pHb); @@ -296,7 +296,8 @@ void taos_close(TAOS *taos) { } tscDebug("%p all sqlObj are freed, free tscObj and close dnodeConn:%p", pObj, pObj->pDnodeConn); - tscCloseTscObj(pObj); + + taosRemoveRef(tscRefId, pObj->rid); } void waitForQueryRsp(void *param, TAOS_RES *tres, int code) { @@ -320,7 +321,7 @@ TAOS_RES* taos_query_c(TAOS *taos, const char *sqlstr, uint32_t sqlLen, TAOS_RES if (sqlLen > (uint32_t)tsMaxSQLStringLen) { tscError("sql string exceeds max length:%d", tsMaxSQLStringLen); - terrno = TSDB_CODE_TSC_INVALID_SQL; + terrno = TSDB_CODE_TSC_EXCEED_SQL_LIMIT; return NULL; } @@ -393,7 +394,7 @@ int taos_affected_rows(TAOS_RES *tres) { SSqlObj* pSql = (SSqlObj*) tres; if (pSql == NULL || pSql->signature != pSql) return 0; - return (int)(pSql->res.numOfRows); + return pSql->res.numOfRows; } TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) { @@ -419,7 +420,16 @@ TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) { for(int32_t i = 0; i < pFieldInfo->numOfOutput; ++i) { SInternalField* pField = tscFieldInfoGetInternalField(pFieldInfo, i); if (pField->visible) { - f[j++] = pField->field; + f[j] = pField->field; + + // revise the length for binary and nchar fields + if (f[j].type == TSDB_DATA_TYPE_BINARY) { + f[j].bytes -= VARSTR_HEADER_SIZE; + } else if (f[j].type == TSDB_DATA_TYPE_NCHAR) { + f[j].bytes = (f[j].bytes - VARSTR_HEADER_SIZE)/TSDB_NCHAR_SIZE; + } + + j += 1; } } @@ -442,50 +452,30 @@ int taos_retrieve(TAOS_RES *res) { if (pCmd->command < TSDB_SQL_LOCAL) { pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; } - tscProcessSql(pSql); - return (int)pRes->numOfRows; + tscProcessSql(pSql); + return pRes->numOfRows; } -int taos_fetch_block_impl(TAOS_RES *res, TAOS_ROW *rows) { - SSqlObj *pSql = (SSqlObj *)res; - SSqlCmd *pCmd = &pSql->cmd; +static bool needToFetchNewBlock(SSqlObj* pSql) { SSqlRes *pRes = &pSql->res; + SSqlCmd *pCmd = &pSql->cmd; - if (pRes->qhandle == 0 || pSql->signature != pSql) { - *rows = NULL; - return 0; - } - - // Retrieve new block - tscResetForNextRetrieve(pRes); - if (pCmd->command < TSDB_SQL_LOCAL) { - pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; - } - - tscProcessSql(pSql); - if (pRes->numOfRows == 0) { - *rows = NULL; - return 0; - } - - // secondary merge has handle this situation - if (pCmd->command != TSDB_SQL_RETRIEVE_LOCALMERGE) { - pRes->numOfClauseTotal += pRes->numOfRows; - } - - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); - if (pQueryInfo == NULL) - return 0; - - assert(0); - for (int i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { - tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i); - } - - *rows = pRes->tsrow; - - return (int)((pQueryInfo->order.order == TSDB_ORDER_DESC) ? 
pRes->numOfRows : -pRes->numOfRows); + return (pRes->completed != true || hasMoreVnodesToTry(pSql) || hasMoreClauseToTry(pSql)) && + (pCmd->command == TSDB_SQL_RETRIEVE || + pCmd->command == TSDB_SQL_RETRIEVE_LOCALMERGE || + pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE || + pCmd->command == TSDB_SQL_FETCH || + pCmd->command == TSDB_SQL_SHOW || + pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE || + pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE || + pCmd->command == TSDB_SQL_SELECT || + pCmd->command == TSDB_SQL_DESCRIBE_TABLE || + pCmd->command == TSDB_SQL_SERV_STATUS || + pCmd->command == TSDB_SQL_CURRENT_DB || + pCmd->command == TSDB_SQL_SERV_VERSION || + pCmd->command == TSDB_SQL_CLI_VERSION || + pCmd->command == TSDB_SQL_CURRENT_USER); } TAOS_ROW taos_fetch_row(TAOS_RES *res) { @@ -508,77 +498,50 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) { // set the sql object owner tscSetSqlOwner(pSql); - // current data set are exhausted, fetch more data from node - if (pRes->row >= pRes->numOfRows && (pRes->completed != true || hasMoreVnodesToTry(pSql) || hasMoreClauseToTry(pSql)) && - (pCmd->command == TSDB_SQL_RETRIEVE || - pCmd->command == TSDB_SQL_RETRIEVE_LOCALMERGE || - pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE || - pCmd->command == TSDB_SQL_FETCH || - pCmd->command == TSDB_SQL_SHOW || - pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE || - pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE || - pCmd->command == TSDB_SQL_SELECT || - pCmd->command == TSDB_SQL_DESCRIBE_TABLE || - pCmd->command == TSDB_SQL_SERV_STATUS || - pCmd->command == TSDB_SQL_CURRENT_DB || - pCmd->command == TSDB_SQL_SERV_VERSION || - pCmd->command == TSDB_SQL_CLI_VERSION || - pCmd->command == TSDB_SQL_CURRENT_USER )) { + // current data set are exhausted, fetch more result from node + if (pRes->row >= pRes->numOfRows && needToFetchNewBlock(pSql)) { taos_fetch_rows_a(res, waitForRetrieveRsp, pSql->pTscObj); tsem_wait(&pSql->rspSem); } - void* data = doSetResultRowData(pSql, true); + void* data = doSetResultRowData(pSql); tscClearSqlOwner(pSql); return data; } int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) { -#if 0 SSqlObj *pSql = (SSqlObj *)res; - SSqlCmd *pCmd = &pSql->cmd; - SSqlRes *pRes = &pSql->res; - - int nRows = 0; - if (pSql == NULL || pSql->signature != pSql) { terrno = TSDB_CODE_TSC_DISCONNECTED; - *rows = NULL; return 0; } - // projection query on metric, pipeline retrieve data from vnode list, - // instead of two-stage mergednodeProcessMsgFromShell free qhandle - nRows = taos_fetch_block_impl(res, rows); - - // current subclause is completed, try the next subclause - while (rows == NULL && pCmd->clauseIndex < pCmd->numOfClause - 1) { - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); - - pSql->cmd.command = pQueryInfo->command; - pCmd->clauseIndex++; - - pRes->numOfTotal += pRes->numOfClauseTotal; - pRes->numOfClauseTotal = 0; - pRes->rspType = 0; + SSqlCmd *pCmd = &pSql->cmd; + SSqlRes *pRes = &pSql->res; - pSql->subState.numOfSub = 0; - taosTFree(pSql->pSubs); + if (pRes->qhandle == 0 || + pRes->code == TSDB_CODE_TSC_QUERY_CANCELLED || + pCmd->command == TSDB_SQL_RETRIEVE_EMPTY_RESULT || + pCmd->command == TSDB_SQL_INSERT) { + return 0; + } - assert(pSql->fp == NULL); + tscResetForNextRetrieve(pRes); - tscDebug("%p try data in the next subclause:%d, total subclause:%d", pSql, pCmd->clauseIndex, pCmd->numOfClause); - tscProcessSql(pSql); + // set the sql object owner + tscSetSqlOwner(pSql); - nRows = taos_fetch_block_impl(res, rows); + // current data set are exhausted, fetch more data 
from node + if (needToFetchNewBlock(pSql)) { + taos_fetch_rows_a(res, waitForRetrieveRsp, pSql->pTscObj); + tsem_wait(&pSql->rspSem); } - return nRows; -#endif + *rows = pRes->urow; - (*rows) = taos_fetch_row(res); - return ((*rows) != NULL)? 1:0; + tscClearSqlOwner(pSql); + return pRes->numOfRows; } int taos_select_db(TAOS *taos, const char *db) { @@ -599,7 +562,7 @@ int taos_select_db(TAOS *taos, const char *db) { } // send free message to vnode to free qhandle and corresponding resources in vnode -static UNUSED_FUNC bool tscKillQueryInDnode(SSqlObj* pSql) { +static bool tscKillQueryInDnode(SSqlObj* pSql) { SSqlCmd* pCmd = &pSql->cmd; SSqlRes* pRes = &pSql->res; @@ -746,9 +709,9 @@ static void tscKillSTableQuery(SSqlObj *pSql) { assert(pSubObj->self == (SSqlObj**) p); pSubObj->res.code = TSDB_CODE_TSC_QUERY_CANCELLED; - if (pSubObj->pRpcCtx != NULL) { - rpcCancelRequest(pSubObj->pRpcCtx); - pSubObj->pRpcCtx = NULL; + if (pSubObj->rpcRid > 0) { + rpcCancelRequest(pSubObj->rpcRid); + pSubObj->rpcRid = -1; } tscQueueAsyncRes(pSubObj); @@ -773,7 +736,7 @@ void taos_stop_query(TAOS_RES *res) { SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); if (tscIsTwoStageSTableQuery(pQueryInfo, 0)) { - assert(pSql->pRpcCtx == NULL); + assert(pSql->rpcRid <= 0); tscKillSTableQuery(pSql); } else { if (pSql->cmd.command < TSDB_SQL_LOCAL) { @@ -782,9 +745,9 @@ void taos_stop_query(TAOS_RES *res) { * reset and freed in the processMsgFromServer function, and causes the invalid * write problem for rpcCancelRequest. */ - if (pSql->pRpcCtx != NULL) { - rpcCancelRequest(pSql->pRpcCtx); - pSql->pRpcCtx = NULL; + if (pSql->rpcRid > 0) { + rpcCancelRequest(pSql->rpcRid); + pSql->rpcRid = -1; } tscQueueAsyncRes(pSql); @@ -794,6 +757,25 @@ void taos_stop_query(TAOS_RES *res) { tscDebug("%p query is cancelled", res); } +bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col) { + SSqlObj *pSql = (SSqlObj *)res; + if (pSql == NULL || pSql->signature != pSql) { + return true; + } + + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + if (pQueryInfo == NULL) { + return true; + } + + SInternalField* pInfo = (SInternalField*)TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, col); + if (col < 0 || col >= tscNumOfFields(pQueryInfo) || row < 0 || row > pSql->res.numOfRows) { + return true; + } + + return isNull(((char*) pSql->res.urow[col]) + row * pInfo->field.bytes, pInfo->field.type); +} + int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) { int len = 0; for (int i = 0; i < num_fields; ++i) { @@ -891,18 +873,16 @@ int taos_validate_sql(TAOS *taos, const char *sql) { int32_t sqlLen = (int32_t)strlen(sql); if (sqlLen > tsMaxSQLStringLen) { tscError("%p sql too long", pSql); - pRes->code = TSDB_CODE_TSC_INVALID_SQL; - taosTFree(pSql); - return pRes->code; + tfree(pSql); + return TSDB_CODE_TSC_EXCEED_SQL_LIMIT; } pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1); if (pSql->sqlstr == NULL) { - pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; tscError("%p failed to malloc sql string buffer", pSql); tscDebug("%p Valid SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(pSql), pObj); - taosTFree(pSql); - return pRes->code; + tfree(pSql); + return TSDB_CODE_TSC_OUT_OF_MEMORY; } strtolower(pSql->sqlstr, sql); diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c index 0f67911bbea992503979e5de019e4e10d3bf3c14..68c3bcae165050863cc4bf9c92a1510581531c3a 100644 --- a/src/client/src/tscStream.c +++ b/src/client/src/tscStream.c @@ -273,7 +273,7 @@ static 
void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf taosCacheRelease(tscMetaCache, (void**)&(pTableMetaInfo->pTableMeta), false); tscFreeSqlResult(pSql); - taosTFree(pSql->pSubs); + tfree(pSql->pSubs); pSql->subState.numOfSub = 0; pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList); tscSetNextLaunchTimer(pStream, pSql); @@ -617,6 +617,6 @@ void taos_close_stream(TAOS_STREAM *handle) { pStream->pSql = NULL; taos_free_result(pSql); - taosTFree(pStream); + tfree(pStream); } } diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 6b615c3a9b14f97f57013f476b60acc0db53db53..6ebbeeef411f8922662e7045027cf55410cbf89d 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -23,7 +23,6 @@ #include "tscSubquery.h" #include "tschemautil.h" #include "tsclient.h" -#include "tscSubquery.h" typedef struct SInsertSupporter { SSqlObj* pSql; @@ -33,11 +32,26 @@ typedef struct SInsertSupporter { static void freeJoinSubqueryObj(SSqlObj* pSql); static bool tscHasRemainDataInSubqueryResultSet(SSqlObj *pSql); -static bool tsCompare(int32_t order, int64_t left, int64_t right) { +static int32_t tsCompare(int32_t order, int64_t left, int64_t right) { + if (left == right) { + return 0; + } + if (order == TSDB_ORDER_ASC) { - return left < right; + return left < right? -1:1; } else { - return left > right; + return left > right? -1:1; + } +} + +static void skipRemainValue(STSBuf* pTSBuf, tVariant* tag1) { + while (tsBufNextPos(pTSBuf)) { + STSElem el1 = tsBufGetElem(pTSBuf); + + int32_t res = tVariantCompare(el1.tag, tag1); + if (res != 0) { // it is a record with new tag + return; + } } } @@ -52,13 +66,15 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJ SLimitVal* pLimit = &pQueryInfo->limit; int32_t order = pQueryInfo->order.order; - + SQueryInfo* pSubQueryInfo1 = tscGetQueryInfoDetail(&pSql->pSubs[0]->cmd, 0); SQueryInfo* pSubQueryInfo2 = tscGetQueryInfoDetail(&pSql->pSubs[1]->cmd, 0); - + pSubQueryInfo1->tsBuf = output1; pSubQueryInfo2->tsBuf = output2; + TSKEY st = taosGetTimestampUs(); + // no result generated, return directly if (pSupporter1->pTSBuf == NULL || pSupporter2->pTSBuf == NULL) { tscDebug("%p at least one ts-comp is empty, 0 for secondary query after ts blocks intersecting", pSql); @@ -87,58 +103,74 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJ int64_t numOfInput1 = 1; int64_t numOfInput2 = 1; - while (1) { - STSElem elem1 = tsBufGetElem(pSupporter1->pTSBuf); - STSElem elem2 = tsBufGetElem(pSupporter2->pTSBuf); + while(1) { + STSElem elem = tsBufGetElem(pSupporter1->pTSBuf); -#ifdef _DEBUG_VIEW - tscInfo("%" PRId64 ", tags:%"PRId64" \t %" PRId64 ", tags:%"PRId64, elem1.ts, elem1.tag.i64Key, elem2.ts, elem2.tag.i64Key); -#endif + // no data in pSupporter1 anymore, jump out of loop + if (!tsBufIsValidElem(&elem)) { + break; + } - int32_t res = tVariantCompare(&elem1.tag, &elem2.tag); - if (res == -1 || (res == 0 && tsCompare(order, elem1.ts, elem2.ts))) { - if (!tsBufNextPos(pSupporter1->pTSBuf)) { - break; - } + // find the data in supporter2 with the same tag value + STSElem e2 = tsBufFindElemStartPosByTag(pSupporter2->pTSBuf, elem.tag); - numOfInput1++; - } else if ((res > 0) || (res == 0 && tsCompare(order, elem2.ts, elem1.ts))) { - if (!tsBufNextPos(pSupporter2->pTSBuf)) { - break; - } + /** + * there are elements in pSupporter2 with the same tag, continue + */ + tVariant tag1 = {0}; + tVariantAssign(&tag1, elem.tag); - numOfInput2++; 
- } else { - /* - * in case of stable query, limit/offset is not applied here. the limit/offset is applied to the - * final results which is acquired after the secondry merge of in the client. - */ - if (pLimit->offset == 0 || pQueryInfo->interval.interval > 0 || QUERY_IS_STABLE_QUERY(pQueryInfo->type)) { - if (win->skey > elem1.ts) { - win->skey = elem1.ts; + if (tsBufIsValidElem(&e2)) { + while (1) { + STSElem elem1 = tsBufGetElem(pSupporter1->pTSBuf); + STSElem elem2 = tsBufGetElem(pSupporter2->pTSBuf); + + // data with the current tag are exhausted + if (!tsBufIsValidElem(&elem1) || tVariantCompare(elem1.tag, &tag1) != 0) { + break; } - - if (win->ekey < elem1.ts) { - win->ekey = elem1.ts; + + if (!tsBufIsValidElem(&elem2) || tVariantCompare(elem2.tag, &tag1) != 0) { // ignore all records with the same tag + skipRemainValue(pSupporter1->pTSBuf, &tag1); + break; } - - tsBufAppend(output1, elem1.vnode, &elem1.tag, (const char*)&elem1.ts, sizeof(elem1.ts)); - tsBufAppend(output2, elem2.vnode, &elem2.tag, (const char*)&elem2.ts, sizeof(elem2.ts)); - } else { - pLimit->offset -= 1; - } - if (!tsBufNextPos(pSupporter1->pTSBuf)) { - break; - } + /* + * in case of stable query, limit/offset is not applied here. the limit/offset is applied to the + * final results, which are acquired after the secondary merge in the client. + */ + int32_t re = tsCompare(order, elem1.ts, elem2.ts); + if (re < 0) { + tsBufNextPos(pSupporter1->pTSBuf); + numOfInput1++; + } else if (re > 0) { + tsBufNextPos(pSupporter2->pTSBuf); + numOfInput2++; + } else { + if (pLimit->offset == 0 || pQueryInfo->interval.interval > 0 || QUERY_IS_STABLE_QUERY(pQueryInfo->type)) { + if (win->skey > elem1.ts) { + win->skey = elem1.ts; + } + + if (win->ekey < elem1.ts) { + win->ekey = elem1.ts; + } + + tsBufAppend(output1, elem1.id, elem1.tag, (const char*)&elem1.ts, sizeof(elem1.ts)); + tsBufAppend(output2, elem2.id, elem2.tag, (const char*)&elem2.ts, sizeof(elem2.ts)); + } else { + pLimit->offset -= 1; // does offset apply to projection?
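/*
 * Editor's note (illustrative aside): the hunk above replaces the old single-cursor walk with a
 * tag-grouped merge join: for each tag value present in both supporters, two cursors advance over
 * timestamp streams that are sorted in query order, the side with the smaller timestamp is advanced,
 * and a pair is emitted when the timestamps are equal. The sketch below shows the same idea on plain
 * sorted arrays; the element type, the function names and the emit() callback are hypothetical and
 * are not part of the TDengine tsBuf API.
 */
#include <stdint.h>
#include <stddef.h>

typedef struct { int64_t tag; int64_t ts; } SKeyedTs;  /* hypothetical element type */

/* Intersect two (tag, ts)-sorted arrays; emit(ts) is called once per matching pair. */
static void intersectSortedStreams(const SKeyedTs *a, size_t na, const SKeyedTs *b, size_t nb,
                                   void (*emit)(int64_t ts)) {
  size_t i = 0, j = 0;
  while (i < na && j < nb) {
    if (a[i].tag < b[j].tag) { i++; continue; }          /* tag only in a: skip this element */
    if (a[i].tag > b[j].tag) { j++; continue; }          /* tag only in b: skip this element */
    if (a[i].ts < b[j].ts)      { i++; }                 /* same tag: advance the smaller timestamp */
    else if (a[i].ts > b[j].ts) { j++; }
    else { emit(a[i].ts); i++; j++; }                    /* equal timestamp: a qualified row pair */
  }
}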
+ } - numOfInput1++; + tsBufNextPos(pSupporter1->pTSBuf); + numOfInput1++; - if (!tsBufNextPos(pSupporter2->pTSBuf)) { - break; + tsBufNextPos(pSupporter2->pTSBuf); + numOfInput2++; + } } - - numOfInput2++; + } else { // no data with this tag in pSupporter2, skip the remaining data with the same tag in pSupporter1 + skipRemainValue(pSupporter1->pTSBuf, &tag1); } } @@ -158,9 +190,11 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJ tsBufDestroy(pSupporter1->pTSBuf); tsBufDestroy(pSupporter2->pTSBuf); - tscDebug("%p input1:%" PRId64 ", input2:%" PRId64 ", final:%" PRId64 " for secondary query after ts blocks " - "intersecting, skey:%" PRId64 ", ekey:%" PRId64, pSql, numOfInput1, numOfInput2, output1->numOfTotal, - win->skey, win->ekey); + TSKEY et = taosGetTimestampUs(); + tscDebug("%p input1:%" PRId64 ", input2:%" PRId64 ", final:%" PRId64 " in %d vnodes for secondary query after ts blocks " + "intersecting, skey:%" PRId64 ", ekey:%" PRId64 ", numOfVnode:%d, elapsed time:%" PRId64 " us", + pSql, numOfInput1, numOfInput2, output1->numOfTotal, output1->numOfGroups, win->skey, win->ekey, + tsBufGetNumOfGroup(output1), et - st); return output1->numOfTotal; } @@ -216,7 +250,12 @@ static void tscDestroyJoinSupporter(SJoinSupporter* pSupporter) { pSupporter->f = NULL; } - taosTFree(pSupporter->pIdTagList); + if (pSupporter->pVgroupTables != NULL) { + taosArrayDestroy(pSupporter->pVgroupTables); + pSupporter->pVgroupTables = NULL; + } + + tfree(pSupporter->pIdTagList); tscTagCondRelease(&pSupporter->tagCond); free(pSupporter); } @@ -240,6 +279,68 @@ static UNUSED_FUNC bool needSecondaryQuery(SQueryInfo* pQueryInfo) { return false; } +static void filterVgroupTables(SQueryInfo* pQueryInfo, SArray* pVgroupTables) { + int32_t num = 0; + int32_t* list = NULL; + tsBufGetGroupIdList(pQueryInfo->tsBuf, &num, &list); + + // A virtual node whose tables are all disqualified after the timestamp intersection + // is removed to avoid the next-stage query. + // TODO: if some tables in a vnode are not qualified for the next-stage query, discard them as well.
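/*
 * Editor's note (illustrative aside): filterVgroupTables above keeps only the vnode groups whose id
 * still appears in the group-id list produced by the timestamp intersection. The minimal sketch below
 * shows the same in-place "keep only the listed ids" filtering on a plain array; the function names
 * are hypothetical and this is not the taosArray API used by the patch.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

static bool idInList(int32_t id, const int32_t *list, size_t numIds) {
  for (size_t i = 0; i < numIds; ++i) {
    if (list[i] == id) return true;
  }
  return false;
}

/* Compact ids[0..*numIds) in place, keeping only entries that are present in keep[0..numKeep). */
static void keepListedIds(int32_t *ids, size_t *numIds, const int32_t *keep, size_t numKeep) {
  size_t w = 0;
  for (size_t r = 0; r < *numIds; ++r) {
    if (idInList(ids[r], keep, numKeep)) {
      ids[w++] = ids[r];                     /* keep: move it up to the write cursor */
    }                                        /* otherwise drop it */
  }
  *numIds = w;
}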
+ for (int32_t k = 0; k < taosArrayGetSize(pVgroupTables);) { + SVgroupTableInfo* p = taosArrayGet(pVgroupTables, k); + + bool found = false; + for (int32_t f = 0; f < num; ++f) { + if (p->vgInfo.vgId == list[f]) { + found = true; + break; + } + } + + if (!found) { + tscRemoveVgroupTableGroup(pVgroupTables, k); + } else { + k++; + } + } + + assert(taosArrayGetSize(pVgroupTables) > 0); + TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_MULTITABLE_QUERY); + + tfree(list); +} + +static SArray* buildVgroupTableByResult(SQueryInfo* pQueryInfo, SArray* pVgroupTables) { + int32_t num = 0; + int32_t* list = NULL; + tsBufGetGroupIdList(pQueryInfo->tsBuf, &num, &list); + + size_t numOfGroups = taosArrayGetSize(pVgroupTables); + + SArray* pNew = taosArrayInit(num, sizeof(SVgroupTableInfo)); + + SVgroupTableInfo info; + for (int32_t i = 0; i < num; ++i) { + int32_t vnodeId = list[i]; + + for (int32_t j = 0; j < numOfGroups; ++j) { + SVgroupTableInfo* p1 = taosArrayGet(pVgroupTables, j); + if (p1->vgInfo.vgId == vnodeId) { + tscVgroupTableCopy(&info, p1); + break; + } + } + + taosArrayPush(pNew, &info); + } + + tfree(list); + TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_MULTITABLE_QUERY); + + return pNew; +} + /* * launch secondary stage query to fetch the result that contains timestamp in set */ @@ -305,7 +406,6 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) { // set the second stage sub query for join process TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE); - memcpy(&pQueryInfo->interval, &pSupporter->interval, sizeof(pQueryInfo->interval)); tscTagCondCopy(&pQueryInfo->tagCond, &pSupporter->tagCond); @@ -313,24 +413,27 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) { pQueryInfo->colList = pSupporter->colList; pQueryInfo->exprList = pSupporter->exprList; pQueryInfo->fieldsInfo = pSupporter->fieldsInfo; + pQueryInfo->groupbyExpr = pSupporter->groupInfo; + + assert(pNew->subState.numOfSub == 0 && pNew->cmd.numOfClause == 1 && pQueryInfo->numOfTables == 1); + + tscFieldInfoUpdateOffset(pQueryInfo); + + STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); + pTableMetaInfo->pVgroupTables = pSupporter->pVgroupTables; pSupporter->exprList = NULL; pSupporter->colList = NULL; + pSupporter->pVgroupTables = NULL; memset(&pSupporter->fieldsInfo, 0, sizeof(SFieldInfo)); - - SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0); - assert(pNew->subState.numOfSub == 0 && pNew->cmd.numOfClause == 1 && pNewQueryInfo->numOfTables == 1); - - tscFieldInfoUpdateOffset(pNewQueryInfo); - - STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pNewQueryInfo, 0); - + memset(&pSupporter->groupInfo, 0, sizeof(SSqlGroupbyExpr)); + /* * When handling the projection query, the offset value will be modified for table-table join, which is changed * during the timestamp intersection. 
*/ pSupporter->limit = pQueryInfo->limit; - pNewQueryInfo->limit = pSupporter->limit; + pQueryInfo->limit = pSupporter->limit; SColumnIndex index = {.tableIndex = 0, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX}; SSchema* s = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, 0); @@ -345,7 +448,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) { tscAddSpecialColumnForSelect(pQueryInfo, 0, functionId, &index, s, TSDB_COL_NORMAL); tscPrintSelectClause(pNew, 0); - tscFieldInfoUpdateOffset(pNewQueryInfo); + tscFieldInfoUpdateOffset(pQueryInfo); pExpr = tscSqlExprGet(pQueryInfo, 0); } @@ -356,14 +459,25 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) { int16_t colId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->id.uid); // set the tag column id for executor to extract correct tag value - pExpr->param[0].i64Key = colId; + pExpr->param[0] = (tVariant) {.i64Key = colId, .nType = TSDB_DATA_TYPE_BIGINT, .nLen = sizeof(int64_t)}; pExpr->numOfParams = 1; } - size_t numOfCols = taosArrayGetSize(pNewQueryInfo->colList); + if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { + assert(pTableMetaInfo->pVgroupTables != NULL); + if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) { + SArray* p = buildVgroupTableByResult(pQueryInfo, pTableMetaInfo->pVgroupTables); + tscFreeVgroupTableInfo(pTableMetaInfo->pVgroupTables); + pTableMetaInfo->pVgroupTables = p; + } else { + filterVgroupTables(pQueryInfo, pTableMetaInfo->pVgroupTables); + } + } + + size_t numOfCols = taosArrayGetSize(pQueryInfo->colList); tscDebug("%p subquery:%p tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%" PRIzu ", colList:%" PRIzu ", fieldsInfo:%d, name:%s", - pSql, pNew, 0, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, taosArrayGetSize(pNewQueryInfo->exprList), - numOfCols, pNewQueryInfo->fieldsInfo.numOfOutput, pTableMetaInfo->name); + pSql, pNew, 0, pTableMetaInfo->vgroupIndex, pQueryInfo->type, taosArrayGetSize(pQueryInfo->exprList), + numOfCols, pQueryInfo->fieldsInfo.numOfOutput, pTableMetaInfo->name); } //prepare the subqueries object failed, abort @@ -409,7 +523,7 @@ static void quitAllSubquery(SSqlObj* pSqlObj, SJoinSupporter* pSupporter) { assert(pSqlObj->subState.numOfRemain > 0); if (atomic_sub_fetch_32(&pSqlObj->subState.numOfRemain, 1) <= 0) { - tscError("%p all subquery return and query failed, global code:%d", pSqlObj, pSqlObj->res.code); + tscError("%p all subquery return and query failed, global code:%s", pSqlObj, tstrerror(pSqlObj->res.code)); freeJoinSubqueryObj(pSqlObj); } } @@ -418,20 +532,40 @@ static void quitAllSubquery(SSqlObj* pSqlObj, SJoinSupporter* pSupporter) { static void updateQueryTimeRange(SQueryInfo* pQueryInfo, STimeWindow* win) { assert(pQueryInfo->window.skey <= win->skey && pQueryInfo->window.ekey >= win->ekey); pQueryInfo->window = *win; + + } -int32_t tscCompareTidTags(const void* p1, const void* p2) { - const STidTags* t1 = (const STidTags*) varDataVal(p1); - const STidTags* t2 = (const STidTags*) varDataVal(p2); +int32_t tidTagsCompar(const void* p1, const void* p2) { + const STidTags* t1 = (const STidTags*) (p1); + const STidTags* t2 = (const STidTags*) (p2); if (t1->vgId != t2->vgId) { return (t1->vgId > t2->vgId) ? 1 : -1; } - if (t1->tid != t2->tid) { - return (t1->tid > t2->tid) ? 1 : -1; + tstr* tag1 = (tstr*) t1->tag; + tstr* tag2 = (tstr*) t2->tag; + + if (tag1->len != tag2->len) { + return (tag1->len > tag2->len)? 
1: -1; + } + + return strncmp(tag1->data, tag2->data, tag1->len); +} + +int32_t tagValCompar(const void* p1, const void* p2) { + const STidTags* t1 = (const STidTags*) varDataVal(p1); + const STidTags* t2 = (const STidTags*) varDataVal(p2); + + tstr* tag1 = (tstr*) t1->tag; + tstr* tag2 = (tstr*) t2->tag; + + if (tag1->len != tag2->len) { + return (tag1->len > tag2->len)? 1: -1; } - return 0; + + return memcmp(tag1->data, tag2->data, tag1->len); } void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArray* tables) { @@ -449,7 +583,7 @@ void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArr SVgroupTableInfo info = {{0}}; for (int32_t m = 0; m < pvg->numOfVgroups; ++m) { if (tt->vgId == pvg->vgroups[m].vgId) { - tscSCMVgroupInfoCopy(&info.vgInfo, &pvg->vgroups[m]); + tscSVgroupInfoCopy(&info.vgInfo, &pvg->vgroups[m]); break; } } @@ -457,27 +591,40 @@ void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArr vgTables = taosArrayInit(4, sizeof(STableIdInfo)); info.itemList = vgTables; + + if (taosArrayGetSize(result) > 0) { + SVgroupTableInfo* prevGroup = taosArrayGet(result, taosArrayGetSize(result) - 1); + tscDebug("%p vgId:%d, tables:%"PRIzu, pSql, prevGroup->vgInfo.vgId, taosArrayGetSize(prevGroup->itemList)); + } + taosArrayPush(result, &info); } - tscDebug("%p tid:%d, uid:%"PRIu64",vgId:%d added for vnode query", pSql, tt->tid, tt->uid, tt->vgId) STableIdInfo item = {.uid = tt->uid, .tid = tt->tid, .key = INT64_MIN}; taosArrayPush(vgTables, &item); + + tscTrace("%p tid:%d, uid:%"PRIu64",vgId:%d added", pSql, tt->tid, tt->uid, tt->vgId); prev = tt; } pTableMetaInfo->pVgroupTables = result; pTableMetaInfo->vgroupIndex = 0; + + if (taosArrayGetSize(result) > 0) { + SVgroupTableInfo* g = taosArrayGet(result, taosArrayGetSize(result) - 1); + tscDebug("%p vgId:%d, tables:%"PRIzu, pSql, g->vgInfo.vgId, taosArrayGetSize(g->itemList)); + } } static void issueTSCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj* pParent) { SSqlCmd* pCmd = &pSql->cmd; tscClearSubqueryInfo(pCmd); tscFreeSqlResult(pSql); - + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + assert(pQueryInfo->numOfTables == 1); + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - tscInitQueryInfo(pQueryInfo); TSDB_QUERY_CLEAR_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY); @@ -524,13 +671,7 @@ static void issueTSCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj* tscProcessSql(pSql); } -static bool checkForDuplicateTagVal(SQueryInfo* pQueryInfo, SJoinSupporter* p1, SSqlObj* pPSqlObj) { - STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - - SSchema* pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);// todo: tags mismatch, tags not completed - SColumn *pCol = taosArrayGetP(pTableMetaInfo->tagColList, 0); - SSchema *pColSchema = &pSchema[pCol->colIndex.columnIndex]; - +static bool checkForDuplicateTagVal(SSchema* pColSchema, SJoinSupporter* p1, SSqlObj* pPSqlObj) { for(int32_t i = 1; i < p1->num; ++i) { STidTags* prev = (STidTags*) varDataVal(p1->pIdTagList + (i - 1) * p1->tagSize); STidTags* p = (STidTags*) varDataVal(p1->pIdTagList + i * p1->tagSize); @@ -547,24 +688,26 @@ static bool checkForDuplicateTagVal(SQueryInfo* pQueryInfo, SJoinSupporter* p1, } static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pParentSql, SArray** s1, SArray** s2) { - tscDebug("%p all subqueries retrieve complete, do tags match", pParentSql); - SJoinSupporter* p1 = 
pParentSql->pSubs[0]->param; SJoinSupporter* p2 = pParentSql->pSubs[1]->param; - qsort(p1->pIdTagList, p1->num, p1->tagSize, tscCompareTidTags); - qsort(p2->pIdTagList, p2->num, p2->tagSize, tscCompareTidTags); + tscDebug("%p all subquery retrieve complete, do tags match, %d, %d", pParentSql, p1->num, p2->num); + + // sort according to the tag value + qsort(p1->pIdTagList, p1->num, p1->tagSize, tagValCompar); + qsort(p2->pIdTagList, p2->num, p2->tagSize, tagValCompar); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); int16_t tagColId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->id.uid); - SSchema* pColSchema = tscGetTableColumnSchemaById(pTableMetaInfo->pTableMeta, tagColId); + SSchema* pColSchema = tscGetColumnSchemaById(pTableMetaInfo->pTableMeta, tagColId); // int16_t for padding - *s1 = taosArrayInit(p1->num, p1->tagSize - sizeof(int16_t)); - *s2 = taosArrayInit(p2->num, p2->tagSize - sizeof(int16_t)); + int32_t size = p1->tagSize - sizeof(int16_t); + *s1 = taosArrayInit(p1->num, size); + *s2 = taosArrayInit(p2->num, size); - if (!(checkForDuplicateTagVal(pQueryInfo, p1, pParentSql) && checkForDuplicateTagVal(pQueryInfo, p2, pParentSql))) { + if (!(checkForDuplicateTagVal(pColSchema, p1, pParentSql) && checkForDuplicateTagVal(pColSchema, p2, pParentSql))) { return TSDB_CODE_QRY_DUP_JOIN_KEY; } @@ -590,6 +733,27 @@ static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pPar } } + // reorganize the tid-tag value according to both the vgroup id and tag values + // sort according to the tag value + size_t t1 = taosArrayGetSize(*s1); + size_t t2 = taosArrayGetSize(*s2); + + qsort((*s1)->pData, t1, size, tidTagsCompar); + qsort((*s2)->pData, t2, size, tidTagsCompar); + +#if 0 + for(int32_t k = 0; k < t1; ++k) { + STidTags* p = (*s1)->pData + size * k; + printf("%d, tag:%s\n", p->vgId, ((tstr*)(p->tag))->data); + } + + for(int32_t k = 0; k < t1; ++k) { + STidTags* p = (*s2)->pData + size * k; + printf("%d, tag:%s\n", p->vgId, ((tstr*)(p->tag))->data); + } +#endif + + tscDebug("%p tags match complete, result: %"PRIzu", %"PRIzu, pParentSql, t1, t2); return TSDB_CODE_SUCCESS; } @@ -689,11 +853,15 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow } if (taosArrayGetSize(s1) == 0 || taosArrayGetSize(s2) == 0) { // no results,return. 
+ assert(pParentSql->fp != tscJoinQueryCallback); + tscDebug("%p tag intersect does not generated qualified tables for join, free all sub SqlObj and quit", pParentSql); freeJoinSubqueryObj(pParentSql); // set no result command pParentSql->cmd.command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; + assert(pParentSql->fp != tscJoinQueryCallback); + (*pParentSql->fp)(pParentSql->param, pParentSql, 0); } else { // proceed to for ts_comp query @@ -708,6 +876,12 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow STableMetaInfo* pTableMetaInfo2 = tscGetMetaInfo(pQueryInfo2, 0); tscBuildVgroupTableInfo(pParentSql, pTableMetaInfo2, s2); + SSqlObj* psub1 = pParentSql->pSubs[0]; + ((SJoinSupporter*)psub1->param)->pVgroupTables = tscVgroupTableInfoClone(pTableMetaInfo1->pVgroupTables); + + SSqlObj* psub2 = pParentSql->pSubs[1]; + ((SJoinSupporter*)psub2->param)->pVgroupTables = tscVgroupTableInfoClone(pTableMetaInfo2->pVgroupTables); + pParentSql->subState.numOfSub = 2; pParentSql->subState.numOfRemain = pParentSql->subState.numOfSub; @@ -766,9 +940,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow pSupporter->pTSBuf = pBuf; } else { assert(pQueryInfo->numOfTables == 1); // for subquery, only one - STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - - tsBufMerge(pSupporter->pTSBuf, pBuf, pTableMetaInfo->vgroupIndex); + tsBufMerge(pSupporter->pTSBuf, pBuf); tsBufDestroy(pBuf); } @@ -776,7 +948,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow if (!pRes->completed) { taosGetTmpfilePath("ts-join", pSupporter->path); pSupporter->f = fopen(pSupporter->path, "w"); - pRes->row = (int32_t)pRes->numOfRows; + pRes->row = pRes->numOfRows; taos_fetch_rows_a(tres, tsCompRetrieveCallback, param); return; @@ -802,7 +974,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow // TODO check for failure pSupporter->f = fopen(pSupporter->path, "w"); - pRes->row = (int32_t)pRes->numOfRows; + pRes->row = pRes->numOfRows; // set the callback function pSql->fp = tscJoinQueryCallback; @@ -835,6 +1007,8 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow // launch the query the retrieve actual results from vnode along with the filtered timestamp SQueryInfo* pPQueryInfo = tscGetQueryInfoDetail(&pParentSql->cmd, pParentSql->cmd.clauseIndex); updateQueryTimeRange(pPQueryInfo, &win); + + //update the vgroup that involved in real data query tscLaunchRealSubqueries(pParentSql); } @@ -868,20 +1042,28 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR assert(pQueryInfo->numOfTables == 1); // for projection query, need to try next vnode if current vnode is exhausted - if ((++pTableMetaInfo->vgroupIndex) < pTableMetaInfo->vgroupList->numOfVgroups) { - pState->numOfRemain = 1; - pState->numOfSub = 1; + int32_t numOfVgroups = 0; // TODO refactor + if (pTableMetaInfo->pVgroupTables != NULL) { + numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables); + } else { + numOfVgroups = pTableMetaInfo->vgroupList->numOfVgroups; + } + if ((++pTableMetaInfo->vgroupIndex) < numOfVgroups) { + tscDebug("%p no result in current vnode anymore, try next vnode, vgIndex:%d", pSql, pTableMetaInfo->vgroupIndex); pSql->cmd.command = TSDB_SQL_SELECT; pSql->fp = tscJoinQueryCallback; - tscProcessSql(pSql); + tscProcessSql(pSql); return; + } else { + tscDebug("%p no result in current subquery anymore", pSql); } } - if 
(atomic_sub_fetch_32(&pParentSql->subState.numOfRemain, 1) > 0) { - tscDebug("%p sub:%p completed, remain:%d, total:%d", pParentSql, tres, pParentSql->subState.numOfRemain, pState->numOfSub); + assert(pState->numOfRemain > 0); + if (atomic_sub_fetch_32(&pState->numOfRemain, 1) > 0) { + tscDebug("%p sub:%p completed, remain:%d, total:%d", pParentSql, tres, pState->numOfRemain, pState->numOfSub); return; } @@ -893,62 +1075,66 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR } // update the records for each subquery in parent sql object. + bool stableQuery = tscIsTwoStageSTableQuery(pQueryInfo, 0); for (int32_t i = 0; i < pState->numOfSub; ++i) { if (pParentSql->pSubs[i] == NULL) { + tscDebug("%p %p sub:%d not retrieve data", pParentSql, NULL, i); continue; } SSqlRes* pRes1 = &pParentSql->pSubs[i]->res; - pRes1->numOfClauseTotal += pRes1->numOfRows; - } - // data has retrieved to client, build the join results - tscBuildResFromSubqueries(pParentSql); -} - -static SJoinSupporter* tscUpdateSubqueryStatus(SSqlObj* pSql, int32_t numOfFetch) { - int32_t notInvolved = 0; - SJoinSupporter* pSupporter = NULL; - SSubqueryState* pState = &pSql->subState; - - for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) { - if (pSql->pSubs[i] == NULL) { - notInvolved++; + if (pRes1->row > 0 && pRes1->numOfRows > 0) { + tscDebug("%p sub:%p index:%d numOfRows:%d total:%"PRId64 " (not retrieve)", pParentSql, pParentSql->pSubs[i], i, + pRes1->numOfRows, pRes1->numOfTotal); + assert(pRes1->row < pRes1->numOfRows); } else { - pSupporter = (SJoinSupporter*)pSql->pSubs[i]->param; + if (!stableQuery) { + pRes1->numOfClauseTotal += pRes1->numOfRows; + } + + tscDebug("%p sub:%p index:%d numOfRows:%d total:%"PRId64, pParentSql, pParentSql->pSubs[i], i, + pRes1->numOfRows, pRes1->numOfTotal); } } - - pState->numOfRemain = numOfFetch; - return pSupporter; + + // data has retrieved to client, build the join results + tscBuildResFromSubqueries(pParentSql); } -void tscFetchDatablockFromSubquery(SSqlObj* pSql) { +void tscFetchDatablockForSubquery(SSqlObj* pSql) { assert(pSql->subState.numOfSub >= 1); int32_t numOfFetch = 0; - bool hasData = true; + bool hasData = true; + bool reachLimit = false; + + // if the subquery is NULL, it does not involved in the final result generation for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) { - // if the subquery is NULL, it does not involved in the final result generation SSqlObj* pSub = pSql->pSubs[i]; if (pSub == NULL) { continue; } - + SSqlRes *pRes = &pSub->res; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSub->cmd, 0); if (!tscHasReachLimitation(pQueryInfo, pRes)) { if (pRes->row >= pRes->numOfRows) { + // no data left in current result buffer hasData = false; + // The current query is completed for the active vnode, try next vnode if exists + // If it is completed, no need to fetch anymore. if (!pRes->completed) { numOfFetch++; } } } else { // has reach the limitation, no data anymore if (pRes->row >= pRes->numOfRows) { - hasData = false; + reachLimit = true; + hasData = false; break; } } @@ -958,29 +1144,113 @@ void tscFetchDatablockFromSubquery(SSqlObj* pSql) { if (hasData) { tscBuildResFromSubqueries(pSql); return; - } else if (numOfFetch <= 0) { + } + + // If at least one subquery is completed in current vnode, try the next vnode in case of multi-vnode + // super table projection query. 
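/*
 * Editor's note (illustrative aside): the branches that follow implement a three-way decision for the
 * join driver: build joined rows from what is already buffered, fetch more rows for subqueries that
 * are exhausted but not yet completed, or finish (possibly after advancing projection subqueries to
 * the next vnode). The sketch restates that decision on a simplified per-subquery state; the struct
 * and enum names are hypothetical and do not mirror the SSqlObj layout.
 */
#include <stdbool.h>

typedef struct { int row; int numOfRows; bool completed; bool reachedLimit; } SSubState; /* hypothetical */
typedef enum { JOIN_BUILD_RESULT, JOIN_FETCH_MORE, JOIN_FINISHED } EJoinNextStep;        /* hypothetical */

static EJoinNextStep nextStep(const SSubState *subs, int numOfSub) {
  bool allHaveRows = true;
  bool anyFetchable = false;

  for (int i = 0; i < numOfSub; ++i) {
    if (subs[i].row >= subs[i].numOfRows) {                    /* local buffer exhausted */
      if (subs[i].reachedLimit) return JOIN_FINISHED;          /* limit reached: stop the whole join */
      allHaveRows = false;
      if (!subs[i].completed) anyFetchable = true;             /* the vnode still has data to send */
    }
  }

  if (allHaveRows) return JOIN_BUILD_RESULT;                   /* every subquery has buffered rows */
  return anyFetchable ? JOIN_FETCH_MORE : JOIN_FINISHED;       /* otherwise fetch more, or finish */
}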
+ if (reachLimit) { pSql->res.completed = true; freeJoinSubqueryObj(pSql); - + if (pSql->res.code == TSDB_CODE_SUCCESS) { (*pSql->fp)(pSql->param, pSql, 0); } else { tscQueueAsyncRes(pSql); } - + + return; + } + + if (numOfFetch <= 0) { + bool tryNextVnode = false; + + bool orderedPrjQuery = false; + for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) { + SSqlObj* pSub = pSql->pSubs[i]; + if (pSub == NULL) { + continue; + } + + SQueryInfo* p = tscGetQueryInfoDetail(&pSub->cmd, 0); + orderedPrjQuery = tscNonOrderedProjectionQueryOnSTable(p, 0); + if (orderedPrjQuery) { + break; + } + } + + // get the number of subquery that need to retrieve the next vnode. + if (orderedPrjQuery) { + for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) { + SSqlObj* pSub = pSql->pSubs[i]; + if (pSub != NULL && pSub->res.row >= pSub->res.numOfRows && pSub->res.completed) { + pSql->subState.numOfRemain++; + } + } + } + + for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) { + SSqlObj* pSub = pSql->pSubs[i]; + if (pSub == NULL) { + continue; + } + + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSub->cmd, 0); + + if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0) && pSub->res.row >= pSub->res.numOfRows && + pSub->res.completed) { + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); + assert(pQueryInfo->numOfTables == 1); + + // for projection query, need to try next vnode if current vnode is exhausted + int32_t numOfVgroups = 0; // TODO refactor + if (pTableMetaInfo->pVgroupTables != NULL) { + numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables); + } else { + numOfVgroups = pTableMetaInfo->vgroupList->numOfVgroups; + } + + if ((++pTableMetaInfo->vgroupIndex) < numOfVgroups) { + tscDebug("%p no result in current vnode anymore, try next vnode, vgIndex:%d", pSub, + pTableMetaInfo->vgroupIndex); + pSub->cmd.command = TSDB_SQL_SELECT; + pSub->fp = tscJoinQueryCallback; + + tscProcessSql(pSub); + tryNextVnode = true; + } else { + tscDebug("%p no result in current subquery anymore", pSub); + } + } + } + + if (tryNextVnode) { + return; + } + + pSql->res.completed = true; + freeJoinSubqueryObj(pSql); + + if (pSql->res.code == TSDB_CODE_SUCCESS) { + (*pSql->fp)(pSql->param, pSql, 0); + } else { + tscQueueAsyncRes(pSql); + } + return; } // TODO multi-vnode retrieve for projection query with limitation has bugs, since the global limiation is not handled + // retrieve data from current vnode. 
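/*
 * Editor's note (illustrative aside): the fan-out below sets subState.numOfRemain to the number of
 * subqueries being fetched and lets every asynchronous callback decrement it; only the callback that
 * drives the counter to zero continues to build the joined result. A minimal sketch of the same
 * "last one to finish proceeds" pattern with C11 atomics follows; the names are hypothetical and the
 * patch itself uses the TDengine atomic_sub_fetch_32 wrapper instead.
 */
#include <stdatomic.h>
#include <stdbool.h>

typedef struct { atomic_int numOfRemain; } SFanoutState;   /* hypothetical */

static void fanoutStart(SFanoutState *st, int numOfFetch) {
  atomic_store(&st->numOfRemain, numOfFetch);              /* one ticket per in-flight fetch */
}

/* Called from every completion callback; returns true only for the last callback to arrive. */
static bool fanoutDone(SFanoutState *st) {
  return atomic_fetch_sub(&st->numOfRemain, 1) == 1;       /* previous value 1 means we just hit zero */
}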
tscDebug("%p retrieve data from %d subqueries", pSql, numOfFetch); - SJoinSupporter* pSupporter = tscUpdateSubqueryStatus(pSql, numOfFetch); - + SJoinSupporter* pSupporter = NULL; + pSql->subState.numOfRemain = numOfFetch; + for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) { SSqlObj* pSql1 = pSql->pSubs[i]; if (pSql1 == NULL) { continue; } - + SSqlRes* pRes1 = &pSql1->res; SSqlCmd* pCmd1 = &pSql1->cmd; @@ -1013,7 +1283,6 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) { SSqlCmd* pCmd = &pSql->cmd; SSqlRes* pRes = &pSql->res; - tscDebug("%p all subquery response, retrieve data for subclause:%d", pSql, pCmd->clauseIndex); // the column transfer support struct has been built if (pRes->pColumnIndex != NULL) { @@ -1109,21 +1378,23 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) { return; } - // wait for the other subqueries response from vnode - if (atomic_sub_fetch_32(&pParentSql->subState.numOfRemain, 1) > 0) { - return; + + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); + + // In case of consequence query from other vnode, do not wait for other query response here. + if (!(pTableMetaInfo->vgroupIndex > 0 && tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0))) { + if (atomic_sub_fetch_32(&pParentSql->subState.numOfRemain, 1) > 0) { + return; + } } tscSetupOutputColumnIndex(pParentSql); - STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); /** * if the query is a continue query (vgroupIndex > 0 for projection query) for next vnode, do the retrieval of * data instead of returning to its invoker */ if (pTableMetaInfo->vgroupIndex > 0 && tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) { - pParentSql->subState.numOfRemain = pParentSql->subState.numOfSub; // reset the record value - pSql->fp = joinRetrieveFinalResCallback; // continue retrieve data pSql->cmd.command = TSDB_SQL_FETCH; tscProcessSql(pSql); @@ -1192,6 +1463,9 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter return TSDB_CODE_TSC_OUT_OF_MEMORY; } + pSupporter->groupInfo = pNewQueryInfo->groupbyExpr; + memset(&pNewQueryInfo->groupbyExpr, 0, sizeof(SSqlGroupbyExpr)); + pNew->cmd.numOfCols = 0; pNewQueryInfo->interval.interval = 0; pSupporter->limit = pNewQueryInfo->limit; @@ -1212,17 +1486,9 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter assert(pTagCond->joinInfo.hasJoin); int32_t tagColId = tscGetJoinTagColIdByUid(pTagCond, pTableMetaInfo->pTableMeta->id.uid); - SSchema* s = tscGetTableColumnSchemaById(pTableMetaInfo->pTableMeta, tagColId); - - // get the tag colId column index - int32_t numOfTags = tscGetNumOfTags(pTableMetaInfo->pTableMeta); - SSchema* pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta); - for(int32_t i = 0; i < numOfTags; ++i) { - if (pSchema[i].colId == tagColId) { - colIndex.columnIndex = i; - break; - } - } + SSchema* s = tscGetColumnSchemaById(pTableMetaInfo->pTableMeta, tagColId); + + colIndex.columnIndex = tscGetTagColIndexById(pTableMetaInfo->pTableMeta, tagColId); int16_t bytes = 0; int16_t type = 0; @@ -1303,6 +1569,7 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) { tscDebug("%p start subquery, total:%d", pSql, pQueryInfo->numOfTables); for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { + SJoinSupporter *pSupporter = tscCreateJoinSupporter(pSql, i); if (pSupporter == NULL) { // failed to create support struct, abort current query @@ -1357,8 +1624,8 @@ static void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs) { SRetrieveSupport* pSupport = pSub->param; - 
taosTFree(pSupport->localBuffer); - taosTFree(pSupport); + tfree(pSupport->localBuffer); + tfree(pSupport); taos_free_result(pSub); } @@ -1375,9 +1642,10 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { } tExtMemBuffer ** pMemoryBuf = NULL; - tOrderDescriptor *pDesc = NULL; - SColumnModel * pModel = NULL; - + tOrderDescriptor *pDesc = NULL; + SColumnModel *pModel = NULL; + SColumnModel *pFinalModel = NULL; + pRes->qhandle = 0x1; // hack the qhandle check const uint32_t nBufferSize = (1u << 16); // 64KB @@ -1386,14 +1654,20 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); SSubqueryState *pState = &pSql->subState; - pState->numOfSub = pTableMetaInfo->vgroupList->numOfVgroups; + pState->numOfSub = 0; + if (pTableMetaInfo->pVgroupTables == NULL) { + pState->numOfSub = pTableMetaInfo->vgroupList->numOfVgroups; + } else { + pState->numOfSub = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables); + } + assert(pState->numOfSub > 0); - int32_t ret = tscLocalReducerEnvCreate(pSql, &pMemoryBuf, &pDesc, &pModel, nBufferSize); + int32_t ret = tscLocalReducerEnvCreate(pSql, &pMemoryBuf, &pDesc, &pModel, &pFinalModel, nBufferSize); if (ret != 0) { pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; tscQueueAsyncRes(pSql); - taosTFree(pMemoryBuf); + tfree(pMemoryBuf); return ret; } @@ -1402,9 +1676,9 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { tscDebug("%p retrieved query data from %d vnode(s)", pSql, pState->numOfSub); if (pSql->pSubs == NULL) { - taosTFree(pSql->pSubs); + tfree(pSql->pSubs); pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; - tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, pState->numOfSub); + tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, pFinalModel,pState->numOfSub); tscQueueAsyncRes(pSql); return ret; @@ -1427,19 +1701,20 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { trs->localBuffer = (tFilePage *)calloc(1, nBufferSize + sizeof(tFilePage)); if (trs->localBuffer == NULL) { tscError("%p failed to malloc buffer for local buffer, orderOfSub:%d, reason:%s", pSql, i, strerror(errno)); - taosTFree(trs); + tfree(trs); break; } trs->subqueryIndex = i; trs->pParentSql = pSql; trs->pFinalColModel = pModel; - + trs->pFFColModel = pFinalModel; + SSqlObj *pNew = tscCreateSTableSubquery(pSql, trs, NULL); if (pNew == NULL) { tscError("%p failed to malloc buffer for subObj, orderOfSub:%d, reason:%s", pSql, i, strerror(errno)); - taosTFree(trs->localBuffer); - taosTFree(trs); + tfree(trs->localBuffer); + tfree(trs); break; } @@ -1457,13 +1732,13 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { tscError("%p failed to prepare subquery structure and launch subqueries", pSql); pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; - tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, pState->numOfSub); + tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, pFinalModel, pState->numOfSub); doCleanupSubqueries(pSql, i); return pRes->code; // free all allocated resource } if (pRes->code == TSDB_CODE_TSC_QUERY_CANCELLED) { - tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, pState->numOfSub); + tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, pFinalModel, pState->numOfSub); doCleanupSubqueries(pSql, i); return pRes->code; } @@ -1489,12 +1764,8 @@ static void tscFreeRetrieveSup(SSqlObj *pSql) { } tscDebug("%p start to free subquery supp obj:%p", pSql, trsupport); -// int32_t index = trsupport->subqueryIndex; -// SSqlObj *pParentSql = trsupport->pParentSql; - -// assert(pSql == pParentSql->pSubs[index]); - 
taosTFree(trsupport->localBuffer); - taosTFree(trsupport); + tfree(trsupport->localBuffer); + tfree(trsupport); } static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfRows); @@ -1518,8 +1789,8 @@ static int32_t tscReissueSubquery(SRetrieveSupport *trsupport, SSqlObj *pSql, in SSqlObj *pParentSql = trsupport->pParentSql; int32_t subqueryIndex = trsupport->subqueryIndex; - STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0); - SCMVgroupInfo* pVgroup = &pTableMetaInfo->vgroupList->vgroups[0]; + STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0); + SVgroupInfo* pVgroup = &pTableMetaInfo->vgroupList->vgroups[0]; tExtMemBufferClear(trsupport->pExtMemBuffer[subqueryIndex]); @@ -1607,7 +1878,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO tstrerror(pParentSql->res.code)); // release allocated resource - tscLocalReducerEnvDestroy(trsupport->pExtMemBuffer, trsupport->pOrderDescriptor, trsupport->pFinalColModel, + tscLocalReducerEnvDestroy(trsupport->pExtMemBuffer, trsupport->pOrderDescriptor, trsupport->pFinalColModel, trsupport->pFFColModel, pState->numOfSub); tscFreeRetrieveSup(pSql); @@ -1683,7 +1954,7 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p SQueryInfo *pPQueryInfo = tscGetQueryInfoDetail(&pParentSql->cmd, 0); tscClearInterpInfo(pPQueryInfo); - tscCreateLocalReducer(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, trsupport->pFinalColModel, pParentSql); + tscCreateLocalReducer(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, trsupport->pFinalColModel, trsupport->pFFColModel, pParentSql); tscDebug("%p build loser tree completed", pParentSql); pParentSql->res.precision = pSql->res.precision; @@ -1721,7 +1992,7 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR assert(pState->numOfRemain <= pState->numOfSub && pState->numOfRemain >= 0); STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0); - SCMVgroupInfo *pVgroup = &pTableMetaInfo->vgroupList->vgroups[0]; + SVgroupInfo *pVgroup = &pTableMetaInfo->vgroupList->vgroups[0]; if (pParentSql->res.code != TSDB_CODE_SUCCESS) { trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; @@ -1761,7 +2032,7 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR assert(pRes->numOfRows == numOfRows); int64_t num = atomic_add_fetch_64(&pState->numOfRetrievedRows, numOfRows); - tscDebug("%p sub:%p retrieve numOfRows:%" PRId64 " totalNumOfRows:%" PRIu64 " from ep:%s, orderOfSub:%d", pParentSql, pSql, + tscDebug("%p sub:%p retrieve numOfRows:%d totalNumOfRows:%" PRIu64 " from ep:%s, orderOfSub:%d", pParentSql, pSql, pRes->numOfRows, pState->numOfRetrievedRows, pSql->epSet.fqdn[pSql->epSet.inUse], idx); if (num > tsMaxNumOfOrderedResults && tscIsProjectionQueryOnSTable(pQueryInfo, 0)) { @@ -1788,7 +2059,7 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR } int32_t ret = saveToBuffer(trsupport->pExtMemBuffer[idx], pDesc, trsupport->localBuffer, pRes->data, - (int32_t)pRes->numOfRows, pQueryInfo->groupbyExpr.orderType); + pRes->numOfRows, pQueryInfo->groupbyExpr.orderType); if (ret != 0) { // set no disk space error info, and abort retry tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_TSC_NO_DISKSPACE); } else if (pRes->completed) { @@ -1832,7 +2103,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { assert(pSql->cmd.numOfClause == 1 && pQueryInfo->numOfTables == 1); 
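/*
 * Editor's note (illustrative aside): the retrieval path in this hunk tracks a per-subquery retry
 * budget (numOfRetry, capped by MAX_NUM_OF_SUBQUERY_RETRY) and re-issues a failed vnode subquery
 * until the budget runs out, after which the error is propagated to the parent query. The sketch
 * below shows that bounded-retry shape in isolation; the struct, issue() and onGiveUp() are
 * hypothetical placeholders, not the SRetrieveSupport API.
 */
#include <stdbool.h>

typedef struct { int numOfRetry; } SRetrySup;                 /* hypothetical */

static bool retryOrFail(SRetrySup *sup, bool (*issue)(void *), void *arg, void (*onGiveUp)(void *)) {
  if (--sup->numOfRetry >= 0) {                               /* budget left: re-issue the subquery */
    return issue(arg);
  }
  onGiveUp(arg);                                              /* budget exhausted: report the failure upward */
  return false;
}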
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0); - SCMVgroupInfo* pVgroup = &pTableMetaInfo->vgroupList->vgroups[trsupport->subqueryIndex]; + SVgroupInfo* pVgroup = &pTableMetaInfo->vgroupList->vgroups[trsupport->subqueryIndex]; // stable query killed or other subquery failed, all query stopped if (pParentSql->res.code != TSDB_CODE_SUCCESS) { @@ -1894,13 +2165,13 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows) pParentObj->res.code = pSql->res.code; } - taosTFree(pSupporter); + tfree(pSupporter); if (atomic_sub_fetch_32(&pParentObj->subState.numOfRemain, 1) > 0) { return; } - tscDebug("%p Async insertion completed, total inserted:%" PRId64, pParentObj, pParentObj->res.numOfRows); + tscDebug("%p Async insertion completed, total inserted:%d", pParentObj, pParentObj->res.numOfRows); // restore user defined fp pParentObj->fp = pParentObj->fetchFp; @@ -2017,7 +2288,7 @@ static char* getResultBlockPosition(SSqlCmd* pCmd, SSqlRes* pRes, int32_t column assert(pInfo->pSqlExpr != NULL); *bytes = pInfo->pSqlExpr->resBytes; - char* pData = pRes->data + pInfo->pSqlExpr->offset * pRes->numOfRows; + char* pData = pRes->data + pInfo->pSqlExpr->offset * pRes->numOfRows + pRes->row * (*bytes); return pData; } @@ -2029,21 +2300,24 @@ static void doBuildResFromSubqueries(SSqlObj* pSql) { int32_t numOfRes = INT32_MAX; for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) { - if (pSql->pSubs[i] == NULL) { + SSqlObj* pSub = pSql->pSubs[i]; + if (pSub == NULL) { continue; } - numOfRes = (int32_t)(MIN(numOfRes, pSql->pSubs[i]->res.numOfRows)); + int32_t remain = (int32_t)(pSub->res.numOfRows - pSub->res.row); + numOfRes = (int32_t)(MIN(numOfRes, remain)); } - if (numOfRes == 0) { + if (numOfRes == 0) { // no result any more, free all subquery objects + freeJoinSubqueryObj(pSql); return; } - int32_t totalSize = tscGetResRowLength(pQueryInfo->exprList); + int32_t rowSize = tscGetResRowLength(pQueryInfo->exprList); - assert(numOfRes * totalSize > 0); - char* tmp = realloc(pRes->pRsp, numOfRes * totalSize); + assert(numOfRes * rowSize > 0); + char* tmp = realloc(pRes->pRsp, numOfRes * rowSize + sizeof(tFilePage)); if (tmp == NULL) { pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; return; @@ -2051,26 +2325,49 @@ static void doBuildResFromSubqueries(SSqlObj* pSql) { pRes->pRsp = tmp; } - pRes->data = pRes->pRsp; + tFilePage* pFilePage = (tFilePage*) pRes->pRsp; + pFilePage->num = numOfRes; + pRes->data = pFilePage->data; char* data = pRes->data; + int16_t bytes = 0; size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo); for(int32_t i = 0; i < numOfExprs; ++i) { SColumnIndex* pIndex = &pRes->pColumnIndex[i]; - SSqlRes *pRes1 = &pSql->pSubs[pIndex->tableIndex]->res; - SSqlCmd *pCmd1 = &pSql->pSubs[pIndex->tableIndex]->cmd; + SSqlRes* pRes1 = &pSql->pSubs[pIndex->tableIndex]->res; + SSqlCmd* pCmd1 = &pSql->pSubs[pIndex->tableIndex]->cmd; char* pData = getResultBlockPosition(pCmd1, pRes1, pIndex->columnIndex, &bytes); memcpy(data, pData, bytes * numOfRes); data += bytes * numOfRes; - pRes1->row = numOfRes; + } + + for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) { + SSqlObj* pSub = pSql->pSubs[i]; + if (pSub == NULL) { + continue; + } + + pSub->res.row += numOfRes; + assert(pSub->res.row <= pSub->res.numOfRows); } pRes->numOfRows = numOfRes; pRes->numOfClauseTotal += numOfRes; + + int32_t finalRowSize = 0; + for(int32_t i = 0; i < tscNumOfFields(pQueryInfo); ++i) { + TAOS_FIELD* pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); + finalRowSize += 
pField->bytes; + } + + doArithmeticCalculate(pQueryInfo, pFilePage, rowSize, finalRowSize); + + pRes->data = pFilePage->data; + tscSetResRawPtr(pRes, pQueryInfo); } void tscBuildResFromSubqueries(SSqlObj *pSql) { @@ -2083,11 +2380,12 @@ void tscBuildResFromSubqueries(SSqlObj *pSql) { if (pRes->tsrow == NULL) { SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex); + pRes->numOfCols = (int16_t) tscSqlExprNumOfExprs(pQueryInfo); - size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo); - pRes->tsrow = calloc(numOfExprs, POINTER_BYTES); - pRes->buffer = calloc(numOfExprs, POINTER_BYTES); - pRes->length = calloc(numOfExprs, sizeof(int32_t)); + pRes->tsrow = calloc(pRes->numOfCols, POINTER_BYTES); + pRes->urow = calloc(pRes->numOfCols, POINTER_BYTES); + pRes->buffer = calloc(pRes->numOfCols, POINTER_BYTES); + pRes->length = calloc(pRes->numOfCols, sizeof(int32_t)); if (pRes->tsrow == NULL || pRes->buffer == NULL || pRes->length == NULL) { pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; @@ -2107,7 +2405,7 @@ void tscBuildResFromSubqueries(SSqlObj *pSql) { } } -static void transferNcharData(SSqlObj *pSql, int32_t columnIndex, TAOS_FIELD *pField) { +static UNUSED_FUNC void transferNcharData(SSqlObj *pSql, int32_t columnIndex, TAOS_FIELD *pField) { SSqlRes *pRes = &pSql->res; if (pRes->tsrow[columnIndex] != NULL && pField->type == TSDB_DATA_TYPE_NCHAR) { @@ -2131,7 +2429,7 @@ static void transferNcharData(SSqlObj *pSql, int32_t columnIndex, TAOS_FIELD *pF } } -static char *getArithemicInputSrc(void *param, const char *name, int32_t colId) { +char *getArithmeticInputSrc(void *param, const char *name, int32_t colId) { SArithmeticSupport *pSupport = (SArithmeticSupport *) param; int32_t index = -1; @@ -2149,13 +2447,13 @@ static char *getArithemicInputSrc(void *param, const char *name, int32_t colId) return pSupport->data[index] + pSupport->offset * pExpr->resBytes; } -TAOS_ROW doSetResultRowData(SSqlObj *pSql, bool finalResult) { +TAOS_ROW doSetResultRowData(SSqlObj *pSql) { SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; assert(pRes->row >= 0 && pRes->row <= pRes->numOfRows); if (pRes->row >= pRes->numOfRows) { // all the results has returned to invoker - taosTFree(pRes->tsrow); + tfree(pRes->tsrow); return pRes->tsrow; } @@ -2163,47 +2461,19 @@ TAOS_ROW doSetResultRowData(SSqlObj *pSql, bool finalResult) { size_t size = tscNumOfFields(pQueryInfo); for (int i = 0; i < size; ++i) { - SInternalField* pSup = TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i); - if (pSup->pSqlExpr != NULL) { - tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i); - } + SInternalField* pInfo = (SInternalField*)TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i); - // primary key column cannot be null in interval query, no need to check - if (i == 0 && pQueryInfo->interval.interval > 0) { - continue; - } + int32_t type = pInfo->field.type; + int32_t bytes = pInfo->field.bytes; - TAOS_FIELD *pField = TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i); - if (pRes->tsrow[i] != NULL && pField->type == TSDB_DATA_TYPE_NCHAR) { - transferNcharData(pSql, i, pField); + if (type != TSDB_DATA_TYPE_BINARY && type != TSDB_DATA_TYPE_NCHAR) { + pRes->tsrow[i] = isNull(pRes->urow[i], type) ? NULL : pRes->urow[i]; + } else { + pRes->tsrow[i] = isNull(pRes->urow[i], type) ? 
NULL : varDataVal(pRes->urow[i]); + pRes->length[i] = varDataLen(pRes->urow[i]); } - // calculate the result from several other columns - if (pSup->pArithExprInfo != NULL) { - if (pRes->pArithSup == NULL) { - pRes->pArithSup = (SArithmeticSupport*)calloc(1, sizeof(SArithmeticSupport)); - } - - pRes->pArithSup->offset = 0; - pRes->pArithSup->pArithExpr = pSup->pArithExprInfo; - pRes->pArithSup->numOfCols = (int32_t)tscSqlExprNumOfExprs(pQueryInfo); - pRes->pArithSup->exprList = pQueryInfo->exprList; - pRes->pArithSup->data = calloc(pRes->pArithSup->numOfCols, POINTER_BYTES); - - if (pRes->buffer[i] == NULL) { - TAOS_FIELD* field = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); - pRes->buffer[i] = malloc(field->bytes); - } - - for(int32_t k = 0; k < pRes->pArithSup->numOfCols; ++k) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, k); - pRes->pArithSup->data[k] = (pRes->data + pRes->numOfRows* pExpr->offset) + pRes->row*pExpr->resBytes; - } - - tExprTreeCalcTraverse(pRes->pArithSup->pArithExpr->pExpr, 1, pRes->buffer[i], pRes->pArithSup, - TSDB_ORDER_ASC, getArithemicInputSrc); - pRes->tsrow[i] = (unsigned char*)pRes->buffer[i]; - } + ((char**) pRes->urow)[i] += bytes; } pRes->row++; // index increase one-step diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index 47c2d35a75cba4787e00c645e2f242ae24788df7..9e9a00550a4497ae11cbdffe48991c2fd7b742d6 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -36,6 +36,7 @@ void * tscTmr; void * tscQhandle; void * tscCheckDiskUsageTmr; int tsInsertHeadSize; +int tscRefId = -1; int tscNumOfThreads; @@ -79,7 +80,7 @@ int32_t tscInitRpc(const char *user, const char *secretEncrypt, void **pDnodeCon void taos_init_imp(void) { - char temp[128]; + char temp[128] = {0}; errno = TSDB_CODE_SUCCESS; srand(taosGetTimestampSec()); @@ -103,8 +104,8 @@ void taos_init_imp(void) { taosReadGlobalCfg(); taosCheckGlobalCfg(); - taosPrintGlobalCfg(); + rpcInit(); tscDebug("starting to initialize TAOS client ..."); tscDebug("Local End Point is:%s", tsLocalEp); } @@ -146,29 +147,45 @@ void taos_init_imp(void) { tscObjCache = taosCacheInit(TSDB_CACHE_PTR_KEY, refreshTime / 2, false, tscFreeRegisteredSqlObj, "sqlObj"); } + tscRefId = taosOpenRef(200, tscCloseTscObj); + + // in other language APIs, taos_cleanup is not available yet. + // So, to make sure taos_cleanup will be invoked to clean up the allocated + // resource to suppress the valgrind warning. + atexit(taos_cleanup); tscDebug("client is initialized successfully"); } void taos_init() { pthread_once(&tscinit, taos_init_imp); } -void taos_cleanup() { - if (tscMetaCache != NULL) { - taosCacheCleanup(tscMetaCache); - tscMetaCache = NULL; +// this function may be called by user or system, or by both simultaneously. 
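/*
 * Editor's note (illustrative aside): taos_cleanup below may run more than once (explicitly and again
 * via atexit), so each global resource is first "claimed" with a compare-and-swap and only the caller
 * that wins the swap releases it, which keeps the cleanup idempotent. A minimal sketch of the same
 * claim-then-free idiom with C11 atomics; the slot type and destroy() callback are hypothetical, and
 * the patch itself uses the TDengine atomic_val_compare_exchange_ptr wrapper.
 */
#include <stdatomic.h>
#include <stddef.h>

static void releaseOnce(_Atomic(void *) *slot, void (*destroy)(void *)) {
  void *p = atomic_load(slot);
  /* Only the caller that successfully swaps the pointer to NULL gets to destroy the resource. */
  if (p != NULL && atomic_compare_exchange_strong(slot, &p, NULL)) {
    destroy(p);
  }
}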
+void taos_cleanup(void) { + tscDebug("start to cleanup client environment"); - taosCacheCleanup(tscObjCache); - tscObjCache = NULL; + void* m = tscMetaCache; + if (m != NULL && atomic_val_compare_exchange_ptr(&tscMetaCache, m, 0) == m) { + taosCacheCleanup(m); } - - if (tscQhandle != NULL) { - taosCleanUpScheduler(tscQhandle); - tscQhandle = NULL; + + m = tscObjCache; + if (m != NULL && atomic_val_compare_exchange_ptr(&tscObjCache, m, 0) == m) { + taosCacheCleanup(m); + } + + m = tscQhandle; + if (m != NULL && atomic_val_compare_exchange_ptr(&tscQhandle, m, 0) == m) { + taosCleanUpScheduler(m); } + taosCloseRef(tscRefId); taosCleanupKeywordsTable(); taosCloseLog(); - - taosTmrCleanUp(tscTmr); + if (tscEmbedded == 0) rpcCleanup(); + + m = tscTmr; + if (m != NULL && atomic_val_compare_exchange_ptr(&tscTmr, m, 0) == m) { + taosTmrCleanUp(m); + } } static int taos_options_imp(TSDB_OPTION option, const char *pStr) { diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index b60bf958a84bcefd8fb14970923f4af1074b7a6f..7a82bcaaab63603d136ae79f28b1625c90944779 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -71,7 +71,8 @@ void tsSetSTableQueryCond(STagCond* pTagCond, uint64_t uid, SBufferWriter* bw) { } bool tscQueryTags(SQueryInfo* pQueryInfo) { - for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { + int32_t numOfCols = (int32_t) tscSqlExprNumOfExprs(pQueryInfo); + for (int32_t i = 0; i < numOfCols; ++i) { SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); int32_t functId = pExpr->functionId; @@ -201,13 +202,9 @@ bool tscIsProjectionQuery(SQueryInfo* pQueryInfo) { bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo) { size_t size = tscSqlExprNumOfExprs(pQueryInfo); - for (int32_t i = 0; i < size; ++i) { SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); assert(pExpr != NULL); -// if (pExpr == NULL) { -// return false; -// } int32_t functionId = pExpr->functionId; if (functionId == TSDB_FUNC_TAG) { @@ -222,6 +219,22 @@ bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo) { return true; } +bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo) { + if (tscIsProjectionQuery(pQueryInfo)) { + return false; + } + + size_t numOfOutput = tscNumOfFields(pQueryInfo); + for(int32_t i = 0; i < numOfOutput; ++i) { + SExprInfo* pExprInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i)->pArithExprInfo; + if (pExprInfo != NULL) { + return true; + } + } + + return false; +} + bool tscIsTWAQuery(SQueryInfo* pQueryInfo) { size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo); for (int32_t i = 0; i < numOfExprs; ++i) { @@ -245,21 +258,25 @@ void tscClearInterpInfo(SQueryInfo* pQueryInfo) { } pQueryInfo->fillType = TSDB_FILL_NONE; - taosTFree(pQueryInfo->fillVal); + tfree(pQueryInfo->fillVal); } int32_t tscCreateResPointerInfo(SSqlRes* pRes, SQueryInfo* pQueryInfo) { if (pRes->tsrow == NULL) { - int32_t numOfOutput = pQueryInfo->fieldsInfo.numOfOutput; - pRes->numOfCols = numOfOutput; + pRes->numOfCols = pQueryInfo->fieldsInfo.numOfOutput; - pRes->tsrow = calloc(numOfOutput, POINTER_BYTES); - pRes->length = calloc(numOfOutput, sizeof(int32_t)); - pRes->buffer = calloc(numOfOutput, POINTER_BYTES); + pRes->tsrow = calloc(pRes->numOfCols, POINTER_BYTES); + pRes->urow = calloc(pRes->numOfCols, POINTER_BYTES); + pRes->length = calloc(pRes->numOfCols, sizeof(int32_t)); + pRes->buffer = calloc(pRes->numOfCols, POINTER_BYTES); // not enough memory - if (pRes->tsrow == NULL || (pRes->buffer == NULL && pRes->numOfCols > 0)) { - taosTFree(pRes->tsrow); + if (pRes->tsrow == NULL || 
pRes->urow == NULL || pRes->length == NULL || (pRes->buffer == NULL && pRes->numOfCols > 0)) { + tfree(pRes->tsrow); + tfree(pRes->urow); + tfree(pRes->length); + tfree(pRes->buffer); + pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; return pRes->code; } @@ -268,27 +285,93 @@ int32_t tscCreateResPointerInfo(SSqlRes* pRes, SQueryInfo* pQueryInfo) { return TSDB_CODE_SUCCESS; } +void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo) { + assert(pRes->numOfCols > 0); + + int32_t offset = 0; + + for (int32_t i = 0; i < pRes->numOfCols; ++i) { + SInternalField* pInfo = (SInternalField*)TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i); + + pRes->urow[i] = pRes->data + offset * pRes->numOfRows; + pRes->length[i] = pInfo->field.bytes; + + offset += pInfo->field.bytes; + + // generated the user-defined column result + if (pInfo->pSqlExpr != NULL && TSDB_COL_IS_UD_COL(pInfo->pSqlExpr->colInfo.flag)) { + if (pInfo->pSqlExpr->param[1].nType == TSDB_DATA_TYPE_NULL) { + setNullN(pRes->urow[i], pInfo->field.type, pInfo->field.bytes, (int32_t) pRes->numOfRows); + } else { + if (pInfo->field.type == TSDB_DATA_TYPE_NCHAR || pInfo->field.type == TSDB_DATA_TYPE_BINARY) { + assert(pInfo->pSqlExpr->param[1].nLen <= pInfo->field.bytes); + + for (int32_t k = 0; k < pRes->numOfRows; ++k) { + char* p = ((char**)pRes->urow)[i] + k * pInfo->field.bytes; + + memcpy(varDataVal(p), pInfo->pSqlExpr->param[1].pz, pInfo->pSqlExpr->param[1].nLen); + varDataSetLen(p, pInfo->pSqlExpr->param[1].nLen); + } + } else { + for (int32_t k = 0; k < pRes->numOfRows; ++k) { + char* p = ((char**)pRes->urow)[i] + k * pInfo->field.bytes; + memcpy(p, &pInfo->pSqlExpr->param[1].i64Key, pInfo->field.bytes); + } + } + } + + } else if (pInfo->field.type == TSDB_DATA_TYPE_NCHAR) { + // convert unicode to native code in a temporary buffer extra one byte for terminated symbol + pRes->buffer[i] = realloc(pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows); + + // string terminated char for binary data + memset(pRes->buffer[i], 0, pInfo->field.bytes * pRes->numOfRows); + + char* p = pRes->urow[i]; + for (int32_t k = 0; k < pRes->numOfRows; ++k) { + char* dst = pRes->buffer[i] + k * pInfo->field.bytes; + + if (isNull(p, TSDB_DATA_TYPE_NCHAR)) { + memcpy(dst, p, varDataTLen(p)); + } else { + int32_t length = taosUcs4ToMbs(varDataVal(p), varDataLen(p), varDataVal(dst)); + varDataSetLen(dst, length); + + if (length == 0) { + tscError("charset:%s to %s. 
val:%s convert failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, (char*)p); + } + } + + p += pInfo->field.bytes; + } + + memcpy(pRes->urow[i], pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows); + } + } +} + static void tscDestroyResPointerInfo(SSqlRes* pRes) { if (pRes->buffer != NULL) { // free all buffers containing the multibyte string for (int i = 0; i < pRes->numOfCols; i++) { - taosTFree(pRes->buffer[i]); + tfree(pRes->buffer[i]); } pRes->numOfCols = 0; } - taosTFree(pRes->pRsp); + tfree(pRes->pRsp); - taosTFree(pRes->tsrow); - taosTFree(pRes->length); - taosTFree(pRes->buffer); + tfree(pRes->tsrow); + tfree(pRes->length); + tfree(pRes->buffer); + tfree(pRes->urow); - taosTFree(pRes->pGroupRec); - taosTFree(pRes->pColumnIndex); + tfree(pRes->pGroupRec); + tfree(pRes->pColumnIndex); if (pRes->pArithSup != NULL) { - taosTFree(pRes->pArithSup->data); - taosTFree(pRes->pArithSup); + tfree(pRes->pArithSup->data); + tfree(pRes->pArithSup); } pRes->data = NULL; // pRes->data points to the buffer of pRsp, no need to free @@ -305,11 +388,11 @@ static void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeFromCache) { freeQueryInfoImpl(pQueryInfo); clearAllTableMetaInfo(pQueryInfo, (const char*)addr, removeFromCache); - taosTFree(pQueryInfo); + tfree(pQueryInfo); } pCmd->numOfClause = 0; - taosTFree(pCmd->pQueryInfo); + tfree(pCmd->pQueryInfo); } void tscResetSqlCmdObj(SSqlCmd* pCmd, bool removeFromCache) { @@ -338,34 +421,6 @@ void tscFreeSqlResult(SSqlObj* pSql) { memset(&pSql->res, 0, sizeof(SSqlRes)); } -void tscPartiallyFreeSqlObj(SSqlObj* pSql) { - if (pSql == NULL || pSql->signature != pSql) { - return; - } - - SSqlCmd* pCmd = &pSql->cmd; - int32_t cmd = pCmd->command; - if (cmd < TSDB_SQL_INSERT || cmd == TSDB_SQL_RETRIEVE_LOCALMERGE || cmd == TSDB_SQL_RETRIEVE_EMPTY_RESULT || - cmd == TSDB_SQL_TABLE_JOIN_RETRIEVE) { - tscRemoveFromSqlList(pSql); - } - - // pSql->sqlstr will be used by tscBuildQueryStreamDesc -// if (pObj->signature == pObj) { - //pthread_mutex_lock(&pObj->mutex); - taosTFree(pSql->sqlstr); - //pthread_mutex_unlock(&pObj->mutex); -// } - - tscFreeSqlResult(pSql); - - taosTFree(pSql->pSubs); - pSql->subState.numOfSub = 0; - pSql->self = 0; - - tscResetSqlCmdObj(pCmd, false); -} - static void tscFreeSubobj(SSqlObj* pSql) { if (pSql->subState.numOfSub == 0) { return; @@ -404,7 +459,7 @@ void tscFreeRegisteredSqlObj(void *pSql) { tscDebug("%p free sqlObj completed, tscObj:%p ref:%d", *p, pTscObj, ref); if (ref == 0) { tscDebug("%p all sqlObj freed, free tscObj:%p", *p, pTscObj); - tscCloseTscObj(pTscObj); + taosRemoveRef(tscRefId, pTscObj->rid); } } @@ -415,14 +470,14 @@ void tscFreeTableMetaHelper(void *pTableMeta) { assert(numOfEps >= 0 && numOfEps <= TSDB_MAX_REPLICA); for(int32_t i = 0; i < numOfEps; ++i) { - taosTFree(p->vgroupInfo.epAddr[i].fqdn); + tfree(p->vgroupInfo.epAddr[i].fqdn); } int32_t numOfEps1 = p->corVgroupInfo.numOfEps; assert(numOfEps1 >= 0 && numOfEps1 <= TSDB_MAX_REPLICA); for(int32_t i = 0; i < numOfEps1; ++i) { - taosTFree(p->corVgroupInfo.epAddr[i].fqdn); + tfree(p->corVgroupInfo.epAddr[i].fqdn); } } @@ -434,22 +489,32 @@ void tscFreeSqlObj(SSqlObj* pSql) { tscDebug("%p start to free sqlObj", pSql); pSql->res.code = TSDB_CODE_TSC_QUERY_CANCELLED; + tscFreeSubobj(pSql); - tscPartiallyFreeSqlObj(pSql); + SSqlCmd* pCmd = &pSql->cmd; + int32_t cmd = pCmd->command; + if (cmd < TSDB_SQL_INSERT || cmd == TSDB_SQL_RETRIEVE_LOCALMERGE || cmd == TSDB_SQL_RETRIEVE_EMPTY_RESULT || + cmd == TSDB_SQL_TABLE_JOIN_RETRIEVE) { + tscRemoveFromSqlList(pSql); + } 
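  // (editorial note, not part of the original patch) the remainder of this function now
  // inlines what the removed tscPartiallyFreeSqlObj() used to do: detach the object,
  // free the SQL string and the subquery array, release the result set and command
  // payload, then destroy the response semaphore and the object itself.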
pSql->signature = NULL; pSql->fp = NULL; - - SSqlCmd* pCmd = &pSql->cmd; + tfree(pSql->sqlstr); + + tfree(pSql->pSubs); + pSql->subState.numOfSub = 0; + pSql->self = 0; + + tscFreeSqlResult(pSql); + tscResetSqlCmdObj(pCmd, false); memset(pCmd->payload, 0, (size_t)pCmd->allocSize); - taosTFree(pCmd->payload); + tfree(pCmd->payload); pCmd->allocSize = 0; - taosTFree(pSql->sqlstr); tsem_destroy(&pSql->rspSem); - free(pSql); } @@ -458,15 +523,15 @@ void tscDestroyDataBlock(STableDataBlocks* pDataBlock) { return; } - taosTFree(pDataBlock->pData); - taosTFree(pDataBlock->params); + tfree(pDataBlock->pData); + tfree(pDataBlock->params); // free the refcount for metermeta if (pDataBlock->pTableMeta != NULL) { taosCacheRelease(tscMetaCache, (void**)&(pDataBlock->pTableMeta), false); } - taosTFree(pDataBlock); + tfree(pDataBlock); } SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, int16_t bytes, @@ -741,7 +806,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pTableDataBlockList) { taosHashCleanup(pVnodeDataBlockHashList); tscDestroyBlockArrayList(pVnodeDataBlockList); - taosTFree(dataBuf->pData); + tfree(dataBuf->pData); return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -786,8 +851,8 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pTableDataBlockList) { } // TODO: all subqueries should be freed correctly before close this connection. -void tscCloseTscObj(STscObj* pObj) { - assert(pObj != NULL); +void tscCloseTscObj(void *param) { + STscObj *pObj = param; pObj->signature = NULL; taosTmrStopA(&(pObj->pTimer)); @@ -801,7 +866,7 @@ void tscCloseTscObj(STscObj* pObj) { pthread_mutex_destroy(&pObj->mutex); tscDebug("%p DB connection is closed, dnodeConn:%p", pObj, p); - taosTFree(pObj); + tfree(pObj); } bool tscIsInsertData(char* sqlstr) { @@ -876,28 +941,11 @@ void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo) { SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0); pExpr->offset = 0; - - for (int32_t i = 1; i < numOfExprs; ++i) { - SSqlExpr* prev = taosArrayGetP(pQueryInfo->exprList, i - 1); - SSqlExpr* p = taosArrayGetP(pQueryInfo->exprList, i); - - p->offset = prev->offset + prev->resBytes; - } -} -void tscFieldInfoUpdateOffsetForInterResult(SQueryInfo* pQueryInfo) { - if (tscSqlExprNumOfExprs(pQueryInfo) == 0) { - return; - } - - SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0); - pExpr->offset = 0; - - size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo); for (int32_t i = 1; i < numOfExprs; ++i) { SSqlExpr* prev = taosArrayGetP(pQueryInfo->exprList, i - 1); SSqlExpr* p = taosArrayGetP(pQueryInfo->exprList, i); - + p->offset = prev->offset + prev->resBytes; } } @@ -965,18 +1013,26 @@ void tscFieldInfoClear(SFieldInfo* pFieldInfo) { if (pInfo->pArithExprInfo != NULL) { tExprTreeDestroy(&pInfo->pArithExprInfo->pExpr, NULL); - taosTFree(pInfo->pArithExprInfo); + + SSqlFuncMsg* pFuncMsg = &pInfo->pArithExprInfo->base; + for(int32_t j = 0; j < pFuncMsg->numOfParams; ++j) { + if (pFuncMsg->arg[j].argType == TSDB_DATA_TYPE_BINARY) { + tfree(pFuncMsg->arg[j].argValue.pz); + } + } + + tfree(pInfo->pArithExprInfo); } } taosArrayDestroy(pFieldInfo->internalField); - taosTFree(pFieldInfo->final); + tfree(pFieldInfo->final); memset(pFieldInfo, 0, sizeof(SFieldInfo)); } static SSqlExpr* doBuildSqlExpr(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type, - int16_t size, int16_t interSize, int32_t colType) { + int16_t size, int16_t resColId, int16_t interSize, int32_t colType) { STableMetaInfo* pTableMetaInfo = 
tscGetMetaInfo(pQueryInfo, pColIndex->tableIndex); SSqlExpr* pExpr = calloc(1, sizeof(SSqlExpr)); @@ -1009,8 +1065,9 @@ static SSqlExpr* doBuildSqlExpr(SQueryInfo* pQueryInfo, int16_t functionId, SCol pExpr->resType = type; pExpr->resBytes = size; + pExpr->resColId = resColId; pExpr->interBytes = interSize; - + if (pTableMetaInfo->pTableMeta) { pExpr->uid = pTableMetaInfo->pTableMeta->id.uid; } @@ -1019,20 +1076,20 @@ static SSqlExpr* doBuildSqlExpr(SQueryInfo* pQueryInfo, int16_t functionId, SCol } SSqlExpr* tscSqlExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type, - int16_t size, int16_t interSize, bool isTagCol) { + int16_t size, int16_t resColId, int16_t interSize, bool isTagCol) { int32_t num = (int32_t)taosArrayGetSize(pQueryInfo->exprList); if (index == num) { - return tscSqlExprAppend(pQueryInfo, functionId, pColIndex, type, size, interSize, isTagCol); + return tscSqlExprAppend(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol); } - SSqlExpr* pExpr = doBuildSqlExpr(pQueryInfo, functionId, pColIndex, type, size, interSize, isTagCol); + SSqlExpr* pExpr = doBuildSqlExpr(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol); taosArrayInsert(pQueryInfo->exprList, index, &pExpr); return pExpr; } SSqlExpr* tscSqlExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type, - int16_t size, int16_t interSize, bool isTagCol) { - SSqlExpr* pExpr = doBuildSqlExpr(pQueryInfo, functionId, pColIndex, type, size, interSize, isTagCol); + int16_t size, int16_t resColId, int16_t interSize, bool isTagCol) { + SSqlExpr* pExpr = doBuildSqlExpr(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol); taosArrayPush(pQueryInfo->exprList, &pExpr); return pExpr; } @@ -1060,16 +1117,14 @@ size_t tscSqlExprNumOfExprs(SQueryInfo* pQueryInfo) { return taosArrayGetSize(pQueryInfo->exprList); } -void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes, int16_t tableIndex) { - if (pExpr == NULL || argument == NULL || bytes == 0) { - return; - } +void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes) { + assert (pExpr != NULL || argument != NULL || bytes != 0); // set parameter value // transfer to tVariant from byte data/no ascii data tVariantCreateFromBinary(&pExpr->param[pExpr->numOfParams], argument, bytes, type); - pExpr->numOfParams += 1; + assert(pExpr->numOfParams <= 3); } @@ -1086,7 +1141,7 @@ void* sqlExprDestroy(SSqlExpr* pExpr) { tVariantDestroy(&pExpr->param[i]); } - taosTFree(pExpr); + tfree(pExpr); return NULL; } @@ -1121,6 +1176,8 @@ int32_t tscSqlExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepco } *p1 = *pExpr; + memset(p1->param, 0, sizeof(tVariant) * tListLen(p1->param)); + for (int32_t j = 0; j < pExpr->numOfParams; ++j) { tVariantAssign(&p1->param[j], &pExpr->param[j]); } @@ -1184,11 +1241,11 @@ SColumn* tscColumnListInsert(SArray* pColumnList, SColumnIndex* pColIndex) { static void destroyFilterInfo(SColumnFilterInfo* pFilterInfo, int32_t numOfFilters) { for(int32_t i = 0; i < numOfFilters; ++i) { if (pFilterInfo[i].filterstr) { - taosTFree(pFilterInfo[i].pz); + tfree(pFilterInfo[i].pz); } } - taosTFree(pFilterInfo); + tfree(pFilterInfo); } SColumn* tscColumnClone(const SColumn* src) { @@ -1254,8 +1311,7 @@ void tscColumnListDestroy(SArray* pColumnList) { * */ static int32_t validateQuoteToken(SStrToken* pToken) { - strdequote(pToken->z); - pToken->n = 
(uint32_t)strtrim(pToken->z); + tscDequoteAndTrimToken(pToken); int32_t k = tSQLGetToken(pToken->z, &pToken->type); @@ -1270,8 +1326,6 @@ static int32_t validateQuoteToken(SStrToken* pToken) { } void tscDequoteAndTrimToken(SStrToken* pToken) { - assert(pToken->type == TK_STRING); - uint32_t first = 0, last = pToken->n; // trim leading spaces @@ -1383,7 +1437,8 @@ int32_t tscValidateName(SStrToken* pToken) { } else { pStr[firstPartLen] = TS_PATH_DELIMITER[0]; memmove(&pStr[firstPartLen + 1], pToken->z, pToken->n); - pStr[firstPartLen + sizeof(TS_PATH_DELIMITER[0]) + pToken->n] = 0; + uint32_t offset = (uint32_t)(pToken->z - (pStr + firstPartLen + 1)); + memset(pToken->z + pToken->n - offset, ' ', offset); } pToken->n += (firstPartLen + sizeof(TS_PATH_DELIMITER[0])); pToken->z = pStr; @@ -1476,7 +1531,7 @@ void tscTagCondRelease(STagCond* pTagCond) { size_t s = taosArrayGetSize(pTagCond->pCond); for (int32_t i = 0; i < s; ++i) { SCond* p = taosArrayGet(pTagCond->pCond, i); - taosTFree(p->cond); + tfree(p->cond); } taosArrayDestroy(pTagCond->pCond); @@ -1622,6 +1677,7 @@ void tscInitQueryInfo(SQueryInfo* pQueryInfo) { pQueryInfo->exprList = taosArrayInit(4, POINTER_BYTES); pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES); pQueryInfo->udColumnId = TSDB_UD_COLUMN_INDEX; + pQueryInfo->resColumnId= -1000; } int32_t tscAddSubqueryInfo(SSqlCmd* pCmd) { @@ -1663,11 +1719,12 @@ static void freeQueryInfoImpl(SQueryInfo* pQueryInfo) { if (pQueryInfo->groupbyExpr.columnInfo != NULL) { taosArrayDestroy(pQueryInfo->groupbyExpr.columnInfo); pQueryInfo->groupbyExpr.columnInfo = NULL; + pQueryInfo->groupbyExpr.numOfGroupCols = 0; } pQueryInfo->tsBuf = tsBufDestroy(pQueryInfo->tsBuf); - taosTFree(pQueryInfo->fillVal); + tfree(pQueryInfo->fillVal); } void tscClearSubqueryInfo(SSqlCmd* pCmd) { @@ -1678,23 +1735,71 @@ void tscClearSubqueryInfo(SSqlCmd* pCmd) { } void tscFreeVgroupTableInfo(SArray* pVgroupTables) { - if (pVgroupTables != NULL) { - size_t num = taosArrayGetSize(pVgroupTables); - for (size_t i = 0; i < num; i++) { - SVgroupTableInfo* pInfo = taosArrayGet(pVgroupTables, i); + if (pVgroupTables == NULL) { + return; + } - for(int32_t j = 0; j < pInfo->vgInfo.numOfEps; ++j) { - taosTFree(pInfo->vgInfo.epAddr[j].fqdn); - } + size_t num = taosArrayGetSize(pVgroupTables); + for (size_t i = 0; i < num; i++) { + SVgroupTableInfo* pInfo = taosArrayGet(pVgroupTables, i); - taosArrayDestroy(pInfo->itemList); + for(int32_t j = 0; j < pInfo->vgInfo.numOfEps; ++j) { + tfree(pInfo->vgInfo.epAddr[j].fqdn); } - taosArrayDestroy(pVgroupTables); + + taosArrayDestroy(pInfo->itemList); + } + + taosArrayDestroy(pVgroupTables); +} + +void tscRemoveVgroupTableGroup(SArray* pVgroupTable, int32_t index) { + assert(pVgroupTable != NULL && index >= 0); + + size_t size = taosArrayGetSize(pVgroupTable); + assert(size > index); + + SVgroupTableInfo* pInfo = taosArrayGet(pVgroupTable, index); + for(int32_t j = 0; j < pInfo->vgInfo.numOfEps; ++j) { + tfree(pInfo->vgInfo.epAddr[j].fqdn); + } + + taosArrayDestroy(pInfo->itemList); + taosArrayRemove(pVgroupTable, index); +} + +void tscVgroupTableCopy(SVgroupTableInfo* info, SVgroupTableInfo* pInfo) { + memset(info, 0, sizeof(SVgroupTableInfo)); + + info->vgInfo = pInfo->vgInfo; + for(int32_t j = 0; j < pInfo->vgInfo.numOfEps; ++j) { + info->vgInfo.epAddr[j].fqdn = strdup(pInfo->vgInfo.epAddr[j].fqdn); + } + + info->itemList = taosArrayClone(pInfo->itemList); +} + +SArray* tscVgroupTableInfoClone(SArray* pVgroupTables) { + if (pVgroupTables == NULL) { + return NULL; } + + size_t 
num = taosArrayGetSize(pVgroupTables); + SArray* pa = taosArrayInit(num, sizeof(SVgroupTableInfo)); + + SVgroupTableInfo info; + for (size_t i = 0; i < num; i++) { + SVgroupTableInfo* pInfo = taosArrayGet(pVgroupTables, i); + tscVgroupTableCopy(&info, pInfo); + + taosArrayPush(pa, &info); + } + + return pa; } void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, const char* address, bool removeFromCache) { - tscDebug("%p deref the table meta in cache, numOfTables:%d", address, pQueryInfo->numOfTables); + tscDebug("%p unref %d tables in the tableMeta cache", address, pQueryInfo->numOfTables); for(int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, i); @@ -1704,11 +1809,11 @@ void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, const char* address, bool rem free(pTableMetaInfo); } - taosTFree(pQueryInfo->pTableMetaInfo); + tfree(pQueryInfo->pTableMetaInfo); } STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, STableMeta* pTableMeta, - SVgroupsInfo* vgroupList, SArray* pTagCols) { + SVgroupsInfo* vgroupList, SArray* pTagCols, SArray* pVgroupTables) { void* pAlloc = realloc(pQueryInfo->pTableMetaInfo, (pQueryInfo->numOfTables + 1) * POINTER_BYTES); if (pAlloc == NULL) { terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; @@ -1734,6 +1839,7 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, ST pTableMetaInfo->vgroupList = tscVgroupInfoClone(vgroupList); } + // TODO handle malloc failure pTableMetaInfo->tagColList = taosArrayInit(4, POINTER_BYTES); if (pTableMetaInfo->tagColList == NULL) { return NULL; @@ -1742,13 +1848,15 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, ST if (pTagCols != NULL) { tscColumnListCopy(pTableMetaInfo->tagColList, pTagCols, -1); } + + pTableMetaInfo->pVgroupTables = tscVgroupTableInfoClone(pVgroupTables); pQueryInfo->numOfTables += 1; return pTableMetaInfo; } STableMetaInfo* tscAddEmptyMetaInfo(SQueryInfo* pQueryInfo) { - return tscAddTableMetaInfo(pQueryInfo, NULL, NULL, NULL, NULL); + return tscAddTableMetaInfo(pQueryInfo, NULL, NULL, NULL, NULL, NULL); } void tscClearTableMetaInfo(STableMetaInfo* pTableMetaInfo, bool removeFromCache) { @@ -1822,7 +1930,7 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cm assert(pSql->cmd.clauseIndex == 0); STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, pSql->cmd.clauseIndex, 0); - tscAddTableMetaInfo(pQueryInfo, pMasterTableMetaInfo->name, NULL, NULL, NULL); + tscAddTableMetaInfo(pQueryInfo, pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL); registerSqlObj(pNew); return pNew; @@ -1987,14 +2095,16 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void STableMeta* pTableMeta = taosCacheAcquireByData(tscMetaCache, pTableMetaInfo->pTableMeta); // get by name may failed due to the cache cleanup assert(pTableMeta != NULL); - pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pTableMeta, pTableMetaInfo->vgroupList, pTableMetaInfo->tagColList); + pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pTableMeta, pTableMetaInfo->vgroupList, + pTableMetaInfo->tagColList, pTableMetaInfo->pVgroupTables); } else { // transfer the ownership of pTableMeta to the newly create sql object. 
STableMetaInfo* pPrevInfo = tscGetTableMetaInfoFromCmd(&pPrevSql->cmd, pPrevSql->cmd.clauseIndex, 0); STableMeta* pPrevTableMeta = taosCacheTransfer(tscMetaCache, (void**)&pPrevInfo->pTableMeta); SVgroupsInfo* pVgroupsInfo = pPrevInfo->vgroupList; - pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pPrevTableMeta, pVgroupsInfo, pTableMetaInfo->tagColList); + pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pPrevTableMeta, pVgroupsInfo, pTableMetaInfo->tagColList, + pTableMetaInfo->pVgroupTables); } if (pFinalInfo->pTableMeta == NULL) { @@ -2106,6 +2216,21 @@ int16_t tscGetJoinTagColIdByUid(STagCond* pTagCond, uint64_t uid) { } } +int16_t tscGetTagColIndexById(STableMeta* pTableMeta, int16_t colId) { + int32_t numOfTags = tscGetNumOfTags(pTableMeta); + + SSchema* pSchema = tscGetTableTagSchema(pTableMeta); + for(int32_t i = 0; i < numOfTags; ++i) { + if (pSchema[i].colId == colId) { + return i; + } + } + + // can not reach here + assert(0); + return INT16_MIN; +} + bool tscIsUpdateQuery(SSqlObj* pSql) { if (pSql == NULL || pSql->signature != pSql) { terrno = TSDB_CODE_TSC_DISCONNECTED; @@ -2283,7 +2408,7 @@ void tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp) { pRes->numOfTotal = num; - taosTFree(pSql->pSubs); + tfree(pSql->pSubs); pSql->subState.numOfSub = 0; pSql->fp = fp; @@ -2375,7 +2500,7 @@ SVgroupsInfo* tscVgroupInfoClone(SVgroupsInfo *vgroupList) { return NULL; } - size_t size = sizeof(SVgroupsInfo) + sizeof(SCMVgroupInfo) * vgroupList->numOfVgroups; + size_t size = sizeof(SVgroupsInfo) + sizeof(SVgroupInfo) * vgroupList->numOfVgroups; SVgroupsInfo* pNew = calloc(1, size); if (pNew == NULL) { return NULL; @@ -2384,9 +2509,9 @@ SVgroupsInfo* tscVgroupInfoClone(SVgroupsInfo *vgroupList) { pNew->numOfVgroups = vgroupList->numOfVgroups; for(int32_t i = 0; i < vgroupList->numOfVgroups; ++i) { - SCMVgroupInfo* pNewVInfo = &pNew->vgroups[i]; + SVgroupInfo* pNewVInfo = &pNew->vgroups[i]; - SCMVgroupInfo* pvInfo = &vgroupList->vgroups[i]; + SVgroupInfo* pvInfo = &vgroupList->vgroups[i]; pNewVInfo->vgId = pvInfo->vgId; pNewVInfo->numOfEps = pvInfo->numOfEps; @@ -2405,21 +2530,22 @@ void* tscVgroupInfoClear(SVgroupsInfo *vgroupList) { } for(int32_t i = 0; i < vgroupList->numOfVgroups; ++i) { - SCMVgroupInfo* pVgroupInfo = &vgroupList->vgroups[i]; + SVgroupInfo* pVgroupInfo = &vgroupList->vgroups[i]; for(int32_t j = 0; j < pVgroupInfo->numOfEps; ++j) { - taosTFree(pVgroupInfo->epAddr[j].fqdn); + tfree(pVgroupInfo->epAddr[j].fqdn); } } - taosTFree(vgroupList); + tfree(vgroupList); return NULL; } -void tscSCMVgroupInfoCopy(SCMVgroupInfo* dst, const SCMVgroupInfo* src) { +void tscSVgroupInfoCopy(SVgroupInfo* dst, const SVgroupInfo* src) { dst->vgId = src->vgId; dst->numOfEps = src->numOfEps; for(int32_t i = 0; i < dst->numOfEps; ++i) { + tfree(dst->epAddr[i].fqdn); dst->epAddr[i].port = src->epAddr[i].port; dst->epAddr[i].fqdn = strdup(src->epAddr[i].fqdn); } diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h index cc4afeb3f850785ffb18a972961668f91061c6a4..8d4949d9b4364fd1e8c70cbb883aa56468724108 100644 --- a/src/common/inc/tdataformat.h +++ b/src/common/inc/tdataformat.h @@ -80,7 +80,7 @@ typedef struct { #define schemaFLen(s) ((s)->flen) #define schemaVLen(s) ((s)->vlen) #define schemaColAt(s, i) ((s)->columns + i) -#define tdFreeSchema(s) taosTFree((s)) +#define tdFreeSchema(s) tfree((s)) STSchema *tdDupSchema(STSchema *pSchema); int tdEncodeSchema(void **buf, STSchema *pSchema); @@ -119,6 +119,33 @@ void tdResetTSchemaBuilder(STSchemaBuilder *pBuilder, 
int32_t version); int tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int16_t colId, int16_t bytes); STSchema *tdGetSchemaFromBuilder(STSchemaBuilder *pBuilder); +// ----------------- Semantic timestamp key definition +typedef uint64_t TKEY; + +#define TKEY_INVALID UINT64_MAX +#define TKEY_NULL TKEY_INVALID +#define TKEY_NEGATIVE_FLAG (((TKEY)1) << 63) +#define TKEY_DELETE_FLAG (((TKEY)1) << 62) +#define TKEY_VALUE_FILTER (~(TKEY_NEGATIVE_FLAG | TKEY_DELETE_FLAG)) + +#define TKEY_IS_NEGATIVE(tkey) (((tkey)&TKEY_NEGATIVE_FLAG) != 0) +#define TKEY_IS_DELETED(tkey) (((tkey)&TKEY_DELETE_FLAG) != 0) +#define tdSetTKEYDeleted(tkey) ((tkey) | TKEY_DELETE_FLAG) +#define tdGetTKEY(key) (((TKEY)ABS(key)) | (TKEY_NEGATIVE_FLAG & (TKEY)(key))) +#define tdGetKey(tkey) (((TSKEY)((tkey)&TKEY_VALUE_FILTER)) * (TKEY_IS_NEGATIVE(tkey) ? -1 : 1)) + +static FORCE_INLINE int tkeyComparFn(const void *tkey1, const void *tkey2) { + TSKEY key1 = tdGetKey(*(TKEY *)tkey1); + TSKEY key2 = tdGetKey(*(TKEY *)tkey2); + + if (key1 < key2) { + return -1; + } else if (key1 > key2) { + return 1; + } else { + return 0; + } +} // ----------------- Data row structure /* A data row, the format is like below: @@ -129,6 +156,8 @@ STSchema *tdGetSchemaFromBuilder(STSchemaBuilder *pBuilder); * +----------+----------+---------------------------------+---------------------------------+ * | len | sversion | First part | Second part | * +----------+----------+---------------------------------+---------------------------------+ + * + * NOTE: timestamp in this row structure is TKEY instead of TSKEY */ typedef void *SDataRow; @@ -137,11 +166,13 @@ typedef void *SDataRow; #define dataRowLen(r) (*(uint16_t *)(r)) #define dataRowVersion(r) *(int16_t *)POINTER_SHIFT(r, sizeof(int16_t)) #define dataRowTuple(r) POINTER_SHIFT(r, TD_DATA_ROW_HEAD_SIZE) -#define dataRowKey(r) (*(TSKEY *)(dataRowTuple(r))) +#define dataRowTKey(r) (*(TKEY *)(dataRowTuple(r))) +#define dataRowKey(r) tdGetKey(dataRowTKey(r)) #define dataRowSetLen(r, l) (dataRowLen(r) = (l)) #define dataRowSetVersion(r, v) (dataRowVersion(r) = (v)) #define dataRowCpy(dst, r) memcpy((dst), (r), dataRowLen(r)) #define dataRowMaxBytesFromSchema(s) (schemaTLen(s) + TD_DATA_ROW_HEAD_SIZE) +#define dataRowDeleted(r) TKEY_IS_DELETED(dataRowTKey(r)) SDataRow tdNewDataRowFromSchema(STSchema *pSchema); void tdFreeDataRow(SDataRow row); @@ -154,16 +185,18 @@ static FORCE_INLINE int tdAppendColVal(SDataRow row, void *value, int8_t type, i int32_t toffset = offset + TD_DATA_ROW_HEAD_SIZE; char * ptr = (char *)POINTER_SHIFT(row, dataRowLen(row)); - switch (type) { - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - *(VarDataOffsetT *)POINTER_SHIFT(row, toffset) = dataRowLen(row); - memcpy(ptr, value, varDataTLen(value)); - dataRowLen(row) += varDataTLen(value); - break; - default: + if (IS_VAR_DATA_TYPE(type)) { + *(VarDataOffsetT *)POINTER_SHIFT(row, toffset) = dataRowLen(row); + memcpy(ptr, value, varDataTLen(value)); + dataRowLen(row) += varDataTLen(value); + } else { + if (offset == 0) { + ASSERT(type == TSDB_DATA_TYPE_TIMESTAMP); + TKEY tvalue = tdGetTKEY(*(TSKEY *)value); + memcpy(POINTER_SHIFT(row, toffset), (void *)(&tvalue), TYPE_BYTES[type]); + } else { memcpy(POINTER_SHIFT(row, toffset), value, TYPE_BYTES[type]); - break; + } } return 0; @@ -171,12 +204,10 @@ static FORCE_INLINE int tdAppendColVal(SDataRow row, void *value, int8_t type, i // NOTE: offset here including the header size static FORCE_INLINE void *tdGetRowDataOfCol(SDataRow row, int8_t type, int32_t offset) 
{ - switch (type) { - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - return POINTER_SHIFT(row, *(VarDataOffsetT *)POINTER_SHIFT(row, offset)); - default: - return POINTER_SHIFT(row, offset); + if (IS_VAR_DATA_TYPE(type)) { + return POINTER_SHIFT(row, *(VarDataOffsetT *)POINTER_SHIFT(row, offset)); + } else { + return POINTER_SHIFT(row, offset); } } @@ -196,7 +227,6 @@ static FORCE_INLINE void dataColReset(SDataCol *pDataCol) { pDataCol->len = 0; } void dataColInit(SDataCol *pDataCol, STColumn *pCol, void **pBuf, int maxPoints); void dataColAppendVal(SDataCol *pCol, void *value, int numOfRows, int maxPoints); -void dataColPopPoints(SDataCol *pCol, int pointsToPop, int numOfRows); void dataColSetOffset(SDataCol *pCol, int nEle); bool isNEleNull(SDataCol *pCol, int nEle); @@ -204,28 +234,20 @@ void dataColSetNEleNull(SDataCol *pCol, int nEle, int maxPoints); // Get the data pointer from a column-wised data static FORCE_INLINE void *tdGetColDataOfRow(SDataCol *pCol, int row) { - switch (pCol->type) { - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - return POINTER_SHIFT(pCol->pData, pCol->dataOff[row]); - break; - - default: - return POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * row); - break; + if (IS_VAR_DATA_TYPE(pCol->type)) { + return POINTER_SHIFT(pCol->pData, pCol->dataOff[row]); + } else { + return POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * row); } } static FORCE_INLINE int32_t dataColGetNEleLen(SDataCol *pDataCol, int rows) { ASSERT(rows > 0); - switch (pDataCol->type) { - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - return pDataCol->dataOff[rows - 1] + varDataTLen(tdGetColDataOfRow(pDataCol, rows - 1)); - break; - default: - return TYPE_BYTES[pDataCol->type] * rows; + if (IS_VAR_DATA_TYPE(pDataCol->type)) { + return pDataCol->dataOff[rows - 1] + varDataTLen(tdGetColDataOfRow(pDataCol, rows - 1)); + } else { + return TYPE_BYTES[pDataCol->type] * rows; } } @@ -243,9 +265,14 @@ typedef struct { } SDataCols; #define keyCol(pCols) (&((pCols)->cols[0])) // Key column -#define dataColsKeyAt(pCols, idx) ((TSKEY *)(keyCol(pCols)->pData))[(idx)] -#define dataColsKeyFirst(pCols) dataColsKeyAt(pCols, 0) -#define dataColsKeyLast(pCols) ((pCols->numOfRows == 0) ? 0 : dataColsKeyAt(pCols, (pCols)->numOfRows - 1)) +#define dataColsTKeyAt(pCols, idx) ((TKEY *)(keyCol(pCols)->pData))[(idx)] +#define dataColsKeyAt(pCols, idx) tdGetKey(dataColsTKeyAt(pCols, idx)) +#define dataColsTKeyFirst(pCols) (((pCols)->numOfRows == 0) ? TKEY_INVALID : dataColsTKeyAt(pCols, 0)) +#define dataColsKeyFirst(pCols) (((pCols)->numOfRows == 0) ? TSDB_DATA_TIMESTAMP_NULL : dataColsKeyAt(pCols, 0)) +#define dataColsTKeyLast(pCols) \ + (((pCols)->numOfRows == 0) ? TKEY_INVALID : dataColsTKeyAt(pCols, (pCols)->numOfRows - 1)) +#define dataColsKeyLast(pCols) \ + (((pCols)->numOfRows == 0) ? TSDB_DATA_TIMESTAMP_NULL : dataColsKeyAt(pCols, (pCols)->numOfRows - 1)) SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows); void tdResetDataCols(SDataCols *pCols); @@ -253,10 +280,7 @@ int tdInitDataCols(SDataCols *pCols, STSchema *pSchema); SDataCols *tdDupDataCols(SDataCols *pCols, bool keepData); void tdFreeDataCols(SDataCols *pCols); void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols); -void tdPopDataColsPoints(SDataCols *pCols, int pointsToPop); //!!!! 
int tdMergeDataCols(SDataCols *target, SDataCols *src, int rowsToMerge); -void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2, - int limit2, int tRows); // ----------------- K-V data row structure /* @@ -284,7 +308,7 @@ typedef struct { #define kvRowCpy(dst, r) memcpy((dst), (r), kvRowLen(r)) #define kvRowColVal(r, colIdx) POINTER_SHIFT(kvRowValues(r), (colIdx)->offset) #define kvRowColIdxAt(r, i) (kvRowColIdx(r) + (i)) -#define kvRowFree(r) taosTFree(r) +#define kvRowFree(r) tfree(r) #define kvRowEnd(r) POINTER_SHIFT(r, kvRowLen(r)) SKVRow tdKVRowDup(SKVRow row); diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index 515115c323294a67318b5eb1dd17660e651d09f8..4087f638a95b063fcf6081db2556758643ef1c9a 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -44,13 +44,18 @@ extern int32_t tsMaxShellConns; extern int32_t tsShellActivityTimer; extern uint32_t tsMaxTmrCtrl; extern float tsNumOfThreadsPerCore; -extern float tsRatioOfQueryThreads; +extern int32_t tsNumOfCommitThreads; +extern float tsRatioOfQueryThreads; // todo remove it extern int8_t tsDaylight; extern char tsTimezone[]; extern char tsLocale[]; -extern char tsCharset[]; // default encode string +extern char tsCharset[]; // default encode string extern int32_t tsEnableCoreFile; extern int32_t tsCompressMsgSize; +extern char tsTempDir[]; + +//query buffer management +extern int32_t tsQueryBufferSize; // maximum allowed usage buffer for each data node during query processing // client extern int32_t tsTableMetaKeepTimer; @@ -84,6 +89,7 @@ extern int16_t tsWAL; extern int32_t tsFsyncPeriod; extern int32_t tsReplications; extern int32_t tsQuorum; +extern int32_t tsUpdate; // balance extern int32_t tsEnableBalance; @@ -180,13 +186,13 @@ extern int32_t debugFlag; #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize) -void taosInitGlobalCfg(); -bool taosCheckGlobalCfg(); -void taosSetAllDebugFlag(); -bool taosCfgDynamicOptions(char *msg); -int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port); -bool taosCheckBalanceCfgOptions(const char *option, int32_t *vnodeId, int32_t *dnodeId); - +void taosInitGlobalCfg(); +int32_t taosCheckGlobalCfg(); +void taosSetAllDebugFlag(); +bool taosCfgDynamicOptions(char *msg); +int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port); +bool taosCheckBalanceCfgOptions(const char *option, int32_t *vnodeId, int32_t *dnodeId); + #ifdef __cplusplus } #endif diff --git a/src/common/inc/tname.h b/src/common/inc/tname.h index 6b73d98b81fa8cbcce8d322f566d8b6709396f1c..6c48ca72f3b5ea010c662faa09211afd40152615 100644 --- a/src/common/inc/tname.h +++ b/src/common/inc/tname.h @@ -35,6 +35,6 @@ bool tscValidateTableNameLength(size_t len); SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters); -// int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision); +SSchema tscGetTbnameColumnSchema(); #endif // TDENGINE_NAME_H diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index 28289b051e4ef32e1b1e22847df584238857002b..f21205479396ba606a1212f30350df2e0b3f59b5 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -18,6 +18,9 @@ #include "tcoding.h" #include "wchar.h" +static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2, + int limit2, int tRows); + 
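/*
 * Editorial sketch (not part of the patch): expected round-trip behaviour of the TKEY
 * helpers added in tdataformat.h and relied on by the merge logic further down in this
 * file, assuming ABS() is the usual absolute-value macro.
 *
 *   TSKEY key  = -1590000000000LL;      // a negative timestamp
 *   TKEY  tkey = tdGetTKEY(key);        // magnitude with TKEY_NEGATIVE_FLAG (bit 63) set
 *   assert(tdGetKey(tkey) == key);      // sign restored from the flag bit
 *   assert(!TKEY_IS_DELETED(tkey));     // delete flag (bit 62) starts clear
 *   tkey = tdSetTKEYDeleted(tkey);      // mark the row as a delete record
 *   assert(TKEY_IS_DELETED(tkey) && tdGetKey(tkey) == key);
 */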
/** * Duplicate the schema and return a new object */ @@ -94,7 +97,7 @@ int tdInitTSchemaBuilder(STSchemaBuilder *pBuilder, int32_t version) { void tdDestroyTSchemaBuilder(STSchemaBuilder *pBuilder) { if (pBuilder) { - taosTFree(pBuilder->columns); + tfree(pBuilder->columns); } } @@ -202,7 +205,7 @@ void dataColInit(SDataCol *pDataCol, STColumn *pCol, void **pBuf, int maxPoints) pDataCol->offset = colOffset(pCol) + TD_DATA_ROW_HEAD_SIZE; pDataCol->len = 0; - if (pDataCol->type == TSDB_DATA_TYPE_BINARY || pDataCol->type == TSDB_DATA_TYPE_NCHAR) { + if (IS_VAR_DATA_TYPE(pDataCol->type)) { pDataCol->dataOff = (VarDataOffsetT *)(*pBuf); pDataCol->pData = POINTER_SHIFT(*pBuf, sizeof(VarDataOffsetT) * maxPoints); pDataCol->spaceSize = pDataCol->bytes * maxPoints; @@ -215,60 +218,29 @@ void dataColInit(SDataCol *pDataCol, STColumn *pCol, void **pBuf, int maxPoints) } } +// value from timestamp should be TKEY here instead of TSKEY void dataColAppendVal(SDataCol *pCol, void *value, int numOfRows, int maxPoints) { ASSERT(pCol != NULL && value != NULL); - switch (pCol->type) { - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - // set offset - pCol->dataOff[numOfRows] = pCol->len; - // Copy data - memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, varDataTLen(value)); - // Update the length - pCol->len += varDataTLen(value); - break; - default: - ASSERT(pCol->len == TYPE_BYTES[pCol->type] * numOfRows); - memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, pCol->bytes); - pCol->len += pCol->bytes; - break; - } -} - -void dataColPopPoints(SDataCol *pCol, int pointsToPop, int numOfRows) { - int pointsLeft = numOfRows - pointsToPop; - - ASSERT(pointsLeft > 0); - - if (pCol->type == TSDB_DATA_TYPE_BINARY || pCol->type == TSDB_DATA_TYPE_NCHAR) { - ASSERT(pCol->len > 0); - VarDataOffsetT toffset = pCol->dataOff[pointsToPop]; - pCol->len = pCol->len - toffset; - ASSERT(pCol->len > 0); - memmove(pCol->pData, POINTER_SHIFT(pCol->pData, toffset), pCol->len); - dataColSetOffset(pCol, pointsLeft); + if (IS_VAR_DATA_TYPE(pCol->type)) { + // set offset + pCol->dataOff[numOfRows] = pCol->len; + // Copy data + memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, varDataTLen(value)); + // Update the length + pCol->len += varDataTLen(value); } else { ASSERT(pCol->len == TYPE_BYTES[pCol->type] * numOfRows); - pCol->len = TYPE_BYTES[pCol->type] * pointsLeft; - memmove(pCol->pData, POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * pointsToPop), pCol->len); + memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, pCol->bytes); + pCol->len += pCol->bytes; } } bool isNEleNull(SDataCol *pCol, int nEle) { - switch (pCol->type) { - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - for (int i = 0; i < nEle; i++) { - if (!isNull(tdGetColDataOfRow(pCol, i), pCol->type)) return false; - } - return true; - default: - for (int i = 0; i < nEle; i++) { - if (!isNull(tdGetColDataOfRow(pCol, i), pCol->type)) return false; - } - return true; + for (int i = 0; i < nEle; i++) { + if (!isNull(tdGetColDataOfRow(pCol, i), pCol->type)) return false; } + return true; } void dataColSetNullAt(SDataCol *pCol, int index) { @@ -367,8 +339,8 @@ int tdInitDataCols(SDataCols *pCols, STSchema *pSchema) { void tdFreeDataCols(SDataCols *pCols) { if (pCols) { - taosTFree(pCols->buf); - taosTFree(pCols->cols); + tfree(pCols->buf); + tfree(pCols->cols); free(pCols); } } @@ -390,7 +362,7 @@ SDataCols *tdDupDataCols(SDataCols *pDataCols, bool keepData) { pRet->cols[i].spaceSize = pDataCols->cols[i].spaceSize; pRet->cols[i].pData = (void 
*)((char *)pRet->buf + ((char *)(pDataCols->cols[i].pData) - (char *)(pDataCols->buf))); - if (pRet->cols[i].type == TSDB_DATA_TYPE_BINARY || pRet->cols[i].type == TSDB_DATA_TYPE_NCHAR) { + if (IS_VAR_DATA_TYPE(pRet->cols[i].type)) { ASSERT(pDataCols->cols[i].dataOff != NULL); pRet->cols[i].dataOff = (int32_t *)((char *)pRet->buf + ((char *)(pDataCols->cols[i].dataOff) - (char *)(pDataCols->buf))); @@ -400,7 +372,7 @@ SDataCols *tdDupDataCols(SDataCols *pDataCols, bool keepData) { pRet->cols[i].len = pDataCols->cols[i].len; if (pDataCols->cols[i].len > 0) { memcpy(pRet->cols[i].pData, pDataCols->cols[i].pData, pDataCols->cols[i].len); - if (pRet->cols[i].type == TSDB_DATA_TYPE_BINARY || pRet->cols[i].type == TSDB_DATA_TYPE_NCHAR) { + if (IS_VAR_DATA_TYPE(pRet->cols[i].type)) { memcpy(pRet->cols[i].dataOff, pDataCols->cols[i].dataOff, sizeof(VarDataOffsetT) * pDataCols->maxPoints); } } @@ -420,58 +392,54 @@ void tdResetDataCols(SDataCols *pCols) { } void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols) { - ASSERT(dataColsKeyLast(pCols) < dataRowKey(row)); + ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) < dataRowKey(row)); int rcol = 0; int dcol = 0; - while (dcol < pCols->numOfCols) { - SDataCol *pDataCol = &(pCols->cols[dcol]); - if (rcol >= schemaNCols(pSchema)) { - dataColSetNullAt(pDataCol, pCols->numOfRows); - dcol++; - continue; + if (dataRowDeleted(row)) { + for (; dcol < pCols->numOfCols; dcol++) { + SDataCol *pDataCol = &(pCols->cols[dcol]); + if (dcol == 0) { + dataColAppendVal(pDataCol, dataRowTuple(row), pCols->numOfRows, pCols->maxPoints); + } else { + dataColSetNullAt(pDataCol, pCols->numOfRows); + } } + } else { + while (dcol < pCols->numOfCols) { + SDataCol *pDataCol = &(pCols->cols[dcol]); + if (rcol >= schemaNCols(pSchema)) { + dataColSetNullAt(pDataCol, pCols->numOfRows); + dcol++; + continue; + } - STColumn *pRowCol = schemaColAt(pSchema, rcol); - if (pRowCol->colId == pDataCol->colId) { - void *value = tdGetRowDataOfCol(row, pRowCol->type, pRowCol->offset+TD_DATA_ROW_HEAD_SIZE); - dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints); - dcol++; - rcol++; - } else if (pRowCol->colId < pDataCol->colId) { - rcol++; - } else { - dataColSetNullAt(pDataCol, pCols->numOfRows); - dcol++; + STColumn *pRowCol = schemaColAt(pSchema, rcol); + if (pRowCol->colId == pDataCol->colId) { + void *value = tdGetRowDataOfCol(row, pRowCol->type, pRowCol->offset + TD_DATA_ROW_HEAD_SIZE); + dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints); + dcol++; + rcol++; + } else if (pRowCol->colId < pDataCol->colId) { + rcol++; + } else { + dataColSetNullAt(pDataCol, pCols->numOfRows); + dcol++; + } } } pCols->numOfRows++; } -// Pop pointsToPop points from the SDataCols -void tdPopDataColsPoints(SDataCols *pCols, int pointsToPop) { - int pointsLeft = pCols->numOfRows - pointsToPop; - if (pointsLeft <= 0) { - tdResetDataCols(pCols); - return; - } - - for (int iCol = 0; iCol < pCols->numOfCols; iCol++) { - SDataCol *pCol = pCols->cols + iCol; - dataColPopPoints(pCol, pointsToPop, pCols->numOfRows); - } - pCols->numOfRows = pointsLeft; -} - int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge) { ASSERT(rowsToMerge > 0 && rowsToMerge <= source->numOfRows); - ASSERT(target->numOfRows + rowsToMerge <= target->maxPoints); ASSERT(target->numOfCols == source->numOfCols); SDataCols *pTarget = NULL; if (dataColsKeyLast(target) < dataColsKeyFirst(source)) { // No overlap + ASSERT(target->numOfRows + rowsToMerge <= 
target->maxPoints); for (int i = 0; i < rowsToMerge; i++) { for (int j = 0; j < source->numOfCols; j++) { if (source->cols[j].len > 0) { @@ -499,17 +467,23 @@ _err: return -1; } -void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2, int limit2, int tRows) { +// src2 data has more priority than src1 +static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2, + int limit2, int tRows) { tdResetDataCols(target); ASSERT(limit1 <= src1->numOfRows && limit2 <= src2->numOfRows); while (target->numOfRows < tRows) { if (*iter1 >= limit1 && *iter2 >= limit2) break; - TSKEY key1 = (*iter1 >= limit1) ? INT64_MAX : ((TSKEY *)(src1->cols[0].pData))[*iter1]; - TSKEY key2 = (*iter2 >= limit2) ? INT64_MAX : ((TSKEY *)(src2->cols[0].pData))[*iter2]; + TSKEY key1 = (*iter1 >= limit1) ? INT64_MAX : dataColsKeyAt(src1, *iter1); + TKEY tkey1 = (*iter1 >= limit1) ? TKEY_NULL : dataColsTKeyAt(src1, *iter1); + TSKEY key2 = (*iter2 >= limit2) ? INT64_MAX : dataColsKeyAt(src2, *iter2); + TKEY tkey2 = (*iter2 >= limit2) ? TKEY_NULL : dataColsTKeyAt(src2, *iter2); - if (key1 <= key2) { + ASSERT(tkey1 == TKEY_NULL || (!TKEY_IS_DELETED(tkey1))); + + if (key1 < key2) { for (int i = 0; i < src1->numOfCols; i++) { ASSERT(target->cols[i].type == src1->cols[i].type); if (src1->cols[i].len > 0) { @@ -520,19 +494,23 @@ void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limi target->numOfRows++; (*iter1)++; - if (key1 == key2) (*iter2)++; - } else { - for (int i = 0; i < src2->numOfCols; i++) { - ASSERT(target->cols[i].type == src2->cols[i].type); - if (src2->cols[i].len > 0) { - dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src2->cols + i, *iter2), target->numOfRows, - target->maxPoints); + } else if (key1 >= key2) { + if ((key1 > key2) || (key1 == key2 && !TKEY_IS_DELETED(tkey2))) { + for (int i = 0; i < src2->numOfCols; i++) { + ASSERT(target->cols[i].type == src2->cols[i].type); + if (src2->cols[i].len > 0) { + dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src2->cols + i, *iter2), target->numOfRows, + target->maxPoints); + } } + target->numOfRows++; } - target->numOfRows++; (*iter2)++; + if (key1 == key2) (*iter1)++; } + + ASSERT(target->numOfRows <= target->maxPoints); } } @@ -691,8 +669,8 @@ int tdInitKVRowBuilder(SKVRowBuilder *pBuilder) { } void tdDestroyKVRowBuilder(SKVRowBuilder *pBuilder) { - taosTFree(pBuilder->pColIdx); - taosTFree(pBuilder->buf); + tfree(pBuilder->pColIdx); + tfree(pBuilder->buf); } void tdResetKVRowBuilder(SKVRowBuilder *pBuilder) { diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index c24ba490ba7f4cb25ba032b0404790d68540c826..4495c3d9288a5df0b8944aa444625c143b5c7670 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -45,19 +45,21 @@ int32_t tsEnableTelemetryReporting = 1; char tsEmail[TSDB_FQDN_LEN] = {0}; // common -int32_t tsRpcTimer = 1000; -int32_t tsRpcMaxTime = 600; // seconds; -int32_t tsMaxShellConns = 5000; +int32_t tsRpcTimer = 1000; +int32_t tsRpcMaxTime = 600; // seconds; +int32_t tsMaxShellConns = 5000; int32_t tsMaxConnections = 5000; -int32_t tsShellActivityTimer = 3; // second -float tsNumOfThreadsPerCore = 1.0; -float tsRatioOfQueryThreads = 0.5; -int8_t tsDaylight = 0; +int32_t tsShellActivityTimer = 3; // second +float tsNumOfThreadsPerCore = 1.0f; +int32_t tsNumOfCommitThreads = 1; +float tsRatioOfQueryThreads = 0.5f; +int8_t tsDaylight = 0; char tsTimezone[TSDB_TIMEZONE_LEN] = 
{0}; char tsLocale[TSDB_LOCALE_LEN] = {0}; char tsCharset[TSDB_LOCALE_LEN] = {0}; // default encode string int32_t tsEnableCoreFile = 0; int32_t tsMaxBinaryDisplayWidth = 30; +char tsTempDir[TSDB_FILENAME_LEN] = "/tmp/"; /* * denote if the server needs to compress response message at the application layer to client, including query rsp, @@ -99,6 +101,12 @@ float tsStreamComputDelayRatio = 0.1f; int32_t tsProjectExecInterval = 10000; // every 10sec, the projection will be executed once int64_t tsMaxRetentWindow = 24 * 3600L; // maximum time window tolerance +// the maximum allowed query buffer size during query processing for each data node. +// -1 no limit (default) +// 0 no query allowed, queries are disabled +// positive value (in MB) +int32_t tsQueryBufferSize = -1; + // db parameters int32_t tsCacheBlockSize = TSDB_DEFAULT_CACHE_BLOCK_SIZE; int32_t tsBlocksPerVnode = TSDB_DEFAULT_TOTAL_BLOCKS; @@ -113,6 +121,7 @@ int16_t tsWAL = TSDB_DEFAULT_WAL_LEVEL; int32_t tsFsyncPeriod = TSDB_DEFAULT_FSYNC_PERIOD; int32_t tsReplications = TSDB_DEFAULT_DB_REPLICA_OPTION; int32_t tsQuorum = TSDB_DEFAULT_DB_QUORUM_OPTION; +int32_t tsUpdate = TSDB_DEFAULT_DB_UPDATE_OPTION; int32_t tsMaxVgroupsPerDb = 0; int32_t tsMinTablePerVnode = TSDB_TABLES_STEP; int32_t tsMaxTablePerVnode = TSDB_DEFAULT_TABLES; @@ -210,6 +219,8 @@ int32_t (*monitorStartSystemFp)() = NULL; void (*monitorStopSystemFp)() = NULL; void (*monitorExecuteSQLFp)(char *sql) = NULL; +char *qtypeStr[] = {"rpc", "fwd", "wal", "cq", "query"}; + static pthread_once_t tsInitGlobalCfgOnce = PTHREAD_ONCE_INIT; void taosSetAllDebugFlag() { @@ -416,6 +427,16 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); + cfg.option = "numOfCommitThreads"; + cfg.ptr = &tsNumOfCommitThreads; + cfg.valType = TAOS_CFG_VTYPE_INT32; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG; + cfg.minValue = 1; + cfg.maxValue = 100; + cfg.ptrLength = 0; + cfg.unitType = TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); + cfg.option = "ratioOfQueryThreads"; cfg.ptr = &tsRatioOfQueryThreads; cfg.valType = TAOS_CFG_VTYPE_FLOAT; @@ -676,7 +697,7 @@ static void doInitGlobalConfig(void) { cfg.minValue = TSDB_MIN_CACHE_BLOCK_SIZE; cfg.maxValue = TSDB_MAX_CACHE_BLOCK_SIZE; cfg.ptrLength = 0; - cfg.unitType = TAOS_CFG_UTYPE_Mb; + cfg.unitType = TAOS_CFG_UTYPE_MB; taosInitConfigOption(cfg); cfg.option = "blocks"; @@ -779,6 +800,16 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); + cfg.option = "update"; + cfg.ptr = &tsUpdate; + cfg.valType = TAOS_CFG_VTYPE_INT32; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW; + cfg.minValue = TSDB_MIN_DB_UPDATE; + cfg.maxValue = TSDB_MAX_DB_UPDATE; + cfg.ptrLength = 0; + cfg.unitType = TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); + cfg.option = "mqttHostName"; cfg.ptr = tsMqttHostName; cfg.valType = TAOS_CFG_VTYPE_STRING; @@ -839,6 +870,16 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); + cfg.option = "queryBufferSize"; + cfg.ptr = &tsQueryBufferSize; + cfg.valType = TAOS_CFG_VTYPE_INT32; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW; + cfg.minValue = -1; + cfg.maxValue = 500000000000.0f; + cfg.ptrLength = 0; + cfg.unitType = TAOS_CFG_UTYPE_BYTE; + taosInitConfigOption(cfg); + // locale & charset cfg.option = "timezone"; cfg.ptr = tsTimezone; @@ -1294,13 +1335,23 @@ static void doInitGlobalConfig(void) { cfg.ptrLength = 0; cfg.unitType = TAOS_CFG_UTYPE_NONE; 
taosInitConfigOption(cfg); + + cfg.option = "tempDir"; + cfg.ptr = tsTempDir; + cfg.valType = TAOS_CFG_VTYPE_STRING; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT; + cfg.minValue = 0; + cfg.maxValue = 0; + cfg.ptrLength = tListLen(tsTempDir); + cfg.unitType = TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); } void taosInitGlobalCfg() { pthread_once(&tsInitGlobalCfgOnce, doInitGlobalConfig); } -bool taosCheckGlobalCfg() { +int32_t taosCheckGlobalCfg() { char fqdn[TSDB_FQDN_LEN]; uint16_t port; @@ -1359,7 +1410,9 @@ bool taosCheckGlobalCfg() { tsSyncPort = tsServerPort + TSDB_PORT_SYNC; tsHttpPort = tsServerPort + TSDB_PORT_HTTP; - return true; + taosPrintGlobalCfg(); + + return 0; } int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port) { diff --git a/src/common/src/tname.c b/src/common/src/tname.c index 8879e9e7979be5b019b3048389105faa86ac9225..bea8c52ef21ea607bee5379ace879ef936ff60bb 100644 --- a/src/common/src/tname.c +++ b/src/common/src/tname.c @@ -188,3 +188,14 @@ void extractTableNameFromToken(SStrToken* pToken, SStrToken* pTable) { pToken->z = r; } } + +SSchema tscGetTbnameColumnSchema() { + struct SSchema s = { + .colId = TSDB_TBNAME_COLUMN_INDEX, + .type = TSDB_DATA_TYPE_BINARY, + .bytes = TSDB_TABLE_NAME_LEN + }; + + strcpy(s.name, TSQL_TBNAME_L); + return s; +} \ No newline at end of file diff --git a/src/common/src/ttypes.c b/src/common/src/ttypes.c index 50554ce08e3fb659c1a5915c4c50b09f950324b4..f28481977f41bf550205d2669c6f90a016d2b7c4 100644 --- a/src/common/src/ttypes.c +++ b/src/common/src/ttypes.c @@ -355,32 +355,6 @@ bool isValidDataType(int32_t type) { return type >= TSDB_DATA_TYPE_NULL && type <= TSDB_DATA_TYPE_NCHAR; } -//bool isNull(const char *val, int32_t type) { -// switch (type) { -// case TSDB_DATA_TYPE_BOOL: -// return *(uint8_t *)val == TSDB_DATA_BOOL_NULL; -// case TSDB_DATA_TYPE_TINYINT: -// return *(uint8_t *)val == TSDB_DATA_TINYINT_NULL; -// case TSDB_DATA_TYPE_SMALLINT: -// return *(uint16_t *)val == TSDB_DATA_SMALLINT_NULL; -// case TSDB_DATA_TYPE_INT: -// return *(uint32_t *)val == TSDB_DATA_INT_NULL; -// case TSDB_DATA_TYPE_BIGINT: -// case TSDB_DATA_TYPE_TIMESTAMP: -// return *(uint64_t *)val == TSDB_DATA_BIGINT_NULL; -// case TSDB_DATA_TYPE_FLOAT: -// return *(uint32_t *)val == TSDB_DATA_FLOAT_NULL; -// case TSDB_DATA_TYPE_DOUBLE: -// return *(uint64_t *)val == TSDB_DATA_DOUBLE_NULL; -// case TSDB_DATA_TYPE_NCHAR: -// return *(uint32_t*) varDataVal(val) == TSDB_DATA_NCHAR_NULL; -// case TSDB_DATA_TYPE_BINARY: -// return *(uint8_t *) varDataVal(val) == TSDB_DATA_BINARY_NULL; -// default: -// return false; -// }; -//} - void setVardataNull(char* val, int32_t type) { if (type == TSDB_DATA_TYPE_BINARY) { varDataSetLen(val, sizeof(int8_t)); @@ -433,14 +407,10 @@ void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems) { *(uint64_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_DOUBLE_NULL; } break; - case TSDB_DATA_TYPE_NCHAR: // todo : without length? 
- for (int32_t i = 0; i < numOfElems; ++i) { - *(uint32_t *)(val + i * bytes) = TSDB_DATA_NCHAR_NULL; - } - break; + case TSDB_DATA_TYPE_NCHAR: case TSDB_DATA_TYPE_BINARY: for (int32_t i = 0; i < numOfElems; ++i) { - *(uint8_t *)(val + i * bytes) = TSDB_DATA_BINARY_NULL; + setVardataNull(val + i * bytes, type); } break; default: { diff --git a/src/common/src/tvariant.c b/src/common/src/tvariant.c index 005def6dc597361436b03c15535840af2bd3461e..6dd065382265cb09e26f8d1b6f86c4b5b3f5110b 100644 --- a/src/common/src/tvariant.c +++ b/src/common/src/tvariant.c @@ -108,7 +108,7 @@ void tVariantCreateFromBinary(tVariant *pVar, const char *pz, size_t len, uint32 break; } case TSDB_DATA_TYPE_BINARY: { // todo refactor, extract a method - pVar->pz = calloc(len, sizeof(char)); + pVar->pz = calloc(len + 1, sizeof(char)); memcpy(pVar->pz, pz, len); pVar->nLen = (int32_t)len; break; @@ -125,7 +125,7 @@ void tVariantDestroy(tVariant *pVar) { if (pVar == NULL) return; if (pVar->nType == TSDB_DATA_TYPE_BINARY || pVar->nType == TSDB_DATA_TYPE_NCHAR) { - taosTFree(pVar->pz); + tfree(pVar->pz); pVar->nLen = 0; } @@ -144,21 +144,24 @@ void tVariantDestroy(tVariant *pVar) { void tVariantAssign(tVariant *pDst, const tVariant *pSrc) { if (pSrc == NULL || pDst == NULL) return; - *pDst = *pSrc; - + pDst->nType = pSrc->nType; if (pSrc->nType == TSDB_DATA_TYPE_BINARY || pSrc->nType == TSDB_DATA_TYPE_NCHAR) { - int32_t len = pSrc->nLen + 1; - if (pSrc->nType == TSDB_DATA_TYPE_NCHAR) { - len = len * TSDB_NCHAR_SIZE; - } - - pDst->pz = calloc(1, len); - memcpy(pDst->pz, pSrc->pz, len); + int32_t len = pSrc->nLen + TSDB_NCHAR_SIZE; + char* p = realloc(pDst->pz, len); + assert(p); + + memset(p, 0, len); + pDst->pz = p; + + memcpy(pDst->pz, pSrc->pz, pSrc->nLen); + pDst->nLen = pSrc->nLen; return; + } - // this is only for string array - if (pSrc->nType == TSDB_DATA_TYPE_ARRAY) { + if (pSrc->nType >= TSDB_DATA_TYPE_BOOL && pSrc->nType <= TSDB_DATA_TYPE_DOUBLE) { + pDst->i64Key = pSrc->i64Key; + } else if (pSrc->nType == TSDB_DATA_TYPE_ARRAY) { // this is only for string array size_t num = taosArrayGetSize(pSrc->arr); pDst->arr = taosArrayInit(num, sizeof(char*)); for(size_t i = 0; i < num; i++) { @@ -166,8 +169,6 @@ void tVariantAssign(tVariant *pDst, const tVariant *pSrc) { char* n = strdup(p); taosArrayPush(pDst->arr, &n); } - - return; } pDst->nLen = tDataTypeDesc[pDst->nType].nSize; diff --git a/src/connector/C#/TDengineDriver.cs b/src/connector/C#/TDengineDriver.cs new file mode 100644 index 0000000000000000000000000000000000000000..b6f143e1813d60c1ac4ae8356efdca4929c51345 --- /dev/null +++ b/src/connector/C#/TDengineDriver.cs @@ -0,0 +1,154 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +using System; +using System.Collections.Generic; +using System.Runtime.InteropServices; + +namespace TDengineDriver +{ + enum TDengineDataType { + TSDB_DATA_TYPE_NULL = 0, // 1 bytes + TSDB_DATA_TYPE_BOOL = 1, // 1 bytes + TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes + TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes + TSDB_DATA_TYPE_INT = 4, // 4 bytes + TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes + TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes + TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes + TSDB_DATA_TYPE_BINARY = 8, // string + TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes + TSDB_DATA_TYPE_NCHAR = 10 // unicode string + } + + enum TDengineInitOption + { + TSDB_OPTION_LOCALE = 0, + TSDB_OPTION_CHARSET = 1, + TSDB_OPTION_TIMEZONE = 2, + TDDB_OPTION_CONFIGDIR = 3, + TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4 + } + + class TDengineMeta + { + public string name; + public short size; + public byte type; + public string TypeName() + { + switch ((TDengineDataType)type) + { + case TDengineDataType.TSDB_DATA_TYPE_BOOL: + return "BOOLEAN"; + case TDengineDataType.TSDB_DATA_TYPE_TINYINT: + return "BYTE"; + case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: + return "SHORT"; + case TDengineDataType.TSDB_DATA_TYPE_INT: + return "INT"; + case TDengineDataType.TSDB_DATA_TYPE_BIGINT: + return "LONG"; + case TDengineDataType.TSDB_DATA_TYPE_FLOAT: + return "FLOAT"; + case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: + return "DOUBLE"; + case TDengineDataType.TSDB_DATA_TYPE_BINARY: + return "STRING"; + case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: + return "TIMESTAMP"; + case TDengineDataType.TSDB_DATA_TYPE_NCHAR: + return "NCHAR"; + default: + return "undefine"; + } + } + } + + class TDengine + { + public const int TSDB_CODE_SUCCESS = 0; + + [DllImport("taos.dll", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)] + static extern public void Init(); + + [DllImport("taos.dll", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)] + static extern public void Cleanup(); + + [DllImport("taos.dll", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)] + static extern public void Options(int option, string value); + + [DllImport("taos.dll", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr Connect(string ip, string user, string password, string db, short port); + + [DllImport("taos.dll", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)] + static extern private IntPtr taos_errstr(IntPtr res); + static public string Error(IntPtr res) + { + IntPtr errPtr = taos_errstr(res); + return Marshal.PtrToStringAnsi(errPtr); + } + + [DllImport("taos.dll", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)] + static extern public int ErrorNo(IntPtr res); + + [DllImport("taos.dll", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr Query(IntPtr conn, string sqlstr); + + [DllImport("taos.dll", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)] + static extern public int AffectRows(IntPtr res); + + [DllImport("taos.dll", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)] + static extern public int FieldCount(IntPtr res); + + [DllImport("taos.dll", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)] + static extern private IntPtr taos_fetch_fields(IntPtr res); + static public List FetchFields(IntPtr res) + { + const int fieldSize = 68; + + List metas = new List(); + if (res == 
IntPtr.Zero) + { + return metas; + } + + int fieldCount = FieldCount(res); + IntPtr fieldsPtr = taos_fetch_fields(res); + + for (int i = 0; i < fieldCount; ++i) + { + int offset = i * fieldSize; + + TDengineMeta meta = new TDengineMeta(); + meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset); + meta.type = Marshal.ReadByte(fieldsPtr + offset + 65); + meta.size = Marshal.ReadInt16(fieldsPtr + offset + 66); + metas.Add(meta); + } + + return metas; + } + + [DllImport("taos.dll", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr FetchRows(IntPtr res); + + [DllImport("taos.dll", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr FreeResult(IntPtr res); + + [DllImport("taos.dll", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)] + static extern public int Close(IntPtr taos); + } +} \ No newline at end of file diff --git a/src/connector/go b/src/connector/go index 8d7bf743852897110cbdcc7c4322cd7a74d4167b..050667e5b4d0eafa5387e4283e713559b421203f 160000 --- a/src/connector/go +++ b/src/connector/go @@ -1 +1 @@ -Subproject commit 8d7bf743852897110cbdcc7c4322cd7a74d4167b +Subproject commit 050667e5b4d0eafa5387e4283e713559b421203f diff --git a/src/connector/grafanaplugin b/src/connector/grafanaplugin index d598db167eb256fe67409b7bb3d0eb7fffc3ff8c..ec77d9049a719dabfd1a7c1122a209e201861944 160000 --- a/src/connector/grafanaplugin +++ b/src/connector/grafanaplugin @@ -1 +1 @@ -Subproject commit d598db167eb256fe67409b7bb3d0eb7fffc3ff8c +Subproject commit ec77d9049a719dabfd1a7c1122a209e201861944 diff --git a/src/connector/hivemq-tdengine-extension b/src/connector/hivemq-tdengine-extension new file mode 160000 index 0000000000000000000000000000000000000000..b62a26ecc164a310104df57691691b237e091c89 --- /dev/null +++ b/src/connector/hivemq-tdengine-extension @@ -0,0 +1 @@ +Subproject commit b62a26ecc164a310104df57691691b237e091c89 diff --git a/src/connector/jdbc/deploy-pom.xml b/src/connector/jdbc/deploy-pom.xml index 3f6ebeff03f538e4963540e015c7fd4b310acc42..51db837c7b1149bfc5dca6d69a953ceb6b3eb898 100755 --- a/src/connector/jdbc/deploy-pom.xml +++ b/src/connector/jdbc/deploy-pom.xml @@ -5,14 +5,13 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.6 + 2.0.10 jar JDBCDriver https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc TDengine JDBC Driver - GNU AFFERO GENERAL PUBLIC LICENSE Version 3 diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml index 3b62f66d2ec88002d2f749166fb00bff670617ee..e7124a0599fa80baabba84700eb097bde3e57287 100755 --- a/src/connector/jdbc/pom.xml +++ b/src/connector/jdbc/pom.xml @@ -56,6 +56,23 @@ test + + + org.apache.httpcomponents + httpclient + 4.5.8 + + + org.apache.commons + commons-lang3 + 3.9 + + + com.alibaba + fastjson + 1.2.58 + + diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractTaosDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractTaosDriver.java new file mode 100644 index 0000000000000000000000000000000000000000..f864788bfffc8bdfefb0b91ec645a10ae8eec843 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractTaosDriver.java @@ -0,0 +1,161 @@ +package com.taosdata.jdbc; + +import java.io.*; +import java.sql.Driver; +import java.sql.DriverPropertyInfo; +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; +import java.util.StringTokenizer; + +public abstract class AbstractTaosDriver implements Driver { + + private 
static final String TAOS_CFG_FILENAME = "taos.cfg"; + + /** + * @param cfgDirPath + * @return return the config dir + **/ + protected File loadConfigDir(String cfgDirPath) { + if (cfgDirPath == null) + return loadDefaultConfigDir(); + File cfgDir = new File(cfgDirPath); + if (!cfgDir.exists()) + return loadDefaultConfigDir(); + return cfgDir; + } + + /** + * @return search the default config dir, if the config dir is not exist will return null + */ + protected File loadDefaultConfigDir() { + File cfgDir; + File cfgDir_linux = new File("/etc/taos"); + cfgDir = cfgDir_linux.exists() ? cfgDir_linux : null; + File cfgDir_windows = new File("C:\\TDengine\\cfg"); + cfgDir = (cfgDir == null && cfgDir_windows.exists()) ? cfgDir_windows : cfgDir; + return cfgDir; + } + + protected List loadConfigEndpoints(File cfgFile) { + List endpoints = new ArrayList<>(); + try (BufferedReader reader = new BufferedReader(new FileReader(cfgFile))) { + String line = null; + while ((line = reader.readLine()) != null) { + if (line.trim().startsWith("firstEp") || line.trim().startsWith("secondEp")) { + endpoints.add(line.substring(line.indexOf('p') + 1).trim()); + } + if (endpoints.size() > 1) + break; + } + } catch (FileNotFoundException e) { + e.printStackTrace(); + } catch (IOException e) { + e.printStackTrace(); + } + return endpoints; + } + + protected void loadTaosConfig(Properties info) { + if ((info.getProperty(TSDBDriver.PROPERTY_KEY_HOST) == null || + info.getProperty(TSDBDriver.PROPERTY_KEY_HOST).isEmpty()) && ( + info.getProperty(TSDBDriver.PROPERTY_KEY_PORT) == null || + info.getProperty(TSDBDriver.PROPERTY_KEY_PORT).isEmpty())) { + File cfgDir = loadConfigDir(info.getProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR)); + File cfgFile = cfgDir.listFiles((dir, name) -> TAOS_CFG_FILENAME.equalsIgnoreCase(name))[0]; + List endpoints = loadConfigEndpoints(cfgFile); + if (!endpoints.isEmpty()) { + info.setProperty(TSDBDriver.PROPERTY_KEY_HOST, endpoints.get(0).split(":")[0]); + info.setProperty(TSDBDriver.PROPERTY_KEY_PORT, endpoints.get(0).split(":")[1]); + } + } + } + + protected DriverPropertyInfo[] getPropertyInfo(Properties info) { + DriverPropertyInfo hostProp = new DriverPropertyInfo(TSDBDriver.PROPERTY_KEY_HOST, info.getProperty(TSDBDriver.PROPERTY_KEY_HOST)); + hostProp.required = false; + hostProp.description = "Hostname"; + + DriverPropertyInfo portProp = new DriverPropertyInfo(TSDBDriver.PROPERTY_KEY_PORT, info.getProperty(TSDBDriver.PROPERTY_KEY_PORT, TSDBConstants.DEFAULT_PORT)); + portProp.required = false; + portProp.description = "Port"; + + DriverPropertyInfo dbProp = new DriverPropertyInfo(TSDBDriver.PROPERTY_KEY_DBNAME, info.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME)); + dbProp.required = false; + dbProp.description = "Database name"; + + DriverPropertyInfo userProp = new DriverPropertyInfo(TSDBDriver.PROPERTY_KEY_USER, info.getProperty(TSDBDriver.PROPERTY_KEY_USER)); + userProp.required = true; + userProp.description = "User"; + + DriverPropertyInfo passwordProp = new DriverPropertyInfo(TSDBDriver.PROPERTY_KEY_PASSWORD, info.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD)); + passwordProp.required = true; + passwordProp.description = "Password"; + + DriverPropertyInfo[] propertyInfo = new DriverPropertyInfo[5]; + propertyInfo[0] = hostProp; + propertyInfo[1] = portProp; + propertyInfo[2] = dbProp; + propertyInfo[3] = userProp; + propertyInfo[4] = passwordProp; + return propertyInfo; + } + + protected Properties parseURL(String url, Properties defaults) { + Properties urlProps = (defaults != 
null) ? defaults : new Properties(); + + // parse properties + int beginningOfSlashes = url.indexOf("//"); + int index = url.indexOf("?"); + if (index != -1) { + String paramString = url.substring(index + 1, url.length()); + url = url.substring(0, index); + StringTokenizer queryParams = new StringTokenizer(paramString, "&"); + while (queryParams.hasMoreElements()) { + String parameterValuePair = queryParams.nextToken(); + int indexOfEqual = parameterValuePair.indexOf("="); + String parameter = null; + String value = null; + if (indexOfEqual != -1) { + parameter = parameterValuePair.substring(0, indexOfEqual); + if (indexOfEqual + 1 < parameterValuePair.length()) { + value = parameterValuePair.substring(indexOfEqual + 1); + } + } + if ((value != null && value.length() > 0) && (parameter != null && parameter.length() > 0)) { + urlProps.setProperty(parameter, value); + } + } + } + + // parse Product Name + String dbProductName = url.substring(0, beginningOfSlashes); + dbProductName = dbProductName.substring(dbProductName.indexOf(":") + 1); + dbProductName = dbProductName.substring(0, dbProductName.indexOf(":")); + // parse dbname + url = url.substring(beginningOfSlashes + 2); + int indexOfSlash = url.indexOf("/"); + if (indexOfSlash != -1) { + if (indexOfSlash + 1 < url.length()) { + urlProps.setProperty(TSDBDriver.PROPERTY_KEY_DBNAME, url.substring(indexOfSlash + 1)); + } + url = url.substring(0, indexOfSlash); + } + // parse port + int indexOfColon = url.indexOf(":"); + if (indexOfColon != -1) { + if (indexOfColon + 1 < url.length()) { + urlProps.setProperty(TSDBDriver.PROPERTY_KEY_PORT, url.substring(indexOfColon + 1)); + } + url = url.substring(0, indexOfColon); + } + // parse host + if (url != null && url.length() > 0 && url.trim().length() > 0) { + urlProps.setProperty(TSDBDriver.PROPERTY_KEY_HOST, url); + } + return urlProps; + } + + + +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ColumnMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ColumnMetaData.java index 5c7f80c715c12d1d3831a4cfbfe5f4a326b569eb..633fdcd5ab7c9f077abbd725c2511bcc2251db44 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ColumnMetaData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ColumnMetaData.java @@ -16,10 +16,10 @@ package com.taosdata.jdbc; public class ColumnMetaData { - int colType = 0; - String colName = null; - int colSize = -1; - int colIndex = 0; + private int colType = 0; + private String colName = null; + private int colSize = -1; + private int colIndex = 0; public int getColSize() { return colSize; diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java index ac0e4eb84aa0fbcc8162e68d94de00cb2f4e79f5..94abe3965507170a4b31e17ebb431ddcb4fa11f8 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java @@ -14,7 +14,6 @@ *****************************************************************************/ package com.taosdata.jdbc; -import java.io.*; import java.sql.Array; import java.sql.Blob; import java.sql.CallableStatement; @@ -35,11 +34,10 @@ import java.util.*; import java.util.concurrent.Executor; public class TSDBConnection implements Connection { + protected Properties props = null; private TSDBJNIConnector connector = null; - protected Properties props = null; - private String catalog = null; private TSDBDatabaseMetaData dbMetaData = 
null; @@ -47,16 +45,21 @@ public class TSDBConnection implements Connection { private Properties clientInfoProps = new Properties(); private int timeoutMilliseconds = 0; - - private String tsCharSet = ""; + + private boolean batchFetch = false; public TSDBConnection(Properties info, TSDBDatabaseMetaData meta) throws SQLException { this.dbMetaData = meta; - connect(info.getProperty(TSDBDriver.PROPERTY_KEY_HOST), Integer.parseInt(info.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "0")), - info.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME), info.getProperty(TSDBDriver.PROPERTY_KEY_USER), + info.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME), + info.getProperty(TSDBDriver.PROPERTY_KEY_USER), info.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD)); + + String batchLoad = info.getProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD); + if (batchLoad != null) { + this.batchFetch = Boolean.parseBoolean(batchLoad); + } } private void connect(String host, int port, String dbName, String user, String password) throws SQLException { @@ -197,12 +200,14 @@ public class TSDBConnection implements Connection { } public SQLWarning getWarnings() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + //todo: implement getWarnings according to the warning messages returned from TDengine + return null; +// throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } public void clearWarnings() throws SQLException { // left blank to support HikariCP connection - //todo: implement getWarnings according to the warning messages returned from TDengine + //todo: implement clearWarnings according to the warning messages returned from TDengine } public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException { @@ -222,6 +227,14 @@ public class TSDBConnection implements Connection { return this.prepareStatement(sql); } + + public Boolean getBatchFetch() { + return this.batchFetch; + } + + public void setBatchFetch(Boolean batchFetch) { + this.batchFetch = batchFetch; + } public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java index c1d9d2af8e5a5c24dcfed6039e3ce06530b95276..f4dee67adf03a3474f582dbe662a8c5e988e3fab 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java @@ -96,7 +96,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData { } public int getDriverMajorVersion() { - return 0; + return 2; } public int getDriverMinorVersion() { diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java index 97d93fb0a18fa887465583bce1492c8305faaec5..06f88cebfaa8aa90cc81506a98374ec8076ad82e 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java @@ -14,13 +14,8 @@ *****************************************************************************/ package com.taosdata.jdbc; - -import java.io.*; - import java.sql.*; -import java.util.ArrayList; -import java.util.List; -import java.util.Properties; +import java.util.*; import java.util.logging.Logger; /** @@ 
-42,78 +37,60 @@ import java.util.logging.Logger; * register it with the DriverManager. This means that a user can load and * register a driver by doing Class.forName("foo.bah.Driver") */ -public class TSDBDriver implements java.sql.Driver { - +public class TSDBDriver extends AbstractTaosDriver { @Deprecated private static final String URL_PREFIX1 = "jdbc:TSDB://"; private static final String URL_PREFIX = "jdbc:TAOS://"; - /** - * Key used to retrieve the database value from the properties instance passed - * to the driver. - */ - public static final String PROPERTY_KEY_DBNAME = "dbname"; - /** * Key used to retrieve the host value from the properties instance passed to * the driver. */ public static final String PROPERTY_KEY_HOST = "host"; - /** - * Key used to retrieve the password value from the properties instance passed - * to the driver. - */ - public static final String PROPERTY_KEY_PASSWORD = "password"; - /** * Key used to retrieve the port number value from the properties instance * passed to the driver. */ public static final String PROPERTY_KEY_PORT = "port"; - + /** + * Key used to retrieve the database value from the properties instance passed + * to the driver. + */ + public static final String PROPERTY_KEY_DBNAME = "dbname"; /** * Key used to retrieve the user value from the properties instance passed to * the driver. */ public static final String PROPERTY_KEY_USER = "user"; - - + /** + * Key used to retrieve the password value from the properties instance passed + * to the driver. + */ + public static final String PROPERTY_KEY_PASSWORD = "password"; /** * Key for the configuration file directory of TSDB client in properties instance */ public static final String PROPERTY_KEY_CONFIG_DIR = "cfgdir"; - /** * Key for the timezone used by the TSDB client in properties instance */ public static final String PROPERTY_KEY_TIME_ZONE = "timezone"; - /** * Key for the locale used by the TSDB client in properties instance */ public static final String PROPERTY_KEY_LOCALE = "locale"; - - /** * Key for the char encoding used by the TSDB client in properties instance */ public static final String PROPERTY_KEY_CHARSET = "charset"; - public static final String PROPERTY_KEY_PROTOCOL = "protocol"; - - /** - * Index for port coming out of parseHostPortPair(). + * fetch data from native function in a batch model */ - public final static int PORT_NUMBER_INDEX = 1; - - /** - * Index for host coming out of parseHostPortPair(). 
- */ - public final static int HOST_NAME_INDEX = 0; - + public static final String PROPERTY_KEY_BATCH_LOAD = "batchfetch"; + private TSDBDatabaseMetaData dbMetaData = null; static { @@ -124,74 +101,23 @@ public class TSDBDriver implements java.sql.Driver { } } - private List loadConfigEndpoints(File cfgFile) { - List endpoints = new ArrayList<>(); - try (BufferedReader reader = new BufferedReader(new FileReader(cfgFile))) { - String line = null; - while ((line = reader.readLine()) != null) { - if (line.trim().startsWith("firstEp") || line.trim().startsWith("secondEp")) { - endpoints.add(line.substring(line.indexOf('p') + 1).trim()); - } - if (endpoints.size() > 1) - break; - } - } catch (FileNotFoundException e) { - e.printStackTrace(); - } catch (IOException e) { - e.printStackTrace(); - } - return endpoints; - } - - /** - * @param cfgDirPath - * @return return the config dir - **/ - private File loadConfigDir(String cfgDirPath) { - if (cfgDirPath == null) - return loadDefaultConfigDir(); - File cfgDir = new File(cfgDirPath); - if (!cfgDir.exists()) - return loadDefaultConfigDir(); - return cfgDir; - } - - /** - * @return search the default config dir, if the config dir is not exist will return null - */ - private File loadDefaultConfigDir() { - File cfgDir; - File cfgDir_linux = new File("/etc/taos"); - cfgDir = cfgDir_linux.exists() ? cfgDir_linux : null; - File cfgDir_windows = new File("C:\\TDengine\\cfg"); - cfgDir = (cfgDir == null && cfgDir_windows.exists()) ? cfgDir_windows : cfgDir; - return cfgDir; - } - public Connection connect(String url, Properties info) throws SQLException { - if (url == null) { + if (url == null) throw new SQLException(TSDBConstants.WrapErrMsg("url is not set!")); - } + + if (!acceptsURL(url)) + return null; Properties props = null; if ((props = parseURL(url, info)) == null) { return null; } - //load taos.cfg start - if (info.getProperty(TSDBDriver.PROPERTY_KEY_HOST) == null && info.getProperty(TSDBDriver.PROPERTY_KEY_PORT) == null) { - File cfgDir = loadConfigDir(info.getProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR)); - File cfgFile = cfgDir.listFiles((dir, name) -> "taos.cfg".equalsIgnoreCase(name))[0]; - List endpoints = loadConfigEndpoints(cfgFile); - if (!endpoints.isEmpty()) { - info.setProperty(TSDBDriver.PROPERTY_KEY_HOST, endpoints.get(0).split(":")[0]); - info.setProperty(TSDBDriver.PROPERTY_KEY_PORT, endpoints.get(0).split(":")[1]); - } - } + loadTaosConfig(info); try { - TSDBJNIConnector.init((String) props.get(PROPERTY_KEY_CONFIG_DIR), (String) props.get(PROPERTY_KEY_LOCALE), (String) props.get(PROPERTY_KEY_CHARSET), - (String) props.get(PROPERTY_KEY_TIME_ZONE)); + TSDBJNIConnector.init((String) props.get(PROPERTY_KEY_CONFIG_DIR), (String) props.get(PROPERTY_KEY_LOCALE), + (String) props.get(PROPERTY_KEY_CHARSET), (String) props.get(PROPERTY_KEY_TIME_ZONE)); Connection newConn = new TSDBConnection(props, this.dbMetaData); return newConn; } catch (SQLWarning sqlWarning) { @@ -208,43 +134,15 @@ public class TSDBDriver implements java.sql.Driver { } /** - * Parses hostPortPair in the form of [host][:port] into an array, with the - * element of index HOST_NAME_INDEX being the host (or null if not specified), - * and the element of index PORT_NUMBER_INDEX being the port (or null if not - * specified). 
- * - * @param hostPortPair host and port in form of of [host][:port] - * @return array containing host and port as Strings - * @throws SQLException if a parse error occurs + * @param url the URL of the database + * @return true if this driver understands the given URL; + * false otherwise + * @throws SQLException if a database access error occurs or the url is {@code null} */ - protected static String[] parseHostPortPair(String hostPortPair) throws SQLException { - String[] splitValues = new String[2]; - - int portIndex = hostPortPair.indexOf(":"); - - String hostname = null; - - if (portIndex != -1) { - if ((portIndex + 1) < hostPortPair.length()) { - String portAsString = hostPortPair.substring(portIndex + 1); - hostname = hostPortPair.substring(0, portIndex); - - splitValues[HOST_NAME_INDEX] = hostname; - - splitValues[PORT_NUMBER_INDEX] = portAsString; - } else { - throw new SQLException(TSDBConstants.WrapErrMsg("port is not proper!")); - } - } else { - splitValues[HOST_NAME_INDEX] = hostPortPair; - splitValues[PORT_NUMBER_INDEX] = null; - } - - return splitValues; - } - public boolean acceptsURL(String url) throws SQLException { - return (url != null && url.length() > 0 && url.trim().length() > 0) && url.startsWith(URL_PREFIX); + if (url == null) + throw new SQLException(TSDBConstants.WrapErrMsg("url is null")); + return (url != null && url.length() > 0 && url.trim().length() > 0) && (url.startsWith(URL_PREFIX) || url.startsWith(URL_PREFIX1)); } public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException { @@ -252,133 +150,80 @@ public class TSDBDriver implements java.sql.Driver { info = new Properties(); } - if ((url != null) && (url.startsWith(URL_PREFIX) || url.startsWith(URL_PREFIX1))) { + if (acceptsURL(url)) { info = parseURL(url, info); } - DriverPropertyInfo hostProp = new DriverPropertyInfo(PROPERTY_KEY_HOST, info.getProperty(PROPERTY_KEY_HOST)); - hostProp.required = true; - - DriverPropertyInfo portProp = new DriverPropertyInfo(PROPERTY_KEY_PORT, info.getProperty(PROPERTY_KEY_PORT, TSDBConstants.DEFAULT_PORT)); - portProp.required = false; - - DriverPropertyInfo dbProp = new DriverPropertyInfo(PROPERTY_KEY_DBNAME, info.getProperty(PROPERTY_KEY_DBNAME)); - dbProp.required = false; - dbProp.description = "Database name"; - - DriverPropertyInfo userProp = new DriverPropertyInfo(PROPERTY_KEY_USER, info.getProperty(PROPERTY_KEY_USER)); - userProp.required = true; - - DriverPropertyInfo passwordProp = new DriverPropertyInfo(PROPERTY_KEY_PASSWORD, info.getProperty(PROPERTY_KEY_PASSWORD)); - passwordProp.required = true; - - DriverPropertyInfo[] propertyInfo = new DriverPropertyInfo[5]; - propertyInfo[0] = hostProp; - propertyInfo[1] = portProp; - propertyInfo[2] = dbProp; - propertyInfo[3] = userProp; - propertyInfo[4] = passwordProp; - - return propertyInfo; + return getPropertyInfo(info); } /** - * example: jdbc:TSDB://127.0.0.1:0/db?user=root&password=your_password + * example: jdbc:TAOS://127.0.0.1:0/db?user=root&password=your_password */ - public Properties parseURL(String url, Properties defaults) throws java.sql.SQLException { + @Override + public Properties parseURL(String url, Properties defaults) { Properties urlProps = (defaults != null) ? 
defaults : new Properties(); - if (url == null) { + if (url == null || url.length() <= 0 || url.trim().length() <= 0) return null; - } - - if (!url.startsWith(URL_PREFIX) && !url.startsWith(URL_PREFIX1)) { + if (!url.startsWith(URL_PREFIX) && !url.startsWith(URL_PREFIX1)) return null; - } + // parse properties String urlForMeta = url; - - String dbProductName = url.substring(url.indexOf(":") + 1); - dbProductName = dbProductName.substring(0, dbProductName.indexOf(":")); int beginningOfSlashes = url.indexOf("//"); - url = url.substring(beginningOfSlashes + 2); - - String host = url.substring(0, url.indexOf(":")); - url = url.substring(url.indexOf(":") + 1); - urlProps.setProperty(PROPERTY_KEY_HOST, host); - - String port = url.substring(0, url.indexOf("/")); - urlProps.setProperty(PROPERTY_KEY_PORT, port); - url = url.substring(url.indexOf("/") + 1); - - if (url.indexOf("?") != -1) { - String dbName = url.substring(0, url.indexOf("?")); - urlProps.setProperty(PROPERTY_KEY_DBNAME, dbName); - url = url.trim().substring(url.indexOf("?") + 1); - } else { - // without user & password so return - if (!url.trim().isEmpty()) { - String dbName = url.trim(); - urlProps.setProperty(PROPERTY_KEY_DBNAME, dbName); + int index = url.indexOf("?"); + if (index != -1) { + String paramString = url.substring(index + 1, url.length()); + url = url.substring(0, index); + StringTokenizer queryParams = new StringTokenizer(paramString, "&"); + while (queryParams.hasMoreElements()) { + String oneToken = queryParams.nextToken(); + String[] pair = oneToken.split("="); + + if ((pair[0] != null && pair[0].trim().length() > 0) && (pair[1] != null && pair[1].trim().length() > 0)) { + urlProps.setProperty(pair[0].trim(), pair[1].trim()); + } } - this.dbMetaData = new TSDBDatabaseMetaData(dbProductName, urlForMeta, urlProps.getProperty("user")); - return urlProps; } - - String user = ""; - - if (url.indexOf("&") == -1) { - String[] kvPair = url.trim().split("="); - if (kvPair.length == 2) { - setPropertyValue(urlProps, kvPair); - return urlProps; + + // parse Product Name + String dbProductName = url.substring(0, beginningOfSlashes); + dbProductName = dbProductName.substring(dbProductName.indexOf(":") + 1); + dbProductName = dbProductName.substring(0, dbProductName.indexOf(":")); + + // parse database name + url = url.substring(beginningOfSlashes + 2); + int indexOfSlash = url.indexOf("/"); + if (indexOfSlash != -1) { + if (indexOfSlash + 1 < url.length()) { + urlProps.setProperty(TSDBDriver.PROPERTY_KEY_DBNAME, url.substring(indexOfSlash + 1)); } + url = url.substring(0, indexOfSlash); } - - String[] queryStrings = url.trim().split("&"); - for (String queryStr : queryStrings) { - String[] kvPair = queryStr.trim().split("="); - if (kvPair.length < 2) { - continue; + + // parse port + int indexOfColon = url.indexOf(":"); + if (indexOfColon != -1) { + if (indexOfColon + 1 < url.length()) { + urlProps.setProperty(TSDBDriver.PROPERTY_KEY_PORT, url.substring(indexOfColon + 1)); } - setPropertyValue(urlProps, kvPair); + url = url.substring(0, indexOfColon); } - - user = urlProps.getProperty(PROPERTY_KEY_USER).toString(); - this.dbMetaData = new TSDBDatabaseMetaData(dbProductName, urlForMeta, user); - - return urlProps; - } - - public void setPropertyValue(Properties property, String[] keyValuePair) { - switch (keyValuePair[0].toLowerCase()) { - case PROPERTY_KEY_USER: - property.setProperty(PROPERTY_KEY_USER, keyValuePair[1]); - break; - case PROPERTY_KEY_PASSWORD: - property.setProperty(PROPERTY_KEY_PASSWORD, keyValuePair[1]); - 
break; - case PROPERTY_KEY_TIME_ZONE: - property.setProperty(PROPERTY_KEY_TIME_ZONE, keyValuePair[1]); - break; - case PROPERTY_KEY_LOCALE: - property.setProperty(PROPERTY_KEY_LOCALE, keyValuePair[1]); - break; - case PROPERTY_KEY_CHARSET: - property.setProperty(PROPERTY_KEY_CHARSET, keyValuePair[1]); - break; - case PROPERTY_KEY_CONFIG_DIR: - property.setProperty(PROPERTY_KEY_CONFIG_DIR, keyValuePair[1]); - break; + + if (url != null && url.length() > 0 && url.trim().length() > 0) { + urlProps.setProperty(TSDBDriver.PROPERTY_KEY_HOST, url); } + + this.dbMetaData = new TSDBDatabaseMetaData(dbProductName, urlForMeta, urlProps.getProperty(TSDBDriver.PROPERTY_KEY_USER)); + return urlProps; } - public int getMajorVersion() { - return 1; + return 2; } public int getMinorVersion() { - return 1; + return 0; } public boolean jdbcCompliant() { @@ -389,33 +234,4 @@ public class TSDBDriver implements java.sql.Driver { return null; } - /** - * Returns the host property - * - * @param props the java.util.Properties instance to retrieve the hostname from. - * @return the host - */ - public String host(Properties props) { - return props.getProperty(PROPERTY_KEY_HOST, "localhost"); - } - - /** - * Returns the port number property - * - * @param props the properties to get the port number from - * @return the port number - */ - public int port(Properties props) { - return Integer.parseInt(props.getProperty(PROPERTY_KEY_PORT, TSDBConstants.DEFAULT_PORT)); - } - - /** - * Returns the database property from props - * - * @param props the Properties to look for the database property. - * @return the database name. - */ - public String database(Properties props) { - return props.getProperty(PROPERTY_KEY_DBNAME); - } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java index 0cd185de50d858aec78b60acc4055c1ae99a4cb5..f918463439fba293171827001acf6930ab271b81 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java @@ -111,8 +111,8 @@ public class TSDBJNIConnector { * @throws SQLException */ public long executeQuery(String sql) throws SQLException { - // close previous result set if the user forgets to invoke the - // free method to close previous result set. + // close previous result set if the user forgets to invoke the + // free method to close previous result set. if (!this.isResultsetClosed) { freeResultSet(taosResultSetPointer); } @@ -122,23 +122,23 @@ public class TSDBJNIConnector { pSql = this.executeQueryImp(sql.getBytes(TaosGlobalConfig.getCharset()), this.taos); } catch (Exception e) { e.printStackTrace(); - this.freeResultSet(pSql); + this.freeResultSetImp(this.taos, pSql); throw new SQLException(TSDBConstants.WrapErrMsg("Unsupported encoding")); } - + int code = this.getErrCode(pSql); if (code != 0) { affectedRows = -1; String msg = this.getErrMsg(pSql); - - this.freeResultSet(pSql); + + this.freeResultSetImp(this.taos, pSql); throw new SQLException(TSDBConstants.WrapErrMsg(msg), "", code); } // Try retrieving result set for the executed SQL using the current connection pointer. 
taosResultSetPointer = this.getResultSetImp(this.taos, pSql); isResultsetClosed = (taosResultSetPointer == TSDBConstants.JNI_NULL_POINTER); - + return pSql; } @@ -171,11 +171,11 @@ public class TSDBJNIConnector { } private native long getResultSetImp(long connection, long pSql); - + public boolean isUpdateQuery(long pSql) { - return isUpdateQueryImp(this.taos, pSql) == 1? true:false; + return isUpdateQueryImp(this.taos, pSql) == 1 ? true : false; } - + private native long isUpdateQueryImp(long connection, long pSql); /** @@ -191,7 +191,7 @@ public class TSDBJNIConnector { res = this.freeResultSetImp(this.taos, result); taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER; } - + isResultsetClosed = true; return res; } @@ -243,6 +243,11 @@ public class TSDBJNIConnector { private native int fetchRowImp(long connection, long resultSet, TSDBResultSetRowData rowData); + public int fetchBlock(long resultSet, TSDBResultSetBlockData blockData) { + return this.fetchBlockImp(this.taos, resultSet, blockData); + } + + private native int fetchBlockImp(long connection, long resultSet, TSDBResultSetBlockData blockData); /** * Execute close operation from C to release connection pointer by JNI * @@ -274,7 +279,7 @@ public class TSDBJNIConnector { * Consume a subscription */ long consume(long subscription) { - return this.consumeImp(subscription); + return this.consumeImp(subscription); } private native long consumeImp(long subscription); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java index 961633b8aec4ec433a56522e504fb35495ccfa78..84a3f58f4692a99737af7d93c5578fc7a5a09c27 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java @@ -47,10 +47,14 @@ public class TSDBResultSet implements ResultSet { private List columnMetaDataList = new ArrayList(); private TSDBResultSetRowData rowData; + private TSDBResultSetBlockData blockData; + private boolean batchFetch = false; private boolean lastWasNull = false; private final int COLUMN_INDEX_START_VALUE = 1; + private int rowIndex = 0; + public TSDBJNIConnector getJniConnector() { return jniConnector; } @@ -67,6 +71,14 @@ public class TSDBResultSet implements ResultSet { this.resultSetPointer = resultSetPointer; } + public void setBatchFetch(boolean batchFetch) { + this.batchFetch = batchFetch; + } + + public Boolean getBatchFetch() { + return this.batchFetch; + } + public List getColumnMetaDataList() { return columnMetaDataList; } @@ -94,8 +106,8 @@ public class TSDBResultSet implements ResultSet { public TSDBResultSet() { } - public TSDBResultSet(TSDBJNIConnector connecter, long resultSetPointer) throws SQLException { - this.jniConnector = connecter; + public TSDBResultSet(TSDBJNIConnector connector, long resultSetPointer) throws SQLException { + this.jniConnector = connector; this.resultSetPointer = resultSetPointer; int code = this.jniConnector.getSchemaMetaData(this.resultSetPointer, this.columnMetaDataList); if (code == TSDBConstants.JNI_CONNECTION_NULL) { @@ -107,6 +119,7 @@ public class TSDBResultSet implements ResultSet { } this.rowData = new TSDBResultSetRowData(this.columnMetaDataList.size()); + this.blockData = new TSDBResultSetBlockData(this.columnMetaDataList, this.columnMetaDataList.size()); } public T unwrap(Class iface) throws SQLException { @@ -118,21 +131,42 @@ public class TSDBResultSet implements ResultSet { } public boolean next() throws 
SQLException { - if (rowData != null) { - this.rowData.clear(); - } + if (this.getBatchFetch()) { + if (this.blockData.forward()) { + return true; + } + + int code = this.jniConnector.fetchBlock(this.resultSetPointer, this.blockData); + this.blockData.reset(); + + if (code == TSDBConstants.JNI_CONNECTION_NULL) { + throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + } else if (code == TSDBConstants.JNI_RESULT_SET_NULL) { + throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_RESULT_SET_NULL)); + } else if (code == TSDBConstants.JNI_NUM_OF_FIELDS_0) { + throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_NUM_OF_FIELDS_0)); + } else if (code == TSDBConstants.JNI_FETCH_END) { + return false; + } - int code = this.jniConnector.fetchRow(this.resultSetPointer, this.rowData); - if (code == TSDBConstants.JNI_CONNECTION_NULL) { - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); - } else if (code == TSDBConstants.JNI_RESULT_SET_NULL) { - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_RESULT_SET_NULL)); - } else if (code == TSDBConstants.JNI_NUM_OF_FIELDS_0) { - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_NUM_OF_FIELDS_0)); - } else if (code == TSDBConstants.JNI_FETCH_END) { - return false; - } else { return true; + } else { + if (rowData != null) { + this.rowData.clear(); + } + + int code = this.jniConnector.fetchRow(this.resultSetPointer, this.rowData); + if (code == TSDBConstants.JNI_CONNECTION_NULL) { + throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + } else if (code == TSDBConstants.JNI_RESULT_SET_NULL) { + throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_RESULT_SET_NULL)); + } else if (code == TSDBConstants.JNI_NUM_OF_FIELDS_0) { + throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_NUM_OF_FIELDS_0)); + } else if (code == TSDBConstants.JNI_FETCH_END) { + return false; + } else { + return true; + } } } @@ -155,21 +189,30 @@ public class TSDBResultSet implements ResultSet { String res = null; int colIndex = getTrueColumnIndex(columnIndex); - this.lastWasNull = this.rowData.wasNull(colIndex); - if (!lastWasNull) { - res = this.rowData.getString(colIndex, this.columnMetaDataList.get(colIndex).getColType()); + if (!this.getBatchFetch()) { + this.lastWasNull = this.rowData.wasNull(colIndex); + if (!lastWasNull) { + res = this.rowData.getString(colIndex, this.columnMetaDataList.get(colIndex).getColType()); + } + return res; + } else { + return this.blockData.getString(colIndex); } - return res; } public boolean getBoolean(int columnIndex) throws SQLException { boolean res = false; int colIndex = getTrueColumnIndex(columnIndex); - this.lastWasNull = this.rowData.wasNull(colIndex); - if (!lastWasNull) { - res = this.rowData.getBoolean(colIndex, this.columnMetaDataList.get(colIndex).getColType()); + if (!this.getBatchFetch()) { + this.lastWasNull = this.rowData.wasNull(colIndex); + if (!lastWasNull) { + res = this.rowData.getBoolean(colIndex, this.columnMetaDataList.get(colIndex).getColType()); + } + } else { + return this.blockData.getBoolean(colIndex); } + return res; } @@ -177,66 +220,91 @@ public class TSDBResultSet implements ResultSet { byte res = 0; int colIndex = getTrueColumnIndex(columnIndex); - this.lastWasNull = this.rowData.wasNull(colIndex); - if (!lastWasNull) { - res = (byte) this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType()); + if (!this.getBatchFetch()) { + 
this.lastWasNull = this.rowData.wasNull(colIndex); + if (!lastWasNull) { + res = (byte) this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType()); + } + return res; + } else { + return (byte) this.blockData.getInt(colIndex); } - return res; } public short getShort(int columnIndex) throws SQLException { short res = 0; int colIndex = getTrueColumnIndex(columnIndex); - this.lastWasNull = this.rowData.wasNull(colIndex); - if (!lastWasNull) { - res = (short) this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType()); + if (!this.getBatchFetch()) { + this.lastWasNull = this.rowData.wasNull(colIndex); + if (!lastWasNull) { + res = (short) this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType()); + } + return res; + } else { + return (short) this.blockData.getInt(colIndex); } - return res; } public int getInt(int columnIndex) throws SQLException { int res = 0; int colIndex = getTrueColumnIndex(columnIndex); - this.lastWasNull = this.rowData.wasNull(colIndex); - if (!lastWasNull) { - res = this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType()); + if (!this.getBatchFetch()) { + this.lastWasNull = this.rowData.wasNull(colIndex); + if (!lastWasNull) { + res = this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType()); + } + return res; + } else { + return this.blockData.getInt(colIndex); } - return res; + } public long getLong(int columnIndex) throws SQLException { long res = 0l; int colIndex = getTrueColumnIndex(columnIndex); - this.lastWasNull = this.rowData.wasNull(colIndex); - if (!lastWasNull) { - res = this.rowData.getLong(colIndex, this.columnMetaDataList.get(colIndex).getColType()); + if (!this.getBatchFetch()) { + this.lastWasNull = this.rowData.wasNull(colIndex); + if (!lastWasNull) { + res = this.rowData.getLong(colIndex, this.columnMetaDataList.get(colIndex).getColType()); + } + return res; + } else { + return this.blockData.getLong(colIndex); } - return res; } public float getFloat(int columnIndex) throws SQLException { float res = 0; int colIndex = getTrueColumnIndex(columnIndex); - this.lastWasNull = this.rowData.wasNull(colIndex); - if (!lastWasNull) { - res = this.rowData.getFloat(colIndex, this.columnMetaDataList.get(colIndex).getColType()); + if (!this.getBatchFetch()) { + this.lastWasNull = this.rowData.wasNull(colIndex); + if (!lastWasNull) { + res = this.rowData.getFloat(colIndex, this.columnMetaDataList.get(colIndex).getColType()); + } + return res; + } else { + return (float) this.blockData.getDouble(colIndex); } - return res; } public double getDouble(int columnIndex) throws SQLException { double res = 0; int colIndex = getTrueColumnIndex(columnIndex); - this.lastWasNull = this.rowData.wasNull(colIndex); - if (!lastWasNull) { - res = this.rowData.getDouble(colIndex, this.columnMetaDataList.get(colIndex).getColType()); + if (!this.getBatchFetch()) { + this.lastWasNull = this.rowData.wasNull(colIndex); + if (!lastWasNull) { + res = this.rowData.getDouble(colIndex, this.columnMetaDataList.get(colIndex).getColType()); + } + return res; + } else { + return this.blockData.getDouble(colIndex); } - return res; } /* @@ -249,25 +317,11 @@ public class TSDBResultSet implements ResultSet { */ @Deprecated public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { - BigDecimal res = null; - int colIndex = getTrueColumnIndex(columnIndex); - - this.lastWasNull = this.rowData.wasNull(colIndex); - if (!lastWasNull) { - res = new 
BigDecimal(this.rowData.getLong(colIndex, this.columnMetaDataList.get(colIndex).getColType())); - } - return res; + return new BigDecimal(getLong(columnIndex)); } public byte[] getBytes(int columnIndex) throws SQLException { - byte[] res = null; - int colIndex = getTrueColumnIndex(columnIndex); - - this.lastWasNull = this.rowData.wasNull(colIndex); - if (!lastWasNull) { - res = this.rowData.getString(colIndex, this.columnMetaDataList.get(colIndex).getColType()).getBytes(); - } - return res; + return getString(columnIndex).getBytes(); } public Date getDate(int columnIndex) throws SQLException { @@ -284,11 +338,15 @@ public class TSDBResultSet implements ResultSet { Timestamp res = null; int colIndex = getTrueColumnIndex(columnIndex); - this.lastWasNull = this.rowData.wasNull(colIndex); - if (!lastWasNull) { - res = this.rowData.getTimestamp(colIndex); + if (!this.getBatchFetch()) { + this.lastWasNull = this.rowData.wasNull(colIndex); + if (!lastWasNull) { + res = this.rowData.getTimestamp(colIndex); + } + return res; + } else { + return this.blockData.getTimestamp(columnIndex); } - return res; } public InputStream getAsciiStream(int columnIndex) throws SQLException { @@ -400,8 +458,12 @@ public class TSDBResultSet implements ResultSet { public Object getObject(int columnIndex) throws SQLException { int colIndex = getTrueColumnIndex(columnIndex); - this.lastWasNull = this.rowData.wasNull(colIndex); - return this.rowData.get(colIndex); + if (!this.getBatchFetch()) { + this.lastWasNull = this.rowData.wasNull(colIndex); + return this.rowData.get(colIndex); + } else { + return this.blockData.get(colIndex); + } } public Object getObject(String columnLabel) throws SQLException { @@ -433,8 +495,12 @@ public class TSDBResultSet implements ResultSet { public BigDecimal getBigDecimal(int columnIndex) throws SQLException { int colIndex = getTrueColumnIndex(columnIndex); - this.lastWasNull = this.rowData.wasNull(colIndex); - return new BigDecimal(this.rowData.getLong(colIndex, this.columnMetaDataList.get(colIndex).getColType())); + if (!this.getBatchFetch()) { + this.lastWasNull = this.rowData.wasNull(colIndex); + return new BigDecimal(this.rowData.getLong(colIndex, this.columnMetaDataList.get(colIndex).getColType())); + } else { + return new BigDecimal(this.blockData.getLong(colIndex)); + } } public BigDecimal getBigDecimal(String columnLabel) throws SQLException { diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java new file mode 100644 index 0000000000000000000000000000000000000000..9352cf525350ff57525680f405d61c6b00c0cf55 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java @@ -0,0 +1,497 @@ +/*************************************************************************** + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ *****************************************************************************/ +package com.taosdata.jdbc; + +import java.io.UnsupportedEncodingException; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.DoubleBuffer; +import java.nio.FloatBuffer; +import java.nio.IntBuffer; +import java.nio.LongBuffer; +import java.nio.ShortBuffer; +import java.sql.SQLDataException; +import java.sql.SQLException; +import java.sql.Timestamp; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public class TSDBResultSetBlockData { + private int numOfRows = 0; + private int rowIndex = 0; + + private List columnMetaDataList; + private ArrayList colData = null; + + public TSDBResultSetBlockData(List colMeta, int numOfCols) { + this.columnMetaDataList = colMeta; + this.colData = new ArrayList(numOfCols); + } + + public TSDBResultSetBlockData() { + this.colData = new ArrayList(); + } + + public void clear() { + int size = this.colData.size(); + if (this.colData != null) { + this.colData.clear(); + } + + setNumOfCols(size); + } + + public int getNumOfRows() { + return this.numOfRows; + } + + public void setNumOfRows(int numOfRows) { + this.numOfRows = numOfRows; + } + + public int getNumOfCols() { + return this.colData.size(); + } + + public void setNumOfCols(int numOfCols) { + this.colData = new ArrayList(numOfCols); + this.colData.addAll(Collections.nCopies(numOfCols, null)); + } + + public boolean hasMore() { + return this.rowIndex < this.numOfRows; + } + + public boolean forward() { + if (this.rowIndex > this.numOfRows) { + return false; + } + + return ((++this.rowIndex) < this.numOfRows); + } + + public void reset() { + this.rowIndex = 0; + } + + public void setBoolean(int col, boolean value) { + colData.set(col, value); + } + + public void setByteArray(int col, int length, byte[] value) { + try { + switch (this.columnMetaDataList.get(col).getColType()) { + case TSDBConstants.TSDB_DATA_TYPE_BOOL: { + ByteBuffer buf = ByteBuffer.wrap(value, 0, length); + buf.order(ByteOrder.LITTLE_ENDIAN).asCharBuffer(); + this.colData.set(col, buf); + break; + } + case TSDBConstants.TSDB_DATA_TYPE_TINYINT: { + ByteBuffer buf = ByteBuffer.wrap(value, 0, length); + buf.order(ByteOrder.LITTLE_ENDIAN); + this.colData.set(col, buf); + break; + } + case TSDBConstants.TSDB_DATA_TYPE_SMALLINT: { + ByteBuffer buf = ByteBuffer.wrap(value, 0, length); + ShortBuffer sb = buf.order(ByteOrder.LITTLE_ENDIAN).asShortBuffer(); + this.colData.set(col, sb); + break; + } + case TSDBConstants.TSDB_DATA_TYPE_INT: { + ByteBuffer buf = ByteBuffer.wrap(value, 0, length); + IntBuffer ib = buf.order(ByteOrder.LITTLE_ENDIAN).asIntBuffer(); + this.colData.set(col, ib); + break; + } + case TSDBConstants.TSDB_DATA_TYPE_BIGINT: { + ByteBuffer buf = ByteBuffer.wrap(value, 0, length); + LongBuffer lb = buf.order(ByteOrder.LITTLE_ENDIAN).asLongBuffer(); + this.colData.set(col, lb); + break; + } + case TSDBConstants.TSDB_DATA_TYPE_FLOAT: { + ByteBuffer buf = ByteBuffer.wrap(value, 0, length); + FloatBuffer fb = buf.order(ByteOrder.LITTLE_ENDIAN).asFloatBuffer(); + this.colData.set(col, fb); + break; + } + case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: { + ByteBuffer buf = ByteBuffer.wrap(value, 0, length); + DoubleBuffer db = buf.order(ByteOrder.LITTLE_ENDIAN).asDoubleBuffer(); + this.colData.set(col, db); + break; + } + case TSDBConstants.TSDB_DATA_TYPE_BINARY: { + ByteBuffer buf = ByteBuffer.wrap(value, 0, length); + buf.order(ByteOrder.LITTLE_ENDIAN); + this.colData.set(col, buf); + break; + } + case 
TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: { + ByteBuffer buf = ByteBuffer.wrap(value, 0, length); + LongBuffer lb = buf.order(ByteOrder.LITTLE_ENDIAN).asLongBuffer(); + this.colData.set(col, lb); + break; + } + case TSDBConstants.TSDB_DATA_TYPE_NCHAR: { + ByteBuffer buf = ByteBuffer.wrap(value, 0, length); + buf.order(ByteOrder.LITTLE_ENDIAN); + this.colData.set(col, buf); + break; + } + } + } catch (Exception e) { + e.printStackTrace(); + } + } + + private static class NullType { + private static final byte NULL_BOOL_VAL = 0x2; + private static final String NULL_STR = "null"; + + public String toString() { + return NullType.NULL_STR; + } + + public static boolean isBooleanNull(byte val) { + return val == NullType.NULL_BOOL_VAL; + } + + private static boolean isTinyIntNull(byte val) { + return val == Byte.MIN_VALUE; + } + + private static boolean isSmallIntNull(short val) { + return val == Short.MIN_VALUE; + } + + private static boolean isIntNull(int val) { + return val == Integer.MIN_VALUE; + } + + private static boolean isBigIntNull(long val) { + return val == Long.MIN_VALUE; + } + + private static boolean isFloatNull(float val) { + return Float.isNaN(val); + } + + private static boolean isDoubleNull(double val) { + return Double.isNaN(val); + } + + private static boolean isBinaryNull(byte[] val, int length) { + if (length != Byte.BYTES) { + return false; + } + + return (val[0] & 0xFF) == 0xFF; + } + + private static boolean isNcharNull(byte[] val, int length) { + if (length != Integer.BYTES) { + return false; + } + + return (val[0] & val[1] & val[2] & val[3] & 0xFF) == 0xFF; + } + + } + + /** + * The original type may not be a string type, but will be converted to by + * calling this method + * + * @param col column index + * @return + * @throws SQLException + */ + public String getString(int col) throws SQLException { + Object obj = get(col); + if (obj == null) { + return new NullType().toString(); + } + + return obj.toString(); + } + + public int getInt(int col) { + Object obj = get(col); + if (obj == null) { + return 0; + } + + int type = this.columnMetaDataList.get(col).getColType(); + switch (type) { + case TSDBConstants.TSDB_DATA_TYPE_BOOL: + case TSDBConstants.TSDB_DATA_TYPE_TINYINT: + case TSDBConstants.TSDB_DATA_TYPE_SMALLINT: + case TSDBConstants.TSDB_DATA_TYPE_INT: { + return (int) obj; + } + case TSDBConstants.TSDB_DATA_TYPE_BIGINT: + case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: { + return ((Long) obj).intValue(); + } + + case TSDBConstants.TSDB_DATA_TYPE_FLOAT: + case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: { + return ((Double) obj).intValue(); + } + + case TSDBConstants.TSDB_DATA_TYPE_NCHAR: + case TSDBConstants.TSDB_DATA_TYPE_BINARY: { + return Integer.parseInt((String) obj); + } + } + + return 0; + } + + public boolean getBoolean(int col) throws SQLException { + Object obj = get(col); + if (obj == null) { + return Boolean.FALSE; + } + + int type = this.columnMetaDataList.get(col).getColType(); + switch (type) { + case TSDBConstants.TSDB_DATA_TYPE_BOOL: + case TSDBConstants.TSDB_DATA_TYPE_TINYINT: + case TSDBConstants.TSDB_DATA_TYPE_SMALLINT: + case TSDBConstants.TSDB_DATA_TYPE_INT: { + return ((int) obj == 0L) ? Boolean.FALSE : Boolean.TRUE; + } + case TSDBConstants.TSDB_DATA_TYPE_BIGINT: + case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: { + return (((Long) obj) == 0L) ? Boolean.FALSE : Boolean.TRUE; + } + + case TSDBConstants.TSDB_DATA_TYPE_FLOAT: + case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: { + return (((Double) obj) == 0) ? 
Boolean.FALSE : Boolean.TRUE; + } + + case TSDBConstants.TSDB_DATA_TYPE_NCHAR: + case TSDBConstants.TSDB_DATA_TYPE_BINARY: { + if ("TRUE".compareToIgnoreCase((String) obj) == 0) { + return Boolean.TRUE; + } else if ("FALSE".compareToIgnoreCase((String) obj) == 0) { + return Boolean.FALSE; + } else { + throw new SQLDataException(); + } + } + } + + return Boolean.FALSE; + } + + public long getLong(int col) throws SQLException { + Object obj = get(col); + if (obj == null) { + return 0; + } + + int type = this.columnMetaDataList.get(col).getColType(); + switch (type) { + case TSDBConstants.TSDB_DATA_TYPE_BOOL: + case TSDBConstants.TSDB_DATA_TYPE_TINYINT: + case TSDBConstants.TSDB_DATA_TYPE_SMALLINT: + case TSDBConstants.TSDB_DATA_TYPE_INT: { + return (int) obj; + } + case TSDBConstants.TSDB_DATA_TYPE_BIGINT: + case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: { + return (long) obj; + } + + case TSDBConstants.TSDB_DATA_TYPE_FLOAT: + case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: { + return ((Double) obj).longValue(); + } + + case TSDBConstants.TSDB_DATA_TYPE_NCHAR: + case TSDBConstants.TSDB_DATA_TYPE_BINARY: { + return Long.parseLong((String) obj); + } + } + + return 0; + } + + public Timestamp getTimestamp(int col) { + try { + return new Timestamp(getLong(col)); + } catch (SQLException e) { + e.printStackTrace(); + } + + return null; + } + + public double getDouble(int col) { + Object obj = get(col); + if (obj == null) { + return 0; + } + + int type = this.columnMetaDataList.get(col).getColType(); + switch (type) { + case TSDBConstants.TSDB_DATA_TYPE_BOOL: + case TSDBConstants.TSDB_DATA_TYPE_TINYINT: + case TSDBConstants.TSDB_DATA_TYPE_SMALLINT: + case TSDBConstants.TSDB_DATA_TYPE_INT: { + return (int) obj; + } + case TSDBConstants.TSDB_DATA_TYPE_BIGINT: + case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: { + return (long) obj; + } + + case TSDBConstants.TSDB_DATA_TYPE_FLOAT: + case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: { + return (double) obj; + } + + case TSDBConstants.TSDB_DATA_TYPE_NCHAR: + case TSDBConstants.TSDB_DATA_TYPE_BINARY: { + return Double.parseDouble((String) obj); + } + } + + return 0; + } + + public Object get(int col) { + int fieldSize = this.columnMetaDataList.get(col).getColSize(); + + switch (this.columnMetaDataList.get(col).getColType()) { + case TSDBConstants.TSDB_DATA_TYPE_BOOL: { + ByteBuffer bb = (ByteBuffer) this.colData.get(col); + + byte val = bb.get(this.rowIndex); + if (NullType.isBooleanNull(val)) { + return null; + } + + return (val == 0x0) ? 
Boolean.FALSE : Boolean.TRUE; + } + + case TSDBConstants.TSDB_DATA_TYPE_TINYINT: { + ByteBuffer bb = (ByteBuffer) this.colData.get(col); + + byte val = bb.get(this.rowIndex); + if (NullType.isTinyIntNull(val)) { + return null; + } + + return val; + } + + case TSDBConstants.TSDB_DATA_TYPE_SMALLINT: { + ShortBuffer sb = (ShortBuffer) this.colData.get(col); + short val = sb.get(this.rowIndex); + if (NullType.isSmallIntNull(val)) { + return null; + } + + return val; + } + + case TSDBConstants.TSDB_DATA_TYPE_INT: { + IntBuffer ib = (IntBuffer) this.colData.get(col); + int val = ib.get(this.rowIndex); + if (NullType.isIntNull(val)) { + return null; + } + + return val; + } + + case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: + case TSDBConstants.TSDB_DATA_TYPE_BIGINT: { + LongBuffer lb = (LongBuffer) this.colData.get(col); + long val = lb.get(this.rowIndex); + if (NullType.isBigIntNull(val)) { + return null; + } + + return (long) val; + } + + case TSDBConstants.TSDB_DATA_TYPE_FLOAT: { + FloatBuffer fb = (FloatBuffer) this.colData.get(col); + float val = fb.get(this.rowIndex); + if (NullType.isFloatNull(val)) { + return null; + } + + return val; + } + + case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: { + DoubleBuffer lb = (DoubleBuffer) this.colData.get(col); + double val = lb.get(this.rowIndex); + if (NullType.isDoubleNull(val)) { + return null; + } + + return val; + } + + case TSDBConstants.TSDB_DATA_TYPE_BINARY: { + ByteBuffer bb = (ByteBuffer) this.colData.get(col); + bb.position(fieldSize * this.rowIndex); + + int length = bb.getShort(); + + byte[] dest = new byte[length]; + bb.get(dest, 0, length); + if (NullType.isBinaryNull(dest, length)) { + return null; + } + + return new String(dest); + } + + case TSDBConstants.TSDB_DATA_TYPE_NCHAR: { + ByteBuffer bb = (ByteBuffer) this.colData.get(col); + bb.position(fieldSize * this.rowIndex); + + int length = bb.getShort(); + + byte[] dest = new byte[length]; + bb.get(dest, 0, length); + if (NullType.isNcharNull(dest, length)) { + return null; + } + + try { + String ss = TaosGlobalConfig.getCharset(); + return new String(dest, ss); + } catch (UnsupportedEncodingException e) { + e.printStackTrace(); + } + } + } + + return 0; + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java index 8efcac90001b63af53c2943c0137f3c89bc994c5..c57f19550dd14719baecb835d76263df1e6a669b 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java @@ -218,5 +218,4 @@ public class TSDBResultSetRowData { public void setData(ArrayList data) { this.data = (ArrayList) data.clone(); } - } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java index 5c6b0545e91a13b793478efa1ec687e616d1a9ef..cdd88b825e2a8f6ba81c131bfd1214ffbe46f32c 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java @@ -19,7 +19,7 @@ import java.util.ArrayList; import java.util.List; public class TSDBStatement implements Statement { - private TSDBJNIConnector connecter = null; + private TSDBJNIConnector connector = null; /** * To store batched commands @@ -45,9 +45,9 @@ public class TSDBStatement implements Statement { this.connection = connection; } - TSDBStatement(TSDBConnection connection, 
TSDBJNIConnector connecter) { + TSDBStatement(TSDBConnection connection, TSDBJNIConnector connector) { this.connection = connection; - this.connecter = connecter; + this.connector = connector; this.isClosed = false; } @@ -65,25 +65,27 @@ public class TSDBStatement implements Statement { } // TODO make sure it is not a update query - pSql = this.connecter.executeQuery(sql); + pSql = this.connector.executeQuery(sql); - long resultSetPointer = this.connecter.getResultSet(); + long resultSetPointer = this.connector.getResultSet(); if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) { - this.connecter.freeResultSet(pSql); + this.connector.freeResultSet(pSql); throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); } // create/insert/update/delete/alter if (resultSetPointer == TSDBConstants.JNI_NULL_POINTER) { - this.connecter.freeResultSet(pSql); + this.connector.freeResultSet(pSql); return null; } - if (!this.connecter.isUpdateQuery(pSql)) { - return new TSDBResultSet(this.connecter, resultSetPointer); + if (!this.connector.isUpdateQuery(pSql)) { + TSDBResultSet res = new TSDBResultSet(this.connector, resultSetPointer); + res.setBatchFetch(this.connection.getBatchFetch()); + return res; } else { - this.connecter.freeResultSet(pSql); + this.connector.freeResultSet(pSql); return null; } @@ -95,28 +97,28 @@ public class TSDBStatement implements Statement { } // TODO check if current query is update query - pSql = this.connecter.executeQuery(sql); - long resultSetPointer = this.connecter.getResultSet(); + pSql = this.connector.executeQuery(sql); + long resultSetPointer = this.connector.getResultSet(); if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) { - this.connecter.freeResultSet(pSql); + this.connector.freeResultSet(pSql); throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); } - this.affectedRows = this.connecter.getAffectedRows(pSql); - this.connecter.freeResultSet(pSql); + this.affectedRows = this.connector.getAffectedRows(pSql); + this.connector.freeResultSet(pSql); return this.affectedRows; } public String getErrorMsg(long pSql) { - return this.connecter.getErrMsg(pSql); + return this.connector.getErrMsg(pSql); } public void close() throws SQLException { if (!isClosed) { - if (!this.connecter.isResultsetClosed()) { - this.connecter.freeResultSet(); + if (!this.connector.isResultsetClosed()) { + this.connector.freeResultSet(); } isClosed = true; } @@ -136,7 +138,7 @@ public class TSDBStatement implements Statement { } public void setMaxRows(int max) throws SQLException { - // always set maxRows to zero, meaning unlimitted rows in a resultSet + // always set maxRows to zero, meaning unlimited rows in a resultSet } public void setEscapeProcessing(boolean enable) throws SQLException { @@ -172,15 +174,15 @@ public class TSDBStatement implements Statement { throw new SQLException("Invalid method call on a closed statement."); } boolean res = true; - pSql = this.connecter.executeQuery(sql); - long resultSetPointer = this.connecter.getResultSet(); + pSql = this.connector.executeQuery(sql); + long resultSetPointer = this.connector.getResultSet(); if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) { - this.connecter.freeResultSet(pSql); + this.connector.freeResultSet(pSql); throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); } else if (resultSetPointer == TSDBConstants.JNI_NULL_POINTER) { // no result set is retrieved - this.connecter.freeResultSet(pSql); + 
this.connector.freeResultSet(pSql); res = false; } @@ -191,10 +193,10 @@ public class TSDBStatement implements Statement { if (isClosed) { throw new SQLException("Invalid method call on a closed statement."); } - long resultSetPointer = connecter.getResultSet(); + long resultSetPointer = connector.getResultSet(); TSDBResultSet resSet = null; if (resultSetPointer != TSDBConstants.JNI_NULL_POINTER) { - resSet = new TSDBResultSet(connecter, resultSetPointer); + resSet = new TSDBResultSet(connector, resultSetPointer); } return resSet; } @@ -267,7 +269,7 @@ public class TSDBStatement implements Statement { } public Connection getConnection() throws SQLException { - if (this.connecter != null) + if (this.connector != null) return this.connection; throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java new file mode 100644 index 0000000000000000000000000000000000000000..b82efca3ef4defb166632e4dd347de528e52d2c6 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java @@ -0,0 +1,319 @@ +package com.taosdata.jdbc.rs; + +import com.taosdata.jdbc.TSDBConstants; + +import java.sql.*; +import java.util.Map; +import java.util.Properties; +import java.util.concurrent.Executor; + +public class RestfulConnection implements Connection { + + private final String host; + private final int port; + private final Properties props; + private final String database; + private final String url; + + + public RestfulConnection(String host, String port, Properties props, String database, String url) { + this.host = host; + this.port = Integer.parseInt(port); + this.props = props; + this.database = database; + this.url = url; + } + + @Override + public Statement createStatement() throws SQLException { + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg("restful TDengine connection is closed.")); + return new RestfulStatement(this, this.database); + } + + @Override + public PreparedStatement prepareStatement(String sql) throws SQLException { + return null; + } + + @Override + public CallableStatement prepareCall(String sql) throws SQLException { + return null; + } + + @Override + public String nativeSQL(String sql) throws SQLException { + return null; + } + + @Override + public void setAutoCommit(boolean autoCommit) throws SQLException { + + } + + @Override + public boolean getAutoCommit() throws SQLException { + return false; + } + + @Override + public void commit() throws SQLException { + + } + + @Override + public void rollback() throws SQLException { + + } + + @Override + public void close() throws SQLException { + + } + + @Override + public boolean isClosed() throws SQLException { + return false; + } + + @Override + public DatabaseMetaData getMetaData() throws SQLException { + //TODO: RestfulDatabaseMetaData is not implemented + return new RestfulDatabaseMetaData(); + } + + @Override + public void setReadOnly(boolean readOnly) throws SQLException { + + } + + @Override + public boolean isReadOnly() throws SQLException { + return false; + } + + @Override + public void setCatalog(String catalog) throws SQLException { + + } + + @Override + public String getCatalog() throws SQLException { + return null; + } + + @Override + public void setTransactionIsolation(int level) throws SQLException { + + } + + @Override + public int getTransactionIsolation() throws SQLException { + return 0; + 
} + + @Override + public SQLWarning getWarnings() throws SQLException { + return null; + } + + @Override + public void clearWarnings() throws SQLException { + + } + + @Override + public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException { + return null; + } + + @Override + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { + return null; + } + + @Override + public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { + return null; + } + + @Override + public Map> getTypeMap() throws SQLException { + return null; + } + + @Override + public void setTypeMap(Map> map) throws SQLException { + + } + + @Override + public void setHoldability(int holdability) throws SQLException { + + } + + @Override + public int getHoldability() throws SQLException { + return 0; + } + + @Override + public Savepoint setSavepoint() throws SQLException { + return null; + } + + @Override + public Savepoint setSavepoint(String name) throws SQLException { + return null; + } + + @Override + public void rollback(Savepoint savepoint) throws SQLException { + + } + + @Override + public void releaseSavepoint(Savepoint savepoint) throws SQLException { + + } + + @Override + public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { + return null; + } + + @Override + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { + return null; + } + + @Override + public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { + return null; + } + + @Override + public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { + return null; + } + + @Override + public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { + return null; + } + + @Override + public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException { + return null; + } + + @Override + public Clob createClob() throws SQLException { + return null; + } + + @Override + public Blob createBlob() throws SQLException { + return null; + } + + @Override + public NClob createNClob() throws SQLException { + return null; + } + + @Override + public SQLXML createSQLXML() throws SQLException { + return null; + } + + @Override + public boolean isValid(int timeout) throws SQLException { + return false; + } + + @Override + public void setClientInfo(String name, String value) throws SQLClientInfoException { + + } + + @Override + public void setClientInfo(Properties properties) throws SQLClientInfoException { + + } + + @Override + public String getClientInfo(String name) throws SQLException { + return null; + } + + @Override + public Properties getClientInfo() throws SQLException { + return null; + } + + @Override + public Array createArrayOf(String typeName, Object[] elements) throws SQLException { + return null; + } + + @Override + public Struct createStruct(String typeName, Object[] attributes) throws SQLException { + return null; + } + + @Override + public void setSchema(String schema) throws SQLException { + + } + + @Override + public String getSchema() throws SQLException { + return null; + } + + @Override + public void abort(Executor executor) throws SQLException { + + } + + @Override + 
public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException { + + } + + @Override + public int getNetworkTimeout() throws SQLException { + return 0; + } + + @Override + public T unwrap(Class iface) throws SQLException { + return null; + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return false; + } + + public String getHost() { + return host; + } + + public int getPort() { + return port; + } + + public Properties getProps() { + return props; + } + + public String getDatabase() { + return database; + } + + public String getUrl() { + return url; + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDatabaseMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDatabaseMetaData.java new file mode 100644 index 0000000000000000000000000000000000000000..2b4d7899fa6dc6fbd45e01aacd14e99b3a6fb536 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDatabaseMetaData.java @@ -0,0 +1,886 @@ +package com.taosdata.jdbc.rs; + +import java.sql.*; + +public class RestfulDatabaseMetaData implements DatabaseMetaData { + + @Override + public boolean allProceduresAreCallable() throws SQLException { + return false; + } + + @Override + public boolean allTablesAreSelectable() throws SQLException { + return false; + } + + @Override + public String getURL() throws SQLException { + return null; + } + + @Override + public String getUserName() throws SQLException { + return null; + } + + @Override + public boolean isReadOnly() throws SQLException { + return false; + } + + @Override + public boolean nullsAreSortedHigh() throws SQLException { + return false; + } + + @Override + public boolean nullsAreSortedLow() throws SQLException { + return false; + } + + @Override + public boolean nullsAreSortedAtStart() throws SQLException { + return false; + } + + @Override + public boolean nullsAreSortedAtEnd() throws SQLException { + return false; + } + + @Override + public String getDatabaseProductName() throws SQLException { + return null; + } + + @Override + public String getDatabaseProductVersion() throws SQLException { + return null; + } + + @Override + public String getDriverName() throws SQLException { + return null; + } + + @Override + public String getDriverVersion() throws SQLException { + return null; + } + + @Override + public int getDriverMajorVersion() { + return 0; + } + + @Override + public int getDriverMinorVersion() { + return 0; + } + + @Override + public boolean usesLocalFiles() throws SQLException { + return false; + } + + @Override + public boolean usesLocalFilePerTable() throws SQLException { + return false; + } + + @Override + public boolean supportsMixedCaseIdentifiers() throws SQLException { + return false; + } + + @Override + public boolean storesUpperCaseIdentifiers() throws SQLException { + return false; + } + + @Override + public boolean storesLowerCaseIdentifiers() throws SQLException { + return false; + } + + @Override + public boolean storesMixedCaseIdentifiers() throws SQLException { + return false; + } + + @Override + public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException { + return false; + } + + @Override + public boolean storesUpperCaseQuotedIdentifiers() throws SQLException { + return false; + } + + @Override + public boolean storesLowerCaseQuotedIdentifiers() throws SQLException { + return false; + } + + @Override + public boolean storesMixedCaseQuotedIdentifiers() throws SQLException { + return false; + } + + @Override + public 
String getIdentifierQuoteString() throws SQLException { + return null; + } + + @Override + public String getSQLKeywords() throws SQLException { + return null; + } + + @Override + public String getNumericFunctions() throws SQLException { + return null; + } + + @Override + public String getStringFunctions() throws SQLException { + return null; + } + + @Override + public String getSystemFunctions() throws SQLException { + return null; + } + + @Override + public String getTimeDateFunctions() throws SQLException { + return null; + } + + @Override + public String getSearchStringEscape() throws SQLException { + return null; + } + + @Override + public String getExtraNameCharacters() throws SQLException { + return null; + } + + @Override + public boolean supportsAlterTableWithAddColumn() throws SQLException { + return false; + } + + @Override + public boolean supportsAlterTableWithDropColumn() throws SQLException { + return false; + } + + @Override + public boolean supportsColumnAliasing() throws SQLException { + return false; + } + + @Override + public boolean nullPlusNonNullIsNull() throws SQLException { + return false; + } + + @Override + public boolean supportsConvert() throws SQLException { + return false; + } + + @Override + public boolean supportsConvert(int fromType, int toType) throws SQLException { + return false; + } + + @Override + public boolean supportsTableCorrelationNames() throws SQLException { + return false; + } + + @Override + public boolean supportsDifferentTableCorrelationNames() throws SQLException { + return false; + } + + @Override + public boolean supportsExpressionsInOrderBy() throws SQLException { + return false; + } + + @Override + public boolean supportsOrderByUnrelated() throws SQLException { + return false; + } + + @Override + public boolean supportsGroupBy() throws SQLException { + return false; + } + + @Override + public boolean supportsGroupByUnrelated() throws SQLException { + return false; + } + + @Override + public boolean supportsGroupByBeyondSelect() throws SQLException { + return false; + } + + @Override + public boolean supportsLikeEscapeClause() throws SQLException { + return false; + } + + @Override + public boolean supportsMultipleResultSets() throws SQLException { + return false; + } + + @Override + public boolean supportsMultipleTransactions() throws SQLException { + return false; + } + + @Override + public boolean supportsNonNullableColumns() throws SQLException { + return false; + } + + @Override + public boolean supportsMinimumSQLGrammar() throws SQLException { + return false; + } + + @Override + public boolean supportsCoreSQLGrammar() throws SQLException { + return false; + } + + @Override + public boolean supportsExtendedSQLGrammar() throws SQLException { + return false; + } + + @Override + public boolean supportsANSI92EntryLevelSQL() throws SQLException { + return false; + } + + @Override + public boolean supportsANSI92IntermediateSQL() throws SQLException { + return false; + } + + @Override + public boolean supportsANSI92FullSQL() throws SQLException { + return false; + } + + @Override + public boolean supportsIntegrityEnhancementFacility() throws SQLException { + return false; + } + + @Override + public boolean supportsOuterJoins() throws SQLException { + return false; + } + + @Override + public boolean supportsFullOuterJoins() throws SQLException { + return false; + } + + @Override + public boolean supportsLimitedOuterJoins() throws SQLException { + return false; + } + + @Override + public String getSchemaTerm() throws SQLException { + return 
null; + } + + @Override + public String getProcedureTerm() throws SQLException { + return null; + } + + @Override + public String getCatalogTerm() throws SQLException { + return null; + } + + @Override + public boolean isCatalogAtStart() throws SQLException { + return false; + } + + @Override + public String getCatalogSeparator() throws SQLException { + return null; + } + + @Override + public boolean supportsSchemasInDataManipulation() throws SQLException { + return false; + } + + @Override + public boolean supportsSchemasInProcedureCalls() throws SQLException { + return false; + } + + @Override + public boolean supportsSchemasInTableDefinitions() throws SQLException { + return false; + } + + @Override + public boolean supportsSchemasInIndexDefinitions() throws SQLException { + return false; + } + + @Override + public boolean supportsSchemasInPrivilegeDefinitions() throws SQLException { + return false; + } + + @Override + public boolean supportsCatalogsInDataManipulation() throws SQLException { + return false; + } + + @Override + public boolean supportsCatalogsInProcedureCalls() throws SQLException { + return false; + } + + @Override + public boolean supportsCatalogsInTableDefinitions() throws SQLException { + return false; + } + + @Override + public boolean supportsCatalogsInIndexDefinitions() throws SQLException { + return false; + } + + @Override + public boolean supportsCatalogsInPrivilegeDefinitions() throws SQLException { + return false; + } + + @Override + public boolean supportsPositionedDelete() throws SQLException { + return false; + } + + @Override + public boolean supportsPositionedUpdate() throws SQLException { + return false; + } + + @Override + public boolean supportsSelectForUpdate() throws SQLException { + return false; + } + + @Override + public boolean supportsStoredProcedures() throws SQLException { + return false; + } + + @Override + public boolean supportsSubqueriesInComparisons() throws SQLException { + return false; + } + + @Override + public boolean supportsSubqueriesInExists() throws SQLException { + return false; + } + + @Override + public boolean supportsSubqueriesInIns() throws SQLException { + return false; + } + + @Override + public boolean supportsSubqueriesInQuantifieds() throws SQLException { + return false; + } + + @Override + public boolean supportsCorrelatedSubqueries() throws SQLException { + return false; + } + + @Override + public boolean supportsUnion() throws SQLException { + return false; + } + + @Override + public boolean supportsUnionAll() throws SQLException { + return false; + } + + @Override + public boolean supportsOpenCursorsAcrossCommit() throws SQLException { + return false; + } + + @Override + public boolean supportsOpenCursorsAcrossRollback() throws SQLException { + return false; + } + + @Override + public boolean supportsOpenStatementsAcrossCommit() throws SQLException { + return false; + } + + @Override + public boolean supportsOpenStatementsAcrossRollback() throws SQLException { + return false; + } + + @Override + public int getMaxBinaryLiteralLength() throws SQLException { + return 0; + } + + @Override + public int getMaxCharLiteralLength() throws SQLException { + return 0; + } + + @Override + public int getMaxColumnNameLength() throws SQLException { + return 0; + } + + @Override + public int getMaxColumnsInGroupBy() throws SQLException { + return 0; + } + + @Override + public int getMaxColumnsInIndex() throws SQLException { + return 0; + } + + @Override + public int getMaxColumnsInOrderBy() throws SQLException { + return 0; + } + + 
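All of the getMax* stubs above return 0, which the JDBC DatabaseMetaData contract defines as "no limit, or the limit is not known", so callers that follow the contract can still interpret the stubbed values safely. A minimal illustrative sketch of such a caller (not something this patch adds; MetaDataLimits and describeColumnNameLimit are hypothetical names used only for illustration):

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.SQLException;

class MetaDataLimits {
    // Interprets a getMax* value the way the JDBC contract defines it:
    // zero means "no limit, or the limit is not known".
    static String describeColumnNameLimit(Connection conn) throws SQLException {
        DatabaseMetaData meta = conn.getMetaData();
        int max = meta.getMaxColumnNameLength();
        return max == 0 ? "no limit / unknown" : ("at most " + max + " characters");
    }
}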
@Override + public int getMaxColumnsInSelect() throws SQLException { + return 0; + } + + @Override + public int getMaxColumnsInTable() throws SQLException { + return 0; + } + + @Override + public int getMaxConnections() throws SQLException { + return 0; + } + + @Override + public int getMaxCursorNameLength() throws SQLException { + return 0; + } + + @Override + public int getMaxIndexLength() throws SQLException { + return 0; + } + + @Override + public int getMaxSchemaNameLength() throws SQLException { + return 0; + } + + @Override + public int getMaxProcedureNameLength() throws SQLException { + return 0; + } + + @Override + public int getMaxCatalogNameLength() throws SQLException { + return 0; + } + + @Override + public int getMaxRowSize() throws SQLException { + return 0; + } + + @Override + public boolean doesMaxRowSizeIncludeBlobs() throws SQLException { + return false; + } + + @Override + public int getMaxStatementLength() throws SQLException { + return 0; + } + + @Override + public int getMaxStatements() throws SQLException { + return 0; + } + + @Override + public int getMaxTableNameLength() throws SQLException { + return 0; + } + + @Override + public int getMaxTablesInSelect() throws SQLException { + return 0; + } + + @Override + public int getMaxUserNameLength() throws SQLException { + return 0; + } + + @Override + public int getDefaultTransactionIsolation() throws SQLException { + return 0; + } + + @Override + public boolean supportsTransactions() throws SQLException { + return false; + } + + @Override + public boolean supportsTransactionIsolationLevel(int level) throws SQLException { + return false; + } + + @Override + public boolean supportsDataDefinitionAndDataManipulationTransactions() throws SQLException { + return false; + } + + @Override + public boolean supportsDataManipulationTransactionsOnly() throws SQLException { + return false; + } + + @Override + public boolean dataDefinitionCausesTransactionCommit() throws SQLException { + return false; + } + + @Override + public boolean dataDefinitionIgnoredInTransactions() throws SQLException { + return false; + } + + @Override + public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern) throws SQLException { + return null; + } + + @Override + public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern, String columnNamePattern) throws SQLException { + return null; + } + + @Override + public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) throws SQLException { + return null; + } + + @Override + public ResultSet getSchemas() throws SQLException { + return null; + } + + @Override + public ResultSet getCatalogs() throws SQLException { + return null; + } + + @Override + public ResultSet getTableTypes() throws SQLException { + return null; + } + + @Override + public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException { + return null; + } + + @Override + public ResultSet getColumnPrivileges(String catalog, String schema, String table, String columnNamePattern) throws SQLException { + return null; + } + + @Override + public ResultSet getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern) throws SQLException { + return null; + } + + @Override + public ResultSet getBestRowIdentifier(String catalog, String schema, String table, int scope, boolean nullable) throws SQLException { + return null; + } + + 
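The catalog methods in this stub (getTables, getColumns, getCatalogs and the others above) currently return null, so generic JDBC tooling that enumerates tables through DatabaseMetaData will see nothing over a RESTful connection. A possible interim workaround, sketched under the assumption that the REST endpoint accepts TDengine's SHOW TABLES statement once a database has been selected in the JDBC URL; ShowTablesFallback and listTables are hypothetical names used only for illustration:

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;

class ShowTablesFallback {
    // Enumerates table names by querying the server directly instead of DatabaseMetaData.
    static List<String> listTables(Connection conn) throws SQLException {
        List<String> tables = new ArrayList<>();
        try (Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SHOW TABLES")) {
            while (rs.next()) {
                tables.add(rs.getString(1)); // assumes the first column holds the table name
            }
        }
        return tables;
    }
}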
@Override + public ResultSet getVersionColumns(String catalog, String schema, String table) throws SQLException { + return null; + } + + @Override + public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException { + return null; + } + + @Override + public ResultSet getImportedKeys(String catalog, String schema, String table) throws SQLException { + return null; + } + + @Override + public ResultSet getExportedKeys(String catalog, String schema, String table) throws SQLException { + return null; + } + + @Override + public ResultSet getCrossReference(String parentCatalog, String parentSchema, String parentTable, String foreignCatalog, String foreignSchema, String foreignTable) throws SQLException { + return null; + } + + @Override + public ResultSet getTypeInfo() throws SQLException { + return null; + } + + @Override + public ResultSet getIndexInfo(String catalog, String schema, String table, boolean unique, boolean approximate) throws SQLException { + return null; + } + + @Override + public boolean supportsResultSetType(int type) throws SQLException { + return false; + } + + @Override + public boolean supportsResultSetConcurrency(int type, int concurrency) throws SQLException { + return false; + } + + @Override + public boolean ownUpdatesAreVisible(int type) throws SQLException { + return false; + } + + @Override + public boolean ownDeletesAreVisible(int type) throws SQLException { + return false; + } + + @Override + public boolean ownInsertsAreVisible(int type) throws SQLException { + return false; + } + + @Override + public boolean othersUpdatesAreVisible(int type) throws SQLException { + return false; + } + + @Override + public boolean othersDeletesAreVisible(int type) throws SQLException { + return false; + } + + @Override + public boolean othersInsertsAreVisible(int type) throws SQLException { + return false; + } + + @Override + public boolean updatesAreDetected(int type) throws SQLException { + return false; + } + + @Override + public boolean deletesAreDetected(int type) throws SQLException { + return false; + } + + @Override + public boolean insertsAreDetected(int type) throws SQLException { + return false; + } + + @Override + public boolean supportsBatchUpdates() throws SQLException { + return false; + } + + @Override + public ResultSet getUDTs(String catalog, String schemaPattern, String typeNamePattern, int[] types) throws SQLException { + return null; + } + + @Override + public Connection getConnection() throws SQLException { + return null; + } + + @Override + public boolean supportsSavepoints() throws SQLException { + return false; + } + + @Override + public boolean supportsNamedParameters() throws SQLException { + return false; + } + + @Override + public boolean supportsMultipleOpenResults() throws SQLException { + return false; + } + + @Override + public boolean supportsGetGeneratedKeys() throws SQLException { + return false; + } + + @Override + public ResultSet getSuperTypes(String catalog, String schemaPattern, String typeNamePattern) throws SQLException { + return null; + } + + @Override + public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException { + return null; + } + + @Override + public ResultSet getAttributes(String catalog, String schemaPattern, String typeNamePattern, String attributeNamePattern) throws SQLException { + return null; + } + + @Override + public boolean supportsResultSetHoldability(int holdability) throws SQLException { + return false; + } + + @Override + public int 
getResultSetHoldability() throws SQLException { + return 0; + } + + @Override + public int getDatabaseMajorVersion() throws SQLException { + return 0; + } + + @Override + public int getDatabaseMinorVersion() throws SQLException { + return 0; + } + + @Override + public int getJDBCMajorVersion() throws SQLException { + return 0; + } + + @Override + public int getJDBCMinorVersion() throws SQLException { + return 0; + } + + @Override + public int getSQLStateType() throws SQLException { + return 0; + } + + @Override + public boolean locatorsUpdateCopy() throws SQLException { + return false; + } + + @Override + public boolean supportsStatementPooling() throws SQLException { + return false; + } + + @Override + public RowIdLifetime getRowIdLifetime() throws SQLException { + return null; + } + + @Override + public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException { + return null; + } + + @Override + public boolean supportsStoredFunctionsUsingCallSyntax() throws SQLException { + return false; + } + + @Override + public boolean autoCommitFailureClosesAllResultSets() throws SQLException { + return false; + } + + @Override + public ResultSet getClientInfoProperties() throws SQLException { + return null; + } + + @Override + public ResultSet getFunctions(String catalog, String schemaPattern, String functionNamePattern) throws SQLException { + return null; + } + + @Override + public ResultSet getFunctionColumns(String catalog, String schemaPattern, String functionNamePattern, String columnNamePattern) throws SQLException { + return null; + } + + @Override + public ResultSet getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException { + return null; + } + + @Override + public boolean generatedKeyAlwaysReturned() throws SQLException { + return false; + } + + @Override + public T unwrap(Class iface) throws SQLException { + return null; + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return false; + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java new file mode 100644 index 0000000000000000000000000000000000000000..c267f660debdc6b195f6f2cd64e72b37ae0677ea --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java @@ -0,0 +1,91 @@ +package com.taosdata.jdbc.rs; + +import com.alibaba.fastjson.JSON; +import com.alibaba.fastjson.JSONObject; +import com.taosdata.jdbc.AbstractTaosDriver; +import com.taosdata.jdbc.TSDBConstants; +import com.taosdata.jdbc.TSDBDriver; +import com.taosdata.jdbc.rs.util.HttpClientPoolUtil; + +import java.sql.*; +import java.util.Properties; +import java.util.logging.Logger; + +public class RestfulDriver extends AbstractTaosDriver { + + private static final String URL_PREFIX = "jdbc:TAOS-RS://"; + + static { + try { + DriverManager.registerDriver(new RestfulDriver()); + } catch (SQLException e) { + throw new RuntimeException(TSDBConstants.WrapErrMsg("can not register Restful JDBC driver"), e); + } + } + + @Override + public Connection connect(String url, Properties info) throws SQLException { + // throw SQLException if url is null + if (url == null) + throw new SQLException(TSDBConstants.WrapErrMsg("url is not set!")); + // return null if url is not be accepted + if (!acceptsURL(url)) + return null; + + Properties props = parseURL(url, info); + String host = props.getProperty(TSDBDriver.PROPERTY_KEY_HOST, 
"localhost"); + String port = props.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "6041"); + String database = props.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME); + + String loginUrl = "http://" + props.getProperty(TSDBDriver.PROPERTY_KEY_HOST) + ":" + + props.getProperty(TSDBDriver.PROPERTY_KEY_PORT) + "/rest/login/" + + props.getProperty(TSDBDriver.PROPERTY_KEY_USER) + "/" + + props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD) + ""; + String result = HttpClientPoolUtil.execute(loginUrl); + JSONObject jsonResult = JSON.parseObject(result); + String status = jsonResult.getString("status"); + if (!status.equals("succ")) { + throw new SQLException(jsonResult.getString("desc")); + } + + return new RestfulConnection(host, port, props, database, url); + } + + @Override + public boolean acceptsURL(String url) throws SQLException { + if (url == null) + throw new SQLException(TSDBConstants.WrapErrMsg("url is null")); + return (url != null && url.length() > 0 && url.trim().length() > 0) && url.startsWith(URL_PREFIX); + } + + @Override + public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException { + if (info == null) { + info = new Properties(); + } + if (acceptsURL(url)) { + info = parseURL(url, info); + } + return getPropertyInfo(info); + } + + @Override + public int getMajorVersion() { + return 2; + } + + @Override + public int getMinorVersion() { + return 0; + } + + @Override + public boolean jdbcCompliant() { + return false; + } + + @Override + public Logger getParentLogger() throws SQLFeatureNotSupportedException { + return null; + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java new file mode 100644 index 0000000000000000000000000000000000000000..c536ae4a8957519cdcb8d64f95fbdf2934624fff --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java @@ -0,0 +1,1180 @@ +package com.taosdata.jdbc.rs; + +import com.taosdata.jdbc.TSDBConstants; +import org.apache.commons.lang3.StringUtils; + +import java.io.InputStream; +import java.io.Reader; +import java.math.BigDecimal; +import java.net.URL; +import java.sql.*; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Calendar; +import java.util.Map; + +public class RestfulResultSet implements ResultSet { + private boolean isClosed = false; + private int pos = -1; + private ArrayList> data; + private ArrayList fields; + + public RestfulResultSet(String str, String fieldData) { + data = new ArrayList<>(); + str = str.substring(2, str.length() - 2); + ArrayList strTemp = new ArrayList<>(Arrays.asList(str.split("],\\["))); + for (String s : strTemp) { + ArrayList curr = new ArrayList<>(Arrays.asList(s.split(","))); + data.add(curr); + } + if (!StringUtils.isBlank(fieldData)) { + fields = new ArrayList<>(); + fieldData = fieldData.substring(2, fieldData.length() - 2); + ArrayList fieldTemp = new ArrayList<>(Arrays.asList(fieldData.split("],\\["))); + for (String s : fieldTemp) { + String curr = Arrays.asList(s.split(",")).get(0); + fields.add(curr.substring(1, curr.length() - 1)); // 去掉双引号 + } + } + } + + @Override + public boolean next() throws SQLException { + if (isClosed) throw new SQLException(TSDBConstants.WrapErrMsg("Result is Closed!!!")); + if (pos < data.size() - 1) { + pos++; + return true; + } + return false; + } + + @Override + public void close() throws SQLException { + this.isClosed = true; + } + + @Override + public boolean wasNull() 
throws SQLException { + return data.isEmpty(); + } + + @Override + public String getString(int columnIndex) throws SQLException { + if (columnIndex > data.get(pos).size()) { + throw new SQLException(TSDBConstants.WrapErrMsg("Column Index out of range, " + columnIndex + " > " + data.get(pos).size())); + } + return data.get(pos).get(columnIndex - 1); + } + + @Override + public boolean getBoolean(int columnIndex) throws SQLException { + String result = getString(columnIndex); + if (!(result.equals("true") || result.equals("false"))) { + throw new SQLException("not boolean value"); + } + return result.equals("true"); + } + + @Override + public byte getByte(int columnIndex) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public short getShort(int columnIndex) throws SQLException { + return Short.parseShort(getString(columnIndex)); + } + + @Override + public int getInt(int columnIndex) throws SQLException { + String result = getString(columnIndex); + return Integer.parseInt(result); + } + + @Override + public long getLong(int columnIndex) throws SQLException { + String result = getString(columnIndex); + return Long.parseLong(result); + } + + @Override + public float getFloat(int columnIndex) throws SQLException { + String result = getString(columnIndex); + return Float.parseFloat(result); + } + + @Override + public double getDouble(int columnIndex) throws SQLException { + String result = getString(columnIndex); + return Double.parseDouble(result); + } + + @Override + public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public byte[] getBytes(int columnIndex) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Date getDate(int columnIndex) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Time getTime(int columnIndex) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Timestamp getTimestamp(int columnIndex) throws SQLException { + String strDate = getString(columnIndex); + strDate = strDate.substring(1, strDate.length() - 1); + return Timestamp.valueOf(strDate); + } + + @Override + public InputStream getAsciiStream(int columnIndex) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public InputStream getUnicodeStream(int columnIndex) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public InputStream getBinaryStream(int columnIndex) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public String getString(String columnLabel) throws SQLException { + return getString(findColumn(columnLabel) + 1); + } + + @Override + public boolean getBoolean(String columnLabel) throws SQLException { + return Boolean.parseBoolean(getString(columnLabel)); + } + + @Override + public byte getByte(String columnLabel) throws SQLException { + return 0; + } + + @Override + public short getShort(String columnLabel) throws SQLException { + return Short.parseShort(getString(columnLabel)); + } + + @Override + public int getInt(String columnLabel) throws SQLException { + 
return Integer.parseInt(getString(columnLabel)); + } + + @Override + public long getLong(String columnLabel) throws SQLException { + return Long.parseLong(getString(columnLabel)); + } + + @Override + public float getFloat(String columnLabel) throws SQLException { + String result = getString(columnLabel); + return Float.parseFloat(result); + } + + @Override + public double getDouble(String columnLabel) throws SQLException { + return Double.parseDouble(getString(columnLabel)); + } + + @Override + public BigDecimal getBigDecimal(String columnLabel, int scale) throws SQLException { + return null; + } + + @Override + public byte[] getBytes(String columnLabel) throws SQLException { + return new byte[0]; + } + + @Override + public Date getDate(String columnLabel) throws SQLException { + return null; + } + + @Override + public Time getTime(String columnLabel) throws SQLException { + return null; + } + + @Override + public Timestamp getTimestamp(String columnLabel) throws SQLException { + return Timestamp.valueOf(getString(columnLabel)); + } + + @Override + public InputStream getAsciiStream(String columnLabel) throws SQLException { + return null; + } + + @Override + public InputStream getUnicodeStream(String columnLabel) throws SQLException { + return null; + } + + @Override + public InputStream getBinaryStream(String columnLabel) throws SQLException { + return null; + } + + @Override + public SQLWarning getWarnings() throws SQLException { + return null; + //TODO: SQLFeature Not Supported +// throw new SQLFeatureNotSupportedException(); + } + + @Override + public void clearWarnings() throws SQLException { + return; + //TODO: SQLFeature Not Supported +// throw new SQLFeatureNotSupportedException(); + } + + @Override + public String getCursorName() throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public ResultSetMetaData getMetaData() throws SQLException { + return new RestfulResultSetMetaData(fields); + } + + @Override + public Object getObject(int columnIndex) throws SQLException { +// return null; + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Object getObject(String columnLabel) throws SQLException { +// return null; + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public int findColumn(String columnLabel) throws SQLException { + return fields.indexOf(columnLabel); + } + + @Override + public Reader getCharacterStream(int columnIndex) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Reader getCharacterStream(String columnLabel) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public BigDecimal getBigDecimal(int columnIndex) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public BigDecimal getBigDecimal(String columnLabel) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean isBeforeFirst() throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean isAfterLast() throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean isFirst() throws 
SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean isLast() throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void beforeFirst() throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void afterLast() throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean first() throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean last() throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public int getRow() throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean absolute(int row) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean relative(int rows) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean previous() throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setFetchDirection(int direction) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public int getFetchDirection() throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setFetchSize(int rows) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public int getFetchSize() throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public int getType() throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public int getConcurrency() throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean rowUpdated() throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean rowInserted() throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean rowDeleted() throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateNull(int columnIndex) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBoolean(int columnIndex, boolean x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateByte(int columnIndex, byte x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateShort(int columnIndex, short x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void 
updateInt(int columnIndex, int x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateLong(int columnIndex, long x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateFloat(int columnIndex, float x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateDouble(int columnIndex, double x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateString(int columnIndex, String x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBytes(int columnIndex, byte[] x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateDate(int columnIndex, Date x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateTime(int columnIndex, Time x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateObject(int columnIndex, Object x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateNull(String columnLabel) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBoolean(String columnLabel, boolean x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateByte(String columnLabel, byte x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateShort(String columnLabel, short x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateInt(String columnLabel, int x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new 
SQLFeatureNotSupportedException(); + } + + @Override + public void updateLong(String columnLabel, long x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateFloat(String columnLabel, float x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateDouble(String columnLabel, double x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateString(String columnLabel, String x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBytes(String columnLabel, byte[] x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateDate(String columnLabel, Date x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateTime(String columnLabel, Time x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x, int length) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader, int length) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateObject(String columnLabel, Object x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void insertRow() throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateRow() throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void deleteRow() throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void refreshRow() throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void cancelRowUpdates() throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void moveToInsertRow() throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + 
public void moveToCurrentRow() throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Statement getStatement() throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Object getObject(int columnIndex, Map> map) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Ref getRef(int columnIndex) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Blob getBlob(int columnIndex) throws SQLException { + return null; + } + + @Override + public Clob getClob(int columnIndex) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Array getArray(int columnIndex) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Object getObject(String columnLabel, Map> map) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Ref getRef(String columnLabel) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Blob getBlob(String columnLabel) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Clob getClob(String columnLabel) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Array getArray(String columnLabel) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Date getDate(int columnIndex, Calendar cal) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Date getDate(String columnLabel, Calendar cal) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Time getTime(int columnIndex, Calendar cal) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Time getTime(String columnLabel, Calendar cal) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Timestamp getTimestamp(String columnLabel, Calendar cal) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public URL getURL(int columnIndex) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public URL getURL(String columnLabel) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateRef(int columnIndex, Ref x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateRef(String columnLabel, Ref x) throws SQLException { + //TODO: 
SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBlob(int columnIndex, Blob x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBlob(String columnLabel, Blob x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateClob(int columnIndex, Clob x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateClob(String columnLabel, Clob x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateArray(int columnIndex, Array x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateArray(String columnLabel, Array x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public RowId getRowId(int columnIndex) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public RowId getRowId(String columnLabel) throws SQLException { + return null; + } + + @Override + public void updateRowId(int columnIndex, RowId x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateRowId(String columnLabel, RowId x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public int getHoldability() throws SQLException { +// return 0; + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean isClosed() throws SQLException { + return false; + //TODO: SQLFeature Not Supported +// throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateNString(int columnIndex, String nString) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateNString(String columnLabel, String nString) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateNClob(int columnIndex, NClob nClob) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateNClob(String columnLabel, NClob nClob) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public NClob getNClob(int columnIndex) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public NClob getNClob(String columnLabel) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public SQLXML getSQLXML(int columnIndex) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public SQLXML getSQLXML(String columnLabel) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException 
{ + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public String getNString(int columnIndex) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public String getNString(String columnLabel) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Reader getNCharacterStream(int columnIndex) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Reader getNCharacterStream(String columnLabel) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateNCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x, long length) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x, long length) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBlob(int columnIndex, InputStream inputStream, long length) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBlob(String columnLabel, InputStream inputStream, long length) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateClob(int columnIndex, Reader reader, long length) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateClob(String columnLabel, Reader reader, long length) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException { + //TODO: SQLFeature Not Supported + throw new 
SQLFeatureNotSupportedException(); + } + + @Override + public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateNCharacterStream(String columnLabel, Reader reader) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBlob(String columnLabel, InputStream inputStream) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateClob(int columnIndex, Reader reader) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateClob(String columnLabel, Reader reader) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateNClob(int columnIndex, Reader reader) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateNClob(String columnLabel, Reader reader) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public T getObject(int columnIndex, Class type) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public T getObject(String columnLabel, Class type) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public T unwrap(Class iface) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + //TODO: SQLFeature Not Supported + throw new SQLFeatureNotSupportedException(); + } +} diff --git 
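Nearly every mutation, positioning, and streaming method in the RESTful `ResultSet` above is a stub that throws `SQLFeatureNotSupportedException`, so callers should treat it as a read-only, forward-only cursor. A minimal consumption sketch under that assumption; the host name, REST port, and the `log.log` table are placeholders borrowed from the tests later in this patch, not guaranteed defaults:

```java
import java.sql.*;

public class RestfulResultSetReadOnlySketch {
    public static void main(String[] args) throws Exception {
        Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
        try (Connection conn = DriverManager.getConnection(
                "jdbc:TAOS-RS://localhost:6041/?user=root&password=taosdata");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("select * from log.log")) {
            while (rs.next()) {
                System.out.println(rs.getString(1)); // plain getters are the supported path
            }
            try {
                rs.updateNString(1, "x");            // any updateXXX/stream/RowId call hits a stub
            } catch (SQLFeatureNotSupportedException expected) {
                System.out.println("RESTful result set is read-only: " + expected);
            }
        }
    }
}
```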
a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSetMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSetMetaData.java new file mode 100644 index 0000000000000000000000000000000000000000..5dd61391bcf0f973726c7954e330bb5054c4e91f --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSetMetaData.java @@ -0,0 +1,129 @@ +package com.taosdata.jdbc.rs; + +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.util.List; + +public class RestfulResultSetMetaData implements ResultSetMetaData { + + private List fields; + + public RestfulResultSetMetaData(List fields) { + this.fields = fields; + } + + @Override + public int getColumnCount() throws SQLException { + return fields.size(); + } + + @Override + public boolean isAutoIncrement(int column) throws SQLException { + return false; + } + + @Override + public boolean isCaseSensitive(int column) throws SQLException { + return false; + } + + @Override + public boolean isSearchable(int column) throws SQLException { + return false; + } + + @Override + public boolean isCurrency(int column) throws SQLException { + return false; + } + + @Override + public int isNullable(int column) throws SQLException { + return 0; + } + + @Override + public boolean isSigned(int column) throws SQLException { + return false; + } + + @Override + public int getColumnDisplaySize(int column) throws SQLException { + return 0; + } + + @Override + public String getColumnLabel(int column) throws SQLException { + return fields.get(column - 1); + } + + @Override + public String getColumnName(int column) throws SQLException { + return null; + } + + @Override + public String getSchemaName(int column) throws SQLException { + return null; + } + + @Override + public int getPrecision(int column) throws SQLException { + return 0; + } + + @Override + public int getScale(int column) throws SQLException { + return 0; + } + + @Override + public String getTableName(int column) throws SQLException { + return null; + } + + @Override + public String getCatalogName(int column) throws SQLException { + return null; + } + + @Override + public int getColumnType(int column) throws SQLException { + return 0; + } + + @Override + public String getColumnTypeName(int column) throws SQLException { + return null; + } + + @Override + public boolean isReadOnly(int column) throws SQLException { + return false; + } + + @Override + public boolean isWritable(int column) throws SQLException { + return false; + } + + @Override + public boolean isDefinitelyWritable(int column) throws SQLException { + return false; + } + + @Override + public String getColumnClassName(int column) throws SQLException { + return null; + } + + @Override + public T unwrap(Class iface) throws SQLException { + return null; + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return false; + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java new file mode 100644 index 0000000000000000000000000000000000000000..20510f01352c6b8a82ed300bd629d3184eb58894 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java @@ -0,0 +1,280 @@ +package com.taosdata.jdbc.rs; + +import com.alibaba.fastjson.JSON; +import com.alibaba.fastjson.JSONObject; +import com.taosdata.jdbc.TSDBConstants; +import com.taosdata.jdbc.rs.util.HttpClientPoolUtil; + +import java.sql.*; 
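In the `RestfulResultSetMetaData` above, only `getColumnCount()` and `getColumnLabel()` are backed by real data (the labels appear to come from the `DESCRIBE` call issued by `RestfulStatement`, shown next); the remaining accessors currently return `0`/`null`/`false` placeholders. A hedged sketch of reading the usable metadata; the connection URL and table name are placeholders taken from the tests in this patch:

```java
import java.sql.*;

public class RestfulMetaDataSketch {
    public static void main(String[] args) throws Exception {
        Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
        try (Connection conn = DriverManager.getConnection(
                "jdbc:TAOS-RS://localhost:6041/?user=root&password=taosdata");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("select * from log.log")) {
            ResultSetMetaData meta = rs.getMetaData();
            // getColumnCount()/getColumnLabel() carry real information;
            // getColumnType(), getColumnName(), precision, etc. are still stubs here.
            for (int i = 1; i <= meta.getColumnCount(); i++) {
                System.out.println(i + ": " + meta.getColumnLabel(i));
            }
        }
    }
}
```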
+import java.util.Arrays; +import java.util.List; + +public class RestfulStatement implements Statement { + + private final String catalog; + private final RestfulConnection conn; + + public RestfulStatement(RestfulConnection c, String catalog) { + this.conn = c; + this.catalog = catalog; + } + + @Override + public ResultSet executeQuery(String sql) throws SQLException { + + final String url = "http://" + conn.getHost() + ":"+conn.getPort()+"/rest/sql"; + + String result = HttpClientPoolUtil.execute(url, sql); + String fields = ""; + List words = Arrays.asList(sql.split(" ")); + if (words.get(0).equalsIgnoreCase("select")) { + int index = 0; + if (words.contains("from")) { + index = words.indexOf("from"); + } + if (words.contains("FROM")) { + index = words.indexOf("FROM"); + } + fields = HttpClientPoolUtil.execute(url, "DESCRIBE " + words.get(index + 1)); + } + + JSONObject jsonObject = JSON.parseObject(result); + if (jsonObject.getString("status").equals("error")) { + throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + + jsonObject.getString("desc") + "\n" + + "error code: " + jsonObject.getString("code"))); + } + String dataStr = jsonObject.getString("data"); + if ("use".equalsIgnoreCase(fields.split(" ")[0])) { + return new RestfulResultSet(dataStr, ""); + } + + JSONObject jsonField = JSON.parseObject(fields); + if (jsonField == null) { + return new RestfulResultSet(dataStr, ""); + } + if (jsonField.getString("status").equals("error")) { + throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + + jsonField.getString("desc") + "\n" + + "error code: " + jsonField.getString("code"))); + } + String fieldData = jsonField.getString("data"); + + return new RestfulResultSet(dataStr, fieldData); + } + + @Override + public int executeUpdate(String sql) throws SQLException { + return 0; + } + + @Override + public void close() throws SQLException { + + } + + @Override + public int getMaxFieldSize() throws SQLException { + return 0; + } + + @Override + public void setMaxFieldSize(int max) throws SQLException { + + } + + @Override + public int getMaxRows() throws SQLException { + return 0; + } + + @Override + public void setMaxRows(int max) throws SQLException { + + } + + @Override + public void setEscapeProcessing(boolean enable) throws SQLException { + + } + + @Override + public int getQueryTimeout() throws SQLException { + return 0; + } + + @Override + public void setQueryTimeout(int seconds) throws SQLException { + + } + + @Override + public void cancel() throws SQLException { + + } + + @Override + public SQLWarning getWarnings() throws SQLException { + return null; + } + + @Override + public void clearWarnings() throws SQLException { + + } + + @Override + public void setCursorName(String name) throws SQLException { + + } + + @Override + public boolean execute(String sql) throws SQLException { + return false; + } + + @Override + public ResultSet getResultSet() throws SQLException { + return null; + } + + @Override + public int getUpdateCount() throws SQLException { + return 0; + } + + @Override + public boolean getMoreResults() throws SQLException { + return false; + } + + @Override + public void setFetchDirection(int direction) throws SQLException { + + } + + @Override + public int getFetchDirection() throws SQLException { + return 0; + } + + @Override + public void setFetchSize(int rows) throws SQLException { + + } + + @Override + public int getFetchSize() throws SQLException { + return 0; + } + + @Override + public int getResultSetConcurrency() throws 
SQLException { + return 0; + } + + @Override + public int getResultSetType() throws SQLException { + return 0; + } + + @Override + public void addBatch(String sql) throws SQLException { + + } + + @Override + public void clearBatch() throws SQLException { + + } + + @Override + public int[] executeBatch() throws SQLException { + return new int[0]; + } + + @Override + public Connection getConnection() throws SQLException { + return null; + } + + @Override + public boolean getMoreResults(int current) throws SQLException { + return false; + } + + @Override + public ResultSet getGeneratedKeys() throws SQLException { + return null; + } + + @Override + public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { + return 0; + } + + @Override + public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { + return 0; + } + + @Override + public int executeUpdate(String sql, String[] columnNames) throws SQLException { + return 0; + } + + @Override + public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { + return false; + } + + @Override + public boolean execute(String sql, int[] columnIndexes) throws SQLException { + return false; + } + + @Override + public boolean execute(String sql, String[] columnNames) throws SQLException { + return false; + } + + @Override + public int getResultSetHoldability() throws SQLException { + return 0; + } + + @Override + public boolean isClosed() throws SQLException { + return false; + } + + @Override + public void setPoolable(boolean poolable) throws SQLException { + + } + + @Override + public boolean isPoolable() throws SQLException { + return false; + } + + @Override + public void closeOnCompletion() throws SQLException { + + } + + @Override + public boolean isCloseOnCompletion() throws SQLException { + return false; + } + + @Override + public T unwrap(Class iface) throws SQLException { + return null; + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return false; + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/util/HttpClientPoolUtil.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/util/HttpClientPoolUtil.java new file mode 100644 index 0000000000000000000000000000000000000000..65399b122d97254b88b6bc2ef08910d7badc5061 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/util/HttpClientPoolUtil.java @@ -0,0 +1,222 @@ +package com.taosdata.jdbc.rs.util; + +import org.apache.commons.lang3.StringUtils; +import org.apache.http.HeaderElement; +import org.apache.http.HeaderElementIterator; +import org.apache.http.HttpEntity; +import org.apache.http.client.config.RequestConfig; +import org.apache.http.client.methods.*; +import org.apache.http.client.protocol.HttpClientContext; +import org.apache.http.conn.ConnectionKeepAliveStrategy; +import org.apache.http.entity.StringEntity; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClients; +import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; +import org.apache.http.message.BasicHeaderElementIterator; +import org.apache.http.protocol.HTTP; +import org.apache.http.protocol.HttpContext; +import org.apache.http.util.EntityUtils; + + +public class HttpClientPoolUtil { + public static PoolingHttpClientConnectionManager cm = null; + public static CloseableHttpClient httpClient = null; + /** + * 默认content 类型 + */ + private static final String DEFAULT_CONTENT_TYPE = "application/json"; + /** + * 默认请求超时时间30s + */ + 
private static final int DEFAULT_TIME_OUT = 15000; + private static final int count = 32; + private static final int totalCount = 1000; + private static final int Http_Default_Keep_Time = 15000; + + /** + * 初始化连接池 + */ + public static synchronized void initPools() { + if (httpClient == null) { + cm = new PoolingHttpClientConnectionManager(); + cm.setDefaultMaxPerRoute(count); + cm.setMaxTotal(totalCount); + httpClient = HttpClients.custom().setKeepAliveStrategy(defaultStrategy).setConnectionManager(cm).build(); + } + } + + /** + * Http connection keepAlive 设置 + */ + public static ConnectionKeepAliveStrategy defaultStrategy = (response, context) -> { + HeaderElementIterator it = new BasicHeaderElementIterator(response.headerIterator(HTTP.CONN_KEEP_ALIVE)); + int keepTime = Http_Default_Keep_Time * 1000; + while (it.hasNext()) { + HeaderElement headerElement = it.nextElement(); + String param = headerElement.getName(); + String value = headerElement.getValue(); + if (value != null && param.equalsIgnoreCase("timeout")) { + try { + return Long.parseLong(value) * 1000; + } catch (Exception e) { + new Exception( + "format KeepAlive timeout exception, exception:" + e.toString()) + .printStackTrace(); + } + } + } + return keepTime; + }; + + public static CloseableHttpClient getHttpClient() { + return httpClient; + } + + public static PoolingHttpClientConnectionManager getHttpConnectionManager() { + return cm; + } + + /** + * 执行http post请求 + * 默认采用Content-Type:application/json,Accept:application/json + * + * @param uri 请求地址 + * @param data 请求数据 + * @return responseBody + */ + public static String execute(String uri, String data) { + long startTime = System.currentTimeMillis(); + HttpEntity httpEntity = null; + HttpEntityEnclosingRequestBase method = null; + String responseBody = ""; + try { + if (httpClient == null) { + initPools(); + } + method = (HttpEntityEnclosingRequestBase) getRequest(uri, HttpPost.METHOD_NAME, DEFAULT_CONTENT_TYPE, 0); + method.setEntity(new StringEntity(data)); + HttpContext context = HttpClientContext.create(); + CloseableHttpResponse httpResponse = httpClient.execute(method, context); + httpEntity = httpResponse.getEntity(); + if (httpEntity != null) { + responseBody = EntityUtils.toString(httpEntity, "UTF-8"); + } + } catch (Exception e) { + if (method != null) { + method.abort(); + } +// e.printStackTrace(); +// logger.error("execute post request exception, url:" + uri + ", exception:" + e.toString() +// + ", cost time(ms):" + (System.currentTimeMillis() - startTime)); + new Exception("execute post request exception, url:" + + uri + ", exception:" + e.toString() + + ", cost time(ms):" + (System.currentTimeMillis() - startTime)) + .printStackTrace(); + } finally { + if (httpEntity != null) { + try { + EntityUtils.consumeQuietly(httpEntity); + } catch (Exception e) { +// e.printStackTrace(); +// logger.error("close response exception, url:" + uri + ", exception:" + e.toString() +// + ", cost time(ms):" + (System.currentTimeMillis() - startTime)); + new Exception( + "close response exception, url:" + uri + + ", exception:" + e.toString() + + ", cost time(ms):" + (System.currentTimeMillis() - startTime)) + .printStackTrace(); + } + } + } + return responseBody; + } + + /** + * * 创建请求 + * + * @param uri 请求url + * @param methodName 请求的方法类型 + * @param contentType contentType类型 + * @param timeout 超时时间 + * @return HttpRequestBase 返回类型 + * @author lisc + */ + public static HttpRequestBase getRequest(String uri, String methodName, String contentType, int timeout) { + if (httpClient 
== null) { + initPools(); + } + HttpRequestBase method; + if (timeout <= 0) { + timeout = DEFAULT_TIME_OUT; + } + RequestConfig requestConfig = RequestConfig.custom().setSocketTimeout(timeout * 1000) + .setConnectTimeout(timeout * 1000).setConnectionRequestTimeout(timeout * 1000) + .setExpectContinueEnabled(false).build(); + if (HttpPut.METHOD_NAME.equalsIgnoreCase(methodName)) { + method = new HttpPut(uri); + } else if (HttpPost.METHOD_NAME.equalsIgnoreCase(methodName)) { + method = new HttpPost(uri); + } else if (HttpGet.METHOD_NAME.equalsIgnoreCase(methodName)) { + method = new HttpGet(uri); + } else { + method = new HttpPost(uri); + } + + if (StringUtils.isBlank(contentType)) { + contentType = DEFAULT_CONTENT_TYPE; + } + method.addHeader("Content-Type", contentType); + method.addHeader("Accept", contentType); + method.setConfig(requestConfig); + return method; + } + + /** + * 执行GET 请求 + * + * @param uri 网址 + * @return responseBody + */ + public static String execute(String uri) { + long startTime = System.currentTimeMillis(); + HttpEntity httpEntity = null; + HttpRequestBase method = null; + String responseBody = ""; + try { + if (httpClient == null) { + initPools(); + } + method = getRequest(uri, HttpGet.METHOD_NAME, DEFAULT_CONTENT_TYPE, 0); + HttpContext context = HttpClientContext.create(); + CloseableHttpResponse httpResponse = httpClient.execute(method, context); + httpEntity = httpResponse.getEntity(); + if (httpEntity != null) { + responseBody = EntityUtils.toString(httpEntity, "UTF-8"); +// logger.info("请求URL: " + uri + "+ 返回状态码:" + httpResponse.getStatusLine().getStatusCode()); + } + } catch (Exception e) { + if (method != null) { + method.abort(); + } + e.printStackTrace(); +// logger.error("execute get request exception, url:" + uri + ", exception:" + e.toString() + ",cost time(ms):" +// + (System.currentTimeMillis() - startTime)); + System.out.println("log:调用 HttpClientPoolUtil execute get request exception, url:" + uri + ", exception:" + e.toString() + ",cost time(ms):" + + (System.currentTimeMillis() - startTime)); + } finally { + if (httpEntity != null) { + try { + EntityUtils.consumeQuietly(httpEntity); + } catch (Exception e) { +// e.printStackTrace(); +// logger.error("close response exception, url:" + uri + ", exception:" + e.toString() +// + ",cost time(ms):" + (System.currentTimeMillis() - startTime)); + new Exception("close response exception, url:" + uri + ", exception:" + e.toString() + + ",cost time(ms):" + (System.currentTimeMillis() - startTime)) + .printStackTrace(); + } + } + } + return responseBody; + } +} \ No newline at end of file diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BaseTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BaseTest.java index b793a47c990a930579b749b1eec95abed6ad554e..ce3735c12894807efadd1f5673fc34eee43ae01b 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BaseTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BaseTest.java @@ -5,24 +5,24 @@ import com.taosdata.jdbc.utils.TDNodes; import org.junit.AfterClass; import org.junit.BeforeClass; -public class BaseTest { +public abstract class BaseTest { - private static boolean testCluster = false; + private static boolean testCluster = false; private static TDNodes nodes = new TDNodes(); - + @BeforeClass public static void setupEnv() { - try{ + try { if (nodes.getTDNode(1).getTaosdPid() != null) { System.out.println("Kill taosd before running JDBC test"); nodes.getTDNode(1).setRunning(1); nodes.stop(1); } - 
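For reference, `RestfulStatement.executeQuery` above is a thin wrapper over an HTTP POST: the SQL text is posted verbatim to `http://<host>:<port>/rest/sql` via `HttpClientPoolUtil.execute`, and the JSON reply carries `status`, `code`, `desc`, and `data` fields. A minimal sketch of the same round trip done by hand; the host name and port are placeholders (the tests in this patch use port 6041), and no claim is made about additional headers the server may require:

```java
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.taosdata.jdbc.rs.util.HttpClientPoolUtil;

public class RestSqlRoundTripSketch {
    public static void main(String[] args) {
        String url = "http://localhost:6041/rest/sql";      // same URL pattern RestfulStatement builds
        String body = HttpClientPoolUtil.execute(url, "select * from log.log"); // SQL is the POST body
        JSONObject resp = JSON.parseObject(body);
        if ("error".equals(resp.getString("status"))) {
            // RestfulStatement surfaces these two fields in its SQLException message
            System.err.println(resp.getString("code") + ": " + resp.getString("desc"));
        } else {
            // row data that RestfulResultSet consumes
            System.out.println(resp.getString("data"));
        }
    }
}
```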
nodes.setTestCluster(testCluster); + nodes.setTestCluster(testCluster); nodes.deploy(1); nodes.start(1); } catch (Exception e) { - e.printStackTrace(); + e.printStackTrace(); } } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBDriverTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBDriverTest.java index a0981063a5052f04b849b2187a78352a2c2560be..8adcdefb2974d5817793297091eee1c2f40a52b9 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBDriverTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBDriverTest.java @@ -1,47 +1,210 @@ package com.taosdata.jdbc; +import org.junit.BeforeClass; import org.junit.Test; -import java.sql.SQLException; +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.sql.*; import java.util.Properties; -import static org.junit.Assert.assertEquals; +import static org.junit.Assert.*; public class TSDBDriverTest { + private static final String[] validURLs = { + "jdbc:TAOS://localhost:0", + "jdbc:TAOS://localhost", + "jdbc:TAOS://localhost:6030/test", + "jdbc:TAOS://localhost:6030", + "jdbc:TAOS://localhost:6030/", + "jdbc:TSDB://localhost:6030", + "jdbc:TSDB://localhost:6030/", + "jdbc:TAOS://127.0.0.1:0/db?user=root&password=taosdata", + "jdbc:TAOS://:", + "jdbc:TAOS://:/", + "jdbc:TAOS://:/test", + "jdbc:TAOS://localhost:0/?user=root&password=taosdata" + }; + private static boolean islibLoaded = false; + private static boolean isTaosdActived; + + private Connection conn; + + @BeforeClass + public static void before() { + String osName = System.getProperty("os.name").toLowerCase(); + if (!osName.equals("linux")) + return; + // try to load taos lib + try { + System.loadLibrary("taos"); + islibLoaded = true; + } catch (UnsatisfiedLinkError error) { + System.out.println("load tdengine lib failed."); + error.printStackTrace(); + } + // check taosd is activated + try { + String[] cmd = {"/bin/bash", "-c", "ps -ef | grep taosd | grep -v \"grep\""}; + Process exec = Runtime.getRuntime().exec(cmd); + BufferedReader reader = new BufferedReader(new InputStreamReader(exec.getInputStream())); + int lineCnt = 0; + while (reader.readLine() != null) { + lineCnt++; + } + if (lineCnt > 0) + isTaosdActived = true; + } catch (IOException e) { + e.printStackTrace(); + } + + try { + Class.forName("com.taosdata.jdbc.TSDBDriver"); + } catch (ClassNotFoundException e) { + e.printStackTrace(); + } + } + + @Test + public void testConnectWithJdbcURL() { + final String url = "jdbc:TAOS://localhost:6030/log?user=root&password=taosdata"; + try { + if (islibLoaded && isTaosdActived) { + conn = DriverManager.getConnection(url); + assertNotNull("failure - connection should not be null", conn); + } + } catch (SQLException e) { + e.printStackTrace(); + fail("failure - should not throw Exception"); + } + } + + @Test + public void testConnectWithProperties() { + final String jdbcUrl = "jdbc:TAOS://localhost:6030/log?user=root&password=taosdata"; + Properties connProps = new Properties(); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + try { + if (islibLoaded && isTaosdActived) { + conn = DriverManager.getConnection(jdbcUrl, connProps); + assertNotNull("failure - connection should not be null", conn); + } + } catch (SQLException e) { + e.printStackTrace(); + fail("failure - should not throw Exception"); + } + } + @Test - 
public void urlParserTest() throws SQLException { + public void testConnectWithConfigFile() { + String jdbcUrl = "jdbc:TAOS://:/log?user=root&password=taosdata"; + Properties connProps = new Properties(); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + try { + if (islibLoaded && isTaosdActived) { + conn = DriverManager.getConnection(jdbcUrl, connProps); + assertNotNull("failure - connection should not be null", conn); + } + } catch (SQLException e) { + e.printStackTrace(); + fail("failure - should not throw Exception"); + } + } + + @Test(expected = SQLException.class) + public void testAcceptsURL() throws SQLException { + Driver driver = new TSDBDriver(); + for (String url : validURLs) { + assertTrue("failure - acceptsURL(\" " + url + " \") should be true", driver.acceptsURL(url)); + } + driver.acceptsURL(null); + fail("acceptsURL throws exception when parameter is null"); + } + + @Test + public void testParseURL() { TSDBDriver driver = new TSDBDriver(); - String url = "jdbc:TSDB://127.0.0.1:0/db"; - - Properties properties = new Properties(); - driver.parseURL(url, properties); - assertEquals(properties.get("host"), "127.0.0.1"); - assertEquals(properties.get("port"), "0"); - assertEquals(properties.get("dbname"), "db"); - assertEquals(properties.get("user"), "root"); - assertEquals(properties.get("password"), "your_password"); - - url = "jdbc:TSDB://127.0.0.1:0/log?charset=UTF-8"; - properties = new Properties(); - driver.parseURL(url, properties); - assertEquals(properties.get("host"), "127.0.0.1"); - assertEquals(properties.get("port"), "0"); - assertEquals(properties.get("dbname"), "log"); - assertEquals(properties.get("charset"), "UTF-8"); - - url = "jdbc:TSDB://127.0.0.1:0/"; - properties = new Properties(); - driver.parseURL(url, properties); - assertEquals(properties.get("host"), "127.0.0.1"); - assertEquals(properties.get("port"), "0"); - assertEquals(properties.get("dbname"), null); - - url = "jdbc:TSDB://127.0.0.1:0/db"; - properties = new Properties(); - driver.parseURL(url, properties); - assertEquals(properties.get("host"), "127.0.0.1"); - assertEquals(properties.get("port"), "0"); - assertEquals(properties.get("dbname"), "db"); + + String url = "jdbc:TAOS://127.0.0.1:0/db?user=root&password=taosdata&charset=UTF-8"; + Properties config = new Properties(); + Properties actual = driver.parseURL(url, config); + assertEquals("failure - host should be 127.0.0.1", "127.0.0.1", actual.get("host")); + assertEquals("failure - port should be 0", "0", actual.get("port")); + assertEquals("failure - dbname should be db", "db", actual.get("dbname")); + assertEquals("failure - user should be root", "root", actual.get("user")); + assertEquals("failure - password should be taosdata", "taosdata", actual.get("password")); + assertEquals("failure - charset should be UTF-8", "UTF-8", actual.get("charset")); + + url = "jdbc:TAOS://127.0.0.1:0"; + config = new Properties(); + actual = driver.parseURL(url, config); + assertEquals("failure - host should be 127.0.0.1", "127.0.0.1", actual.getProperty("host")); + assertEquals("failure - port should be 0", "0", actual.get("port")); + assertNull("failure - dbname should be null", actual.get("dbname")); + + url = "jdbc:TAOS://127.0.0.1:0/db"; + config = new Properties(); + actual = driver.parseURL(url, config); + assertEquals("failure - host should be 127.0.0.1", "127.0.0.1", 
actual.getProperty("host")); + assertEquals("failure - port should be 0", "0", actual.get("port")); + assertEquals("failure - dbname should be db", "db", actual.get("dbname")); + + url = "jdbc:TAOS://:/?"; + config = new Properties(); + config.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root"); + config.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata"); + actual = driver.parseURL(url, config); + assertEquals("failure - user should be root", "root", actual.getProperty("user")); + assertEquals("failure - password should be taosdata", "taosdata", actual.getProperty("password")); + assertNull("failure - host should be null", actual.getProperty("host")); + assertNull("failure - port should be null", actual.getProperty("port")); + assertNull("failure - dbname should be null", actual.getProperty("dbname")); + } + + @Test + public void testGetPropertyInfo() throws SQLException { + Driver driver = new TSDBDriver(); + final String url = "jdbc:TAOS://localhost:6030/log?user=root&password=taosdata"; + Properties connProps = new Properties(); + DriverPropertyInfo[] propertyInfo = driver.getPropertyInfo(url, connProps); + for (DriverPropertyInfo info : propertyInfo) { + if (info.name.equals(TSDBDriver.PROPERTY_KEY_HOST)) + assertEquals("failure - host should be localhost", "localhost", info.value); + if (info.name.equals(TSDBDriver.PROPERTY_KEY_PORT)) + assertEquals("failure - port should be 6030", "6030", info.value); + if (info.name.equals(TSDBDriver.PROPERTY_KEY_DBNAME)) + assertEquals("failure - dbname should be test", "log", info.value); + if (info.name.equals(TSDBDriver.PROPERTY_KEY_USER)) + assertEquals("failure - user should be root", "root", info.value); + if (info.name.equals(TSDBDriver.PROPERTY_KEY_PASSWORD)) + assertEquals("failure - password should be root", "taosdata", info.value); + } + } + + @Test + public void testGetMajorVersion() { + assertEquals("failure - getMajorVersion should be 2", 2, new TSDBDriver().getMajorVersion()); + } + + @Test + public void testGetMinorVersion() { + assertEquals("failure - getMinorVersion should be 0", 0, new TSDBDriver().getMinorVersion()); + } + + @Test + public void testJdbcCompliant() { + assertFalse("failure - jdbcCompliant should be false", new TSDBDriver().jdbcCompliant()); } + + @Test + public void testGetParentLogger() throws SQLFeatureNotSupportedException { + assertNull("failure - getParentLogger should be be null", new TSDBDriver().getParentLogger()); + } + } \ No newline at end of file diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/FailOverTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/FailOverTest.java new file mode 100644 index 0000000000000000000000000000000000000000..83295df5274a669ca2fc7fdbba506a97a01cc55c --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/FailOverTest.java @@ -0,0 +1,36 @@ +package com.taosdata.jdbc.cases; + +import org.junit.Test; + +import java.sql.*; +import java.text.SimpleDateFormat; +import java.util.Date; +import java.util.concurrent.TimeUnit; + +public class FailOverTest { + + private static final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd hh:mm:ss"); + + @Test + public void testFailOver() throws ClassNotFoundException { + Class.forName("com.taosdata.jdbc.TSDBDriver"); + final String url = "jdbc:TAOS://:/?user=root&password=taosdata"; + + long end = System.currentTimeMillis() + 1000 * 60 * 5; + while (System.currentTimeMillis() < end) { + try (Connection conn = DriverManager.getConnection(url)) { + Statement stmt = 
conn.createStatement(); + ResultSet resultSet = stmt.executeQuery("select server_status()"); + resultSet.next(); + int status = resultSet.getInt("server_status()"); + System.out.println(">>>>>>>>>" + sdf.format(new Date()) + " status : " + status); + stmt.close(); + TimeUnit.SECONDS.sleep(5); + } catch (SQLException | InterruptedException e) { + e.printStackTrace(); + } + } + + } + +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulDriverTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulDriverTest.java new file mode 100644 index 0000000000000000000000000000000000000000..a91d1c2d6b84f1c79cf106a44645abaf87a7a3ab --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulDriverTest.java @@ -0,0 +1,40 @@ +package com.taosdata.jdbc.rs; + +import org.junit.Assert; +import org.junit.Test; + +import java.sql.*; + +public class RestfulDriverTest { + + @Test + public void testCase001() { + try { + Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); + Connection connection = DriverManager.getConnection("jdbc:TAOS-RS://master:6041/?user=root&password=taosdata"); + Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery("select * from log.log"); + ResultSetMetaData metaData = resultSet.getMetaData(); + while (resultSet.next()) { + for (int i = 1; i <= metaData.getColumnCount(); i++) { + String column = metaData.getColumnLabel(i); + String value = resultSet.getString(i); + System.out.print(column + ":" + value + "\t"); + } + System.out.println(); + } + statement.close(); + connection.close(); + } catch (SQLException | ClassNotFoundException e) { + e.printStackTrace(); + } + } + + @Test + public void testAcceptUrl() throws SQLException { + Driver driver = new RestfulDriver(); + boolean isAccept = driver.acceptsURL("jdbc:TAOS-RS://master:6041"); + Assert.assertTrue(isAccept); + } + +} diff --git a/src/connector/odbc/CMakeLists.txt b/src/connector/odbc/CMakeLists.txt index 58e7b6acf1f8424c8b8f72578a9ece027d1a9447..0d8c07041aa741793b7a1b8db20c3a3b470cf193 100644 --- a/src/connector/odbc/CMakeLists.txt +++ b/src/connector/odbc/CMakeLists.txt @@ -3,7 +3,6 @@ PROJECT(TDengine) IF (TD_LINUX_64) find_program(HAVE_ODBCINST NAMES odbcinst) - IF (HAVE_ODBCINST) include(CheckSymbolExists) # shall we revert CMAKE_REQUIRED_LIBRARIES and how? 
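Taken together, the tests above exercise two separate drivers: the native `com.taosdata.jdbc.TSDBDriver` (URL prefix `jdbc:TAOS://`, loads the `taos` native library and talks to a reachable `taosd`, port 6030 in the tests, with an empty-host form `jdbc:TAOS://:/` used by the config-file test) and the new `com.taosdata.jdbc.rs.RestfulDriver` (URL prefix `jdbc:TAOS-RS://`, plain HTTP against the REST port, 6041 in the tests). A side-by-side sketch with placeholder host names; it assumes both services are running locally:

```java
import java.sql.Connection;
import java.sql.DriverManager;

public class DriverSelectionSketch {
    public static void main(String[] args) throws Exception {
        // Native driver: JNI-based (the tests call System.loadLibrary("taos"));
        // the empty form "jdbc:TAOS://:/" is what testConnectWithConfigFile uses.
        Class.forName("com.taosdata.jdbc.TSDBDriver");
        try (Connection nativeConn = DriverManager.getConnection(
                "jdbc:TAOS://localhost:6030/log?user=root&password=taosdata")) {
            System.out.println("connected through the native driver");
        }

        // RESTful driver: pure HTTP, no native library required.
        Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
        try (Connection restConn = DriverManager.getConnection(
                "jdbc:TAOS-RS://localhost:6041/?user=root&password=taosdata")) {
            System.out.println("connected through the RESTful driver");
        }
    }
}
```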
@@ -14,20 +13,43 @@ IF (TD_LINUX_64) message(WARNING "unixodbc-dev is not installed yet, you may install it under ubuntu by typing: sudo apt install unixodbc-dev") else () message(STATUS "unixodbc/unixodbc-dev are installed, and odbc connector will be built") - AUX_SOURCE_DIRECTORY(src SRC) - - # generate dynamic library (*.so) - ADD_LIBRARY(todbc SHARED ${SRC}) - SET_TARGET_PROPERTIES(todbc PROPERTIES CLEAN_DIRECT_OUTPUT 1) - SET_TARGET_PROPERTIES(todbc PROPERTIES VERSION ${TD_VER_NUMBER} SOVERSION 1) - TARGET_LINK_LIBRARIES(todbc taos) - - install(CODE "execute_process(COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/src/install.sh ${CMAKE_BINARY_DIR})") - - ADD_SUBDIRECTORY(tests) + find_package(FLEX) + if(NOT FLEX_FOUND) + message(FATAL_ERROR "you need to install flex first") + else () + if (CMAKE_C_COMPILER_ID STREQUAL "GNU" AND CMAKE_C_COMPILER_VERSION VERSION_LESS 5.0.0) + message(WARNING "gcc 4.8.0 will complain too much about flex-generated code, we just bypass building ODBC driver in such case") + else () + SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -Wconversion") + SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -Wconversion") + ADD_SUBDIRECTORY(src) + ADD_SUBDIRECTORY(tools) + ADD_SUBDIRECTORY(tests) + endif () + endif() endif() ELSE () message(WARNING "unixodbc is not installed yet, you may install it under ubuntu by typing: sudo apt install unixodbc") ENDIF () ENDIF () +IF (TD_WINDOWS_64) + find_package(ODBC) + if (NOT ODBC_FOUND) + message(FATAL_ERROR "you need to install ODBC first") + else () + message(STATUS "ODBC_INCLUDE_DIRS: ${ODBC_INCLUDE_DIRS}") + message(STATUS "ODBC_LIBRARIES: ${ODBC_LIBRARIES}") + message(STATUS "ODBC_CONFIG: ${ODBC_CONFIG}") + endif () + find_package(FLEX) + if(NOT FLEX_FOUND) + message(WARNING "you need to install flex first\n" + "you may go to: https://github.com/lexxmark/winflexbison\n" + "or download from: https://github.com/lexxmark/winflexbison/releases") + else () + ADD_SUBDIRECTORY(src) + ADD_SUBDIRECTORY(tools) + ADD_SUBDIRECTORY(tests) + endif() +ENDIF () diff --git a/src/connector/odbc/README.md b/src/connector/odbc/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e026884a0766772ac315acd3d0cac6535fb77557 --- /dev/null +++ b/src/connector/odbc/README.md @@ -0,0 +1,88 @@ + +# ODBC Driver # + +- **very initial implementation of ODBC driver for TAOS + +- **currently partially supported ODBC functions are: ` +SQLAllocEnv +SQLFreeEnv +SQLAllocConnect +SQLFreeConnect +SQLConnect +SQLDisconnect +SQLAllocStmt +SQLAllocHandle +SQLFreeStmt +SQLExecDirect +SQLExecDirectW +SQLNumResultCols +SQLRowCount +SQLColAttribute +SQLGetData +SQLFetch +SQLPrepare +SQLExecute +SQLGetDiagField +SQLGetDiagRec +SQLBindParameter +SQLDriverConnect +SQLSetConnectAttr +SQLDescribeCol +SQLNumParams +SQLSetStmtAttr +ConfigDSN +` + +- **internationalized, you can specify different charset/code page for easy going. eg.: insert `utf-8.zh_cn` characters into database located in linux machine, while query them out in `gb2312/gb18030/...` code page in your chinese windows machine, or vice-versa. and much fun, insert `gb2312/gb18030/...` characters into database located in linux box from +your japanese windows box, and query them out in your local chinese windows machine. + +- **enable ODBC-aware software to communicate with TAOS. + +- **enable any language with ODBC-bindings/ODBC-plugings to communicate with TAOS + +- **still going on... 
+ +# Building and Testing +**Note**: all `work` is done in TDengine's project directory + + +# Building under Linux, using Ubuntu as an example +``` +sudo apt install unixodbc unixodbc-dev flex +rm -rf debug && cmake -B debug && cmake --build debug && cmake --install debug && echo yes +``` +# Building under Windows, using Windows 10 as an example +- install the Windows `flex` port. We use [winflexbison](https://github.com/lexxmark/winflexbison) at the moment. Please note: append `` to your `PATH`. +- install Microsoft Visual Studio; VS2015 is used as the example here +- `"C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" amd64` +- `rmdir /s /q debug` +- `cmake -G "NMake Makefiles" -B debug` +- `cmake --build debug` +- `cmake --install debug` +- open your `Command Prompt` with Administrator privileges +- remove the previously installed TAOS ODBC driver: run `C:\TDengine\todbcinst -u -f -n TAOS` +- install the TAOS ODBC driver that was just built: run `C:\TDengine\todbcinst -i -n TAOS -p C:\TDengine\driver` +- add a new user DSN: run `odbcconf CONFIGDSN TAOS "DSN=TAOS_DSN|Server=:` + +# Test +We strongly suggest building on both Linux (Ubuntu) and Windows (Windows 10), because currently TAOS only has its server-side port on the Linux platform. +**Note1**: content within <> must be modified to match your environment +**Note2**: `.stmts` source files are all encoded in `UTF-8` +## start taosd in Linux, assuming the default charset is `UTF-8` +``` +taosd -c ./debug/test/cfg +``` +## create data in Linux +``` +./debug/build/bin/tcodbc --dsn TAOS_DSN --uid --pwd --sts ./src/connector/odbc/tests/create_data.stmts +-- +./debug/build/bin/tcodbc --dcs 'Driver=TAOS;UID=;PWD=;Server=:;client_enc=UTF-8' ./src/connector/odbc/tests/create_data.stmts +``` +## query data in Windows +``` +.\debug\build\bin\tcodbc --dsn TAOS_DSN --uid --pwd --sts .\src\connector\odbc\tests\query_data.stmts +-- +.\debug\build\bin\tcodbc --dcs "Driver=TAOS;UID=;PWD=;Server=:;client_enc=UTF-8" .\src\connector\odbc\tests\query_data.stmts +``` + + diff --git a/src/connector/odbc/src/CMakeLists.txt b/src/connector/odbc/src/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..2699e1bc90e162c80d27d690e1f7163747616526 --- /dev/null +++ b/src/connector/odbc/src/CMakeLists.txt @@ -0,0 +1,54 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +PROJECT(TDengine) + +IF (TD_LINUX_64) + FLEX_TARGET(todbcFlexScanner + todbc_scanner.l + ${CMAKE_CURRENT_BINARY_DIR}/todbc_scanner.c + ) + set(todbc_flex_scanner_src + ${FLEX_todbcFlexScanner_OUTPUTS} + ) + AUX_SOURCE_DIRECTORY(. SRC) + + # generate dynamic library (*.so) + ADD_LIBRARY(todbc SHARED ${SRC} ${todbc_flex_scanner_src}) + SET_TARGET_PROPERTIES(todbc PROPERTIES CLEAN_DIRECT_OUTPUT 1) + SET_TARGET_PROPERTIES(todbc PROPERTIES VERSION ${TD_VER_NUMBER} SOVERSION 1) + TARGET_LINK_LIBRARIES(todbc taos odbcinst) + target_include_directories(todbc PUBLIC .) + + install(CODE "execute_process(COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/install.sh ${CMAKE_BINARY_DIR})") +ENDIF () + +IF (TD_WINDOWS_64) + FLEX_TARGET(todbcFlexScanner + todbc_scanner.l + ${CMAKE_CURRENT_BINARY_DIR}/todbc_scanner.c + ) + set(todbc_flex_scanner_src + ${FLEX_todbcFlexScanner_OUTPUTS} + ) + AUX_SOURCE_DIRECTORY(. SRC) + + # generate dynamic library (*.dll) + ADD_LIBRARY(todbc SHARED + ${SRC} + ${todbc_flex_scanner_src} + ${CMAKE_CURRENT_BINARY_DIR}/todbc.rc + todbc.def) + TARGET_LINK_LIBRARIES(todbc taos_static odbccp32 legacy_stdio_definitions) + target_include_directories(todbc PUBLIC .)
+ target_compile_definitions(todbc PRIVATE "todbc_EXPORT") + + CONFIGURE_FILE("todbc.rc.in" + "${CMAKE_CURRENT_BINARY_DIR}/todbc.rc") + SET_TARGET_PROPERTIES(todbc PROPERTIES LINK_FLAGS + /DEF:todbc.def) + SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /GL") + SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /GL") + + INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/todbc.lib DESTINATION driver) + INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/todbc.exp DESTINATION driver) + INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/todbc.dll DESTINATION driver) +ENDIF () diff --git a/src/connector/odbc/src/install.sh b/src/connector/odbc/src/install.sh index b8c04677c7199384f7bc0b66515eb04d0fe560fc..02f31de70ed76e150fbef5d388cbd8a3e9ba73b3 100755 --- a/src/connector/odbc/src/install.sh +++ b/src/connector/odbc/src/install.sh @@ -9,16 +9,18 @@ rm -f "${BLD_DIR}/template.dsn" cat > "${BLD_DIR}/template.ini" < "${BLD_DIR}/template.dsn" < +#include #include -#include +#ifndef FALSE +#define FALSE 0 +#endif +#ifndef TRUE +#define TRUE 1 +#endif + +#define UTF8_ENC "UTF-8" +#define UTF16_ENC "UCS-2LE" +#define UNICODE_ENC "UCS-4LE" +#define GB18030_ENC "GB18030" #define GET_REF(obj) atomic_load_64(&obj->refcount) #define INC_REF(obj) atomic_add_fetch_64(&obj->refcount, 1) @@ -41,15 +58,22 @@ do { obj->err.err_no = eno; \ const char* estr = tstrerror(eno); \ if (!estr) estr = "Unknown error"; \ - int n = snprintf(NULL, 0, "%s: @[%d][TSDB:%x]" err_fmt "", estr, __LINE__, eno, ##__VA_ARGS__); \ + int n = snprintf(NULL, 0, "[TSDB:%x]%s: @%s[%d]" err_fmt "", \ + eno, estr, \ + basename((char*)__FILE__), __LINE__, \ + ##__VA_ARGS__); \ if (n<0) break; \ - char *err_str = (char*)realloc(obj->err.err_str, n+1); \ + char *err_str = (char*)realloc(obj->err.err_str, (size_t)n+1); \ if (!err_str) break; \ obj->err.err_str = err_str; \ - snprintf(obj->err.err_str, n+1, "%s: @[%d][TSDB:%x]" err_fmt "", estr, __LINE__, eno, ##__VA_ARGS__); \ + snprintf(obj->err.err_str, (size_t)n+1, "[TSDB:%x]%s: @%s[%d]" err_fmt "", \ + eno, estr, \ + basename((char*)__FILE__), __LINE__, \ + ##__VA_ARGS__); \ snprintf((char*)obj->err.sql_state, sizeof(obj->err.sql_state), "%s", sqlstate); \ } while (0) + #define CLR_ERROR(obj) \ do { \ obj->err.err_no = TSDB_CODE_SUCCESS; \ @@ -57,14 +81,13 @@ do { obj->err.sql_state[0] = '\0'; \ } while (0) -#define FILL_ERROR(obj) \ -do { \ - size_t n = sizeof(obj->err.sql_state); \ - if (Sqlstate) strncpy((char*)Sqlstate, (char*)obj->err.sql_state, n); \ - if (NativeError) *NativeError = obj->err.err_no; \ - snprintf((char*)MessageText, BufferLength, "%s", obj->err.err_str); \ - if (TextLength && obj->err.err_str) *TextLength = strlen(obj->err.err_str); \ - if (TextLength && obj->err.err_str) *TextLength = utf8_chars(obj->err.err_str); \ +#define FILL_ERROR(obj) \ +do { \ + size_t n = sizeof(obj->err.sql_state); \ + if (Sqlstate) strncpy((char*)Sqlstate, (char*)obj->err.sql_state, n); \ + if (NativeError) *NativeError = obj->err.err_no; \ + snprintf((char*)MessageText, (size_t)BufferLength, "%s", obj->err.err_str); \ + if (TextLength && obj->err.err_str) *TextLength = (SQLSMALLINT)utf8_chars(obj->err.err_str); \ } while (0) #define FREE_ERROR(obj) \ @@ -87,7 +110,7 @@ do { SET_ERROR(obj, sqlstate, TSDB_CODE_QRY_INVALID_QHANDLE, err_fmt, ##__VA_ARGS__); \ } while (0); -#define SDUP(s,n) (s ? (s[n] ? (const char*)strndup((const char*)s,n) : (const char*)s) : strdup("")) +#define SDUP(s,n) (s ? (s[(size_t)n] ? 
(const char*)strndup((const char*)s,(size_t)n) : (const char*)s) : strdup("")) #define SFRE(x,s,n) \ do { \ if (x==(const char*)s) break; \ @@ -124,6 +147,15 @@ do { \ r_091c = SQL_SUCCESS; \ } while (0) +#define NORM_STR_LENGTH(obj, ptr, len) \ +do { \ + if ((len) < 0 && (len)!=SQL_NTS) { \ + SET_ERROR((obj), "HY090", TSDB_CODE_ODBC_BAD_ARG, ""); \ + return SQL_ERROR; \ + } \ + if (len==SQL_NTS) len = (ptr) ? (SQLSMALLINT)strlen((const char*)(ptr)) : 0; \ +} while (0) + #define PROFILING 0 #define PROFILE(statement) \ @@ -138,22 +170,67 @@ do { \ gettimeofday(&tv1, NULL); \ double delta = difftime(tv1.tv_sec, tv0.tv_sec); \ delta *= 1000000; \ - delta += (tv1.tv_usec-tv0.tv_usec); \ + delta += (double)(tv1.tv_usec-tv0.tv_usec); \ delta /= 1000000; \ D("%s: elapsed: [%.6f]s", #statement, delta); \ } while (0) - -#define CHK_CONV(statement) \ -do { \ - const char *sqlstate = statement; \ - if (sqlstate) { \ - SET_ERROR(sql, sqlstate, TSDB_CODE_ODBC_OUT_OF_RANGE, \ - "no convertion from [%s[%d/0x%x]] to [%s[%d/0x%x]] for parameter [%d]", \ - sql_c_type(valueType), valueType, valueType, \ - taos_data_type(type), type, type, idx+1); \ - return SQL_ERROR; \ - } \ +#define CHK_CONV(todb, statement) \ +do { \ + TSDB_CONV_CODE code_0c80 = (statement); \ + switch (code_0c80) { \ + case TSDB_CONV_OK: return SQL_SUCCESS; \ + case TSDB_CONV_OOM: \ + case TSDB_CONV_NOT_AVAIL: { \ + SET_ERROR(sql, "HY001", TSDB_CODE_ODBC_OOM, ""); \ + return SQL_ERROR; \ + } break; \ + case TSDB_CONV_OOR: { \ + SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_OOR, ""); \ + return SQL_ERROR; \ + } break; \ + case TSDB_CONV_CHAR_NOT_NUM: \ + case TSDB_CONV_CHAR_NOT_TS: { \ + SET_ERROR(sql, "22018", TSDB_CODE_ODBC_CONV_CHAR_NOT_NUM, ""); \ + return SQL_ERROR; \ + } break; \ + case TSDB_CONV_NOT_VALID_TS: { \ + SET_ERROR(sql, "22007", TSDB_CODE_ODBC_CONV_NOT_VALID_TS, ""); \ + return SQL_ERROR; \ + } break; \ + case TSDB_CONV_TRUNC_FRACTION: { \ + SET_ERROR(sql, "01S07", TSDB_CODE_ODBC_CONV_TRUNC_FRAC, ""); \ + return todb ? 
SQL_ERROR : SQL_SUCCESS_WITH_INFO; \ + } break; \ + case TSDB_CONV_TRUNC: { \ + SET_ERROR(sql, "22001", TSDB_CODE_ODBC_CONV_TRUNC, ""); \ + return SQL_ERROR; \ + } break; \ + case TSDB_CONV_SRC_TOO_LARGE: { \ + SET_ERROR(sql, "22001", TSDB_CODE_ODBC_CONV_SRC_TOO_LARGE, ""); \ + return SQL_ERROR; \ + } break; \ + case TSDB_CONV_SRC_BAD_SEQ: { \ + SET_ERROR(sql, "22001", TSDB_CODE_ODBC_CONV_SRC_BAD_SEQ, ""); \ + return SQL_ERROR; \ + } break; \ + case TSDB_CONV_SRC_INCOMPLETE: { \ + SET_ERROR(sql, "22001", TSDB_CODE_ODBC_CONV_SRC_INCOMPLETE, ""); \ + return SQL_ERROR; \ + } break; \ + case TSDB_CONV_SRC_GENERAL: { \ + SET_ERROR(sql, "22001", TSDB_CODE_ODBC_CONV_SRC_GENERAL, ""); \ + return SQL_ERROR; \ + } break; \ + case TSDB_CONV_BAD_CHAR: { \ + SET_ERROR(sql, "22001", TSDB_CODE_ODBC_CONV_TRUNC, ""); \ + return SQL_ERROR; \ + } break; \ + default: { \ + DASSERTX(0, "internal logic error: %d", code_0c80); \ + return SQL_ERROR; /* never reached here */ \ + } break; \ + } \ } while (0) typedef struct env_s env_t; @@ -185,6 +262,9 @@ struct env_s { uint64_t refcount; unsigned int destroying:1; + char env_locale[64]; + char env_charset[64]; + taos_error_t err; }; @@ -192,6 +272,16 @@ struct conn_s { uint64_t refcount; env_t *env; + char client_enc[64]; // ODBC client that communicates with this driver + char server_enc[64]; // taos dynamic library that's loaded by this driver + + tsdb_conv_t *client_to_server; + tsdb_conv_t *server_to_client; + tsdb_conv_t *utf8_to_client; + tsdb_conv_t *utf16_to_utf8; + tsdb_conv_t *utf16_to_server; + tsdb_conv_t *client_to_utf8; + TAOS *taos; taos_error_t err; @@ -229,50 +319,98 @@ struct c_target_s { static pthread_once_t init_once = PTHREAD_ONCE_INIT; static void init_routine(void); -// conversions - -const char* tsdb_int64_to_bit(int64_t src, int8_t *dst); -const char* tsdb_int64_to_tinyint(int64_t src, int8_t *dst); -const char* tsdb_int64_to_smallint(int64_t src, int16_t *dst); -const char* tsdb_int64_to_int(int64_t src, int32_t *dst); -const char* tsdb_int64_to_bigint(int64_t src, int64_t *dst); -const char* tsdb_int64_to_ts(int64_t src, int64_t *dst); -const char* tsdb_int64_to_float(int64_t src, float *dst); -const char* tsdb_int64_to_double(int64_t src, double *dst); -const char* tsdb_int64_to_char(int64_t src, char *dst, size_t dlen); - -const char* tsdb_double_to_bit(double src, int precision, int8_t *dst); -const char* tsdb_double_to_tinyint(double src, int precision, int8_t *dst); -const char* tsdb_double_to_smallint(double src, int precision, int16_t *dst); -const char* tsdb_double_to_int(double src, int precision, int32_t *dst); -const char* tsdb_double_to_bigint(double src, int precision, int64_t *dst); -const char* tsdb_double_to_ts(double src, int precision, int64_t *dst); -const char* tsdb_double_to_float(double src, int precision, float *dst); -const char* tsdb_double_to_double(double src, int precision, double *dst); -const char* tsdb_double_to_char(double src, int precision, char *dst, size_t dlen); - -const char* tsdb_chars_to_bit(const char *src, int8_t *dst); -const char* tsdb_chars_to_tinyint(const char *src, int8_t *dst); -const char* tsdb_chars_to_smallint(const char *src, int16_t *dst); -const char* tsdb_chars_to_int(const char *src, int32_t *dst); -const char* tsdb_chars_to_bigint(const char *src, int64_t *dst); -const char* tsdb_chars_to_ts(const char *src, int64_t *dst); -const char* tsdb_chars_to_float(const char *src, float *dst); -const char* tsdb_chars_to_double(const char *src, double *dst); -const char* 
tsdb_chars_to_char(const char *src, char *dst, size_t dlen); - - -static int do_field_display_size(TAOS_FIELD *field); +static size_t do_field_display_size(TAOS_FIELD *field); + +static tsdb_conv_t* tsdb_conn_client_to_server(conn_t *conn) { + if (!conn->client_to_server) { + conn->client_to_server = tsdb_conv_open(conn->client_enc, conn->server_enc); + } + return conn->client_to_server; +} + +static tsdb_conv_t* tsdb_conn_server_to_client(conn_t *conn) { + if (!conn->server_to_client) { + conn->server_to_client = tsdb_conv_open(conn->server_enc, conn->client_enc); + } + return conn->server_to_client; +} + +static tsdb_conv_t* tsdb_conn_utf8_to_client(conn_t *conn) { + if (!conn->utf8_to_client) { + conn->utf8_to_client = tsdb_conv_open(UTF8_ENC, conn->client_enc); + } + return conn->utf8_to_client; +} + +static tsdb_conv_t* tsdb_conn_utf16_to_utf8(conn_t *conn) { + if (!conn->utf16_to_utf8) { + conn->utf16_to_utf8 = tsdb_conv_open(UTF16_ENC, UTF8_ENC); + } + return conn->utf16_to_utf8; +} + +static tsdb_conv_t* tsdb_conn_utf16_to_server(conn_t *conn) { + if (!conn->utf16_to_server) { + conn->utf16_to_server = tsdb_conv_open(UTF16_ENC, conn->server_enc); + } + return conn->utf16_to_server; +} + +static tsdb_conv_t* tsdb_conn_client_to_utf8(conn_t *conn) { + if (!conn->client_to_utf8) { + conn->client_to_utf8 = tsdb_conv_open(conn->client_enc, UTF8_ENC); + } + return conn->client_to_utf8; +} + +static void tsdb_conn_close_convs(conn_t *conn) { + if (conn->client_to_server) { + tsdb_conv_close(conn->client_to_server); + conn->client_to_server = NULL; + } + if (conn->server_to_client) { + tsdb_conv_close(conn->server_to_client); + conn->server_to_client = NULL; + } + if (conn->utf8_to_client) { + tsdb_conv_close(conn->utf8_to_client); + conn->utf8_to_client = NULL; + } + if (conn->utf16_to_utf8) { + tsdb_conv_close(conn->utf16_to_utf8); + conn->utf16_to_utf8 = NULL; + } + if (conn->utf16_to_server) { + tsdb_conv_close(conn->utf16_to_server); + conn->utf16_to_server = NULL; + } + if (conn->client_to_utf8) { + tsdb_conv_close(conn->client_to_utf8); + conn->client_to_utf8 = NULL; + } +} + +#define SFREE(buffer, v, src) \ +do { \ + const char *v_096a = (const char*)(v); \ + const char *src_6a = (const char*)(src); \ + if (v_096a && v_096a!=src_6a && !is_owned_by_stack_buffer((buffer), v_096a)) { \ + free((char*)v_096a); \ + } \ +} while (0) static SQLRETURN doSQLAllocEnv(SQLHENV *EnvironmentHandle) { pthread_once(&init_once, init_routine); env_t *env = (env_t*)calloc(1, sizeof(*env)); - if (!env) return SQL_ERROR; + if (!env) return SQL_INVALID_HANDLE; DASSERT(INC_REF(env)>0); + snprintf(env->env_locale, sizeof(env->env_locale), "%s", tsLocale); + snprintf(env->env_charset, sizeof(env->env_charset), "%s", tsCharset); + *EnvironmentHandle = env; CLR_ERROR(env); @@ -289,7 +427,7 @@ SQLRETURN SQL_API SQLAllocEnv(SQLHENV *EnvironmentHandle) static SQLRETURN doSQLFreeEnv(SQLHENV EnvironmentHandle) { env_t *env = (env_t*)EnvironmentHandle; - if (!env) return SQL_ERROR; + if (!env) return SQL_INVALID_HANDLE; DASSERT(GET_REF(env)==1); @@ -317,7 +455,12 @@ static SQLRETURN doSQLAllocConnect(SQLHENV EnvironmentHandle, SQLHDBC *ConnectionHandle) { env_t *env = (env_t*)EnvironmentHandle; - if (!env) return SQL_ERROR; + if (!env) return SQL_INVALID_HANDLE; + + if (!ConnectionHandle) { + SET_ERROR(env, "HY009", TSDB_CODE_ODBC_BAD_ARG, "ConnectionHandle [%p] not valid", ConnectionHandle); + return SQL_ERROR; + } DASSERT(INC_REF(env)>1); @@ -330,6 +473,10 @@ static SQLRETURN doSQLAllocConnect(SQLHENV 
EnvironmentHandle, } conn->env = env; + + snprintf(conn->client_enc, sizeof(conn->client_enc), "%s", conn->env->env_charset); + snprintf(conn->server_enc, sizeof(conn->server_enc), "%s", conn->env->env_charset); + *ConnectionHandle = conn; DASSERT(INC_REF(conn)>0); @@ -353,7 +500,7 @@ SQLRETURN SQL_API SQLAllocConnect(SQLHENV EnvironmentHandle, static SQLRETURN doSQLFreeConnect(SQLHDBC ConnectionHandle) { conn_t *conn = (conn_t*)ConnectionHandle; - if (!conn) return SQL_ERROR; + if (!conn) return SQL_INVALID_HANDLE; DASSERT(GET_REF(conn)==1); @@ -370,6 +517,7 @@ static SQLRETURN doSQLFreeConnect(SQLHDBC ConnectionHandle) conn->env = NULL; FREE_ERROR(conn); + tsdb_conn_close_convs(conn); free(conn); } while (0); @@ -388,6 +536,8 @@ static SQLRETURN doSQLConnect(SQLHDBC ConnectionHandle, SQLCHAR *UserName, SQLSMALLINT NameLength2, SQLCHAR *Authentication, SQLSMALLINT NameLength3) { + stack_buffer_t buffer; buffer.next = 0; + conn_t *conn = (conn_t*)ConnectionHandle; if (!conn) return SQL_ERROR; @@ -396,28 +546,58 @@ static SQLRETURN doSQLConnect(SQLHDBC ConnectionHandle, return SQL_ERROR; } - const char *serverName = SDUP(ServerName, NameLength1); - const char *userName = SDUP(UserName, NameLength2); - const char *auth = SDUP(Authentication, NameLength3); + NORM_STR_LENGTH(conn, ServerName, NameLength1); + NORM_STR_LENGTH(conn, UserName, NameLength2); + NORM_STR_LENGTH(conn, Authentication, NameLength3); + + if (NameLength1>SQL_MAX_DSN_LENGTH) { + SET_ERROR(conn, "HY090", TSDB_CODE_ODBC_BAD_ARG, ""); + return SQL_ERROR; + } + + tsdb_conv_t *client_to_server = tsdb_conn_client_to_server(conn); + const char *dsn = NULL; + const char *uid = NULL; + const char *pwd = NULL; + const char *svr = NULL; + char server[4096]; server[0] = '\0'; do { - if ((ServerName && !serverName) || (UserName && !userName) || (Authentication && !auth)) { + tsdb_conv(client_to_server, &buffer, (const char*)ServerName, (size_t)NameLength1, &dsn, NULL); + tsdb_conv(client_to_server, &buffer, (const char*)UserName, (size_t)NameLength2, &uid, NULL); + tsdb_conv(client_to_server, &buffer, (const char*)Authentication, (size_t)NameLength3, &pwd, NULL); + int n = SQLGetPrivateProfileString(dsn, "Server", "", server, sizeof(server)-1, "Odbc.ini"); + if (n<=0) { + snprintf(server, sizeof(server), "localhost:6030"); // all 7-bit ascii + } + tsdb_conv(client_to_server, &buffer, (const char*)server, (size_t)strlen(server), &svr, NULL); + + if ((!dsn) || (!uid) || (!pwd) || (!svr)) { SET_ERROR(conn, "HY001", TSDB_CODE_ODBC_OOM, ""); break; } + char *ip = NULL; + int port = 0; + char *p = strchr(svr, ':'); + if (p) { + ip = strndup(svr, (size_t)(p-svr)); + port = atoi(p+1); + } + // TODO: data-race // TODO: shall receive ip/port from odbc.ini - conn->taos = taos_connect("localhost", userName, auth, NULL, 0); + conn->taos = taos_connect(ip, uid, pwd, NULL, (uint16_t)port); if (!conn->taos) { - SET_ERROR(conn, "08001", terrno, "failed to connect to data source"); + SET_ERROR(conn, "08001", terrno, "failed to connect to data source for DSN[%s] @[%s:%d]", dsn, ip, port); break; } } while (0); - SFRE(serverName, ServerName, NameLength1); - SFRE(userName, UserName, NameLength2); - SFRE(auth, Authentication, NameLength3); + tsdb_conv_free(client_to_server, dsn, &buffer, (const char*)ServerName); + tsdb_conv_free(client_to_server, uid, &buffer, (const char*)UserName); + tsdb_conv_free(client_to_server, pwd, &buffer, (const char*)Authentication); + tsdb_conv_free(client_to_server, svr, &buffer, (const char*)server); return conn->taos ? 
SQL_SUCCESS : SQL_ERROR; } @@ -437,7 +617,7 @@ SQLRETURN SQL_API SQLConnect(SQLHDBC ConnectionHandle, static SQLRETURN doSQLDisconnect(SQLHDBC ConnectionHandle) { conn_t *conn = (conn_t*)ConnectionHandle; - if (!conn) return SQL_ERROR; + if (!conn) return SQL_INVALID_HANDLE; if (conn->taos) { taos_close(conn->taos); @@ -455,10 +635,15 @@ SQLRETURN SQL_API SQLDisconnect(SQLHDBC ConnectionHandle) } static SQLRETURN doSQLAllocStmt(SQLHDBC ConnectionHandle, - SQLHSTMT *StatementHandle) + SQLHSTMT *StatementHandle) { conn_t *conn = (conn_t*)ConnectionHandle; - if (!conn) return SQL_ERROR; + if (!conn) return SQL_INVALID_HANDLE; + + if (!StatementHandle) { + SET_ERROR(conn, "HY009", TSDB_CODE_ODBC_BAD_ARG, "StatementHandle [%p] not valid", StatementHandle); + return SQL_ERROR; + } DASSERT(INC_REF(conn)>1); @@ -495,20 +680,17 @@ static SQLRETURN doSQLAllocHandle(SQLSMALLINT HandleType, SQLHANDLE InputHandle, switch (HandleType) { case SQL_HANDLE_ENV: { SQLHENV env = {0}; + if (!OutputHandle) return SQL_ERROR; SQLRETURN r = doSQLAllocEnv(&env); - if (r==SQL_SUCCESS && OutputHandle) *OutputHandle = env; + if (r==SQL_SUCCESS) *OutputHandle = env; return r; } break; case SQL_HANDLE_DBC: { - SQLHDBC dbc = {0}; - SQLRETURN r = doSQLAllocConnect(InputHandle, &dbc); - if (r==SQL_SUCCESS && OutputHandle) *OutputHandle = dbc; + SQLRETURN r = doSQLAllocConnect(InputHandle, OutputHandle); return r; } break; case SQL_HANDLE_STMT: { - SQLHSTMT stmt = {0}; - SQLRETURN r = doSQLAllocStmt(InputHandle, &stmt); - if (r==SQL_SUCCESS && OutputHandle) *OutputHandle = stmt; + SQLRETURN r = doSQLAllocStmt(InputHandle, OutputHandle); return r; } break; default: { @@ -528,12 +710,20 @@ static SQLRETURN doSQLFreeStmt(SQLHSTMT StatementHandle, SQLUSMALLINT Option) { sql_t *sql = (sql_t*)StatementHandle; - if (!sql) return SQL_ERROR; - - if (Option == SQL_CLOSE) return SQL_SUCCESS; - if (Option != SQL_DROP) { - SET_ERROR(sql, "HY000", TSDB_CODE_ODBC_NOT_SUPPORT, "free statement with Option[%x] not supported yet", Option); - return SQL_ERROR; + if (!sql) return SQL_INVALID_HANDLE; + + switch (Option) { + case SQL_CLOSE: return SQL_SUCCESS; + case SQL_DROP: break; + case SQL_UNBIND: + case SQL_RESET_PARAMS: { + SET_ERROR(sql, "HY000", TSDB_CODE_ODBC_NOT_SUPPORT, "free statement with Option[%x] not supported yet", Option); + return SQL_ERROR; + } break; + default: { + SET_ERROR(sql, "HY092", TSDB_CODE_ODBC_OUT_OF_RANGE, "free statement with Option[%x] not supported yet", Option); + return SQL_ERROR; + } break; } DASSERT(GET_REF(sql)==1); @@ -560,6 +750,7 @@ static SQLRETURN doSQLFreeStmt(SQLHSTMT StatementHandle, sql->conn = NULL; FREE_ERROR(sql); + free(sql); return SQL_SUCCESS; @@ -573,15 +764,32 @@ SQLRETURN SQL_API SQLFreeStmt(SQLHSTMT StatementHandle, return r; } +static SQLRETURN do_exec_direct(sql_t *sql, TSDB_CONV_CODE code, const char *statement) { + if (code) CHK_CONV(1, code); + DASSERT(code==TSDB_CONV_OK); + + SQLRETURN r = SQL_ERROR; + do { + sql->rs = taos_query(sql->conn->taos, statement); + CHK_RS(r, sql, "failed to execute"); + } while (0); + + return r; +} + static SQLRETURN doSQLExecDirect(SQLHSTMT StatementHandle, SQLCHAR *StatementText, SQLINTEGER TextLength) { sql_t *sql = (sql_t*)StatementHandle; - if (!sql) return SQL_ERROR; + if (!sql) return SQL_INVALID_HANDLE; CHK_CONN(sql); CHK_CONN_TAOS(sql); + conn_t *conn = sql->conn; + + NORM_STR_LENGTH(sql, StatementText, TextLength); + if (sql->rs) { taos_free_result(sql->rs); sql->rs = NULL; @@ -599,19 +807,15 @@ static SQLRETURN doSQLExecDirect(SQLHSTMT 
StatementHandle, } sql->n_params = 0; - const char *stxt = SDUP(StatementText, TextLength); - - SQLRETURN r = SQL_ERROR; + SQLRETURN r = SQL_SUCCESS; + stack_buffer_t buffer; buffer.next = 0; + tsdb_conv_t *client_to_server = tsdb_conn_client_to_server(conn); + const char *stxt = NULL; do { - if (!stxt) { - SET_ERROR(sql, "HY001", TSDB_CODE_ODBC_OOM, ""); - break; - } - sql->rs = taos_query(sql->conn->taos, stxt); - CHK_RS(r, sql, "failed to execute"); + TSDB_CONV_CODE code = tsdb_conv(client_to_server, &buffer, (const char*)StatementText, (size_t)TextLength, &stxt, NULL); + r = do_exec_direct(sql, code, stxt); } while (0); - - SFRE(stxt, StatementText, TextLength); + tsdb_conv_free(client_to_server, stxt, &buffer, (const char*)StatementText); return r; } @@ -624,18 +828,50 @@ SQLRETURN SQL_API SQLExecDirect(SQLHSTMT StatementHandle, return r; } +static SQLRETURN doSQLExecDirectW(SQLHSTMT hstmt, SQLWCHAR *szSqlStr, SQLINTEGER cbSqlStr) +{ + sql_t *sql = (sql_t*)hstmt; + if (!sql) return SQL_INVALID_HANDLE; + + CHK_CONN(sql); + CHK_CONN_TAOS(sql); + + conn_t *conn = sql->conn; + + if (!szSqlStr) { + SET_ERROR(sql, "HY009", TSDB_CODE_ODBC_BAD_ARG, "szSqlStr [%p] not allowed", szSqlStr); + return SQL_ERROR; + } + if (cbSqlStr < 0) { + SET_ERROR(sql, "HY090", TSDB_CODE_ODBC_BAD_ARG, ""); + return SQL_ERROR; + } + + SQLRETURN r = SQL_SUCCESS; + stack_buffer_t buffer; buffer.next = 0; + tsdb_conv_t *utf16_to_server = tsdb_conn_utf16_to_server(conn); + const char *stxt = NULL; + do { + size_t slen = (size_t)cbSqlStr * sizeof(*szSqlStr); + TSDB_CONV_CODE code = tsdb_conv(utf16_to_server, &buffer, (const char*)szSqlStr, slen, &stxt, NULL); + r = do_exec_direct(sql, code, stxt); + } while (0); + tsdb_conv_free(utf16_to_server, stxt, &buffer, (const char*)szSqlStr); + + return r; +} + SQLRETURN SQL_API SQLExecDirectW(SQLHSTMT hstmt, SQLWCHAR *szSqlStr, SQLINTEGER cbSqlStr) { - size_t bytes = 0; - SQLCHAR *utf8 = wchars_to_chars(szSqlStr, cbSqlStr, &bytes); - return SQLExecDirect(hstmt, utf8, bytes); + SQLRETURN r = doSQLExecDirectW(hstmt, szSqlStr, cbSqlStr); + return r; } static SQLRETURN doSQLNumResultCols(SQLHSTMT StatementHandle, SQLSMALLINT *ColumnCount) { sql_t *sql = (sql_t*)StatementHandle; - if (!sql) return SQL_ERROR; + if (!sql) return SQL_INVALID_HANDLE; CHK_CONN(sql); CHK_CONN_TAOS(sql); @@ -654,7 +890,7 @@ static SQLRETURN doSQLNumResultCols(SQLHSTMT StatementHandle, int fields = taos_field_count(sql->rs); if (ColumnCount) { - *ColumnCount = fields; + *ColumnCount = (SQLSMALLINT)fields; } return SQL_SUCCESS; @@ -677,7 +913,22 @@ static SQLRETURN doSQLRowCount(SQLHSTMT StatementHandle, CHK_CONN(sql); CHK_CONN_TAOS(sql); - if (sql->is_insert) { + // ref: https://docs.microsoft.com/en-us/sql/odbc/reference/syntax/sqlrowcount-function?view=sql-server-ver15 + // Summary + // SQLRowCount returns the number of rows affected by an UPDATE, INSERT, or DELETE statement; + // an SQL_ADD, SQL_UPDATE_BY_BOOKMARK, or SQL_DELETE_BY_BOOKMARK operation in SQLBulkOperations; + // or an SQL_UPDATE or SQL_DELETE operation in SQLSetPos. + + // how to fetch affected rows from taos? + // taos_affected_rows? 
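The comment above leaves the affected-row count open. A minimal sketch of one possible answer, assuming the TAOS_RES* obtained at execution time is still held and using taos_affected_rows() from the TDengine C client (taos.h); this is a sketch, not part of the patch:

#include <taos.h>

/* Sketch: report how many rows a DML statement touched, or 0 when there is
 * no result handle to ask. */
static long long affected_rows_of(TAOS_RES *rs) {
  if (!rs) return 0;
  return taos_affected_rows(rs);
}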
+ + if (1) { + SET_ERROR(sql, "IM001", TSDB_CODE_ODBC_NOT_SUPPORT, ""); + // if (RowCount) *RowCount = 0; + return SQL_SUCCESS_WITH_INFO; + } + + if (!sql->is_insert) { if (RowCount) *RowCount = 0; return SQL_SUCCESS; } @@ -735,11 +986,12 @@ static SQLRETURN doSQLColAttribute(SQLHSTMT StatementHandle, switch (FieldIdentifier) { case SQL_COLUMN_DISPLAY_SIZE: { - *NumericAttribute = do_field_display_size(field); + *NumericAttribute = (SQLLEN)do_field_display_size(field); } break; case SQL_COLUMN_LABEL: { + // todo: check BufferLength size_t n = sizeof(field->name); - strncpy(CharacterAttribute, field->name, (n>BufferLength ? BufferLength : n)); + strncpy(CharacterAttribute, field->name, (n>BufferLength ? (size_t)BufferLength : n)); } break; case SQL_COLUMN_UNSIGNED: { *NumericAttribute = SQL_FALSE; @@ -766,75 +1018,19 @@ SQLRETURN SQL_API SQLColAttribute(SQLHSTMT StatementHandle, return r; } -static SQLRETURN conv_tsdb_bool_to_c_bit(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b); -static SQLRETURN conv_tsdb_bool_to_c_tinyint(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b); -static SQLRETURN conv_tsdb_bool_to_c_short(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b); -static SQLRETURN conv_tsdb_bool_to_c_long(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b); -static SQLRETURN conv_tsdb_bool_to_c_sbigint(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b); -static SQLRETURN conv_tsdb_bool_to_c_float(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b); -static SQLRETURN conv_tsdb_bool_to_c_double(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b); -static SQLRETURN conv_tsdb_bool_to_c_char(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b); -static SQLRETURN conv_tsdb_bool_to_c_binary(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b); -static SQLRETURN conv_tsdb_v1_to_c_tinyint(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t v1); -static SQLRETURN conv_tsdb_v1_to_c_short(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t v1); -static SQLRETURN conv_tsdb_v1_to_c_long(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t v1); -static SQLRETURN conv_tsdb_v1_to_c_sbigint(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t v1); -static SQLRETURN conv_tsdb_v1_to_c_float(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t v1); -static SQLRETURN conv_tsdb_v1_to_c_double(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t v1); -static SQLRETURN conv_tsdb_v1_to_c_char(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t v1); -static SQLRETURN conv_tsdb_v1_to_c_binary(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t v1); -static SQLRETURN conv_tsdb_v2_to_c_short(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int16_t v2); -static SQLRETURN conv_tsdb_v2_to_c_long(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int16_t v2); -static SQLRETURN conv_tsdb_v2_to_c_sbigint(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int16_t v2); -static SQLRETURN conv_tsdb_v2_to_c_float(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int16_t v2); -static SQLRETURN conv_tsdb_v2_to_c_double(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int16_t v2); -static SQLRETURN conv_tsdb_v2_to_c_char(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int16_t v2); -static SQLRETURN conv_tsdb_v2_to_c_binary(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int16_t v2); -static SQLRETURN conv_tsdb_v4_to_c_long(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int32_t v4); 
-static SQLRETURN conv_tsdb_v4_to_c_sbigint(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int32_t v4); -static SQLRETURN conv_tsdb_v4_to_c_float(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int32_t v4); -static SQLRETURN conv_tsdb_v4_to_c_double(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int32_t v4); -static SQLRETURN conv_tsdb_v4_to_c_char(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int32_t v4); -static SQLRETURN conv_tsdb_v4_to_c_binary(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int32_t v4); -static SQLRETURN conv_tsdb_v8_to_c_sbigint(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int64_t v8); -static SQLRETURN conv_tsdb_v8_to_c_float(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int64_t v8); -static SQLRETURN conv_tsdb_v8_to_c_double(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int64_t v8); -static SQLRETURN conv_tsdb_v8_to_c_char(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int64_t v8); -static SQLRETURN conv_tsdb_v8_to_c_binary(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int64_t v8); -static SQLRETURN conv_tsdb_f4_to_c_float(sql_t *sql, c_target_t *target, TAOS_FIELD *field, float f4); -static SQLRETURN conv_tsdb_f4_to_c_double(sql_t *sql, c_target_t *target, TAOS_FIELD *field, float f4); -static SQLRETURN conv_tsdb_f4_to_c_char(sql_t *sql, c_target_t *target, TAOS_FIELD *field, float f4); -static SQLRETURN conv_tsdb_f4_to_c_binary(sql_t *sql, c_target_t *target, TAOS_FIELD *field, float f4); -static SQLRETURN conv_tsdb_f8_to_c_double(sql_t *sql, c_target_t *target, TAOS_FIELD *field, double f8); -static SQLRETURN conv_tsdb_f8_to_c_char(sql_t *sql, c_target_t *target, TAOS_FIELD *field, double f8); -static SQLRETURN conv_tsdb_f8_to_c_binary(sql_t *sql, c_target_t *target, TAOS_FIELD *field, double f8); -static SQLRETURN conv_tsdb_ts_to_c_v8(sql_t *sql, c_target_t *target, TAOS_FIELD *field, SQL_TIMESTAMP_STRUCT *ts); -static SQLRETURN conv_tsdb_ts_to_c_str(sql_t *sql, c_target_t *target, TAOS_FIELD *field, SQL_TIMESTAMP_STRUCT *ts); -static SQLRETURN conv_tsdb_ts_to_c_bin(sql_t *sql, c_target_t *target, TAOS_FIELD *field, SQL_TIMESTAMP_STRUCT *ts); -static SQLRETURN conv_tsdb_ts_to_c_ts(sql_t *sql, c_target_t *target, TAOS_FIELD *field, SQL_TIMESTAMP_STRUCT *ts); -static SQLRETURN conv_tsdb_bin_to_c_str(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const unsigned char *bin); -static SQLRETURN conv_tsdb_bin_to_c_bin(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const unsigned char *bin); -static SQLRETURN conv_tsdb_str_to_c_bit(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str); -static SQLRETURN conv_tsdb_str_to_c_v1(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str); -static SQLRETURN conv_tsdb_str_to_c_v2(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str); -static SQLRETURN conv_tsdb_str_to_c_v4(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str); -static SQLRETURN conv_tsdb_str_to_c_v8(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str); -static SQLRETURN conv_tsdb_str_to_c_f4(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str); -static SQLRETURN conv_tsdb_str_to_c_f8(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str); -static SQLRETURN conv_tsdb_str_to_c_str(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str); -static SQLRETURN conv_tsdb_str_to_c_bin(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str); - static SQLRETURN doSQLGetData(SQLHSTMT StatementHandle, 
SQLUSMALLINT ColumnNumber, SQLSMALLINT TargetType, SQLPOINTER TargetValue, SQLLEN BufferLength, SQLLEN *StrLen_or_Ind) { sql_t *sql = (sql_t*)StatementHandle; - if (!sql) return SQL_ERROR; + if (!sql) return SQL_INVALID_HANDLE; CHK_CONN(sql); CHK_CONN_TAOS(sql); + conn_t *conn = sql->conn; + if (!sql->rs) { SET_ERROR(sql, "HY000", TSDB_CODE_ODBC_NO_RESULT, ""); return SQL_ERROR; @@ -845,8 +1041,6 @@ static SQLRETURN doSQLGetData(SQLHSTMT StatementHandle, return SQL_ERROR; } - DASSERT(TargetValue); - int nfields = taos_field_count(sql->rs); TAOS_FIELD *fields = taos_fetch_fields(sql->rs); @@ -859,14 +1053,20 @@ static SQLRETURN doSQLGetData(SQLHSTMT StatementHandle, SET_ERROR(sql, "HY009", TSDB_CODE_ODBC_BAD_ARG, "NULL TargetValue not allowed for col [%d]", ColumnNumber); return SQL_ERROR; } + if (BufferLength<0) { + SET_ERROR(sql, "HY090", TSDB_CODE_ODBC_BAD_ARG, ""); + return SQL_ERROR; + } TAOS_FIELD *field = fields + ColumnNumber-1; void *row = sql->row[ColumnNumber-1]; if (!row) { - if (StrLen_or_Ind) { - *StrLen_or_Ind = SQL_NULL_DATA; + if (!StrLen_or_Ind) { + SET_ERROR(sql, "22002", TSDB_CODE_ODBC_BAD_ARG, "NULL StrLen_or_Ind not allowed for col [%d]", ColumnNumber); + return SQL_ERROR; } + *StrLen_or_Ind = SQL_NULL_DATA; return SQL_SUCCESS; } @@ -878,89 +1078,49 @@ static SQLRETURN doSQLGetData(SQLHSTMT StatementHandle, target.soi = StrLen_or_Ind; switch (field->type) { - case TSDB_DATA_TYPE_BOOL: { - int8_t v = *(int8_t*)row; - if (v) v = 1; - switch (target.ct) { - case SQL_C_BIT: return conv_tsdb_bool_to_c_bit(sql, &target, field, v); - case SQL_C_TINYINT: return conv_tsdb_bool_to_c_tinyint(sql, &target, field, v); - case SQL_C_SHORT: return conv_tsdb_bool_to_c_short(sql, &target, field, v); - case SQL_C_LONG: return conv_tsdb_bool_to_c_long(sql, &target, field, v); - case SQL_C_SBIGINT: return conv_tsdb_bool_to_c_sbigint(sql, &target, field, v); - case SQL_C_FLOAT: return conv_tsdb_bool_to_c_float(sql, &target, field, v); - case SQL_C_DOUBLE: return conv_tsdb_bool_to_c_double(sql, &target, field, v); - case SQL_C_CHAR: return conv_tsdb_bool_to_c_char(sql, &target, field, v); - case SQL_C_BINARY: return conv_tsdb_bool_to_c_binary(sql, &target, field, v); - default: { - SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_NOT_SUPPORT, - "no convertion from [%s] to [%s[%d][0x%x]] for col [%d]", - taos_data_type(field->type), sql_c_type(target.ct), target.ct, target.ct, ColumnNumber); - return SQL_ERROR; - } - } - } break; - case TSDB_DATA_TYPE_TINYINT: { - int8_t v = *(int8_t*)row; - switch (target.ct) { - case SQL_C_TINYINT: return conv_tsdb_v1_to_c_tinyint(sql, &target, field, v); - case SQL_C_SHORT: return conv_tsdb_v1_to_c_short(sql, &target, field, v); - case SQL_C_LONG: return conv_tsdb_v1_to_c_long(sql, &target, field, v); - case SQL_C_SBIGINT: return conv_tsdb_v1_to_c_sbigint(sql, &target, field, v); - case SQL_C_FLOAT: return conv_tsdb_v1_to_c_float(sql, &target, field, v); - case SQL_C_DOUBLE: return conv_tsdb_v1_to_c_double(sql, &target, field, v); - case SQL_C_CHAR: return conv_tsdb_v1_to_c_char(sql, &target, field, v); - case SQL_C_BINARY: return conv_tsdb_v1_to_c_binary(sql, &target, field, v); - default: { - SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_NOT_SUPPORT, - "no convertion from [%s] to [%s[%d][0x%x]] for col [%d]", - taos_data_type(field->type), sql_c_type(target.ct), target.ct, target.ct, ColumnNumber); - return SQL_ERROR; - } - } - } break; - case TSDB_DATA_TYPE_SMALLINT: { - int16_t v = *(int16_t*)row; - switch (target.ct) { - case SQL_C_SHORT: return 
conv_tsdb_v2_to_c_short(sql, &target, field, v); - case SQL_C_LONG: return conv_tsdb_v2_to_c_long(sql, &target, field, v); - case SQL_C_SBIGINT: return conv_tsdb_v2_to_c_sbigint(sql, &target, field, v); - case SQL_C_FLOAT: return conv_tsdb_v2_to_c_float(sql, &target, field, v); - case SQL_C_DOUBLE: return conv_tsdb_v2_to_c_double(sql, &target, field, v); - case SQL_C_CHAR: return conv_tsdb_v2_to_c_char(sql, &target, field, v); - case SQL_C_BINARY: return conv_tsdb_v2_to_c_binary(sql, &target, field, v); - default: { - SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_NOT_SUPPORT, - "no convertion from [%s] to [%s[%d][0x%x]] for col [%d]", - taos_data_type(field->type), sql_c_type(target.ct), target.ct, target.ct, ColumnNumber); - return SQL_ERROR; - } - } - } break; - case TSDB_DATA_TYPE_INT: { - int32_t v = *(int32_t*)row; - switch (target.ct) { - case SQL_C_LONG: return conv_tsdb_v4_to_c_long(sql, &target, field, v); - case SQL_C_SBIGINT: return conv_tsdb_v4_to_c_sbigint(sql, &target, field, v); - case SQL_C_FLOAT: return conv_tsdb_v4_to_c_float(sql, &target, field, v); - case SQL_C_DOUBLE: return conv_tsdb_v4_to_c_double(sql, &target, field, v); - case SQL_C_CHAR: return conv_tsdb_v4_to_c_char(sql, &target, field, v); - case SQL_C_BINARY: return conv_tsdb_v4_to_c_binary(sql, &target, field, v); - default: { - SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_NOT_SUPPORT, - "no convertion from [%s] to [%s[%d][0x%x]] for col [%d]", - taos_data_type(field->type), sql_c_type(target.ct), target.ct, target.ct, ColumnNumber); - return SQL_ERROR; - } - } - } break; + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_INT: case TSDB_DATA_TYPE_BIGINT: { - int64_t v = *(int64_t*)row; + int64_t v; + switch (field->type) { + case TSDB_DATA_TYPE_BOOL: v = *(int8_t*)row; if (v) v = 1; break; + case TSDB_DATA_TYPE_TINYINT: v = *(int8_t*)row; break; + case TSDB_DATA_TYPE_SMALLINT: v = *(int16_t*)row; break; + case TSDB_DATA_TYPE_INT: v = *(int32_t*)row; break; + case TSDB_DATA_TYPE_BIGINT: // fall through + default: v = *(int64_t*)row; break; + } switch (target.ct) { - case SQL_C_SBIGINT: return conv_tsdb_v8_to_c_sbigint(sql, &target, field, v); - case SQL_C_FLOAT: return conv_tsdb_v8_to_c_float(sql, &target, field, v); - case SQL_C_DOUBLE: return conv_tsdb_v8_to_c_double(sql, &target, field, v); - case SQL_C_CHAR: return conv_tsdb_v8_to_c_char(sql, &target, field, v); - case SQL_C_BINARY: return conv_tsdb_v8_to_c_binary(sql, &target, field, v); + case SQL_C_BIT: { + CHK_CONV(0, tsdb_int64_to_bit(v, TargetValue)); + } break; + case SQL_C_TINYINT: { + CHK_CONV(0, tsdb_int64_to_tinyint(v, TargetValue)); + } break; + case SQL_C_SHORT: { + CHK_CONV(0, tsdb_int64_to_smallint(v, TargetValue)); + } break; + case SQL_C_LONG: { + CHK_CONV(0, tsdb_int64_to_int(v, TargetValue)); + } break; + case SQL_C_SBIGINT: { + CHK_CONV(0, tsdb_int64_to_bigint(v, TargetValue)); + } break; + case SQL_C_FLOAT: { + CHK_CONV(0, tsdb_int64_to_float(v, TargetValue)); + } break; + case SQL_C_DOUBLE: { + CHK_CONV(0, tsdb_int64_to_double(v, TargetValue)); + } break; + case SQL_C_CHAR: { + tsdb_conv_t *utf8_to_client = tsdb_conn_utf8_to_client(conn); + size_t len = (size_t)BufferLength; + TSDB_CONV_CODE code = tsdb_conv_write_int64(utf8_to_client, v, (char*)TargetValue, &len); + if (StrLen_or_Ind) *StrLen_or_Ind = (SQLLEN)len; + CHK_CONV(0, code); + } break; default: { SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_NOT_SUPPORT, "no convertion from [%s] to [%s[%d][0x%x]] for col [%d]", @@ -972,10 +1132,21 @@ 
static SQLRETURN doSQLGetData(SQLHSTMT StatementHandle, case TSDB_DATA_TYPE_FLOAT: { float v = *(float*)row; switch (target.ct) { - case SQL_C_FLOAT: return conv_tsdb_f4_to_c_float(sql, &target, field, v); - case SQL_C_DOUBLE: return conv_tsdb_f4_to_c_double(sql, &target, field, v); - case SQL_C_CHAR: return conv_tsdb_f4_to_c_char(sql, &target, field, v); - case SQL_C_BINARY: return conv_tsdb_f4_to_c_binary(sql, &target, field, v); + case SQL_C_FLOAT: { + *(float*)TargetValue = v; + return SQL_SUCCESS; + } break; + case SQL_C_DOUBLE: { + *(double*)TargetValue = v; + return SQL_SUCCESS; + } break; + case SQL_C_CHAR: { + tsdb_conv_t *utf8_to_client = tsdb_conn_utf8_to_client(conn); + size_t len = (size_t)BufferLength; + TSDB_CONV_CODE code = tsdb_conv_write_double(utf8_to_client, v, (char*)TargetValue, &len); + if (StrLen_or_Ind) *StrLen_or_Ind = (SQLLEN)len; + CHK_CONV(0, code); + } break; default: { SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_NOT_SUPPORT, "no convertion from [%s] to [%s[%d][0x%x]] for col [%d]", @@ -987,9 +1158,17 @@ static SQLRETURN doSQLGetData(SQLHSTMT StatementHandle, case TSDB_DATA_TYPE_DOUBLE: { double v = *(double*)row; switch (target.ct) { - case SQL_C_DOUBLE: return conv_tsdb_f8_to_c_double(sql, &target, field, v); - case SQL_C_CHAR: return conv_tsdb_f8_to_c_char(sql, &target, field, v); - case SQL_C_BINARY: return conv_tsdb_f8_to_c_binary(sql, &target, field, v); + case SQL_C_DOUBLE: { + *(double*)TargetValue = v; + return SQL_SUCCESS; + } break; + case SQL_C_CHAR: { + tsdb_conv_t *utf8_to_client = tsdb_conn_utf8_to_client(conn); + size_t len = (size_t)BufferLength; + TSDB_CONV_CODE code = tsdb_conv_write_double(utf8_to_client, v, (char*)TargetValue, &len); + if (StrLen_or_Ind) *StrLen_or_Ind = (SQLLEN)len; + CHK_CONV(0, code); + } break; default: { SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_NOT_SUPPORT, "no convertion from [%s] to [%s[%d][0x%x]] for col [%d]", @@ -1002,21 +1181,31 @@ static SQLRETURN doSQLGetData(SQLHSTMT StatementHandle, SQL_TIMESTAMP_STRUCT ts = {0}; int64_t v = *(int64_t*)row; time_t t = v/1000; - struct tm tm = {0}; - localtime_r(&t, &tm); - ts.year = tm.tm_year + 1900; - ts.month = tm.tm_mon + 1; - ts.day = tm.tm_mday; - ts.hour = tm.tm_hour; - ts.minute = tm.tm_min; - ts.second = tm.tm_sec; - ts.fraction = v%1000 * 1000000; + struct tm vtm = {0}; + localtime_r(&t, &vtm); + ts.year = (SQLSMALLINT)(vtm.tm_year + 1900); + ts.month = (SQLUSMALLINT)(vtm.tm_mon + 1); + ts.day = (SQLUSMALLINT)(vtm.tm_mday); + ts.hour = (SQLUSMALLINT)(vtm.tm_hour); + ts.minute = (SQLUSMALLINT)(vtm.tm_min); + ts.second = (SQLUSMALLINT)(vtm.tm_sec); + ts.fraction = (SQLUINTEGER)(v%1000 * 1000000); switch (target.ct) { - case SQL_C_SBIGINT: return conv_tsdb_ts_to_c_v8(sql, &target, field, &ts); - case SQL_C_CHAR: return conv_tsdb_ts_to_c_str(sql, &target, field, &ts); - case SQL_C_BINARY: return conv_tsdb_ts_to_c_bin(sql, &target, field, &ts); - case SQL_C_TYPE_TIMESTAMP: - case SQL_C_TIMESTAMP: return conv_tsdb_ts_to_c_ts(sql, &target, field, &ts); + case SQL_C_SBIGINT: { + *(int64_t*)TargetValue = v; + return SQL_SUCCESS; + } break; + case SQL_C_CHAR: { + tsdb_conv_t *utf8_to_client = tsdb_conn_utf8_to_client(conn); + size_t len = (size_t)BufferLength; + TSDB_CONV_CODE code = tsdb_conv_write_timestamp(utf8_to_client, ts, (char*)TargetValue, &len); + if (StrLen_or_Ind) *StrLen_or_Ind = (SQLLEN)len; + CHK_CONV(0, code); + } break; + case SQL_C_TYPE_TIMESTAMP: { + *(SQL_TIMESTAMP_STRUCT*)TargetValue = ts; + return SQL_SUCCESS; + } break; default: { SET_ERROR(sql, "HYC00", 
TSDB_CODE_ODBC_NOT_SUPPORT, "no convertion from [%s] to [%s[%d][0x%x]] for col [%d]", @@ -1026,10 +1215,34 @@ static SQLRETURN doSQLGetData(SQLHSTMT StatementHandle, } } break; case TSDB_DATA_TYPE_BINARY: { - const unsigned char *bin = (const unsigned char *)row; + size_t field_bytes = (size_t)field->bytes; + field_bytes -= VARSTR_HEADER_SIZE; switch (target.ct) { - case SQL_C_CHAR: return conv_tsdb_bin_to_c_str(sql, &target, field, bin); - case SQL_C_BINARY: return conv_tsdb_bin_to_c_bin(sql, &target, field, bin); + case SQL_C_CHAR: { + // taos cares nothing about what would be stored in 'binary' as most sql implementations do + // but the client requires to fetch it as a SQL_C_CHAR + // thus, we first try to decode binary to client charset + // if failed, we then do hex-serialization + + tsdb_conv_t *server_to_client = tsdb_conn_server_to_client(conn); + size_t slen = strnlen((const char*)row, field_bytes); + size_t len = (size_t)BufferLength; + TSDB_CONV_CODE code = tsdb_conv_write(server_to_client, + (const char*)row, &slen, + (char*)TargetValue, &len); + if (code==TSDB_CONV_OK) { + if (StrLen_or_Ind) *StrLen_or_Ind = (SQLLEN)((size_t)BufferLength - len); + CHK_CONV(0, code); + // code never reached here + } + + // todo: hex-serialization + const char *bad = ""; + int n = snprintf((char*)TargetValue, (size_t)BufferLength, "%s", bad); + // what if n < 0 ? + if (StrLen_or_Ind) *StrLen_or_Ind = n; + CHK_CONV(0, n>=BufferLength ? TSDB_CONV_TRUNC : TSDB_CONV_OK); + } break; default: { SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_NOT_SUPPORT, "no convertion from [%s] to [%s[%d][0x%x]] for col [%d]", @@ -1039,17 +1252,19 @@ static SQLRETURN doSQLGetData(SQLHSTMT StatementHandle, } } break; case TSDB_DATA_TYPE_NCHAR: { - const char *str = (const char *)row; + size_t field_bytes = (size_t)field->bytes; + field_bytes -= VARSTR_HEADER_SIZE; switch (target.ct) { - case SQL_C_BIT: return conv_tsdb_str_to_c_bit(sql, &target, field, str); - case SQL_C_TINYINT: return conv_tsdb_str_to_c_v1(sql, &target, field, str); - case SQL_C_SHORT: return conv_tsdb_str_to_c_v2(sql, &target, field, str); - case SQL_C_LONG: return conv_tsdb_str_to_c_v4(sql, &target, field, str); - case SQL_C_SBIGINT: return conv_tsdb_str_to_c_v8(sql, &target, field, str); - case SQL_C_FLOAT: return conv_tsdb_str_to_c_f4(sql, &target, field, str); - case SQL_C_DOUBLE: return conv_tsdb_str_to_c_f8(sql, &target, field, str); - case SQL_C_CHAR: return conv_tsdb_str_to_c_str(sql, &target, field, str); - case SQL_C_BINARY: return conv_tsdb_str_to_c_bin(sql, &target, field, str); + case SQL_C_CHAR: { + tsdb_conv_t *server_to_client = tsdb_conn_server_to_client(conn); + size_t slen = strnlen((const char*)row, field_bytes); + size_t len = (size_t)BufferLength; + TSDB_CONV_CODE code = tsdb_conv_write(server_to_client, + (const char*)row, &slen, + (char*)TargetValue, &len); + if (StrLen_or_Ind) *StrLen_or_Ind = (SQLLEN)((size_t)BufferLength - len); + CHK_CONV(0, code); + } break; default: { SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_NOT_SUPPORT, "no convertion from [%s] to [%s[%d][0x%x]] for col [%d]", @@ -1083,7 +1298,7 @@ SQLRETURN SQL_API SQLGetData(SQLHSTMT StatementHandle, static SQLRETURN doSQLFetch(SQLHSTMT StatementHandle) { sql_t *sql = (sql_t*)StatementHandle; - if (!sql) return SQL_ERROR; + if (!sql) return SQL_INVALID_HANDLE; CHK_CONN(sql); CHK_CONN_TAOS(sql); @@ -1107,12 +1322,18 @@ SQLRETURN SQL_API SQLFetch(SQLHSTMT StatementHandle) static SQLRETURN doSQLPrepare(SQLHSTMT StatementHandle, SQLCHAR *StatementText, SQLINTEGER TextLength) { + 
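The "todo: hex-serialization" fallback above currently writes a literal "<binary>" placeholder when the server-to-client charset conversion fails. A minimal sketch of what such a fallback could look like (hex_serialize is a hypothetical helper, not part of the patch), bounded by the caller's buffer and reporting the size needed so truncation can be detected the same way the surrounding code checks n >= BufferLength:

#include <stddef.h>

static size_t hex_serialize(const unsigned char *src, size_t slen, char *dst, size_t dlen) {
  static const char digits[] = "0123456789ABCDEF";
  size_t need = slen * 2;            /* characters required for a full encoding */
  size_t i, o = 0;
  for (i = 0; i < slen && o + 2 < dlen; ++i) {
    dst[o++] = digits[src[i] >> 4];
    dst[o++] = digits[src[i] & 0x0F];
  }
  if (dlen > 0) dst[o] = '\0';       /* always NUL-terminate what was written */
  return need;                       /* truncated if need + 1 > dlen */
}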
stack_buffer_t buffer; buffer.next = 0; + sql_t *sql = (sql_t*)StatementHandle; - if (!sql) return SQL_ERROR; + if (!sql) return SQL_INVALID_HANDLE; CHK_CONN(sql); CHK_CONN_TAOS(sql); + conn_t *conn = sql->conn; + + NORM_STR_LENGTH(sql, StatementText, TextLength); + if (sql->rs) { taos_free_result(sql->rs); sql->rs = NULL; @@ -1138,9 +1359,17 @@ static SQLRETURN doSQLPrepare(SQLHSTMT StatementHandle, break; } + tsdb_conv_t *client_to_server = tsdb_conn_client_to_server(conn); + const char *stxt = NULL; int ok = 0; do { - int r = taos_stmt_prepare(sql->stmt, (const char *)StatementText, TextLength); + tsdb_conv(client_to_server, &buffer, (const char*)StatementText, (size_t)TextLength, &stxt, NULL); + if ((!stxt)) { + SET_ERROR(sql, "HY001", TSDB_CODE_ODBC_OOM, ""); + break; + } + + int r = taos_stmt_prepare(sql->stmt, stxt, (unsigned long)strlen(stxt)); if (r) { SET_ERROR(sql, "HY000", r, "failed to prepare a TAOS statement"); break; @@ -1164,7 +1393,7 @@ static SQLRETURN doSQLPrepare(SQLHSTMT StatementHandle, DASSERT(params>=0); if (params>0) { - param_bind_t *ar = (param_bind_t*)calloc(1, params * sizeof(*ar)); + param_bind_t *ar = (param_bind_t*)calloc(1, ((size_t)params) * sizeof(*ar)); if (!ar) { SET_ERROR(sql, "HY001", TSDB_CODE_ODBC_OOM, ""); break; @@ -1177,6 +1406,8 @@ static SQLRETURN doSQLPrepare(SQLHSTMT StatementHandle, ok = 1; } while (0); + tsdb_conv_free(client_to_server, stxt, &buffer, (const char*)StatementText); + if (!ok) { taos_stmt_close(sql->stmt); sql->stmt = NULL; @@ -1206,57 +1437,86 @@ static SQLRETURN do_bind_param_value(sql_t *sql, int idx_row, int idx, param_bin SET_ERROR(sql, "HY000", TSDB_CODE_ODBC_NOT_SUPPORT, "parameter [@%d] not bound yet", idx+1); return SQL_ERROR; } + if (param->ParameterValue==NULL) { + SET_ERROR(sql, "HY009", TSDB_CODE_ODBC_BAD_ARG, "ParameterValue [@%p] not allowed", param->ParameterValue); + return SQL_ERROR; + } + if (param->StrLen_or_Ind==NULL) { + SET_ERROR(sql, "HY009", TSDB_CODE_ODBC_BAD_ARG, "StrLen_or_Ind [@%p] not allowed", param->StrLen_or_Ind); + return SQL_ERROR; + } - SQLPOINTER paramValue = param->ParameterValue; - SQLSMALLINT valueType = param->ValueType; - SQLLEN *soi = param->StrLen_or_Ind; + conn_t *conn = sql->conn; - size_t offset = idx_row * sql->rowlen + sql->ptr_offset; + unsigned char *paramValue = param->ParameterValue; + SQLSMALLINT valueType = param->ValueType; + SQLLEN *soi = param->StrLen_or_Ind; - if (paramValue) paramValue += offset; - if (soi) soi = (SQLLEN*)((char*)soi + offset); + size_t offset = ((size_t)idx_row) * sql->rowlen + sql->ptr_offset; + paramValue += offset; + soi = (SQLLEN*)((char*)soi + offset); - if (soi && *soi == SQL_NULL_DATA) { + + if (*soi == SQL_NULL_DATA) { bind->is_null = (int*)&yes; return SQL_SUCCESS; } bind->is_null = (int*)&no; - int type = 0; - int bytes = 0; + int tsdb_type = 0; // taos internal data tsdb_type to be bound to + int tsdb_bytes = 0; // we don't rely much on 'tsdb_bytes' here, we delay until taos to check it internally if (sql->is_insert) { - int r = taos_stmt_get_param(sql->stmt, idx, &type, &bytes); + int r = taos_stmt_get_param(sql->stmt, idx, &tsdb_type, &tsdb_bytes); if (r) { SET_ERROR(sql, "HY000", TSDB_CODE_ODBC_OUT_OF_RANGE, "parameter [@%d] not valid", idx+1); return SQL_ERROR; } } else { + // we don't have correspondent data type from taos api + // we have to give a good guess here switch (valueType) { + case SQL_C_BIT: { + tsdb_type = TSDB_DATA_TYPE_BOOL; + } break; + case SQL_C_STINYINT: + case SQL_C_TINYINT: { + tsdb_type = TSDB_DATA_TYPE_TINYINT; 
+ } break; + case SQL_C_SSHORT: + case SQL_C_SHORT: { + tsdb_type = TSDB_DATA_TYPE_SMALLINT; + } break; + case SQL_C_SLONG: case SQL_C_LONG: { - type = TSDB_DATA_TYPE_INT; + tsdb_type = TSDB_DATA_TYPE_INT; + } break; + case SQL_C_SBIGINT: { + tsdb_type = TSDB_DATA_TYPE_BIGINT; + } break; + case SQL_C_FLOAT: { + tsdb_type = TSDB_DATA_TYPE_FLOAT; + } break; + case SQL_C_DOUBLE: { + tsdb_type = TSDB_DATA_TYPE_DOUBLE; + } break; + case SQL_C_TIMESTAMP: { + tsdb_type = TSDB_DATA_TYPE_TIMESTAMP; + } break; + case SQL_C_CHAR: { + tsdb_type = TSDB_DATA_TYPE_BINARY; + tsdb_bytes = SQL_NTS; } break; case SQL_C_WCHAR: { - type = TSDB_DATA_TYPE_NCHAR; - bytes = SQL_NTS; + tsdb_type = TSDB_DATA_TYPE_NCHAR; + tsdb_bytes = SQL_NTS; } break; - case SQL_C_CHAR: - case SQL_C_SHORT: - case SQL_C_SSHORT: case SQL_C_USHORT: - case SQL_C_SLONG: case SQL_C_ULONG: - case SQL_C_FLOAT: - case SQL_C_DOUBLE: - case SQL_C_BIT: - case SQL_C_TINYINT: - case SQL_C_STINYINT: case SQL_C_UTINYINT: - case SQL_C_SBIGINT: case SQL_C_UBIGINT: case SQL_C_BINARY: case SQL_C_DATE: case SQL_C_TIME: - case SQL_C_TIMESTAMP: case SQL_C_TYPE_DATE: case SQL_C_TYPE_TIME: case SQL_C_TYPE_TIMESTAMP: @@ -1273,37 +1533,59 @@ static SQLRETURN do_bind_param_value(sql_t *sql, int idx_row, int idx, param_bin } // ref: https://docs.microsoft.com/en-us/sql/odbc/reference/appendixes/converting-data-from-c-to-sql-data-types?view=sql-server-ver15 - switch (type) { + switch (tsdb_type) { case TSDB_DATA_TYPE_BOOL: { - bind->buffer_type = type; + bind->buffer_type = tsdb_type; bind->buffer_length = sizeof(bind->u.b); bind->buffer = &bind->u.b; bind->length = &bind->buffer_length; switch (valueType) { - case SQL_C_LONG: { - CHK_CONV(tsdb_int64_to_bit(*(int32_t*)paramValue, &bind->u.b)); - } break; case SQL_C_BIT: { - CHK_CONV(tsdb_int64_to_bit(*(int8_t*)paramValue, &bind->u.b)); + CHK_CONV(1, tsdb_int64_to_bit(*(int8_t*)paramValue, &bind->u.b)); } break; - case SQL_C_CHAR: - case SQL_C_WCHAR: - case SQL_C_SHORT: - case SQL_C_SSHORT: - case SQL_C_USHORT: - case SQL_C_SLONG: - case SQL_C_ULONG: - case SQL_C_FLOAT: - case SQL_C_DOUBLE: case SQL_C_TINYINT: - case SQL_C_STINYINT: - case SQL_C_UTINYINT: - case SQL_C_SBIGINT: - case SQL_C_UBIGINT: - case SQL_C_BINARY: - case SQL_C_DATE: - case SQL_C_TIME: - case SQL_C_TIMESTAMP: + case SQL_C_STINYINT: { + CHK_CONV(1, tsdb_int64_to_bit(*(int8_t*)paramValue, &bind->u.b)); + } break; + case SQL_C_SHORT: + case SQL_C_SSHORT: { + CHK_CONV(1, tsdb_int64_to_bit(*(int16_t*)paramValue, &bind->u.b)); + } break; + case SQL_C_LONG: + case SQL_C_SLONG: { + CHK_CONV(1, tsdb_int64_to_bit(*(int32_t*)paramValue, &bind->u.b)); + } break; + case SQL_C_SBIGINT: { + CHK_CONV(1, tsdb_int64_to_bit(*(int64_t*)paramValue, &bind->u.b)); + } break; + case SQL_C_FLOAT: { + CHK_CONV(1, tsdb_double_to_bit(*(float*)paramValue, &bind->u.b)); + } break; + case SQL_C_DOUBLE: { + CHK_CONV(1, tsdb_double_to_bit(*(double*)paramValue, &bind->u.b)); + } break; + case SQL_C_CHAR: { + stack_buffer_t buffer; buffer.next = 0; + tsdb_conv_t *client_to_utf8 = tsdb_conn_client_to_utf8(conn); + size_t slen = (size_t)*soi; + if (slen==SQL_NTS) slen = strlen((const char*)paramValue); + CHK_CONV(1, tsdb_conv_chars_to_bit(client_to_utf8, &buffer, (const char *)paramValue, slen, &bind->u.b)); + } break; + case SQL_C_WCHAR: { + stack_buffer_t buffer; buffer.next = 0; + tsdb_conv_t *utf16_to_utf8 = tsdb_conn_utf16_to_utf8(conn); + size_t slen = (size_t)*soi; + DASSERT(slen != SQL_NTS); + CHK_CONV(1, tsdb_conv_chars_to_bit(utf16_to_utf8, &buffer, (const char 
*)paramValue, slen, &bind->u.b)); + } break; + case SQL_C_USHORT: + case SQL_C_ULONG: + case SQL_C_UTINYINT: + case SQL_C_UBIGINT: + case SQL_C_BINARY: + case SQL_C_DATE: + case SQL_C_TIME: + case SQL_C_TIMESTAMP: case SQL_C_TYPE_DATE: case SQL_C_TYPE_TIME: case SQL_C_TYPE_TIMESTAMP: @@ -1313,39 +1595,54 @@ static SQLRETURN do_bind_param_value(sql_t *sql, int idx_row, int idx, param_bin SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_OUT_OF_RANGE, "no convertion from [%s[%d/0x%x]] to [%s[%d/0x%x]] for parameter [%d]", sql_c_type(valueType), valueType, valueType, - taos_data_type(type), type, type, idx+1); + taos_data_type(tsdb_type), tsdb_type, tsdb_type, idx+1); return SQL_ERROR; } break; } } break; case TSDB_DATA_TYPE_TINYINT: { - bind->buffer_type = type; + bind->buffer_type = tsdb_type; bind->buffer_length = sizeof(bind->u.v1); bind->buffer = &bind->u.v1; bind->length = &bind->buffer_length; switch (valueType) { + case SQL_C_BIT: { + CHK_CONV(1, tsdb_int64_to_tinyint(*(int8_t*)paramValue, &bind->u.v1)); + } break; + case SQL_C_STINYINT: case SQL_C_TINYINT: { - CHK_CONV(tsdb_int64_to_tinyint(*(int8_t*)paramValue, &bind->u.v1)); + CHK_CONV(1, tsdb_int64_to_tinyint(*(int8_t*)paramValue, &bind->u.v1)); } break; + case SQL_C_SSHORT: case SQL_C_SHORT: { - CHK_CONV(tsdb_int64_to_tinyint(*(int16_t*)paramValue, &bind->u.v1)); + CHK_CONV(1, tsdb_int64_to_tinyint(*(int16_t*)paramValue, &bind->u.v1)); } break; + case SQL_C_SLONG: case SQL_C_LONG: { - CHK_CONV(tsdb_int64_to_tinyint(*(int32_t*)paramValue, &bind->u.v1)); + CHK_CONV(1, tsdb_int64_to_tinyint(*(int32_t*)paramValue, &bind->u.v1)); } break; case SQL_C_SBIGINT: { - CHK_CONV(tsdb_int64_to_tinyint(*(int64_t*)paramValue, &bind->u.v1)); + CHK_CONV(1, tsdb_int64_to_tinyint(*(int64_t*)paramValue, &bind->u.v1)); + } break; + case SQL_C_CHAR: { + stack_buffer_t buffer; buffer.next = 0; + tsdb_conv_t *client_to_utf8 = tsdb_conn_client_to_utf8(conn); + size_t slen = (size_t)*soi; + if (slen==SQL_NTS) slen = strlen((const char*)paramValue); + CHK_CONV(1, tsdb_conv_chars_to_tinyint(client_to_utf8, &buffer, (const char *)paramValue, slen, &bind->u.v1)); + // CHK_CONV(1, tsdb_chars_to_tinyint((const char *)paramValue, (size_t)*soi, &bind->u.v1)); + } break; + case SQL_C_WCHAR: { + stack_buffer_t buffer; buffer.next = 0; + tsdb_conv_t *utf16_to_utf8 = tsdb_conn_utf16_to_utf8(conn); + size_t slen = (size_t)*soi; + DASSERT(slen != SQL_NTS); + CHK_CONV(1, tsdb_conv_chars_to_tinyint(utf16_to_utf8, &buffer, (const char *)paramValue, slen, &bind->u.v1)); } break; - case SQL_C_CHAR: - case SQL_C_WCHAR: - case SQL_C_SSHORT: case SQL_C_USHORT: - case SQL_C_SLONG: case SQL_C_ULONG: case SQL_C_FLOAT: case SQL_C_DOUBLE: - case SQL_C_BIT: - case SQL_C_STINYINT: case SQL_C_UTINYINT: case SQL_C_UBIGINT: case SQL_C_BINARY: @@ -1361,36 +1658,55 @@ static SQLRETURN do_bind_param_value(sql_t *sql, int idx_row, int idx, param_bin SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_OUT_OF_RANGE, "no convertion from [%s[%d/0x%x]] to [%s[%d/0x%x]] for parameter [%d]", sql_c_type(valueType), valueType, valueType, - taos_data_type(type), type, type, idx+1); + taos_data_type(tsdb_type), tsdb_type, tsdb_type, idx+1); return SQL_ERROR; } break; } } break; case TSDB_DATA_TYPE_SMALLINT: { - bind->buffer_type = type; + bind->buffer_type = tsdb_type; bind->buffer_length = sizeof(bind->u.v2); bind->buffer = &bind->u.v2; bind->length = &bind->buffer_length; switch (valueType) { - case SQL_C_LONG: { - CHK_CONV(tsdb_int64_to_smallint(*(int32_t*)paramValue, &bind->u.v2)); + case SQL_C_BIT: { + CHK_CONV(1, 
tsdb_int64_to_smallint(*(int8_t*)paramValue, &bind->u.v2)); } break; - case SQL_C_SHORT: { - CHK_CONV(tsdb_int64_to_smallint(*(int16_t*)paramValue, &bind->u.v2)); + case SQL_C_STINYINT: + case SQL_C_TINYINT: { + CHK_CONV(1, tsdb_int64_to_smallint(*(int8_t*)paramValue, &bind->u.v2)); } break; - case SQL_C_CHAR: - case SQL_C_WCHAR: case SQL_C_SSHORT: - case SQL_C_USHORT: + case SQL_C_SHORT: { + CHK_CONV(1, tsdb_int64_to_smallint(*(int16_t*)paramValue, &bind->u.v2)); + } break; case SQL_C_SLONG: + case SQL_C_LONG: { + CHK_CONV(1, tsdb_int64_to_smallint(*(int32_t*)paramValue, &bind->u.v2)); + } break; + case SQL_C_SBIGINT: { + CHK_CONV(1, tsdb_int64_to_smallint(*(int64_t*)paramValue, &bind->u.v2)); + } break; + case SQL_C_CHAR: { + stack_buffer_t buffer; buffer.next = 0; + tsdb_conv_t *client_to_utf8 = tsdb_conn_client_to_utf8(conn); + size_t slen = (size_t)*soi; + if (slen==SQL_NTS) slen = strlen((const char*)paramValue); + CHK_CONV(1, tsdb_conv_chars_to_smallint(client_to_utf8, &buffer, (const char *)paramValue, slen, &bind->u.v2)); + // CHK_CONV(1, tsdb_chars_to_smallint((const char*)paramValue, (size_t)*soi, &bind->u.v2)); + } break; + case SQL_C_WCHAR: { + stack_buffer_t buffer; buffer.next = 0; + tsdb_conv_t *utf16_to_utf8 = tsdb_conn_utf16_to_utf8(conn); + size_t slen = (size_t)*soi; + DASSERT(slen != SQL_NTS); + CHK_CONV(1, tsdb_conv_chars_to_smallint(utf16_to_utf8, &buffer, (const char *)paramValue, slen, &bind->u.v2)); + } break; + case SQL_C_USHORT: case SQL_C_ULONG: case SQL_C_FLOAT: case SQL_C_DOUBLE: - case SQL_C_BIT: - case SQL_C_TINYINT: - case SQL_C_STINYINT: case SQL_C_UTINYINT: - case SQL_C_SBIGINT: case SQL_C_UBIGINT: case SQL_C_BINARY: case SQL_C_DATE: @@ -1405,34 +1721,55 @@ static SQLRETURN do_bind_param_value(sql_t *sql, int idx_row, int idx, param_bin SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_OUT_OF_RANGE, "no convertion from [%s[%d/0x%x]] to [%s[%d/0x%x]] for parameter [%d]", sql_c_type(valueType), valueType, valueType, - taos_data_type(type), type, type, idx+1); + taos_data_type(tsdb_type), tsdb_type, tsdb_type, idx+1); return SQL_ERROR; } break; } } break; case TSDB_DATA_TYPE_INT: { - bind->buffer_type = type; + bind->buffer_type = tsdb_type; bind->buffer_length = sizeof(bind->u.v4); bind->buffer = &bind->u.v4; bind->length = &bind->buffer_length; switch (valueType) { - case SQL_C_LONG: { - CHK_CONV(tsdb_int64_to_int(*(int32_t*)paramValue, &bind->u.v4)); + case SQL_C_BIT: { + CHK_CONV(1, tsdb_int64_to_int(*(int8_t*)paramValue, &bind->u.v4)); + } break; + case SQL_C_STINYINT: + case SQL_C_TINYINT: { + CHK_CONV(1, tsdb_int64_to_int(*(int8_t*)paramValue, &bind->u.v4)); } break; - case SQL_C_CHAR: - case SQL_C_WCHAR: - case SQL_C_SHORT: case SQL_C_SSHORT: - case SQL_C_USHORT: + case SQL_C_SHORT: { + CHK_CONV(1, tsdb_int64_to_int(*(int16_t*)paramValue, &bind->u.v4)); + } break; case SQL_C_SLONG: + case SQL_C_LONG: { + CHK_CONV(1, tsdb_int64_to_int(*(int32_t*)paramValue, &bind->u.v4)); + } break; + case SQL_C_SBIGINT: { + CHK_CONV(1, tsdb_int64_to_int(*(int64_t*)paramValue, &bind->u.v4)); + } break; + case SQL_C_CHAR: { + stack_buffer_t buffer; buffer.next = 0; + tsdb_conv_t *client_to_utf8 = tsdb_conn_client_to_utf8(conn); + size_t slen = (size_t)*soi; + if (slen==SQL_NTS) slen = strlen((const char*)paramValue); + CHK_CONV(1, tsdb_conv_chars_to_int(client_to_utf8, &buffer, (const char *)paramValue, slen, &bind->u.v4)); + // CHK_CONV(1, tsdb_chars_to_int((const char*)paramValue, (size_t)*soi, &bind->u.v4)); + } break; + case SQL_C_WCHAR: { + stack_buffer_t buffer; 
buffer.next = 0; + tsdb_conv_t *utf16_to_utf8 = tsdb_conn_utf16_to_utf8(conn); + size_t slen = (size_t)*soi; + DASSERT(slen != SQL_NTS); + CHK_CONV(1, tsdb_conv_chars_to_int(utf16_to_utf8, &buffer, (const char *)paramValue, slen, &bind->u.v4)); + } break; + case SQL_C_USHORT: case SQL_C_ULONG: case SQL_C_FLOAT: case SQL_C_DOUBLE: - case SQL_C_BIT: - case SQL_C_TINYINT: - case SQL_C_STINYINT: case SQL_C_UTINYINT: - case SQL_C_SBIGINT: case SQL_C_UBIGINT: case SQL_C_BINARY: case SQL_C_DATE: @@ -1447,35 +1784,54 @@ static SQLRETURN do_bind_param_value(sql_t *sql, int idx_row, int idx, param_bin SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_OUT_OF_RANGE, "no convertion from [%s[%d/0x%x]] to [%s[%d/0x%x]] for parameter [%d]", sql_c_type(valueType), valueType, valueType, - taos_data_type(type), type, type, idx+1); + taos_data_type(tsdb_type), tsdb_type, tsdb_type, idx+1); return SQL_ERROR; } break; } } break; case TSDB_DATA_TYPE_BIGINT: { - bind->buffer_type = type; + bind->buffer_type = tsdb_type; bind->buffer_length = sizeof(bind->u.v8); bind->buffer = &bind->u.v8; bind->length = &bind->buffer_length; switch (valueType) { - case SQL_C_SBIGINT: { - bind->u.v8 = *(int64_t*)paramValue; + case SQL_C_BIT: { + CHK_CONV(1, tsdb_int64_to_bigint(*(int8_t*)paramValue, &bind->u.v8)); } break; - case SQL_C_LONG: { - bind->u.v8 = *(int32_t*)paramValue; + case SQL_C_STINYINT: + case SQL_C_TINYINT: { + CHK_CONV(1, tsdb_int64_to_bigint(*(int8_t*)paramValue, &bind->u.v8)); } break; - case SQL_C_CHAR: - case SQL_C_WCHAR: - case SQL_C_SHORT: case SQL_C_SSHORT: - case SQL_C_USHORT: + case SQL_C_SHORT: { + CHK_CONV(1, tsdb_int64_to_bigint(*(int16_t*)paramValue, &bind->u.v8)); + } break; case SQL_C_SLONG: + case SQL_C_LONG: { + CHK_CONV(1, tsdb_int64_to_bigint(*(int32_t*)paramValue, &bind->u.v8)); + } break; + case SQL_C_SBIGINT: { + CHK_CONV(1, tsdb_int64_to_bigint(*(int64_t*)paramValue, &bind->u.v8)); + } break; + case SQL_C_CHAR: { + stack_buffer_t buffer; buffer.next = 0; + tsdb_conv_t *client_to_utf8 = tsdb_conn_client_to_utf8(conn); + size_t slen = (size_t)*soi; + if (slen==SQL_NTS) slen = strlen((const char*)paramValue); + CHK_CONV(1, tsdb_conv_chars_to_bigint(client_to_utf8, &buffer, (const char *)paramValue, slen, &bind->u.v8)); + // CHK_CONV(1, tsdb_chars_to_bigint((const char*)paramValue, (size_t)*soi, &bind->u.v8)); + } break; + case SQL_C_WCHAR: { + stack_buffer_t buffer; buffer.next = 0; + tsdb_conv_t *utf16_to_utf8 = tsdb_conn_utf16_to_utf8(conn); + size_t slen = (size_t)*soi; + DASSERT(slen != SQL_NTS); + CHK_CONV(1, tsdb_conv_chars_to_bigint(utf16_to_utf8, &buffer, (const char *)paramValue, slen, &bind->u.v8)); + } break; + case SQL_C_USHORT: case SQL_C_ULONG: case SQL_C_FLOAT: case SQL_C_DOUBLE: - case SQL_C_BIT: - case SQL_C_TINYINT: - case SQL_C_STINYINT: case SQL_C_UTINYINT: case SQL_C_UBIGINT: case SQL_C_BINARY: @@ -1491,36 +1847,58 @@ static SQLRETURN do_bind_param_value(sql_t *sql, int idx_row, int idx, param_bin SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_OUT_OF_RANGE, "no convertion from [%s[%d/0x%x]] to [%s[%d/0x%x]] for parameter [%d]", sql_c_type(valueType), valueType, valueType, - taos_data_type(type), type, type, idx+1); + taos_data_type(tsdb_type), tsdb_type, tsdb_type, idx+1); return SQL_ERROR; } break; } } break; case TSDB_DATA_TYPE_FLOAT: { - bind->buffer_type = type; + bind->buffer_type = tsdb_type; bind->buffer_length = sizeof(bind->u.f4); bind->buffer = &bind->u.f4; bind->length = &bind->buffer_length; switch (valueType) { - case SQL_C_DOUBLE: { - bind->u.f4 = *(double*)paramValue; + case 
SQL_C_BIT: { + CHK_CONV(1, tsdb_int64_to_float(*(int8_t*)paramValue, &bind->u.f4)); + } break; + case SQL_C_STINYINT: + case SQL_C_TINYINT: { + CHK_CONV(1, tsdb_int64_to_float(*(int8_t*)paramValue, &bind->u.f4)); + } break; + case SQL_C_SSHORT: + case SQL_C_SHORT: { + CHK_CONV(1, tsdb_int64_to_float(*(int16_t*)paramValue, &bind->u.f4)); + } break; + case SQL_C_SLONG: + case SQL_C_LONG: { + CHK_CONV(1, tsdb_int64_to_float(*(int32_t*)paramValue, &bind->u.f4)); + } break; + case SQL_C_SBIGINT: { + CHK_CONV(1, tsdb_int64_to_float(*(int64_t*)paramValue, &bind->u.f4)); } break; case SQL_C_FLOAT: { bind->u.f4 = *(float*)paramValue; } break; - case SQL_C_CHAR: - case SQL_C_WCHAR: - case SQL_C_SHORT: - case SQL_C_SSHORT: + case SQL_C_DOUBLE: { + bind->u.f4 = (float)*(double*)paramValue; + } break; + case SQL_C_CHAR: { + stack_buffer_t buffer; buffer.next = 0; + tsdb_conv_t *client_to_utf8 = tsdb_conn_client_to_utf8(conn); + size_t slen = (size_t)*soi; + if (slen==SQL_NTS) slen = strlen((const char*)paramValue); + CHK_CONV(1, tsdb_conv_chars_to_float(client_to_utf8, &buffer, (const char *)paramValue, slen, &bind->u.f4)); + } break; + case SQL_C_WCHAR: { + stack_buffer_t buffer; buffer.next = 0; + tsdb_conv_t *utf16_to_utf8 = tsdb_conn_utf16_to_utf8(conn); + size_t slen = (size_t)*soi; + DASSERT(slen != SQL_NTS); + CHK_CONV(1, tsdb_conv_chars_to_float(utf16_to_utf8, &buffer, (const char *)paramValue, slen, &bind->u.f4)); + } break; case SQL_C_USHORT: - case SQL_C_LONG: - case SQL_C_SLONG: case SQL_C_ULONG: - case SQL_C_BIT: - case SQL_C_TINYINT: - case SQL_C_STINYINT: case SQL_C_UTINYINT: - case SQL_C_SBIGINT: case SQL_C_UBIGINT: case SQL_C_BINARY: case SQL_C_DATE: @@ -1535,34 +1913,59 @@ static SQLRETURN do_bind_param_value(sql_t *sql, int idx_row, int idx, param_bin SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_OUT_OF_RANGE, "no convertion from [%s[%d/0x%x]] to [%s[%d/0x%x]] for parameter [%d]", sql_c_type(valueType), valueType, valueType, - taos_data_type(type), type, type, idx+1); + taos_data_type(tsdb_type), tsdb_type, tsdb_type, idx+1); return SQL_ERROR; } break; } } break; case TSDB_DATA_TYPE_DOUBLE: { - bind->buffer_type = type; + bind->buffer_type = tsdb_type; bind->buffer_length = sizeof(bind->u.f8); bind->buffer = &bind->u.f8; bind->length = &bind->buffer_length; switch (valueType) { + case SQL_C_BIT: { + CHK_CONV(1, tsdb_int64_to_double(*(int8_t*)paramValue, &bind->u.f8)); + } break; + case SQL_C_STINYINT: + case SQL_C_TINYINT: { + CHK_CONV(1, tsdb_int64_to_double(*(int8_t*)paramValue, &bind->u.f8)); + } break; + case SQL_C_SSHORT: + case SQL_C_SHORT: { + CHK_CONV(1, tsdb_int64_to_double(*(int16_t*)paramValue, &bind->u.f8)); + } break; + case SQL_C_SLONG: + case SQL_C_LONG: { + CHK_CONV(1, tsdb_int64_to_double(*(int32_t*)paramValue, &bind->u.f8)); + } break; + case SQL_C_SBIGINT: { + CHK_CONV(1, tsdb_int64_to_double(*(int64_t*)paramValue, &bind->u.f8)); + } break; + case SQL_C_FLOAT: { + bind->u.f8 = *(float*)paramValue; + } break; case SQL_C_DOUBLE: { bind->u.f8 = *(double*)paramValue; } break; - case SQL_C_CHAR: - case SQL_C_WCHAR: - case SQL_C_SHORT: - case SQL_C_SSHORT: + case SQL_C_CHAR: { + stack_buffer_t buffer; buffer.next = 0; + tsdb_conv_t *client_to_utf8 = tsdb_conn_client_to_utf8(conn); + size_t slen = (size_t)*soi; + if (slen==SQL_NTS) slen = strlen((const char*)paramValue); + CHK_CONV(1, tsdb_conv_chars_to_double(client_to_utf8, &buffer, (const char *)paramValue, slen, &bind->u.f8)); + // CHK_CONV(1, tsdb_chars_to_double((const char*)paramValue, (size_t)*soi, &bind->u.f8)); + } 
break; + case SQL_C_WCHAR: { + stack_buffer_t buffer; buffer.next = 0; + tsdb_conv_t *utf16_to_utf8 = tsdb_conn_utf16_to_utf8(conn); + size_t slen = (size_t)*soi; + DASSERT(slen != SQL_NTS); + CHK_CONV(1, tsdb_conv_chars_to_double(utf16_to_utf8, &buffer, (const char *)paramValue, slen, &bind->u.f8)); + } break; case SQL_C_USHORT: - case SQL_C_LONG: - case SQL_C_SLONG: case SQL_C_ULONG: - case SQL_C_FLOAT: - case SQL_C_BIT: - case SQL_C_TINYINT: - case SQL_C_STINYINT: case SQL_C_UTINYINT: - case SQL_C_SBIGINT: case SQL_C_UBIGINT: case SQL_C_BINARY: case SQL_C_DATE: @@ -1577,35 +1980,51 @@ static SQLRETURN do_bind_param_value(sql_t *sql, int idx_row, int idx, param_bin SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_OUT_OF_RANGE, "no convertion from [%s[%d/0x%x]] to [%s[%d/0x%x]] for parameter [%d]", sql_c_type(valueType), valueType, valueType, - taos_data_type(type), type, type, idx+1); + taos_data_type(tsdb_type), tsdb_type, tsdb_type, idx+1); return SQL_ERROR; } break; } } break; - case TSDB_DATA_TYPE_BINARY: { - bind->buffer_type = type; + case TSDB_DATA_TYPE_TIMESTAMP: { + bind->buffer_type = tsdb_type; + bind->buffer_length = sizeof(bind->u.v8); + bind->buffer = &bind->u.v8; bind->length = &bind->buffer_length; switch (valueType) { + case SQL_C_CHAR: { + stack_buffer_t buffer; buffer.next = 0; + tsdb_conv_t *client_to_utf8 = tsdb_conn_client_to_utf8(conn); + size_t slen = (size_t)*soi; + DASSERT(slen != SQL_NTS); + CHK_CONV(1, tsdb_conv_chars_to_timestamp_ts(client_to_utf8, &buffer, (const char *)paramValue, slen, &bind->u.v8)); + } break; case SQL_C_WCHAR: { - DASSERT(soi); - DASSERT(*soi != SQL_NTS); - size_t bytes = 0; - SQLCHAR *utf8 = wchars_to_chars(paramValue, *soi/2, &bytes); - bind->allocated = 1; - bind->u.bin = utf8; - bind->buffer_length = bytes; - bind->buffer = bind->u.bin; + stack_buffer_t buffer; buffer.next = 0; + tsdb_conv_t *utf16_to_utf8 = tsdb_conn_utf16_to_utf8(conn); + size_t slen = (size_t)*soi; + DASSERT(slen != SQL_NTS); + CHK_CONV(1, tsdb_conv_chars_to_timestamp_ts(utf16_to_utf8, &buffer, (const char *)paramValue, slen, &bind->u.v8)); + } break; + case SQL_C_SBIGINT: { + int64_t t = *(int64_t*)paramValue; + bind->u.v8 = t; } break; - case SQL_C_BINARY: { - bind->u.bin = (unsigned char*)paramValue; - if (*soi == SQL_NTS) { - bind->buffer_length = strlen((const char*)paramValue); - } else { - bind->buffer_length = *soi; + case SQL_C_TYPE_TIMESTAMP: { + SQL_TIMESTAMP_STRUCT ts = *(SQL_TIMESTAMP_STRUCT*)paramValue; + struct tm vtm = {0}; + vtm.tm_year = ts.year - 1900; + vtm.tm_mon = ts.month - 1; + vtm.tm_mday = ts.day; + vtm.tm_hour = ts.hour; + vtm.tm_min = ts.minute; + vtm.tm_sec = ts.second; + int64_t t = (int64_t) mktime(&vtm); + if (t==-1) { + CHK_CONV(1, TSDB_CONV_NOT_VALID_TS); + // code never reached here } - bind->buffer = bind->u.bin; + bind->u.ts = t * 1000 + ts.fraction / 1000000; } break; - case SQL_C_CHAR: case SQL_C_SHORT: case SQL_C_SSHORT: case SQL_C_USHORT: @@ -1618,53 +2037,107 @@ static SQLRETURN do_bind_param_value(sql_t *sql, int idx_row, int idx, param_bin case SQL_C_TINYINT: case SQL_C_STINYINT: case SQL_C_UTINYINT: - case SQL_C_SBIGINT: case SQL_C_UBIGINT: + case SQL_C_BINARY: case SQL_C_DATE: case SQL_C_TIME: case SQL_C_TIMESTAMP: case SQL_C_TYPE_DATE: case SQL_C_TYPE_TIME: - case SQL_C_TYPE_TIMESTAMP: case SQL_C_NUMERIC: case SQL_C_GUID: default: { SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_OUT_OF_RANGE, "no convertion from [%s[%d/0x%x]] to [%s[%d/0x%x]] for parameter [%d]", sql_c_type(valueType), valueType, valueType, - taos_data_type(type), 
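In the SQL_C_TYPE_TIMESTAMP branch above, the scaling factors come from ODBC defining SQL_TIMESTAMP_STRUCT.fraction in nanoseconds while the value bound for TDengine is epoch milliseconds. A self-contained sketch of the same conversion (mirroring the patch: local time via mktime, fraction divided down to milliseconds):

#include <sql.h>       /* SQL_TIMESTAMP_STRUCT */
#include <stdint.h>
#include <time.h>

static int64_t sql_ts_to_epoch_ms(const SQL_TIMESTAMP_STRUCT *ts) {
  struct tm vtm = {0};
  vtm.tm_year = ts->year  - 1900;
  vtm.tm_mon  = ts->month - 1;
  vtm.tm_mday = ts->day;
  vtm.tm_hour = ts->hour;
  vtm.tm_min  = ts->minute;
  vtm.tm_sec  = ts->second;
  time_t t = mktime(&vtm);                 /* interprets vtm as local time */
  if (t == (time_t)-1) return -1;          /* not a representable local time */
  return (int64_t)t * 1000 + ts->fraction / 1000000;  /* ns -> ms for the fraction */
}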
type, type, idx+1); + taos_data_type(tsdb_type), tsdb_type, tsdb_type, idx+1); return SQL_ERROR; } break; } } break; - case TSDB_DATA_TYPE_TIMESTAMP: { - bind->buffer_type = type; - bind->buffer_length = sizeof(bind->u.v8); - bind->buffer = &bind->u.v8; + case TSDB_DATA_TYPE_BINARY: { + bind->buffer_type = tsdb_type; bind->length = &bind->buffer_length; switch (valueType) { case SQL_C_WCHAR: { - DASSERT(soi); - DASSERT(*soi != SQL_NTS); - size_t bytes = 0; - int r = 0; - int64_t t = 0; - SQLCHAR *utf8 = wchars_to_chars(paramValue, *soi/2, &bytes); - // why cast utf8 to 'char*' ? - r = taosParseTime((char*)utf8, &t, strlen((const char*)utf8), TSDB_TIME_PRECISION_MILLI, 0); - bind->u.v8 = t; - free(utf8); - if (r) { - SET_ERROR(sql, "22007", TSDB_CODE_ODBC_OUT_OF_RANGE, - "convertion from [%s[%d/0x%x]] to [%s[%d/0x%x]] for parameter [%d] failed", - sql_c_type(valueType), valueType, valueType, - taos_data_type(type), type, type, idx+1); - return SQL_ERROR; + // taos cares nothing about what would be stored in 'binary' as most sql implementations do + // thus, we just copy it as is + // it's caller's responsibility to maintain data-consistency + // if he/she is going to use 'binary' to store characters + // taos might extend it's sql syntax to let user specify + // what charset is to be used for specific 'binary' field when + // table is to be created + // in such way, 'binary' would be 'internationalized' + // but actually speaking, normally, 'char' field is a better + // one for this purpose + size_t slen = (size_t)*soi; + DASSERT(slen != SQL_NTS); + bind->u.bin = (unsigned char*)malloc(slen + 1); // add null-terminator, just for case of use + if (!bind->u.bin) { + CHK_CONV(1, TSDB_CONV_OOM); + // code never reached here } + memcpy(bind->u.bin, paramValue, slen); + bind->buffer_length = slen; + bind->buffer = bind->u.bin; + CHK_CONV(1, TSDB_CONV_OK); + + // tsdb_conv_t *utf16_to_server = tsdb_conn_utf16_to_server(conn); + // size_t slen = (size_t)*soi; + // DASSERT(slen != SQL_NTS); + // const char *buf = NULL; + // size_t blen = 0; + // TSDB_CONV_CODE code = tsdb_conv(utf16_to_server, NULL, (const char *)paramValue, slen, &buf, &blen); + // if (code==TSDB_CONV_OK) { + // if (buf!=(const char*)paramValue) { + // bind->allocated = 1; + // } + // bind->u.bin = (unsigned char*)buf; + // bind->buffer_length = blen; + // bind->buffer = bind->u.bin; + // } + // CHK_CONV(1, code); } break; - case SQL_C_SBIGINT: { - int64_t t = *(int64_t*)paramValue; - bind->u.v8 = t; + case SQL_C_CHAR: { + // taos cares nothing about what would be stored in 'binary' as most sql implementations do + // thus, we just copy it as is + // it's caller's responsibility to maintain data-consistency + // if he/she is going to use 'binary' to store characters + // taos might extend it's sql syntax to let user specify + // what charset is to be used for specific 'binary' field when + // table is to be created + // in such way, 'binary' would be 'internationalized' + // but actually speaking, normally, 'char' field is a better + // one for this purpose + size_t slen = (size_t)*soi; + if (slen==SQL_NTS) slen = strlen((const char*)paramValue); + // we can not use strndup, because ODBC client might pass in a buffer without null-terminated + bind->u.bin = (unsigned char*)malloc(slen + 1); // add null-terminator, just for case of use + if (!bind->u.bin) { + CHK_CONV(1, TSDB_CONV_OOM); + // code never reached here + } + memcpy(bind->u.bin, paramValue, slen); + bind->buffer_length = slen; + bind->buffer = bind->u.bin; + CHK_CONV(1, 
TSDB_CONV_OK); + // code never reached here + + // tsdb_conv_t *client_to_server = tsdb_conn_client_to_server(conn); + // size_t slen = (size_t)*soi; + // if (slen==SQL_NTS) slen = strlen((const char*)paramValue); + // const char *buf = NULL; + // size_t blen = 0; + // TSDB_CONV_CODE code = tsdb_conv(client_to_server, NULL, (const char *)paramValue, slen, &buf, &blen); + // if (code==TSDB_CONV_OK) { + // if (buf!=(const char*)paramValue) { + // bind->allocated = 1; + // } + // bind->u.bin = (unsigned char*)buf; + // bind->buffer_length = blen; + // bind->buffer = bind->u.bin; + // } + // CHK_CONV(1, code); } break; case SQL_C_SHORT: case SQL_C_SSHORT: @@ -1678,6 +2151,7 @@ static SQLRETURN do_bind_param_value(sql_t *sql, int idx_row, int idx, param_bin case SQL_C_TINYINT: case SQL_C_STINYINT: case SQL_C_UTINYINT: + case SQL_C_SBIGINT: case SQL_C_UBIGINT: case SQL_C_BINARY: case SQL_C_DATE: @@ -1685,40 +2159,55 @@ static SQLRETURN do_bind_param_value(sql_t *sql, int idx_row, int idx, param_bin case SQL_C_TIMESTAMP: case SQL_C_TYPE_DATE: case SQL_C_TYPE_TIME: - case SQL_C_TYPE_TIMESTAMP: + case SQL_C_TYPE_TIMESTAMP: // we don't provide auto-converstion case SQL_C_NUMERIC: case SQL_C_GUID: default: { SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_OUT_OF_RANGE, "no convertion from [%s[%d/0x%x]] to [%s[%d/0x%x]] for parameter [%d]", sql_c_type(valueType), valueType, valueType, - taos_data_type(type), type, type, idx+1); + taos_data_type(tsdb_type), tsdb_type, tsdb_type, idx+1); return SQL_ERROR; } break; } } break; case TSDB_DATA_TYPE_NCHAR: { - bind->buffer_type = type; + bind->buffer_type = tsdb_type; bind->length = &bind->buffer_length; switch (valueType) { case SQL_C_WCHAR: { - DASSERT(soi); - DASSERT(*soi != SQL_NTS); - size_t bytes = 0; - SQLCHAR *utf8 = wchars_to_chars(paramValue, *soi/2, &bytes); - bind->allocated = 1; - bind->u.nchar = (char*)utf8; - bind->buffer_length = bytes; - bind->buffer = bind->u.nchar; + tsdb_conv_t *utf16_to_server = tsdb_conn_utf16_to_server(conn); + size_t slen = (size_t)*soi; + if (slen==SQL_NTS) slen = strlen((const char*)paramValue); + const char *buf = NULL; + size_t blen = 0; + TSDB_CONV_CODE code = tsdb_conv(utf16_to_server, NULL, (const char *)paramValue, slen, &buf, &blen); + if (code==TSDB_CONV_OK) { + if (buf!=(const char*)paramValue) { + bind->allocated = 1; + } + bind->u.nchar = (char*)buf; + bind->buffer_length = blen; + bind->buffer = bind->u.nchar; + } + CHK_CONV(1, code); } break; case SQL_C_CHAR: { - bind->u.nchar = (char*)paramValue; - if (*soi == SQL_NTS) { - bind->buffer_length = strlen((const char*)paramValue); - } else { - bind->buffer_length = *soi; + tsdb_conv_t *client_to_server = tsdb_conn_client_to_server(conn); + size_t slen = (size_t)*soi; + if (slen==SQL_NTS) slen = strlen((const char*)paramValue); + const char *buf = NULL; + size_t blen = 0; + TSDB_CONV_CODE code = tsdb_conv(client_to_server, NULL, (const char *)paramValue, slen, &buf, &blen); + if (code==TSDB_CONV_OK) { + if (buf!=(const char*)paramValue) { + bind->allocated = 1; + } + bind->u.bin = (unsigned char*)buf; + bind->buffer_length = blen; + bind->buffer = bind->u.bin; } - bind->buffer = bind->u.nchar; + CHK_CONV(1, code); } break; case SQL_C_SHORT: case SQL_C_SSHORT: @@ -1740,14 +2229,14 @@ static SQLRETURN do_bind_param_value(sql_t *sql, int idx_row, int idx, param_bin case SQL_C_TIMESTAMP: case SQL_C_TYPE_DATE: case SQL_C_TYPE_TIME: - case SQL_C_TYPE_TIMESTAMP: + case SQL_C_TYPE_TIMESTAMP: // we don't provide auto-converstion case SQL_C_NUMERIC: case SQL_C_GUID: default: 
{ SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_OUT_OF_RANGE, "no convertion from [%s[%d/0x%x]] to [%s[%d/0x%x]] for parameter [%d]", sql_c_type(valueType), valueType, valueType, - taos_data_type(type), type, type, idx+1); + taos_data_type(tsdb_type), tsdb_type, tsdb_type, idx+1); return SQL_ERROR; } break; } @@ -1756,7 +2245,7 @@ static SQLRETURN do_bind_param_value(sql_t *sql, int idx_row, int idx, param_bin SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_OUT_OF_RANGE, "no convertion from [%s[%d/0x%x]] to [%s[%d/0x%x]] for parameter [%d]", sql_c_type(valueType), valueType, valueType, - taos_data_type(type), type, type, idx+1); + taos_data_type(tsdb_type), tsdb_type, tsdb_type, idx+1); return SQL_ERROR; } break; } @@ -1797,7 +2286,7 @@ static SQLRETURN do_execute(sql_t *sql) for (int i=0; in_rows; ++i) { TAOS_BIND *binds = NULL; if (sql->n_params>0) { - binds = (TAOS_BIND*)calloc(sql->n_params, sizeof(*binds)); + binds = (TAOS_BIND*)calloc((size_t)sql->n_params, sizeof(*binds)); if (!binds) { SET_ERROR(sql, "HY001", TSDB_CODE_ODBC_OOM, ""); return SQL_ERROR; @@ -1827,7 +2316,7 @@ static SQLRETURN do_execute(sql_t *sql) } sql->is_executed = 1; - if (sql->is_insert) return SQL_SUCCESS; + // if (sql->is_insert) return SQL_SUCCESS; SQLRETURN r = SQL_SUCCESS; PROFILE(sql->rs = taos_stmt_use_result(sql->stmt)); @@ -1872,8 +2361,12 @@ static SQLRETURN doSQLGetDiagField(SQLSMALLINT HandleType, SQLHANDLE Handle, SQLPOINTER DiagInfo, SQLSMALLINT BufferLength, SQLSMALLINT *StringLength) { - // if this function is not exported, isql will never call SQLGetDiagRec - return SQL_ERROR; + switch (DiagIdentifier) { + case SQL_DIAG_CLASS_ORIGIN: { + *StringLength = 0; + } break; + } + return SQL_SUCCESS; } SQLRETURN SQL_API SQLGetDiagField(SQLSMALLINT HandleType, SQLHANDLE Handle, @@ -1949,7 +2442,7 @@ static SQLRETURN doSQLBindParameter( SQLLEN *StrLen_or_Ind) { sql_t *sql = (sql_t*)StatementHandle; - if (!sql) return SQL_ERROR; + if (!sql) return SQL_INVALID_HANDLE; CHK_CONN(sql); CHK_CONN_TAOS(sql); @@ -1989,6 +2482,16 @@ static SQLRETURN doSQLBindParameter( return SQL_ERROR; } + if (ParameterValue==NULL) { + SET_ERROR(sql, "HY009", TSDB_CODE_ODBC_BAD_ARG, "ParameterValue [@%p] not allowed", ParameterValue); + return SQL_ERROR; + } + + if (StrLen_or_Ind==NULL) { + SET_ERROR(sql, "HY009", TSDB_CODE_ODBC_BAD_ARG, "StrLen_or_Ind [@%p] not allowed", StrLen_or_Ind); + return SQL_ERROR; + } + param_bind_t *pb = sql->params + ParameterNumber - 1; pb->ParameterNumber = ParameterNumber; @@ -2032,26 +2535,25 @@ static SQLRETURN doSQLDriverConnect( SQLUSMALLINT fDriverCompletion) { conn_t *conn = (conn_t*)hdbc; - if (!conn) return SQL_ERROR; + if (!conn) return SQL_INVALID_HANDLE; - if (fDriverCompletion!=SQL_DRIVER_NOPROMPT) { - SET_ERROR(conn, "HYC00", TSDB_CODE_ODBC_NOT_SUPPORT, "option[%d] other than SQL_DRIVER_NOPROMPT not supported yet", fDriverCompletion); + if (conn->taos) { + SET_ERROR(conn, "08002", TSDB_CODE_ODBC_CONNECTION_BUSY, "connection still in use"); return SQL_ERROR; } - if (conn->taos) { - SET_ERROR(conn, "08002", TSDB_CODE_ODBC_CONNECTION_BUSY, "connection still in use"); + if (fDriverCompletion!=SQL_DRIVER_NOPROMPT) { + SET_ERROR(conn, "HYC00", TSDB_CODE_ODBC_NOT_SUPPORT, "option[%d] other than SQL_DRIVER_NOPROMPT not supported yet", fDriverCompletion); return SQL_ERROR; } + NORM_STR_LENGTH(conn, szConnStrIn, cbConnStrIn); + // DSN=; UID=; PWD= const char *connStr = SDUP(szConnStrIn, cbConnStrIn); - char *serverName = NULL; - char *userName = NULL; - char *auth = NULL; - int bytes = 0; + conn_val_t val = {0}; 
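A quick usage sketch of the richer connection-string handling introduced here; todbc_parse_conn_string/conn_val_t/conn_val_reset come from todbc_flex.h and the flex scanner added later in this patch, and the DSN name, host, port and credentials below are placeholders, not values the driver requires:

#include <stdio.h>
#include "todbc_flex.h"   // conn_val_t, todbc_parse_conn_string, conn_val_reset

int main(void) {
  // Keys accepted by the scanner: DSN, UID, PWD, DB, Server, SERVER_ENC, CLIENT_ENC.
  const char *conn_str = "DSN=TAOS_DSN;UID=root;PWD=taosdata;"
                         "Server=192.168.1.10:6030;CLIENT_ENC=UTF-8;SERVER_ENC=UTF-8;";
  conn_val_t val = {0};
  if (todbc_parse_conn_string(conn_str, &val)) {   // non-zero means the string was rejected
    fprintf(stderr, "unrecognized connection string\n");
    return 1;
  }
  printf("server: %s, uid: %s\n",
         val.server ? val.server : "localhost",    // doSQLDriverConnect falls back to localhost
         val.uid    ? val.uid    : "(null)");
  conn_val_reset(&val);                            // release the parsed fields
  return 0;
}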
do { if (szConnStrIn && !connStr) { @@ -2059,32 +2561,52 @@ static SQLRETURN doSQLDriverConnect( break; } - int n = sscanf((const char*)connStr, "DSN=%m[^;]; UID=%m[^;]; PWD=%m[^;] %n", &serverName, &userName, &auth, &bytes); - if (n<1) { + int n = todbc_parse_conn_string((const char *)connStr, &val); + if (n) { SET_ERROR(conn, "HY000", TSDB_CODE_ODBC_BAD_CONNSTR, "unrecognized connection string: [%s]", (const char*)szConnStrIn); break; } + char *ip = NULL; + int port = 0; + if (val.server) { + char *p = strchr(val.server, ':'); + if (p) { + ip = strndup(val.server, (size_t)(p-val.server)); + port = atoi(p+1); + } + } + + if ((val.cli_enc && strcmp(val.cli_enc, conn->client_enc)) || + (val.svr_enc && strcmp(val.svr_enc, conn->server_enc)) ) + { + tsdb_conn_close_convs(conn); + if (val.cli_enc) { + snprintf(conn->client_enc, sizeof(conn->client_enc), "%s", val.cli_enc); + } + if (val.svr_enc) { + snprintf(conn->server_enc, sizeof(conn->server_enc), "%s", val.svr_enc); + } + } // TODO: data-race // TODO: shall receive ip/port from odbc.ini - conn->taos = taos_connect("localhost", userName, auth, NULL, 0); + // shall we support non-ansi uid/pwd/db etc? + conn->taos = taos_connect(ip ? ip : "localhost", val.uid, val.pwd, val.db, (uint16_t)port); + free(ip); ip = NULL; if (!conn->taos) { SET_ERROR(conn, "HY000", terrno, "failed to connect to data source"); break; } if (szConnStrOut) { - snprintf((char*)szConnStrOut, cbConnStrOutMax, "%s", connStr); + snprintf((char*)szConnStrOut, (size_t)cbConnStrOutMax, "%s", connStr); } if (pcbConnStrOut) { *pcbConnStrOut = cbConnStrIn; } - } while (0); - if (serverName) free(serverName); - if (userName) free(userName); - if (auth) free(auth); + conn_val_reset(&val); SFRE(connStr, szConnStrIn, cbConnStrIn); @@ -2111,7 +2633,7 @@ static SQLRETURN doSQLSetConnectAttr(SQLHDBC ConnectionHandle, SQLINTEGER StringLength) { conn_t *conn = (conn_t*)ConnectionHandle; - if (!conn) return SQL_ERROR; + if (!conn) return SQL_INVALID_HANDLE; if (Attribute != SQL_ATTR_AUTOCOMMIT) { SET_ERROR(conn, "HYC00", TSDB_CODE_ODBC_NOT_SUPPORT, "Attribute other than SQL_ATTR_AUTOCOMMIT not supported yet"); @@ -2141,7 +2663,7 @@ static SQLRETURN doSQLDescribeCol(SQLHSTMT StatementHandle, SQLSMALLINT *DecimalDigits, SQLSMALLINT *Nullable) { sql_t *sql = (sql_t*)StatementHandle; - if (!sql) return SQL_ERROR; + if (!sql) return SQL_INVALID_HANDLE; CHK_CONN(sql); CHK_CONN_TAOS(sql); @@ -2158,19 +2680,21 @@ static SQLRETURN doSQLDescribeCol(SQLHSTMT StatementHandle, SET_ERROR(sql, "07009", TSDB_CODE_ODBC_OUT_OF_RANGE, "invalid column number [%d]", ColumnNumber); return SQL_ERROR; } + if (BufferLength<0) { + SET_ERROR(sql, "HY090", TSDB_CODE_ODBC_BAD_ARG, ""); + return SQL_ERROR; + } TAOS_FIELD *field = fields + ColumnNumber - 1; if (ColumnName) { size_t n = sizeof(field->name); - if (n>BufferLength) n = BufferLength; + if (n>BufferLength) n = (size_t)BufferLength; strncpy((char*)ColumnName, field->name, n); } if (NameLength) { - *NameLength = strnlen(field->name, sizeof(field->name)); - } - if (ColumnSize) { - *ColumnSize = field->bytes; + *NameLength = (SQLSMALLINT)strnlen(field->name, sizeof(field->name)); } + if (ColumnSize) *ColumnSize = (SQLULEN)field->bytes; if (DecimalDigits) *DecimalDigits = 0; if (DataType) { @@ -2204,12 +2728,9 @@ static SQLRETURN doSQLDescribeCol(SQLHSTMT StatementHandle, } break; case TSDB_DATA_TYPE_TIMESTAMP: { - // *DataType = SQL_TIMESTAMP; - // *ColumnSize = 30; - // *DecimalDigits = 3; *DataType = SQL_TIMESTAMP; - *ColumnSize = sizeof(SQL_TIMESTAMP_STRUCT); 
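For context, a minimal sketch of the converter API that the CLIENT_ENC/SERVER_ENC settings above feed into; do_bind_param_value() follows the same tsdb_conv()/tsdb_conv_free() pattern with the connection-level converters returned by tsdb_conn_client_to_server()/tsdb_conn_utf16_to_server(). The encoding names are placeholders (any iconv-recognized names work), not defaults mandated by the driver:

#include <stdio.h>
#include <string.h>
#include "todbc_conv.h"   // tsdb_conv_open/close, tsdb_conv, tsdb_conv_free, stack_buffer_t

int main(void) {
  // Encoding names are placeholders; the driver takes them from CLIENT_ENC/SERVER_ENC.
  tsdb_conv_t *cnv = tsdb_conv_open("UTF-8", "GB18030");
  if (!cnv) return 1;

  stack_buffer_t buffer; buffer.next = 0;      // small conversions land here, not on the heap
  const char *src = "hello";
  const char *dst = NULL;
  size_t dlen = 0;
  TSDB_CONV_CODE code = tsdb_conv(cnv, &buffer, src, strlen(src), &dst, &dlen);
  if (code == TSDB_CONV_OK) {
    printf("converted %zu byte(s)\n", dlen);
    tsdb_conv_free(cnv, dst, &buffer, src);    // no-op when dst is src or lives in the stack buffer
  } else {
    fprintf(stderr, "conversion failed: %s\n", tsdb_conv_code_str(code));
  }
  tsdb_conv_close(cnv);
  return 0;
}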
- *DecimalDigits = 0; + if (ColumnSize) *ColumnSize = sizeof(SQL_TIMESTAMP_STRUCT); + if (DecimalDigits) *DecimalDigits = 0; } break; case TSDB_DATA_TYPE_NCHAR: { @@ -2218,7 +2739,7 @@ static SQLRETURN doSQLDescribeCol(SQLHSTMT StatementHandle, } break; case TSDB_DATA_TYPE_BINARY: { - *DataType = SQL_BINARY; + *DataType = SQL_CHAR; if (ColumnSize) *ColumnSize -= VARSTR_HEADER_SIZE; } break; @@ -2253,7 +2774,7 @@ SQLRETURN SQL_API SQLDescribeCol(SQLHSTMT StatementHandle, static SQLRETURN doSQLNumParams(SQLHSTMT hstmt, SQLSMALLINT *pcpar) { sql_t *sql = (sql_t*)hstmt; - if (!sql) return SQL_ERROR; + if (!sql) return SQL_INVALID_HANDLE; CHK_CONN(sql); CHK_CONN_TAOS(sql); @@ -2281,7 +2802,7 @@ static SQLRETURN doSQLNumParams(SQLHSTMT hstmt, SQLSMALLINT *pcpar) return SQL_ERROR; } - if (pcpar) *pcpar = params; + if (pcpar) *pcpar = (SQLSMALLINT)params; return SQL_SUCCESS; } @@ -2298,7 +2819,7 @@ static SQLRETURN doSQLSetStmtAttr(SQLHSTMT StatementHandle, SQLINTEGER StringLength) { sql_t *sql = (sql_t*)StatementHandle; - if (!sql) return SQL_ERROR; + if (!sql) return SQL_INVALID_HANDLE; CHK_CONN(sql); CHK_CONN_TAOS(sql); @@ -2355,1126 +2876,296 @@ SQLRETURN SQL_API SQLSetStmtAttr(SQLHSTMT StatementHandle, return r; } +#ifdef _MSC_VER + +#define POST_INSTALLER_ERROR(hwndParent, code, fmt, ...) \ +do { \ + char buf[4096]; \ + snprintf(buf, sizeof(buf), "%s[%d]%s():" fmt "", \ + basename((char*)__FILE__), __LINE__, __func__, \ + ##__VA_ARGS__); \ + SQLPostInstallerError(code, buf); \ + if (hwndParent) { \ + MessageBox(hwndParent, buf, "Error", MB_OK|MB_ICONEXCLAMATION); \ + } \ +} while (0) +typedef struct kv_s kv_t; +struct kv_s { + char *line; + size_t val; +}; +static BOOL get_driver_dll_path(HWND hwndParent, char *buf, size_t len) +{ + HMODULE hm = NULL; -static void init_routine(void) { - if (0) { - string_conv(NULL, NULL, NULL, 0, NULL, 0, NULL, NULL); - utf8_to_ucs4le(NULL, NULL); - ucs4le_to_utf8(NULL, 0, NULL); + if (GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, + (LPCSTR) &ConfigDSN, &hm) == 0) + { + int ret = GetLastError(); + POST_INSTALLER_ERROR(hwndParent, ODBC_ERROR_REQUEST_FAILED, "GetModuleHandle failed, error = %d\n", ret); + return FALSE; } - taos_init(); + if (GetModuleFileName(hm, buf, (DWORD)len) == 0) + { + int ret = GetLastError(); + POST_INSTALLER_ERROR(hwndParent, ODBC_ERROR_REQUEST_FAILED, "GetModuleFileName failed, error = %d\n", ret); + return FALSE; + } + return TRUE; } -static int do_field_display_size(TAOS_FIELD *field) { - switch (field->type) { - case TSDB_DATA_TYPE_TINYINT: - return 5; - break; +static BOOL doDSNAdd(HWND hwndParent, LPCSTR lpszDriver, LPCSTR lpszAttributes) +{ + BOOL r = TRUE; - case TSDB_DATA_TYPE_SMALLINT: - return 7; - break; + kv_t *kvs = NULL; - case TSDB_DATA_TYPE_INT: - return 12; - break; + kv_t dsn = {0}; + char *line = NULL; - case TSDB_DATA_TYPE_BIGINT: - return 22; - break; + do { + char driver_dll[MAX_PATH + 1]; + r = get_driver_dll_path(hwndParent, driver_dll, sizeof(driver_dll)); + if (!r) break; + + dsn.line = strdup("DSN=TAOS_DEMO"); + if (!dsn.line) { r = FALSE; break; } + + const char *p = lpszAttributes; + int ikvs = 0; + while (p && *p) { + line = strdup(p); + if (!line) { r = FALSE; break; } + char *v = strchr(line, '='); + if (!v) { r = FALSE; break; } + + if (strstr(line, "DSN")==line) { + if (dsn.line) { + free(dsn.line); + dsn.line = NULL; + dsn.val = 0; + } + dsn.line = line; + line = NULL; + } else { + kv_t *t = (kv_t*)realloc(kvs, (ikvs+1)*sizeof(*t)); + if 
(!t) { r = FALSE; free(line); break; } + t[ikvs].line = line; + *v = '\0'; + if (v) t[ikvs].val = v - line + 1; + line = NULL; + + kvs = t; + ++ikvs; + } - case TSDB_DATA_TYPE_FLOAT: { - return 12; - } break; + p += strlen(p) + 1; + } - case TSDB_DATA_TYPE_DOUBLE: { - return 20; - } break; + if (hwndParent) { + MessageBox(hwndParent, "Please use odbcconf to add DSN for TAOS ODBC Driver", "Warning!", MB_OK|MB_ICONEXCLAMATION); + } + if (!r) break; + + char *v = NULL; + v = strchr(dsn.line, '='); + if (!v) { r = FALSE; break; } + *v = '\0'; + dsn.val = v - dsn.line + 1; + + if ((!dsn.line)) { + if (!r) POST_INSTALLER_ERROR(hwndParent, ODBC_ERROR_REQUEST_FAILED, "lack of either DSN or Driver"); + } else { + if (r) r = SQLWritePrivateProfileString("ODBC Data Sources", dsn.line+dsn.val, lpszDriver, "Odbc.ini"); + if (r) r = SQLWritePrivateProfileString(dsn.line+dsn.val, "Driver", driver_dll, "Odbc.ini"); + } - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: { - return 3*(field->bytes - VARSTR_HEADER_SIZE) + 2; - } break; - - case TSDB_DATA_TYPE_TIMESTAMP: - return 26; - break; - - case TSDB_DATA_TYPE_BOOL: - return 7; - default: - break; - } - - return 10; -} - -// convertion from TSDB_DATA_TYPE_XXX to SQL_C_XXX -static SQLRETURN conv_tsdb_bool_to_c_bit(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b) -{ - int8_t v = b; - memcpy(target->ptr, &v, sizeof(v)); - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_bool_to_c_tinyint(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b) -{ - int8_t v = b; - memcpy(target->ptr, &v, sizeof(v)); - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_bool_to_c_short(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b) -{ - int16_t v = b; - memcpy(target->ptr, &v, sizeof(v)); - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_bool_to_c_long(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b) -{ - int32_t v = b; - memcpy(target->ptr, &v, sizeof(v)); - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_bool_to_c_sbigint(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b) -{ - int64_t v = b; - memcpy(target->ptr, &v, sizeof(v)); - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_bool_to_c_float(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b) -{ - float v = b; - memcpy(target->ptr, &v, sizeof(v)); - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_bool_to_c_double(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b) -{ - double v = b; - memcpy(target->ptr, &v, sizeof(v)); - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_bool_to_c_char(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b) -{ - DASSERT(target->len>0); - *target->soi = 1; - target->ptr[0] = '0' + b; - if (target->len>1) { - target->ptr[1] = '\0'; - } - - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_bool_to_c_binary(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b) -{ - DASSERT(target->len>0); - *target->soi = 1; - target->ptr[0] = '0' + b; - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_v1_to_c_tinyint(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t v1) -{ - int8_t v = v1; - memcpy(target->ptr, &v, sizeof(v)); - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_v1_to_c_short(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t v1) -{ - int16_t v = v1; - memcpy(target->ptr, &v, sizeof(v)); - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_v1_to_c_long(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t v1) -{ - int32_t v = v1; - 
memcpy(target->ptr, &v, sizeof(v)); - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_v1_to_c_sbigint(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t v1) -{ - int64_t v = v1; - memcpy(target->ptr, &v, sizeof(v)); - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_v1_to_c_float(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t v1) -{ - float v = v1; - memcpy(target->ptr, &v, sizeof(v)); - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_v1_to_c_double(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t v1) -{ - double v = v1; - memcpy(target->ptr, &v, sizeof(v)); - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_v1_to_c_char(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t v1) -{ - char buf[64]; - int n = snprintf(buf, sizeof(buf), "%d", v1); - DASSERT(nsoi = n; - strncpy(target->ptr, buf, (n>=target->len ? target->len : n+1)); - if (n<=target->len) return SQL_SUCCESS; - SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_TINYINT -> SQL_C_BIT"); - return SQL_SUCCESS_WITH_INFO; -} - -static SQLRETURN conv_tsdb_v1_to_c_binary(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t v1) -{ - char buf[64]; - int n = snprintf(buf, sizeof(buf), "%d", v1); - DASSERT(nsoi = n; - strncpy(target->ptr, buf, (n>target->len ? target->len : n)); - if (n<=target->len) return SQL_SUCCESS; - SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_TINYINT -> SQL_C_BIT"); - return SQL_SUCCESS_WITH_INFO; -} - -static SQLRETURN conv_tsdb_v2_to_c_short(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int16_t v2) -{ - int16_t v = v2; - memcpy(target->ptr, &v, sizeof(v)); - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_v2_to_c_long(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int16_t v2) -{ - int32_t v = v2; - memcpy(target->ptr, &v, sizeof(v)); - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_v2_to_c_sbigint(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int16_t v2) -{ - int64_t v = v2; - memcpy(target->ptr, &v, sizeof(v)); - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_v2_to_c_float(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int16_t v2) -{ - float v = v2; - memcpy(target->ptr, &v, sizeof(v)); - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_v2_to_c_double(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int16_t v2) -{ - double v = v2; - memcpy(target->ptr, &v, sizeof(v)); - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_v2_to_c_char(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int16_t v2) -{ - char buf[64]; - int n = snprintf(buf, sizeof(buf), "%d", v2); - DASSERT(nsoi = n; - strncpy(target->ptr, buf, (n>=target->len ? target->len : n+1)); - if (n<=target->len) return SQL_SUCCESS; - SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_SMALLINT -> SQL_C_CHAR"); - return SQL_SUCCESS_WITH_INFO; -} - -static SQLRETURN conv_tsdb_v2_to_c_binary(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int16_t v2) -{ - char buf[64]; - int n = snprintf(buf, sizeof(buf), "%d", v2); - DASSERT(nsoi = n; - strncpy(target->ptr, buf, (n>target->len ? 
target->len : n)); - if (n<=target->len) return SQL_SUCCESS; - SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_SMALLINT -> SQL_C_CHAR"); - return SQL_SUCCESS_WITH_INFO; -} - - -static SQLRETURN conv_tsdb_v4_to_c_long(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int32_t v4) -{ - int32_t v = v4; - memcpy(target->ptr, &v, sizeof(v)); - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_v4_to_c_sbigint(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int32_t v4) -{ - int64_t v = v4; - memcpy(target->ptr, &v, sizeof(v)); - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_v4_to_c_float(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int32_t v4) -{ - float v = v4; - memcpy(target->ptr, &v, sizeof(v)); - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_v4_to_c_double(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int32_t v4) -{ - double v = v4; - memcpy(target->ptr, &v, sizeof(v)); - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_v4_to_c_char(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int32_t v4) -{ - char buf[64]; - int n = snprintf(buf, sizeof(buf), "%d", v4); - DASSERT(nsoi = n; - strncpy(target->ptr, buf, (n>=target->len ? target->len : n+1)); - if (n<=target->len) return SQL_SUCCESS; - SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_INTEGER -> SQL_C_CHAR"); - return SQL_SUCCESS_WITH_INFO; -} - -static SQLRETURN conv_tsdb_v4_to_c_binary(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int32_t v4) -{ - char buf[64]; - int n = snprintf(buf, sizeof(buf), "%d", v4); - DASSERT(nsoi = n; - strncpy(target->ptr, buf, (n>target->len ? target->len : n)); - if (n<=target->len) return SQL_SUCCESS; - SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_INTEGER -> SQL_C_BINARY"); - return SQL_SUCCESS_WITH_INFO; -} - - -static SQLRETURN conv_tsdb_v8_to_c_sbigint(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int64_t v8) -{ - int64_t v = v8; - memcpy(target->ptr, &v, sizeof(v)); - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_v8_to_c_float(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int64_t v8) -{ - float v = v8; - memcpy(target->ptr, &v, sizeof(v)); - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_v8_to_c_double(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int64_t v8) -{ - double v = v8; - memcpy(target->ptr, &v, sizeof(v)); - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_v8_to_c_char(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int64_t v8) -{ - char buf[64]; - int n = snprintf(buf, sizeof(buf), "%" PRId64 "", v8); - DASSERT(nsoi = n; - strncpy(target->ptr, buf, (n>=target->len ? target->len : n+1)); - if (n<=target->len) return SQL_SUCCESS; - SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_BIGINT -> SQL_C_CHAR"); - return SQL_SUCCESS_WITH_INFO; -} - -static SQLRETURN conv_tsdb_v8_to_c_binary(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int64_t v8) -{ - char buf[64]; - int n = snprintf(buf, sizeof(buf), "%" PRId64 "", v8); - DASSERT(nsoi = n; - strncpy(target->ptr, buf, (n>target->len ? 
target->len : n)); - if (n<=target->len) return SQL_SUCCESS; - SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_BIGINT -> SQL_C_BINARY"); - return SQL_SUCCESS_WITH_INFO; -} - - -static SQLRETURN conv_tsdb_f4_to_c_float(sql_t *sql, c_target_t *target, TAOS_FIELD *field, float f4) -{ - float v = f4; - memcpy(target->ptr, &v, sizeof(v)); - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_f4_to_c_double(sql_t *sql, c_target_t *target, TAOS_FIELD *field, float f4) -{ - double v = f4; - memcpy(target->ptr, &v, sizeof(v)); - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_f4_to_c_char(sql_t *sql, c_target_t *target, TAOS_FIELD *field, float f4) -{ - char buf[64]; - int n = snprintf(buf, sizeof(buf), "%g", f4); - DASSERT(nsoi = n; - strncpy(target->ptr, buf, (n>=target->len ? target->len : n+1)); - if (n<=target->len) return SQL_SUCCESS; - SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_FLOAT -> SQL_C_CHAR"); - return SQL_SUCCESS_WITH_INFO; -} - -static SQLRETURN conv_tsdb_f4_to_c_binary(sql_t *sql, c_target_t *target, TAOS_FIELD *field, float f4) -{ - char buf[64]; - int n = snprintf(buf, sizeof(buf), "%g", f4); - DASSERT(nsoi = n; - strncpy(target->ptr, buf, (n>target->len ? target->len : n)); - if (n<=target->len) return SQL_SUCCESS; - SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_FLOAT -> SQL_C_BINARY"); - return SQL_SUCCESS_WITH_INFO; -} - - -static SQLRETURN conv_tsdb_f8_to_c_double(sql_t *sql, c_target_t *target, TAOS_FIELD *field, double f8) -{ - double v = f8; - memcpy(target->ptr, &v, sizeof(v)); - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_f8_to_c_char(sql_t *sql, c_target_t *target, TAOS_FIELD *field, double f8) -{ - char buf[64]; - int n = snprintf(buf, sizeof(buf), "%.6f", f8); - DASSERT(nsoi = n; - strncpy(target->ptr, buf, (n>=target->len ? target->len : n+1)); - if (n<=target->len) return SQL_SUCCESS; - SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_DOUBLE -> SQL_C_CHAR"); - return SQL_SUCCESS_WITH_INFO; -} - -static SQLRETURN conv_tsdb_f8_to_c_binary(sql_t *sql, c_target_t *target, TAOS_FIELD *field, double f8) -{ - char buf[64]; - int n = snprintf(buf, sizeof(buf), "%g", f8); - DASSERT(nsoi = n; - strncpy(target->ptr, buf, (n>target->len ? 
target->len : n)); - if (n<=target->len) return SQL_SUCCESS; - SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_DOUBLE -> SQL_C_BINARY"); - return SQL_SUCCESS_WITH_INFO; -} - - -static SQLRETURN conv_tsdb_ts_to_c_v8(sql_t *sql, c_target_t *target, TAOS_FIELD *field, SQL_TIMESTAMP_STRUCT *ts) -{ - struct tm tm = {0}; - tm.tm_sec = ts->second; - tm.tm_min = ts->minute; - tm.tm_hour = ts->hour; - tm.tm_mday = ts->day; - tm.tm_mon = ts->month - 1; - tm.tm_year = ts->year - 1900; - time_t t = mktime(&tm); - DASSERT(sizeof(t) == sizeof(int64_t)); - int64_t v = (int64_t)t; - v *= 1000; - v += ts->fraction / 1000000; - memcpy(target->ptr, &v, sizeof(v)); - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_ts_to_c_str(sql_t *sql, c_target_t *target, TAOS_FIELD *field, SQL_TIMESTAMP_STRUCT *ts) -{ - struct tm tm = {0}; - tm.tm_sec = ts->second; - tm.tm_min = ts->minute; - tm.tm_hour = ts->hour; - tm.tm_mday = ts->day; - tm.tm_mon = ts->month - 1; - tm.tm_year = ts->year - 1900; - - char buf[64]; - int n = strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", &tm); - DASSERT(n < sizeof(buf)); - - *target->soi = n; - - unsigned int fraction = ts->fraction; - fraction /= 1000000; - snprintf(target->ptr, target->len, "%s.%03d", buf, fraction); - if (target->soi) *target->soi = strlen((const char*)target->ptr); - - if (n <= target->len) { - return SQL_SUCCESS; - } - - SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_TIMESTAMP -> SQL_C_CHAR"); - return SQL_SUCCESS_WITH_INFO; -} - -static SQLRETURN conv_tsdb_ts_to_c_bin(sql_t *sql, c_target_t *target, TAOS_FIELD *field, SQL_TIMESTAMP_STRUCT *ts) -{ - struct tm tm = {0}; - tm.tm_sec = ts->second; - tm.tm_min = ts->minute; - tm.tm_hour = ts->hour; - tm.tm_mday = ts->day; - tm.tm_mon = ts->month - 1; - tm.tm_year = ts->year - 1900; - - char buf[64]; - int n = strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", &tm); - DASSERT(n < sizeof(buf)); - - unsigned int fraction = ts->fraction; - fraction /= 1000000; - snprintf(target->ptr, target->len, "%s.%03d", buf, fraction); - if (target->soi) *target->soi = strlen((const char*)target->ptr); - - if (n <= target->len) { - return SQL_SUCCESS; - } - - SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_TIMESTAMP -> SQL_C_BINARY"); - return SQL_SUCCESS_WITH_INFO; -} - -static SQLRETURN conv_tsdb_ts_to_c_ts(sql_t *sql, c_target_t *target, TAOS_FIELD *field, SQL_TIMESTAMP_STRUCT *ts) -{ - DASSERT(target->len == sizeof(*ts)); - memcpy(target->ptr, ts, sizeof(*ts)); - *target->soi = target->len; - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_bin_to_c_str(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const unsigned char *bin) -{ - if (target->len<1) { - SET_ERROR(sql, "HY090", TSDB_CODE_ODBC_BAD_ARG, ""); - return SQL_ERROR; - } - size_t field_bytes = field->bytes - VARSTR_HEADER_SIZE; - size_t n = strnlen((const char*)bin, field_bytes); - - if (n < target->len) { - memcpy(target->ptr, bin, n); - target->ptr[n] = '\0'; - *target->soi = n; - return SQL_SUCCESS; - } - n = target->len - 1; - *target->soi = n; - if (n > 0) { - memcpy(target->ptr, bin, n-1); - target->ptr[n-1] = '\0'; - } - SET_ERROR(sql, "01004", TSDB_CODE_ODBC_CONV_TRUNC, ""); - return SQL_SUCCESS_WITH_INFO; -} - -static SQLRETURN conv_tsdb_bin_to_c_bin(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const unsigned char *bin) -{ - if (target->len<1) { - SET_ERROR(sql, "HY090", TSDB_CODE_ODBC_BAD_ARG, ""); - return SQL_ERROR; - } - size_t field_bytes = field->bytes - VARSTR_HEADER_SIZE; - size_t n = 
strnlen((const char*)bin, field_bytes); - - if (n <= target->len) { - memcpy(target->ptr, bin, n); - if (nlen) target->ptr[n] = '\0'; - *target->soi = n; - return SQL_SUCCESS; - } - - n = target->len; - memcpy(target->ptr, bin, n); - *target->soi = n; - SET_ERROR(sql, "01004", TSDB_CODE_ODBC_CONV_TRUNC, ""); - return SQL_SUCCESS_WITH_INFO; -} - -static SQLRETURN conv_tsdb_str_to_c_bit(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str) -{ - int bytes = 0; - double f8 = 0; - int n = sscanf(str, "%lf%n", &f8, &bytes); - - int8_t v = f8; - memcpy(target->ptr, &v, sizeof(v)); - - *target->soi = 1; - - if (n!=1 || bytes!=strlen(str)) { - SET_ERROR(sql, "22018", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_NCHAR -> SQL_C_BIT"); - return SQL_SUCCESS_WITH_INFO; - } - - char buf[64]; - snprintf(buf, sizeof(buf), "%d", v); - - if (strcmp(buf, str)==0) { - if (v==0 || v==1) return SQL_SUCCESS; - SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_NCHAR -> SQL_C_BIT"); - return SQL_SUCCESS_WITH_INFO; - } - - if (f8>0 || f8<2) { - SET_ERROR(sql, "01S07", TSDB_CODE_ODBC_CONV_TRUNC, "TSDB_DATA_TYPE_NCHAR -> SQL_C_BIT"); - return SQL_SUCCESS_WITH_INFO; - } - - if (f8<0 || f8>2) { - SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_NCHAR -> SQL_C_BIT"); - return SQL_SUCCESS_WITH_INFO; - } - - SET_ERROR(sql, "01S07", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_NCHAR -> SQL_C_BIT"); - return SQL_SUCCESS_WITH_INFO; -} - -static SQLRETURN conv_tsdb_str_to_c_v1(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str) -{ - int bytes = 0; - double f8 = 0; - int n = sscanf(str, "%lf%n", &f8, &bytes); - - int8_t v = f8; - memcpy(target->ptr, &v, sizeof(v)); - - *target->soi = 1; - - if (n!=1 || bytes!=strlen(str)) { - SET_ERROR(sql, "22018", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_NCHAR -> SQL_C_TINYINT"); - return SQL_SUCCESS_WITH_INFO; - } - - char buf[64]; - snprintf(buf, sizeof(buf), "%d", v); - - if (strcmp(buf, str)==0) return SQL_SUCCESS; + for (int i=0; r && iINT8_MAX || f8 SQL_C_TINYINT"); - return SQL_SUCCESS_WITH_INFO; - } + if (dsn.line) free(dsn.line); + if (line) free(line); - SET_ERROR(sql, "01S07", TSDB_CODE_ODBC_CONV_TRUNC, "TSDB_DATA_TYPE_NCHAR -> SQL_C_TINYINT"); - return SQL_SUCCESS_WITH_INFO; + return r; } -static SQLRETURN conv_tsdb_str_to_c_v2(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str) +static BOOL doDSNConfig(HWND hwndParent, LPCSTR lpszDriver, LPCSTR lpszAttributes) { - int bytes = 0; - double f8 = 0; - int n = sscanf(str, "%lf%n", &f8, &bytes); - - int16_t v = f8; - memcpy(target->ptr, &v, sizeof(v)); - - *target->soi = 2; - - if (n!=1 || bytes!=strlen(str)) { - SET_ERROR(sql, "22018", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_NCHAR -> SQL_C_SHORT"); - return SQL_SUCCESS_WITH_INFO; - } - - char buf[64]; - snprintf(buf, sizeof(buf), "%d", v); - - if (strcmp(buf, str)==0) return SQL_SUCCESS; - - if (f8>INT16_MAX || f8 SQL_C_SHORT"); - return SQL_SUCCESS_WITH_INFO; + const char *p = lpszAttributes; + while (p && *p) { + p += strlen(p) + 1; } - - SET_ERROR(sql, "01S07", TSDB_CODE_ODBC_CONV_TRUNC, "TSDB_DATA_TYPE_NCHAR -> SQL_C_SHORT"); - return SQL_SUCCESS_WITH_INFO; + return FALSE; } -static SQLRETURN conv_tsdb_str_to_c_v4(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str) +static BOOL doDSNRemove(HWND hwndParent, LPCSTR lpszDriver, LPCSTR lpszAttributes) { - int bytes = 0; - double f8 = 0; - int n = sscanf(str, "%lf%n", &f8, &bytes); - - int32_t v = f8; - memcpy(target->ptr, &v, sizeof(v)); 
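Aside on the installer code added above: ConfigDSN receives lpszAttributes as a double-NUL-terminated list of "KEY=VALUE" strings, which is why doDSNAdd/doDSNRemove advance with p += strlen(p) + 1. A small self-contained illustration (the attribute values are made up):

#include <stdio.h>
#include <string.h>

int main(void) {
  // Two attributes followed by the terminating empty string, as the Driver
  // Manager passes them to ConfigDSN().
  const char attrs[] = "DSN=TAOS_DEMO\0Server=192.168.1.10:6030\0";
  const char *p = attrs;
  while (p && *p) {
    const char *eq = strchr(p, '=');
    if (eq) printf("key: %.*s, value: %s\n", (int)(eq - p), p, eq + 1);
    p += strlen(p) + 1;   // skip past this "KEY=VALUE" and its NUL
  }
  return 0;
}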
- - *target->soi = 4; - - if (n!=1 || bytes!=strlen(str)) { - SET_ERROR(sql, "22018", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_NCHAR -> SQL_C_LONG"); - return SQL_SUCCESS_WITH_INFO; - } - - char buf[64]; - snprintf(buf, sizeof(buf), "%d", v); - - if (strcmp(buf, str)==0) return SQL_SUCCESS; + BOOL r = TRUE; - if (f8>INT32_MAX || f8 SQL_C_LONG"); - return SQL_SUCCESS_WITH_INFO; - } - - SET_ERROR(sql, "01S07", TSDB_CODE_ODBC_CONV_TRUNC, "TSDB_DATA_TYPE_NCHAR -> SQL_C_LONG"); - return SQL_SUCCESS_WITH_INFO; -} + kv_t dsn = {0}; + char *line = NULL; -static SQLRETURN conv_tsdb_str_to_c_v8(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str) -{ - int bytes = 0; - double f8 = 0; - int n = sscanf(str, "%lf%n", &f8, &bytes); + do { + const char *p = lpszAttributes; + int ikvs = 0; + while (p && *p) { + line = strdup(p); + if (!line) { r = FALSE; break; } + char *v = strchr(line, '='); + if (!v) { r = FALSE; break; } + *v = '\0'; + + if (strstr(line, "DSN")==line) { + if (dsn.line) { + free(dsn.line); + dsn.line = NULL; + dsn.val = 0; + } + dsn.line = line; + dsn.val = v - line + 1; + line = NULL; + break; + } else { + free(line); + line = NULL; + } - int64_t v = f8; - memcpy(target->ptr, &v, sizeof(v)); + p += strlen(p) + 1; + } - *target->soi = 8; + if (!r) break; - if (n!=1 || bytes!=strlen(str)) { - SET_ERROR(sql, "22018", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_NCHAR -> SQL_C_SBIGINT"); - return SQL_SUCCESS_WITH_INFO; - } + if (!dsn.line) { + POST_INSTALLER_ERROR(hwndParent, ODBC_ERROR_REQUEST_FAILED, "lack of DSN"); + r = FALSE; + break; + } - char buf[64]; - snprintf(buf, sizeof(buf), "%" PRId64 "", v); + r = SQLWritePrivateProfileString("ODBC Data Sources", dsn.line+dsn.val, NULL, "Odbc.ini"); + if (!r) break; - if (strcmp(buf, str)==0) return SQL_SUCCESS; + char buf[8192]; + r = SQLGetPrivateProfileString(dsn.line+dsn.val, NULL, "null", buf, sizeof(buf), "Odbc.ini"); + if (!r) break; - if (f8>INT64_MAX || f8 SQL_C_SBIGINT"); - return SQL_SUCCESS_WITH_INFO; - } + int n = 0; + char *s = buf; + while (s && *s && n++<10) { + SQLWritePrivateProfileString(dsn.line+dsn.val, s, NULL, "Odbc.ini"); + s += strlen(s) + 1; + } + } while (0); - SET_ERROR(sql, "01S07", TSDB_CODE_ODBC_CONV_TRUNC, "TSDB_DATA_TYPE_NCHAR -> SQL_C_SBIGINT"); - return SQL_SUCCESS_WITH_INFO; + if (dsn.line) free(dsn.line); + if (line) free(line); + return r; } -static SQLRETURN conv_tsdb_str_to_c_f4(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str) +static BOOL doConfigDSN(HWND hwndParent, WORD fRequest, LPCSTR lpszDriver, LPCSTR lpszAttributes) { - int bytes = 0; - double f8 = 0; - int n = sscanf(str, "%lf%n", &f8, &bytes); - - float v = f8; - memcpy(target->ptr, &v, sizeof(v)); - - *target->soi = 4; - - if (n!=1 || bytes!=strlen(str)) { - SET_ERROR(sql, "22018", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_NCHAR -> SQL_C_FLOAT"); - return SQL_SUCCESS_WITH_INFO; + BOOL r = FALSE; + const char *sReq = NULL; + switch(fRequest) { + case ODBC_ADD_DSN: sReq = "ODBC_ADD_DSN"; break; + case ODBC_CONFIG_DSN: sReq = "ODBC_CONFIG_DSN"; break; + case ODBC_REMOVE_DSN: sReq = "ODBC_REMOVE_DSN"; break; + default: sReq = "UNKNOWN"; break; } - - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_str_to_c_f8(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str) -{ - int bytes = 0; - double f8 = 0; - int n = sscanf(str, "%lf%n", &f8, &bytes); - - float v = f8; - memcpy(target->ptr, &v, sizeof(v)); - - *target->soi = 8; - - if (n!=1 || bytes!=strlen(str)) { - SET_ERROR(sql, "22018", 
TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_NCHAR -> SQL_C_DOUBLE"); - return SQL_SUCCESS_WITH_INFO; - } - - return SQL_SUCCESS; -} - -static SQLRETURN conv_tsdb_str_to_c_str(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str) -{ - return conv_tsdb_bin_to_c_str(sql, target, field, (const unsigned char*)str); -} - -static SQLRETURN conv_tsdb_str_to_c_bin(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str) -{ - return conv_tsdb_bin_to_c_bin(sql, target, field, (const unsigned char*)str); -} - - - - -const char* tsdb_int64_to_bit(int64_t src, int8_t *dst) -{ - *dst = src; - if (src==0 || src==1) return NULL; - return "22003"; -} - -const char* tsdb_int64_to_tinyint(int64_t src, int8_t *dst) -{ - *dst = src; - if (src>=SCHAR_MIN && src<=SCHAR_MAX) return NULL; - return "22003"; -} - -const char* tsdb_int64_to_smallint(int64_t src, int16_t *dst) -{ - *dst = src; - if (src>=SHRT_MIN && src<=SHRT_MAX) return NULL; - return "22003"; -} - -const char* tsdb_int64_to_int(int64_t src, int32_t *dst) -{ - *dst = src; - if (src>=LONG_MIN && src<=LONG_MAX) return NULL; - return "22003"; -} - -const char* tsdb_int64_to_bigint(int64_t src, int64_t *dst) -{ - *dst = src; - return NULL; -} - -const char* tsdb_int64_to_ts(int64_t src, int64_t *dst) -{ - *dst = src; - - char buf[4096]; - int n = snprintf(buf, sizeof(buf), "%" PRId64 "", src); - DASSERT(n>=0); - DASSERT(n=2) return "22003"; - - char buf[4096]; - int n = snprintf(buf, sizeof(buf), "%.*g", precision, src); - DASSERT(n>=0); - DASSERT(nSCHAR_MAX) return "22003"; - - char buf[4096]; - int n = snprintf(buf, sizeof(buf), "%.*g", precision, src); - DASSERT(n>=0); - DASSERT(nSHRT_MAX) return "22003"; - - char buf[4096]; - int n = snprintf(buf, sizeof(buf), "%.*g", precision, src); - DASSERT(n>=0); - DASSERT(nLONG_MAX) return "22003"; - - char buf[4096]; - int n = snprintf(buf, sizeof(buf), "%.*g", precision, src); - DASSERT(n>=0); - DASSERT(nLLONG_MAX) return "22003"; - - char buf[4096]; - int n = snprintf(buf, sizeof(buf), "%.*g", precision, src); - DASSERT(n>=0); - DASSERT(n=0); - DASSERT(n=0); - DASSERT(n2>=0); - DASSERT(n1=0); - if (n>=dlen) return "22001"; - - return NULL; + POST_INSTALLER_ERROR(hwndParent, ODBC_ERROR_GENERAL_ERR, "not implemented yet"); + return FALSE; } -const char* tsdb_chars_to_bit(const char *src, int8_t *dst) +BOOL INSTAPI ConfigDriver(HWND hwndParent, WORD fRequest, LPCSTR lpszDriver, LPCSTR lpszArgs, + LPSTR lpszMsg, WORD cbMsgMax, WORD *pcbMsgOut) { - int bytes = 0; - int64_t v = 0; - int n = sscanf(src, "%" PRId64 "%n", &v, &bytes); - if (n!=1) return "22018"; - - if (bytes!=strlen(src)) { - if (src[bytes-1]=='.') { - if (v==0 || v==1) return "22001"; - - return "22003"; - } - return "22018"; - } - - if (v==0 || v==1) return NULL; - - return "22003"; -} - -const char* tsdb_chars_to_tinyint(const char *src, int8_t *dst) -{ - int bytes = 0; - int64_t v = 0; - int n = sscanf(src, "%" PRId64 "%n", &v, &bytes); - if (n!=1) return "22018"; - - - if (bytes!=strlen(src)) { - if (src[bytes-1]=='.') { - if (vSCHAR_MAX) return "22001"; - - return "22003"; - } - return "22018"; - } - - if (vSCHAR_MAX) return "22001"; - - return NULL; + POST_INSTALLER_ERROR(hwndParent, ODBC_ERROR_GENERAL_ERR, "not implemented yet"); + return FALSE; } -const char* tsdb_chars_to_smallint(const char *src, int16_t *dst) -{ - int bytes = 0; - int64_t v = 0; - int n = sscanf(src, "%" PRId64 "%n", &v, &bytes); - if (n!=1) return "22018"; - +#endif // _MSC_VER - if (bytes!=strlen(src)) { - if (src[bytes-1]=='.') { - if 
(vSHRT_MAX) return "22001"; - return "22003"; - } - return "22018"; - } - - if (vSHRT_MAX) return "22001"; - - return NULL; -} - -const char* tsdb_chars_to_int(const char *src, int32_t *dst) -{ - int bytes = 0; - int64_t v = 0; - int n = sscanf(src, "%" PRId64 "%n", &v, &bytes); - if (n!=1) return "22018"; - - - if (bytes!=strlen(src)) { - if (src[bytes-1]=='.') { - if (vLONG_MAX) return "22001"; - - return "22003"; - } - return "22018"; - } - - if (vLONG_MAX) return "22001"; - - return NULL; +static void init_routine(void) { + taos_init(); } -const char* tsdb_chars_to_bigint(const char *src, int64_t *dst) -{ - int bytes = 0; - int64_t v = 0; - int n = sscanf(src, "%" PRId64 "%n", &v, &bytes); - if (n!=1) return "22018"; - +static size_t do_field_display_size(TAOS_FIELD *field) { + switch (field->type) { + case TSDB_DATA_TYPE_TINYINT: + return 5; + break; - if (bytes!=strlen(src)) { - if (src[bytes-1]=='.') { - if (vLLONG_MAX) return "22001"; + case TSDB_DATA_TYPE_SMALLINT: + return 7; + break; - return "22003"; - } - return "22018"; - } + case TSDB_DATA_TYPE_INT: + return 12; + break; - if (vLLONG_MAX) return "22001"; + case TSDB_DATA_TYPE_BIGINT: + return 22; + break; - return NULL; -} + case TSDB_DATA_TYPE_FLOAT: { + return 12; + } break; -const char* tsdb_chars_to_ts(const char *src, int64_t *dst) -{ - int bytes = 0; - int64_t v = 0; - int n = sscanf(src, "%" PRId64 "%n", &v, &bytes); - if (n!=1) return "22018"; + case TSDB_DATA_TYPE_DOUBLE: { + return 20; + } break; + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: { + return 3*((size_t)field->bytes - VARSTR_HEADER_SIZE) + 2; + } break; - if (bytes!=strlen(src)) { - if (src[bytes-1]=='.') { - if (vLLONG_MAX) return "22001"; + case TSDB_DATA_TYPE_TIMESTAMP: + return 26; + break; - return "22003"; - } - return "22018"; + case TSDB_DATA_TYPE_BOOL: + return 7; + default: + break; } - if (vLLONG_MAX) return "22001"; - - return NULL; -} - -const char* tsdb_chars_to_float(const char *src, float *dst) -{ - int bytes = 0; - int n = sscanf(src, "%f%n", dst, &bytes); - if (n!=1) return "22018"; - - if (bytes!=strlen(src)) return "22018"; - - return NULL; -} - -const char* tsdb_chars_to_double(const char *src, double *dst) -{ - int bytes = 0; - int n = sscanf(src, "%lf%n", dst, &bytes); - if (n!=1) return "22018"; - - if (bytes!=strlen(src)) return "22018"; - - return NULL; + return 10; } -const char* tsdb_chars_to_char(const char *src, char *dst, size_t dlen) -{ - int n = snprintf(dst, dlen, "%s", src); - if (n>=dlen) return "22001"; - return NULL; -} diff --git a/src/connector/odbc/src/todbc.def b/src/connector/odbc/src/todbc.def new file mode 100644 index 0000000000000000000000000000000000000000..1e080f01983ec2b38d657004291008f6da6198dc --- /dev/null +++ b/src/connector/odbc/src/todbc.def @@ -0,0 +1,31 @@ +EXPORTS +SQLAllocEnv +SQLFreeEnv +SQLAllocConnect +SQLFreeConnect +SQLConnect +SQLDisconnect +SQLAllocStmt +SQLAllocHandle +SQLFreeStmt +SQLExecDirect +SQLExecDirectW +SQLNumResultCols +SQLRowCount +SQLColAttribute +SQLGetData +SQLFetch +SQLPrepare +SQLExecute +SQLGetDiagField +SQLGetDiagRec +SQLBindParameter +SQLDriverConnect +SQLSetConnectAttr +SQLDescribeCol +SQLNumParams +SQLSetStmtAttr +ConfigDSN +ConfigTranslator +ConfigDriver + diff --git a/src/connector/odbc/src/todbc.rc.in b/src/connector/odbc/src/todbc.rc.in new file mode 100644 index 0000000000000000000000000000000000000000..cf0b21145456eb20097ed3dda1e582cee1e343d2 --- /dev/null +++ b/src/connector/odbc/src/todbc.rc.in @@ -0,0 +1,31 @@ +1 VERSIONINFO + FILEVERSION 
${TD_VER_NUMBER} + PRODUCTVERSION ${TD_VER_NUMBER} + FILEFLAGSMASK 0x17L +#ifdef _DEBUG + FILEFLAGS 0x1L +#else + FILEFLAGS 0x0L +#endif + FILEOS 0x4L + FILETYPE 0x0L + FILESUBTYPE 0x0L +BEGIN + BLOCK "StringFileInfo" + BEGIN + BLOCK "040904b0" + BEGIN + VALUE "FileDescription", "ODBC Driver for TDengine" + VALUE "FileVersion", "${TD_VER_NUMBER}" + VALUE "InternalName", "todbc.dll(${TD_VER_CPUTYPE})" + VALUE "LegalCopyright", "Copyright (C) 2020 TAOS Data" + VALUE "OriginalFilename", "" + VALUE "ProductName", "todbc.dll(${TD_VER_CPUTYPE})" + VALUE "ProductVersion", "${TD_VER_NUMBER}" + END + END + BLOCK "VarFileInfo" + BEGIN + VALUE "Translation", 0x409, 1200 + END +END \ No newline at end of file diff --git a/src/connector/odbc/src/todbc.rsp b/src/connector/odbc/src/todbc.rsp new file mode 100644 index 0000000000000000000000000000000000000000..f5e511565872e51a6afa838ad6cb3d1774c2c422 --- /dev/null +++ b/src/connector/odbc/src/todbc.rsp @@ -0,0 +1,5 @@ +INSTALLDRIVER "TAOS ODBC|Driver=todbc.dll|FileUsage=0|ConnectFunctions=YYN" + + + + diff --git a/src/connector/odbc/src/todbc_conv.c b/src/connector/odbc/src/todbc_conv.c new file mode 100644 index 0000000000000000000000000000000000000000..9c0f19764c2b456c63b6d0a01402e37868a0a366 --- /dev/null +++ b/src/connector/odbc/src/todbc_conv.c @@ -0,0 +1,660 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include "todbc_conv.h" + +#include "todbc_log.h" + +#include +#include +#include +#include +#include + +const char* tsdb_conv_code_str(TSDB_CONV_CODE code) { + switch (code) { + case TSDB_CONV_OK: return "TSDB_CONV_OK"; + case TSDB_CONV_NOT_AVAIL: return "TSDB_CONV_NOT_AVAIL"; + case TSDB_CONV_OOM: return "TSDB_CONV_OOM"; + case TSDB_CONV_OOR: return "TSDB_CONV_OOR"; + case TSDB_CONV_TRUNC_FRACTION: return "TSDB_CONV_TRUNC_FRACTION"; + case TSDB_CONV_TRUNC: return "TSDB_CONV_TRUNC"; + case TSDB_CONV_CHAR_NOT_NUM: return "TSDB_CONV_CHAR_NOT_NUM"; + case TSDB_CONV_CHAR_NOT_TS: return "TSDB_CONV_CHAR_NOT_TS"; + case TSDB_CONV_NOT_VALID_TS: return "TSDB_CONV_NOT_VALID_TS"; + case TSDB_CONV_GENERAL: return "TSDB_CONV_GENERAL"; + case TSDB_CONV_SRC_TOO_LARGE: return "TSDB_CONV_SRC_TOO_LARGE"; + case TSDB_CONV_SRC_BAD_SEQ: return "TSDB_CONV_SRC_BAD_SEQ"; + case TSDB_CONV_SRC_INCOMPLETE: return "TSDB_CONV_SRC_INCOMPLETE"; + case TSDB_CONV_SRC_GENERAL: return "TSDB_CONV_SRC_GENERAL"; + case TSDB_CONV_BAD_CHAR: return "TSDB_CONV_BAD_CHAR"; + default: return "UNKNOWN"; + }; +} + +// src: int +TSDB_CONV_CODE tsdb_int64_to_bit(int64_t src, int8_t *dst) { + *dst = (int8_t)src; + if (src==0 || src==1) return TSDB_CONV_OK; + return TSDB_CONV_OOR; +} + +TSDB_CONV_CODE tsdb_int64_to_tinyint(int64_t src, int8_t *dst) { + *dst = (int8_t)src; + if (src == *dst) return TSDB_CONV_OK; + return TSDB_CONV_OOR; +} + +TSDB_CONV_CODE tsdb_int64_to_smallint(int64_t src, int16_t *dst) { + *dst = (int16_t)src; + if (src == *dst) return TSDB_CONV_OK; + return TSDB_CONV_OOR; +} + +TSDB_CONV_CODE tsdb_int64_to_int(int64_t src, int32_t *dst) { + *dst = (int32_t)src; + if (src == *dst) return TSDB_CONV_OK; + return TSDB_CONV_OOR; +} + +TSDB_CONV_CODE tsdb_int64_to_bigint(int64_t src, int64_t *dst) { + *dst = src; + return TSDB_CONV_OK; +} + +TSDB_CONV_CODE tsdb_int64_to_ts(int64_t src, int64_t *dst) { + *dst = src; + + time_t t = (time_t)(src / 1000); + struct tm tm = {0}; + if (localtime_r(&t, &tm)) return TSDB_CONV_OK; + + return TSDB_CONV_OOR; +} + +TSDB_CONV_CODE tsdb_int64_to_float(int64_t src, float *dst) { + *dst = (float)src; + + int64_t v = (int64_t)*dst; + if (v==src) return TSDB_CONV_OK; + + return TSDB_CONV_OOR; +} + +TSDB_CONV_CODE tsdb_int64_to_double(int64_t src, double *dst) { + *dst = (double)src; + + int64_t v = (int64_t)*dst; + if (v==src) return TSDB_CONV_OK; + + return TSDB_CONV_OOR; +} + +TSDB_CONV_CODE tsdb_int64_to_char(int64_t src, char *dst, size_t dlen) { + int n = snprintf(dst, dlen, "%" PRId64 "", src); + DASSERT(n>=0); + + if (n=2) return TSDB_CONV_OOR; + if (src == *dst) return TSDB_CONV_OK; + + int64_t v = (int64_t)src; + if (v == *dst) return TSDB_CONV_TRUNC_FRACTION; + + return TSDB_CONV_TRUNC; +} + +TSDB_CONV_CODE tsdb_double_to_tinyint(double src, int8_t *dst) { + *dst = (int8_t)src; + + if (srcSCHAR_MAX) return TSDB_CONV_OOR; + if (src == *dst) return TSDB_CONV_OK; + + int64_t v = (int64_t)src; + if (v == *dst) return TSDB_CONV_TRUNC_FRACTION; + + return TSDB_CONV_TRUNC; +} + +TSDB_CONV_CODE tsdb_double_to_smallint(double src, int16_t *dst) { + *dst = (int16_t)src; + + if (srcSHRT_MAX) return TSDB_CONV_OOR; + if (src == *dst) return TSDB_CONV_OK; + + int64_t v = (int64_t)src; + if (v == *dst) return TSDB_CONV_TRUNC_FRACTION; + + return TSDB_CONV_TRUNC; +} + +TSDB_CONV_CODE tsdb_double_to_int(double src, int32_t *dst) { + *dst = (int32_t)src; + + if (srcLONG_MAX) return TSDB_CONV_OOR; + if (src == *dst) return TSDB_CONV_OK; + + int64_t v = (int64_t)src; + if (v == *dst) return 
TSDB_CONV_TRUNC_FRACTION; + + return TSDB_CONV_TRUNC; +} + +TSDB_CONV_CODE tsdb_double_to_bigint(double src, int64_t *dst) { + *dst = (int64_t)src; + + if (srcLLONG_MAX) return TSDB_CONV_OOR; + if (src == *dst) return TSDB_CONV_OK; + + int64_t v = (int64_t)src; + if (v == *dst) return TSDB_CONV_TRUNC_FRACTION; + + return TSDB_CONV_TRUNC; +} + +TSDB_CONV_CODE tsdb_double_to_ts(double src, int64_t *dst) { + TSDB_CONV_CODE code = tsdb_double_to_bigint(src, dst); + + if (code==TSDB_CONV_OK || code==TSDB_CONV_TRUNC_FRACTION) { + int64_t v = (int64_t)src; + time_t t = (time_t)(v / 1000); + struct tm tm = {0}; + if (localtime_r(&t, &tm)) return TSDB_CONV_OK; + + return TSDB_CONV_OOR; + } + + return code; +} + +TSDB_CONV_CODE tsdb_double_to_char(double src, char *dst, size_t dlen) { + int n = snprintf(dst, dlen, "%lg", src); + DASSERT(n>=0); + + if (n=0); + if (n=19) return TSDB_CONV_TRUNC_FRACTION; + + return TSDB_CONV_TRUNC; +} + +// src: chars +TSDB_CONV_CODE tsdb_chars_to_bit(const char *src, size_t smax, int8_t *dst) { + if (strcmp(src, "0")==0) { + *dst = 0; + return TSDB_CONV_OK; + } + + if (strcmp(src, "1")==0) { + *dst = 1; + return TSDB_CONV_OK; + } + + double v; + int bytes; + int n = sscanf(src, "%lg%n", &v, &bytes); + + if (n!=1) return TSDB_CONV_CHAR_NOT_NUM; + if (bytes!=strlen(src)) return TSDB_CONV_CHAR_NOT_NUM; + + if (v<0 || v>=2) return TSDB_CONV_OOR; + + return TSDB_CONV_TRUNC_FRACTION; +} + +TSDB_CONV_CODE tsdb_chars_to_tinyint(const char *src, size_t smax, int8_t *dst) { + int64_t v; + TSDB_CONV_CODE code = tsdb_chars_to_bigint(src, smax, &v); + if (code!=TSDB_CONV_OK) return code; + + *dst = (int8_t)v; + + if (v==*dst) return TSDB_CONV_OK; + + return TSDB_CONV_OOR; +} + +TSDB_CONV_CODE tsdb_chars_to_smallint(const char *src, size_t smax, int16_t *dst) { + int64_t v; + TSDB_CONV_CODE code = tsdb_chars_to_bigint(src, smax, &v); + if (code!=TSDB_CONV_OK) return code; + + *dst = (int16_t)v; + + if (v==*dst) return TSDB_CONV_OK; + + return TSDB_CONV_OOR; +} + +TSDB_CONV_CODE tsdb_chars_to_int(const char *src, size_t smax, int32_t *dst) { + int64_t v; + TSDB_CONV_CODE code = tsdb_chars_to_bigint(src, smax, &v); + if (code!=TSDB_CONV_OK) return code; + + *dst = (int32_t)v; + + if (v==*dst) return TSDB_CONV_OK; + + return TSDB_CONV_OOR; +} + +TSDB_CONV_CODE tsdb_chars_to_bigint(const char *src, size_t smax, int64_t *dst) { + int bytes; + int n = sscanf(src, "%" PRId64 "%n", dst, &bytes); + + if (n!=1) return TSDB_CONV_CHAR_NOT_NUM; + if (bytes==strlen(src)) { + return TSDB_CONV_OK; + } + + double v; + n = sscanf(src, "%lg%n", &v, &bytes); + if (n!=1) return TSDB_CONV_CHAR_NOT_NUM; + if (bytes==strlen(src)) { + return TSDB_CONV_TRUNC_FRACTION; + } + + return TSDB_CONV_OK; +} + +TSDB_CONV_CODE tsdb_chars_to_ts(const char *src, size_t smax, int64_t *dst) { + int64_t v; + TSDB_CONV_CODE code = tsdb_chars_to_bigint(src, smax, &v); + if (code!=TSDB_CONV_OK) return code; + + *dst = v; + + if (v==*dst) { + time_t t = (time_t)(v / 1000); + struct tm tm = {0}; + if (localtime_r(&t, &tm)) return TSDB_CONV_OK; + } + + return TSDB_CONV_OOR; +} + +TSDB_CONV_CODE tsdb_chars_to_float(const char *src, size_t smax, float *dst) { + int bytes; + int n = sscanf(src, "%g%n", dst, &bytes); + + if (n==1 && bytes==strlen(src)) { + return TSDB_CONV_OK; + } + + return TSDB_CONV_CHAR_NOT_NUM; +} + +TSDB_CONV_CODE tsdb_chars_to_double(const char *src, size_t smax, double *dst) { + int bytes; + int n = sscanf(src, "%lg%n", dst, &bytes); + + if (n==1 && bytes==strlen(src)) { + return TSDB_CONV_OK; + } + + return 
TSDB_CONV_CHAR_NOT_NUM; +} + +TSDB_CONV_CODE tsdb_chars_to_timestamp(const char *src, size_t smax, SQL_TIMESTAMP_STRUCT *dst) { + int64_t v = 0; + // why cast to 'char*' ? + int r = taosParseTime((char*)src, &v, (int32_t)smax, TSDB_TIME_PRECISION_MILLI, 0); + + if (r) { + return TSDB_CONV_CHAR_NOT_TS; + } + + time_t t = v/1000; + struct tm vtm = {0}; + localtime_r(&t, &vtm); + dst->year = (SQLSMALLINT)(vtm.tm_year + 1900); + dst->month = (SQLUSMALLINT)(vtm.tm_mon + 1); + dst->day = (SQLUSMALLINT)(vtm.tm_mday); + dst->hour = (SQLUSMALLINT)(vtm.tm_hour); + dst->minute = (SQLUSMALLINT)(vtm.tm_min); + dst->second = (SQLUSMALLINT)(vtm.tm_sec); + dst->fraction = (SQLUINTEGER)(v%1000 * 1000000); + + return TSDB_CONV_OK; +} + +TSDB_CONV_CODE tsdb_chars_to_timestamp_ts(const char *src, size_t smax, int64_t *dst) { + // why cast to 'char*' ? + int r = taosParseTime((char*)src, dst, (int32_t)smax, TSDB_TIME_PRECISION_MILLI, 0); + + if (r) { + return TSDB_CONV_CHAR_NOT_TS; + } + + return TSDB_CONV_OK; +} + +TSDB_CONV_CODE tsdb_chars_to_char(const char *src, size_t smax, char *dst, size_t dmax) { + int n = snprintf(dst, dmax, "%s", src); + DASSERT(n>=0); + if (nnext + bytes; + if (next>sizeof(buffer->buf)) return NULL; + + char *p = buffer->buf + buffer->next; + buffer->next = next; + return p; +} + +int is_owned_by_stack_buffer(stack_buffer_t *buffer, const char *ptr) { + if (!buffer) return 0; + if (ptr>=buffer->buf && ptrbuf+buffer->next) return 1; + return 0; +} + + +struct tsdb_conv_s { + iconv_t cnv; + unsigned int direct:1; +}; + +static tsdb_conv_t no_conversion = {0}; +static pthread_once_t once = PTHREAD_ONCE_INIT; +static void once_init(void) { + no_conversion.cnv = (iconv_t)-1; + no_conversion.direct = 1; +} + +tsdb_conv_t* tsdb_conv_direct() { // get a non-conversion-converter + pthread_once(&once, once_init); + return &no_conversion; +} + +tsdb_conv_t* tsdb_conv_open(const char *from_enc, const char *to_enc) { + pthread_once(&once, once_init); + tsdb_conv_t *cnv = (tsdb_conv_t*)calloc(1, sizeof(*cnv)); + if (!cnv) return NULL; + if (strcmp(from_enc, to_enc)==0 && 0) { + cnv->cnv = (iconv_t)-1; + cnv->direct = 1; + return cnv; + } + cnv->cnv = iconv_open(to_enc, from_enc); + if (cnv->cnv == (iconv_t)-1) { + free(cnv); + return NULL; + } + cnv->direct = 0; + return cnv; +} + +void tsdb_conv_close(tsdb_conv_t *cnv) { + if (!cnv) return; + if (cnv == &no_conversion) return; + if (!cnv->direct) { + if (cnv->cnv != (iconv_t)-1) { + iconv_close(cnv->cnv); + } + } + cnv->cnv = (iconv_t)-1; + cnv->direct = 0; + free(cnv); +} + +TSDB_CONV_CODE tsdb_conv_write(tsdb_conv_t *cnv, const char *src, size_t *slen, char *dst, size_t *dlen) { + if (!cnv) return TSDB_CONV_NOT_AVAIL; + if (cnv->direct) { + size_t n = (*slen > *dlen) ? *dlen : *slen; + memcpy(dst, src, n); + *slen -= n; + *dlen -= n; + if (*dlen) dst[n] = '\0'; + return TSDB_CONV_OK; + } + if (!cnv->cnv) return TSDB_CONV_NOT_AVAIL; + size_t r = iconv(cnv->cnv, (char**)&src, slen, &dst, dlen); + if (r==(size_t)-1) return TSDB_CONV_BAD_CHAR; + if (*slen) return TSDB_CONV_TRUNC; + if (*dlen) *dst = '\0'; + return TSDB_CONV_OK; +} + +TSDB_CONV_CODE tsdb_conv_write_int64(tsdb_conv_t *cnv, int64_t val, char *dst, size_t *dlen) { + char utf8[64]; + int n = snprintf(utf8, sizeof(utf8), "%" PRId64 "", val); + DASSERT(n>=0); + DASSERT(n=0); + DASSERT(n=0); + DASSERT(ndirect) { + if (src[slen]=='\0') { // access violation? 
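// Note on the check above: reading src[slen] assumes the caller's buffer holds
// at least slen+1 bytes.  For the direct (same-encoding) converter, an already
// NUL-terminated source is handed back as-is with no copy; otherwise the bytes
// are copied into the caller-supplied stack_buffer_t (when one is given, else a
// heap block) so a terminating NUL can be appended.  Real charset conversion is
// done by iconv() below, with E2BIG/EILSEQ/EINVAL mapped to the TSDB_CONV_SRC_*
// codes.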
+ *dst = src; + if (dlen) *dlen = slen; + return TSDB_CONV_OK; + } + blen = slen + 1; + } else { + blen = (slen + 1) * 4; + } + + buf = stack_buffer_alloc(buffer, blen); + if (!buf) { + buf = (char*)malloc(blen); + if (!buf) return TSDB_CONV_OOM; + } + + if (cnv->direct) { + size_t n = slen; + DASSERT(blen > n); + memcpy(buf, src, n); + buf[n] = '\0'; + *dst = buf; + if (dlen) *dlen = n; + return TSDB_CONV_OK; + } + + const char *orig_s = src; + char *orig_d = buf; + size_t orig_blen = blen; + + TSDB_CONV_CODE code; + size_t r = iconv(cnv->cnv, (char**)&src, &slen, &buf, &blen); + do { + if (r==(size_t)-1) { + switch(errno) { + case E2BIG: { + code = TSDB_CONV_SRC_TOO_LARGE; + } break; + case EILSEQ: { + code = TSDB_CONV_SRC_BAD_SEQ; + } break; + case EINVAL: { + code = TSDB_CONV_SRC_INCOMPLETE; + } break; + default: { + code = TSDB_CONV_SRC_GENERAL; + } break; + } + break; + } + if (slen) { + code = TSDB_CONV_TRUNC; + break; + } + DASSERT(blen); + *buf = '\0'; + *dst = orig_d; + if (dlen) *dlen = orig_blen - blen; + return TSDB_CONV_OK; + } while (0); + + if (orig_d!=(char*)orig_s && !is_owned_by_stack_buffer(buffer, orig_d)) free(orig_d); + return code; +} + +void tsdb_conv_free(tsdb_conv_t *cnv, const char *ptr, stack_buffer_t *buffer, const char *src) { + if (ptr!=src && !is_owned_by_stack_buffer(buffer, ptr)) free((char*)ptr); +} + diff --git a/src/connector/odbc/src/todbc_conv.h b/src/connector/odbc/src/todbc_conv.h new file mode 100644 index 0000000000000000000000000000000000000000..2941f3e4961d38ed1e72bfd3d1184d1ea8de251b --- /dev/null +++ b/src/connector/odbc/src/todbc_conv.h @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef _todbc_conv_h_ +#define _todbc_conv_h_ + +#include "os.h" +#include +#include + + +typedef enum { + TSDB_CONV_OK = 0, + TSDB_CONV_NOT_AVAIL, + TSDB_CONV_OOM, + TSDB_CONV_OOR, + TSDB_CONV_TRUNC_FRACTION, + TSDB_CONV_TRUNC, + TSDB_CONV_CHAR_NOT_NUM, + TSDB_CONV_CHAR_NOT_TS, + TSDB_CONV_NOT_VALID_TS, + TSDB_CONV_GENERAL, + TSDB_CONV_BAD_CHAR, + TSDB_CONV_SRC_TOO_LARGE, + TSDB_CONV_SRC_BAD_SEQ, + TSDB_CONV_SRC_INCOMPLETE, + TSDB_CONV_SRC_GENERAL, +} TSDB_CONV_CODE; + +const char* tsdb_conv_code_str(TSDB_CONV_CODE code); + +typedef struct stack_buffer_s stack_buffer_t; +struct stack_buffer_s { + char buf[1024*16]; + size_t next; +}; + +char* stack_buffer_alloc(stack_buffer_t *buffer, size_t bytes); +int is_owned_by_stack_buffer(stack_buffer_t *buffer, const char *ptr); + +typedef struct tsdb_conv_s tsdb_conv_t; +tsdb_conv_t* tsdb_conv_direct(); // get a non-conversion-converter +tsdb_conv_t* tsdb_conv_open(const char *from_enc, const char *to_enc); +void tsdb_conv_close(tsdb_conv_t *cnv); + +TSDB_CONV_CODE tsdb_conv_write(tsdb_conv_t *cnv, const char *src, size_t *slen, char *dst, size_t *dlen); +TSDB_CONV_CODE tsdb_conv_write_int64(tsdb_conv_t *cnv, int64_t val, char *dst, size_t *dlen); +TSDB_CONV_CODE tsdb_conv_write_double(tsdb_conv_t *cnv, double val, char *dst, size_t *dlen); +TSDB_CONV_CODE tsdb_conv_write_timestamp(tsdb_conv_t *cnv, SQL_TIMESTAMP_STRUCT val, char *dst, size_t *dlen); + +TSDB_CONV_CODE tsdb_conv_chars_to_bit(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, int8_t *dst); +TSDB_CONV_CODE tsdb_conv_chars_to_tinyint(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, int8_t *dst); +TSDB_CONV_CODE tsdb_conv_chars_to_smallint(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, int16_t *dst); +TSDB_CONV_CODE tsdb_conv_chars_to_int(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, int32_t *dst); +TSDB_CONV_CODE tsdb_conv_chars_to_bigint(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, int64_t *dst); +TSDB_CONV_CODE tsdb_conv_chars_to_ts(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, int64_t *dst); +TSDB_CONV_CODE tsdb_conv_chars_to_float(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, float *dst); +TSDB_CONV_CODE tsdb_conv_chars_to_double(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, double *dst); +TSDB_CONV_CODE tsdb_conv_chars_to_timestamp(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, SQL_TIMESTAMP_STRUCT *dst); +TSDB_CONV_CODE tsdb_conv_chars_to_timestamp_ts(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, int64_t *dst); +TSDB_CONV_CODE tsdb_conv(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, const char **dst, size_t *dlen); +void tsdb_conv_free(tsdb_conv_t *cnv, const char *ptr, stack_buffer_t *buffer, const char *src); + + +TSDB_CONV_CODE tsdb_int64_to_bit(int64_t src, int8_t *dst); +TSDB_CONV_CODE tsdb_int64_to_tinyint(int64_t src, int8_t *dst); +TSDB_CONV_CODE tsdb_int64_to_smallint(int64_t src, int16_t *dst); +TSDB_CONV_CODE tsdb_int64_to_int(int64_t src, int32_t *dst); +TSDB_CONV_CODE tsdb_int64_to_bigint(int64_t src, int64_t *dst); +TSDB_CONV_CODE tsdb_int64_to_ts(int64_t src, int64_t *dst); +TSDB_CONV_CODE tsdb_int64_to_float(int64_t src, float *dst); +TSDB_CONV_CODE tsdb_int64_to_double(int64_t src, double *dst); +TSDB_CONV_CODE tsdb_int64_to_char(int64_t src, char *dst, size_t dlen); + 
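A minimal usage sketch of the cast-and-check integer helpers declared above (the values are arbitrary); overflow is reported as TSDB_CONV_OOR rather than the SQLSTATE strings the removed todbc.c helpers returned:

#include <stdio.h>
#include <stdint.h>
#include "todbc_conv.h"

int main(void) {
  int8_t v = 0;
  TSDB_CONV_CODE code = tsdb_int64_to_tinyint(1000, &v);        // 1000 does not fit in int8_t
  printf("code: %s\n", tsdb_conv_code_str(code));               // expect TSDB_CONV_OOR

  code = tsdb_int64_to_tinyint(42, &v);
  printf("code: %s, value: %d\n", tsdb_conv_code_str(code), v); // expect TSDB_CONV_OK, 42
  return 0;
}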
+TSDB_CONV_CODE tsdb_double_to_bit(double src, int8_t *dst); +TSDB_CONV_CODE tsdb_double_to_tinyint(double src, int8_t *dst); +TSDB_CONV_CODE tsdb_double_to_smallint(double src, int16_t *dst); +TSDB_CONV_CODE tsdb_double_to_int(double src, int32_t *dst); +TSDB_CONV_CODE tsdb_double_to_bigint(double src, int64_t *dst); +TSDB_CONV_CODE tsdb_double_to_ts(double src, int64_t *dst); +TSDB_CONV_CODE tsdb_double_to_char(double src, char *dst, size_t dlen); + +TSDB_CONV_CODE tsdb_timestamp_to_char(SQL_TIMESTAMP_STRUCT src, char *dst, size_t dlen); + +TSDB_CONV_CODE tsdb_chars_to_bit(const char *src, size_t smax, int8_t *dst); +TSDB_CONV_CODE tsdb_chars_to_tinyint(const char *src, size_t smax, int8_t *dst); +TSDB_CONV_CODE tsdb_chars_to_smallint(const char *src, size_t smax, int16_t *dst); +TSDB_CONV_CODE tsdb_chars_to_int(const char *src, size_t smax, int32_t *dst); +TSDB_CONV_CODE tsdb_chars_to_bigint(const char *src, size_t smax, int64_t *dst); +TSDB_CONV_CODE tsdb_chars_to_ts(const char *src, size_t smax, int64_t *dst); +TSDB_CONV_CODE tsdb_chars_to_float(const char *src, size_t smax, float *dst); +TSDB_CONV_CODE tsdb_chars_to_double(const char *src, size_t smax, double *dst); +TSDB_CONV_CODE tsdb_chars_to_timestamp(const char *src, size_t smax, SQL_TIMESTAMP_STRUCT *dst); +TSDB_CONV_CODE tsdb_chars_to_char(const char *src, size_t smax, char *dst, size_t dmax); + +#endif // _todbc_conv_h_ + diff --git a/src/connector/odbc/src/todbc_flex.h b/src/connector/odbc/src/todbc_flex.h new file mode 100644 index 0000000000000000000000000000000000000000..a13f1f4d2ebd8bbca73d9ff224bb3ed20ed43174 --- /dev/null +++ b/src/connector/odbc/src/todbc_flex.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef _TODBC_FLEX_H_ +#define _TODBC_FLEX_H_ + +typedef struct conn_val_s conn_val_t; +struct conn_val_s { + char *key; + char *dsn; + char *uid; + char *pwd; + char *db; + char *server; + char *svr_enc; + char *cli_enc; +}; + + +void conn_val_reset(conn_val_t *val); +int todbc_parse_conn_string(const char *conn, conn_val_t *val); + +#endif // _TODBC_FLEX_H_ + diff --git a/src/connector/odbc/src/todbc_log.h b/src/connector/odbc/src/todbc_log.h new file mode 100644 index 0000000000000000000000000000000000000000..391a690cccb0954736cac76af3354cc8a39754a8 --- /dev/null +++ b/src/connector/odbc/src/todbc_log.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef _todbc_log_h_ +#define _todbc_log_h_ + +#include "os.h" + +#define D(fmt, ...) \ + fprintf(stderr, \ + "%s[%d]:%s() " fmt "\n", \ + basename((char*)__FILE__), __LINE__, __func__, \ + ##__VA_ARGS__) + +#define DASSERT(statement) \ +do { \ + if (statement) break; \ + D("Assertion failure: %s", #statement); \ + abort(); \ +} while (0) + +#define DASSERTX(statement, fmt, ...) \ +do { \ + if (statement) break; \ + D("Assertion failure: %s, " fmt "", #statement, ##__VA_ARGS__); \ + abort(); \ +} while (0) + +#endif // _todbc_log_h_ + diff --git a/src/connector/odbc/src/todbc_scanner.l b/src/connector/odbc/src/todbc_scanner.l new file mode 100644 index 0000000000000000000000000000000000000000..f8c6a15d92442ee7f7b9041f017b41a4ed590314 --- /dev/null +++ b/src/connector/odbc/src/todbc_scanner.l @@ -0,0 +1,165 @@ +%{ +#include "todbc_flex.h" +#include + +#ifdef _MSC_VER +#define strncasecmp _strnicmp +#define strcasecmp _stricmp +#endif + +#define PUSH_STATE(state) yy_push_state(state, yyscanner) +#define POP_STATE() yy_pop_state(yyscanner) + +#define CHG_STATE(state) \ +do { \ + yy_pop_state(yyscanner); \ + yy_push_state(state, yyscanner); \ +} while (0) + +#define TOP_STATE(top) \ +do { \ + yy_push_state(INITIAL, yyscanner); \ + top = yy_top_state(yyscanner); \ + yy_pop_state(yyscanner); \ +} while (0) + +#define UNPUT() \ +do { \ + while (yyleng) unput(yytext[yyleng-1]); \ +} while (0) + +#define set_key() \ +do { \ + free(yyextra->key); \ + yyextra->key = strdup(yytext); \ +} while (0) + +#define set_val() \ +do { \ + if (!yyextra->key) break; \ + if (strcasecmp(yyextra->key, "DSN")==0) { \ + free(yyextra->dsn); \ + yyextra->dsn = strdup(yytext); \ + break; \ + } \ + if (strcasecmp(yyextra->key, "UID")==0) { \ + free(yyextra->uid); \ + yyextra->uid = strdup(yytext); \ + break; \ + } \ + if (strcasecmp(yyextra->key, "PWD")==0) { \ + free(yyextra->pwd); \ + yyextra->pwd = strdup(yytext); \ + break; \ + } \ + if (strcasecmp(yyextra->key, "DB")==0) { \ + free(yyextra->db); \ + yyextra->pwd = strdup(yytext); \ + break; \ + } \ + if (strcasecmp(yyextra->key, "Server")==0) { \ + free(yyextra->server); \ + yyextra->server = strdup(yytext); \ + break; \ + } \ + if (strcasecmp(yyextra->key, "SERVER_ENC")==0) { \ + free(yyextra->svr_enc); \ + yyextra->svr_enc = strdup(yytext); \ + break; \ + } \ + if (strcasecmp(yyextra->key, "CLIENT_ENC")==0) { \ + free(yyextra->cli_enc); \ + yyextra->cli_enc = strdup(yytext); \ + break; \ + } \ +} while (0) + +%} + +%option prefix="todbc_yy" +%option extra-type="conn_val_t *" +%option nounistd +%option never-interactive +%option reentrant +%option noyywrap +%option noinput nounput +%option debug verbose +%option stack +%option nodefault +%option warn +%option perf-report +%option 8bit + +%x KEY EQ BRACE1 BRACE2 VAL + +%% +<> { int state; TOP_STATE(state); + if (state == INITIAL) yyterminate(); + if (state == VAL) yyterminate(); + return -1; } +[[:space:]]+ { } +[[:alnum:]_]+ { set_key(); PUSH_STATE(KEY); } +.|\n { return -1; } + +[[:space:]]+ { } +[=] { CHG_STATE(EQ); } +.|\n { return -1; } + +[[:space:]]+ { } +[^][{}(),;?*=!@/\\\n[:space:]]+ { set_val(); CHG_STATE(VAL); } +[{] { CHG_STATE(BRACE1); } +.|\n { return -1; } + +[^{}\n]+ { set_val(); CHG_STATE(BRACE2); } +.|\n { return -1; } + +[[:space:]]+ { } +[}] { CHG_STATE(VAL); } +.|\n { return -1; } + +[;] { POP_STATE(); } +.|\n { return -1; } +%% + +int todbc_parse_conn_string(const char *conn, conn_val_t *val) { + yyscan_t arg = {0}; + yylex_init(&arg); + yyset_debug(0, arg); + yyset_extra(val, arg); + 
yy_scan_string(conn, arg); + int ret =yylex(arg); + yylex_destroy(arg); + if (val->key) free(val->key); val->key = NULL; + if (ret) { + conn_val_reset(val); + } + return ret ? -1 : 0; +} + +void conn_val_reset(conn_val_t *val) { + if (val->key) { + free(val->key); val->key = NULL; + } + if (val->dsn) { + free(val->dsn); val->dsn = NULL; + } + if (val->uid) { + free(val->uid); val->uid = NULL; + } + if (val->pwd) { + free(val->pwd); val->pwd = NULL; + } + if (val->db) { + free(val->db); val->db = NULL; + } + if (val->server) { + free(val->server); val->server = NULL; + } + if (val->svr_enc) { + free(val->svr_enc); val->svr_enc = NULL; + } + if (val->cli_enc) { + free(val->cli_enc); val->cli_enc = NULL; + } +} + diff --git a/src/connector/odbc/src/todbc_util.c b/src/connector/odbc/src/todbc_util.c index b6b45d8120d28da31dcc1d323893984f9b93519b..9c130b4f2f2e6d0f5d4a19e11ab323d42b800195 100644 --- a/src/connector/odbc/src/todbc_util.c +++ b/src/connector/odbc/src/todbc_util.c @@ -14,14 +14,10 @@ */ #include "todbc_util.h" - -#include "iconv.h" - -#include -#include +#include "todbc_log.h" +#include #include -#include -#include + const char* sql_sql_type(int type) { switch (type) { @@ -111,39 +107,6 @@ int is_valid_sql_sql_type(int type) { return 1; } -int string_conv(const char *fromcode, const char *tocode, - const unsigned char *src, size_t sbytes, - unsigned char *dst, size_t dbytes, - size_t *consumed, size_t *generated) -{ - if (consumed) *consumed = 0; - if (generated) *generated = 0; - - if (dbytes <= 0) return -1; - dst[0] = '\0'; - - iconv_t conv = iconv_open(tocode, fromcode); - if (!conv) return -1; - - int r = 0; - do { - char *s = (char*)src; - char *d = (char*)dst; - size_t sl = sbytes; - size_t dl = dbytes; - - r = iconv(conv, &s, &sl, &d, &dl); - *d = '\0'; - - if (consumed) *consumed = sbytes - sl; - if (generated) *generated = dbytes - dl; - - } while (0); - - iconv_close(conv); - return r; -} - int utf8_chars(const char *src) { const char *fromcode = "UTF-8"; @@ -161,78 +124,6 @@ int utf8_chars(const char *src) size_t chars = (sizeof(buf) - dlen) / 2; iconv_close(conv); - return chars; -} - -unsigned char* utf8_to_ucs4le(const char *utf8, size_t *chars) -{ - const char *tocode = "UCS-4LE"; - const char *fromcode = "UTF-8"; - - iconv_t conv = iconv_open(tocode, fromcode); - if (!conv) return NULL; - - unsigned char *ucs4le = NULL; - - do { - size_t slen = strlen(utf8); - size_t dlen = slen * 4; - - ucs4le = (unsigned char*)malloc(dlen+1); - if (!ucs4le) break; - - char *src = (char*)utf8; - char *dst = (char*)ucs4le; - size_t s = slen; - size_t d = dlen; - iconv(conv, &src, &s, &dst, &d); - dst[0] = '\0'; - - if (chars) *chars = (dlen - d) / 4; - } while (0); - - iconv_close(conv); - return ucs4le; -} - -char* ucs4le_to_utf8(const unsigned char *ucs4le, size_t slen, size_t *chars) -{ - const char *fromcode = "UCS-4LE"; - const char *tocode = "UTF-8"; - - iconv_t conv = iconv_open(tocode, fromcode); - if (!conv) return NULL; - - char *utf8 = NULL; - - do { - size_t dlen = slen; - - utf8 = (char*)malloc(dlen+1); - if (!utf8) break; - - char *dst = utf8; - char *src = (char*)ucs4le; - size_t s = slen; - size_t d = dlen; - iconv(conv, &src, &s, &dst, &d); - dst[0] = '\0'; - - if (chars) *chars = (slen - s) / 4; - } while (0); - - iconv_close(conv); - return utf8; -} - -SQLCHAR* wchars_to_chars(const SQLWCHAR *wchars, size_t chs, size_t *bytes) -{ - size_t dlen = chs * 4; - SQLCHAR *dst = (SQLCHAR*)malloc(dlen + 1); - if (!dst) return NULL; - - string_conv("UCS-2LE", "UTF-8", 
(const unsigned char*)wchars, chs * sizeof(*wchars), dst, dlen + 1, NULL, bytes); - - return dst; + return (int)chars; } diff --git a/src/connector/odbc/src/todbc_util.h b/src/connector/odbc/src/todbc_util.h index 43264975b4e618bd495691e59fb9df59f6664e03..ead0d7348973409c85741cc4d676e40f6f140447 100644 --- a/src/connector/odbc/src/todbc_util.h +++ b/src/connector/odbc/src/todbc_util.h @@ -16,33 +16,10 @@ #ifndef _TODBC_UTIL_H_ #define _TODBC_UTIL_H_ -#include -#include -#include -#include -#include - -#define D(fmt, ...) \ - fprintf(stderr, \ - "%s[%d]:%s() " fmt "\n", \ - basename((char*)__FILE__), __LINE__, __func__, \ - ##__VA_ARGS__) - -#define DASSERT(statement) \ -do { \ - if (statement) break; \ - D("Assertion failure: %s", #statement); \ - abort(); \ -} while (0) - -#define DASSERTX(statement, fmt, ...) \ -do { \ - if (statement) break; \ - D("Assertion failure: %s, " fmt "", #statement, ##__VA_ARGS__); \ - abort(); \ -} while (0) - +#include "os.h" +#include +#include const char* sql_sql_type(int type); const char* sql_c_type(int type); @@ -50,14 +27,7 @@ const char* sql_c_type(int type); int is_valid_sql_c_type(int type); int is_valid_sql_sql_type(int type); -int string_conv(const char *fromcode, const char *tocode, - const unsigned char *src, size_t sbytes, - unsigned char *dst, size_t dbytes, - size_t *consumed, size_t *generated); int utf8_chars(const char *src); -unsigned char* utf8_to_ucs4le(const char *utf8, size_t *chars); -char* ucs4le_to_utf8(const unsigned char *ucs4le, size_t slen, size_t *chars); -SQLCHAR* wchars_to_chars(const SQLWCHAR *wchars, size_t chs, size_t *bytes); - #endif // _TODBC_UTIL_H_ + diff --git a/src/connector/odbc/tests/CMakeLists.txt b/src/connector/odbc/tests/CMakeLists.txt index ac57a5647fce8bd036e133936284f3f4c847d8c8..1cc6acaf4bf34aa2158cc1f4fa0836d6e51f3a41 100644 --- a/src/connector/odbc/tests/CMakeLists.txt +++ b/src/connector/odbc/tests/CMakeLists.txt @@ -1,7 +1,18 @@ PROJECT(TDengine) IF (TD_LINUX) - AUX_SOURCE_DIRECTORY(. SRC) + # AUX_SOURCE_DIRECTORY(. SRC) ADD_EXECUTABLE(tcodbc main.c) TARGET_LINK_LIBRARIES(tcodbc odbc) + ADD_EXECUTABLE(tconv tconv.c) +ENDIF () + +IF (TD_WINDOWS_64) + SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /GL") + SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /GL") + # AUX_SOURCE_DIRECTORY(. 
SRC) + ADD_EXECUTABLE(tcodbc main.c) + TARGET_LINK_LIBRARIES(tcodbc odbc32 odbccp32 user32 legacy_stdio_definitions os) + ADD_EXECUTABLE(tconv tconv.c) + TARGET_LINK_LIBRARIES(tconv tutil) ENDIF () diff --git a/src/connector/odbc/tests/create_data.stmts b/src/connector/odbc/tests/create_data.stmts new file mode 100644 index 0000000000000000000000000000000000000000..549cb583d8322906b4bdaffafde8eb510cb91c90 --- /dev/null +++ b/src/connector/odbc/tests/create_data.stmts @@ -0,0 +1,12 @@ +P:drop database if exists m; +P:create database m; +P:use m; + +P:drop table if exists t; +P:create table t (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, blob binary(3), name nchar(1)); +P:insert into t (ts, blob, name) values('2020-10-10 00:00:00', 0, 1); +P:insert into t (ts, blob, name) values('2020-10-10 00:00:00.001', 1, 2); +P:insert into t (ts, blob, name) values('2020-10-10 00:00:00.002', '你', '好'); +P:insert into t (ts, blob, name) values('2020-10-10 00:00:00.003', 'abc', 'd'); +P:select * from t; + diff --git a/src/connector/odbc/tests/main.c b/src/connector/odbc/tests/main.c index 1ac9b71369e8a526b051008b9379fdda4b6a5e77..417de00d55f64249a9194b77fecbeb458c560cc7 100644 --- a/src/connector/odbc/tests/main.c +++ b/src/connector/odbc/tests/main.c @@ -1,14 +1,38 @@ +#include "../src/todbc_log.h" + +#ifdef _MSC_VER +#include +#include +#include "os.h" +#endif #include #include +#include #include #include -#include "os.h" +#define CHK_TEST(statement) \ +do { \ + D("testing: %s", #statement); \ + int r = (statement); \ + if (r) { \ + D("testing failed: %s", #statement); \ + return 1; \ + } \ +} while (0); + +typedef struct db_column_s db_column_t; +struct db_column_s { + SQLSMALLINT nameLength; + char name[4096]; // seems enough + SQLSMALLINT dataType; + SQLULEN columnSize; + SQLSMALLINT decimalDigits; + SQLSMALLINT nullable; +}; -// static const char *dsn = "TAOS_DSN"; -// static const char *uid = "root"; -// static const char *pwd = "taosdata"; +static db_column_t *columns = NULL; typedef struct data_s data_t; struct data_s { @@ -37,7 +61,7 @@ static const char *pro_stmts[] = { // "drop database db" }; -#define CHK_RESULT(r, ht, h) \ +#define CHK_RESULT(r, ht, h, fmt, ...) \ do { \ if (r==0) break; \ SQLCHAR ss[10]; \ @@ -48,23 +72,149 @@ do { es[0] = '\0'; \ SQLRETURN ret = SQLGetDiagRec(ht, h, 1, ss, &ne, es, sizeof(es), &n); \ if (ret) break; \ - fprintf(stderr, "%s%s\n", ss, es); \ + D("[%s]%s: " fmt "", ss, es, ##__VA_ARGS__); \ } while (0) +static int open_connect(const char *dsn, const char *uid, const char *pwd, SQLHENV *pEnv, SQLHDBC *pConn) { + SQLRETURN r; + SQLHENV env = {0}; + SQLHDBC conn = {0}; + r = SQLAllocEnv(&env); + if (r!=SQL_SUCCESS) return 1; + do { + r = SQLAllocConnect(env, &conn); + CHK_RESULT(r, SQL_HANDLE_ENV, env, ""); + if (r!=SQL_SUCCESS) break; + do { + r = SQLConnect(conn, (SQLCHAR*)dsn, (SQLSMALLINT)(dsn ? strlen(dsn) : 0), + (SQLCHAR*)uid, (SQLSMALLINT)(uid ? strlen(uid) : 0), + (SQLCHAR*)pwd, (SQLSMALLINT)(pwd ? 
strlen(pwd) : 0)); + CHK_RESULT(r, SQL_HANDLE_DBC, conn, ""); + if (r==SQL_SUCCESS) { + *pEnv = env; + *pConn = conn; + return 0; + } + } while (0); + SQLFreeConnect(conn); + } while (0); + SQLFreeEnv(env); + + return 1; +} + +static int open_driver_connect(const char *connstr, SQLHENV *pEnv, SQLHDBC *pConn) { + SQLRETURN r; + SQLHENV env = {0}; + SQLHDBC conn = {0}; + r = SQLAllocEnv(&env); + if (r!=SQL_SUCCESS) return 1; + do { + r = SQLAllocConnect(env, &conn); + CHK_RESULT(r, SQL_HANDLE_ENV, env, ""); + if (r!=SQL_SUCCESS) break; + do { + SQLCHAR buf[4096]; + SQLSMALLINT blen = 0; + SQLHDBC ConnectionHandle = conn; + SQLHWND WindowHandle = NULL; + SQLCHAR * InConnectionString = (SQLCHAR*)connstr; + SQLSMALLINT StringLength1 = (SQLSMALLINT)(connstr ? strlen(connstr) : 0); + SQLCHAR * OutConnectionString = buf; + SQLSMALLINT BufferLength = sizeof(buf); + SQLSMALLINT * StringLength2Ptr = &blen; + SQLUSMALLINT DriverCompletion = SQL_DRIVER_NOPROMPT; + r = SQLDriverConnect(ConnectionHandle, WindowHandle, InConnectionString, + StringLength1, OutConnectionString, BufferLength, + StringLength2Ptr, DriverCompletion); + CHK_RESULT(r, SQL_HANDLE_DBC, conn, ""); + if (r==SQL_SUCCESS) { + *pEnv = env; + *pConn = conn; + return 0; + } + } while (0); + SQLFreeConnect(conn); + } while (0); + SQLFreeEnv(env); + + return 1; +} + +static SQLRETURN traverse_cols(SQLHSTMT stmt, SQLSMALLINT cols) { + SQLRETURN r = SQL_ERROR; + for (SQLSMALLINT i=0; i0) fprintf(stdout, "\n"); + return r; + } + } + if (soi==SQL_NULL_DATA) { + fprintf(stdout, "%snull", i==0?"":","); + } else { + fprintf(stdout, "%s\"%s\"", i==0?"":",", buf); + } + } + fprintf(stdout, "\n"); + } + + // r = SQLFetch(stmt); + // if (r==SQL_NO_DATA) { + // D(".........."); + // r = SQL_SUCCESS; + // break; + // } + // CHK_RESULT(r, SQL_HANDLE_STMT, stmt, ""); + // if (r) break; + // r = SQLPrepare(stmt, (SQLCHAR*)statement, strlen(statement)); + // CHK_RESULT(r, SQL_HANDLE_STMT, stmt, ""); + // if (r) break; + // r = SQLExecute(stmt); + // CHK_RESULT(r, SQL_HANDLE_STMT, stmt, "statement: %s", statement); + // if (r) break; } while (0); - fprintf(stderr, "r: [%x][%d]\n", r, r); return r; } @@ -74,158 +224,450 @@ static int do_insert(SQLHSTMT stmt, data_t data) { SQLLEN lblob; const char *statement = "insert into t values (?, ?, ?, ?, ?, ?, ?, ?, ?,?)"; - int ignored = 0; + #define ignored 0 do { - fprintf(stderr, "prepare [%s]\n", statement); - r = SQLPrepare(stmt, (SQLCHAR*)statement, strlen(statement)); - CHK_RESULT(r, SQL_HANDLE_STMT, stmt); + r = SQLPrepare(stmt, (SQLCHAR*)statement, (SQLINTEGER)strlen(statement)); + CHK_RESULT(r, SQL_HANDLE_STMT, stmt, "statement: %s", statement); if (r) break; - fprintf(stderr, "bind 1 [%s]\n", statement); r = SQLBindParameter(stmt, 1, SQL_PARAM_INPUT, SQL_C_SBIGINT, SQL_TIMESTAMP, ignored, ignored, &data.ts, ignored, NULL); - CHK_RESULT(r, SQL_HANDLE_STMT, stmt); + CHK_RESULT(r, SQL_HANDLE_STMT, stmt, "statement: %s", statement); if (r) break; - fprintf(stderr, "bind 2 [%s]\n", statement); r = SQLBindParameter(stmt, 2, SQL_PARAM_INPUT, SQL_C_BIT, SQL_BIT, ignored, ignored, &data.b, ignored, NULL); - CHK_RESULT(r, SQL_HANDLE_STMT, stmt); + CHK_RESULT(r, SQL_HANDLE_STMT, stmt, "statement: %s", statement); if (r) break; - fprintf(stderr, "bind 3 [%s]\n", statement); r = SQLBindParameter(stmt, 3, SQL_PARAM_INPUT, SQL_C_TINYINT, SQL_TINYINT, ignored, ignored, &data.v1, ignored, NULL); - CHK_RESULT(r, SQL_HANDLE_STMT, stmt); + CHK_RESULT(r, SQL_HANDLE_STMT, stmt, "statement: %s", statement); if (r) break; - 
fprintf(stderr, "bind 4 [%s]\n", statement); r = SQLBindParameter(stmt, 4, SQL_PARAM_INPUT, SQL_C_SHORT, SQL_SMALLINT, ignored, ignored, &data.v2, ignored, NULL); - CHK_RESULT(r, SQL_HANDLE_STMT, stmt); + CHK_RESULT(r, SQL_HANDLE_STMT, stmt, "statement: %s", statement); if (r) break; - fprintf(stderr, "bind 5 [%s]\n", statement); r = SQLBindParameter(stmt, 5, SQL_PARAM_INPUT, SQL_C_LONG, SQL_INTEGER, ignored, ignored, &data.v4, ignored, NULL); - CHK_RESULT(r, SQL_HANDLE_STMT, stmt); + CHK_RESULT(r, SQL_HANDLE_STMT, stmt, "statement: %s", statement); if (r) break; - fprintf(stderr, "bind 6 [%s]\n", statement); r = SQLBindParameter(stmt, 6, SQL_PARAM_INPUT, SQL_C_SBIGINT, SQL_BIGINT, ignored, ignored, &data.v8, ignored, NULL); - CHK_RESULT(r, SQL_HANDLE_STMT, stmt); + CHK_RESULT(r, SQL_HANDLE_STMT, stmt, "statement: %s", statement); if (r) break; - fprintf(stderr, "bind 7 [%s]\n", statement); r = SQLBindParameter(stmt, 7, SQL_PARAM_INPUT, SQL_C_FLOAT, SQL_FLOAT, ignored, ignored, &data.f4, ignored, NULL); - CHK_RESULT(r, SQL_HANDLE_STMT, stmt); + CHK_RESULT(r, SQL_HANDLE_STMT, stmt, "statement: %s", statement); if (r) break; - fprintf(stderr, "bind 8 [%s]\n", statement); r = SQLBindParameter(stmt, 8, SQL_PARAM_INPUT, SQL_C_DOUBLE, SQL_DOUBLE, ignored, ignored, &data.f8, ignored, NULL); - CHK_RESULT(r, SQL_HANDLE_STMT, stmt); + CHK_RESULT(r, SQL_HANDLE_STMT, stmt, "statement: %s", statement); if (r) break; - fprintf(stderr, "bind 9 [%s]\n", statement); lbin = SQL_NTS; r = SQLBindParameter(stmt, 9, SQL_PARAM_INPUT, SQL_C_BINARY, SQL_VARBINARY, sizeof(data.bin)-1, ignored, &data.bin, ignored, &lbin); - CHK_RESULT(r, SQL_HANDLE_STMT, stmt); + CHK_RESULT(r, SQL_HANDLE_STMT, stmt, "statement: %s", statement); if (r) break; - fprintf(stderr, "bind 10 [%s]\n", statement); lblob = SQL_NTS; r = SQLBindParameter(stmt, 10, SQL_PARAM_INPUT, SQL_C_CHAR, SQL_VARCHAR, sizeof(data.blob)-1, ignored, &data.blob, ignored, &lblob); - CHK_RESULT(r, SQL_HANDLE_STMT, stmt); + CHK_RESULT(r, SQL_HANDLE_STMT, stmt, "statement: %s", statement); if (r) break; - fprintf(stderr, "execute [%s]\n", statement); r = SQLExecute(stmt); - CHK_RESULT(r, SQL_HANDLE_STMT, stmt); + CHK_RESULT(r, SQL_HANDLE_STMT, stmt, "statement: %s", statement); if (r) break; // ts += 1; // v = 2; - // fprintf(stderr, "execute [%s]\n", statement); // r = SQLExecute(stmt); // if (r) break; - - fprintf(stderr, "done\n"); } while (0); - fprintf(stderr, "r: [%x][%d]\n", r, r); + + #undef ignored return r; } -int main(int argc, char *argv[]) { - if (argc < 4) return 1; - const char *dsn = argv[1]; - const char *uid = argv[2]; - const char *pwd = argv[3]; - SQLRETURN r; +static int test1(const char *dsn, const char *uid, const char *pwd) { SQLHENV env = {0}; SQLHDBC conn = {0}; - r = SQLAllocEnv(&env); - if (r!=SQL_SUCCESS) return 1; + int n = open_connect(dsn, uid, pwd, &env, &conn); + if (n) return 1; + + int ok = 0; do { - r = SQLAllocConnect(env, &conn); - CHK_RESULT(r, SQL_HANDLE_ENV, env); + SQLRETURN r = SQL_SUCCESS; + SQLHSTMT stmt = {0}; + r = SQLAllocHandle(SQL_HANDLE_STMT, conn, &stmt); if (r!=SQL_SUCCESS) break; do { - r = SQLConnect(conn, (SQLCHAR*)dsn, strlen(dsn), - (SQLCHAR*)uid, strlen(uid), - (SQLCHAR*)pwd, strlen(pwd)); - CHK_RESULT(r, SQL_HANDLE_DBC, conn); - if (r!=SQL_SUCCESS) break; + if (do_statement(stmt, "drop database if exists db")) { + break; + } + for (size_t i=0; i0 && line[n-1] == '\n') line[n-1]='\0'; + if (n>0 && line[n-1] == '\r') line[n-1]='\0'; + if (n>1 && line[n-2] == '\r') line[n-2]='\0'; + p = line; + while 
(isspace(*p)) ++p; + + if (*p==0) break; + + int positive = 1; + if (strncmp(p, "N:", 2)==0) { + positive = 0; + p += 2; + } else if (strncmp(p, "P:", 2)==0) { + p += 2; + } + + D("statement: [%s]", p); + r = do_statement(stmt, p); + + if (positive && r==0) break; + if (!positive && r) { r = 0; break; } + if (positive) return r; + D("expecting negative result, but got positive"); + return -1; + } while (0); + + free(line); + + if (r) break; + } + + fclose(f); + return r ? 1 : 0; +} + +int test_sqls_in_conn(SQLHENV env, SQLHDBC conn, const char *sqls) { + SQLHSTMT stmt = {0}; + CHK_TEST(create_statement(env, conn, &stmt)); + int r = test_sqls_in_stmt(env, conn, stmt, sqls); + SQLFreeHandle(SQL_HANDLE_STMT, stmt); + return r ? 1 : 0; +} + +int test_sqls(const char *dsn, const char *uid, const char *pwd, const char *connstr, const char *sqls) { + int r = 0; + SQLHENV env = {0}; + SQLHDBC conn = {0}; + if (dsn) { + CHK_TEST(open_connect(dsn, uid, pwd, &env, &conn)); + } else { + CHK_TEST(open_driver_connect(connstr, &env, &conn)); + } + if (sqls) { + r = test_sqls_in_conn(env, conn, sqls); + } + SQLDisconnect(conn); + SQLFreeConnect(conn); + SQLFreeEnv(env); + return r ? 1 : 0; +} + +void usage(const char *arg0) { + fprintf(stdout, "%s usage:\n", arg0); + fprintf(stdout, "%s [--dsn ] [--uid ] [--pwd ] [--dcs ] [--sts ]\n", arg0); + fprintf(stdout, " --dsn : DSN\n"); + fprintf(stdout, " --uid : UID\n"); + fprintf(stdout, " --pwd : PWD\n"); + fprintf(stdout, " --dcs : driver connection string\n"); + fprintf(stdout, " --sts : file where statements store\n"); +} + +int main(int argc, char *argv[]) { + // if (argc==1) { + // CHK_TEST(test_env()); + // CHK_TEST(test1("TAOS_DSN", "root", "taoxsdata")); + // D("Done!"); + // return 0; + // } + + const char *dsn = NULL; + const char *uid = NULL; + const char *pwd = NULL; + const char *dcs = NULL; // driver connection string + const char *sts = NULL; // statements file + for (size_t i=1; i=argc) { + D(" expected but got nothing"); + return 1; + } + if (dcs) { + D("--dcs has already been specified"); + return 1; + } + dsn = argv[i]; + continue; + } + if (strcmp(arg, "--uid")==0) { + ++i; + if (i>=argc) { + D(" expected but got nothing"); + return 1; + } + uid = argv[i]; + continue; + } + if (strcmp(arg, "--pwd")==0) { + ++i; + if (i>=argc) { + D(" expected but got nothing"); + return 1; + } + pwd = argv[i]; + continue; + } + if (strcmp(arg, "--dcs")==0) { + ++i; + if (i>=argc) { + D(" expected but got nothing"); + return 1; + } + if (dsn || uid || pwd) { + D("either of --dsn/--uid/--pwd has already been specified"); + return 1; + } + dcs = argv[i]; + continue; + } + if (strcmp(arg, "--sts")==0) { + ++i; + if (i>=argc) { + D(" expected but got nothing"); + return 1; + } + sts = argv[i]; + continue; + } + } + CHK_TEST(test_sqls(dsn, uid, pwd, dcs, sts)); + D("Done!"); + return 0; + + if (0) { + const char *dsn = (argc>1) ? argv[1] : NULL; + const char *uid = (argc>2) ? argv[2] : NULL; + const char *pwd = (argc>3) ? argv[3] : NULL; + const char *connstr = (argc>4) ? argv[4] : NULL; + const char *sqls = (argc>5) ? 
argv[5] : NULL; + + dsn = NULL; + uid = NULL; + pwd = NULL; + connstr = argv[1]; + sqls = argv[2]; + if (0) { + CHK_TEST(test_env()); + + CHK_TEST(test1(dsn, uid, pwd)); + + const char *statements[] = { + "drop database if exists m", + "create database m", + "use m", + "drop database m", + NULL + }; + CHK_TEST(test_statements(dsn, uid, pwd, statements)); + + if (connstr) + CHK_TEST(test_driver_connect(connstr)); + + if (connstr) { + SQLHENV env = {0}; + SQLHDBC conn = {0}; + CHK_TEST(open_driver_connect(connstr, &env, &conn)); + int r = tests(env, conn); + SQLDisconnect(conn); + SQLFreeConnect(conn); + SQLFreeEnv(env); + if (r) return 1; + } + } + + if ((dsn || connstr) && 1) { + CHK_TEST(test_sqls(dsn, uid, pwd, connstr, sqls)); + } + + D("Done!"); + return 0; + } +} + diff --git a/src/connector/odbc/tests/odbc.py b/src/connector/odbc/tests/odbc.py index d2de8f39c631dff15a29565a9a19582c58c43b6f..c137905775e567f6163846690886850cb77a684a 100644 --- a/src/connector/odbc/tests/odbc.py +++ b/src/connector/odbc/tests/odbc.py @@ -1,16 +1,17 @@ import pyodbc -cnxn = pyodbc.connect('DSN=TAOS_DSN;UID=root;PWD=taosdata', autocommit=True) +# cnxn = pyodbc.connect('DSN={TAOS_DSN};UID={ root };PWD={ taosdata };HOST={ localhost:6030 }', autocommit=True) +cnxn = pyodbc.connect('DSN={TAOS_DSN}; UID=root;PWD=taosdata; HOST=localhost:6030', autocommit=True) cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='utf-8') #cnxn.setdecoding(pyodbc.SQL_WCHAR, encoding='utf-8') #cnxn.setencoding(encoding='utf-8') -cursor = cnxn.cursor() -cursor.execute("SELECT * from db.t") -row = cursor.fetchone() -while row: - print(row) - row = cursor.fetchone() -cursor.close() +#cursor = cnxn.cursor() +#cursor.execute("SELECT * from db.t") +#row = cursor.fetchone() +#while row: +# print(row) +# row = cursor.fetchone() +#cursor.close() #cursor = cnxn.cursor() #cursor.execute(""" @@ -36,32 +37,32 @@ cursor.execute("create database db"); cursor.close() cursor = cnxn.cursor() -cursor.execute("create table db.t (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(40), blob nchar(10))"); +cursor.execute("create table db.mt (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(40), blob nchar(10))"); cursor.close() cursor = cnxn.cursor() -cursor.execute("insert into db.t values('2020-10-13 06:44:00', 1, 127, 32767, 32768, 32769, 123.456, 789.987, 'hello', 'world')") +cursor.execute("insert into db.mt values('2020-10-13 06:44:00', 1, 127, 32767, 32768, 32769, 123.456, 789.987, 'hello', 'world')") cursor.close() cursor = cnxn.cursor() -cursor.execute("insert into db.t values(?,?,?,?,?,?,?,?,?,?)", "2020-10-13 07:06:00", 0, 127, 32767, 32768, 32769, 123.456, 789.987, "hel后lo", "wo哈rld"); +cursor.execute("insert into db.mt values(?,?,?,?,?,?,?,?,?,?)", "2020-10-13 07:06:00", 0, 127, 32767, 32768, 32769, 123.456, 789.987, "hel后lo", "wo哈rld"); cursor.close() cursor = cnxn.cursor() -cursor.execute("SELECT * from db.t") +cursor.execute("SELECT * from db.mt") row = cursor.fetchone() while row: print(row) row = cursor.fetchone() cursor.close() -cursor = cnxn.cursor() -cursor.execute("drop database if exists db"); -cursor.close() - -cursor = cnxn.cursor() -cursor.execute("create database db"); -cursor.close() +#cursor = cnxn.cursor() +#cursor.execute("drop database if exists db"); +#cursor.close() +# +#cursor = cnxn.cursor() +#cursor.execute("create database db"); +#cursor.close() cursor = cnxn.cursor() cursor.execute("create table db.t (ts timestamp, b bool, v1 
tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(4), blob nchar(4))"); @@ -118,3 +119,13 @@ while row: row = cursor.fetchone() cursor.close() +cursor = cnxn.cursor() +cursor.execute("create table db.f (ts timestamp, v1 float)") +cursor.close() + +params = [ ('2020-10-20 00:00:10', '123.3') ] +cursor = cnxn.cursor() +cursor.fast_executemany = True +cursor.executemany("insert into db.f values (?, ?)", params) +cursor.close() + diff --git a/src/connector/odbc/tests/query_data.stmts b/src/connector/odbc/tests/query_data.stmts new file mode 100644 index 0000000000000000000000000000000000000000..b0e9ea27ea81f53780ebf3bced8e8a5f63bf7e2b --- /dev/null +++ b/src/connector/odbc/tests/query_data.stmts @@ -0,0 +1 @@ +P:select * from m.t; diff --git a/src/connector/odbc/tests/select.stmts b/src/connector/odbc/tests/select.stmts new file mode 100644 index 0000000000000000000000000000000000000000..f7152ba6cf59ae2d0af75bba1845f6fb80150a0e --- /dev/null +++ b/src/connector/odbc/tests/select.stmts @@ -0,0 +1,4 @@ +P: select * from db.t; +P: select * from db.f; +P: select * from db.v; +P: select * from db.mt; \ No newline at end of file diff --git a/src/connector/odbc/tests/simples.stmts b/src/connector/odbc/tests/simples.stmts new file mode 100644 index 0000000000000000000000000000000000000000..0db58a8807491b9ea4be96dc7c4efc5b43488244 --- /dev/null +++ b/src/connector/odbc/tests/simples.stmts @@ -0,0 +1,44 @@ +P:drop database if exists m; +P:create database m; +P:use m; + +P:create table t (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, blob binary(1), name nchar(1)); +P:insert into t (ts, b) values('2020-10-10 00:00:00', 0); +P:insert into t (ts, b) values('2020-10-10 00:00:00.001', 1); +P:insert into t (ts, b) values('2020-10-10 00:00:00.002', 10); +P:select * from t; + +P:drop table t; +P:create table t (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, blob binary(1), name nchar(1)); +P:insert into t (ts, v1) values('2020-10-10 00:00:00', 0); +P:insert into t (ts, v1) values('2020-10-10 00:00:00.001', 1); +P:insert into t (ts, v1) values('2020-10-10 00:00:00.002', 10); +P:select * from t; + +P:drop table t; +P:create table t (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, blob binary(1), name nchar(1)); +P:insert into t (ts, name) values('2020-10-10 00:00:00', 0); +P:insert into t (ts, name) values('2020-10-10 00:00:00.001', 1); +P:insert into t (ts, name) values('2020-10-10 00:00:00.002', '人'); +P:insert into t (ts, name) values('2020-10-10 00:00:00.003', 'a'); +P:select * from t; + +P:drop table t; +P:create table t (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, blob binary(3), name nchar(1)); +P:insert into t (ts, blob) values('2020-10-10 00:00:00', 0); +P:insert into t (ts, blob) values('2020-10-10 00:00:00.001', 1); +P:insert into t (ts, blob) values('2020-10-10 00:00:00.002', 'a'); +P:insert into t (ts, blob) values('2020-10-10 00:00:00.003', 'b'); +P:insert into t (ts, blob) values('2020-10-10 00:00:00.004', '人'); +P:select * from t; + +P:drop table t; +P:create table t (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, blob binary(3), name nchar(1)); +N:insert into t (ts, blob) values('2020-10-10 00:00:00', '1234'); +N:insert into t (ts, blob) values('2020-10-10 00:00:00.001', '0000'); +N:insert into t (ts, blob) values('2020-10-10 00:00:00.002', '人a'); +P:insert into t (ts, blob) 
values('2020-10-10 00:00:00.003', 'a'); +P:insert into t (ts, blob) values('2020-10-10 00:00:00.004', 'b'); +P:insert into t (ts, blob) values('2020-10-10 00:00:00.005', '人'); +P:select * from t; + diff --git a/src/connector/odbc/tests/tconv.c b/src/connector/odbc/tests/tconv.c new file mode 100644 index 0000000000000000000000000000000000000000..acae6421bbe52d8522f37a756d5097a3b32bb5c7 --- /dev/null +++ b/src/connector/odbc/tests/tconv.c @@ -0,0 +1,156 @@ +#include "../src/todbc_log.h" + +#ifdef _MSC_VER +#include +#include +#endif + +#include + + +#include +#include + +static void usage(const char *arg0); +static int do_conv(iconv_t cnv, FILE *fin, FILE *fout); + +int main(int argc, char *argv[]) { + const char *from_enc = "UTF-8"; + const char *to_enc = "UTF-8"; + const char *dst_file = NULL; + const char *src = NULL; +#ifdef _MSC_VER + from_enc = "CP936"; + to_enc = "CP936"; +#endif + for (int i = 1; i < argc; i++) { + const char *arg = argv[i]; + if (strcmp(arg, "-h") == 0) { + usage(argv[0]); + return 0; + } else if (strcmp(arg, "-f") == 0 ) { + i += 1; + if (i>=argc) { + fprintf(stderr, "expecing , but got nothing\n"); + return 1; + } + from_enc = argv[i]; + continue; + } else if (strcmp(arg, "-t") == 0 ) { + i += 1; + if (i>=argc) { + fprintf(stderr, "expecing , but got nothing\n"); + return 1; + } + to_enc = argv[i]; + continue; + } else if (strcmp(arg, "-o") == 0 ) { + i += 1; + if (i>=argc) { + fprintf(stderr, "expecing , but got nothing\n"); + return 1; + } + dst_file = argv[i]; + continue; + } else if (arg[0]=='-') { + fprintf(stderr, "unknown argument: [%s]\n", arg); + return 1; + } else { + if (src) { + fprintf(stderr, "does not allow multiple files\n"); + return 1; + } + src = arg; + continue; + } + } + int r = -1; + FILE *fin = src ? fopen(src, "rb") : stdin; + FILE *fout = dst_file ? fopen(dst_file, "wb") : stdout; + iconv_t cnv = iconv_open(to_enc, from_enc); + do { + if (!fin) { + fprintf(stderr, "failed to open file [%s]\n", src); + break; + } + if (!fout) { + fprintf(stderr, "failed to open file [%s]\n", dst_file); + break; + } +#ifdef _MSC_VER + if (fout == stdout) { + r = _setmode(_fileno(fout), _O_BINARY); + if (r == -1) { + fprintf(stderr, "Cannot set binary mode for output stream: %d[%s]\n", errno, strerror(errno)); + } + } +#endif + + if (cnv == (iconv_t)-1) { + fprintf(stderr, "failed to open conv from [%s] to [%s]: [%s]\n", from_enc, to_enc, strerror(errno)); + break; + } + r = do_conv(cnv, fin, fout); + iconv_close(cnv); + cnv = (iconv_t)-1; + } while (0); + if (fin && fin != stdin) fclose(fin); + if (fout && fout != stdout) fclose(fout); + return r ? 
1 : 0; +} + +static void usage(const char *arg0) { + fprintf(stderr, "%s -h | [-f ] [-t ] [-o ] [file]\n", arg0); + return; +} + +#define IN_SIZE (64*1024) +#define OUT_SIZE (8*IN_SIZE) +static int do_conv(iconv_t cnv, FILE *fin, FILE *fout) { + int r = 0; + char src[IN_SIZE]; + size_t slen = sizeof(src); + char dst[OUT_SIZE]; + size_t dlen = sizeof(dst); + char *start = src; + while (!feof(fin)) { + slen = (size_t)(src + sizeof(src) - start); + size_t n = fread(start, 1, slen, fin); + if (n>0) { + char *ss = src; + size_t sl = n; + while (sl) { + char *dd = dst; + size_t dn = dlen; + size_t v = iconv(cnv, &ss, &sl, &dd, &dn); + if (v==(size_t)-1) { + int err = errno; + if (err == EILSEQ) { + fprintf(stderr, "failed to convert: [%s]\n", strerror(err)); + r = -1; + break; + } + if (err == EINVAL) { + fprintf(stderr, "[%s]\n", strerror(errno)); + size_t ava = (size_t)(src + sizeof(src) - ss); + memcpy(src, ss, ava); + start = ss; + } else { + fprintf(stderr, "internal logic error: [%s]\n", strerror(errno)); + r = -1; + break; + } + } + n = fwrite(dst, 1, (size_t)(dd-dst), fout); + if (n +#include +#include "os.h" +#endif +#include + +#include +#include +#include + +static void usage(const char *arg0); +static int do_install(int i, int argc, char *argv[]); +static int do_uninstall(int i, int argc, char *argv[]); + +int main(int argc, char *argv[]) { + for (int i = 1; i < argc; i++) { + const char *arg = argv[i]; + if (strcmp(arg, "-h") == 0) { + usage(argv[0]); + return 0; + } else if (strcmp(arg, "-i") == 0 ) { + i = do_install(i + 1, argc, argv); + if (i > 0) continue; + return i == 0 ? 0 : 1; + } else if (strcmp(arg, "-u") == 0 ) { + i = do_uninstall(i + 1, argc, argv); + if (i > 0) continue; + return i == 0 ? 0 : 1; + } else { + fprintf(stderr, "unknown argument: [%s]\n", arg); + return 1; + } + } +} + +static void usage(const char *arg0) { + fprintf(stderr, "%s -h | -i -n [TaosDriverName] -p [TaosDriverPath] | -u [-f] -n [TaosDriverName]\n", arg0); + return; +} + +static int do_install(int i, int argc, char *argv[]) { + const char* driverName = NULL; +#ifdef _MSC_VER + const char* driverFile = "todbc.dll"; +#else + const char* driverFile = "libtodbc.so"; +#endif + const char* driverPath = NULL; + for (; i < argc; ++i) { + const char *arg = argv[i]; + if (strcmp(arg, "-n") == 0) { + i += 1; + if (i >= argc) { + fprintf(stderr, "expecting TaosDriverName, but got nothing\n"); + return -1; + } + arg = argv[i]; + if (strstr(arg, "TAOS") != arg) { + fprintf(stderr, "TaosDriverName shall begin with 'TAOS': [%s]\n", arg); + return -1; + } + driverName = arg; + } else if (strcmp(arg, "-p") == 0) { + i += 1; + if (i >= argc) { + fprintf(stderr, "expecting TaosDriverPath, but got nothing\n"); + return -1; + } + driverPath = argv[i]; + } else { + fprintf(stderr, "unknown argument: [%s]\n", arg); + return -1; + } + } + if (!driverName) { + fprintf(stderr, "TaosDriverName not specified\n"); + return -1; + } + if (!driverPath) { + fprintf(stderr, "TaosDriverPath not specified\n"); + return -1; + } + char buf[8192]; + snprintf(buf, sizeof(buf), "%s%cDriver=%s%cFileUage=0%cConnectFunctions=YYN%c", + driverName, 0, driverFile, 0, 0, 0); + BOOL ok = 1; + DWORD usageCount = 1; + char installed[PATH_MAX + 1]; + WORD len = 0; + ok = SQLInstallDriverEx(buf, driverPath, installed, sizeof(installed), &len, ODBC_INSTALL_INQUIRY, &usageCount); + if (!ok) { + fprintf(stderr, "failed to query TaosDriverName: [%s]\n", driverName); + return -1; + } + int r = 0; +#ifdef _MSC_VER + r = stricmp(driverPath, installed); 
+#else + r = strcasecmp(driverPath, installed); +#endif + if (r) { + fprintf(stderr, "previously installed TaosDriver [%s] has different target path [%s]\n" + "it shall be uninstalled before you can install it to different path [%s]\n", + driverName, installed, driverPath); + return -1; + } + ok = SQLInstallDriverEx(buf, driverPath, installed, sizeof(installed), &len, ODBC_INSTALL_COMPLETE, &usageCount); + if (!ok) { + fprintf(stderr, "failed to install TaosDriverName: [%s][%s]\n", driverName, driverPath); + return -1; + } + + fprintf(stderr, "ODBC driver [%s] has been installed in [%s], and UsageCount is now [%d]\n", + driverName, driverPath, usageCount); + return argc; +} + +static int do_uninstall(int i, int argc, char *argv[]) { + int forceful = 0; + const char* driverName = NULL; + for (; i < argc; ++i) { + const char *arg = argv[i]; + if (strcmp(arg, "-f") == 0) { + forceful = 1; + } else if (strcmp(arg, "-n") == 0) { + i += 1; + if (i >= argc) { + fprintf(stderr, "expecting TaosDriverName, but got nothing\n"); + return -1; + } + arg = argv[i]; + if (strstr(arg, "TAOS") != arg) { + fprintf(stderr, "TaosDriverName shall begin with 'TAOS': [%s]\n", arg); + return -1; + } + driverName = arg; + } else { + fprintf(stderr, "unknown argument: [%s]\n", arg); + return -1; + } + } + if (!driverName) { + fprintf(stderr, "TaosDriverName not specified\n"); + return -1; + } + BOOL ok = 1; + DWORD usageCount = 1; + do { + ok = SQLRemoveDriver(driverName, 0, &usageCount); + if (!ok) { + fprintf(stderr, "failed to remove driver [%s]\n", driverName); + return -1; + } + if (!forceful) { + fprintf(stderr, "UsageCount for ODBC driver [%s] is now: [%d]\n", driverName, usageCount); + return argc; + } + } while (usageCount > 0); + fprintf(stderr, "ODBC driver [%s] is now fully uninstalled\n", driverName); + return argc; +} + diff --git a/src/connector/python/linux/python2/taos/cinterface.py b/src/connector/python/linux/python2/taos/cinterface.py index 32859f6b340b4393b523d475bce1ac89cef1a040..269326535cfb39373544c71685e8a2b9e9b51db1 100644 --- a/src/connector/python/linux/python2/taos/cinterface.py +++ b/src/connector/python/linux/python2/taos/cinterface.py @@ -18,7 +18,7 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): _timestamp_converter = _convert_microsecond_to_datetime if num_of_rows > 0: - return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)][::-1])) + return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)][::1])) else: return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) @@ -26,7 +26,7 @@ def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)][::-1] ] + return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)][::1] ] else: return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[:abs(num_of_rows)] ] @@ -34,7 +34,7 @@ def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C tinyint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in 
ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)][::-1] ] + return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)][::1] ] else: return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] @@ -42,7 +42,7 @@ def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C smallint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)][::-1]] + return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)][::1]] else: return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)] ] @@ -50,7 +50,7 @@ def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C int row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)][::-1] ] + return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)][::1] ] else: return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)] ] @@ -58,7 +58,7 @@ def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bigint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)][::-1] ] + return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)][::1] ] else: return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)] ] @@ -66,7 +66,7 @@ def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C float row to python row """ if num_of_rows > 0: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)][::-1] ] + return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)][::1] ] else: return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)] ] @@ -74,7 +74,7 @@ def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C double row to python row """ if num_of_rows > 0: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)][::-1] ] + return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)][::1] ] else: return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)] ] @@ -82,7 +82,7 @@ def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C binary row to python row """ if num_of_rows > 0: - return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in 
(ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)][::-1]] + return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)][::1]] else: return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] @@ -311,29 +311,24 @@ class CTaosInterface(object): @staticmethod def fetchBlock(result, fields): pblock = ctypes.c_void_p(0) - num_of_rows = CTaosInterface.libtaos.taos_fetch_block( - result, ctypes.byref(pblock)) - - if num_of_rows == 0: + pblock = CTaosInterface.libtaos.taos_fetch_row(result) + if pblock : + num_of_rows = 1 + isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + blocks = [None] * len(fields) + fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) + fieldLen = [ele for ele in ctypes.cast(fieldL, ctypes.POINTER(ctypes.c_int))[:len(fields)]] + for i in range(len(fields)): + data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] + if fields[i]['type'] not in _CONVERT_FUNC: + raise DatabaseError("Invalid data type returned from database") + if data is None: + blocks[i] = [None] + else: + blocks[i] = _CONVERT_FUNC[fields[i]['type']](data, num_of_rows, fieldLen[i], isMicro) + else: return None, 0 - - isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) - blocks = [None] * len(fields) - fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ele for ele in ctypes.cast(fieldL, ctypes.POINTER(ctypes.c_int))[:len(fields)]] - for i in range(len(fields)): - data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] - if data == None: - blocks[i] = [None] * num_of_rows - continue - - if fields[i]['type'] not in _CONVERT_FUNC: - raise DatabaseError("Invalid data type returned from database") - - blocks[i] = _CONVERT_FUNC[fields[i]['type']](data, num_of_rows, fieldLen[i], isMicro) - return blocks, abs(num_of_rows) - @staticmethod def freeResult(result): CTaosInterface.libtaos.taos_free_result(result) diff --git a/src/connector/python/linux/python2/taos/cursor.py b/src/connector/python/linux/python2/taos/cursor.py index 37c02d330e856717b5ed0bdac76723cf64d3860b..bc6670ca776a351c4739d61fc165d71334ceaa3b 100644 --- a/src/connector/python/linux/python2/taos/cursor.py +++ b/src/connector/python/linux/python2/taos/cursor.py @@ -1,6 +1,7 @@ from .cinterface import CTaosInterface from .error import * from .constants import FieldType +import threading class TDengineCursor(object): @@ -35,6 +36,7 @@ class TDengineCursor(object): self._block_iter = 0 self._affected_rows = 0 self._logfile = "" + self._threadId = threading.get_ident() if connection is not None: self._connection = connection @@ -42,7 +44,7 @@ class TDengineCursor(object): def __iter__(self): return self - def next(self): + def __next__(self): if self._result is None or self._fields is None: raise OperationalError("Invalid use of fetch iterator") @@ -137,7 +139,7 @@ class TDengineCursor(object): else: raise ProgrammingError( CTaosInterface.errStr( - self._result ), errno) + self._result), errno) def executemany(self, operation, seq_of_parameters): """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters. 
@@ -148,6 +150,8 @@ class TDengineCursor(object): """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available. """ pass + def fetchmany(self): + pass def istype(self, col, dataType): if (dataType.upper() == "BOOL"): @@ -180,9 +184,6 @@ class TDengineCursor(object): return False - def fetchmany(self): - pass - def fetchall(self): """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation. """ @@ -201,8 +202,6 @@ class TDengineCursor(object): self._rowcount += num_of_fields for i in range(len(self._fields)): buffer[i].extend(block[i]) - - return list(map(tuple, zip(*buffer))) def nextset(self): diff --git a/src/connector/python/linux/python3/taos/cinterface.py b/src/connector/python/linux/python3/taos/cinterface.py index 609154a3a4b38cfd1cf7976fb755e4aef7be3354..be5b99d8c18272c5f53d1e49f731588b70062b29 100644 --- a/src/connector/python/linux/python3/taos/cinterface.py +++ b/src/connector/python/linux/python3/taos/cinterface.py @@ -18,7 +18,7 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): _timestamp_converter = _convert_microsecond_to_datetime if num_of_rows > 0: - return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)][::-1])) + return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)][::1])) else: return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) @@ -26,7 +26,7 @@ def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)][::-1] ] + return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)][::1] ] else: return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[:abs(num_of_rows)] ] @@ -34,7 +34,7 @@ def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C tinyint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)][::-1] ] + return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)][::1] ] else: return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] @@ -42,7 +42,7 @@ def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C smallint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)][::-1]] + return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)][::1]] else: return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)] ] @@ -50,7 +50,7 @@ def _crow_int_to_python(data, num_of_rows, nbytes=None, 
micro=False): """Function to convert C int row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)][::-1] ] + return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)][::1] ] else: return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)] ] @@ -58,7 +58,7 @@ def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bigint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)][::-1] ] + return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)][::1] ] else: return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)] ] @@ -66,7 +66,7 @@ def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C float row to python row """ if num_of_rows > 0: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)][::-1] ] + return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)][::1] ] else: return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)] ] @@ -74,7 +74,7 @@ def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C double row to python row """ if num_of_rows > 0: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)][::-1] ] + return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)][::1] ] else: return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)] ] @@ -82,7 +82,7 @@ def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C binary row to python row """ if num_of_rows > 0: - return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)][::-1]] + return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)][::1]] else: return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] @@ -111,7 +111,7 @@ def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False): # except ValueError: # res.append(None) # return res - # # return [ele.value for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[:abs(num_of_rows)][::-1]] + # # return [ele.value for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[:abs(num_of_rows)][::1]] # else: # return [ele.value for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[:abs(num_of_rows)]] @@ -308,32 +308,48 @@ class CTaosInterface(object): return fields + # 
@staticmethod + # def fetchBlock(result, fields): + # pblock = ctypes.c_void_p(0) + # num_of_rows = CTaosInterface.libtaos.taos_fetch_block( + # result, ctypes.byref(pblock)) + # if num_of_rows == 0: + # return None, 0 + + # isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + # blocks = [None] * len(fields) + # fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) + # fieldLen = [ele for ele in ctypes.cast(fieldL, ctypes.POINTER(ctypes.c_int))[:len(fields)]] + # for i in range(len(fields)): + # data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] + + # if fields[i]['type'] not in _CONVERT_FUNC: + # raise DatabaseError("Invalid data type returned from database") + # print('====================',fieldLen[i]) + # blocks[i] = _CONVERT_FUNC[fields[i]['type']](data, num_of_rows, fieldLen[i], isMicro) + + # return blocks, abs(num_of_rows) @staticmethod def fetchBlock(result, fields): pblock = ctypes.c_void_p(0) - num_of_rows = CTaosInterface.libtaos.taos_fetch_block( - result, ctypes.byref(pblock)) - - if num_of_rows == 0: + pblock = CTaosInterface.libtaos.taos_fetch_row(result) + if pblock : + num_of_rows = 1 + isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + blocks = [None] * len(fields) + fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) + fieldLen = [ele for ele in ctypes.cast(fieldL, ctypes.POINTER(ctypes.c_int))[:len(fields)]] + for i in range(len(fields)): + data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] + if fields[i]['type'] not in _CONVERT_FUNC: + raise DatabaseError("Invalid data type returned from database") + if data is None: + blocks[i] = [None] + else: + blocks[i] = _CONVERT_FUNC[fields[i]['type']](data, num_of_rows, fieldLen[i], isMicro) + else: return None, 0 - - isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) - blocks = [None] * len(fields) - fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ele for ele in ctypes.cast(fieldL, ctypes.POINTER(ctypes.c_int))[:len(fields)]] - for i in range(len(fields)): - data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] - if data == None: - blocks[i] = [None] * num_of_rows - continue - - if fields[i]['type'] not in _CONVERT_FUNC: - raise DatabaseError("Invalid data type returned from database") - - blocks[i] = _CONVERT_FUNC[fields[i]['type']](data, num_of_rows, fieldLen[i], isMicro) - return blocks, abs(num_of_rows) - @staticmethod def freeResult(result): CTaosInterface.libtaos.taos_free_result(result) diff --git a/src/connector/python/linux/python3/taos/cursor.py b/src/connector/python/linux/python3/taos/cursor.py index ec7a85ee1a3f8cb0cd49aca8c2a4242dca89021e..eb10bed4859828961a0c521e146c18a5f8e21d7c 100644 --- a/src/connector/python/linux/python3/taos/cursor.py +++ b/src/connector/python/linux/python3/taos/cursor.py @@ -216,7 +216,6 @@ class TDengineCursor(object): self._rowcount += num_of_fields for i in range(len(self._fields)): buffer[i].extend(block[i]) - return list(map(tuple, zip(*buffer))) def nextset(self): diff --git a/src/connector/python/windows/python2/taos/cinterface.py b/src/connector/python/windows/python2/taos/cinterface.py index 6a9c5bfcef0d317844b6b58796f307e5cf155a43..084d38e41cee0e8c31a11d319d47e6bc5b125bd7 100644 --- a/src/connector/python/windows/python2/taos/cinterface.py +++ b/src/connector/python/windows/python2/taos/cinterface.py @@ -18,7 +18,7 @@ def _crow_timestamp_to_python(data, num_of_rows, 
nbytes=None, micro=False): _timestamp_converter = _convert_microsecond_to_datetime if num_of_rows > 0: - return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)][::-1])) + return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)][::1])) else: return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)])) @@ -26,7 +26,7 @@ def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)][::-1] ] + return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)][::1] ] else: return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[:abs(num_of_rows)] ] @@ -34,7 +34,7 @@ def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C tinyint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)][::-1] ] + return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)][::1] ] else: return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] @@ -42,7 +42,7 @@ def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C smallint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)][::-1]] + return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)][::1]] else: return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)] ] @@ -50,7 +50,7 @@ def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C int row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)][::-1] ] + return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)][::1] ] else: return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)] ] @@ -58,7 +58,7 @@ def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bigint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)][::-1] ] + return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)][::1] ] else: return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)] ] @@ -66,7 +66,7 @@ def _crow_float_to_python(data, num_of_rows, nbytes=None, 
micro=False): """Function to convert C float row to python row """ if num_of_rows > 0: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)][::-1] ] + return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)][::1] ] else: return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)] ] @@ -74,7 +74,7 @@ def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C double row to python row """ if num_of_rows > 0: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)][::-1] ] + return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)][::1] ] else: return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)] ] @@ -82,7 +82,7 @@ def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C binary row to python row """ if num_of_rows > 0: - return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)][::-1]] + return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)][::1]] else: return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] @@ -310,27 +310,23 @@ class CTaosInterface(object): @staticmethod def fetchBlock(result, fields): pblock = ctypes.c_void_p(0) - num_of_rows = CTaosInterface.libtaos.taos_fetch_block( - result, ctypes.byref(pblock)) - - if num_of_rows == 0: + pblock = CTaosInterface.libtaos.taos_fetch_row(result) + if pblock : + num_of_rows = 1 + isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + blocks = [None] * len(fields) + fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) + fieldLen = [ele for ele in ctypes.cast(fieldL, ctypes.POINTER(ctypes.c_int))[:len(fields)]] + for i in range(len(fields)): + data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] + if fields[i]['type'] not in _CONVERT_FUNC: + raise DatabaseError("Invalid data type returned from database") + if data is None: + blocks[i] = [None] + else: + blocks[i] = _CONVERT_FUNC[fields[i]['type']](data, num_of_rows, fieldLen[i], isMicro) + else: return None, 0 - - isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) - blocks = [None] * len(fields) - fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ele for ele in ctypes.cast(fieldL, ctypes.POINTER(ctypes.c_int))[:len(fields)]] - for i in range(len(fields)): - data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] - if data == None: - blocks[i] = [None] * num_of_rows - continue - - if fields[i]['type'] not in _CONVERT_FUNC: - raise DatabaseError("Invalid data type returned from database") - - blocks[i] = _CONVERT_FUNC[fields[i]['type']](data, num_of_rows, fieldLen[i], isMicro) - return blocks, abs(num_of_rows) @staticmethod diff --git a/src/connector/python/windows/python2/taos/cursor.py 
b/src/connector/python/windows/python2/taos/cursor.py index 8714fe77cb739f23f79247a41d72aa127b6d6d25..35846cbe11f49386747a5a1f59e5529b84a741a0 100644 --- a/src/connector/python/windows/python2/taos/cursor.py +++ b/src/connector/python/windows/python2/taos/cursor.py @@ -1,5 +1,9 @@ from .cinterface import CTaosInterface from .error import * +from .constants import FieldType +import threading + +# querySeqNum = 0 class TDengineCursor(object): """Database cursor which is used to manage the context of a fetch operation. @@ -32,6 +36,8 @@ class TDengineCursor(object): self._block_rows = -1 self._block_iter = 0 self._affected_rows = 0 + self._logfile = "" + self._threadId = threading.get_ident() if connection is not None: self._connection = connection @@ -39,7 +45,7 @@ class TDengineCursor(object): def __iter__(self): return self - def next(self): + def __next__(self): if self._result is None or self._fields is None: raise OperationalError("Invalid use of fetch iterator") diff --git a/src/connector/python/windows/python3/taos/cinterface.py b/src/connector/python/windows/python3/taos/cinterface.py index fa7124431c92a179b2a0042b57e8ad45609ea352..68adf191c9bf696575254974ab6a544d5664faa5 100644 --- a/src/connector/python/windows/python3/taos/cinterface.py +++ b/src/connector/python/windows/python3/taos/cinterface.py @@ -18,7 +18,7 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): _timestamp_converter = _convert_microsecond_to_datetime if num_of_rows > 0: - return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)][::-1])) + return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)][::1])) else: return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)])) @@ -26,7 +26,7 @@ def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)][::-1] ] + return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)][::1] ] else: return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[:abs(num_of_rows)] ] @@ -34,7 +34,7 @@ def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C tinyint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)][::-1] ] + return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)][::1] ] else: return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] @@ -42,7 +42,7 @@ def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C smallint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)][::-1]] + return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)][::1]] else: return [ None if 
ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)] ] @@ -50,7 +50,7 @@ def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C int row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)][::-1] ] + return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)][::1] ] else: return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)] ] @@ -58,7 +58,7 @@ def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bigint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)][::-1] ] + return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)][::1] ] else: return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)] ] @@ -66,7 +66,7 @@ def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C float row to python row """ if num_of_rows > 0: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)][::-1] ] + return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)][::1] ] else: return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)] ] @@ -74,7 +74,7 @@ def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C double row to python row """ if num_of_rows > 0: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)][::-1] ] + return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)][::1] ] else: return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)] ] @@ -82,7 +82,7 @@ def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C binary row to python row """ if num_of_rows > 0: - return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)][::-1]] + return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)][::1]] else: return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] @@ -225,6 +225,7 @@ class CTaosInterface(object): if connection.value == None: print('connect to TDengine failed') + raise ConnectionError("connect to TDengine failed") # sys.exit(1) else: print('connect to TDengine success') @@ -310,27 +311,23 @@ class CTaosInterface(object): @staticmethod def fetchBlock(result, fields): pblock = ctypes.c_void_p(0) - num_of_rows = 
CTaosInterface.libtaos.taos_fetch_block( - result, ctypes.byref(pblock)) - - if num_of_rows == 0: + pblock = CTaosInterface.libtaos.taos_fetch_row(result) + if pblock : + num_of_rows = 1 + isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + blocks = [None] * len(fields) + fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) + fieldLen = [ele for ele in ctypes.cast(fieldL, ctypes.POINTER(ctypes.c_int))[:len(fields)]] + for i in range(len(fields)): + data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] + if fields[i]['type'] not in _CONVERT_FUNC: + raise DatabaseError("Invalid data type returned from database") + if data is None: + blocks[i] = [None] + else: + blocks[i] = _CONVERT_FUNC[fields[i]['type']](data, num_of_rows, fieldLen[i], isMicro) + else: return None, 0 - - isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) - blocks = [None] * len(fields) - fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ele for ele in ctypes.cast(fieldL, ctypes.POINTER(ctypes.c_int))[:len(fields)]] - for i in range(len(fields)): - data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] - if data == None: - blocks[i] = [None] * num_of_rows - continue - - if fields[i]['type'] not in _CONVERT_FUNC: - raise DatabaseError("Invalid data type returned from database") - - blocks[i] = _CONVERT_FUNC[fields[i]['type']](data, num_of_rows, fieldLen[i], isMicro) - return blocks, abs(num_of_rows) @staticmethod diff --git a/src/connector/python/windows/python3/taos/cursor.py b/src/connector/python/windows/python3/taos/cursor.py index c2c442b06ee71ae90ec63662886c709e38d4d2ad..b58b494cad0e0a2b664ef4765eae2baee27aa20b 100644 --- a/src/connector/python/windows/python3/taos/cursor.py +++ b/src/connector/python/windows/python3/taos/cursor.py @@ -1,5 +1,10 @@ from .cinterface import CTaosInterface from .error import * +from .constants import FieldType +import threading + +# querySeqNum = 0 + class TDengineCursor(object): """Database cursor which is used to manage the context of a fetch operation. @@ -32,6 +37,8 @@ class TDengineCursor(object): self._block_rows = -1 self._block_iter = 0 self._affected_rows = 0 + self._logfile = "" + self._threadId = threading.get_ident() if connection is not None: self._connection = connection diff --git a/src/cq/src/cqMain.c b/src/cq/src/cqMain.c index 1a99a84b8ebfe8ed503213299646da39b5b1d27a..1be7552a892e25cf44317a99fe52ff57689ad338 100644 --- a/src/cq/src/cqMain.c +++ b/src/cq/src/cqMain.c @@ -39,16 +39,16 @@ #define cTrace(...) 
{ if (cqDebugFlag & DEBUG_TRACE) { taosPrintLog("CQ ", cqDebugFlag, __VA_ARGS__); }} typedef struct { - int vgId; + int32_t vgId; char user[TSDB_USER_LEN]; char pass[TSDB_PASSWORD_LEN]; char db[TSDB_DB_NAME_LEN]; FCqWrite cqWrite; void *ahandle; - int num; // number of continuous streams + int32_t num; // number of continuous streams struct SCqObj *pHead; void *dbConn; - int master; + int32_t master; void *tmrCtrl; pthread_mutex_t mutex; } SCqContext; @@ -57,7 +57,7 @@ typedef struct SCqObj { tmr_h tmrId; uint64_t uid; int32_t tid; // table ID - int rowSize; // bytes of a row + int32_t rowSize; // bytes of a row char * sqlStr; // SQL string STSchema * pSchema; // pointer to schema array void * pStream; @@ -115,7 +115,7 @@ void cqClose(void *handle) { SCqObj *pTemp = pObj; pObj = pObj->next; tdFreeSchema(pTemp->pSchema); - taosTFree(pTemp->sqlStr); + tfree(pTemp->sqlStr); free(pTemp); } @@ -175,7 +175,7 @@ void cqStop(void *handle) { pthread_mutex_unlock(&pContext->mutex); } -void *cqCreate(void *handle, uint64_t uid, int tid, char *sqlStr, STSchema *pSchema) { +void *cqCreate(void *handle, uint64_t uid, int32_t tid, char *sqlStr, STSchema *pSchema) { SCqContext *pContext = handle; SCqObj *pObj = calloc(sizeof(SCqObj), 1); @@ -237,7 +237,7 @@ void cqDrop(void *handle) { pthread_mutex_unlock(&pContext->mutex); } -static void doCreateStream(void *param, TAOS_RES *result, int code) { +static void doCreateStream(void *param, TAOS_RES *result, int32_t code) { SCqObj* pObj = (SCqObj*)param; SCqContext* pContext = pObj->pContext; SSqlObj* pSql = (SSqlObj*)result; @@ -288,7 +288,7 @@ static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row) { cDebug("vgId:%d, id:%d CQ:%s stream result is ready", pContext->vgId, pObj->tid, pObj->sqlStr); - int size = sizeof(SWalHead) + sizeof(SSubmitMsg) + sizeof(SSubmitBlk) + TD_DATA_ROW_HEAD_SIZE + pObj->rowSize; + int32_t size = sizeof(SWalHead) + sizeof(SSubmitMsg) + sizeof(SSubmitBlk) + TD_DATA_ROW_HEAD_SIZE + pObj->rowSize; char *buffer = calloc(size, 1); SWalHead *pHead = (SWalHead *)buffer; @@ -334,7 +334,7 @@ static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row) { pHead->version = 0; // write into vnode write queue - pContext->cqWrite(pContext->ahandle, pHead, TAOS_QTYPE_CQ); + pContext->cqWrite(pContext->ahandle, pHead, TAOS_QTYPE_CQ, NULL); free(buffer); } diff --git a/src/cq/test/cqtest.c b/src/cq/test/cqtest.c index 1daee644a7effc5fa50fbc340ad7df8e2b5d70b0..e1114fc024054f7a4af32ca1497a63f7da942147 100644 --- a/src/cq/test/cqtest.c +++ b/src/cq/test/cqtest.c @@ -24,7 +24,7 @@ int64_t ver = 0; void *pCq = NULL; -int writeToQueue(void *pVnode, void *data, int type) { +int writeToQueue(void *pVnode, void *data, int type, void *pMsg) { return 0; } diff --git a/src/dnode/inc/dnodeCfg.h b/src/dnode/inc/dnodeCfg.h new file mode 100644 index 0000000000000000000000000000000000000000..35d889646021a079abe18c01a26fa5891718aaa0 --- /dev/null +++ b/src/dnode/inc/dnodeCfg.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_DNODE_CFG_H +#define TDENGINE_DNODE_CFG_H + +#ifdef __cplusplus +extern "C" { +#endif + +int32_t dnodeInitCfg(); +void dnodeCleanupCfg(); +void dnodeUpdateCfg(SDnodeCfg *cfg); +int32_t dnodeGetDnodeId(); +void dnodeGetCfg(int32_t *dnodeId, char *clusterId); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/dnode/inc/dnodeEps.h b/src/dnode/inc/dnodeEps.h new file mode 100644 index 0000000000000000000000000000000000000000..2a203498c1f270c7e456694a4e1e195cbd9022cd --- /dev/null +++ b/src/dnode/inc/dnodeEps.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_DNODE_EP_H +#define TDENGINE_DNODE_EP_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "taosmsg.h" + +int32_t dnodeInitEps(); +void dnodeCleanupEps(); +void dnodeUpdateEps(SDnodeEps *eps); +void dnodeUpdateEp(int32_t dnodeId, char *epstr, char *fqdn, uint16_t *port); +bool dnodeCheckEpChanged(int32_t dnodeId, char *epstr); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/dnode/inc/dnodeMInfos.h b/src/dnode/inc/dnodeMInfos.h new file mode 100644 index 0000000000000000000000000000000000000000..9c3c85c47e2dbcc11c5b5a80fbf091bd93855149 --- /dev/null +++ b/src/dnode/inc/dnodeMInfos.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_DNODE_MINFOS_H +#define TDENGINE_DNODE_MINFOS_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "taosmsg.h" + +int32_t dnodeInitMInfos(); +void dnodeCleanupMInfos(); +void dnodeUpdateMInfos(SMnodeInfos *minfos); +void dnodeUpdateEpSetForPeer(SRpcEpSet *epSet); +void dnodeGetMInfos(SMnodeInfos *minfos); +bool dnodeIsMasterEp(char *ep); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/dnode/inc/dnodeMPeer.h b/src/dnode/inc/dnodeMPeer.h index 9a48703110c389f10ac623ccaeaa85420a32817f..00221baa221a411d614c1fa1bb3dc4525de2ed71 100644 --- a/src/dnode/inc/dnodeMPeer.h +++ b/src/dnode/inc/dnodeMPeer.h @@ -20,11 +20,11 @@ extern "C" { #endif -int32_t dnodeInitMnodePeer(); -void dnodeCleanupMnodePeer(); -int32_t dnodeAllocateMnodePqueue(); -void dnodeFreeMnodePqueue(); -void dnodeDispatchToMnodePeerQueue(SRpcMsg *pMsg); +int32_t dnodeInitMPeer(); +void dnodeCleanupMPeer(); +int32_t dnodeAllocateMPeerQueue(); +void dnodeFreeMPeerQueue(); +void dnodeDispatchToMPeerQueue(SRpcMsg *pMsg); #ifdef __cplusplus } diff --git a/src/dnode/inc/dnodeMRead.h b/src/dnode/inc/dnodeMRead.h index 4e93838b7998850e1bec79f017fdfbd28f286a11..8a8e71227ddf3cd7b7c77ef664ac3d4c935889db 100644 --- a/src/dnode/inc/dnodeMRead.h +++ b/src/dnode/inc/dnodeMRead.h @@ -20,11 +20,11 @@ extern "C" { #endif -int32_t dnodeInitMnodeRead(); -void dnodeCleanupMnodeRead(); -int32_t dnodeAllocateMnodeRqueue(); -void dnodeFreeMnodeRqueue(); -void dnodeDispatchToMnodeReadQueue(SRpcMsg *rpcMsg); +int32_t dnodeInitMRead(); +void dnodeCleanupMRead(); +int32_t dnodeAllocMReadQueue(); +void dnodeFreeMReadQueue(); +void dnodeDispatchToMReadQueue(SRpcMsg *rpcMsg); #ifdef __cplusplus } diff --git a/src/dnode/inc/dnodeMWrite.h b/src/dnode/inc/dnodeMWrite.h index 498fea81c59329b4d30874d36d10182b6e3ae54f..6a3d41bc81a03465a2e097bc711c83a8d8af362e 100644 --- a/src/dnode/inc/dnodeMWrite.h +++ b/src/dnode/inc/dnodeMWrite.h @@ -20,11 +20,11 @@ extern "C" { #endif -int32_t dnodeInitMnodeWrite(); -void dnodeCleanupMnodeWrite(); -int32_t dnodeAllocateMnodeWqueue(); -void dnodeFreeMnodeWqueue(); -void dnodeDispatchToMnodeWriteQueue(SRpcMsg *pMsg); +int32_t dnodeInitMWrite(); +void dnodeCleanupMWrite(); +int32_t dnodeAllocMWritequeue(); +void dnodeFreeMWritequeue(); +void dnodeDispatchToMWriteQueue(SRpcMsg *pMsg); #ifdef __cplusplus } diff --git a/src/dnode/inc/dnodeMgmt.h b/src/dnode/inc/dnodeMgmt.h index e8f4a0823f3cc9846b85804d323e73a8cfc0476e..2038ef5286b32522b11409ba5a253b33228b984d 100644 --- a/src/dnode/inc/dnodeMgmt.h +++ b/src/dnode/inc/dnodeMgmt.h @@ -20,6 +20,8 @@ extern "C" { #endif +#include "trpc.h" + int32_t dnodeInitMgmt(); void dnodeCleanupMgmt(); int32_t dnodeInitMgmtTimer(); @@ -35,8 +37,8 @@ void* dnodeGetVnodeTsdb(void *pVnode); void dnodeReleaseVnode(void *pVnode); void dnodeSendRedirectMsg(SRpcMsg *rpcMsg, bool forShell); -void dnodeGetMnodeEpSetForPeer(void *epSet); -void dnodeGetMnodeEpSetForShell(void *epSet); +void dnodeGetEpSetForPeer(SRpcEpSet *epSet); +void dnodeGetEpSetForShell(SRpcEpSet *epSet); #ifdef __cplusplus } diff --git a/src/dnode/inc/dnodeVRead.h b/src/dnode/inc/dnodeVRead.h index a1035200475259bd91757f934d35a0dd5a69b1fe..5b17693146cb68ab66943593bc2f9cd6587cc225 100644 --- a/src/dnode/inc/dnodeVRead.h +++ b/src/dnode/inc/dnodeVRead.h @@ -20,9 +20,11 @@ extern "C" { #endif -int32_t dnodeInitVnodeRead(); -void dnodeCleanupVnodeRead(); -void dnodeDispatchToVnodeReadQueue(SRpcMsg *pMsg); +int32_t dnodeInitVRead(); +void dnodeCleanupVRead(); +void dnodeDispatchToVReadQueue(SRpcMsg 
*pMsg); +void * dnodeAllocVReadQueue(void *pVnode); +void dnodeFreeVReadQueue(void *pRqueue); #ifdef __cplusplus } diff --git a/src/dnode/inc/dnodeVWrite.h b/src/dnode/inc/dnodeVWrite.h index 7da701a8e270239ee2f48e090138f048cc81f880..759e9ca8a5599236d08228ddd87ed8d4f8c55dca 100644 --- a/src/dnode/inc/dnodeVWrite.h +++ b/src/dnode/inc/dnodeVWrite.h @@ -20,9 +20,12 @@ extern "C" { #endif -int32_t dnodeInitVnodeWrite(); -void dnodeCleanupVnodeWrite(); -void dnodeDispatchToVnodeWriteQueue(SRpcMsg *pMsg); +int32_t dnodeInitVWrite(); +void dnodeCleanupVWrite(); +void dnodeDispatchToVWriteQueue(SRpcMsg *pMsg); +void * dnodeAllocVWriteQueue(void *pVnode); +void dnodeFreeVWriteQueue(void *pWqueue); +void dnodeSendRpcVWriteRsp(void *pVnode, void *pWrite, int32_t code); #ifdef __cplusplus } diff --git a/src/dnode/src/dnodeCfg.c b/src/dnode/src/dnodeCfg.c new file mode 100644 index 0000000000000000000000000000000000000000..16d109a13a7600c5414b1f1edd175a8f15aa4cf0 --- /dev/null +++ b/src/dnode/src/dnodeCfg.c @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#define _DEFAULT_SOURCE +#include "os.h" +#include "cJSON.h" +#include "tglobal.h" +#include "dnode.h" +#include "dnodeInt.h" +#include "dnodeCfg.h" + +static SDnodeCfg tsCfg = {0}; +static pthread_mutex_t tsCfgMutex; + +static int32_t dnodeReadCfg(); +static int32_t dnodeWriteCfg(); +static void dnodeResetCfg(SDnodeCfg *cfg); +static void dnodePrintCfg(SDnodeCfg *cfg); + +int32_t dnodeInitCfg() { + pthread_mutex_init(&tsCfgMutex, NULL); + dnodeResetCfg(NULL); + int32_t ret = dnodeReadCfg(); + if (ret == 0) { + dInfo("dnode cfg is initialized"); + } + return ret; +} + +void dnodeCleanupCfg() { pthread_mutex_destroy(&tsCfgMutex); } + +void dnodeUpdateCfg(SDnodeCfg *cfg) { + if (tsCfg.dnodeId != 0) return; + dnodeResetCfg(cfg); +} + +int32_t dnodeGetDnodeId() { + int32_t dnodeId = 0; + pthread_mutex_lock(&tsCfgMutex); + dnodeId = tsCfg.dnodeId; + pthread_mutex_unlock(&tsCfgMutex); + return dnodeId; +} + +void dnodeGetCfg(int32_t *dnodeId, char *clusterId) { + pthread_mutex_lock(&tsCfgMutex); + *dnodeId = tsCfg.dnodeId; + tstrncpy(clusterId, tsCfg.clusterId, TSDB_CLUSTER_ID_LEN); + pthread_mutex_unlock(&tsCfgMutex); +} + +static void dnodeResetCfg(SDnodeCfg *cfg) { + if (cfg == NULL) return; + if (cfg->dnodeId == 0) return; + + pthread_mutex_lock(&tsCfgMutex); + tsCfg.dnodeId = cfg->dnodeId; + tstrncpy(tsCfg.clusterId, cfg->clusterId, TSDB_CLUSTER_ID_LEN); + dnodePrintCfg(cfg); + dnodeWriteCfg(); + pthread_mutex_unlock(&tsCfgMutex); +} + +static void dnodePrintCfg(SDnodeCfg *cfg) { + dInfo("dnodeId is set to %d, clusterId is set to %s", cfg->dnodeId, cfg->clusterId); +} + +static int32_t dnodeReadCfg() { + int32_t len = 0; + int32_t maxLen = 200; + char * content = calloc(1, maxLen + 1); + cJSON * root = NULL; + FILE * fp = NULL; + SDnodeCfg cfg = {0}; + + char file[TSDB_FILENAME_LEN + 20] = {0}; + sprintf(file, "%s/dnodeCfg.json", tsDnodeDir); + + fp = fopen(file, "r"); + if (!fp) { + dDebug("failed 
to read %s, file not exist", file); + goto PARSE_CFG_OVER; + } + + len = fread(content, 1, maxLen, fp); + if (len <= 0) { + dError("failed to read %s, content is null", file); + goto PARSE_CFG_OVER; + } + + content[len] = 0; + root = cJSON_Parse(content); + if (root == NULL) { + dError("failed to read %s, invalid json format", file); + goto PARSE_CFG_OVER; + } + + cJSON *dnodeId = cJSON_GetObjectItem(root, "dnodeId"); + if (!dnodeId || dnodeId->type != cJSON_Number) { + dError("failed to read %s, dnodeId not found", file); + goto PARSE_CFG_OVER; + } + cfg.dnodeId = dnodeId->valueint; + + cJSON *clusterId = cJSON_GetObjectItem(root, "clusterId"); + if (!clusterId || clusterId->type != cJSON_String) { + dError("failed to read %s, clusterId not found", file); + goto PARSE_CFG_OVER; + } + tstrncpy(cfg.clusterId, clusterId->valuestring, TSDB_CLUSTER_ID_LEN); + + dInfo("read file %s successed", file); + +PARSE_CFG_OVER: + if (content != NULL) free(content); + if (root != NULL) cJSON_Delete(root); + if (fp != NULL) fclose(fp); + terrno = 0; + + dnodeResetCfg(&cfg); + return 0; +} + +static int32_t dnodeWriteCfg() { + char file[TSDB_FILENAME_LEN + 20] = {0}; + sprintf(file, "%s/dnodeCfg.json", tsDnodeDir); + + FILE *fp = fopen(file, "w"); + if (!fp) { + dError("failed to write %s, reason:%s", file, strerror(errno)); + return -1; + } + + int32_t len = 0; + int32_t maxLen = 200; + char * content = calloc(1, maxLen + 1); + + len += snprintf(content + len, maxLen - len, "{\n"); + len += snprintf(content + len, maxLen - len, " \"dnodeId\": %d,\n", tsCfg.dnodeId); + len += snprintf(content + len, maxLen - len, " \"clusterId\": \"%s\"\n", tsCfg.clusterId); + len += snprintf(content + len, maxLen - len, "}\n"); + + fwrite(content, 1, len, fp); + fflush(fp); + fclose(fp); + free(content); + terrno = 0; + + dInfo("successed to write %s", file); + return 0; +} diff --git a/src/dnode/src/dnodeCheck.c b/src/dnode/src/dnodeCheck.c index 9b68fc1f6c7a4db266eeb36cc20459bc71870739..a9ee4ac649c2f4b2734ce7f0dd59004b08e2fb67 100644 --- a/src/dnode/src/dnodeCheck.c +++ b/src/dnode/src/dnodeCheck.c @@ -15,9 +15,7 @@ #define _DEFAULT_SOURCE #include "os.h" -#include "taosdef.h" #include "tglobal.h" -#include "mnode.h" #include "dnodeInt.h" #include "dnodeCheck.h" @@ -30,8 +28,8 @@ typedef struct { void (*stopFp)(); } SCheckItem; -static SCheckItem tsCheckItem[TSDB_CHECK_ITEM_MAX] = {{0}}; -int64_t tsMinFreeMemSizeForStart = 0; +static SCheckItem tsCheckItem[TSDB_CHECK_ITEM_MAX] = {{0}}; +int64_t tsMinFreeMemSizeForStart = 0; static int bindTcpPort(int port) { int serverSocket; @@ -264,8 +262,6 @@ int32_t dnodeInitCheck() { } } + dInfo("dnode check is initialized"); return 0; } - - - diff --git a/src/dnode/src/dnodeEps.c b/src/dnode/src/dnodeEps.c new file mode 100644 index 0000000000000000000000000000000000000000..9c90c391813f57024ecf090136dfa86f5e5f91e6 --- /dev/null +++ b/src/dnode/src/dnodeEps.c @@ -0,0 +1,283 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#define _DEFAULT_SOURCE +#include "os.h" +#include "cJSON.h" +#include "tglobal.h" +#include "hash.h" +#include "dnode.h" +#include "dnodeInt.h" +#include "dnodeEps.h" + +static SDnodeEps *tsEps = NULL; +static SHashObj * tsEpsHash = NULL; +static pthread_mutex_t tsEpsMutex; + +static int32_t dnodeReadEps(); +static int32_t dnodeWriteEps(); +static void dnodeResetEps(SDnodeEps *eps); +static void dnodePrintEps(SDnodeEps *eps); + +int32_t dnodeInitEps() { + pthread_mutex_init(&tsEpsMutex, NULL); + tsEpsHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, true); + dnodeResetEps(NULL); + int32_t ret = dnodeReadEps(); + if (ret == 0) { + dInfo("dnode eps is initialized"); + } + return ret; +} + +void dnodeCleanupEps() { + pthread_mutex_lock(&tsEpsMutex); + if (tsEps) { + free(tsEps); + tsEps = NULL; + } + if (tsEpsHash) { + taosHashCleanup(tsEpsHash); + tsEpsHash = NULL; + } + pthread_mutex_unlock(&tsEpsMutex); + pthread_mutex_destroy(&tsEpsMutex); +} + +void dnodeUpdateEps(SDnodeEps *eps) { + if (eps == NULL) return; + + eps->dnodeNum = htonl(eps->dnodeNum); + for (int32_t i = 0; i < eps->dnodeNum; ++i) { + eps->dnodeEps[i].dnodeId = htonl(eps->dnodeEps[i].dnodeId); + eps->dnodeEps[i].dnodePort = htons(eps->dnodeEps[i].dnodePort); + } + + pthread_mutex_lock(&tsEpsMutex); + if (eps->dnodeNum != tsEps->dnodeNum) { + dnodeResetEps(eps); + dnodeWriteEps(); + } else { + int32_t size = sizeof(SDnodeEps) + eps->dnodeNum * sizeof(SDnodeEp); + if (memcmp(eps, tsEps, size) != 0) { + dnodeResetEps(eps); + dnodeWriteEps(); + } + } + pthread_mutex_unlock(&tsEpsMutex); +} + +bool dnodeCheckEpChanged(int32_t dnodeId, char *epstr) { + bool changed = false; + pthread_mutex_lock(&tsEpsMutex); + SDnodeEp *ep = taosHashGet(tsEpsHash, &dnodeId, sizeof(int32_t)); + if (ep != NULL) { + char epSaved[TSDB_EP_LEN + 1]; + snprintf(epSaved, TSDB_EP_LEN, "%s:%u", ep->dnodeFqdn, ep->dnodePort); + changed = strcmp(epstr, epSaved) != 0; + tstrncpy(epstr, epSaved, TSDB_EP_LEN); + } + pthread_mutex_unlock(&tsEpsMutex); + return changed; +} + +void dnodeUpdateEp(int32_t dnodeId, char *epstr, char *fqdn, uint16_t *port) { + pthread_mutex_lock(&tsEpsMutex); + SDnodeEp *ep = taosHashGet(tsEpsHash, &dnodeId, sizeof(int32_t)); + if (ep != NULL) { + if (port) *port = ep->dnodePort; + if (fqdn) tstrncpy(fqdn, ep->dnodeFqdn, TSDB_FQDN_LEN); + if (epstr) snprintf(epstr, TSDB_EP_LEN, "%s:%u", ep->dnodeFqdn, ep->dnodePort); + } + pthread_mutex_unlock(&tsEpsMutex); +} + +static void dnodeResetEps(SDnodeEps *eps) { + if (eps == NULL) { + int32_t size = sizeof(SDnodeEps) + sizeof(SDnodeEp); + if (tsEps == NULL) { + tsEps = calloc(1, size); + } else { + tsEps->dnodeNum = 0; + } + } else { + assert(tsEps); + + int32_t size = sizeof(SDnodeEps) + sizeof(SDnodeEp) * eps->dnodeNum; + if (eps->dnodeNum > tsEps->dnodeNum) { + tsEps = realloc(tsEps, size); + } + memcpy(tsEps, eps, size); + dnodePrintEps(eps); + } + + for (int32_t i = 0; i < tsEps->dnodeNum; ++i) { + SDnodeEp *ep = &tsEps->dnodeEps[i]; + taosHashPut(tsEpsHash, &ep->dnodeId, sizeof(int32_t), ep, sizeof(SDnodeEp)); + } +} + +static void dnodePrintEps(SDnodeEps *eps) { + dDebug("print dnodeEp, dnodeNum:%d", eps->dnodeNum); + for (int32_t i = 0; i < eps->dnodeNum; i++) { + SDnodeEp *ep = &eps->dnodeEps[i]; + dDebug("dnodeId:%d, dnodeFqdn:%s dnodePort:%u", ep->dnodeId, ep->dnodeFqdn, ep->dnodePort); + } +} + +static int32_t dnodeReadEps() { + int32_t ret = -1; + int32_t len = 0; + int32_t maxLen = 30000; + char * content = calloc(1, maxLen + 1); + cJSON * 
root = NULL; + FILE * fp = NULL; + SDnodeEps *eps = NULL; + + char file[TSDB_FILENAME_LEN + 20] = {0}; + sprintf(file, "%s/dnodeEps.json", tsDnodeDir); + + fp = fopen(file, "r"); + if (!fp) { + dDebug("failed to read %s, file not exist", file); + goto PRASE_EPS_OVER; + } + + len = fread(content, 1, maxLen, fp); + if (len <= 0) { + dError("failed to read %s, content is null", file); + goto PRASE_EPS_OVER; + } + + content[len] = 0; + root = cJSON_Parse(content); + if (root == NULL) { + dError("failed to read %s, invalid json format", file); + goto PRASE_EPS_OVER; + } + + cJSON *dnodeNum = cJSON_GetObjectItem(root, "dnodeNum"); + if (!dnodeNum || dnodeNum->type != cJSON_Number) { + dError("failed to read %s, dnodeNum not found", file); + goto PRASE_EPS_OVER; + } + + cJSON *dnodeInfos = cJSON_GetObjectItem(root, "dnodeInfos"); + if (!dnodeInfos || dnodeInfos->type != cJSON_Array) { + dError("failed to read %s, dnodeInfos not found", file); + goto PRASE_EPS_OVER; + } + + int32_t dnodeInfosSize = cJSON_GetArraySize(dnodeInfos); + if (dnodeInfosSize != dnodeNum->valueint) { + dError("failed to read %s, dnodeInfos size:%d not matched dnodeNum:%d", file, dnodeInfosSize, + (int32_t)dnodeNum->valueint); + goto PRASE_EPS_OVER; + } + + int32_t epsSize = sizeof(SDnodeEps) + dnodeInfosSize * sizeof(SDnodeEp); + eps = calloc(1, epsSize); + eps->dnodeNum = dnodeInfosSize; + + for (int32_t i = 0; i < dnodeInfosSize; ++i) { + cJSON *dnodeInfo = cJSON_GetArrayItem(dnodeInfos, i); + if (dnodeInfo == NULL) break; + + SDnodeEp *ep = &eps->dnodeEps[i]; + + cJSON *dnodeId = cJSON_GetObjectItem(dnodeInfo, "dnodeId"); + if (!dnodeId || dnodeId->type != cJSON_Number) { + dError("failed to read %s, dnodeId not found", file); + goto PRASE_EPS_OVER; + } + ep->dnodeId = dnodeId->valueint; + + cJSON *dnodeFqdn = cJSON_GetObjectItem(dnodeInfo, "dnodeFqdn"); + if (!dnodeFqdn || dnodeFqdn->type != cJSON_String || dnodeFqdn->valuestring == NULL) { + dError("failed to read %s, dnodeFqdn not found", file); + goto PRASE_EPS_OVER; + } + strncpy(ep->dnodeFqdn, dnodeFqdn->valuestring, TSDB_FQDN_LEN); + + cJSON *dnodePort = cJSON_GetObjectItem(dnodeInfo, "dnodePort"); + if (!dnodePort || dnodePort->type != cJSON_Number) { + dError("failed to read %s, dnodePort not found", file); + goto PRASE_EPS_OVER; + } + ep->dnodePort = (uint16_t)dnodePort->valueint; + } + + ret = 0; + + dInfo("read file %s successed", file); + dnodePrintEps(eps); + +PRASE_EPS_OVER: + if (content != NULL) free(content); + if (root != NULL) cJSON_Delete(root); + if (fp != NULL) fclose(fp); + if (ret != 0) { + if (eps) free(eps); + eps = NULL; + } + + dnodeResetEps(eps); + if (eps) free(eps); + + dnodeUpdateEp(dnodeGetDnodeId(), tsLocalEp, tsLocalFqdn, &tsServerPort); + + terrno = 0; + return 0; +} + +static int32_t dnodeWriteEps() { + char file[TSDB_FILENAME_LEN + 20] = {0}; + sprintf(file, "%s/dnodeEps.json", tsDnodeDir); + + FILE *fp = fopen(file, "w"); + if (!fp) { + dError("failed to write %s, reason:%s", file, strerror(errno)); + return -1; + } + + int32_t len = 0; + int32_t maxLen = 30000; + char * content = calloc(1, maxLen + 1); + + len += snprintf(content + len, maxLen - len, "{\n"); + len += snprintf(content + len, maxLen - len, " \"dnodeNum\": %d,\n", tsEps->dnodeNum); + len += snprintf(content + len, maxLen - len, " \"dnodeInfos\": [{\n"); + for (int32_t i = 0; i < tsEps->dnodeNum; ++i) { + SDnodeEp *ep = &tsEps->dnodeEps[i]; + len += snprintf(content + len, maxLen - len, " \"dnodeId\": %d,\n", ep->dnodeId); + len += snprintf(content + len, maxLen - 
len, " \"dnodeFqdn\": \"%s\",\n", ep->dnodeFqdn); + len += snprintf(content + len, maxLen - len, " \"dnodePort\": %u\n", ep->dnodePort); + if (i < tsEps->dnodeNum - 1) { + len += snprintf(content + len, maxLen - len, " },{\n"); + } else { + len += snprintf(content + len, maxLen - len, " }]\n"); + } + } + len += snprintf(content + len, maxLen - len, "}\n"); + + fwrite(content, 1, len, fp); + fflush(fp); + fclose(fp); + free(content); + terrno = 0; + + dInfo("successed to write %s", file); + return 0; +} diff --git a/src/dnode/src/dnodeMInfos.c b/src/dnode/src/dnodeMInfos.c new file mode 100644 index 0000000000000000000000000000000000000000..cefe44aebe7f87803141ce3d75c45dca18463849 --- /dev/null +++ b/src/dnode/src/dnodeMInfos.c @@ -0,0 +1,288 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#define _DEFAULT_SOURCE +#include "os.h" +#include "cJSON.h" +#include "tglobal.h" +#include "mnode.h" +#include "dnode.h" +#include "dnodeInt.h" +#include "dnodeMInfos.h" + +static SMnodeInfos tsMInfos; +static SRpcEpSet tsMEpSet; +static pthread_mutex_t tsMInfosMutex; + +static void dnodeResetMInfos(SMnodeInfos *minfos); +static void dnodePrintMInfos(SMnodeInfos *minfos); +static int32_t dnodeReadMInfos(); +static int32_t dnodeWriteMInfos(); + +int32_t dnodeInitMInfos() { + pthread_mutex_init(&tsMInfosMutex, NULL); + dnodeResetMInfos(NULL); + int32_t ret = dnodeReadMInfos(); + if (ret == 0) { + dInfo("dnode minfos is initialized"); + } + + return ret; +} + +void dnodeCleanupMInfos() { pthread_mutex_destroy(&tsMInfosMutex); } + +void dnodeUpdateMInfos(SMnodeInfos *minfos) { + if (minfos->mnodeNum <= 0 || minfos->mnodeNum > 3) { + dError("invalid mnode infos, mnodeNum:%d", minfos->mnodeNum); + return; + } + + for (int32_t i = 0; i < minfos->mnodeNum; ++i) { + SMnodeInfo *minfo = &minfos->mnodeInfos[i]; + minfo->mnodeId = htonl(minfo->mnodeId); + if (minfo->mnodeId <= 0 || strlen(minfo->mnodeEp) <= 5) { + dError("invalid mnode info:%d, mnodeId:%d mnodeEp:%s", i, minfo->mnodeId, minfo->mnodeEp); + return; + } + } + + pthread_mutex_lock(&tsMInfosMutex); + if (minfos->mnodeNum != tsMInfos.mnodeNum) { + dnodeResetMInfos(minfos); + dnodeWriteMInfos(); + sdbUpdateAsync(); + } else { + int32_t size = sizeof(SMnodeInfos); + if (memcmp(minfos, &tsMInfos, size) != 0) { + dnodeResetMInfos(minfos); + dnodeWriteMInfos(); + sdbUpdateAsync(); + } + } + pthread_mutex_unlock(&tsMInfosMutex); +} + +void dnodeUpdateEpSetForPeer(SRpcEpSet *ep) { + if (ep->numOfEps <= 0) { + dError("minfos is changed, but content is invalid, discard it"); + return; + } + + pthread_mutex_lock(&tsMInfosMutex); + dInfo("minfos is changed, numOfEps:%d inUse:%d", ep->numOfEps, ep->inUse); + for (int i = 0; i < ep->numOfEps; ++i) { + ep->port[i] -= TSDB_PORT_DNODEDNODE; + dInfo("minfo:%d %s:%u", i, ep->fqdn[i], ep->port[i]); + } + tsMEpSet = *ep; + pthread_mutex_unlock(&tsMInfosMutex); +} + +bool dnodeIsMasterEp(char *ep) { + pthread_mutex_lock(&tsMInfosMutex); + bool isMaster = strcmp(ep, 
tsMInfos.mnodeInfos[tsMEpSet.inUse].mnodeEp) == 0; + pthread_mutex_unlock(&tsMInfosMutex); + + return isMaster; +} + +void dnodeGetMInfos(SMnodeInfos *minfos) { + pthread_mutex_lock(&tsMInfosMutex); + memcpy(minfos, &tsMInfos, sizeof(SMnodeInfos)); + for (int32_t i = 0; i < tsMInfos.mnodeNum; ++i) { + minfos->mnodeInfos[i].mnodeId = htonl(tsMInfos.mnodeInfos[i].mnodeId); + } + pthread_mutex_unlock(&tsMInfosMutex); +} + +void dnodeGetEpSetForPeer(SRpcEpSet *epSet) { + pthread_mutex_lock(&tsMInfosMutex); + *epSet = tsMEpSet; + for (int i = 0; i < epSet->numOfEps; ++i) { + epSet->port[i] += TSDB_PORT_DNODEDNODE; + } + pthread_mutex_unlock(&tsMInfosMutex); +} + +void dnodeGetEpSetForShell(SRpcEpSet *epSet) { + pthread_mutex_lock(&tsMInfosMutex); + *epSet = tsMEpSet; + pthread_mutex_unlock(&tsMInfosMutex); +} + +static void dnodePrintMInfos(SMnodeInfos *minfos) { + dInfo("print mnode infos, mnodeNum:%d inUse:%d", minfos->mnodeNum, minfos->inUse); + for (int32_t i = 0; i < minfos->mnodeNum; i++) { + dInfo("mnode index:%d, %s", minfos->mnodeInfos[i].mnodeId, minfos->mnodeInfos[i].mnodeEp); + } +} + +static void dnodeResetMInfos(SMnodeInfos *minfos) { + if (minfos == NULL) { + tsMEpSet.numOfEps = 1; + taosGetFqdnPortFromEp(tsFirst, tsMEpSet.fqdn[0], &tsMEpSet.port[0]); + + if (strcmp(tsSecond, tsFirst) != 0) { + tsMEpSet.numOfEps = 2; + taosGetFqdnPortFromEp(tsSecond, tsMEpSet.fqdn[1], &tsMEpSet.port[1]); + } + return; + } + + if (minfos->mnodeNum == 0) return; + + int32_t size = sizeof(SMnodeInfos); + memcpy(&tsMInfos, minfos, size); + + tsMEpSet.inUse = tsMInfos.inUse; + tsMEpSet.numOfEps = tsMInfos.mnodeNum; + for (int32_t i = 0; i < tsMInfos.mnodeNum; i++) { + taosGetFqdnPortFromEp(tsMInfos.mnodeInfos[i].mnodeEp, tsMEpSet.fqdn[i], &tsMEpSet.port[i]); + } + + dnodePrintMInfos(minfos); +} + +static int32_t dnodeReadMInfos() { + int32_t len = 0; + int32_t maxLen = 2000; + char * content = calloc(1, maxLen + 1); + cJSON * root = NULL; + FILE * fp = NULL; + SMnodeInfos minfos = {0}; + + char file[TSDB_FILENAME_LEN + 20] = {0}; + sprintf(file, "%s/mnodeEpSet.json", tsDnodeDir); + + fp = fopen(file, "r"); + if (!fp) { + dDebug("failed to read %s, file not exist", file); + goto PARSE_MINFOS_OVER; + } + + len = fread(content, 1, maxLen, fp); + if (len <= 0) { + dError("failed to read %s, content is null", file); + goto PARSE_MINFOS_OVER; + } + + content[len] = 0; + root = cJSON_Parse(content); + if (root == NULL) { + dError("failed to read %s, invalid json format", file); + goto PARSE_MINFOS_OVER; + } + + cJSON *inUse = cJSON_GetObjectItem(root, "inUse"); + if (!inUse || inUse->type != cJSON_Number) { + dError("failed to read mnodeEpSet.json, inUse not found"); + goto PARSE_MINFOS_OVER; + } + tsMInfos.inUse = inUse->valueint; + + cJSON *nodeNum = cJSON_GetObjectItem(root, "nodeNum"); + if (!nodeNum || nodeNum->type != cJSON_Number) { + dError("failed to read mnodeEpSet.json, nodeNum not found"); + goto PARSE_MINFOS_OVER; + } + minfos.mnodeNum = nodeNum->valueint; + + cJSON *nodeInfos = cJSON_GetObjectItem(root, "nodeInfos"); + if (!nodeInfos || nodeInfos->type != cJSON_Array) { + dError("failed to read mnodeEpSet.json, nodeInfos not found"); + goto PARSE_MINFOS_OVER; + } + + int size = cJSON_GetArraySize(nodeInfos); + if (size != minfos.mnodeNum) { + dError("failed to read mnodeEpSet.json, nodeInfos size not matched"); + goto PARSE_MINFOS_OVER; + } + + for (int i = 0; i < size; ++i) { + cJSON *nodeInfo = cJSON_GetArrayItem(nodeInfos, i); + if (nodeInfo == NULL) continue; + + cJSON *nodeId = 
cJSON_GetObjectItem(nodeInfo, "nodeId"); + if (!nodeId || nodeId->type != cJSON_Number) { + dError("failed to read mnodeEpSet.json, nodeId not found"); + goto PARSE_MINFOS_OVER; + } + minfos.mnodeInfos[i].mnodeId = nodeId->valueint; + + cJSON *nodeEp = cJSON_GetObjectItem(nodeInfo, "nodeEp"); + if (!nodeEp || nodeEp->type != cJSON_String || nodeEp->valuestring == NULL) { + dError("failed to read mnodeEpSet.json, nodeName not found"); + goto PARSE_MINFOS_OVER; + } + strncpy(minfos.mnodeInfos[i].mnodeEp, nodeEp->valuestring, TSDB_EP_LEN); + } + + dInfo("read file %s successed", file); + dnodePrintMInfos(&minfos); + +PARSE_MINFOS_OVER: + if (content != NULL) free(content); + if (root != NULL) cJSON_Delete(root); + if (fp != NULL) fclose(fp); + terrno = 0; + + for (int32_t i = 0; i < minfos.mnodeNum; ++i) { + SMnodeInfo *mInfo = &minfos.mnodeInfos[i]; + dnodeUpdateEp(mInfo->mnodeId, mInfo->mnodeEp, NULL, NULL); + } + dnodeResetMInfos(&minfos); + return 0; +} + +static int32_t dnodeWriteMInfos() { + char file[TSDB_FILENAME_LEN + 20] = {0}; + sprintf(file, "%s/mnodeEpSet.json", tsDnodeDir); + + FILE *fp = fopen(file, "w"); + if (!fp) { + dError("failed to write %s, reason:%s", file, strerror(errno)); + return -1; + } + + int32_t len = 0; + int32_t maxLen = 2000; + char * content = calloc(1, maxLen + 1); + + len += snprintf(content + len, maxLen - len, "{\n"); + len += snprintf(content + len, maxLen - len, " \"inUse\": %d,\n", tsMInfos.inUse); + len += snprintf(content + len, maxLen - len, " \"nodeNum\": %d,\n", tsMInfos.mnodeNum); + len += snprintf(content + len, maxLen - len, " \"nodeInfos\": [{\n"); + for (int32_t i = 0; i < tsMInfos.mnodeNum; i++) { + len += snprintf(content + len, maxLen - len, " \"nodeId\": %d,\n", tsMInfos.mnodeInfos[i].mnodeId); + len += snprintf(content + len, maxLen - len, " \"nodeEp\": \"%s\"\n", tsMInfos.mnodeInfos[i].mnodeEp); + if (i < tsMInfos.mnodeNum - 1) { + len += snprintf(content + len, maxLen - len, " },{\n"); + } else { + len += snprintf(content + len, maxLen - len, " }]\n"); + } + } + len += snprintf(content + len, maxLen - len, "}\n"); + + fwrite(content, 1, len, fp); + fflush(fp); + fclose(fp); + free(content); + terrno = 0; + + dInfo("successed to write %s", file); + return 0; +} diff --git a/src/dnode/src/dnodeMPeer.c b/src/dnode/src/dnodeMPeer.c index 8414d79a9815287dfdfae3af5cc123304745c56d..05b37bd3388c22046d2a0dd1827655c97c2e40ad 100644 --- a/src/dnode/src/dnodeMPeer.c +++ b/src/dnode/src/dnodeMPeer.c @@ -35,44 +35,44 @@ typedef struct { typedef struct { int32_t curNum; int32_t maxNum; - SMPeerWorker *peerWorker; + SMPeerWorker *worker; } SMPeerWorkerPool; -static SMPeerWorkerPool tsMPeerPool; +static SMPeerWorkerPool tsMPeerWP; static taos_qset tsMPeerQset; static taos_queue tsMPeerQueue; -static void *dnodeProcessMnodePeerQueue(void *param); +static void *dnodeProcessMPeerQueue(void *param); -int32_t dnodeInitMnodePeer() { +int32_t dnodeInitMPeer() { tsMPeerQset = taosOpenQset(); - tsMPeerPool.maxNum = 1; - tsMPeerPool.curNum = 0; - tsMPeerPool.peerWorker = (SMPeerWorker *)calloc(sizeof(SMPeerWorker), tsMPeerPool.maxNum); + tsMPeerWP.maxNum = 1; + tsMPeerWP.curNum = 0; + tsMPeerWP.worker = (SMPeerWorker *)calloc(sizeof(SMPeerWorker), tsMPeerWP.maxNum); - if (tsMPeerPool.peerWorker == NULL) return -1; - for (int32_t i = 0; i < tsMPeerPool.maxNum; ++i) { - SMPeerWorker *pWorker = tsMPeerPool.peerWorker + i; + if (tsMPeerWP.worker == NULL) return -1; + for (int32_t i = 0; i < tsMPeerWP.maxNum; ++i) { + SMPeerWorker *pWorker = tsMPeerWP.worker + i; 
pWorker->workerId = i; dDebug("dnode mpeer worker:%d is created", i); } - dDebug("dnode mpeer is opened, workers:%d qset:%p", tsMPeerPool.maxNum, tsMPeerQset); + dDebug("dnode mpeer is initialized, workers:%d qset:%p", tsMPeerWP.maxNum, tsMPeerQset); return 0; } -void dnodeCleanupMnodePeer() { - for (int32_t i = 0; i < tsMPeerPool.maxNum; ++i) { - SMPeerWorker *pWorker = tsMPeerPool.peerWorker + i; +void dnodeCleanupMPeer() { + for (int32_t i = 0; i < tsMPeerWP.maxNum; ++i) { + SMPeerWorker *pWorker = tsMPeerWP.worker + i; if (pWorker->thread) { taosQsetThreadResume(tsMPeerQset); } dDebug("dnode mpeer worker:%d is closed", i); } - for (int32_t i = 0; i < tsMPeerPool.maxNum; ++i) { - SMPeerWorker *pWorker = tsMPeerPool.peerWorker + i; + for (int32_t i = 0; i < tsMPeerWP.maxNum; ++i) { + SMPeerWorker *pWorker = tsMPeerWP.worker + i; dDebug("dnode mpeer worker:%d start to join", i); if (pWorker->thread) { pthread_join(pWorker->thread, NULL); @@ -84,61 +84,60 @@ void dnodeCleanupMnodePeer() { taosCloseQset(tsMPeerQset); tsMPeerQset = NULL; - taosTFree(tsMPeerPool.peerWorker); + tfree(tsMPeerWP.worker); } -int32_t dnodeAllocateMnodePqueue() { +int32_t dnodeAllocateMPeerQueue() { tsMPeerQueue = taosOpenQueue(); if (tsMPeerQueue == NULL) return TSDB_CODE_DND_OUT_OF_MEMORY; taosAddIntoQset(tsMPeerQset, tsMPeerQueue, NULL); - for (int32_t i = tsMPeerPool.curNum; i < tsMPeerPool.maxNum; ++i) { - SMPeerWorker *pWorker = tsMPeerPool.peerWorker + i; + for (int32_t i = tsMPeerWP.curNum; i < tsMPeerWP.maxNum; ++i) { + SMPeerWorker *pWorker = tsMPeerWP.worker + i; pWorker->workerId = i; pthread_attr_t thAttr; pthread_attr_init(&thAttr); pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE); - if (pthread_create(&pWorker->thread, &thAttr, dnodeProcessMnodePeerQueue, pWorker) != 0) { + if (pthread_create(&pWorker->thread, &thAttr, dnodeProcessMPeerQueue, pWorker) != 0) { dError("failed to create thread to process mpeer queue, reason:%s", strerror(errno)); } pthread_attr_destroy(&thAttr); - tsMPeerPool.curNum = i + 1; - dDebug("dnode mpeer worker:%d is launched, total:%d", pWorker->workerId, tsMPeerPool.maxNum); + tsMPeerWP.curNum = i + 1; + dDebug("dnode mpeer worker:%d is launched, total:%d", pWorker->workerId, tsMPeerWP.maxNum); } dDebug("dnode mpeer queue:%p is allocated", tsMPeerQueue); return TSDB_CODE_SUCCESS; } -void dnodeFreeMnodePqueue() { +void dnodeFreeMPeerQueue() { dDebug("dnode mpeer queue:%p is freed", tsMPeerQueue); taosCloseQueue(tsMPeerQueue); tsMPeerQueue = NULL; } -void dnodeDispatchToMnodePeerQueue(SRpcMsg *pMsg) { +void dnodeDispatchToMPeerQueue(SRpcMsg *pMsg) { if (!mnodeIsRunning() || tsMPeerQueue == NULL) { dnodeSendRedirectMsg(pMsg, false); - rpcFreeCont(pMsg->pCont); - return; + } else { + SMnodeMsg *pPeer = mnodeCreateMsg(pMsg); + taosWriteQitem(tsMPeerQueue, TAOS_QTYPE_RPC, pPeer); } - SMnodeMsg *pPeer = (SMnodeMsg *)taosAllocateQitem(sizeof(SMnodeMsg)); - mnodeCreateMsg(pPeer, pMsg); - taosWriteQitem(tsMPeerQueue, TAOS_QTYPE_RPC, pPeer); + rpcFreeCont(pMsg->pCont); } -static void dnodeFreeMnodePeerMsg(SMnodeMsg *pPeer) { +static void dnodeFreeMPeerMsg(SMnodeMsg *pPeer) { mnodeCleanupMsg(pPeer); taosFreeQitem(pPeer); } -static void dnodeSendRpcMnodePeerRsp(SMnodeMsg *pPeer, int32_t code) { +static void dnodeSendRpcMPeerRsp(SMnodeMsg *pPeer, int32_t code) { if (code == TSDB_CODE_MND_ACTION_IN_PROGRESS) return; SRpcMsg rpcRsp = { @@ -149,10 +148,10 @@ static void dnodeSendRpcMnodePeerRsp(SMnodeMsg *pPeer, int32_t code) { }; rpcSendResponse(&rpcRsp); - 
dnodeFreeMnodePeerMsg(pPeer); + dnodeFreeMPeerMsg(pPeer); } -static void *dnodeProcessMnodePeerQueue(void *param) { +static void *dnodeProcessMPeerQueue(void *param) { SMnodeMsg *pPeerMsg; int32_t type; void * unUsed; @@ -165,7 +164,7 @@ static void *dnodeProcessMnodePeerQueue(void *param) { dDebug("msg:%s will be processed in mpeer queue", taosMsg[pPeerMsg->rpcMsg.msgType]); int32_t code = mnodeProcessPeerReq(pPeerMsg); - dnodeSendRpcMnodePeerRsp(pPeerMsg, code); + dnodeSendRpcMPeerRsp(pPeerMsg, code); } return NULL; diff --git a/src/dnode/src/dnodeMRead.c b/src/dnode/src/dnodeMRead.c index fdcbb5889f766ddebdb3f1e56ccffa0b5b129552..ee9da72e4d3424ffb97b6cbc90098edd1abf3a6d 100644 --- a/src/dnode/src/dnodeMRead.c +++ b/src/dnode/src/dnodeMRead.c @@ -35,46 +35,46 @@ typedef struct { typedef struct { int32_t curNum; int32_t maxNum; - SMReadWorker *readWorker; + SMReadWorker *worker; } SMReadWorkerPool; -static SMReadWorkerPool tsMReadPool; +static SMReadWorkerPool tsMReadWP; static taos_qset tsMReadQset; static taos_queue tsMReadQueue; -static void *dnodeProcessMnodeReadQueue(void *param); +static void *dnodeProcessMReadQueue(void *param); -int32_t dnodeInitMnodeRead() { +int32_t dnodeInitMRead() { tsMReadQset = taosOpenQset(); - tsMReadPool.maxNum = tsNumOfCores * tsNumOfThreadsPerCore / 2; - tsMReadPool.maxNum = MAX(2, tsMReadPool.maxNum); - tsMReadPool.maxNum = MIN(4, tsMReadPool.maxNum); - tsMReadPool.curNum = 0; - tsMReadPool.readWorker = (SMReadWorker *)calloc(sizeof(SMReadWorker), tsMReadPool.maxNum); + tsMReadWP.maxNum = tsNumOfCores * tsNumOfThreadsPerCore / 2; + tsMReadWP.maxNum = MAX(2, tsMReadWP.maxNum); + tsMReadWP.maxNum = MIN(4, tsMReadWP.maxNum); + tsMReadWP.curNum = 0; + tsMReadWP.worker = (SMReadWorker *)calloc(sizeof(SMReadWorker), tsMReadWP.maxNum); - if (tsMReadPool.readWorker == NULL) return -1; - for (int32_t i = 0; i < tsMReadPool.maxNum; ++i) { - SMReadWorker *pWorker = tsMReadPool.readWorker + i; + if (tsMReadWP.worker == NULL) return -1; + for (int32_t i = 0; i < tsMReadWP.maxNum; ++i) { + SMReadWorker *pWorker = tsMReadWP.worker + i; pWorker->workerId = i; dDebug("dnode mread worker:%d is created", i); } - dDebug("dnode mread is opened, workers:%d qset:%p", tsMReadPool.maxNum, tsMReadQset); + dDebug("dnode mread is initialized, workers:%d qset:%p", tsMReadWP.maxNum, tsMReadQset); return 0; } -void dnodeCleanupMnodeRead() { - for (int32_t i = 0; i < tsMReadPool.maxNum; ++i) { - SMReadWorker *pWorker = tsMReadPool.readWorker + i; +void dnodeCleanupMRead() { + for (int32_t i = 0; i < tsMReadWP.maxNum; ++i) { + SMReadWorker *pWorker = tsMReadWP.worker + i; if (pWorker->thread) { taosQsetThreadResume(tsMReadQset); } dDebug("dnode mread worker:%d is closed", i); } - for (int32_t i = 0; i < tsMReadPool.maxNum; ++i) { - SMReadWorker *pWorker = tsMReadPool.readWorker + i; + for (int32_t i = 0; i < tsMReadWP.maxNum; ++i) { + SMReadWorker *pWorker = tsMReadWP.worker + i; dDebug("dnode mread worker:%d start to join", i); if (pWorker->thread) { pthread_join(pWorker->thread, NULL); @@ -86,64 +86,63 @@ void dnodeCleanupMnodeRead() { taosCloseQset(tsMReadQset); tsMReadQset = NULL; - free(tsMReadPool.readWorker); + free(tsMReadWP.worker); } -int32_t dnodeAllocateMnodeRqueue() { +int32_t dnodeAllocMReadQueue() { tsMReadQueue = taosOpenQueue(); if (tsMReadQueue == NULL) return TSDB_CODE_DND_OUT_OF_MEMORY; taosAddIntoQset(tsMReadQset, tsMReadQueue, NULL); - for (int32_t i = tsMReadPool.curNum; i < tsMReadPool.maxNum; ++i) { - SMReadWorker *pWorker = tsMReadPool.readWorker + i; + for 
(int32_t i = tsMReadWP.curNum; i < tsMReadWP.maxNum; ++i) { + SMReadWorker *pWorker = tsMReadWP.worker + i; pWorker->workerId = i; pthread_attr_t thAttr; pthread_attr_init(&thAttr); pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE); - if (pthread_create(&pWorker->thread, &thAttr, dnodeProcessMnodeReadQueue, pWorker) != 0) { + if (pthread_create(&pWorker->thread, &thAttr, dnodeProcessMReadQueue, pWorker) != 0) { dError("failed to create thread to process mread queue, reason:%s", strerror(errno)); } pthread_attr_destroy(&thAttr); - tsMReadPool.curNum = i + 1; - dDebug("dnode mread worker:%d is launched, total:%d", pWorker->workerId, tsMReadPool.maxNum); + tsMReadWP.curNum = i + 1; + dDebug("dnode mread worker:%d is launched, total:%d", pWorker->workerId, tsMReadWP.maxNum); } dDebug("dnode mread queue:%p is allocated", tsMReadQueue); return TSDB_CODE_SUCCESS; } -void dnodeFreeMnodeRqueue() { +void dnodeFreeMReadQueue() { dDebug("dnode mread queue:%p is freed", tsMReadQueue); taosCloseQueue(tsMReadQueue); tsMReadQueue = NULL; } -void dnodeDispatchToMnodeReadQueue(SRpcMsg *pMsg) { +void dnodeDispatchToMReadQueue(SRpcMsg *pMsg) { if (!mnodeIsRunning() || tsMReadQueue == NULL) { dnodeSendRedirectMsg(pMsg, true); - rpcFreeCont(pMsg->pCont); - return; + } else { + SMnodeMsg *pRead = mnodeCreateMsg(pMsg); + taosWriteQitem(tsMReadQueue, TAOS_QTYPE_RPC, pRead); } - SMnodeMsg *pRead = (SMnodeMsg *)taosAllocateQitem(sizeof(SMnodeMsg)); - mnodeCreateMsg(pRead, pMsg); - taosWriteQitem(tsMReadQueue, TAOS_QTYPE_RPC, pRead); + rpcFreeCont(pMsg->pCont); } -static void dnodeFreeMnodeReadMsg(SMnodeMsg *pRead) { +static void dnodeFreeMReadMsg(SMnodeMsg *pRead) { mnodeCleanupMsg(pRead); taosFreeQitem(pRead); } -static void dnodeSendRpcMnodeReadRsp(SMnodeMsg *pRead, int32_t code) { +static void dnodeSendRpcMReadRsp(SMnodeMsg *pRead, int32_t code) { if (code == TSDB_CODE_MND_ACTION_IN_PROGRESS) return; if (code == TSDB_CODE_MND_ACTION_NEED_REPROCESSED) { // may be a auto create req, should put into write queue - dnodeReprocessMnodeWriteMsg(pRead); + dnodeReprocessMWriteMsg(pRead); return; } @@ -155,23 +154,24 @@ static void dnodeSendRpcMnodeReadRsp(SMnodeMsg *pRead, int32_t code) { }; rpcSendResponse(&rpcRsp); - dnodeFreeMnodeReadMsg(pRead); + dnodeFreeMReadMsg(pRead); } -static void *dnodeProcessMnodeReadQueue(void *param) { - SMnodeMsg *pReadMsg; +static void *dnodeProcessMReadQueue(void *param) { + SMnodeMsg *pRead; int32_t type; void * unUsed; - + while (1) { - if (taosReadQitemFromQset(tsMReadQset, &type, (void **)&pReadMsg, &unUsed) == 0) { + if (taosReadQitemFromQset(tsMReadQset, &type, (void **)&pRead, &unUsed) == 0) { dDebug("qset:%p, mnode read got no message from qset, exiting", tsMReadQset); break; } - dDebug("%p, msg:%s will be processed in mread queue", pReadMsg->rpcMsg.ahandle, taosMsg[pReadMsg->rpcMsg.msgType]); - int32_t code = mnodeProcessRead(pReadMsg); - dnodeSendRpcMnodeReadRsp(pReadMsg, code); + dDebug("msg:%p, app:%p type:%s will be processed in mread queue", pRead, pRead->rpcMsg.ahandle, + taosMsg[pRead->rpcMsg.msgType]); + int32_t code = mnodeProcessRead(pRead); + dnodeSendRpcMReadRsp(pRead, code); } return NULL; diff --git a/src/dnode/src/dnodeMWrite.c b/src/dnode/src/dnodeMWrite.c index 384a0fae75088197d7fb01dd67c6e1b9d38739cd..65c0d5381969f291d40245bc31dc4acdf849f5ac 100644 --- a/src/dnode/src/dnodeMWrite.c +++ b/src/dnode/src/dnodeMWrite.c @@ -36,45 +36,45 @@ typedef struct { typedef struct { int32_t curNum; int32_t maxNum; - SMWriteWorker *writeWorker; + SMWriteWorker
*worker; } SMWriteWorkerPool; -static SMWriteWorkerPool tsMWritePool; +static SMWriteWorkerPool tsMWriteWP; static taos_qset tsMWriteQset; static taos_queue tsMWriteQueue; extern void * tsDnodeTmr; -static void *dnodeProcessMnodeWriteQueue(void *param); +static void *dnodeProcessMWriteQueue(void *param); -int32_t dnodeInitMnodeWrite() { +int32_t dnodeInitMWrite() { tsMWriteQset = taosOpenQset(); - tsMWritePool.maxNum = 1; - tsMWritePool.curNum = 0; - tsMWritePool.writeWorker = (SMWriteWorker *)calloc(sizeof(SMWriteWorker), tsMWritePool.maxNum); + tsMWriteWP.maxNum = 1; + tsMWriteWP.curNum = 0; + tsMWriteWP.worker = (SMWriteWorker *)calloc(sizeof(SMWriteWorker), tsMWriteWP.maxNum); - if (tsMWritePool.writeWorker == NULL) return -1; - for (int32_t i = 0; i < tsMWritePool.maxNum; ++i) { - SMWriteWorker *pWorker = tsMWritePool.writeWorker + i; + if (tsMWriteWP.worker == NULL) return -1; + for (int32_t i = 0; i < tsMWriteWP.maxNum; ++i) { + SMWriteWorker *pWorker = tsMWriteWP.worker + i; pWorker->workerId = i; dDebug("dnode mwrite worker:%d is created", i); } - dDebug("dnode mwrite is opened, workers:%d qset:%p", tsMWritePool.maxNum, tsMWriteQset); + dDebug("dnode mwrite is initialized, workers:%d qset:%p", tsMWriteWP.maxNum, tsMWriteQset); return 0; } -void dnodeCleanupMnodeWrite() { - for (int32_t i = 0; i < tsMWritePool.maxNum; ++i) { - SMWriteWorker *pWorker = tsMWritePool.writeWorker + i; +void dnodeCleanupMWrite() { + for (int32_t i = 0; i < tsMWriteWP.maxNum; ++i) { + SMWriteWorker *pWorker = tsMWriteWP.worker + i; if (pWorker->thread) { taosQsetThreadResume(tsMWriteQset); } dDebug("dnode mwrite worker:%d is closed", i); } - for (int32_t i = 0; i < tsMWritePool.maxNum; ++i) { - SMWriteWorker *pWorker = tsMWritePool.writeWorker + i; + for (int32_t i = 0; i < tsMWriteWP.maxNum; ++i) { + SMWriteWorker *pWorker = tsMWriteWP.worker + i; dDebug("dnode mwrite worker:%d start to join", i); if (pWorker->thread) { pthread_join(pWorker->thread, NULL); @@ -86,71 +86,69 @@ void dnodeCleanupMnodeWrite() { taosCloseQset(tsMWriteQset); tsMWriteQset = NULL; - taosTFree(tsMWritePool.writeWorker); + tfree(tsMWriteWP.worker); } -int32_t dnodeAllocateMnodeWqueue() { +int32_t dnodeAllocMWritequeue() { tsMWriteQueue = taosOpenQueue(); if (tsMWriteQueue == NULL) return TSDB_CODE_DND_OUT_OF_MEMORY; taosAddIntoQset(tsMWriteQset, tsMWriteQueue, NULL); - for (int32_t i = tsMWritePool.curNum; i < tsMWritePool.maxNum; ++i) { - SMWriteWorker *pWorker = tsMWritePool.writeWorker + i; + for (int32_t i = tsMWriteWP.curNum; i < tsMWriteWP.maxNum; ++i) { + SMWriteWorker *pWorker = tsMWriteWP.worker + i; pWorker->workerId = i; pthread_attr_t thAttr; pthread_attr_init(&thAttr); pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE); - if (pthread_create(&pWorker->thread, &thAttr, dnodeProcessMnodeWriteQueue, pWorker) != 0) { + if (pthread_create(&pWorker->thread, &thAttr, dnodeProcessMWriteQueue, pWorker) != 0) { dError("failed to create thread to process mwrite queue, reason:%s", strerror(errno)); } pthread_attr_destroy(&thAttr); - tsMWritePool.curNum = i + 1; - dDebug("dnode mwrite worker:%d is launched, total:%d", pWorker->workerId, tsMWritePool.maxNum); + tsMWriteWP.curNum = i + 1; + dDebug("dnode mwrite worker:%d is launched, total:%d", pWorker->workerId, tsMWriteWP.maxNum); } dDebug("dnode mwrite queue:%p is allocated", tsMWriteQueue); return TSDB_CODE_SUCCESS; } -void dnodeFreeMnodeWqueue() { +void dnodeFreeMWritequeue() { dDebug("dnode mwrite queue:%p is freed", tsMWriteQueue); taosCloseQueue(tsMWriteQueue); 
tsMWriteQueue = NULL; } -void dnodeDispatchToMnodeWriteQueue(SRpcMsg *pMsg) { +void dnodeDispatchToMWriteQueue(SRpcMsg *pMsg) { if (!mnodeIsRunning() || tsMWriteQueue == NULL) { dnodeSendRedirectMsg(pMsg, true); - rpcFreeCont(pMsg->pCont); - return; + } else { + SMnodeMsg *pWrite = mnodeCreateMsg(pMsg); + dDebug("msg:%p, app:%p type:%s is put into mwrite queue:%p", pWrite, pWrite->rpcMsg.ahandle, + taosMsg[pWrite->rpcMsg.msgType], tsMWriteQueue); + taosWriteQitem(tsMWriteQueue, TAOS_QTYPE_RPC, pWrite); } - SMnodeMsg *pWrite = (SMnodeMsg *)taosAllocateQitem(sizeof(SMnodeMsg)); - mnodeCreateMsg(pWrite, pMsg); - - dDebug("app:%p:%p, msg:%s is put into mwrite queue:%p", pWrite->rpcMsg.ahandle, pWrite, - taosMsg[pWrite->rpcMsg.msgType], tsMWriteQueue); - taosWriteQitem(tsMWriteQueue, TAOS_QTYPE_RPC, pWrite); + rpcFreeCont(pMsg->pCont); } -static void dnodeFreeMnodeWriteMsg(SMnodeMsg *pWrite) { - dDebug("app:%p:%p, msg:%s is freed from mwrite queue:%p", pWrite->rpcMsg.ahandle, pWrite, +static void dnodeFreeMWriteMsg(SMnodeMsg *pWrite) { + dDebug("msg:%p, app:%p type:%s is freed from mwrite queue:%p", pWrite, pWrite->rpcMsg.ahandle, taosMsg[pWrite->rpcMsg.msgType], tsMWriteQueue); mnodeCleanupMsg(pWrite); taosFreeQitem(pWrite); } -void dnodeSendRpcMnodeWriteRsp(void *pMsg, int32_t code) { +void dnodeSendRpcMWriteRsp(void *pMsg, int32_t code) { SMnodeMsg *pWrite = pMsg; if (pWrite == NULL) return; if (code == TSDB_CODE_MND_ACTION_IN_PROGRESS) return; if (code == TSDB_CODE_MND_ACTION_NEED_REPROCESSED) { - dnodeReprocessMnodeWriteMsg(pWrite); + dnodeReprocessMWriteMsg(pWrite); return; } @@ -162,10 +160,10 @@ void dnodeSendRpcMnodeWriteRsp(void *pMsg, int32_t code) { }; rpcSendResponse(&rpcRsp); - dnodeFreeMnodeWriteMsg(pWrite); + dnodeFreeMWriteMsg(pWrite); } -static void *dnodeProcessMnodeWriteQueue(void *param) { +static void *dnodeProcessMWriteQueue(void *param) { SMnodeMsg *pWrite; int32_t type; void * unUsed; @@ -176,39 +174,39 @@ static void *dnodeProcessMnodeWriteQueue(void *param) { break; } - dDebug("app:%p:%p, msg:%s will be processed in mwrite queue", pWrite->rpcMsg.ahandle, pWrite, + dDebug("msg:%p, app:%p type:%s will be processed in mwrite queue", pWrite, pWrite->rpcMsg.ahandle, taosMsg[pWrite->rpcMsg.msgType]); int32_t code = mnodeProcessWrite(pWrite); - dnodeSendRpcMnodeWriteRsp(pWrite, code); + dnodeSendRpcMWriteRsp(pWrite, code); } return NULL; } -void dnodeReprocessMnodeWriteMsg(void *pMsg) { +void dnodeReprocessMWriteMsg(void *pMsg) { SMnodeMsg *pWrite = pMsg; if (!mnodeIsRunning() || tsMWriteQueue == NULL) { - dDebug("app:%p:%p, msg:%s is redirected for mnode not running, retry times:%d", pWrite->rpcMsg.ahandle, pWrite, + dDebug("msg:%p, app:%p type:%s is redirected for mnode not running, retry times:%d", pWrite, pWrite->rpcMsg.ahandle, taosMsg[pWrite->rpcMsg.msgType], pWrite->retry); dnodeSendRedirectMsg(pMsg, true); - dnodeFreeMnodeWriteMsg(pWrite); + dnodeFreeMWriteMsg(pWrite); } else { - dDebug("app:%p:%p, msg:%s is reput into mwrite queue:%p, retry times:%d", pWrite->rpcMsg.ahandle, pWrite, + dDebug("msg:%p, app:%p type:%s is reput into mwrite queue:%p, retry times:%d", pWrite, pWrite->rpcMsg.ahandle, taosMsg[pWrite->rpcMsg.msgType], tsMWriteQueue, pWrite->retry); taosWriteQitem(tsMWriteQueue, TAOS_QTYPE_RPC, pWrite); } } -static void dnodeDoDelayReprocessMnodeWriteMsg(void *param, void *tmrId) { - dnodeReprocessMnodeWriteMsg(param); +static void dnodeDoDelayReprocessMWriteMsg(void *param, void *tmrId) { + dnodeReprocessMWriteMsg(param); } -void 
dnodeDelayReprocessMnodeWriteMsg(void *pMsg) { +void dnodeDelayReprocessMWriteMsg(void *pMsg) { SMnodeMsg *mnodeMsg = pMsg; void *unUsed = NULL; - taosTmrReset(dnodeDoDelayReprocessMnodeWriteMsg, 300, mnodeMsg, tsDnodeTmr, &unUsed); + taosTmrReset(dnodeDoDelayReprocessMWriteMsg, 300, mnodeMsg, tsDnodeTmr, &unUsed); } diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c index 97e6f2ce6debe7ffc273ca103e3f55576da0eb31..130be0af202a4882a100335713ba0f81af7a3c11 100644 --- a/src/dnode/src/dnodeMain.c +++ b/src/dnode/src/dnodeMain.c @@ -19,11 +19,16 @@ #include "tutil.h" #include "tconfig.h" #include "tglobal.h" +#include "twal.h" +#include "trpc.h" #include "dnode.h" #include "dnodeInt.h" #include "dnodeMgmt.h" #include "dnodePeer.h" #include "dnodeModule.h" +#include "dnodeEps.h" +#include "dnodeMInfos.h" +#include "dnodeCfg.h" #include "dnodeCheck.h" #include "dnodeVRead.h" #include "dnodeVWrite.h" @@ -33,29 +38,36 @@ #include "dnodeShell.h" #include "dnodeTelemetry.h" +static SRunStatus tsRunStatus = TSDB_RUN_STATUS_STOPPED; + static int32_t dnodeInitStorage(); -static void dnodeCleanupStorage(); -static void dnodeSetRunStatus(SDnodeRunStatus status); -static void dnodeCheckDataDirOpenned(char *dir); -static SDnodeRunStatus tsDnodeRunStatus = TSDB_DNODE_RUN_STATUS_STOPPED; +static void dnodeCleanupStorage(); +static void dnodeSetRunStatus(SRunStatus status); +static void dnodeCheckDataDirOpenned(char *dir); static int32_t dnodeInitComponents(); -static void dnodeCleanupComponents(int32_t stepId); -static int dnodeCreateDir(const char *dir); +static void dnodeCleanupComponents(int32_t stepId); +static int dnodeCreateDir(const char *dir); typedef struct { const char *const name; - int (*init)(); - void (*cleanup)(); + int32_t (*init)(); + void (*cleanup)(); } SDnodeComponent; static const SDnodeComponent tsDnodeComponents[] = { + {"rpc", rpcInit, rpcCleanup}, {"storage", dnodeInitStorage, dnodeCleanupStorage}, + {"dnodecfg", dnodeInitCfg, dnodeCleanupCfg}, + {"dnodeeps", dnodeInitEps, dnodeCleanupEps}, + {"globalcfg" ,taosCheckGlobalCfg, NULL}, + {"mnodeinfos",dnodeInitMInfos, dnodeCleanupMInfos}, + {"wal", walInit, walCleanUp}, {"check", dnodeInitCheck, dnodeCleanupCheck}, // NOTES: dnodeInitCheck must be behind the dnodeinitStorage component !!! 
- {"vread", dnodeInitVnodeRead, dnodeCleanupVnodeRead}, - {"vwrite", dnodeInitVnodeWrite, dnodeCleanupVnodeWrite}, - {"mread", dnodeInitMnodeRead, dnodeCleanupMnodeRead}, - {"mwrite", dnodeInitMnodeWrite, dnodeCleanupMnodeWrite}, - {"mpeer", dnodeInitMnodePeer, dnodeCleanupMnodePeer}, + {"vread", dnodeInitVRead, dnodeCleanupVRead}, + {"vwrite", dnodeInitVWrite, dnodeCleanupVWrite}, + {"mread", dnodeInitMRead, dnodeCleanupMRead}, + {"mwrite", dnodeInitMWrite, dnodeCleanupMWrite}, + {"mpeer", dnodeInitMPeer, dnodeCleanupMPeer}, {"client", dnodeInitClient, dnodeCleanupClient}, {"server", dnodeInitServer, dnodeCleanupServer}, {"mgmt", dnodeInitMgmt, dnodeCleanupMgmt}, @@ -75,7 +87,9 @@ static int dnodeCreateDir(const char *dir) { static void dnodeCleanupComponents(int32_t stepId) { for (int32_t i = stepId; i >= 0; i--) { - tsDnodeComponents[i].cleanup(); + if (tsDnodeComponents[i].cleanup) { + (*tsDnodeComponents[i].cleanup)(); + } } } @@ -92,7 +106,7 @@ static int32_t dnodeInitComponents() { } int32_t dnodeInitSystem() { - dnodeSetRunStatus(TSDB_DNODE_RUN_STATUS_INITIALIZE); + dnodeSetRunStatus(TSDB_RUN_STATUS_INITIALIZE); tscEmbedded = 1; taosBlockSIGPIPE(); taosResolveCRC(); @@ -112,21 +126,20 @@ int32_t dnodeInitSystem() { printf("failed to init log file\n"); } - if (!taosReadGlobalCfg() || !taosCheckGlobalCfg()) { + if (!taosReadGlobalCfg()) { taosPrintGlobalCfg(); dError("TDengine read global config failed"); return -1; } - taosPrintGlobalCfg(); - dInfo("start to initialize TDengine on %s", tsLocalEp); + dInfo("start to initialize TDengine"); if (dnodeInitComponents() != 0) { return -1; } dnodeStartModules(); - dnodeSetRunStatus(TSDB_DNODE_RUN_STATUS_RUNING); + dnodeSetRunStatus(TSDB_RUN_STATUS_RUNING); dInfo("TDengine is initialized successfully"); @@ -134,20 +147,20 @@ int32_t dnodeInitSystem() { } void dnodeCleanUpSystem() { - if (dnodeGetRunStatus() != TSDB_DNODE_RUN_STATUS_STOPPED) { - dnodeSetRunStatus(TSDB_DNODE_RUN_STATUS_STOPPED); + if (dnodeGetRunStatus() != TSDB_RUN_STATUS_STOPPED) { + dnodeSetRunStatus(TSDB_RUN_STATUS_STOPPED); dnodeCleanupComponents(sizeof(tsDnodeComponents) / sizeof(tsDnodeComponents[0]) - 1); taos_cleanup(); taosCloseLog(); } } -SDnodeRunStatus dnodeGetRunStatus() { - return tsDnodeRunStatus; +SRunStatus dnodeGetRunStatus() { + return tsRunStatus; } -static void dnodeSetRunStatus(SDnodeRunStatus status) { - tsDnodeRunStatus = status; +static void dnodeSetRunStatus(SRunStatus status) { + tsRunStatus = status; } static void dnodeCheckDataDirOpenned(char *dir) { @@ -198,7 +211,7 @@ static int32_t dnodeInitStorage() { dnodeCheckDataDirOpenned(tsDnodeDir); - dInfo("storage directory is initialized"); + dInfo("dnode storage is initialized at %s", tsDnodeDir); return 0; } diff --git a/src/dnode/src/dnodeMgmt.c b/src/dnode/src/dnodeMgmt.c index 968a8d9759e5618753996476b40efc3be77f7925..da1852e05eca11ae9909fe75ff1bb14eee6ae5fb 100644 --- a/src/dnode/src/dnodeMgmt.c +++ b/src/dnode/src/dnodeMgmt.c @@ -31,12 +31,13 @@ #include "mnode.h" #include "dnodeInt.h" #include "dnodeMgmt.h" +#include "dnodeEps.h" +#include "dnodeCfg.h" +#include "dnodeMInfos.h" #include "dnodeVRead.h" #include "dnodeVWrite.h" #include "dnodeModule.h" -#define MPEER_CONTENT_LEN 2000 - typedef struct { pthread_t thread; int32_t threadIndex; @@ -46,23 +47,18 @@ typedef struct { int32_t * vnodeList; } SOpenVnodeThread; -void * tsDnodeTmr = NULL; -static void * tsStatusTimer = NULL; -static uint32_t tsRebootTime; - -static SRpcEpSet tsDMnodeEpSet = {0}; -static SDMMnodeInfos tsDMnodeInfos = {0}; 
-static SDMDnodeCfg tsDnodeCfg = {0}; -static taos_qset tsMgmtQset = NULL; -static taos_queue tsMgmtQueue = NULL; -static pthread_t tsQthread; - -static void dnodeUpdateMnodeInfos(SDMMnodeInfos *pMnodes); -static bool dnodeReadMnodeInfos(); -static void dnodeSaveMnodeInfos(); -static void dnodeUpdateDnodeCfg(SDMDnodeCfg *pCfg); -static bool dnodeReadDnodeCfg(); -static void dnodeSaveDnodeCfg(); +typedef struct { + SRpcMsg rpcMsg; + char pCont[]; +} SMgmtMsg; + +void * tsDnodeTmr = NULL; +static void * tsStatusTimer = NULL; +static uint32_t tsRebootTime; +static taos_qset tsMgmtQset = NULL; +static taos_queue tsMgmtQueue = NULL; +static pthread_t tsQthread; + static void dnodeProcessStatusRsp(SRpcMsg *pMsg); static void dnodeSendStatusMsg(void *handle, void *tmrId); static void *dnodeProcessMgmtQueue(void *param); @@ -74,7 +70,7 @@ static int32_t dnodeProcessAlterVnodeMsg(SRpcMsg *pMsg); static int32_t dnodeProcessDropVnodeMsg(SRpcMsg *pMsg); static int32_t dnodeProcessAlterStreamMsg(SRpcMsg *pMsg); static int32_t dnodeProcessConfigDnodeMsg(SRpcMsg *pMsg); -static int32_t dnodeProcessCreateMnodeMsg(SRpcMsg *pMsg); +static int32_t dnodeProcessCreateMnodeMsg(SRpcMsg *pMsg); static int32_t (*dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MAX])(SRpcMsg *pMsg); int32_t dnodeInitMgmt() { @@ -86,28 +82,8 @@ int32_t dnodeInitMgmt() { dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CREATE_MNODE] = dnodeProcessCreateMnodeMsg; dnodeAddClientRspHandle(TSDB_MSG_TYPE_DM_STATUS_RSP, dnodeProcessStatusRsp); - dnodeReadDnodeCfg(); tsRebootTime = taosGetTimestampSec(); - if (!dnodeReadMnodeInfos()) { - memset(&tsDMnodeEpSet, 0, sizeof(SRpcEpSet)); - memset(&tsDMnodeInfos, 0, sizeof(SDMMnodeInfos)); - - tsDMnodeEpSet.numOfEps = 1; - taosGetFqdnPortFromEp(tsFirst, tsDMnodeEpSet.fqdn[0], &tsDMnodeEpSet.port[0]); - - if (strcmp(tsSecond, tsFirst) != 0) { - tsDMnodeEpSet.numOfEps = 2; - taosGetFqdnPortFromEp(tsSecond, tsDMnodeEpSet.fqdn[1], &tsDMnodeEpSet.port[1]); - } - } else { - tsDMnodeEpSet.inUse = tsDMnodeInfos.inUse; - tsDMnodeEpSet.numOfEps = tsDMnodeInfos.nodeNum; - for (int32_t i = 0; i < tsDMnodeInfos.nodeNum; i++) { - taosGetFqdnPortFromEp(tsDMnodeInfos.nodeInfos[i].nodeEp, tsDMnodeEpSet.fqdn[i], &tsDMnodeEpSet.port[i]); - } - } - int32_t code = vnodeInitResources(); if (code != TSDB_CODE_SUCCESS) { dnodeCleanupMgmt(); @@ -201,38 +177,46 @@ void dnodeCleanupMgmt() { vnodeCleanupResources(); } -void dnodeDispatchToMgmtQueue(SRpcMsg *pMsg) { - void *item; +static int32_t dnodeWriteToMgmtQueue(SRpcMsg *pMsg) { + int32_t size = sizeof(SMgmtMsg) + pMsg->contLen; + SMgmtMsg *pMgmt = taosAllocateQitem(size); + if (pMgmt == NULL) { + return TSDB_CODE_DND_OUT_OF_MEMORY; + } - item = taosAllocateQitem(sizeof(SRpcMsg)); - if (item) { - memcpy(item, pMsg, sizeof(SRpcMsg)); - taosWriteQitem(tsMgmtQueue, 1, item); - } else { - SRpcMsg rsp = { - .handle = pMsg->handle, - .pCont = NULL, - .code = TSDB_CODE_DND_OUT_OF_MEMORY - }; - + pMgmt->rpcMsg = *pMsg; + pMgmt->rpcMsg.pCont = pMgmt->pCont; + memcpy(pMgmt->pCont, pMsg->pCont, pMsg->contLen); + taosWriteQitem(tsMgmtQueue, TAOS_QTYPE_RPC, pMgmt); + + return TSDB_CODE_SUCCESS; +} + +void dnodeDispatchToMgmtQueue(SRpcMsg *pMsg) { + int32_t code = dnodeWriteToMgmtQueue(pMsg); + if (code != TSDB_CODE_SUCCESS) { + SRpcMsg rsp = {.handle = pMsg->handle, .code = code}; rpcSendResponse(&rsp); - rpcFreeCont(pMsg->pCont); } + + rpcFreeCont(pMsg->pCont); } static void *dnodeProcessMgmtQueue(void *param) { - SRpcMsg *pMsg; - SRpcMsg rsp = {0}; - int type; - void * handle; + SMgmtMsg *pMgmt; + 
SRpcMsg * pMsg; + SRpcMsg rsp = {0}; + int32_t qtype; + void * handle; while (1) { - if (taosReadQitemFromQset(tsMgmtQset, &type, (void **) &pMsg, &handle) == 0) { + if (taosReadQitemFromQset(tsMgmtQset, &qtype, (void **)&pMgmt, &handle) == 0) { dDebug("qset:%p, dnode mgmt got no message from qset, exit", tsMgmtQset); break; } - dDebug("%p, msg:%s will be processed", pMsg->ahandle, taosMsg[pMsg->msgType]); + pMsg = &pMgmt->rpcMsg; + dDebug("msg:%p, ahandle:%p type:%s will be processed", pMgmt, pMsg->ahandle, taosMsg[pMsg->msgType]); if (dnodeProcessMgmtMsgFp[pMsg->msgType]) { rsp.code = (*dnodeProcessMgmtMsgFp[pMsg->msgType])(pMsg); } else { @@ -240,10 +224,9 @@ static void *dnodeProcessMgmtQueue(void *param) { } rsp.handle = pMsg->handle; - rsp.pCont = NULL; + rsp.pCont = NULL; rpcSendResponse(&rsp); - rpcFreeCont(pMsg->pCont); taosFreeQitem(pMsg); } @@ -381,7 +364,7 @@ static void dnodeCloseVnodes() { } static void* dnodeParseVnodeMsg(SRpcMsg *rpcMsg) { - SMDCreateVnodeMsg *pCreate = rpcMsg->pCont; + SCreateVnodeMsg *pCreate = rpcMsg->pCont; pCreate->cfg.vgId = htonl(pCreate->cfg.vgId); pCreate->cfg.cfgVersion = htonl(pCreate->cfg.cfgVersion); pCreate->cfg.maxTables = htonl(pCreate->cfg.maxTables); @@ -404,7 +387,7 @@ static void* dnodeParseVnodeMsg(SRpcMsg *rpcMsg) { } static int32_t dnodeProcessCreateVnodeMsg(SRpcMsg *rpcMsg) { - SMDCreateVnodeMsg *pCreate = dnodeParseVnodeMsg(rpcMsg); + SCreateVnodeMsg *pCreate = dnodeParseVnodeMsg(rpcMsg); void *pVnode = vnodeAcquire(pCreate->cfg.vgId); if (pVnode != NULL) { @@ -418,7 +401,7 @@ static int32_t dnodeProcessCreateVnodeMsg(SRpcMsg *rpcMsg) { } static int32_t dnodeProcessAlterVnodeMsg(SRpcMsg *rpcMsg) { - SMDAlterVnodeMsg *pAlter = dnodeParseVnodeMsg(rpcMsg); + SAlterVnodeMsg *pAlter = dnodeParseVnodeMsg(rpcMsg); void *pVnode = vnodeAcquire(pAlter->cfg.vgId); if (pVnode != NULL) { @@ -433,14 +416,14 @@ static int32_t dnodeProcessAlterVnodeMsg(SRpcMsg *rpcMsg) { } static int32_t dnodeProcessDropVnodeMsg(SRpcMsg *rpcMsg) { - SMDDropVnodeMsg *pDrop = rpcMsg->pCont; + SDropVnodeMsg *pDrop = rpcMsg->pCont; pDrop->vgId = htonl(pDrop->vgId); return vnodeDrop(pDrop->vgId); } static int32_t dnodeProcessAlterStreamMsg(SRpcMsg *pMsg) { -// SMDAlterStreamMsg *pStream = pCont; +// SAlterStreamMsg *pStream = pCont; // pStream->uid = htobe64(pStream->uid); // pStream->stime = htobe64(pStream->stime); // pStream->vnode = htonl(pStream->vnode); @@ -453,12 +436,12 @@ static int32_t dnodeProcessAlterStreamMsg(SRpcMsg *pMsg) { } static int32_t dnodeProcessConfigDnodeMsg(SRpcMsg *pMsg) { - SMDCfgDnodeMsg *pCfg = pMsg->pCont; + SCfgDnodeMsg *pCfg = pMsg->pCont; return taosCfgDynamicOptions(pCfg->config); } static int32_t dnodeProcessCreateMnodeMsg(SRpcMsg *pMsg) { - SMDCreateMnodeMsg *pCfg = pMsg->pCont; + SCreateMnodeMsg *pCfg = pMsg->pCont; pCfg->dnodeId = htonl(pCfg->dnodeId); if (pCfg->dnodeId != dnodeGetDnodeId()) { dError("dnodeId:%d, in create mnode msg is not equal with saved dnodeId:%d", pCfg->dnodeId, dnodeGetDnodeId()); @@ -470,10 +453,10 @@ static int32_t dnodeProcessCreateMnodeMsg(SRpcMsg *pMsg) { return TSDB_CODE_MND_DNODE_EP_NOT_CONFIGURED; } - dDebug("dnodeId:%d, create mnode msg is received from mnodes, numOfMnodes:%d", pCfg->dnodeId, pCfg->mnodes.nodeNum); - for (int i = 0; i < pCfg->mnodes.nodeNum; ++i) { - pCfg->mnodes.nodeInfos[i].nodeId = htonl(pCfg->mnodes.nodeInfos[i].nodeId); - dDebug("mnode index:%d, mnode:%d:%s", i, pCfg->mnodes.nodeInfos[i].nodeId, pCfg->mnodes.nodeInfos[i].nodeEp); + dDebug("dnodeId:%d, create mnode msg is received 
from mnodes, numOfMnodes:%d", pCfg->dnodeId, pCfg->mnodes.mnodeNum); + for (int i = 0; i < pCfg->mnodes.mnodeNum; ++i) { + pCfg->mnodes.mnodeInfos[i].mnodeId = htonl(pCfg->mnodes.mnodeInfos[i].mnodeId); + dDebug("mnode index:%d, mnode:%d:%s", i, pCfg->mnodes.mnodeInfos[i].mnodeId, pCfg->mnodes.mnodeInfos[i].mnodeEp); } dnodeStartMnode(&pCfg->mnodes); @@ -481,34 +464,6 @@ static int32_t dnodeProcessCreateMnodeMsg(SRpcMsg *pMsg) { return TSDB_CODE_SUCCESS; } -void dnodeUpdateMnodeEpSetForPeer(SRpcEpSet *pEpSet) { - if (pEpSet->numOfEps <= 0) { - dError("mnode EP list for peer is changed, but content is invalid, discard it"); - return; - } - - dInfo("mnode EP list for peer is changed, numOfEps:%d inUse:%d", pEpSet->numOfEps, pEpSet->inUse); - for (int i = 0; i < pEpSet->numOfEps; ++i) { - pEpSet->port[i] -= TSDB_PORT_DNODEDNODE; - dInfo("mnode index:%d %s:%u", i, pEpSet->fqdn[i], pEpSet->port[i]); - } - - tsDMnodeEpSet = *pEpSet; -} - -void dnodeGetMnodeEpSetForPeer(void *epSetRaw) { - SRpcEpSet *epSet = epSetRaw; - *epSet = tsDMnodeEpSet; - - for (int i=0; inumOfEps; ++i) - epSet->port[i] += TSDB_PORT_DNODEDNODE; -} - -void dnodeGetMnodeEpSetForShell(void *epSetRaw) { - SRpcEpSet *epSet = epSetRaw; - *epSet = tsDMnodeEpSet; -} - static void dnodeProcessStatusRsp(SRpcMsg *pMsg) { if (pMsg->code != TSDB_CODE_SUCCESS) { dError("status rsp is received, error:%s", tstrerror(pMsg->code)); @@ -516,202 +471,24 @@ static void dnodeProcessStatusRsp(SRpcMsg *pMsg) { return; } - SDMStatusRsp *pStatusRsp = pMsg->pCont; - SDMMnodeInfos *pMnodes = &pStatusRsp->mnodes; - if (pMnodes->nodeNum <= 0) { - dError("status msg is invalid, num of ips is %d", pMnodes->nodeNum); - taosTmrReset(dnodeSendStatusMsg, tsStatusInterval * 1000, NULL, tsDnodeTmr, &tsStatusTimer); - return; - } + SStatusRsp *pStatusRsp = pMsg->pCont; + SMnodeInfos *minfos = &pStatusRsp->mnodes; + dnodeUpdateMInfos(minfos); - SDMDnodeCfg *pCfg = &pStatusRsp->dnodeCfg; - pCfg->numOfVnodes = htonl(pCfg->numOfVnodes); + SDnodeCfg *pCfg = &pStatusRsp->dnodeCfg; + pCfg->numOfVnodes = htonl(pCfg->numOfVnodes); pCfg->moduleStatus = htonl(pCfg->moduleStatus); pCfg->dnodeId = htonl(pCfg->dnodeId); - - for (int32_t i = 0; i < pMnodes->nodeNum; ++i) { - SDMMnodeInfo *pMnodeInfo = &pMnodes->nodeInfos[i]; - pMnodeInfo->nodeId = htonl(pMnodeInfo->nodeId); - } + dnodeUpdateCfg(pCfg); vnodeSetAccess(pStatusRsp->vgAccess, pCfg->numOfVnodes); - // will not set mnode in status msg - // dnodeProcessModuleStatus(pCfg->moduleStatus); - dnodeUpdateDnodeCfg(pCfg); + SDnodeEps *pEps = (SDnodeEps *)((char *)pStatusRsp->vgAccess + pCfg->numOfVnodes * sizeof(SVgroupAccess)); + dnodeUpdateEps(pEps); - dnodeUpdateMnodeInfos(pMnodes); taosTmrReset(dnodeSendStatusMsg, tsStatusInterval * 1000, NULL, tsDnodeTmr, &tsStatusTimer); } -static bool dnodeCheckMnodeInfos(SDMMnodeInfos *pMnodes) { - if (pMnodes->nodeNum <= 0 || pMnodes->nodeNum > 3) { - dError("invalid mnode infos, num:%d", pMnodes->nodeNum); - return false; - } - - for (int32_t i = 0; i < pMnodes->nodeNum; ++i) { - SDMMnodeInfo *pMnodeInfo = &pMnodes->nodeInfos[i]; - if (pMnodeInfo->nodeId <= 0 || strlen(pMnodeInfo->nodeEp) <= 5) { - dError("invalid mnode info:%d, nodeId:%d nodeEp:%s", i, pMnodeInfo->nodeId, pMnodeInfo->nodeEp); - return false; - } - } - - return true; -} - -static void dnodeUpdateMnodeInfos(SDMMnodeInfos *pMnodes) { - bool mnodesChanged = (memcmp(&tsDMnodeInfos, pMnodes, sizeof(SDMMnodeInfos)) != 0); - bool mnodesNotInit = (tsDMnodeInfos.nodeNum == 0); - if (!(mnodesChanged || mnodesNotInit)) 
return; - - if (!dnodeCheckMnodeInfos(pMnodes)) return; - - memcpy(&tsDMnodeInfos, pMnodes, sizeof(SDMMnodeInfos)); - dInfo("mnode infos is changed, nodeNum:%d inUse:%d", tsDMnodeInfos.nodeNum, tsDMnodeInfos.inUse); - for (int32_t i = 0; i < tsDMnodeInfos.nodeNum; i++) { - dInfo("mnode index:%d, %s", tsDMnodeInfos.nodeInfos[i].nodeId, tsDMnodeInfos.nodeInfos[i].nodeEp); - } - - tsDMnodeEpSet.inUse = tsDMnodeInfos.inUse; - tsDMnodeEpSet.numOfEps = tsDMnodeInfos.nodeNum; - for (int32_t i = 0; i < tsDMnodeInfos.nodeNum; i++) { - taosGetFqdnPortFromEp(tsDMnodeInfos.nodeInfos[i].nodeEp, tsDMnodeEpSet.fqdn[i], &tsDMnodeEpSet.port[i]); - } - - dnodeSaveMnodeInfos(); - sdbUpdateAsync(); -} - -static bool dnodeReadMnodeInfos() { - char ipFile[TSDB_FILENAME_LEN*2] = {0}; - - sprintf(ipFile, "%s/mnodeEpSet.json", tsDnodeDir); - FILE *fp = fopen(ipFile, "r"); - if (!fp) { - dDebug("failed to read mnodeEpSet.json, file not exist"); - return false; - } - - bool ret = false; - int maxLen = 2000; - char *content = calloc(1, maxLen + 1); - int len = fread(content, 1, maxLen, fp); - if (len <= 0) { - free(content); - fclose(fp); - dError("failed to read mnodeEpSet.json, content is null"); - return false; - } - - content[len] = 0; - cJSON* root = cJSON_Parse(content); - if (root == NULL) { - dError("failed to read mnodeEpSet.json, invalid json format"); - goto PARSE_OVER; - } - - cJSON* inUse = cJSON_GetObjectItem(root, "inUse"); - if (!inUse || inUse->type != cJSON_Number) { - dError("failed to read mnodeEpSet.json, inUse not found"); - goto PARSE_OVER; - } - tsDMnodeInfos.inUse = inUse->valueint; - - cJSON* nodeNum = cJSON_GetObjectItem(root, "nodeNum"); - if (!nodeNum || nodeNum->type != cJSON_Number) { - dError("failed to read mnodeEpSet.json, nodeNum not found"); - goto PARSE_OVER; - } - tsDMnodeInfos.nodeNum = nodeNum->valueint; - - cJSON* nodeInfos = cJSON_GetObjectItem(root, "nodeInfos"); - if (!nodeInfos || nodeInfos->type != cJSON_Array) { - dError("failed to read mnodeEpSet.json, nodeInfos not found"); - goto PARSE_OVER; - } - - int size = cJSON_GetArraySize(nodeInfos); - if (size != tsDMnodeInfos.nodeNum) { - dError("failed to read mnodeEpSet.json, nodeInfos size not matched"); - goto PARSE_OVER; - } - - for (int i = 0; i < size; ++i) { - cJSON *nodeInfo = cJSON_GetArrayItem(nodeInfos, i); - if (nodeInfo == NULL) continue; - - cJSON *nodeId = cJSON_GetObjectItem(nodeInfo, "nodeId"); - if (!nodeId || nodeId->type != cJSON_Number) { - dError("failed to read mnodeEpSet.json, nodeId not found"); - goto PARSE_OVER; - } - tsDMnodeInfos.nodeInfos[i].nodeId = nodeId->valueint; - - cJSON *nodeEp = cJSON_GetObjectItem(nodeInfo, "nodeEp"); - if (!nodeEp || nodeEp->type != cJSON_String || nodeEp->valuestring == NULL) { - dError("failed to read mnodeEpSet.json, nodeName not found"); - goto PARSE_OVER; - } - strncpy(tsDMnodeInfos.nodeInfos[i].nodeEp, nodeEp->valuestring, TSDB_EP_LEN); - } - - ret = true; - - dInfo("read mnode epSet successed, numOfEps:%d inUse:%d", tsDMnodeInfos.nodeNum, tsDMnodeInfos.inUse); - for (int32_t i = 0; i < tsDMnodeInfos.nodeNum; i++) { - dInfo("mnode:%d, %s", tsDMnodeInfos.nodeInfos[i].nodeId, tsDMnodeInfos.nodeInfos[i].nodeEp); - } - -PARSE_OVER: - free(content); - cJSON_Delete(root); - fclose(fp); - return ret; -} - -static void dnodeSaveMnodeInfos() { - char ipFile[TSDB_FILENAME_LEN] = {0}; - sprintf(ipFile, "%s/mnodeEpSet.json", tsDnodeDir); - FILE *fp = fopen(ipFile, "w"); - if (!fp) return; - - int32_t len = 0; - int32_t maxLen = 2000; - char * content = calloc(1, maxLen + 
1); - - len += snprintf(content + len, maxLen - len, "{\n"); - len += snprintf(content + len, maxLen - len, " \"inUse\": %d,\n", tsDMnodeInfos.inUse); - len += snprintf(content + len, maxLen - len, " \"nodeNum\": %d,\n", tsDMnodeInfos.nodeNum); - len += snprintf(content + len, maxLen - len, " \"nodeInfos\": [{\n"); - for (int32_t i = 0; i < tsDMnodeInfos.nodeNum; i++) { - len += snprintf(content + len, maxLen - len, " \"nodeId\": %d,\n", tsDMnodeInfos.nodeInfos[i].nodeId); - len += snprintf(content + len, maxLen - len, " \"nodeEp\": \"%s\"\n", tsDMnodeInfos.nodeInfos[i].nodeEp); - if (i < tsDMnodeInfos.nodeNum -1) { - len += snprintf(content + len, maxLen - len, " },{\n"); - } else { - len += snprintf(content + len, maxLen - len, " }]\n"); - } - } - len += snprintf(content + len, maxLen - len, "}\n"); - - fwrite(content, 1, len, fp); - fflush(fp); - fclose(fp); - free(content); - - dInfo("save mnode epSet successed"); -} - -char *dnodeGetMnodeMasterEp() { - return tsDMnodeInfos.nodeInfos[tsDMnodeEpSet.inUse].nodeEp; -} - -void* dnodeGetMnodeInfos() { - return &tsDMnodeInfos; -} - static void dnodeSendStatusMsg(void *handle, void *tmrId) { if (tsDnodeTmr == NULL) { dError("dnode timer is already released"); @@ -724,22 +501,21 @@ static void dnodeSendStatusMsg(void *handle, void *tmrId) { return; } - int32_t contLen = sizeof(SDMStatusMsg) + TSDB_MAX_VNODES * sizeof(SVnodeLoad); - SDMStatusMsg *pStatus = rpcMallocCont(contLen); + int32_t contLen = sizeof(SStatusMsg) + TSDB_MAX_VNODES * sizeof(SVnodeLoad); + SStatusMsg *pStatus = rpcMallocCont(contLen); if (pStatus == NULL) { taosTmrReset(dnodeSendStatusMsg, tsStatusInterval * 1000, NULL, tsDnodeTmr, &tsStatusTimer); dError("failed to malloc status message"); return; } - //strcpy(pStatus->dnodeName, tsDnodeName); + dnodeGetCfg(&pStatus->dnodeId, pStatus->clusterId); + pStatus->dnodeId = htonl(dnodeGetDnodeId()); pStatus->version = htonl(tsVersion); - pStatus->dnodeId = htonl(tsDnodeCfg.dnodeId); pStatus->lastReboot = htonl(tsRebootTime); pStatus->numOfCores = htons((uint16_t) tsNumOfCores); pStatus->diskAvailable = tsAvailDataDirGB; pStatus->alternativeRole = (uint8_t) tsAlternativeRole; - tstrncpy(pStatus->clusterId, tsDnodeCfg.clusterId, TSDB_CLUSTER_ID_LEN); tstrncpy(pStatus->dnodeEp, tsLocalEp, TSDB_EP_LEN); // fill cluster cfg parameters @@ -759,7 +535,7 @@ static void dnodeSendStatusMsg(void *handle, void *tmrId) { tstrncpy(pStatus->clusterCfg.charset, tsCharset, TSDB_LOCALE_LEN); vnodeBuildStatusMsg(pStatus); - contLen = sizeof(SDMStatusMsg) + pStatus->openVnodes * sizeof(SVnodeLoad); + contLen = sizeof(SStatusMsg) + pStatus->openVnodes * sizeof(SVnodeLoad); pStatus->openVnodes = htons(pStatus->openVnodes); SRpcMsg rpcMsg = { @@ -769,110 +545,19 @@ static void dnodeSendStatusMsg(void *handle, void *tmrId) { }; SRpcEpSet epSet; - dnodeGetMnodeEpSetForPeer(&epSet); + dnodeGetEpSetForPeer(&epSet); dnodeSendMsgToDnode(&epSet, &rpcMsg); } -static bool dnodeReadDnodeCfg() { - char dnodeCfgFile[TSDB_FILENAME_LEN*2] = {0}; - - sprintf(dnodeCfgFile, "%s/dnodeCfg.json", tsDnodeDir); - - FILE *fp = fopen(dnodeCfgFile, "r"); - if (!fp) { - dDebug("failed to read dnodeCfg.json, file not exist"); - return false; - } - - bool ret = false; - int maxLen = 100; - char *content = calloc(1, maxLen + 1); - int len = fread(content, 1, maxLen, fp); - if (len <= 0) { - free(content); - fclose(fp); - dError("failed to read dnodeCfg.json, content is null"); - return false; - } - - content[len] = 0; - cJSON* root = cJSON_Parse(content); - if (root == NULL) { - 
dError("failed to read dnodeCfg.json, invalid json format"); - goto PARSE_CFG_OVER; - } - - cJSON* dnodeId = cJSON_GetObjectItem(root, "dnodeId"); - if (!dnodeId || dnodeId->type != cJSON_Number) { - dError("failed to read dnodeCfg.json, dnodeId not found"); - goto PARSE_CFG_OVER; - } - tsDnodeCfg.dnodeId = dnodeId->valueint; - - cJSON* clusterId = cJSON_GetObjectItem(root, "clusterId"); - if (!clusterId || clusterId->type != cJSON_String) { - dError("failed to read dnodeCfg.json, clusterId not found"); - goto PARSE_CFG_OVER; - } - tstrncpy(tsDnodeCfg.clusterId, clusterId->valuestring, TSDB_CLUSTER_ID_LEN); - - ret = true; - - dInfo("read numOfVnodes successed, dnodeId:%d", tsDnodeCfg.dnodeId); - -PARSE_CFG_OVER: - free(content); - cJSON_Delete(root); - fclose(fp); - return ret; -} - -static void dnodeSaveDnodeCfg() { - char dnodeCfgFile[TSDB_FILENAME_LEN] = {0}; - sprintf(dnodeCfgFile, "%s/dnodeCfg.json", tsDnodeDir); - - FILE *fp = fopen(dnodeCfgFile, "w"); - if (!fp) return; - - int32_t len = 0; - int32_t maxLen = 200; - char * content = calloc(1, maxLen + 1); - - len += snprintf(content + len, maxLen - len, "{\n"); - len += snprintf(content + len, maxLen - len, " \"dnodeId\": %d,\n", tsDnodeCfg.dnodeId); - len += snprintf(content + len, maxLen - len, " \"clusterId\": \"%s\"\n", tsDnodeCfg.clusterId); - len += snprintf(content + len, maxLen - len, "}\n"); - - fwrite(content, 1, len, fp); - fflush(fp); - fclose(fp); - free(content); - - dInfo("save dnodeId successed"); -} - -void dnodeUpdateDnodeCfg(SDMDnodeCfg *pCfg) { - if (tsDnodeCfg.dnodeId == 0) { - dInfo("dnodeId is set to %d, clusterId is set to %s", pCfg->dnodeId, pCfg->clusterId); - tsDnodeCfg.dnodeId = pCfg->dnodeId; - tstrncpy(tsDnodeCfg.clusterId, pCfg->clusterId, TSDB_CLUSTER_ID_LEN); - dnodeSaveDnodeCfg(); - } -} - -int32_t dnodeGetDnodeId() { - return tsDnodeCfg.dnodeId; -} - void dnodeSendRedirectMsg(SRpcMsg *rpcMsg, bool forShell) { SRpcConnInfo connInfo = {0}; rpcGetConnInfo(rpcMsg->handle, &connInfo); SRpcEpSet epSet = {0}; if (forShell) { - dnodeGetMnodeEpSetForShell(&epSet); + dnodeGetEpSetForShell(&epSet); } else { - dnodeGetMnodeEpSetForPeer(&epSet); + dnodeGetEpSetForPeer(&epSet); } dDebug("msg:%s will be redirected, dnodeIp:%s user:%s, numOfEps:%d inUse:%d", taosMsg[rpcMsg->msgType], diff --git a/src/dnode/src/dnodeModule.c b/src/dnode/src/dnodeModule.c index 46376159c6782efde7adbd19c75af83aa4cde397..bd9500ba51226d138fd3fe52f027d144d289681b 100644 --- a/src/dnode/src/dnodeModule.c +++ b/src/dnode/src/dnodeModule.c @@ -114,6 +114,7 @@ int32_t dnodeInitModules() { } } + dInfo("dnode modules is initialized"); return 0; } @@ -146,8 +147,8 @@ void dnodeProcessModuleStatus(uint32_t moduleStatus) { } } -bool dnodeStartMnode(void *pMnodes) { - SDMMnodeInfos *mnodes = pMnodes; +bool dnodeStartMnode(SMnodeInfos *minfos) { + SMnodeInfos *mnodes = minfos; if (tsModuleStatus & (1 << TSDB_MOD_MNODE)) { dDebug("mnode module is already started, module status:%d", tsModuleStatus); diff --git a/src/dnode/src/dnodePeer.c b/src/dnode/src/dnodePeer.c index 3bc2f7b48b319f3c9e6215a05463ebedc74035fa..4c44924cd0b79bae5b0c83f9ddad8ae30afcf292 100644 --- a/src/dnode/src/dnodePeer.c +++ b/src/dnode/src/dnodePeer.c @@ -19,6 +19,7 @@ * to dnode. 
All theses messages are handled from here */ +#define _DEFAULT_SOURCE #include "os.h" #include "taosmsg.h" #include "tglobal.h" @@ -28,20 +29,20 @@ #include "dnodeMgmt.h" #include "dnodeVWrite.h" #include "dnodeMPeer.h" +#include "dnodeMInfos.h" -extern void dnodeUpdateMnodeEpSetForPeer(SRpcEpSet *pEpSet); static void (*dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MAX])(SRpcMsg *); static void dnodeProcessReqMsgFromDnode(SRpcMsg *pMsg, SRpcEpSet *); static void (*dnodeProcessRspMsgFp[TSDB_MSG_TYPE_MAX])(SRpcMsg *rpcMsg); static void dnodeProcessRspFromDnode(SRpcMsg *pMsg, SRpcEpSet *pEpSet); -static void *tsDnodeServerRpc = NULL; -static void *tsDnodeClientRpc = NULL; +static void *tsServerRpc = NULL; +static void *tsClientRpc = NULL; int32_t dnodeInitServer() { - dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_CREATE_TABLE] = dnodeDispatchToVnodeWriteQueue; - dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_DROP_TABLE] = dnodeDispatchToVnodeWriteQueue; - dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_ALTER_TABLE] = dnodeDispatchToVnodeWriteQueue; - dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_DROP_STABLE] = dnodeDispatchToVnodeWriteQueue; + dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_CREATE_TABLE] = dnodeDispatchToVWriteQueue; + dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_DROP_TABLE] = dnodeDispatchToVWriteQueue; + dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_ALTER_TABLE] = dnodeDispatchToVWriteQueue; + dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_DROP_STABLE] = dnodeDispatchToVWriteQueue; dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_CREATE_VNODE] = dnodeDispatchToMgmtQueue; dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_ALTER_VNODE] = dnodeDispatchToMgmtQueue; @@ -50,11 +51,11 @@ int32_t dnodeInitServer() { dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_CONFIG_DNODE] = dnodeDispatchToMgmtQueue; dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_CREATE_MNODE] = dnodeDispatchToMgmtQueue; - dnodeProcessReqMsgFp[TSDB_MSG_TYPE_DM_CONFIG_TABLE] = dnodeDispatchToMnodePeerQueue; - dnodeProcessReqMsgFp[TSDB_MSG_TYPE_DM_CONFIG_VNODE] = dnodeDispatchToMnodePeerQueue; - dnodeProcessReqMsgFp[TSDB_MSG_TYPE_DM_AUTH] = dnodeDispatchToMnodePeerQueue; - dnodeProcessReqMsgFp[TSDB_MSG_TYPE_DM_GRANT] = dnodeDispatchToMnodePeerQueue; - dnodeProcessReqMsgFp[TSDB_MSG_TYPE_DM_STATUS] = dnodeDispatchToMnodePeerQueue; + dnodeProcessReqMsgFp[TSDB_MSG_TYPE_DM_CONFIG_TABLE] = dnodeDispatchToMPeerQueue; + dnodeProcessReqMsgFp[TSDB_MSG_TYPE_DM_CONFIG_VNODE] = dnodeDispatchToMPeerQueue; + dnodeProcessReqMsgFp[TSDB_MSG_TYPE_DM_AUTH] = dnodeDispatchToMPeerQueue; + dnodeProcessReqMsgFp[TSDB_MSG_TYPE_DM_GRANT] = dnodeDispatchToMPeerQueue; + dnodeProcessReqMsgFp[TSDB_MSG_TYPE_DM_STATUS] = dnodeDispatchToMPeerQueue; SRpcInit rpcInit; memset(&rpcInit, 0, sizeof(rpcInit)); @@ -66,20 +67,20 @@ int32_t dnodeInitServer() { rpcInit.connType = TAOS_CONN_SERVER; rpcInit.idleTime = tsShellActivityTimer * 1000; - tsDnodeServerRpc = rpcOpen(&rpcInit); - if (tsDnodeServerRpc == NULL) { + tsServerRpc = rpcOpen(&rpcInit); + if (tsServerRpc == NULL) { dError("failed to init inter-dnodes RPC server"); return -1; } - dInfo("inter-dnodes RPC server is opened"); + dInfo("dnode inter-dnodes RPC server is initialized"); return 0; } void dnodeCleanupServer() { - if (tsDnodeServerRpc) { - rpcClose(tsDnodeServerRpc); - tsDnodeServerRpc = NULL; + if (tsServerRpc) { + rpcClose(tsServerRpc); + tsServerRpc = NULL; dInfo("inter-dnodes RPC server is closed"); } } @@ -93,7 +94,7 @@ static void dnodeProcessReqMsgFromDnode(SRpcMsg *pMsg, SRpcEpSet *pEpSet) { if (pMsg->pCont == NULL) return; - if (dnodeGetRunStatus() != TSDB_DNODE_RUN_STATUS_RUNING) { + if 
(dnodeGetRunStatus() != TSDB_RUN_STATUS_RUNING) { rspMsg.code = TSDB_CODE_APP_NOT_READY; rpcSendResponse(&rspMsg); rpcFreeCont(pMsg->pCont); @@ -131,27 +132,34 @@ int32_t dnodeInitClient() { rpcInit.ckey = "key"; rpcInit.secret = secret; - tsDnodeClientRpc = rpcOpen(&rpcInit); - if (tsDnodeClientRpc == NULL) { + tsClientRpc = rpcOpen(&rpcInit); + if (tsClientRpc == NULL) { dError("failed to init mnode rpc client"); return -1; } - dInfo("inter-dnodes rpc client is opened"); + dInfo("dnode inter-dnodes rpc client is initialized"); return 0; } void dnodeCleanupClient() { - if (tsDnodeClientRpc) { - rpcClose(tsDnodeClientRpc); - tsDnodeClientRpc = NULL; - dInfo("inter-dnodes rpc client is closed"); + if (tsClientRpc) { + rpcClose(tsClientRpc); + tsClientRpc = NULL; + dInfo("dnode inter-dnodes rpc client is closed"); } } static void dnodeProcessRspFromDnode(SRpcMsg *pMsg, SRpcEpSet *pEpSet) { + if (dnodeGetRunStatus() != TSDB_RUN_STATUS_RUNING) { + if (pMsg == NULL || pMsg->pCont == NULL) return; + dDebug("msg:%p is ignored since dnode not running", pMsg); + rpcFreeCont(pMsg->pCont); + return; + } + if (pMsg->msgType == TSDB_MSG_TYPE_DM_STATUS_RSP && pEpSet) { - dnodeUpdateMnodeEpSetForPeer(pEpSet); + dnodeUpdateEpSetForPeer(pEpSet); } if (dnodeProcessRspMsgFp[pMsg->msgType]) { @@ -168,15 +176,15 @@ void dnodeAddClientRspHandle(uint8_t msgType, void (*fp)(SRpcMsg *rpcMsg)) { } void dnodeSendMsgToDnode(SRpcEpSet *epSet, SRpcMsg *rpcMsg) { - rpcSendRequest(tsDnodeClientRpc, epSet, rpcMsg); + rpcSendRequest(tsClientRpc, epSet, rpcMsg, NULL); } void dnodeSendMsgToMnodeRecv(SRpcMsg *rpcMsg, SRpcMsg *rpcRsp) { SRpcEpSet epSet = {0}; - dnodeGetMnodeEpSetForPeer(&epSet); - rpcSendRecv(tsDnodeClientRpc, &epSet, rpcMsg, rpcRsp); + dnodeGetEpSetForPeer(&epSet); + rpcSendRecv(tsClientRpc, &epSet, rpcMsg, rpcRsp); } void dnodeSendMsgToDnodeRecv(SRpcMsg *rpcMsg, SRpcMsg *rpcRsp, SRpcEpSet *epSet) { - rpcSendRecv(tsDnodeClientRpc, epSet, rpcMsg, rpcRsp); -} \ No newline at end of file + rpcSendRecv(tsClientRpc, epSet, rpcMsg, rpcRsp); +} diff --git a/src/dnode/src/dnodeShell.c b/src/dnode/src/dnodeShell.c index 4c6c2100e048e9aaf23f4155d3c54604104a8f9b..89f657f78986b8e57c0b5e1dedb841c451db00ba 100644 --- a/src/dnode/src/dnodeShell.c +++ b/src/dnode/src/dnodeShell.c @@ -33,46 +33,46 @@ static void (*dnodeProcessShellMsgFp[TSDB_MSG_TYPE_MAX])(SRpcMsg *); static void dnodeProcessMsgFromShell(SRpcMsg *pMsg, SRpcEpSet *); static int dnodeRetrieveUserAuthInfo(char *user, char *spi, char *encrypt, char *secret, char *ckey); -static void * tsDnodeShellRpc = NULL; -static int32_t tsDnodeQueryReqNum = 0; -static int32_t tsDnodeSubmitReqNum = 0; +static void * tsShellRpc = NULL; +static int32_t tsQueryReqNum = 0; +static int32_t tsSubmitReqNum = 0; int32_t dnodeInitShell() { - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_SUBMIT] = dnodeDispatchToVnodeWriteQueue; - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_QUERY] = dnodeDispatchToVnodeReadQueue; - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_FETCH] = dnodeDispatchToVnodeReadQueue; - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_UPDATE_TAG_VAL] = dnodeDispatchToVnodeWriteQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_SUBMIT] = dnodeDispatchToVWriteQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_QUERY] = dnodeDispatchToVReadQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_FETCH] = dnodeDispatchToVReadQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_UPDATE_TAG_VAL] = dnodeDispatchToVWriteQueue; // the following message shall be treated as mnode write - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_ACCT] = 
dnodeDispatchToMnodeWriteQueue; - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_ALTER_ACCT] = dnodeDispatchToMnodeWriteQueue; - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_ACCT] = dnodeDispatchToMnodeWriteQueue; - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_USER] = dnodeDispatchToMnodeWriteQueue; - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_ALTER_USER] = dnodeDispatchToMnodeWriteQueue; - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_USER] = dnodeDispatchToMnodeWriteQueue; - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_DNODE]= dnodeDispatchToMnodeWriteQueue; - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_DNODE] = dnodeDispatchToMnodeWriteQueue; - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_DB] = dnodeDispatchToMnodeWriteQueue; - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_DB] = dnodeDispatchToMnodeWriteQueue; - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_ALTER_DB] = dnodeDispatchToMnodeWriteQueue; - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_TABLE]= dnodeDispatchToMnodeWriteQueue; - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_TABLE] = dnodeDispatchToMnodeWriteQueue; - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_ALTER_TABLE] = dnodeDispatchToMnodeWriteQueue; - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_ALTER_STREAM]= dnodeDispatchToMnodeWriteQueue; - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_KILL_QUERY] = dnodeDispatchToMnodeWriteQueue; - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_KILL_STREAM] = dnodeDispatchToMnodeWriteQueue; - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_KILL_CONN] = dnodeDispatchToMnodeWriteQueue; - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CONFIG_DNODE]= dnodeDispatchToMnodeWriteQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_ACCT] = dnodeDispatchToMWriteQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_ALTER_ACCT] = dnodeDispatchToMWriteQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_ACCT] = dnodeDispatchToMWriteQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_USER] = dnodeDispatchToMWriteQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_ALTER_USER] = dnodeDispatchToMWriteQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_USER] = dnodeDispatchToMWriteQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_DNODE]= dnodeDispatchToMWriteQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_DNODE] = dnodeDispatchToMWriteQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_DB] = dnodeDispatchToMWriteQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_DB] = dnodeDispatchToMWriteQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_ALTER_DB] = dnodeDispatchToMWriteQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_TABLE]= dnodeDispatchToMWriteQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_TABLE] = dnodeDispatchToMWriteQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_ALTER_TABLE] = dnodeDispatchToMWriteQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_ALTER_STREAM]= dnodeDispatchToMWriteQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_KILL_QUERY] = dnodeDispatchToMWriteQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_KILL_STREAM] = dnodeDispatchToMWriteQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_KILL_CONN] = dnodeDispatchToMWriteQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CONFIG_DNODE]= dnodeDispatchToMWriteQueue; // the following message shall be treated as mnode query - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_HEARTBEAT] = dnodeDispatchToMnodeReadQueue; - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CONNECT] = dnodeDispatchToMnodeReadQueue; - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_USE_DB] = dnodeDispatchToMnodeReadQueue; - 
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_TABLE_META] = dnodeDispatchToMnodeReadQueue; - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_STABLE_VGROUP]= dnodeDispatchToMnodeReadQueue; - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_TABLES_META] = dnodeDispatchToMnodeReadQueue; - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_SHOW] = dnodeDispatchToMnodeReadQueue; - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_RETRIEVE] = dnodeDispatchToMnodeReadQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_HEARTBEAT] = dnodeDispatchToMReadQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CONNECT] = dnodeDispatchToMReadQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_USE_DB] = dnodeDispatchToMReadQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_TABLE_META] = dnodeDispatchToMReadQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_STABLE_VGROUP]= dnodeDispatchToMReadQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_TABLES_META] = dnodeDispatchToMReadQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_SHOW] = dnodeDispatchToMReadQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_RETRIEVE] = dnodeDispatchToMReadQueue; int32_t numOfThreads = tsNumOfCores * tsNumOfThreadsPerCore; numOfThreads = (int32_t) ((1.0 - tsRatioOfQueryThreads) * numOfThreads / 2.0); @@ -91,24 +91,24 @@ int32_t dnodeInitShell() { rpcInit.idleTime = tsShellActivityTimer * 1000; rpcInit.afp = dnodeRetrieveUserAuthInfo; - tsDnodeShellRpc = rpcOpen(&rpcInit); - if (tsDnodeShellRpc == NULL) { + tsShellRpc = rpcOpen(&rpcInit); + if (tsShellRpc == NULL) { dError("failed to init shell rpc server"); return -1; } - dInfo("shell rpc server is opened"); + dInfo("dnode shell rpc server is initialized"); return 0; } void dnodeCleanupShell() { - if (tsDnodeShellRpc) { - rpcClose(tsDnodeShellRpc); - tsDnodeShellRpc = NULL; + if (tsShellRpc) { + rpcClose(tsShellRpc); + tsShellRpc = NULL; } } -void dnodeProcessMsgFromShell(SRpcMsg *pMsg, SRpcEpSet *pEpSet) { +static void dnodeProcessMsgFromShell(SRpcMsg *pMsg, SRpcEpSet *pEpSet) { SRpcMsg rpcMsg = { .handle = pMsg->handle, .pCont = NULL, @@ -117,7 +117,7 @@ void dnodeProcessMsgFromShell(SRpcMsg *pMsg, SRpcEpSet *pEpSet) { if (pMsg->pCont == NULL) return; - if (dnodeGetRunStatus() != TSDB_DNODE_RUN_STATUS_RUNING) { + if (dnodeGetRunStatus() != TSDB_RUN_STATUS_RUNING) { dError("RPC %p, shell msg:%s is ignored since dnode not running", pMsg->handle, taosMsg[pMsg->msgType]); rpcMsg.code = TSDB_CODE_APP_NOT_READY; rpcSendResponse(&rpcMsg); @@ -126,9 +126,9 @@ void dnodeProcessMsgFromShell(SRpcMsg *pMsg, SRpcEpSet *pEpSet) { } if (pMsg->msgType == TSDB_MSG_TYPE_QUERY) { - atomic_fetch_add_32(&tsDnodeQueryReqNum, 1); + atomic_fetch_add_32(&tsQueryReqNum, 1); } else if (pMsg->msgType == TSDB_MSG_TYPE_SUBMIT) { - atomic_fetch_add_32(&tsDnodeSubmitReqNum, 1); + atomic_fetch_add_32(&tsSubmitReqNum, 1); } else {} if ( dnodeProcessShellMsgFp[pMsg->msgType] ) { @@ -146,12 +146,12 @@ static int dnodeRetrieveUserAuthInfo(char *user, char *spi, char *encrypt, char int code = mnodeRetriveAuth(user, spi, encrypt, secret, ckey); if (code != TSDB_CODE_APP_NOT_READY) return code; - SDMAuthMsg *pMsg = rpcMallocCont(sizeof(SDMAuthMsg)); + SAuthMsg *pMsg = rpcMallocCont(sizeof(SAuthMsg)); tstrncpy(pMsg->user, user, sizeof(pMsg->user)); SRpcMsg rpcMsg = {0}; rpcMsg.pCont = pMsg; - rpcMsg.contLen = sizeof(SDMAuthMsg); + rpcMsg.contLen = sizeof(SAuthMsg); rpcMsg.msgType = TSDB_MSG_TYPE_DM_AUTH; dDebug("user:%s, send auth msg to mnodes", user); @@ -161,7 +161,7 @@ static int dnodeRetrieveUserAuthInfo(char *user, char *spi, char *encrypt, char if (rpcRsp.code != 0) { 
dError("user:%s, auth msg received from mnodes, error:%s", user, tstrerror(rpcRsp.code)); } else { - SDMAuthRsp *pRsp = rpcRsp.pCont; + SAuthRsp *pRsp = rpcRsp.pCont; dDebug("user:%s, auth msg received from mnodes", user); memcpy(secret, pRsp->secret, TSDB_KEY_LEN); memcpy(ckey, pRsp->ckey, TSDB_KEY_LEN); @@ -176,8 +176,8 @@ static int dnodeRetrieveUserAuthInfo(char *user, char *spi, char *encrypt, char void *dnodeSendCfgTableToRecv(int32_t vgId, int32_t tid) { dDebug("vgId:%d, tid:%d send config table msg to mnode", vgId, tid); - int32_t contLen = sizeof(SDMConfigTableMsg); - SDMConfigTableMsg *pMsg = rpcMallocCont(contLen); + int32_t contLen = sizeof(SConfigTableMsg); + SConfigTableMsg *pMsg = rpcMallocCont(contLen); pMsg->dnodeId = htonl(dnodeGetDnodeId()); pMsg->vgId = htonl(vgId); @@ -211,12 +211,12 @@ void *dnodeSendCfgTableToRecv(int32_t vgId, int32_t tid) { } } -SDnodeStatisInfo dnodeGetStatisInfo() { - SDnodeStatisInfo info = {0}; - if (dnodeGetRunStatus() == TSDB_DNODE_RUN_STATUS_RUNING) { +SStatisInfo dnodeGetStatisInfo() { + SStatisInfo info = {0}; + if (dnodeGetRunStatus() == TSDB_RUN_STATUS_RUNING) { info.httpReqNum = httpGetReqCount(); - info.queryReqNum = atomic_exchange_32(&tsDnodeQueryReqNum, 0); - info.submitReqNum = atomic_exchange_32(&tsDnodeSubmitReqNum, 0); + info.queryReqNum = atomic_exchange_32(&tsQueryReqNum, 0); + info.submitReqNum = atomic_exchange_32(&tsSubmitReqNum, 0); } return info; diff --git a/src/dnode/src/dnodeTelemetry.c b/src/dnode/src/dnodeTelemetry.c index 4fdc0b8a73b58efa801f33ea99ffb7d123a72dcf..e973f9901f19b7aa6f4d4958d88e01ba258a82bb 100644 --- a/src/dnode/src/dnodeTelemetry.c +++ b/src/dnode/src/dnodeTelemetry.c @@ -268,7 +268,7 @@ static void dnodeGetEmail(char* filepath) { return; } - if (taosTRead(fd, (void *)tsEmail, TSDB_FQDN_LEN) < 0) { + if (taosRead(fd, (void *)tsEmail, TSDB_FQDN_LEN) < 0) { dError("failed to read %d bytes from file %s since %s", TSDB_FQDN_LEN, filepath, strerror(errno)); } @@ -299,6 +299,7 @@ int32_t dnodeInitTelemetry() { dTrace("failed to create telemetry thread, reason:%s", strerror(errno)); } + dInfo("dnode telemetry is initialized"); return 0; } diff --git a/src/dnode/src/dnodeVRead.c b/src/dnode/src/dnodeVRead.c index e61158ef30dddd6037af36c61be1fd522f8af10b..4cce54bf59eecbef6a7056e005819d456178279a 100644 --- a/src/dnode/src/dnodeVRead.c +++ b/src/dnode/src/dnodeVRead.c @@ -17,225 +17,182 @@ #include "os.h" #include "taoserror.h" #include "taosmsg.h" -#include "tutil.h" -#include "tqueue.h" -#include "twal.h" #include "tglobal.h" -#include "dnodeInt.h" -#include "dnodeMgmt.h" -#include "dnodeVRead.h" +#include "tqueue.h" #include "vnode.h" +#include "dnodeInt.h" typedef struct { - pthread_t thread; // thread - int32_t workerId; // worker ID -} SReadWorker; + pthread_t thread; // thread + int32_t workerId; // worker ID +} SVReadWorker; typedef struct { - int32_t max; // max number of workers - int32_t min; // min number of workers - int32_t num; // current number of workers - SReadWorker *readWorker; + int32_t max; // max number of workers + int32_t min; // min number of workers + int32_t num; // current number of workers + SVReadWorker * worker; pthread_mutex_t mutex; -} SReadWorkerPool; +} SVReadWorkerPool; -static void *dnodeProcessReadQueue(void *param); -static void dnodeHandleIdleReadWorker(SReadWorker *); +static void *dnodeProcessReadQueue(void *pWorker); // module global variable -static SReadWorkerPool readPool; -static taos_qset readQset; +static SVReadWorkerPool tsVReadWP; +static taos_qset 
tsVReadQset; -int32_t dnodeInitVnodeRead() { - readQset = taosOpenQset(); +int32_t dnodeInitVRead() { + tsVReadQset = taosOpenQset(); - readPool.min = tsNumOfCores; - readPool.max = tsNumOfCores * tsNumOfThreadsPerCore; - if (readPool.max <= readPool.min * 2) readPool.max = 2 * readPool.min; - readPool.readWorker = (SReadWorker *)calloc(sizeof(SReadWorker), readPool.max); - pthread_mutex_init(&readPool.mutex, NULL); + tsVReadWP.min = tsNumOfCores; + tsVReadWP.max = tsNumOfCores * tsNumOfThreadsPerCore; + if (tsVReadWP.max <= tsVReadWP.min * 2) tsVReadWP.max = 2 * tsVReadWP.min; + tsVReadWP.worker = calloc(sizeof(SVReadWorker), tsVReadWP.max); + pthread_mutex_init(&tsVReadWP.mutex, NULL); - if (readPool.readWorker == NULL) return -1; - for (int i = 0; i < readPool.max; ++i) { - SReadWorker *pWorker = readPool.readWorker + i; + if (tsVReadWP.worker == NULL) return -1; + for (int i = 0; i < tsVReadWP.max; ++i) { + SVReadWorker *pWorker = tsVReadWP.worker + i; pWorker->workerId = i; } - dInfo("dnode read is opened, min worker:%d max worker:%d", readPool.min, readPool.max); + dInfo("dnode vread is initialized, min worker:%d max worker:%d", tsVReadWP.min, tsVReadWP.max); return 0; } -void dnodeCleanupVnodeRead() { - for (int i = 0; i < readPool.max; ++i) { - SReadWorker *pWorker = readPool.readWorker + i; +void dnodeCleanupVRead() { + for (int i = 0; i < tsVReadWP.max; ++i) { + SVReadWorker *pWorker = tsVReadWP.worker + i; if (pWorker->thread) { - taosQsetThreadResume(readQset); + taosQsetThreadResume(tsVReadQset); } } - for (int i = 0; i < readPool.max; ++i) { - SReadWorker *pWorker = readPool.readWorker + i; + for (int i = 0; i < tsVReadWP.max; ++i) { + SVReadWorker *pWorker = tsVReadWP.worker + i; if (pWorker->thread) { pthread_join(pWorker->thread, NULL); } } - free(readPool.readWorker); - taosCloseQset(readQset); - pthread_mutex_destroy(&readPool.mutex); + free(tsVReadWP.worker); + taosCloseQset(tsVReadQset); + pthread_mutex_destroy(&tsVReadWP.mutex); - dInfo("dnode read is closed"); + dInfo("dnode vread is closed"); } -void dnodeDispatchToVnodeReadQueue(SRpcMsg *pMsg) { - int32_t queuedMsgNum = 0; - int32_t leftLen = pMsg->contLen; - char *pCont = (char *) pMsg->pCont; +void dnodeDispatchToVReadQueue(SRpcMsg *pMsg) { + int32_t queuedMsgNum = 0; + int32_t leftLen = pMsg->contLen; + char * pCont = pMsg->pCont; while (leftLen > 0) { - SMsgHead *pHead = (SMsgHead *) pCont; - pHead->vgId = htonl(pHead->vgId); + SMsgHead *pHead = (SMsgHead *)pCont; + pHead->vgId = htonl(pHead->vgId); pHead->contLen = htonl(pHead->contLen); - taos_queue queue = vnodeAcquireRqueue(pHead->vgId); - - if (queue == NULL) { - leftLen -= pHead->contLen; - pCont -= pHead->contLen; - continue; + void *pVnode = vnodeAcquire(pHead->vgId); + if (pVnode != NULL) { + int32_t code = vnodeWriteToRQueue(pVnode, pCont, pHead->contLen, TAOS_QTYPE_RPC, pMsg); + if (code == TSDB_CODE_SUCCESS) queuedMsgNum++; + vnodeRelease(pVnode); } - // put message into queue - SReadMsg *pRead = (SReadMsg *)taosAllocateQitem(sizeof(SReadMsg)); - pRead->rpcMsg = *pMsg; - pRead->pCont = pCont; - pRead->contLen = pHead->contLen; - - // next vnode leftLen -= pHead->contLen; pCont -= pHead->contLen; - queuedMsgNum++; - - taosWriteQitem(queue, TAOS_QTYPE_RPC, pRead); } if (queuedMsgNum == 0) { - SRpcMsg rpcRsp = { - .handle = pMsg->handle, - .pCont = NULL, - .contLen = 0, - .code = TSDB_CODE_VND_INVALID_VGROUP_ID, - .msgType = 0 - }; + SRpcMsg rpcRsp = {.handle = pMsg->handle, .code = TSDB_CODE_VND_INVALID_VGROUP_ID}; rpcSendResponse(&rpcRsp); - 
rpcFreeCont(pMsg->pCont); } + + rpcFreeCont(pMsg->pCont); } -void *dnodeAllocateVnodeRqueue(void *pVnode) { - pthread_mutex_lock(&readPool.mutex); +void *dnodeAllocVReadQueue(void *pVnode) { + pthread_mutex_lock(&tsVReadWP.mutex); taos_queue queue = taosOpenQueue(); if (queue == NULL) { - pthread_mutex_unlock(&readPool.mutex); + pthread_mutex_unlock(&tsVReadWP.mutex); return NULL; } - taosAddIntoQset(readQset, queue, pVnode); + taosAddIntoQset(tsVReadQset, queue, pVnode); // spawn a thread to process queue - if (readPool.num < readPool.max) { + if (tsVReadWP.num < tsVReadWP.max) { do { - SReadWorker *pWorker = readPool.readWorker + readPool.num; + SVReadWorker *pWorker = tsVReadWP.worker + tsVReadWP.num; pthread_attr_t thAttr; pthread_attr_init(&thAttr); pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE); if (pthread_create(&pWorker->thread, &thAttr, dnodeProcessReadQueue, pWorker) != 0) { - dError("failed to create thread to process read queue, reason:%s", strerror(errno)); + dError("failed to create thread to process vread vqueue since %s", strerror(errno)); } pthread_attr_destroy(&thAttr); - readPool.num++; - dDebug("read worker:%d is launched, total:%d", pWorker->workerId, readPool.num); - } while (readPool.num < readPool.min); + tsVReadWP.num++; + dDebug("dnode vread worker:%d is launched, total:%d", pWorker->workerId, tsVReadWP.num); + } while (tsVReadWP.num < tsVReadWP.min); } - pthread_mutex_unlock(&readPool.mutex); - dDebug("pVnode:%p, read queue:%p is allocated", pVnode, queue); + pthread_mutex_unlock(&tsVReadWP.mutex); + dDebug("pVnode:%p, dnode vread queue:%p is allocated", pVnode, queue); return queue; } -void dnodeFreeVnodeRqueue(void *rqueue) { - taosCloseQueue(rqueue); - - // dynamically adjust the number of threads +void dnodeFreeVReadQueue(void *pRqueue) { + taosCloseQueue(pRqueue); } -void dnodeSendRpcReadRsp(void *pVnode, SReadMsg *pRead, int32_t code) { +void dnodeSendRpcVReadRsp(void *pVnode, SVReadMsg *pRead, int32_t code) { SRpcMsg rpcRsp = { - .handle = pRead->rpcMsg.handle, + .handle = pRead->rpcHandle, .pCont = pRead->rspRet.rsp, .contLen = pRead->rspRet.len, .code = code, }; rpcSendResponse(&rpcRsp); - rpcFreeCont(pRead->rpcMsg.pCont); - vnodeRelease(pVnode); } -void dnodeDispatchNonRspMsg(void *pVnode, SReadMsg *pRead, int32_t code) { - rpcFreeCont(pRead->rpcMsg.pCont); - vnodeRelease(pVnode); +void dnodeDispatchNonRspMsg(void *pVnode, SVReadMsg *pRead, int32_t code) { } -static void *dnodeProcessReadQueue(void *param) { - SReadMsg *pReadMsg; - int type; - void *pVnode; +static void *dnodeProcessReadQueue(void *pWorker) { + SVReadMsg *pRead; + int32_t qtype; + void * pVnode; while (1) { - if (taosReadQitemFromQset(readQset, &type, (void **)&pReadMsg, &pVnode) == 0) { - dDebug("qset:%p dnode read got no message from qset, exiting", readQset); + if (taosReadQitemFromQset(tsVReadQset, &qtype, (void **)&pRead, &pVnode) == 0) { + dDebug("qset:%p dnode vread got no message from qset, exiting", tsVReadQset); break; } - dDebug("%p, msg:%s will be processed in vread queue, qtype:%d, msg:%p", pReadMsg->rpcMsg.ahandle, - taosMsg[pReadMsg->rpcMsg.msgType], type, pReadMsg); + dDebug("msg:%p, app:%p type:%s will be processed in vread queue, qtype:%d", pRead, pRead->rpcAhandle, + taosMsg[pRead->msgType], qtype); - int32_t code = vnodeProcessRead(pVnode, pReadMsg); + int32_t code = vnodeProcessRead(pVnode, pRead); - if (type == TAOS_QTYPE_RPC && code != TSDB_CODE_QRY_NOT_READY) { - dnodeSendRpcReadRsp(pVnode, pReadMsg, code); + if (qtype == TAOS_QTYPE_RPC && code != 
TSDB_CODE_QRY_NOT_READY) { + dnodeSendRpcVReadRsp(pVnode, pRead, code); } else { if (code == TSDB_CODE_QRY_HAS_RSP) { - dnodeSendRpcReadRsp(pVnode, pReadMsg, pReadMsg->rpcMsg.code); - } else { // code == TSDB_CODE_QRY_NOT_READY, do not return msg to client - assert(pReadMsg->rpcMsg.handle == NULL || (pReadMsg->rpcMsg.handle != NULL && pReadMsg->rpcMsg.msgType == 5)); - dnodeDispatchNonRspMsg(pVnode, pReadMsg, code); + dnodeSendRpcVReadRsp(pVnode, pRead, pRead->code); + } else { // code == TSDB_CODE_QRY_NOT_READY, do not return msg to client + assert(pRead->rpcHandle == NULL || (pRead->rpcHandle != NULL && pRead->msgType == 5)); + dnodeDispatchNonRspMsg(pVnode, pRead, code); } } - taosFreeQitem(pReadMsg); + vnodeFreeFromRQueue(pVnode, pRead); } return NULL; } - - -UNUSED_FUNC -static void dnodeHandleIdleReadWorker(SReadWorker *pWorker) { - int32_t num = taosGetQueueNumber(readQset); - - if (num == 0 || (num <= readPool.min && readPool.num > readPool.min)) { - readPool.num--; - dDebug("read worker:%d is released, total:%d", pWorker->workerId, readPool.num); - pthread_exit(NULL); - } else { - usleep(30000); - sched_yield(); - } -} - diff --git a/src/dnode/src/dnodeVWrite.c b/src/dnode/src/dnodeVWrite.c index f2740bf6b810197283926a602db4fc423067e231..959eb3c9c532c249a04a67ae2d8b34992e9bcdc6 100644 --- a/src/dnode/src/dnodeVWrite.c +++ b/src/dnode/src/dnodeVWrite.c @@ -15,74 +15,55 @@ #define _DEFAULT_SOURCE #include "os.h" -#include "taosmsg.h" #include "taoserror.h" -#include "tutil.h" +#include "taosmsg.h" +#include "tglobal.h" #include "tqueue.h" -#include "trpc.h" -#include "tsdb.h" #include "twal.h" -#include "tdataformat.h" -#include "tglobal.h" -#include "tsync.h" #include "vnode.h" #include "dnodeInt.h" -#include "syncInt.h" -#include "dnodeVWrite.h" -#include "dnodeMgmt.h" - -typedef struct { - taos_qall qall; - taos_qset qset; // queue set - pthread_t thread; // thread - int32_t workerId; // worker ID -} SWriteWorker; typedef struct { - SRspRet rspRet; - int32_t processedCount; - int32_t code; - void *pCont; - int32_t contLen; - SRpcMsg rpcMsg; -} SWriteMsg; + taos_qall qall; + taos_qset qset; // queue set + int32_t workerId; // worker ID + pthread_t thread; // thread +} SVWriteWorker; typedef struct { - int32_t max; // max number of workers - int32_t nextId; // from 0 to max-1, cyclic - SWriteWorker *writeWorker; + int32_t max; // max number of workers + int32_t nextId; // from 0 to max-1, cyclic + SVWriteWorker * worker; pthread_mutex_t mutex; -} SWriteWorkerPool; +} SVWriteWorkerPool; -static void *dnodeProcessWriteQueue(void *param); -static void dnodeHandleIdleWorker(SWriteWorker *pWorker); +static SVWriteWorkerPool tsVWriteWP; +static void *dnodeProcessVWriteQueue(void *pWorker); -SWriteWorkerPool wWorkerPool; +int32_t dnodeInitVWrite() { + tsVWriteWP.max = tsNumOfCores; + tsVWriteWP.worker = tcalloc(sizeof(SVWriteWorker), tsVWriteWP.max); + if (tsVWriteWP.worker == NULL) return -1; + pthread_mutex_init(&tsVWriteWP.mutex, NULL); -int32_t dnodeInitVnodeWrite() { - wWorkerPool.max = tsNumOfCores; - wWorkerPool.writeWorker = (SWriteWorker *)calloc(sizeof(SWriteWorker), wWorkerPool.max); - if (wWorkerPool.writeWorker == NULL) return -1; - pthread_mutex_init(&wWorkerPool.mutex, NULL); - - for (int32_t i = 0; i < wWorkerPool.max; ++i) { - wWorkerPool.writeWorker[i].workerId = i; + for (int32_t i = 0; i < tsVWriteWP.max; ++i) { + tsVWriteWP.worker[i].workerId = i; } - dInfo("dnode write is opened, max worker %d", wWorkerPool.max); + dInfo("dnode vwrite is initialized, max worker %d", 
tsVWriteWP.max); return 0; } -void dnodeCleanupVnodeWrite() { - for (int32_t i = 0; i < wWorkerPool.max; ++i) { - SWriteWorker *pWorker = wWorkerPool.writeWorker + i; +void dnodeCleanupVWrite() { + for (int32_t i = 0; i < tsVWriteWP.max; ++i) { + SVWriteWorker *pWorker = tsVWriteWP.worker + i; if (pWorker->thread) { taosQsetThreadResume(pWorker->qset); } } - for (int32_t i = 0; i < wWorkerPool.max; ++i) { - SWriteWorker *pWorker = wWorkerPool.writeWorker + i; + for (int32_t i = 0; i < tsVWriteWP.max; ++i) { + SVWriteWorker *pWorker = tsVWriteWP.worker + i; if (pWorker->thread) { pthread_join(pWorker->thread, NULL); taosFreeQall(pWorker->qall); @@ -90,52 +71,51 @@ void dnodeCleanupVnodeWrite() { } } - pthread_mutex_destroy(&wWorkerPool.mutex); - free(wWorkerPool.writeWorker); - dInfo("dnode write is closed"); + pthread_mutex_destroy(&tsVWriteWP.mutex); + tfree(tsVWriteWP.worker); + dInfo("dnode vwrite is closed"); } -void dnodeDispatchToVnodeWriteQueue(SRpcMsg *pMsg) { - char *pCont = (char *)pMsg->pCont; +void dnodeDispatchToVWriteQueue(SRpcMsg *pRpcMsg) { + int32_t code; + char *pCont = pRpcMsg->pCont; - if (pMsg->msgType == TSDB_MSG_TYPE_SUBMIT) { + if (pRpcMsg->msgType == TSDB_MSG_TYPE_SUBMIT) { SMsgDesc *pDesc = (SMsgDesc *)pCont; pDesc->numOfVnodes = htonl(pDesc->numOfVnodes); pCont += sizeof(SMsgDesc); } - SMsgHead *pHead = (SMsgHead *) pCont; - pHead->vgId = htonl(pHead->vgId); - pHead->contLen = htonl(pHead->contLen); - - taos_queue queue = vnodeAcquireWqueue(pHead->vgId); - if (queue) { - // put message into queue - SWriteMsg *pWrite = (SWriteMsg *)taosAllocateQitem(sizeof(SWriteMsg)); - pWrite->rpcMsg = *pMsg; - pWrite->pCont = pCont; - pWrite->contLen = pHead->contLen; + SMsgHead *pMsg = (SMsgHead *)pCont; + pMsg->vgId = htonl(pMsg->vgId); + pMsg->contLen = htonl(pMsg->contLen); - taosWriteQitem(queue, TAOS_QTYPE_RPC, pWrite); + void *pVnode = vnodeAcquire(pMsg->vgId); + if (pVnode == NULL) { + code = TSDB_CODE_VND_INVALID_VGROUP_ID; } else { - SRpcMsg rpcRsp = { - .handle = pMsg->handle, - .pCont = NULL, - .contLen = 0, - .code = TSDB_CODE_VND_INVALID_VGROUP_ID, - .msgType = 0 - }; + SWalHead *pHead = (SWalHead *)(pCont - sizeof(SWalHead)); + pHead->msgType = pRpcMsg->msgType; + pHead->version = 0; + pHead->len = pMsg->contLen; + code = vnodeWriteToWQueue(pVnode, pHead, TAOS_QTYPE_RPC, pRpcMsg); + } + + if (code != TSDB_CODE_SUCCESS) { + SRpcMsg rpcRsp = {.handle = pRpcMsg->handle, .code = code}; rpcSendResponse(&rpcRsp); - rpcFreeCont(pMsg->pCont); } + + vnodeRelease(pVnode); + rpcFreeCont(pRpcMsg->pCont); } -void *dnodeAllocateVnodeWqueue(void *pVnode) { - pthread_mutex_lock(&wWorkerPool.mutex); - SWriteWorker *pWorker = wWorkerPool.writeWorker + wWorkerPool.nextId; - void *queue = taosOpenQueue(); +void *dnodeAllocVWriteQueue(void *pVnode) { + pthread_mutex_lock(&tsVWriteWP.mutex); + SVWriteWorker *pWorker = tsVWriteWP.worker + tsVWriteWP.nextId; + taos_queue *queue = taosOpenQueue(); if (queue == NULL) { - pthread_mutex_unlock(&wWorkerPool.mutex); + pthread_mutex_unlock(&tsVWriteWP.mutex); return NULL; } @@ -143,7 +123,7 @@ void *dnodeAllocateVnodeWqueue(void *pVnode) { pWorker->qset = taosOpenQset(); if (pWorker->qset == NULL) { taosCloseQueue(queue); - pthread_mutex_unlock(&wWorkerPool.mutex); + pthread_mutex_unlock(&tsVWriteWP.mutex); return NULL; } @@ -152,45 +132,43 @@ void *dnodeAllocateVnodeWqueue(void *pVnode) { if (pWorker->qall == NULL) { taosCloseQset(pWorker->qset); taosCloseQueue(queue); - pthread_mutex_unlock(&wWorkerPool.mutex); + 
pthread_mutex_unlock(&tsVWriteWP.mutex); return NULL; } pthread_attr_t thAttr; pthread_attr_init(&thAttr); pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE); - if (pthread_create(&pWorker->thread, &thAttr, dnodeProcessWriteQueue, pWorker) != 0) { - dError("failed to create thread to process read queue, reason:%s", strerror(errno)); + if (pthread_create(&pWorker->thread, &thAttr, dnodeProcessVWriteQueue, pWorker) != 0) { + dError("failed to create thread to process vwrite queue since %s", strerror(errno)); taosFreeQall(pWorker->qall); taosCloseQset(pWorker->qset); taosCloseQueue(queue); queue = NULL; } else { - dDebug("write worker:%d is launched", pWorker->workerId); - wWorkerPool.nextId = (wWorkerPool.nextId + 1) % wWorkerPool.max; + dDebug("dnode vwrite worker:%d is launched", pWorker->workerId); + tsVWriteWP.nextId = (tsVWriteWP.nextId + 1) % tsVWriteWP.max; } pthread_attr_destroy(&thAttr); } else { taosAddIntoQset(pWorker->qset, queue, pVnode); - wWorkerPool.nextId = (wWorkerPool.nextId + 1) % wWorkerPool.max; + tsVWriteWP.nextId = (tsVWriteWP.nextId + 1) % tsVWriteWP.max; } - pthread_mutex_unlock(&wWorkerPool.mutex); - dDebug("pVnode:%p, write queue:%p is allocated", pVnode, queue); + pthread_mutex_unlock(&tsVWriteWP.mutex); + dDebug("pVnode:%p, dnode vwrite queue:%p is allocated", pVnode, queue); return queue; } -void dnodeFreeVnodeWqueue(void *wqueue) { - taosCloseQueue(wqueue); - - // dynamically adjust the number of threads +void dnodeFreeVWriteQueue(void *pWqueue) { + taosCloseQueue(pWqueue); } -void dnodeSendRpcVnodeWriteRsp(void *pVnode, void *param, int32_t code) { - SWriteMsg *pWrite = (SWriteMsg *)param; - if (pWrite == NULL) return; +void dnodeSendRpcVWriteRsp(void *pVnode, void *wparam, int32_t code) { + if (wparam == NULL) return; + SVWriteMsg *pWrite = wparam; if (code < 0) pWrite->code = code; int32_t count = atomic_add_fetch_32(&pWrite->processedCount, 1); @@ -198,106 +176,64 @@ void dnodeSendRpcVnodeWriteRsp(void *pVnode, void *param, int32_t code) { if (count <= 1) return; SRpcMsg rpcRsp = { - .handle = pWrite->rpcMsg.handle, + .handle = pWrite->rpcHandle, .pCont = pWrite->rspRet.rsp, .contLen = pWrite->rspRet.len, .code = pWrite->code, }; rpcSendResponse(&rpcRsp); - rpcFreeCont(pWrite->rpcMsg.pCont); - taosFreeQitem(pWrite); - - vnodeRelease(pVnode); + vnodeFreeFromWQueue(pVnode, pWrite); } -static void *dnodeProcessWriteQueue(void *param) { - SWriteWorker *pWorker = (SWriteWorker *)param; - SWriteMsg * pWrite; - SWalHead * pHead; - int32_t numOfMsgs; - int type; - void * pVnode, *item; - SRspRet * pRspRet; +static void *dnodeProcessVWriteQueue(void *wparam) { + SVWriteWorker *pWorker = wparam; + SVWriteMsg * pWrite; + void * pVnode; + int32_t numOfMsgs; + int32_t qtype; - dDebug("write worker:%d is running", pWorker->workerId); + dDebug("dnode vwrite worker:%d is running", pWorker->workerId); while (1) { numOfMsgs = taosReadAllQitemsFromQset(pWorker->qset, pWorker->qall, &pVnode); if (numOfMsgs == 0) { - dDebug("qset:%p, dnode write got no message from qset, exiting", pWorker->qset); + dDebug("qset:%p, dnode vwrite got no message from qset, exiting", pWorker->qset); break; } + bool forceFsync = false; for (int32_t i = 0; i < numOfMsgs; ++i) { - pWrite = NULL; - pRspRet = NULL; - taosGetQitem(pWorker->qall, &type, &item); - if (type == TAOS_QTYPE_RPC) { - pWrite = (SWriteMsg *)item; - pRspRet = &pWrite->rspRet; - pHead = (SWalHead *)(pWrite->pCont - sizeof(SWalHead)); - pHead->msgType = pWrite->rpcMsg.msgType; - pHead->version = 0; - pHead->len = 
pWrite->contLen; - dDebug("%p, rpc msg:%s will be processed in vwrite queue", pWrite->rpcMsg.ahandle, - taosMsg[pWrite->rpcMsg.msgType]); - } else if (type == TAOS_QTYPE_CQ) { - pHead = (SWalHead *)((char*)item + sizeof(SSyncHead)); - dTrace("%p, CQ wal msg:%s will be processed in vwrite queue, version:%" PRIu64, pHead, taosMsg[pHead->msgType], - pHead->version); - } else { - pHead = (SWalHead *)item; - dTrace("%p, wal msg:%s will be processed in vwrite queue, version:%" PRIu64, pHead, taosMsg[pHead->msgType], - pHead->version); - } + taosGetQitem(pWorker->qall, &qtype, (void **)&pWrite); + dTrace("msg:%p, app:%p type:%s will be processed in vwrite queue, qtype:%s hver:%" PRIu64, pWrite, + pWrite->rpcAhandle, taosMsg[pWrite->pHead->msgType], qtypeStr[qtype], pWrite->pHead->version); - int32_t code = vnodeProcessWrite(pVnode, type, pHead, pRspRet); - dTrace("%p, msg:%s is processed in vwrite queue, version:%" PRIu64 ", result:%s", pHead, taosMsg[pHead->msgType], - pHead->version, tstrerror(code)); + pWrite->code = vnodeProcessWrite(pVnode, pWrite->pHead, qtype, &pWrite->rspRet); + if (pWrite->code <= 0) pWrite->processedCount = 1; + if (pWrite->code == 0 && pWrite->pHead->msgType != TSDB_MSG_TYPE_SUBMIT) forceFsync = true; - if (pWrite) { - pWrite->rpcMsg.code = code; - if (code <= 0) pWrite->processedCount = 1; - } + dTrace("msg:%p is processed in vwrite queue, result:%s", pWrite, tstrerror(pWrite->code)); } - walFsync(vnodeGetWal(pVnode)); + walFsync(vnodeGetWal(pVnode), forceFsync); // browse all items, and process them one by one taosResetQitems(pWorker->qall); for (int32_t i = 0; i < numOfMsgs; ++i) { - taosGetQitem(pWorker->qall, &type, &item); - if (type == TAOS_QTYPE_RPC) { - pWrite = (SWriteMsg *)item; - dnodeSendRpcVnodeWriteRsp(pVnode, item, pWrite->rpcMsg.code); - } else if (type == TAOS_QTYPE_FWD) { - pHead = (SWalHead *)item; - vnodeConfirmForward(pVnode, pHead->version, 0); - taosFreeQitem(item); - vnodeRelease(pVnode); + taosGetQitem(pWorker->qall, &qtype, (void **)&pWrite); + if (qtype == TAOS_QTYPE_RPC) { + dnodeSendRpcVWriteRsp(pVnode, pWrite, pWrite->code); } else { - taosFreeQitem(item); - vnodeRelease(pVnode); + if (qtype == TAOS_QTYPE_FWD) { + vnodeConfirmForward(pVnode, pWrite->pHead->version, 0); + } + if (pWrite->rspRet.rsp) { + rpcFreeCont(pWrite->rspRet.rsp); + } + vnodeFreeFromWQueue(pVnode, pWrite); } } } return NULL; } - -UNUSED_FUNC -static void dnodeHandleIdleWorker(SWriteWorker *pWorker) { - int32_t num = taosGetQueueNumber(pWorker->qset); - - if (num > 0) { - usleep(30000); - sched_yield(); - } else { - taosFreeQall(pWorker->qall); - taosCloseQset(pWorker->qset); - pWorker->qset = NULL; - dDebug("write worker:%d is released", pWorker->workerId); - pthread_exit(NULL); - } -} diff --git a/src/inc/dnode.h b/src/inc/dnode.h index e84545be1753f3fedb4ee78acf397c91d824ad8b..eef4490800a4191c2dee55c450cf99b8381bb64d 100644 --- a/src/inc/dnode.h +++ b/src/inc/dnode.h @@ -21,29 +21,31 @@ extern "C" { #endif #include "trpc.h" +#include "taosmsg.h" typedef struct { int32_t queryReqNum; int32_t submitReqNum; int32_t httpReqNum; -} SDnodeStatisInfo; +} SStatisInfo; typedef enum { - TSDB_DNODE_RUN_STATUS_INITIALIZE, - TSDB_DNODE_RUN_STATUS_RUNING, - TSDB_DNODE_RUN_STATUS_STOPPED -} SDnodeRunStatus; + TSDB_RUN_STATUS_INITIALIZE, + TSDB_RUN_STATUS_RUNING, + TSDB_RUN_STATUS_STOPPED +} SRunStatus; -SDnodeRunStatus dnodeGetRunStatus(); -SDnodeStatisInfo dnodeGetStatisInfo(); +SRunStatus dnodeGetRunStatus(); +SStatisInfo dnodeGetStatisInfo(); bool dnodeIsFirstDeploy(); -char * 
dnodeGetMnodeMasterEp(); -void dnodeGetMnodeEpSetForPeer(void *epSet); -void dnodeGetMnodeEpSetForShell(void *epSet); -void * dnodeGetMnodeInfos(); +bool dnodeIsMasterEp(char *ep); +void dnodeGetEpSetForPeer(SRpcEpSet *epSet); +void dnodeGetEpSetForShell(SRpcEpSet *epSet); int32_t dnodeGetDnodeId(); -bool dnodeStartMnode(void *pModes); +void dnodeUpdateEp(int32_t dnodeId, char *ep, char *fqdn, uint16_t *port); +bool dnodeCheckEpChanged(int32_t dnodeId, char *epstr); +bool dnodeStartMnode(SMnodeInfos *minfos); void dnodeAddClientRspHandle(uint8_t msgType, void (*fp)(SRpcMsg *rpcMsg)); void dnodeSendMsgToDnode(SRpcEpSet *epSet, SRpcMsg *rpcMsg); @@ -51,21 +53,21 @@ void dnodeSendMsgToMnodeRecv(SRpcMsg *rpcMsg, SRpcMsg *rpcRsp); void dnodeSendMsgToDnodeRecv(SRpcMsg *rpcMsg, SRpcMsg *rpcRsp, SRpcEpSet *epSet); void *dnodeSendCfgTableToRecv(int32_t vgId, int32_t tid); -void *dnodeAllocateVnodeWqueue(void *pVnode); -void dnodeFreeVnodeWqueue(void *queue); -void *dnodeAllocateVnodeRqueue(void *pVnode); -void dnodeFreeVnodeRqueue(void *rqueue); -void dnodeSendRpcVnodeWriteRsp(void *pVnode, void *param, int32_t code); +void *dnodeAllocVWriteQueue(void *pVnode); +void dnodeFreeVWriteQueue(void *pWqueue); +void dnodeSendRpcVWriteRsp(void *pVnode, void *pWrite, int32_t code); +void *dnodeAllocVReadQueue(void *pVnode); +void dnodeFreeVReadQueue(void *pRqueue); -int32_t dnodeAllocateMnodePqueue(); -void dnodeFreeMnodePqueue(); -int32_t dnodeAllocateMnodeRqueue(); -void dnodeFreeMnodeRqueue(); -int32_t dnodeAllocateMnodeWqueue(); -void dnodeFreeMnodeWqueue(); -void dnodeSendRpcMnodeWriteRsp(void *pMsg, int32_t code); -void dnodeReprocessMnodeWriteMsg(void *pMsg); -void dnodeDelayReprocessMnodeWriteMsg(void *pMsg); +int32_t dnodeAllocateMPeerQueue(); +void dnodeFreeMPeerQueue(); +int32_t dnodeAllocMReadQueue(); +void dnodeFreeMReadQueue(); +int32_t dnodeAllocMWritequeue(); +void dnodeFreeMWritequeue(); +void dnodeSendRpcMWriteRsp(void *pMsg, int32_t code); +void dnodeReprocessMWriteMsg(void *pMsg); +void dnodeDelayReprocessMWriteMsg(void *pMsg); void dnodeSendStatusMsgToMnode(); diff --git a/src/inc/mnode.h b/src/inc/mnode.h index 5bef7402e30a0cc9f1964cc2430e0b1f5141590c..bdc30b0c46ced0961715bd48623fdb9e52fb440e 100644 --- a/src/inc/mnode.h +++ b/src/inc/mnode.h @@ -35,24 +35,26 @@ typedef struct { } SMnodeRsp; typedef struct SMnodeMsg { - SRpcMsg rpcMsg; + struct SAcctObj * pAcct; + struct SDnodeObj *pDnode; + struct SUserObj * pUser; + struct SDbObj * pDb; + struct SVgObj * pVgroup; + struct STableObj *pTable; + struct SSTableObj*pSTable; SMnodeRsp rpcRsp; int8_t received; int8_t successed; int8_t expected; int8_t retry; + int32_t incomingTs; int32_t code; void * pObj; - struct SAcctObj * pAcct; - struct SDnodeObj *pDnode; - struct SUserObj * pUser; - struct SDbObj * pDb; - struct SVgObj * pVgroup; - struct STableObj *pTable; - struct SSuperTableObj *pSTable; + SRpcMsg rpcMsg; + char pCont[]; } SMnodeMsg; -void mnodeCreateMsg(SMnodeMsg *pMsg, SRpcMsg *rpcMsg); +void * mnodeCreateMsg(SRpcMsg *pRpcMsg); int32_t mnodeInitMsg(SMnodeMsg *pMsg); void mnodeCleanupMsg(SMnodeMsg *pMsg); diff --git a/src/inc/query.h b/src/inc/query.h index 0c18f85dc31bae5e77bae7228d5390a8d32df07a..5e1de77889cc469566cc94b729c55622e5462bd6 100644 --- a/src/inc/query.h +++ b/src/inc/query.h @@ -78,7 +78,6 @@ int32_t qKillQuery(qinfo_t qinfo); int32_t qQueryCompleted(qinfo_t qinfo); - /** * destroy query info structure * @param qHandle diff --git a/src/inc/taos.h b/src/inc/taos.h index 
315313734753de73bf477b1f67783a45c38c87c9..cd863587a635ea0aee1e4a288f617e4601b0a09a 100644 --- a/src/inc/taos.h +++ b/src/inc/taos.h @@ -17,6 +17,7 @@ #define TDENGINE_TAOS_H #include +#include #ifdef __cplusplus extern "C" { @@ -64,7 +65,7 @@ typedef struct taosField { #endif DLL_EXPORT void taos_init(); -DLL_EXPORT void taos_cleanup(); +DLL_EXPORT void taos_cleanup(void); DLL_EXPORT int taos_options(TSDB_OPTION option, const void *arg, ...); DLL_EXPORT TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port); DLL_EXPORT void taos_close(TAOS *taos); @@ -109,13 +110,14 @@ DLL_EXPORT TAOS_RES *taos_query(TAOS *taos, const char *sql); DLL_EXPORT TAOS_ROW taos_fetch_row(TAOS_RES *res); DLL_EXPORT int taos_result_precision(TAOS_RES *res); // get the time precision of result DLL_EXPORT void taos_free_result(TAOS_RES *res); -DLL_EXPORT int taos_field_count(TAOS_RES *tres); +DLL_EXPORT int taos_field_count(TAOS_RES *res); DLL_EXPORT int taos_num_fields(TAOS_RES *res); DLL_EXPORT int taos_affected_rows(TAOS_RES *res); DLL_EXPORT TAOS_FIELD *taos_fetch_fields(TAOS_RES *res); DLL_EXPORT int taos_select_db(TAOS *taos, const char *db); DLL_EXPORT int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields); DLL_EXPORT void taos_stop_query(TAOS_RES *res); +DLL_EXPORT bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col); int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows); int taos_validate_sql(TAOS *taos, const char *sql); diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index aee60da20147c674979540aa73da5ac101fae7f0..20c7af6a21d9a385d00fb3e52fe2a6ffd3d0729e 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -61,13 +61,23 @@ typedef struct tstr { // Bytes for each type. extern const int32_t TYPE_BYTES[11]; + // TODO: replace and remove code below -#define CHAR_BYTES sizeof(char) -#define SHORT_BYTES sizeof(int16_t) -#define INT_BYTES sizeof(int32_t) -#define LONG_BYTES sizeof(int64_t) -#define FLOAT_BYTES sizeof(float) -#define DOUBLE_BYTES sizeof(double) +#define CHAR_BYTES sizeof(char) +#define SHORT_BYTES sizeof(int16_t) +#define INT_BYTES sizeof(int32_t) +#define LONG_BYTES sizeof(int64_t) +#define FLOAT_BYTES sizeof(float) +#define DOUBLE_BYTES sizeof(double) +#define POINTER_BYTES sizeof(void *) // 8 by default assert(sizeof(ptrdiff_t) == sizseof(void*) + +#define TSDB_KEYSIZE sizeof(TSKEY) + +#if LINUX +#define TSDB_NCHAR_SIZE sizeof(wchar_t) +#else +#define TSDB_NCHAR_SIZE sizeof(int32_t) +#endif // NULL definition #define TSDB_DATA_BOOL_NULL 0x02 @@ -75,6 +85,7 @@ extern const int32_t TYPE_BYTES[11]; #define TSDB_DATA_SMALLINT_NULL 0x8000 #define TSDB_DATA_INT_NULL 0x80000000L #define TSDB_DATA_BIGINT_NULL 0x8000000000000000L +#define TSDB_DATA_TIMESTAMP_NULL TSDB_DATA_BIGINT_NULL #define TSDB_DATA_FLOAT_NULL 0x7FF00000 // it is an NAN #define TSDB_DATA_DOUBLE_NULL 0x7FFFFF0000000000L // an NAN @@ -101,10 +112,12 @@ extern const int32_t TYPE_BYTES[11]; #define TSDB_TIME_PRECISION_MILLI 0 #define TSDB_TIME_PRECISION_MICRO 1 #define TSDB_TIME_PRECISION_NANO 2 -#define TSDB_TICK_PER_SECOND(precision) ((precision)==TSDB_TIME_PRECISION_MILLI ? 1e3L : ((precision)==TSDB_TIME_PRECISION_MICRO ? 1e6L : 1e9L)) #define TSDB_TIME_PRECISION_MILLI_STR "ms" #define TSDB_TIME_PRECISION_MICRO_STR "us" +#define TSDB_TIME_PRECISION_NANO_STR "ns" + +#define TSDB_TICK_PER_SECOND(precision) ((precision)==TSDB_TIME_PRECISION_MILLI ? 1e3L : ((precision)==TSDB_TIME_PRECISION_MICRO ? 
1e6L : 1e9L)) #define T_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) #define T_APPEND_MEMBER(dst, ptr, type, member) \ @@ -118,15 +131,6 @@ do { \ (src) = (void *)((char *)src + sizeof(type));\ } while(0) -#define TSDB_KEYSIZE sizeof(TSKEY) - -#if LINUX - #define TSDB_NCHAR_SIZE sizeof(wchar_t) -#else - #define TSDB_NCHAR_SIZE 4 -#endif -//#define TSDB_CHAR_TERMINATED_SPACE 1 - #define GET_INT8_VAL(x) (*(int8_t *)(x)) #define GET_INT16_VAL(x) (*(int16_t *)(x)) #define GET_INT32_VAL(x) (*(int32_t *)(x)) @@ -172,7 +176,6 @@ typedef struct tDataTypeDescriptor { } tDataTypeDescriptor; extern tDataTypeDescriptor tDataTypeDesc[11]; -#define POINTER_BYTES sizeof(void *) // 8 by default assert(sizeof(ptrdiff_t) == sizseof(void*) bool isValidDataType(int32_t type); //bool isNull(const char *val, int32_t type); @@ -266,10 +269,6 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf #define TSDB_AUTH_LEN 16 #define TSDB_KEY_LEN 16 #define TSDB_VERSION_LEN 12 -#define TSDB_STREET_LEN 64 -#define TSDB_CITY_LEN 20 -#define TSDB_STATE_LEN 20 -#define TSDB_COUNTRY_LEN 20 #define TSDB_LOCALE_LEN 64 #define TSDB_TIMEZONE_LEN 96 #define TSDB_LABEL_LEN 8 @@ -333,7 +332,7 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf #define TSDB_MIN_DAYS_PER_FILE 1 #define TSDB_MAX_DAYS_PER_FILE 3650 -#define TSDB_DEFAULT_DAYS_PER_FILE 2 +#define TSDB_DEFAULT_DAYS_PER_FILE 10 #define TSDB_MIN_KEEP 1 // data in db to be reserved. #define TSDB_MAX_KEEP 365000 // data in db to be reserved. @@ -363,6 +362,10 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf #define TSDB_MAX_WAL_LEVEL 2 #define TSDB_DEFAULT_WAL_LEVEL 1 +#define TSDB_MIN_DB_UPDATE 0 +#define TSDB_MAX_DB_UPDATE 1 +#define TSDB_DEFAULT_DB_UPDATE_OPTION 0 + #define TSDB_MIN_FSYNC_PERIOD 0 #define TSDB_MAX_FSYNC_PERIOD 180000 // millisecond #define TSDB_DEFAULT_FSYNC_PERIOD 3000 // three second @@ -388,27 +391,27 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf * 1. ordinary sub query for select * from super_table * 2. all sqlobj generated by createSubqueryObj with this flag */ -#define TSDB_QUERY_TYPE_SUBQUERY 0x02u -#define TSDB_QUERY_TYPE_STABLE_SUBQUERY 0x04u // two-stage subquery for super table +#define TSDB_QUERY_TYPE_SUBQUERY 0x02u +#define TSDB_QUERY_TYPE_STABLE_SUBQUERY 0x04u // two-stage subquery for super table -#define TSDB_QUERY_TYPE_TABLE_QUERY 0x08u // query ordinary table; below only apply to client side -#define TSDB_QUERY_TYPE_STABLE_QUERY 0x10u // query on super table -#define TSDB_QUERY_TYPE_JOIN_QUERY 0x20u // join query -#define TSDB_QUERY_TYPE_PROJECTION_QUERY 0x40u // select *,columns... query -#define TSDB_QUERY_TYPE_JOIN_SEC_STAGE 0x80u // join sub query at the second stage +#define TSDB_QUERY_TYPE_TABLE_QUERY 0x08u // query ordinary table; below only apply to client side +#define TSDB_QUERY_TYPE_STABLE_QUERY 0x10u // query on super table +#define TSDB_QUERY_TYPE_JOIN_QUERY 0x20u // join query +#define TSDB_QUERY_TYPE_PROJECTION_QUERY 0x40u // select *,columns... 
query +#define TSDB_QUERY_TYPE_JOIN_SEC_STAGE 0x80u // join sub query at the second stage -#define TSDB_QUERY_TYPE_TAG_FILTER_QUERY 0x400u -#define TSDB_QUERY_TYPE_INSERT 0x100u // insert type -#define TSDB_QUERY_TYPE_MULTITABLE_QUERY 0x200u -#define TSDB_QUERY_TYPE_STMT_INSERT 0x800u // stmt insert type +#define TSDB_QUERY_TYPE_TAG_FILTER_QUERY 0x400u +#define TSDB_QUERY_TYPE_INSERT 0x100u // insert type +#define TSDB_QUERY_TYPE_MULTITABLE_QUERY 0x200u +#define TSDB_QUERY_TYPE_STMT_INSERT 0x800u // stmt insert type #define TSDB_QUERY_HAS_TYPE(x, _type) (((x) & (_type)) != 0) #define TSDB_QUERY_SET_TYPE(x, _type) ((x) |= (_type)) #define TSDB_QUERY_CLEAR_TYPE(x, _type) ((x) &= (~_type)) #define TSDB_QUERY_RESET_TYPE(x) ((x) = TSDB_QUERY_TYPE_NON_TYPE) -#define TSDB_ORDER_ASC 1 -#define TSDB_ORDER_DESC 2 +#define TSDB_ORDER_ASC 1 +#define TSDB_ORDER_DESC 2 #define TSDB_DEFAULT_CLUSTER_HASH_SIZE 1 #define TSDB_DEFAULT_MNODES_HASH_SIZE 5 @@ -420,46 +423,56 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf #define TSDB_DEFAULT_STABLES_HASH_SIZE 100 #define TSDB_DEFAULT_CTABLES_HASH_SIZE 20000 -#define TSDB_PORT_DNODESHELL 0 -#define TSDB_PORT_DNODEDNODE 5 -#define TSDB_PORT_SYNC 10 -#define TSDB_PORT_HTTP 11 -#define TSDB_PORT_ARBITRATOR 12 +#define TSDB_PORT_DNODESHELL 0 +#define TSDB_PORT_DNODEDNODE 5 +#define TSDB_PORT_SYNC 10 +#define TSDB_PORT_HTTP 11 +#define TSDB_PORT_ARBITRATOR 12 +#define TSDB_PORT_DNODESHELL 0 +#define TSDB_PORT_DNODEDNODE 5 +#define TSDB_PORT_SYNC 10 +#define TSDB_PORT_HTTP 11 +#define TSDB_PORT_ARBITRATOR 12 + +#define TSDB_MAX_WAL_SIZE (1024*1024) -#define TAOS_QTYPE_RPC 0 -#define TAOS_QTYPE_FWD 1 -#define TAOS_QTYPE_WAL 2 -#define TAOS_QTYPE_CQ 3 -#define TAOS_QTYPE_QUERY 4 +typedef enum { + TAOS_QTYPE_RPC = 0, + TAOS_QTYPE_FWD = 1, + TAOS_QTYPE_WAL = 2, + TAOS_QTYPE_CQ = 3, + TAOS_QTYPE_QUERY = 4 +} EQType; typedef enum { - TSDB_SUPER_TABLE = 0, // super table - TSDB_CHILD_TABLE = 1, // table created from super table - TSDB_NORMAL_TABLE = 2, // ordinary table - TSDB_STREAM_TABLE = 3, // table created from stream computing - TSDB_TABLE_MAX = 4 + TSDB_SUPER_TABLE = 0, // super table + TSDB_CHILD_TABLE = 1, // table created from super table + TSDB_NORMAL_TABLE = 2, // ordinary table + TSDB_STREAM_TABLE = 3, // table created from stream computing + TSDB_TABLE_MAX = 4 } ETableType; typedef enum { - TSDB_MOD_MNODE, - TSDB_MOD_HTTP, - TSDB_MOD_MONITOR, - TSDB_MOD_MQTT, - TSDB_MOD_MAX + TSDB_MOD_MNODE = 0, + TSDB_MOD_HTTP = 1, + TSDB_MOD_MONITOR = 2, + TSDB_MOD_MQTT = 3, + TSDB_MOD_MAX = 4 } EModuleType; - typedef enum { - TSDB_CHECK_ITEM_NETWORK, - TSDB_CHECK_ITEM_MEM, - TSDB_CHECK_ITEM_CPU, - TSDB_CHECK_ITEM_DISK, - TSDB_CHECK_ITEM_OS, - TSDB_CHECK_ITEM_ACCESS, - TSDB_CHECK_ITEM_VERSION, - TSDB_CHECK_ITEM_DATAFILE, - TSDB_CHECK_ITEM_MAX - } ECheckItemType; - +typedef enum { + TSDB_CHECK_ITEM_NETWORK, + TSDB_CHECK_ITEM_MEM, + TSDB_CHECK_ITEM_CPU, + TSDB_CHECK_ITEM_DISK, + TSDB_CHECK_ITEM_OS, + TSDB_CHECK_ITEM_ACCESS, + TSDB_CHECK_ITEM_VERSION, + TSDB_CHECK_ITEM_DATAFILE, + TSDB_CHECK_ITEM_MAX +} ECheckItemType; + +extern char *qtypeStr[]; #ifdef __cplusplus } diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index bb111d2da0da75e6a3e3812ac21364f9a18fb6f3..ff91989e5f15b00775fd02505704a3afccaab500 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -24,11 +24,11 @@ extern "C" { #include #ifdef TAOS_ERROR_C -#define TAOS_DEFINE_ERROR(name, mod, code, msg) {.val = (0x80000000 | ((mod)<<16) | (code)), .str=(msg)}, +#define 
TAOS_DEFINE_ERROR(name, mod, code, msg) {.val = (int32_t)((0x80000000 | ((mod)<<16) | (code))), .str=(msg)}, #else -#define TAOS_DEFINE_ERROR(name, mod, code, msg) static const int32_t name = (0x80000000 | ((mod)<<16) | (code)); +#define TAOS_DEFINE_ERROR(name, mod, code, msg) static const int32_t name = (int32_t)((0x80000000 | ((mod)<<16) | (code))); #endif - + #define TAOS_SYSTEM_ERROR(code) (0x80ff0000 | (code)) #define TAOS_SUCCEEDED(err) ((err) >= 0) #define TAOS_FAILED(err) ((err) < 0) @@ -37,7 +37,7 @@ const char* tstrerror(int32_t err); int32_t* taosGetErrno(); #define terrno (*taosGetErrno()) - + #define TSDB_CODE_SUCCESS 0 #ifdef TAOS_ERROR_C @@ -74,6 +74,12 @@ TAOS_DEFINE_ERROR(TSDB_CODE_COM_MEMORY_CORRUPTED, 0, 0x0101, "Memory cor TAOS_DEFINE_ERROR(TSDB_CODE_COM_OUT_OF_MEMORY, 0, 0x0102, "Out of memory") TAOS_DEFINE_ERROR(TSDB_CODE_COM_INVALID_CFG_MSG, 0, 0x0103, "Invalid config message") TAOS_DEFINE_ERROR(TSDB_CODE_COM_FILE_CORRUPTED, 0, 0x0104, "Data file corrupted") +TAOS_DEFINE_ERROR(TSDB_CODE_REF_NO_MEMORY, 0, 0x0105, "Ref out of memory") +TAOS_DEFINE_ERROR(TSDB_CODE_REF_FULL, 0, 0x0106, "too many Ref Objs") +TAOS_DEFINE_ERROR(TSDB_CODE_REF_ID_REMOVED, 0, 0x0107, "Ref ID is removed") +TAOS_DEFINE_ERROR(TSDB_CODE_REF_INVALID_ID, 0, 0x0108, "Invalid Ref ID") +TAOS_DEFINE_ERROR(TSDB_CODE_REF_ALREADY_EXIST, 0, 0x0109, "Ref is already there") +TAOS_DEFINE_ERROR(TSDB_CODE_REF_NOT_EXIST, 0, 0x010A, "Ref is not there") //client TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_SQL, 0, 0x0200, "Invalid SQL statement") @@ -101,6 +107,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TSC_CONN_KILLED, 0, 0x0215, "Connection TAOS_DEFINE_ERROR(TSDB_CODE_TSC_SQL_SYNTAX_ERROR, 0, 0x0216, "Syntax error in SQL") TAOS_DEFINE_ERROR(TSDB_CODE_TSC_DB_NOT_SELECTED, 0, 0x0217, "Database not specified or available") TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_TABLE_NAME, 0, 0x0218, "Table does not exist") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_EXCEED_SQL_LIMIT, 0, 0x0219, "SQL statement too long, check maxSQLLength config") // mnode TAOS_DEFINE_ERROR(TSDB_CODE_MND_MSG_NOT_PROCESSED, 0, 0x0300, "Message not processed") @@ -175,6 +182,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DB, 0, 0x0383, "Invalid da TAOS_DEFINE_ERROR(TSDB_CODE_MND_MONITOR_DB_FORBIDDEN, 0, 0x0384, "Cannot delete monitor database") TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_DATABASES, 0, 0x0385, "Too many databases for account") TAOS_DEFINE_ERROR(TSDB_CODE_MND_DB_IN_DROPPING, 0, 0x0386, "Database not available") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_VGROUP_NOT_READY, 0, 0x0387, "Database unsynced") // dnode TAOS_DEFINE_ERROR(TSDB_CODE_DND_MSG_NOT_PROCESSED, 0, 0x0400, "Message not processed") @@ -182,7 +190,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_DND_OUT_OF_MEMORY, 0, 0x0401, "Dnode out TAOS_DEFINE_ERROR(TSDB_CODE_DND_NO_WRITE_ACCESS, 0, 0x0402, "No permission for disk files in dnode") TAOS_DEFINE_ERROR(TSDB_CODE_DND_INVALID_MSG_LEN, 0, 0x0403, "Invalid message length") -// vnode +// vnode TAOS_DEFINE_ERROR(TSDB_CODE_VND_ACTION_IN_PROGRESS, 0, 0x0500, "Action in progress") TAOS_DEFINE_ERROR(TSDB_CODE_VND_MSG_NOT_PROCESSED, 0, 0x0501, "Message not processed") TAOS_DEFINE_ERROR(TSDB_CODE_VND_ACTION_NEED_REPROCESSED, 0, 0x0502, "Action need to be reprocessed") @@ -194,6 +202,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR, 0, 0x0507, "Missing da TAOS_DEFINE_ERROR(TSDB_CODE_VND_OUT_OF_MEMORY, 0, 0x0508, "Out of memory") TAOS_DEFINE_ERROR(TSDB_CODE_VND_APP_ERROR, 0, 0x0509, "Unexpected generic error in vnode") TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_VRESION_FILE, 0, 0x050A, 
"Invalid version file") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FULL, 0, 0x050B, "Vnode memory is full because commit failed") TAOS_DEFINE_ERROR(TSDB_CODE_VND_NOT_SYNCED, 0, 0x0511, "Database suspended") TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH, 0, 0x0512, "Write operation denied") @@ -230,6 +239,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_QRY_NOT_READY, 0, 0x0707, "Query not TAOS_DEFINE_ERROR(TSDB_CODE_QRY_HAS_RSP, 0, 0x0708, "Query should response") TAOS_DEFINE_ERROR(TSDB_CODE_QRY_IN_EXEC, 0, 0x0709, "Multiple retrieval of this query") TAOS_DEFINE_ERROR(TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW, 0, 0x070A, "Too many time window in query") +TAOS_DEFINE_ERROR(TSDB_CODE_QRY_NOT_ENOUGH_BUFFER, 0, 0x070B, "Query buffer limit has reached") // grant TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_EXPIRED, 0, 0x0800, "License expired") @@ -253,6 +263,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_SYN_INVALID_VERSION, 0, 0x0902, "Invalid Sy // wal TAOS_DEFINE_ERROR(TSDB_CODE_WAL_APP_ERROR, 0, 0x1000, "Unexpected generic error in wal") TAOS_DEFINE_ERROR(TSDB_CODE_WAL_FILE_CORRUPTED, 0, 0x1001, "WAL file is corrupted") +TAOS_DEFINE_ERROR(TSDB_CODE_WAL_SIZE_LIMIT, 0, 0x1002, "WAL size exceeds limit") // http TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_SERVER_OFFLINE, 0, 0x1100, "http server is not onlin") @@ -355,20 +366,28 @@ TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_VALUE_NULL, 0, 0x11A5, "value not TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_VALUE_TYPE, 0, 0x11A6, "value type should be boolean, number or string") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_OOM, 0, 0x2101, "out of memory") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_OOM, 0, 0x2100, "out of memory") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_CHAR_NOT_NUM, 0, 0x2101, "convertion not a valid literal input") TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_UNDEF, 0, 0x2102, "convertion undefined") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_TRUNC, 0, 0x2103, "convertion truncated") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_NOT_SUPPORT, 0, 0x2104, "convertion not supported") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_OUT_OF_RANGE, 0, 0x2105, "out of range") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_NOT_SUPPORT, 0, 0x2106, "not supported yet") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_INVALID_HANDLE, 0, 0x2107, "invalid handle") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_NO_RESULT, 0, 0x2108, "no result set") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_NO_FIELDS, 0, 0x2109, "no fields returned") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_INVALID_CURSOR, 0, 0x2110, "invalid cursor") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_STATEMENT_NOT_READY, 0, 0x2111, "statement not ready") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONNECTION_BUSY, 0, 0x2112, "connection still busy") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_BAD_CONNSTR, 0, 0x2113, "bad connection string") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_BAD_ARG, 0, 0x2114, "bad argument") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_TRUNC_FRAC, 0, 0x2103, "convertion fractional truncated") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_TRUNC, 0, 0x2104, "convertion truncated") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_NOT_SUPPORT, 0, 0x2105, "convertion not supported") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_OOR, 0, 0x2106, "convertion numeric value out of range") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_OUT_OF_RANGE, 0, 0x2107, "out of range") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_NOT_SUPPORT, 0, 0x2108, "not supported yet") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_INVALID_HANDLE, 0, 0x2109, "invalid handle") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_NO_RESULT, 0, 0x210a, "no result set") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_NO_FIELDS, 0, 0x210b, "no fields returned") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_INVALID_CURSOR, 0, 0x210c, 
"invalid cursor") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_STATEMENT_NOT_READY, 0, 0x210d, "statement not ready") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONNECTION_BUSY, 0, 0x210e, "connection still busy") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_BAD_CONNSTR, 0, 0x210f, "bad connection string") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_BAD_ARG, 0, 0x2110, "bad argument") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_NOT_VALID_TS, 0, 0x2111, "not a valid timestamp") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_SRC_TOO_LARGE, 0, 0x2112, "src too large") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_SRC_BAD_SEQ, 0, 0x2113, "src bad sequence") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_SRC_INCOMPLETE, 0, 0x2114, "src incomplete") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_SRC_GENERAL, 0, 0x2115, "src general") #ifdef TAOS_ERROR_C diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index 600347c44fdb2a89e03bf24413f22240bb2815f2..681fa4492959347d5af5591615c0b39dbacbef7e 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -106,6 +106,10 @@ TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY12, "dummy12" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY13, "dummy13" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY14, "dummy14" ) + +TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_NETWORK_TEST, "network-test" ) + + #ifndef TAOS_MESSAGE_C TSDB_MSG_TYPE_MAX // 105 #endif @@ -291,7 +295,7 @@ typedef struct { SSchema schema[]; // tagVal is padded after schema // char tagVal[]; -} SCMAlterTableMsg; +} SAlterTableMsg; typedef struct { SMsgHead head; @@ -308,12 +312,12 @@ typedef struct { } SUpdateTableTagValMsg; typedef struct { - char clientVersion[TSDB_VERSION_LEN]; - char msgVersion[TSDB_VERSION_LEN]; - char db[TSDB_TABLE_FNAME_LEN]; - char appName[TSDB_APPNAME_LEN]; + char clientVersion[TSDB_VERSION_LEN]; + char msgVersion[TSDB_VERSION_LEN]; + char db[TSDB_TABLE_FNAME_LEN]; + char appName[TSDB_APPNAME_LEN]; int32_t pid; -} SCMConnectMsg; +} SConnectMsg; typedef struct { char acctId[TSDB_ACCT_LEN]; @@ -324,7 +328,7 @@ typedef struct { int8_t reserved2; int32_t connId; SRpcEpSet epSet; -} SCMConnectRsp; +} SConnectRsp; typedef struct { int32_t maxUsers; @@ -344,18 +348,18 @@ typedef struct { char user[TSDB_USER_LEN]; char pass[TSDB_KEY_LEN]; SAcctCfg cfg; -} SCMCreateAcctMsg, SCMAlterAcctMsg; +} SCreateAcctMsg, SAlterAcctMsg; typedef struct { char user[TSDB_USER_LEN]; -} SCMDropUserMsg, SCMDropAcctMsg; +} SDropUserMsg, SDropAcctMsg; typedef struct { char user[TSDB_USER_LEN]; char pass[TSDB_KEY_LEN]; int8_t privilege; int8_t flag; -} SCMCreateUserMsg, SCMAlterUserMsg; +} SCreateUserMsg, SAlterUserMsg; typedef struct { int32_t contLen; @@ -370,11 +374,11 @@ typedef struct { int32_t vgId; uint64_t uid; char tableId[TSDB_TABLE_FNAME_LEN]; -} SMDDropSTableMsg; +} SDropSTableMsg; typedef struct { int32_t vgId; -} SMDDropVnodeMsg; +} SDropVnodeMsg; typedef struct SColIndex { int16_t colId; // column id @@ -388,6 +392,7 @@ typedef struct SColIndex { typedef struct SSqlFuncMsg { int16_t functionId; int16_t numOfParams; + int16_t resColId; // result column id, id of the current output column SColIndex colInfo; struct ArgElem { @@ -457,11 +462,6 @@ typedef struct STimeWindow { TSKEY ekey; } STimeWindow; -/* - * the outputCols is equalled to or larger than numOfCols - * e.g., select min(colName), max(colName), avg(colName) from table - * the outputCols will be 3 while the numOfCols is 1. 
- */ typedef struct { SMsgHead head; STimeWindow window; @@ -477,10 +477,11 @@ typedef struct { int64_t limit; int64_t offset; uint32_t queryType; // denote another query process - int16_t numOfOutput; // final output columns numbers + int16_t numOfOutput; // final output columns numbers int16_t tagNameRelType; // relation of tag criteria and tbname criteria int16_t fillType; // interpolate type uint64_t fillVal; // default value array list + int32_t secondStageOutput; int32_t tsOffset; // offset value in current msg body, NOTE: ts list is compressed int32_t tsLen; // total length of ts comp block int32_t tsNumOfBlocks; // ts comp block numbers @@ -540,12 +541,14 @@ typedef struct { int8_t replications; int8_t quorum; int8_t ignoreExist; -} SCMCreateDbMsg, SCMAlterDbMsg; + int8_t update; + int8_t reserve[9]; +} SCreateDbMsg, SAlterDbMsg; typedef struct { char db[TSDB_TABLE_FNAME_LEN]; uint8_t ignoreNotExists; -} SCMDropDbMsg, SCMUseDbMsg; +} SDropDbMsg, SUseDbMsg; // IMPORTANT: sizeof(SVnodeStatisticInfo) should not exceed // TSDB_FILE_HEADER_LEN/4 - TSDB_FILE_HEADER_VERSION_SIZE @@ -560,7 +563,7 @@ typedef struct { typedef struct { int32_t vgId; int8_t accessState; -} SDMVgroupAccess; +} SVgroupAccess; typedef struct { int32_t dnodeId; @@ -568,18 +571,29 @@ typedef struct { uint32_t numOfVnodes; char clusterId[TSDB_CLUSTER_ID_LEN]; char reserved[16]; -} SDMDnodeCfg; +} SDnodeCfg; typedef struct { - int32_t nodeId; - char nodeEp[TSDB_EP_LEN]; -} SDMMnodeInfo; + int32_t dnodeId; + uint16_t dnodePort; + char dnodeFqdn[TSDB_FQDN_LEN]; +} SDnodeEp; typedef struct { - int8_t inUse; - int8_t nodeNum; - SDMMnodeInfo nodeInfos[TSDB_MAX_REPLICA]; -} SDMMnodeInfos; + int32_t dnodeNum; + SDnodeEp dnodeEps[]; +} SDnodeEps; + +typedef struct { + int32_t mnodeId; + char mnodeEp[TSDB_EP_LEN]; +} SMnodeInfo; + +typedef struct { + int8_t inUse; + int8_t mnodeNum; + SMnodeInfo mnodeInfos[TSDB_MAX_REPLICA]; +} SMnodeInfos; typedef struct { int32_t numOfMnodes; // tsNumOfMnodes @@ -611,13 +625,13 @@ typedef struct { uint8_t reserve2[15]; SClusterCfg clusterCfg; SVnodeLoad load[]; -} SDMStatusMsg; +} SStatusMsg; typedef struct { - SDMMnodeInfos mnodes; - SDMDnodeCfg dnodeCfg; - SDMVgroupAccess vgAccess[]; -} SDMStatusRsp; + SMnodeInfos mnodes; + SDnodeCfg dnodeCfg; + SVgroupAccess vgAccess[]; +} SStatusRsp; typedef struct { uint32_t vgId; @@ -639,55 +653,56 @@ typedef struct { int8_t replications; int8_t wals; int8_t quorum; - int8_t reserved[16]; -} SMDVnodeCfg; + int8_t update; + int8_t reserved[15]; +} SVnodeCfg; typedef struct { int32_t nodeId; char nodeEp[TSDB_EP_LEN]; -} SMDVnodeDesc; +} SVnodeDesc; typedef struct { - char db[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN]; - SMDVnodeCfg cfg; - SMDVnodeDesc nodes[TSDB_MAX_REPLICA]; -} SMDCreateVnodeMsg, SMDAlterVnodeMsg; + char db[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN]; + SVnodeCfg cfg; + SVnodeDesc nodes[TSDB_MAX_REPLICA]; +} SCreateVnodeMsg, SAlterVnodeMsg; typedef struct { char tableId[TSDB_TABLE_FNAME_LEN]; int16_t createFlag; char tags[]; -} SCMTableInfoMsg; +} STableInfoMsg; typedef struct { int32_t numOfTables; char tableIds[]; -} SCMMultiTableInfoMsg; +} SMultiTableInfoMsg; -typedef struct SCMSTableVgroupMsg { +typedef struct SSTableVgroupMsg { int32_t numOfTables; -} SCMSTableVgroupMsg, SCMSTableVgroupRspMsg; +} SSTableVgroupMsg, SSTableVgroupRspMsg; typedef struct { int32_t vgId; int8_t numOfEps; SEpAddr1 epAddr[TSDB_MAX_REPLICA]; -} SCMVgroupInfo; +} SVgroupInfo; typedef struct { int32_t vgId; int8_t numOfEps; SEpAddrMsg epAddr[TSDB_MAX_REPLICA]; -} 
SCMVgroupMsg; +} SVgroupMsg; typedef struct { int32_t numOfVgroups; - SCMVgroupInfo vgroups[]; + SVgroupInfo vgroups[]; } SVgroupsInfo; typedef struct { int32_t numOfVgroups; - SCMVgroupMsg vgroups[]; + SVgroupMsg vgroups[]; } SVgroupsMsg; typedef struct STableMetaMsg { @@ -702,7 +717,7 @@ typedef struct STableMetaMsg { int16_t tversion; int32_t tid; uint64_t uid; - SCMVgroupMsg vgroup; + SVgroupMsg vgroup; SSchema schema[]; } STableMetaMsg; @@ -728,38 +743,38 @@ typedef struct { char db[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN]; uint16_t payloadLen; char payload[]; -} SCMShowMsg; +} SShowMsg; -typedef struct SCMShowRsp { +typedef struct SShowRsp { uint64_t qhandle; STableMetaMsg tableMeta; -} SCMShowRsp; +} SShowRsp; typedef struct { - char ep[TSDB_EP_LEN]; // end point, hostname:port -} SCMCreateDnodeMsg, SCMDropDnodeMsg; + char ep[TSDB_EP_LEN]; // end point, hostname:port +} SCreateDnodeMsg, SDropDnodeMsg; typedef struct { int32_t dnodeId; char dnodeEp[TSDB_EP_LEN]; // end point, hostname:port - SDMMnodeInfos mnodes; -} SMDCreateMnodeMsg; + SMnodeInfos mnodes; +} SCreateMnodeMsg; typedef struct { int32_t dnodeId; int32_t vgId; int32_t tid; -} SDMConfigTableMsg; +} SConfigTableMsg; typedef struct { uint32_t dnodeId; int32_t vgId; -} SDMConfigVnodeMsg; +} SConfigVnodeMsg; typedef struct { char ep[TSDB_EP_LEN]; // end point, hostname:port char config[64]; -} SMDCfgDnodeMsg, SCMCfgDnodeMsg; +} SCfgDnodeMsg; typedef struct { char sql[TSDB_SHOW_SQL_LEN]; @@ -781,13 +796,14 @@ typedef struct { } SStreamDesc; typedef struct { + char clientVer[TSDB_VERSION_LEN]; uint32_t connId; int32_t pid; int32_t numOfQueries; int32_t numOfStreams; char appName[TSDB_APPNAME_LEN]; char pData[]; -} SCMHeartBeatMsg; +} SHeartBeatMsg; typedef struct { uint32_t queryId; @@ -797,11 +813,11 @@ typedef struct { uint32_t connId; int8_t killConnection; SRpcEpSet epSet; -} SCMHeartBeatRsp; +} SHeartBeatRsp; typedef struct { char queryId[TSDB_KILL_MSG_LEN + 1]; -} SCMKillQueryMsg, SCMKillStreamMsg, SCMKillConnMsg; +} SKillQueryMsg, SKillStreamMsg, SKillConnMsg; typedef struct { int32_t vnode; @@ -810,7 +826,7 @@ typedef struct { uint64_t stime; // stream starting time int32_t status; char tableId[TSDB_TABLE_FNAME_LEN]; -} SMDAlterStreamMsg; +} SAlterStreamMsg; typedef struct { char user[TSDB_USER_LEN]; @@ -818,7 +834,7 @@ typedef struct { char encrypt; char secret[TSDB_KEY_LEN]; char ckey[TSDB_KEY_LEN]; -} SDMAuthMsg, SDMAuthRsp; +} SAuthMsg, SAuthRsp; #pragma pack(pop) diff --git a/src/inc/tcq.h b/src/inc/tcq.h index 32b75674c3278b3273fd4b98dd645f4168543155..7a0727f1b8dd7cdea1a815c174b9f85004eedf87 100644 --- a/src/inc/tcq.h +++ b/src/inc/tcq.h @@ -21,10 +21,10 @@ extern "C" { #include "tdataformat.h" -typedef int (*FCqWrite)(void *ahandle, void *pHead, int type); +typedef int32_t (*FCqWrite)(void *ahandle, void *pHead, int32_t qtype, void *pMsg); typedef struct { - int vgId; + int32_t vgId; char user[TSDB_USER_LEN]; char pass[TSDB_PASSWORD_LEN]; char db[TSDB_DB_NAME_LEN]; @@ -42,12 +42,12 @@ void cqStart(void *handle); void cqStop(void *handle); // cqCreate is called by TSDB to start an instance of CQ -void *cqCreate(void *handle, uint64_t uid, int sid, char *sqlStr, STSchema *pSchema); +void *cqCreate(void *handle, uint64_t uid, int32_t sid, char *sqlStr, STSchema *pSchema); // cqDrop is called by TSDB to stop an instance of CQ, handle is the return value of cqCreate void cqDrop(void *handle); -extern int cqDebugFlag; +extern int32_t cqDebugFlag; #ifdef __cplusplus diff --git a/src/inc/trpc.h b/src/inc/trpc.h index 
bdee917b5e8203743a79ca27d8fbc987569c35ab..0ce2e3da14d1cec204fc755db13da53f08295bff 100644 --- a/src/inc/trpc.h +++ b/src/inc/trpc.h @@ -78,18 +78,20 @@ typedef struct SRpcInit { int (*afp)(char *tableId, char *spi, char *encrypt, char *secret, char *ckey); } SRpcInit; +int32_t rpcInit(); +void rpcCleanup(); void *rpcOpen(const SRpcInit *pRpc); void rpcClose(void *); void *rpcMallocCont(int contLen); void rpcFreeCont(void *pCont); void *rpcReallocCont(void *ptr, int contLen); -void rpcSendRequest(void *thandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg); +void rpcSendRequest(void *thandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg, int64_t *rid); void rpcSendResponse(const SRpcMsg *pMsg); void rpcSendRedirectRsp(void *pConn, const SRpcEpSet *pEpSet); int rpcGetConnInfo(void *thandle, SRpcConnInfo *pInfo); void rpcSendRecv(void *shandle, SRpcEpSet *pEpSet, SRpcMsg *pReq, SRpcMsg *pRsp); int rpcReportProgress(void *pConn, char *pCont, int contLen); -void rpcCancelRequest(void *pContext); +void rpcCancelRequest(int64_t rid); #ifdef __cplusplus } diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h index 85f9b3bdc7f1cf806309f597ffad6955151b4c36..58859f42bc80daa3317d789950c1625c1533cf5f 100644 --- a/src/inc/tsdb.h +++ b/src/inc/tsdb.h @@ -46,7 +46,7 @@ extern "C" { typedef struct { void *appH; void *cqH; - int (*notifyStatus)(void *, int status); + int (*notifyStatus)(void *, int status, int eno); int (*eventCallBack)(void *); void *(*cqCreateFunc)(void *handle, uint64_t uid, int sid, char *sqlStr, STSchema *pSchema); void (*cqDropFunc)(void *handle); @@ -65,6 +65,7 @@ typedef struct { int32_t maxRowsPerFileBlock; // maximum rows per file block int8_t precision; int8_t compression; + int8_t update; } STsdbCfg; // --------- TSDB REPOSITORY USAGE STATISTICS @@ -82,7 +83,7 @@ STsdbCfg *tsdbGetCfg(const TSDB_REPO_T *repo); int tsdbCreateRepo(char *rootDir, STsdbCfg *pCfg); int32_t tsdbDropRepo(char *rootDir); TSDB_REPO_T *tsdbOpenRepo(char *rootDir, STsdbAppH *pAppH); -void tsdbCloseRepo(TSDB_REPO_T *repo, int toCommit); +int tsdbCloseRepo(TSDB_REPO_T *repo, int toCommit); int32_t tsdbConfigRepo(TSDB_REPO_T *repo, STsdbCfg *pCfg); int tsdbGetState(TSDB_REPO_T *repo); @@ -125,7 +126,7 @@ uint32_t tsdbGetFileInfo(TSDB_REPO_T *repo, char *name, uint32_t *index, uint32_ // the TSDB repository info typedef struct STsdbRepoInfo { STsdbCfg tsdbCfg; - int64_t version; // version of the repository + uint64_t version; // version of the repository int64_t tsdbTotalDataSize; // the original inserted data size int64_t tsdbTotalDiskSize; // the total disk size taken by this TSDB repository // TODO: Other informations to add @@ -135,7 +136,7 @@ STsdbRepoInfo *tsdbGetStatus(TSDB_REPO_T *pRepo); // the meter information report structure typedef struct { STableCfg tableCfg; - int64_t version; + uint64_t version; int64_t tableTotalDataSize; // In bytes int64_t tableTotalDiskSize; // In bytes } STableInfo; @@ -163,6 +164,12 @@ typedef struct STsdbQueryCond { SColumnInfo *colList; } STsdbQueryCond; +typedef struct SMemRef { + int32_t ref; + void *mem; + void *imem; +} SMemRef; + typedef struct SDataBlockInfo { STimeWindow window; int32_t rows; @@ -192,7 +199,7 @@ typedef struct { * @param qinfo query info handle from query processor * @return */ -TsdbQueryHandleT *tsdbQueryTables(TSDB_REPO_T *tsdb, STsdbQueryCond *pCond, STableGroupInfo *tableInfoGroup, void *qinfo); +TsdbQueryHandleT *tsdbQueryTables(TSDB_REPO_T *tsdb, STsdbQueryCond *pCond, STableGroupInfo *tableInfoGroup, void *qinfo, SMemRef* pRef); /** * Get the last row of the 
given query time window for all the tables in STableGroupInfo object. @@ -204,7 +211,7 @@ TsdbQueryHandleT *tsdbQueryTables(TSDB_REPO_T *tsdb, STsdbQueryCond *pCond, STab * @param tableInfo table list. * @return */ -TsdbQueryHandleT tsdbQueryLastRow(TSDB_REPO_T *tsdb, STsdbQueryCond *pCond, STableGroupInfo *tableInfo, void *qinfo); +TsdbQueryHandleT tsdbQueryLastRow(TSDB_REPO_T *tsdb, STsdbQueryCond *pCond, STableGroupInfo *tableInfo, void *qinfo, SMemRef* pRef); /** * get the queried table object list @@ -222,7 +229,7 @@ SArray* tsdbGetQueriedTableList(TsdbQueryHandleT *pHandle); * @return */ TsdbQueryHandleT tsdbQueryRowsInExternalWindow(TSDB_REPO_T *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList, - void *qinfo); + void *qinfo, SMemRef* pRef); /** * move to next block if exists @@ -314,6 +321,12 @@ void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle); */ void tsdbReportStat(void *repo, int64_t *totalPoints, int64_t *totalStorage, int64_t *compStorage); +int tsdbInitCommitQueue(int nthreads); +void tsdbDestroyCommitQueue(); +int tsdbSyncCommit(TSDB_REPO_T *repo); +void tsdbIncCommitRef(int vgId); +void tsdbDecCommitRef(int vgId); + #ifdef __cplusplus } #endif diff --git a/src/inc/tsync.h b/src/inc/tsync.h index ca0f70d104d603d176d89dd5b92979433f390466..967b254992f339ece749757584611bf061b9d7f4 100644 --- a/src/inc/tsync.h +++ b/src/inc/tsync.h @@ -24,18 +24,18 @@ extern "C" { #define TAOS_SYNC_MAX_INDEX 0x7FFFFFFF typedef enum _TAOS_SYNC_ROLE { - TAOS_SYNC_ROLE_OFFLINE, - TAOS_SYNC_ROLE_UNSYNCED, - TAOS_SYNC_ROLE_SYNCING, - TAOS_SYNC_ROLE_SLAVE, - TAOS_SYNC_ROLE_MASTER, + TAOS_SYNC_ROLE_OFFLINE = 0, + TAOS_SYNC_ROLE_UNSYNCED = 1, + TAOS_SYNC_ROLE_SYNCING = 2, + TAOS_SYNC_ROLE_SLAVE = 3, + TAOS_SYNC_ROLE_MASTER = 4 } ESyncRole; typedef enum _TAOS_SYNC_STATUS { - TAOS_SYNC_STATUS_INIT, - TAOS_SYNC_STATUS_START, - TAOS_SYNC_STATUS_FILE, - TAOS_SYNC_STATUS_CACHE, + TAOS_SYNC_STATUS_INIT = 0, + TAOS_SYNC_STATUS_START = 1, + TAOS_SYNC_STATUS_FILE = 2, + TAOS_SYNC_STATUS_CACHE = 3 } ESyncStatus; typedef struct { @@ -51,9 +51,9 @@ typedef struct { } SSyncCfg; typedef struct { - int selfIndex; - uint32_t nodeId[TAOS_SYNC_MAX_REPLICA]; - int role[TAOS_SYNC_MAX_REPLICA]; + int32_t selfIndex; + uint32_t nodeId[TAOS_SYNC_MAX_REPLICA]; + int32_t role[TAOS_SYNC_MAX_REPLICA]; } SNodesRole; /* @@ -68,10 +68,10 @@ typedef uint32_t (*FGetFileInfo)(void *ahandle, char *name, uint32_t *index, uin // get the wal file from index or after // return value, -1: error, 1:more wal files, 0:last WAL. 
if name[0]==0, no WAL file -typedef int (*FGetWalInfo)(void *ahandle, char *name, uint32_t *index); +typedef int32_t (*FGetWalInfo)(void *ahandle, char *fileName, int64_t *fileId); -// when a forward pkt is received, call this to handle data -typedef int (*FWriteToCache)(void *ahandle, void *pHead, int type); +// when a forward pkt is received, call this to handle data +typedef int32_t (*FWriteToCache)(void *ahandle, void *pHead, int32_t qtype, void *pMsg); // when forward is confirmed by peer, master call this API to notify app typedef void (*FConfirmForward)(void *ahandle, void *mhandle, int32_t code); @@ -83,48 +83,47 @@ typedef void (*FNotifyRole)(void *ahandle, int8_t role); typedef void (*FNotifyFlowCtrl)(void *ahandle, int32_t mseconds); // when data file is synced successfully, notity app -typedef int (*FNotifyFileSynced)(void *ahandle, uint64_t fversion); +typedef int32_t (*FNotifyFileSynced)(void *ahandle, uint64_t fversion); typedef struct { - int32_t vgId; // vgroup ID - uint64_t version; // initial version - SSyncCfg syncCfg; // configuration from mgmt - char path[128]; // path to the file - - void *ahandle; // handle provided by APP - FGetFileInfo getFileInfo; - FGetWalInfo getWalInfo; - FWriteToCache writeToCache; - FConfirmForward confirmForward; - FNotifyRole notifyRole; - FNotifyFlowCtrl notifyFlowCtrl; + int32_t vgId; // vgroup ID + uint64_t version; // initial version + SSyncCfg syncCfg; // configuration from mgmt + char path[128]; // path to the file + void * ahandle; // handle provided by APP + FGetFileInfo getFileInfo; + FGetWalInfo getWalInfo; + FWriteToCache writeToCache; + FConfirmForward confirmForward; + FNotifyRole notifyRole; + FNotifyFlowCtrl notifyFlowCtrl; FNotifyFileSynced notifyFileSynced; } SSyncInfo; -typedef void* tsync_h; +typedef void *tsync_h; int32_t syncInit(); void syncCleanUp(); -tsync_h syncStart(const SSyncInfo *); -void syncStop(tsync_h shandle); -int32_t syncReconfig(tsync_h shandle, const SSyncCfg *); -int32_t syncForwardToPeer(tsync_h shandle, void *pHead, void *mhandle, int qtype); -void syncConfirmForward(tsync_h shandle, uint64_t version, int32_t code); -void syncRecover(tsync_h shandle); // recover from other nodes: -int syncGetNodesRole(tsync_h shandle, SNodesRole *); +int64_t syncStart(const SSyncInfo *); +void syncStop(int64_t rid); +int32_t syncReconfig(int64_t rid, const SSyncCfg *); +int32_t syncForwardToPeer(int64_t rid, void *pHead, void *mhandle, int32_t qtype); +void syncConfirmForward(int64_t rid, uint64_t version, int32_t code); +void syncRecover(int64_t rid); // recover from other nodes: +int32_t syncGetNodesRole(int64_t rid, SNodesRole *); -extern char *syncRole[]; +extern char *syncRole[]; //global configurable parameters -extern int tsMaxSyncNum; -extern int tsSyncTcpThreads; -extern int tsMaxWatchFiles; -extern int tsSyncTimer; -extern int tsMaxFwdInfo; -extern int sDebugFlag; -extern char tsArbitrator[]; -extern uint16_t tsSyncPort; +extern int32_t tsMaxSyncNum; +extern int32_t tsSyncTcpThreads; +extern int32_t tsMaxWatchFiles; +extern int32_t tsSyncTimer; +extern int32_t tsMaxFwdInfo; +extern int32_t sDebugFlag; +extern char tsArbitrator[]; +extern uint16_t tsSyncPort; #ifdef __cplusplus } diff --git a/src/inc/ttokendef.h b/src/inc/ttokendef.h index a94cdaad15a5c2abe35b7a02502ff8ae95102cd9..0a5a3d2fa402c3dd03f1feef8cf05e446922972b 100644 --- a/src/inc/ttokendef.h +++ b/src/inc/ttokendef.h @@ -16,7 +16,6 @@ #ifndef TDENGINE_TTOKENDEF_H #define TDENGINE_TTOKENDEF_H - #define TK_ID 1 #define TK_BOOL 2 #define TK_TINYINT 
3 @@ -114,114 +113,116 @@ #define TK_FSYNC 95 #define TK_COMP 96 #define TK_PRECISION 97 -#define TK_LP 98 -#define TK_RP 99 -#define TK_TAGS 100 -#define TK_USING 101 -#define TK_AS 102 -#define TK_COMMA 103 -#define TK_NULL 104 -#define TK_SELECT 105 -#define TK_UNION 106 -#define TK_ALL 107 -#define TK_FROM 108 -#define TK_VARIABLE 109 -#define TK_INTERVAL 110 -#define TK_FILL 111 -#define TK_SLIDING 112 -#define TK_ORDER 113 -#define TK_BY 114 -#define TK_ASC 115 -#define TK_DESC 116 -#define TK_GROUP 117 -#define TK_HAVING 118 -#define TK_LIMIT 119 -#define TK_OFFSET 120 -#define TK_SLIMIT 121 -#define TK_SOFFSET 122 -#define TK_WHERE 123 -#define TK_NOW 124 -#define TK_RESET 125 -#define TK_QUERY 126 -#define TK_ADD 127 -#define TK_COLUMN 128 -#define TK_TAG 129 -#define TK_CHANGE 130 -#define TK_SET 131 -#define TK_KILL 132 -#define TK_CONNECTION 133 -#define TK_STREAM 134 -#define TK_COLON 135 -#define TK_ABORT 136 -#define TK_AFTER 137 -#define TK_ATTACH 138 -#define TK_BEFORE 139 -#define TK_BEGIN 140 -#define TK_CASCADE 141 -#define TK_CLUSTER 142 -#define TK_CONFLICT 143 -#define TK_COPY 144 -#define TK_DEFERRED 145 -#define TK_DELIMITERS 146 -#define TK_DETACH 147 -#define TK_EACH 148 -#define TK_END 149 -#define TK_EXPLAIN 150 -#define TK_FAIL 151 -#define TK_FOR 152 -#define TK_IGNORE 153 -#define TK_IMMEDIATE 154 -#define TK_INITIALLY 155 -#define TK_INSTEAD 156 -#define TK_MATCH 157 -#define TK_KEY 158 -#define TK_OF 159 -#define TK_RAISE 160 -#define TK_REPLACE 161 -#define TK_RESTRICT 162 -#define TK_ROW 163 -#define TK_STATEMENT 164 -#define TK_TRIGGER 165 -#define TK_VIEW 166 -#define TK_COUNT 167 -#define TK_SUM 168 -#define TK_AVG 169 -#define TK_MIN 170 -#define TK_MAX 171 -#define TK_FIRST 172 -#define TK_LAST 173 -#define TK_TOP 174 -#define TK_BOTTOM 175 -#define TK_STDDEV 176 -#define TK_PERCENTILE 177 -#define TK_APERCENTILE 178 -#define TK_LEASTSQUARES 179 -#define TK_HISTOGRAM 180 -#define TK_DIFF 181 -#define TK_SPREAD 182 -#define TK_TWA 183 -#define TK_INTERP 184 -#define TK_LAST_ROW 185 -#define TK_RATE 186 -#define TK_IRATE 187 -#define TK_SUM_RATE 188 -#define TK_SUM_IRATE 189 -#define TK_AVG_RATE 190 -#define TK_AVG_IRATE 191 -#define TK_TBID 192 -#define TK_SEMI 193 -#define TK_NONE 194 -#define TK_PREV 195 -#define TK_LINEAR 196 -#define TK_IMPORT 197 -#define TK_METRIC 198 -#define TK_TBNAME 199 -#define TK_JOIN 200 -#define TK_METRICS 201 -#define TK_STABLE 202 -#define TK_INSERT 203 -#define TK_INTO 204 -#define TK_VALUES 205 +#define TK_UPDATE 98 +#define TK_LP 99 +#define TK_RP 100 +#define TK_TAGS 101 +#define TK_USING 102 +#define TK_AS 103 +#define TK_COMMA 104 +#define TK_NULL 105 +#define TK_SELECT 106 +#define TK_UNION 107 +#define TK_ALL 108 +#define TK_FROM 109 +#define TK_VARIABLE 110 +#define TK_INTERVAL 111 +#define TK_FILL 112 +#define TK_SLIDING 113 +#define TK_ORDER 114 +#define TK_BY 115 +#define TK_ASC 116 +#define TK_DESC 117 +#define TK_GROUP 118 +#define TK_HAVING 119 +#define TK_LIMIT 120 +#define TK_OFFSET 121 +#define TK_SLIMIT 122 +#define TK_SOFFSET 123 +#define TK_WHERE 124 +#define TK_NOW 125 +#define TK_RESET 126 +#define TK_QUERY 127 +#define TK_ADD 128 +#define TK_COLUMN 129 +#define TK_TAG 130 +#define TK_CHANGE 131 +#define TK_SET 132 +#define TK_KILL 133 +#define TK_CONNECTION 134 +#define TK_STREAM 135 +#define TK_COLON 136 +#define TK_ABORT 137 +#define TK_AFTER 138 +#define TK_ATTACH 139 +#define TK_BEFORE 140 +#define TK_BEGIN 141 +#define TK_CASCADE 142 +#define TK_CLUSTER 143 +#define TK_CONFLICT 144 
+#define TK_COPY 145 +#define TK_DEFERRED 146 +#define TK_DELIMITERS 147 +#define TK_DETACH 148 +#define TK_EACH 149 +#define TK_END 150 +#define TK_EXPLAIN 151 +#define TK_FAIL 152 +#define TK_FOR 153 +#define TK_IGNORE 154 +#define TK_IMMEDIATE 155 +#define TK_INITIALLY 156 +#define TK_INSTEAD 157 +#define TK_MATCH 158 +#define TK_KEY 159 +#define TK_OF 160 +#define TK_RAISE 161 +#define TK_REPLACE 162 +#define TK_RESTRICT 163 +#define TK_ROW 164 +#define TK_STATEMENT 165 +#define TK_TRIGGER 166 +#define TK_VIEW 167 +#define TK_COUNT 168 +#define TK_SUM 169 +#define TK_AVG 170 +#define TK_MIN 171 +#define TK_MAX 172 +#define TK_FIRST 173 +#define TK_LAST 174 +#define TK_TOP 175 +#define TK_BOTTOM 176 +#define TK_STDDEV 177 +#define TK_PERCENTILE 178 +#define TK_APERCENTILE 179 +#define TK_LEASTSQUARES 180 +#define TK_HISTOGRAM 181 +#define TK_DIFF 182 +#define TK_SPREAD 183 +#define TK_TWA 184 +#define TK_INTERP 185 +#define TK_LAST_ROW 186 +#define TK_RATE 187 +#define TK_IRATE 188 +#define TK_SUM_RATE 189 +#define TK_SUM_IRATE 190 +#define TK_AVG_RATE 191 +#define TK_AVG_IRATE 192 +#define TK_TBID 193 +#define TK_SEMI 194 +#define TK_NONE 195 +#define TK_PREV 196 +#define TK_LINEAR 197 +#define TK_IMPORT 198 +#define TK_METRIC 199 +#define TK_TBNAME 200 +#define TK_JOIN 201 +#define TK_METRICS 202 +#define TK_STABLE 203 +#define TK_INSERT 204 +#define TK_INTO 205 +#define TK_VALUES 206 + #define TK_SPACE 300 #define TK_COMMENT 301 diff --git a/src/inc/ttype.h b/src/inc/ttype.h new file mode 100644 index 0000000000000000000000000000000000000000..7d5779c43f7c05ed7675cb59ef3036c14f852938 --- /dev/null +++ b/src/inc/ttype.h @@ -0,0 +1,36 @@ +#ifndef TDENGINE_TTYPE_H +#define TDENGINE_TTYPE_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "taosdef.h" + +#define GET_TYPED_DATA(_v, _finalType, _type, _data) \ + switch (_type) { \ + case TSDB_DATA_TYPE_TINYINT: \ + (_v) = (_finalType)GET_INT8_VAL(_data); \ + break; \ + case TSDB_DATA_TYPE_SMALLINT: \ + (_v) = (_finalType)GET_INT16_VAL(_data); \ + break; \ + case TSDB_DATA_TYPE_BIGINT: \ + (_v) = (_finalType)(GET_INT64_VAL(_data)); \ + break; \ + case TSDB_DATA_TYPE_FLOAT: \ + (_v) = (_finalType)GET_FLOAT_VAL(_data); \ + break; \ + case TSDB_DATA_TYPE_DOUBLE: \ + (_v) = (_finalType)GET_DOUBLE_VAL(_data); \ + break; \ + default: \ + (_v) = (_finalType)GET_INT32_VAL(_data); \ + break; \ + }; + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TTYPE_H diff --git a/src/inc/twal.h b/src/inc/twal.h index 92204abd7d34a9ee2eebf1b74c2e8d58b9599f17..8dd3a8a91209e840abeb9560f94a52ce362492a9 100644 --- a/src/inc/twal.h +++ b/src/inc/twal.h @@ -19,42 +19,53 @@ extern "C" { #endif -#define TAOS_WAL_NOLOG 0 -#define TAOS_WAL_WRITE 1 -#define TAOS_WAL_FSYNC 2 - +typedef enum { + TAOS_WAL_NOLOG = 0, + TAOS_WAL_WRITE = 1, + TAOS_WAL_FSYNC = 2 +} EWalType; + +typedef enum { + TAOS_WAL_NOT_KEEP = 0, + TAOS_WAL_KEEP = 1 +} EWalKeep; + typedef struct { - int8_t msgType; - int8_t reserved[3]; - int32_t len; - uint64_t version; - uint32_t signature; - uint32_t cksum; - char cont[]; + int8_t msgType; + int8_t sver; + int8_t reserved[2]; + int32_t len; + uint64_t version; + uint32_t signature; + uint32_t cksum; + char cont[]; } SWalHead; typedef struct { - int8_t walLevel; // wal level - int32_t fsyncPeriod; // millisecond - int8_t wals; // number of WAL files; - int8_t keep; // keep the wal file when closed + int32_t vgId; + int32_t fsyncPeriod; // millisecond + EWalType walLevel; // wal level + EWalKeep keep; // keep the wal file when closed } SWalCfg; 
-typedef void* twalh; // WAL HANDLE -typedef int (*FWalWrite)(void *ahandle, void *pHead, int type); - -twalh walOpen(const char *path, const SWalCfg *pCfg); -int walAlter(twalh pWal, const SWalCfg *pCfg); -void walClose(twalh); -int walRenew(twalh); -int walWrite(twalh, SWalHead *); -void walFsync(twalh); -int walRestore(twalh, void *pVnode, FWalWrite writeFp); -int walGetWalFile(twalh, char *name, uint32_t *index); -int64_t walGetVersion(twalh); +typedef void * twalh; // WAL HANDLE +typedef int32_t FWalWrite(void *ahandle, void *pHead, int32_t qtype, void *pMsg); -extern int wDebugFlag; +int32_t walInit(); +void walCleanUp(); +twalh walOpen(char *path, SWalCfg *pCfg); +int32_t walAlter(twalh pWal, SWalCfg *pCfg); +void walStop(twalh); +void walClose(twalh); +int32_t walRenew(twalh); +void walRemoveOneOldFile(twalh); +void walRemoveAllOldFiles(twalh); +int32_t walWrite(twalh, SWalHead *); +void walFsync(twalh, bool forceFsync); +int32_t walRestore(twalh, void *pVnode, FWalWrite writeFp); +int32_t walGetWalFile(twalh, char *fileName, int64_t *fileId); +uint64_t walGetVersion(twalh); #ifdef __cplusplus } diff --git a/src/inc/vnode.h b/src/inc/vnode.h index fdce4d62794075bd2e7027b125780fbd7a2deaed..4e8389498b5ced87f4ff07dddac817614f5af368 100644 --- a/src/inc/vnode.h +++ b/src/inc/vnode.h @@ -20,53 +20,70 @@ extern "C" { #endif +#include "twal.h" + typedef enum _VN_STATUS { - TAOS_VN_STATUS_INIT, - TAOS_VN_STATUS_READY, - TAOS_VN_STATUS_CLOSING, - TAOS_VN_STATUS_UPDATING, - TAOS_VN_STATUS_RESET, -} EVnStatus; + TAOS_VN_STATUS_INIT = 0, + TAOS_VN_STATUS_READY = 1, + TAOS_VN_STATUS_CLOSING = 2, + TAOS_VN_STATUS_UPDATING = 3, + TAOS_VN_STATUS_RESET = 4, +} EVnodeStatus; typedef struct { - int len; - void *rsp; - void *qhandle; //used by query and retrieve msg + int32_t len; + void * rsp; + void * qhandle; // used by query and retrieve msg } SRspRet; typedef struct { + int32_t code; + int32_t contLen; + void * rpcHandle; + void * rpcAhandle; + void * qhandle; + int8_t qtype; + int8_t msgType; + SRspRet rspRet; + char pCont[]; +} SVReadMsg; + +typedef struct { + int32_t code; + int32_t processedCount; + void * rpcHandle; + void * rpcAhandle; SRspRet rspRet; - void *pCont; - int32_t contLen; - SRpcMsg rpcMsg; -} SReadMsg; + char reserveForSync[16]; + SWalHead pHead[]; +} SVWriteMsg; extern char *vnodeStatus[]; -int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg); +int32_t vnodeCreate(SCreateVnodeMsg *pVnodeCfg); int32_t vnodeDrop(int32_t vgId); int32_t vnodeOpen(int32_t vgId, char *rootDir); -int32_t vnodeAlter(void *pVnode, SMDCreateVnodeMsg *pVnodeCfg); +int32_t vnodeAlter(void *pVnode, SCreateVnodeMsg *pVnodeCfg); int32_t vnodeClose(int32_t vgId); void* vnodeAcquire(int32_t vgId); // add refcount -void* vnodeAcquireRqueue(int32_t vgId); // add refCount, get read queue -void* vnodeAcquireWqueue(int32_t vgId); // add recCount, get write queue void vnodeRelease(void *pVnode); // dec refCount void* vnodeGetWal(void *pVnode); -int32_t vnodeProcessWrite(void *pVnode, int qtype, void *pHead, void *item); -int32_t vnodeCheckWrite(void *pVnode); +int32_t vnodeWriteToWQueue(void *pVnode, void *pHead, int32_t qtype, void *pRpcMsg); +void vnodeFreeFromWQueue(void *pVnode, SVWriteMsg *pWrite); +int32_t vnodeProcessWrite(void *pVnode, void *pHead, int32_t qtype, void *pRspRet); int32_t vnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes); -void vnodeBuildStatusMsg(void *param); -void vnodeConfirmForward(void *param, uint64_t version, int32_t code); -void vnodeSetAccess(SDMVgroupAccess *pAccess, int32_t 
numOfVnodes); +void vnodeBuildStatusMsg(void *pStatus); +void vnodeConfirmForward(void *pVnode, uint64_t version, int32_t code); +void vnodeSetAccess(SVgroupAccess *pAccess, int32_t numOfVnodes); int32_t vnodeInitResources(); void vnodeCleanupResources(); -int32_t vnodeProcessRead(void *pVnode, SReadMsg *pReadMsg); -int32_t vnodeCheckRead(void *pVnode); +int32_t vnodeWriteToRQueue(void *pVnode, void *pCont, int32_t contLen, int8_t qtype, void *rparam); +void vnodeFreeFromRQueue(void *pVnode, SVReadMsg *pRead); +int32_t vnodeProcessRead(void *pVnode, SVReadMsg *pRead); #ifdef __cplusplus } diff --git a/src/kit/CMakeLists.txt b/src/kit/CMakeLists.txt index 77db79e22003d04701bf7417cc9ebc06b202533e..66e8cf73988ab25db7544b9a52215d2279630c63 100644 --- a/src/kit/CMakeLists.txt +++ b/src/kit/CMakeLists.txt @@ -3,3 +3,4 @@ PROJECT(TDengine) ADD_SUBDIRECTORY(shell) ADD_SUBDIRECTORY(taosdemo) +ADD_SUBDIRECTORY(taosdump) diff --git a/src/kit/shell/inc/shell.h b/src/kit/shell/inc/shell.h index f508d186083c84482080f2f8fe251173733f1366..d65c943e28d7d5a63aba4fc1839a4ba9cf744746 100644 --- a/src/kit/shell/inc/shell.h +++ b/src/kit/shell/inc/shell.h @@ -60,7 +60,7 @@ typedef struct SShellArguments { extern void shellParseArgument(int argc, char* argv[], SShellArguments* arguments); extern TAOS* shellInit(SShellArguments* args); extern void* shellLoopQuery(void* arg); -extern void taos_error(TAOS_RES* tres); +extern void taos_error(TAOS_RES* tres, int64_t st); extern int regex_match(const char* s, const char* reg, int cflags); void shellReadCommand(TAOS* con, char command[]); int32_t shellRunCommand(TAOS* con, char* command); diff --git a/src/kit/shell/src/shellDarwin.c b/src/kit/shell/src/shellDarwin.c index ffe537dd91834247460f04c47efff5ebe8687d7e..995b56f341b36b84e413aec154ae1abcc832741d 100644 --- a/src/kit/shell/src/shellDarwin.c +++ b/src/kit/shell/src/shellDarwin.c @@ -229,8 +229,8 @@ void shellReadCommand(TAOS *con, char *command) { printf("\n"); if (isReadyGo(&cmd)) { sprintf(command, "%s%s", cmd.buffer, cmd.command); - taosTFree(cmd.buffer); - taosTFree(cmd.command); + tfree(cmd.buffer); + tfree(cmd.command); return; } else { updateBuffer(&cmd); diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index 748b7e792982352de611227d82a844448d251ee2..22f01ac142c72603b9fa0595ce8c1fc89c626d7e 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -193,7 +193,7 @@ int32_t shellRunCommand(TAOS* con, char* command) { history.hist[(history.hend + MAX_HISTORY_SIZE - 1) % MAX_HISTORY_SIZE] == NULL || strcmp(command, history.hist[(history.hend + MAX_HISTORY_SIZE - 1) % MAX_HISTORY_SIZE]) != 0) { if (history.hist[history.hend] != NULL) { - taosTFree(history.hist[history.hend]); + tfree(history.hist[history.hend]); } history.hist[history.hend] = strdup(command); @@ -244,7 +244,7 @@ int32_t shellRunCommand(TAOS* con, char* command) { } *p++ = c; - if (c == ';') { + if (c == ';' && quote == 0) { c = *p; *p = 0; if (shellRunSingleCommand(con, cmd) < 0) { @@ -296,7 +296,7 @@ void shellRunCommandOnServer(TAOS *con, char command[]) { TAOS_RES* pSql = taos_query_h(con, command, &result); if (taos_errno(pSql)) { - taos_error(pSql); + taos_error(pSql, st); return; } @@ -770,7 +770,7 @@ void read_history() { return; } - while ((read_size = taosGetline(&line, &line_size, f)) != -1) { + while ((read_size = tgetline(&line, &line_size, f)) != -1) { line[read_size - 1] = '\0'; history.hist[history.hend] = strdup(line); @@ -800,16 +800,17 @@ void write_history() { for (int 
i = history.hstart; i != history.hend;) { if (history.hist[i] != NULL) { fprintf(f, "%s\n", history.hist[i]); - taosTFree(history.hist[i]); + tfree(history.hist[i]); } i = (i + 1) % MAX_HISTORY_SIZE; } fclose(f); } -void taos_error(TAOS_RES *tres) { +void taos_error(TAOS_RES *tres, int64_t st) { + int64_t et = taosGetTimestampUs(); atomic_store_ptr(&result, 0); - fprintf(stderr, "\nDB error: %s\n", taos_errstr(tres)); + fprintf(stderr, "\nDB error: %s (%.6fs)\n", taos_errstr(tres), (et - st) / 1E6); taos_free_result(tres); } @@ -853,7 +854,7 @@ void source_file(TAOS *con, char *fptr) { return; } - while ((read_len = taosGetline(&line, &line_len, f)) != -1) { + while ((read_len = tgetline(&line, &line_len, f)) != -1) { if (read_len >= tsMaxSQLStringLen) continue; line[--read_len] = '\0'; diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c index 6f5ea33d79f09219f49ee4094de2c377648b2290..04f5824d8da547557a4dc7fce599d721c1885611 100644 --- a/src/kit/shell/src/shellLinux.c +++ b/src/kit/shell/src/shellLinux.c @@ -46,7 +46,7 @@ static struct argp_option options[] = { {"thread", 'T', "THREADNUM", 0, "Number of threads when using multi-thread to import data."}, {"database", 'd', "DATABASE", 0, "Database to use when connecting to the server."}, {"timezone", 't', "TIMEZONE", 0, "Time zone of the shell, default is local."}, - {"netrole", 'n', "NETROLE", 0, "Net role when network connectivity test, default is NULL, valid option: client | server."}, + {"netrole", 'n', "NETROLE", 0, "Net role when network connectivity test, default is NULL, options: client|clients|server."}, {"endport", 'e', "ENDPORT", 0, "Net test end port, default is 6042."}, {"pktlen", 'l', "PKTLEN", 0, "Packet length used for net test, default is 1000 bytes."}, {0}}; @@ -232,8 +232,8 @@ void shellReadCommand(TAOS *con, char *command) { printf("\n"); if (isReadyGo(&cmd)) { sprintf(command, "%s%s", cmd.buffer, cmd.command); - taosTFree(cmd.buffer); - taosTFree(cmd.command); + tfree(cmd.buffer); + tfree(cmd.command); return; } else { updateBuffer(&cmd); @@ -351,7 +351,7 @@ void *shellLoopQuery(void *arg) { reset_terminal_mode(); } while (shellRunCommand(con, command) == 0); - taosTFree(command); + tfree(command); exitShell(); pthread_cleanup_pop(1); diff --git a/src/kit/shell/src/shellMain.c b/src/kit/shell/src/shellMain.c index 6cb7c669cc7a08434b2558588067d007b51b3595..2083ad3e9b7d5a101f35af4fe20dcd38957d5be2 100644 --- a/src/kit/shell/src/shellMain.c +++ b/src/kit/shell/src/shellMain.c @@ -80,7 +80,10 @@ int main(int argc, char* argv[]) { shellParseArgument(argc, argv, &args); if (args.netTestRole && args.netTestRole[0] != 0) { - taosNetTest(args.host, (uint16_t)args.port, (uint16_t)args.endPort, args.pktLen, args.netTestRole); + taos_init(); + CmdArguments cmdArgs; + memcpy(&cmdArgs, &args, sizeof(SShellArguments)); + taosNetTest(&cmdArgs); exit(0); } diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 699e96428e201e998c1a4bb9cb8cd3c835e61364..53e7d2398450fe22a11da1e7254e0c2ab5f02ea4 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -475,6 +475,7 @@ typedef struct { tsem_t mutex_sem; int notFinished; tsem_t lock_sem; + int counter; } info; typedef struct { @@ -766,6 +767,7 @@ int main(int argc, char *argv[]) { t_info->data_of_rate = rate; t_info->end_table_id = i < b ? 
last + a : last + a - 1; last = t_info->end_table_id + 1; + t_info->counter = 0; tsem_init(&(t_info->mutex_sem), 0, 1); t_info->notFinished = t_info->end_table_id - t_info->start_table_id + 1; @@ -788,14 +790,14 @@ int main(int argc, char *argv[]) { printf("ASYNC Insert with %d connections:\n", threads); } - fprintf(fp, "|%10.d | %10.2f | %10.2f | %10.4f |\n\n", - ntables * nrecords_per_table, ntables * nrecords_per_table / t, - (ntables * nrecords_per_table) / (t * nrecords_per_request), + fprintf(fp, "|%"PRIu64" | %10.2f | %10.2f | %10.4f |\n\n", + (int64_t)ntables * nrecords_per_table, ntables * nrecords_per_table / t, + ((int64_t)ntables * nrecords_per_table) / (t * nrecords_per_request), t * 1000); - printf("Spent %.4f seconds to insert %d records with %d record(s) per request: %.2f records/second\n", - t, ntables * nrecords_per_table, nrecords_per_request, - ntables * nrecords_per_table / t); + printf("Spent %.4f seconds to insert %"PRIu64" records with %d record(s) per request: %.2f records/second\n", + t, (int64_t)ntables * nrecords_per_table, nrecords_per_request, + (int64_t)ntables * nrecords_per_table / t); for (int i = 0; i < threads; i++) { info *t_info = infos + i; @@ -879,6 +881,7 @@ int main(int argc, char *argv[]) { taos_close(rInfo->taos); } + taos_cleanup(); return 0; } @@ -955,7 +958,7 @@ void querySqlFile(TAOS* taos, char* sqlFile) double t = getCurrentTime(); - while ((read_len = taosGetline(&line, &line_len, fp)) != -1) { + while ((read_len = tgetline(&line, &line_len, fp)) != -1) { if (read_len >= MAX_SQL_SIZE) continue; line[--read_len] = '\0'; @@ -1283,68 +1286,39 @@ void *syncWrite(void *sarg) { void *asyncWrite(void *sarg) { info *winfo = (info *)sarg; - - sTable *tb_infos = (sTable *)malloc(sizeof(sTable) * (winfo->end_table_id - winfo->start_table_id + 1)); - - for (int tID = winfo->start_table_id; tID <= winfo->end_table_id; tID++) { - sTable *tb_info = tb_infos + tID - winfo->start_table_id; - tb_info->data_type = winfo->datatype; - tb_info->ncols_per_record = winfo->ncols_per_record; - tb_info->taos = winfo->taos; - sprintf(tb_info->tb_name, "%s.%s%d", winfo->db_name, winfo->tb_prefix, tID); - tb_info->timestamp = winfo->start_time; - tb_info->counter = 0; - tb_info->target = winfo->nrecords_per_table; - tb_info->len_of_binary = winfo->len_of_binary; - tb_info->nrecords_per_request = winfo->nrecords_per_request; - tb_info->mutex_sem = &(winfo->mutex_sem); - tb_info->notFinished = &(winfo->notFinished); - tb_info->lock_sem = &(winfo->lock_sem); - tb_info->data_of_order = winfo->data_of_order; - tb_info->data_of_rate = winfo->data_of_rate; - - /* char buff[BUFFER_SIZE] = "\0"; */ - /* sprintf(buff, "insert into %s values (0, 0)", tb_info->tb_name); */ - /* queryDB(tb_info->taos,buff); */ - - taos_query_a(winfo->taos, "show databases", callBack, tb_info); - } + taos_query_a(winfo->taos, "show databases", callBack, winfo); tsem_wait(&(winfo->lock_sem)); - free(tb_infos); return NULL; } void callBack(void *param, TAOS_RES *res, int code) { - sTable *tb_info = (sTable *)param; - char **datatype = tb_info->data_type; - int ncols_per_record = tb_info->ncols_per_record; - int len_of_binary = tb_info->len_of_binary; - int64_t tmp_time = tb_info->timestamp; - - if (code < 0) { - fprintf(stderr, "failed to insert data %d:reason; %s\n", code, taos_errstr(res)); - exit(EXIT_FAILURE); - } + info* winfo = (info*)param; + char **datatype = winfo->datatype; + int ncols_per_record = winfo->ncols_per_record; + int len_of_binary = winfo->len_of_binary; - // If finished; - if 
(tb_info->counter >= tb_info->target) { - tsem_wait(tb_info->mutex_sem); - (*(tb_info->notFinished))--; - if (*(tb_info->notFinished) == 0) tsem_post(tb_info->lock_sem); - tsem_post(tb_info->mutex_sem); + int64_t tmp_time = winfo->start_time; + char *buffer = calloc(1, BUFFER_SIZE); + char *data = calloc(1, MAX_DATA_SIZE); + char *pstr = buffer; + pstr += sprintf(pstr, "insert into %s.%s%d values", winfo->db_name, winfo->tb_prefix, winfo->start_table_id); + if (winfo->counter >= winfo->nrecords_per_table) { + winfo->start_table_id++; + winfo->counter = 0; + } + if (winfo->start_table_id > winfo->end_table_id) { + tsem_post(&winfo->lock_sem); + free(buffer); + free(data); + taos_free_result(res); return; } - - char buffer[BUFFER_SIZE] = "\0"; - char data[MAX_DATA_SIZE]; - char *pstr = buffer; - pstr += sprintf(pstr, "insert into %s values", tb_info->tb_name); - - for (int i = 0; i < tb_info->nrecords_per_request; i++) { + + for (int i = 0; i < winfo->nrecords_per_request; i++) { int rand_num = rand() % 100; - if (tb_info->data_of_order ==1 && rand_num < tb_info->data_of_rate) + if (winfo->data_of_order ==1 && rand_num < winfo->data_of_rate) { int64_t d = tmp_time - rand() % 1000000 + rand_num; generateData(data, datatype, ncols_per_record, d, len_of_binary); @@ -1353,15 +1327,15 @@ void callBack(void *param, TAOS_RES *res, int code) { generateData(data, datatype, ncols_per_record, tmp_time += 1000, len_of_binary); } pstr += sprintf(pstr, "%s", data); - tb_info->counter++; + winfo->counter++; - if (tb_info->counter >= tb_info->target) { + if (winfo->counter >= winfo->nrecords_per_table) { break; } } - tb_info->timestamp = tmp_time; - - taos_query_a(tb_info->taos, buffer, callBack, tb_info); + taos_query_a(winfo->taos, buffer, callBack, winfo); + free(buffer); + free(data); taos_free_result(res); } diff --git a/src/kit/taosdump/CMakeLists.txt b/src/kit/taosdump/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..dcdbd486154b2673459e25754dc75457851a4df4 --- /dev/null +++ b/src/kit/taosdump/CMakeLists.txt @@ -0,0 +1,16 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +PROJECT(TDengine) + +INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) +INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc) +INCLUDE_DIRECTORIES(inc) +AUX_SOURCE_DIRECTORY(. SRC) + +IF (TD_LINUX) + ADD_EXECUTABLE(taosdump ${SRC}) + IF (TD_SOMODE_STATIC) + TARGET_LINK_LIBRARIES(taosdump taos_static) + ELSE () + TARGET_LINK_LIBRARIES(taosdump taos) + ENDIF () +ENDIF () diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c new file mode 100644 index 0000000000000000000000000000000000000000..88f07ee60238609f4ae0d100bcae368f1980d8d2 --- /dev/null +++ b/src/kit/taosdump/taosdump.c @@ -0,0 +1,2282 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include "os.h" +#include "taos.h" +#include "taosdef.h" +#include "taosmsg.h" +#include "tglobal.h" +#include "tsclient.h" +#include "tsdb.h" +#include "tutil.h" + +#define COMMAND_SIZE 65536 +//#define DEFAULT_DUMP_FILE "taosdump.sql" + +int converStringToReadable(char *str, int size, char *buf, int bufsize); +int convertNCharToReadable(char *str, int size, char *buf, int bufsize); +void taosDumpCharset(FILE *fp); +void taosLoadFileCharset(FILE *fp, char *fcharset); + +typedef struct { + short bytes; + int8_t type; +} SOColInfo; + +// -------------------------- SHOW DATABASE INTERFACE----------------------- +enum _show_db_index { + TSDB_SHOW_DB_NAME_INDEX, + TSDB_SHOW_DB_CREATED_TIME_INDEX, + TSDB_SHOW_DB_NTABLES_INDEX, + TSDB_SHOW_DB_VGROUPS_INDEX, + TSDB_SHOW_DB_REPLICA_INDEX, + TSDB_SHOW_DB_QUORUM_INDEX, + TSDB_SHOW_DB_DAYS_INDEX, + TSDB_SHOW_DB_KEEP_INDEX, + TSDB_SHOW_DB_CACHE_INDEX, + TSDB_SHOW_DB_BLOCKS_INDEX, + TSDB_SHOW_DB_MINROWS_INDEX, + TSDB_SHOW_DB_MAXROWS_INDEX, + TSDB_SHOW_DB_WALLEVEL_INDEX, + TSDB_SHOW_DB_FSYNC_INDEX, + TSDB_SHOW_DB_COMP_INDEX, + TSDB_SHOW_DB_PRECISION_INDEX, + TSDB_SHOW_DB_UPDATE_INDEX, + TSDB_SHOW_DB_STATUS_INDEX, + TSDB_MAX_SHOW_DB +}; + +// -----------------------------------------SHOW TABLES CONFIGURE ------------------------------------- +enum _show_tables_index { + TSDB_SHOW_TABLES_NAME_INDEX, + TSDB_SHOW_TABLES_CREATED_TIME_INDEX, + TSDB_SHOW_TABLES_COLUMNS_INDEX, + TSDB_SHOW_TABLES_METRIC_INDEX, + TSDB_MAX_SHOW_TABLES +}; + +// ---------------------------------- DESCRIBE METRIC CONFIGURE ------------------------------ +enum _describe_table_index { + TSDB_DESCRIBE_METRIC_FIELD_INDEX, + TSDB_DESCRIBE_METRIC_TYPE_INDEX, + TSDB_DESCRIBE_METRIC_LENGTH_INDEX, + TSDB_DESCRIBE_METRIC_NOTE_INDEX, + TSDB_MAX_DESCRIBE_METRIC +}; + +typedef struct { + char field[TSDB_COL_NAME_LEN + 1]; + char type[16]; + int length; + char note[128]; +} SColDes; + +typedef struct { + char name[TSDB_COL_NAME_LEN + 1]; + SColDes cols[]; +} STableDef; + +extern char version[]; + +typedef struct { + char name[TSDB_DB_NAME_LEN + 1]; + int32_t tables; + int32_t vgroups; + int16_t replications; + int16_t quorum; + int16_t daysPerFile; + int16_t daysToKeep; + int16_t daysToKeep1; + int16_t daysToKeep2; + int32_t cacheBlockSize; //MB + int32_t totalBlocks; + int32_t minRowsPerFileBlock; + int32_t maxRowsPerFileBlock; + int8_t walLevel; + int32_t fsyncPeriod; + int8_t compression; + int8_t precision; // time resolution + int8_t update; +} SDbInfo; + +typedef struct { + char name[TSDB_TABLE_NAME_LEN + 1]; + char metric[TSDB_TABLE_NAME_LEN + 1]; +} STableRecord; + +typedef struct { + bool isMetric; + STableRecord tableRecord; +} STableRecordInfo; + +typedef struct { + pthread_t threadID; + int32_t threadIndex; + int32_t totalThreads; + char dbName[TSDB_TABLE_NAME_LEN + 1]; + void *taosCon; +} SThreadParaObj; + +static int64_t totalDumpOutRows = 0; + +SDbInfo **dbInfos = NULL; + +const char *argp_program_version = version; +const char *argp_program_bug_address = ""; + +/* Program documentation. */ +static char doc[] = ""; +/* "Argp example #4 -- a program with somewhat more complicated\ */ +/* options\ */ +/* \vThis part of the documentation comes *after* the options;\ */ +/* note that the text is automatically filled, but it's possible\ */ +/* to force a line-break, e.g.\n<-- here."; */ + +/* A description of the arguments we accept. 
*/ +static char args_doc[] = "dbname [tbname ...]\n--databases dbname ...\n--all-databases\n-i inpath\n-o outpath"; + +/* Keys for options without short-options. */ +#define OPT_ABORT 1 /* –abort */ + +/* The options we understand. */ +static struct argp_option options[] = { + // connection option + {"host", 'h', "HOST", 0, "Server host dumping data from. Default is localhost.", 0}, + {"user", 'u', "USER", 0, "User name used to connect to server. Default is root.", 0}, + #ifdef _TD_POWER_ + {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is powerdb.", 0}, + #else + {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is taosdata.", 0}, + #endif + {"port", 'P', "PORT", 0, "Port to connect", 0}, + {"cversion", 'v', "CVERION", 0, "client version", 0}, + {"mysqlFlag", 'q', "MYSQLFLAG", 0, "mysqlFlag, Default is 0", 0}, + // input/output file + {"outpath", 'o', "OUTPATH", 0, "Output file path.", 1}, + {"inpath", 'i', "INPATH", 0, "Input file path.", 1}, + #ifdef _TD_POWER_ + {"config", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/power/taos.cfg.", 1}, + #else + {"config", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/taos/taos.cfg.", 1}, + #endif + {"encode", 'e', "ENCODE", 0, "Input file encoding.", 1}, + // dump unit options + {"all-databases", 'A', 0, 0, "Dump all databases.", 2}, + {"databases", 'B', 0, 0, "Dump assigned databases", 2}, + // dump format options + {"schemaonly", 's', 0, 0, "Only dump schema.", 3}, + {"with-property", 'M', 0, 0, "Dump schema with properties.", 3}, + {"start-time", 'S', "START_TIME", 0, "Start time to dump.", 3}, + {"end-time", 'E', "END_TIME", 0, "End time to dump.", 3}, + {"data-batch", 'N', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3}, + {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3}, + {"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3}, + {"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3}, + {"allow-sys", 'a', 0, 0, "Allow to dump sys database", 3}, + {0}}; + +/* Used by main to communicate with parse_opt. */ +struct arguments { + // connection option + char *host; + char *user; + char *password; + uint16_t port; + char cversion[12]; + uint16_t mysqlFlag; + // output file + char outpath[TSDB_FILENAME_LEN+1]; + char inpath[TSDB_FILENAME_LEN+1]; + char *encode; + // dump unit option + bool all_databases; + bool databases; + // dump format option + bool schemaonly; + bool with_property; + int64_t start_time; + int64_t end_time; + int32_t data_batch; + int32_t max_sql_len; + int32_t table_batch; // num of table which will be dump into one output file. + bool allow_sys; + // other options + int32_t thread_num; + int abort; + char **arg_list; + int arg_list_len; + bool isDumpIn; +}; + +/* Parse a single option. */ +static error_t parse_opt(int key, char *arg, struct argp_state *state) { + /* Get the input argument from argp_parse, which we + know is a pointer to our arguments structure. 
*/ + struct arguments *arguments = state->input; + wordexp_t full_path; + + switch (key) { + // connection option + case 'a': + arguments->allow_sys = true; + break; + case 'h': + arguments->host = arg; + break; + case 'u': + arguments->user = arg; + break; + case 'p': + arguments->password = arg; + break; + case 'P': + arguments->port = atoi(arg); + break; + case 'q': + arguments->mysqlFlag = atoi(arg); + break; + case 'v': + if (wordexp(arg, &full_path, 0) != 0) { + fprintf(stderr, "Invalid client vesion %s\n", arg); + return -1; + } + tstrncpy(arguments->cversion, full_path.we_wordv[0], 11); + wordfree(&full_path); + break; + // output file path + case 'o': + if (wordexp(arg, &full_path, 0) != 0) { + fprintf(stderr, "Invalid path %s\n", arg); + return -1; + } + tstrncpy(arguments->outpath, full_path.we_wordv[0], TSDB_FILENAME_LEN); + wordfree(&full_path); + break; + case 'i': + arguments->isDumpIn = true; + if (wordexp(arg, &full_path, 0) != 0) { + fprintf(stderr, "Invalid path %s\n", arg); + return -1; + } + tstrncpy(arguments->inpath, full_path.we_wordv[0], TSDB_FILENAME_LEN); + wordfree(&full_path); + break; + case 'c': + if (wordexp(arg, &full_path, 0) != 0) { + fprintf(stderr, "Invalid path %s\n", arg); + return -1; + } + tstrncpy(configDir, full_path.we_wordv[0], TSDB_FILENAME_LEN); + wordfree(&full_path); + break; + case 'e': + arguments->encode = arg; + break; + // dump unit option + case 'A': + arguments->all_databases = true; + break; + case 'B': + arguments->databases = true; + break; + // dump format option + case 's': + arguments->schemaonly = true; + break; + case 'M': + arguments->with_property = true; + break; + case 'S': + // parse time here. + arguments->start_time = atol(arg); + break; + case 'E': + arguments->end_time = atol(arg); + break; + case 'N': + arguments->data_batch = atoi(arg); + break; + case 'L': + { + int32_t len = atoi(arg); + if (len > TSDB_MAX_ALLOWED_SQL_LEN) { + len = TSDB_MAX_ALLOWED_SQL_LEN; + } else if (len < TSDB_MAX_SQL_LEN) { + len = TSDB_MAX_SQL_LEN; + } + arguments->max_sql_len = len; + break; + } + case 't': + arguments->table_batch = atoi(arg); + break; + case 'T': + arguments->thread_num = atoi(arg); + break; + case OPT_ABORT: + arguments->abort = 1; + break; + case ARGP_KEY_ARG: + arguments->arg_list = &state->argv[state->next - 1]; + arguments->arg_list_len = state->argc - state->next + 1; + state->next = state->argc; + break; + + default: + return ARGP_ERR_UNKNOWN; + } + return 0; +} + +/* Our argp parser. 
*/ +static struct argp argp = {options, parse_opt, args_doc, doc}; + +int taosDumpOut(struct arguments *arguments); +int taosDumpIn(struct arguments *arguments); +void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp); +int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *taosCon); +int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon); +void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp); +void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp); +int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon); +int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon); +int taosCheckParam(struct arguments *arguments); +void taosFreeDbInfos(); +static void taosStartDumpOutWorkThreads(struct arguments* args, int32_t numOfThread, char *dbName); + +struct arguments tsArguments = { + // connection option + NULL, + "root", + #ifdef _TD_POWER_ + "powerdb", + #else + "taosdata", + #endif + 0, + "", + 0, + // outpath and inpath + "", + "", + NULL, + // dump unit option + false, + false, + // dump format option + false, + false, + 0, + INT64_MAX, + 1, + TSDB_MAX_SQL_LEN, + 1, + false, + // other options + 5, + 0, + NULL, + 0, + false +}; + +int queryDB(TAOS *taos, char *command) { + TAOS_RES *pSql = NULL; + int32_t code = -1; + + pSql = taos_query(taos, command); + code = taos_errno(pSql); + if (code) { + fprintf(stderr, "sql error: %s, reason:%s\n", command, taos_errstr(pSql)); + } + taos_free_result(pSql); + return code; +} + +int main(int argc, char *argv[]) { + + /* Parse our arguments; every option seen by parse_opt will be + reflected in arguments. */ + argp_parse(&argp, argc, argv, 0, 0, &tsArguments); + + if (tsArguments.abort) { + #ifndef _ALPINE + error(10, 0, "ABORTED"); + #else + abort(); + #endif + } + + printf("====== arguments config ======\n"); + { + printf("host: %s\n", tsArguments.host); + printf("user: %s\n", tsArguments.user); + printf("password: %s\n", tsArguments.password); + printf("port: %u\n", tsArguments.port); + printf("cversion: %s\n", tsArguments.cversion); + printf("mysqlFlag: %d\n", tsArguments.mysqlFlag); + printf("outpath: %s\n", tsArguments.outpath); + printf("inpath: %s\n", tsArguments.inpath); + printf("encode: %s\n", tsArguments.encode); + printf("all_databases: %d\n", tsArguments.all_databases); + printf("databases: %d\n", tsArguments.databases); + printf("schemaonly: %d\n", tsArguments.schemaonly); + printf("with_property: %d\n", tsArguments.with_property); + printf("start_time: %" PRId64 "\n", tsArguments.start_time); + printf("end_time: %" PRId64 "\n", tsArguments.end_time); + printf("data_batch: %d\n", tsArguments.data_batch); + printf("max_sql_len: %d\n", tsArguments.max_sql_len); + printf("table_batch: %d\n", tsArguments.table_batch); + printf("thread_num: %d\n", tsArguments.thread_num); + printf("allow_sys: %d\n", tsArguments.allow_sys); + printf("abort: %d\n", tsArguments.abort); + printf("isDumpIn: %d\n", tsArguments.isDumpIn); + printf("arg_list_len: %d\n", tsArguments.arg_list_len); + + for (int32_t i = 0; i < tsArguments.arg_list_len; i++) { + printf("arg_list[%d]: %s\n", i, tsArguments.arg_list[i]); + } + } + printf("==============================\n"); + + if (tsArguments.cversion[0] != 0){ + tstrncpy(version, tsArguments.cversion, 11); + } + + if (taosCheckParam(&tsArguments) < 0) { + exit(EXIT_FAILURE); + } + + if (tsArguments.isDumpIn) { + if 
(taosDumpIn(&tsArguments) < 0) return -1; + } else { + if (taosDumpOut(&tsArguments) < 0) return -1; + } + + return 0; +} + +void taosFreeDbInfos() { + if (dbInfos == NULL) return; + for (int i = 0; i < 128; i++) tfree(dbInfos[i]); + tfree(dbInfos); +} + +// check table is normal table or super table +int taosGetTableRecordInfo(char *table, STableRecordInfo *pTableRecordInfo, TAOS *taosCon) { + TAOS_ROW row = NULL; + bool isSet = false; + TAOS_RES *result = NULL; + + memset(pTableRecordInfo, 0, sizeof(STableRecordInfo)); + + char* tempCommand = (char *)malloc(COMMAND_SIZE); + if (tempCommand == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + return -1; + } + + sprintf(tempCommand, "show tables like %s", table); + + result = taos_query(taosCon, tempCommand); + int32_t code = taos_errno(result); + + if (code != 0) { + fprintf(stderr, "failed to run command %s\n", tempCommand); + free(tempCommand); + taos_free_result(result); + return -1; + } + + TAOS_FIELD *fields = taos_fetch_fields(result); + + while ((row = taos_fetch_row(result)) != NULL) { + isSet = true; + pTableRecordInfo->isMetric = false; + strncpy(pTableRecordInfo->tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], + fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); + strncpy(pTableRecordInfo->tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], + fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes); + break; + } + + taos_free_result(result); + result = NULL; + + if (isSet) { + free(tempCommand); + return 0; + } + + sprintf(tempCommand, "show stables like %s", table); + + result = taos_query(taosCon, tempCommand); + code = taos_errno(result); + + if (code != 0) { + fprintf(stderr, "failed to run command %s\n", tempCommand); + free(tempCommand); + taos_free_result(result); + return -1; + } + + while ((row = taos_fetch_row(result)) != NULL) { + isSet = true; + pTableRecordInfo->isMetric = true; + tstrncpy(pTableRecordInfo->tableRecord.metric, table, TSDB_TABLE_NAME_LEN); + break; + } + + taos_free_result(result); + result = NULL; + + if (isSet) { + free(tempCommand); + return 0; + } + fprintf(stderr, "invalid table/metric %s\n", table); + free(tempCommand); + return -1; +} + + +int32_t taosSaveAllNormalTableToTempFile(TAOS *taosCon, char*meter, char* metric, int* fd) { + STableRecord tableRecord; + + if (-1 == *fd) { + *fd = open(".tables.tmp.0", O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (*fd == -1) { + fprintf(stderr, "failed to open temp file: .tables.tmp.0\n"); + return -1; + } + } + + memset(&tableRecord, 0, sizeof(STableRecord)); + tstrncpy(tableRecord.name, meter, TSDB_TABLE_NAME_LEN); + tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN); + + taosWrite(*fd, &tableRecord, sizeof(STableRecord)); + return 0; +} + + +int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct arguments *arguments, int32_t* totalNumOfThread) { + TAOS_ROW row; + int fd = -1; + STableRecord tableRecord; + + char* tmpCommand = (char *)malloc(COMMAND_SIZE); + if (tmpCommand == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + return -1; + } + + sprintf(tmpCommand, "select tbname from %s", metric); + + TAOS_RES *result = taos_query(taosCon, tmpCommand); + int32_t code = taos_errno(result); + if (code != 0) { + fprintf(stderr, "failed to run command %s\n", tmpCommand); + free(tmpCommand); + taos_free_result(result); + return -1; + } + + TAOS_FIELD *fields = taos_fetch_fields(result); + + int32_t numOfTable = 0; + int32_t numOfThread = *totalNumOfThread; + char 
tmpFileName[TSDB_FILENAME_LEN + 1]; + while ((row = taos_fetch_row(result)) != NULL) { + if (0 == numOfTable) { + memset(tmpFileName, 0, TSDB_FILENAME_LEN); + sprintf(tmpFileName, ".tables.tmp.%d", numOfThread); + fd = open(tmpFileName, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (fd == -1) { + fprintf(stderr, "failed to open temp file: %s\n", tmpFileName); + taos_free_result(result); + for (int32_t loopCnt = 0; loopCnt < numOfThread; loopCnt++) { + sprintf(tmpFileName, ".tables.tmp.%d", loopCnt); + (void)remove(tmpFileName); + } + free(tmpCommand); + return -1; + } + + numOfThread++; + } + + memset(&tableRecord, 0, sizeof(STableRecord)); + tstrncpy(tableRecord.name, (char *)row[0], fields[0].bytes); + tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN); + + taosWrite(fd, &tableRecord, sizeof(STableRecord)); + + numOfTable++; + + if (numOfTable >= arguments->table_batch) { + numOfTable = 0; + close(fd); + fd = -1; + } + } + + if (fd >= 0) { + close(fd); + fd = -1; + } + + taos_free_result(result); + + *totalNumOfThread = numOfThread; + + free(tmpCommand); + + return 0; +} + +int taosDumpOut(struct arguments *arguments) { + TAOS *taos = NULL; + TAOS_RES *result = NULL; + char *command = NULL; + + TAOS_ROW row; + FILE *fp = NULL; + int32_t count = 0; + STableRecordInfo tableRecordInfo; + + char tmpBuf[TSDB_FILENAME_LEN+9] = {0}; + if (arguments->outpath[0] != 0) { + sprintf(tmpBuf, "%s/dbs.sql", arguments->outpath); + } else { + sprintf(tmpBuf, "dbs.sql"); + } + + fp = fopen(tmpBuf, "w"); + if (fp == NULL) { + fprintf(stderr, "failed to open file %s\n", tmpBuf); + return -1; + } + + dbInfos = (SDbInfo **)calloc(128, sizeof(SDbInfo *)); + if (dbInfos == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + goto _exit_failure; + } + + command = (char *)malloc(COMMAND_SIZE); + if (command == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + goto _exit_failure; + } + + /* Connect to server */ + taos = taos_connect(arguments->host, arguments->user, arguments->password, NULL, arguments->port); + if (taos == NULL) { + fprintf(stderr, "failed to connect to TDengine server\n"); + goto _exit_failure; + } + + /* --------------------------------- Main Code -------------------------------- */ + /* if (arguments->databases || arguments->all_databases) { // dump part of databases or all databases */ + /* */ + taosDumpCharset(fp); + + sprintf(command, "show databases"); + result = taos_query(taos, command); + int32_t code = taos_errno(result); + + if (code != 0) { + fprintf(stderr, "failed to run command: %s, reason: %s\n", command, taos_errstr(taos)); + goto _exit_failure; + } + + TAOS_FIELD *fields = taos_fetch_fields(result); + + while ((row = taos_fetch_row(result)) != NULL) { + // sys database name : 'log', but subsequent version changed to 'log' + if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0 && + (!arguments->allow_sys)) + continue; + + if (arguments->databases) { // input multi dbs + for (int i = 0; arguments->arg_list[i]; i++) { + if (strncasecmp(arguments->arg_list[i], (char *)row[TSDB_SHOW_DB_NAME_INDEX], + fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) + goto _dump_db_point; + } + continue; + } else if (!arguments->all_databases) { // only input one db + if (strncasecmp(arguments->arg_list[0], (char *)row[TSDB_SHOW_DB_NAME_INDEX], + fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) + goto _dump_db_point; + else + continue; + } + + _dump_db_point: + + dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo)); + if 
(dbInfos[count] == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + goto _exit_failure; + } + + strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes); +#if 0 + if (arguments->with_property) { + dbInfos[count]->tables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]); + dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]); + dbInfos[count]->replications = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]); + dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]); + dbInfos[count]->daysPerFile = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]); + dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]); + dbInfos[count]->daysToKeep1; + dbInfos[count]->daysToKeep2; + dbInfos[count]->cacheBlockSize = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]); + dbInfos[count]->totalBlocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]); + dbInfos[count]->minRowsPerFileBlock = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]); + dbInfos[count]->maxRowsPerFileBlock = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]); + dbInfos[count]->walLevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]); + dbInfos[count]->fsyncPeriod = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]); + dbInfos[count]->compression = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); + dbInfos[count]->precision = *((int8_t *)row[TSDB_SHOW_DB_PRECISION_INDEX]); + dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]); + } +#endif + count++; + + if (arguments->databases) { + if (count > arguments->arg_list_len) break; + + } else if (!arguments->all_databases) { + if (count >= 1) break; + } + } + + if (count == 0) { + fprintf(stderr, "No databases valid to dump\n"); + goto _exit_failure; + } + + if (arguments->databases || arguments->all_databases) { // case: taosdump --databases dbx dby ... OR taosdump --all-databases + for (int i = 0; i < count; i++) { + taosDumpDb(dbInfos[i], arguments, fp, taos); + } + } else { + if (arguments->arg_list_len == 1) { // case: taosdump + taosDumpDb(dbInfos[0], arguments, fp, taos); + } else { // case: taosdump tablex tabley ... 
+ taosDumpCreateDbClause(dbInfos[0], arguments->with_property, fp); + + sprintf(command, "use %s", dbInfos[0]->name); + + result = taos_query(taos, command); + int32_t code = taos_errno(result); + if (code != 0) { + fprintf(stderr, "invalid database %s\n", dbInfos[0]->name); + goto _exit_failure; + } + + fprintf(fp, "USE %s;\n\n", dbInfos[0]->name); + + int32_t totalNumOfThread = 1; // 0: all normal talbe into .tables.tmp.0 + int normalTblFd = -1; + int32_t retCode; + for (int i = 1; arguments->arg_list[i]; i++) { + if (taosGetTableRecordInfo(arguments->arg_list[i], &tableRecordInfo, taos) < 0) { + fprintf(stderr, "input the invalide table %s\n", arguments->arg_list[i]); + continue; + } + + if (tableRecordInfo.isMetric) { // dump all table of this metric + (void)taosDumpStable(tableRecordInfo.tableRecord.metric, fp, taos); + retCode = taosSaveTableOfMetricToTempFile(taos, tableRecordInfo.tableRecord.metric, arguments, &totalNumOfThread); + } else { + if (tableRecordInfo.tableRecord.metric[0] != '\0') { // dump this sub table and it's metric + (void)taosDumpStable(tableRecordInfo.tableRecord.metric, fp, taos); + } + retCode = taosSaveAllNormalTableToTempFile(taos, tableRecordInfo.tableRecord.name, tableRecordInfo.tableRecord.metric, &normalTblFd); + } + + if (retCode < 0) { + if (-1 != normalTblFd){ + taosClose(normalTblFd); + } + goto _clean_tmp_file; + } + } + + if (-1 != normalTblFd){ + taosClose(normalTblFd); + } + + // start multi threads to dumpout + taosStartDumpOutWorkThreads(arguments, totalNumOfThread, dbInfos[0]->name); + + char tmpFileName[TSDB_FILENAME_LEN + 1]; + _clean_tmp_file: + for (int loopCnt = 0; loopCnt < totalNumOfThread; loopCnt++) { + sprintf(tmpFileName, ".tables.tmp.%d", loopCnt); + remove(tmpFileName); + } + } + } + + /* Close the handle and return */ + fclose(fp); + taos_close(taos); + taos_free_result(result); + tfree(command); + taosFreeDbInfos(); + fprintf(stderr, "dump out rows: %" PRId64 "\n", totalDumpOutRows); + return 0; + +_exit_failure: + fclose(fp); + taos_close(taos); + taos_free_result(result); + tfree(command); + taosFreeDbInfos(); + fprintf(stderr, "dump out rows: %" PRId64 "\n", totalDumpOutRows); + return -1; +} + +int taosGetTableDes(char *table, STableDef *tableDes, TAOS* taosCon, bool isSuperTable) { + TAOS_ROW row = NULL; + TAOS_RES *tmpResult = NULL; + int count = 0; + + char* tempCommand = (char *)malloc(COMMAND_SIZE); + if (tempCommand == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + return -1; + } + + char* tbuf = (char *)malloc(COMMAND_SIZE); + if (tbuf == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + free(tempCommand); + return -1; + } + + sprintf(tempCommand, "describe %s", table); + + tmpResult = taos_query(taosCon, tempCommand); + int32_t code = taos_errno(tmpResult); + if (code != 0) { + fprintf(stderr, "failed to run command %s\n", tempCommand); + free(tempCommand); + free(tbuf); + taos_free_result(tmpResult); + return -1; + } + + TAOS_FIELD *fields = taos_fetch_fields(tmpResult); + + tstrncpy(tableDes->name, table, TSDB_COL_NAME_LEN); + + while ((row = taos_fetch_row(tmpResult)) != NULL) { + strncpy(tableDes->cols[count].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], + fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); + strncpy(tableDes->cols[count].type, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes); + tableDes->cols[count].length = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); + strncpy(tableDes->cols[count].note, (char 
*)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], + fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes); + + count++; + } + + taos_free_result(tmpResult); + tmpResult = NULL; + + if (isSuperTable) { + free(tempCommand); + free(tbuf); + return count; + } + + // if chidl-table have tag, using select tagName from table to get tagValue + for (int i = 0 ; i < count; i++) { + if (strcmp(tableDes->cols[i].note, "TAG") != 0) continue; + + + sprintf(tempCommand, "select %s from %s", tableDes->cols[i].field, table); + + tmpResult = taos_query(taosCon, tempCommand); + code = taos_errno(tmpResult); + if (code != 0) { + fprintf(stderr, "failed to run command %s\n", tempCommand); + free(tempCommand); + free(tbuf); + taos_free_result(tmpResult); + return -1; + } + + fields = taos_fetch_fields(tmpResult); + + row = taos_fetch_row(tmpResult); + if (NULL == row) { + fprintf(stderr, " fetch failed to run command %s\n", tempCommand); + free(tempCommand); + free(tbuf); + taos_free_result(tmpResult); + return -1; + } + + if (row[0] == NULL) { + sprintf(tableDes->cols[i].note, "%s", "NULL"); + taos_free_result(tmpResult); + tmpResult = NULL; + continue; + } + + int32_t* length = taos_fetch_lengths(tmpResult); + + //int32_t* length = taos_fetch_lengths(tmpResult); + switch (fields[0].type) { + case TSDB_DATA_TYPE_BOOL: + sprintf(tableDes->cols[i].note, "%d", ((((int32_t)(*((char *)row[0]))) == 1) ? 1 : 0)); + break; + case TSDB_DATA_TYPE_TINYINT: + sprintf(tableDes->cols[i].note, "%d", *((int8_t *)row[0])); + break; + case TSDB_DATA_TYPE_SMALLINT: + sprintf(tableDes->cols[i].note, "%d", *((int16_t *)row[0])); + break; + case TSDB_DATA_TYPE_INT: + sprintf(tableDes->cols[i].note, "%d", *((int32_t *)row[0])); + break; + case TSDB_DATA_TYPE_BIGINT: + sprintf(tableDes->cols[i].note, "%" PRId64 "", *((int64_t *)row[0])); + break; + case TSDB_DATA_TYPE_FLOAT: + sprintf(tableDes->cols[i].note, "%f", GET_FLOAT_VAL(row[0])); + break; + case TSDB_DATA_TYPE_DOUBLE: + sprintf(tableDes->cols[i].note, "%f", GET_DOUBLE_VAL(row[0])); + break; + case TSDB_DATA_TYPE_BINARY: + memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note)); + tableDes->cols[i].note[0] = '\''; + converStringToReadable((char *)row[0], length[0], tbuf, COMMAND_SIZE); + char* pstr = stpcpy(&(tableDes->cols[i].note[1]), tbuf); + *(pstr++) = '\''; + break; + case TSDB_DATA_TYPE_NCHAR: + memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note)); + convertNCharToReadable((char *)row[0], length[0], tbuf, COMMAND_SIZE); + sprintf(tableDes->cols[i].note, "\'%s\'", tbuf); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + sprintf(tableDes->cols[i].note, "%" PRId64 "", *(int64_t *)row[0]); + #if 0 + if (!arguments->mysqlFlag) { + sprintf(tableDes->cols[i].note, "%" PRId64 "", *(int64_t *)row[0]); + } else { + char buf[64] = "\0"; + int64_t ts = *((int64_t *)row[0]); + time_t tt = (time_t)(ts / 1000); + struct tm *ptm = localtime(&tt); + strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm); + sprintf(tableDes->cols[i].note, "\'%s.%03d\'", buf, (int)(ts % 1000)); + } + #endif + break; + default: + break; + } + + taos_free_result(tmpResult); + tmpResult = NULL; + } + + free(tempCommand); + free(tbuf); + + return count; +} + +int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon) { + int count = 0; + + STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS); + + if (metric != NULL && metric[0] != '\0') { // dump table schema which is created by using super table + /* + count = 
taosGetTableDes(metric, tableDes, taosCon); + + if (count < 0) { + free(tableDes); + return -1; + } + + taosDumpCreateTableClause(tableDes, count, fp); + + memset(tableDes, 0, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS); + */ + + count = taosGetTableDes(table, tableDes, taosCon, false); + + if (count < 0) { + free(tableDes); + return -1; + } + + // create child-table using super-table + taosDumpCreateMTableClause(tableDes, metric, count, fp); + + } else { // dump table definition + count = taosGetTableDes(table, tableDes, taosCon, false); + + if (count < 0) { + free(tableDes); + return -1; + } + + // create normal-table or super-table + taosDumpCreateTableClause(tableDes, count, fp); + } + + free(tableDes); + + return taosDumpTableData(fp, table, arguments, taosCon); +} + +void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) { + + char* tmpCommand = (char *)malloc(COMMAND_SIZE); + if (tmpCommand == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + return; + } + + char *pstr = tmpCommand; + + pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s", dbInfo->name); + if (isDumpProperty) { + #if 0 + pstr += sprintf(pstr, + "TABLES %d vgroups %d REPLICA %d quorum %d DAYS %d KEEP %d CACHE %d BLOCKS %d MINROWS %d MAXROWS %d WALLEVEL %d FYNC %d COMP %d PRECISION %s UPDATE %d", + dbInfo->tables, dbInfo->vgroups, dbInfo->replications, dbInfo->quorum, dbInfo->daysPerFile, dbInfo->daysToKeep, dbInfo->cacheBlockSize, + dbInfo->totalBlocks, dbInfo->minRowsPerFileBlock, dbInfo->maxRowsPerFileBlock, dbInfo->walLevel, dbInfo->fsyncPeriod, dbInfo->compression, + dbInfo->precision, dbInfo->update); + #endif + } + + pstr += sprintf(pstr, ";"); + + fprintf(fp, "%s\n\n", tmpCommand); + free(tmpCommand); +} + +void* taosDumpOutWorkThreadFp(void *arg) +{ + SThreadParaObj *pThread = (SThreadParaObj*)arg; + STableRecord tableRecord; + int fd; + + char tmpFileName[TSDB_FILENAME_LEN*4] = {0}; + sprintf(tmpFileName, ".tables.tmp.%d", pThread->threadIndex); + fd = open(tmpFileName, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (fd == -1) { + fprintf(stderr, "taosDumpTableFp() failed to open temp file: %s\n", tmpFileName); + return NULL; + } + + FILE *fp = NULL; + memset(tmpFileName, 0, TSDB_FILENAME_LEN + 128); + + if (tsArguments.outpath[0] != 0) { + sprintf(tmpFileName, "%s/%s.tables.%d.sql", tsArguments.outpath, pThread->dbName, pThread->threadIndex); + } else { + sprintf(tmpFileName, "%s.tables.%d.sql", pThread->dbName, pThread->threadIndex); + } + + fp = fopen(tmpFileName, "w"); + if (fp == NULL) { + fprintf(stderr, "failed to open file %s\n", tmpFileName); + close(fd); + return NULL; + } + + memset(tmpFileName, 0, TSDB_FILENAME_LEN); + sprintf(tmpFileName, "use %s", pThread->dbName); + + TAOS_RES* tmpResult = taos_query(pThread->taosCon, tmpFileName); + int32_t code = taos_errno(tmpResult); + if (code != 0) { + fprintf(stderr, "invalid database %s\n", pThread->dbName); + taos_free_result(tmpResult); + fclose(fp); + close(fd); + return NULL; + } + + fprintf(fp, "USE %s;\n\n", pThread->dbName); + while (1) { + ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord)); + if (readLen <= 0) break; + taosDumpTable(tableRecord.name, tableRecord.metric, &tsArguments, fp, pThread->taosCon); + } + + taos_free_result(tmpResult); + close(fd); + fclose(fp); + + return NULL; +} + +static void taosStartDumpOutWorkThreads(struct arguments* args, int32_t numOfThread, char *dbName) +{ + pthread_attr_t thattr; + SThreadParaObj *threadObj = (SThreadParaObj 
*)calloc(numOfThread, sizeof(SThreadParaObj)); + for (int t = 0; t < numOfThread; ++t) { + SThreadParaObj *pThread = threadObj + t; + pThread->threadIndex = t; + pThread->totalThreads = numOfThread; + tstrncpy(pThread->dbName, dbName, TSDB_TABLE_NAME_LEN); + pThread->taosCon = taos_connect(args->host, args->user, args->password, NULL, args->port); + + if (pThread->taosCon == NULL) { + fprintf(stderr, "ERROR: thread:%d failed connect to TDengine, error:%s\n", pThread->threadIndex, taos_errstr(pThread->taosCon)); + exit(0); + } + + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); + + if (pthread_create(&(pThread->threadID), &thattr, taosDumpOutWorkThreadFp, (void*)pThread) != 0) { + fprintf(stderr, "ERROR: thread:%d failed to start\n", pThread->threadIndex); + exit(0); + } + } + + for (int32_t t = 0; t < numOfThread; ++t) { + pthread_join(threadObj[t].threadID, NULL); + } + + for (int32_t t = 0; t < numOfThread; ++t) { + taos_close(threadObj[t].taosCon); + } + free(threadObj); +} + + + +int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon) { + int count = 0; + + STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS); + if (NULL == tableDes) { + fprintf(stderr, "failed to allocate memory\n"); + exit(-1); + } + + count = taosGetTableDes(table, tableDes, taosCon, true); + + if (count < 0) { + free(tableDes); + fprintf(stderr, "failed to get stable schema\n"); + exit(-1); + } + + taosDumpCreateTableClause(tableDes, count, fp); + + free(tableDes); + return 0; +} + + +int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp) +{ + TAOS_ROW row; + int fd = -1; + STableRecord tableRecord; + + char* tmpCommand = (char *)malloc(COMMAND_SIZE); + if (tmpCommand == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + exit(-1); + } + + sprintf(tmpCommand, "use %s", dbName); + + TAOS_RES* tmpResult = taos_query(taosCon, tmpCommand); + int32_t code = taos_errno(tmpResult); + if (code != 0) { + fprintf(stderr, "invalid database %s, error: %s\n", dbName, taos_errstr(taosCon)); + free(tmpCommand); + taos_free_result(tmpResult); + exit(-1); + } + + taos_free_result(tmpResult); + + sprintf(tmpCommand, "show stables"); + + tmpResult = taos_query(taosCon, tmpCommand); + code = taos_errno(tmpResult); + if (code != 0) { + fprintf(stderr, "failed to run command %s, error: %s\n", tmpCommand, taos_errstr(taosCon)); + free(tmpCommand); + taos_free_result(tmpResult); + exit(-1); + } + + TAOS_FIELD *fields = taos_fetch_fields(tmpResult); + + char tmpFileName[TSDB_FILENAME_LEN + 1]; + memset(tmpFileName, 0, TSDB_FILENAME_LEN); + sprintf(tmpFileName, ".stables.tmp"); + fd = open(tmpFileName, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (fd == -1) { + fprintf(stderr, "failed to open temp file: %s\n", tmpFileName); + taos_free_result(tmpResult); + free(tmpCommand); + (void)remove(".stables.tmp"); + exit(-1); + } + + while ((row = taos_fetch_row(tmpResult)) != NULL) { + memset(&tableRecord, 0, sizeof(STableRecord)); + strncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); + taosWrite(fd, &tableRecord, sizeof(STableRecord)); + } + + taos_free_result(tmpResult); + (void)lseek(fd, 0, SEEK_SET); + + while (1) { + ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord)); + if (readLen <= 0) break; + + (void)taosDumpStable(tableRecord.name, fp, taosCon); + } + + close(fd); + (void)remove(".stables.tmp"); + + free(tmpCommand); + 
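  /* All super tables reported by "show stables" have been staged in the
     temporary file .stables.tmp, replayed through taosDumpStable() so that
     each CREATE TABLE clause lands in the output file, and the temporary
     file has been removed. */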
return 0; +} + + +int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *taosCon) { + TAOS_ROW row; + int fd = -1; + STableRecord tableRecord; + + taosDumpCreateDbClause(dbInfo, arguments->with_property, fp); + + char* tmpCommand = (char *)malloc(COMMAND_SIZE); + if (tmpCommand == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + return -1; + } + + sprintf(tmpCommand, "use %s", dbInfo->name); + + TAOS_RES* tmpResult = taos_query(taosCon, tmpCommand); + int32_t code = taos_errno(tmpResult); + if (code != 0) { + fprintf(stderr, "invalid database %s\n", dbInfo->name); + free(tmpCommand); + taos_free_result(tmpResult); + return -1; + } + taos_free_result(tmpResult); + + fprintf(fp, "USE %s;\n\n", dbInfo->name); + + (void)taosDumpCreateSuperTableClause(taosCon, dbInfo->name, fp); + + sprintf(tmpCommand, "show tables"); + + tmpResult = taos_query(taosCon, tmpCommand); + code = taos_errno(tmpResult); + if (code != 0) { + fprintf(stderr, "failed to run command %s\n", tmpCommand); + free(tmpCommand); + taos_free_result(tmpResult); + return -1; + } + + TAOS_FIELD *fields = taos_fetch_fields(tmpResult); + + int32_t numOfTable = 0; + int32_t numOfThread = 0; + char tmpFileName[TSDB_FILENAME_LEN + 1]; + while ((row = taos_fetch_row(tmpResult)) != NULL) { + if (0 == numOfTable) { + memset(tmpFileName, 0, TSDB_FILENAME_LEN); + sprintf(tmpFileName, ".tables.tmp.%d", numOfThread); + fd = open(tmpFileName, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (fd == -1) { + fprintf(stderr, "failed to open temp file: %s\n", tmpFileName); + taos_free_result(tmpResult); + for (int32_t loopCnt = 0; loopCnt < numOfThread; loopCnt++) { + sprintf(tmpFileName, ".tables.tmp.%d", loopCnt); + (void)remove(tmpFileName); + } + free(tmpCommand); + return -1; + } + + numOfThread++; + } + + memset(&tableRecord, 0, sizeof(STableRecord)); + tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); + tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes); + + taosWrite(fd, &tableRecord, sizeof(STableRecord)); + + numOfTable++; + + if (numOfTable >= arguments->table_batch) { + numOfTable = 0; + close(fd); + fd = -1; + } + } + + if (fd >= 0) { + close(fd); + fd = -1; + } + + taos_free_result(tmpResult); + + // start multi threads to dumpout + taosStartDumpOutWorkThreads(arguments, numOfThread, dbInfo->name); + for (int loopCnt = 0; loopCnt < numOfThread; loopCnt++) { + sprintf(tmpFileName, ".tables.tmp.%d", loopCnt); + (void)remove(tmpFileName); + } + + free(tmpCommand); + + return 0; +} + +void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp) { + int counter = 0; + int count_temp = 0; + + char* tmpBuf = (char *)malloc(COMMAND_SIZE); + if (tmpBuf == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + return; + } + + char* pstr = tmpBuf; + + pstr += sprintf(tmpBuf, "CREATE TABLE IF NOT EXISTS %s", tableDes->name); + + for (; counter < numOfCols; counter++) { + if (tableDes->cols[counter].note[0] != '\0') break; + + if (counter == 0) { + pstr += sprintf(pstr, " (%s %s", tableDes->cols[counter].field, tableDes->cols[counter].type); + } else { + pstr += sprintf(pstr, ", %s %s", tableDes->cols[counter].field, tableDes->cols[counter].type); + } + + if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || + strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { + pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length); + } + } + + 
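  /* Ordinary columns (empty "note") have been emitted above; the remaining
     entries carry the "TAG" note and are written below as the TAGS(...)
     part of the CREATE TABLE statement. */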
count_temp = counter; + + for (; counter < numOfCols; counter++) { + if (counter == count_temp) { + pstr += sprintf(pstr, ") TAGS (%s %s", tableDes->cols[counter].field, tableDes->cols[counter].type); + } else { + pstr += sprintf(pstr, ", %s %s", tableDes->cols[counter].field, tableDes->cols[counter].type); + } + + if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || + strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { + pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length); + } + } + + pstr += sprintf(pstr, ");"); + + fprintf(fp, "%s\n", tmpBuf); + + free(tmpBuf); +} + +void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp) { + int counter = 0; + int count_temp = 0; + + char* tmpBuf = (char *)malloc(COMMAND_SIZE); + if (tmpBuf == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + return; + } + + char *pstr = NULL; + pstr = tmpBuf; + + pstr += sprintf(tmpBuf, "CREATE TABLE IF NOT EXISTS %s USING %s TAGS (", tableDes->name, metric); + + for (; counter < numOfCols; counter++) { + if (tableDes->cols[counter].note[0] != '\0') break; + } + + assert(counter < numOfCols); + count_temp = counter; + + for (; counter < numOfCols; counter++) { + if (counter != count_temp) { + if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || + strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { + //pstr += sprintf(pstr, ", \'%s\'", tableDes->cols[counter].note); + pstr += sprintf(pstr, ", %s", tableDes->cols[counter].note); + } else { + pstr += sprintf(pstr, ", %s", tableDes->cols[counter].note); + } + } else { + if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || + strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { + //pstr += sprintf(pstr, "\'%s\'", tableDes->cols[counter].note); + pstr += sprintf(pstr, "%s", tableDes->cols[counter].note); + } else { + pstr += sprintf(pstr, "%s", tableDes->cols[counter].note); + } + /* pstr += sprintf(pstr, "%s", tableDes->cols[counter].note); */ + } + + /* if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || strcasecmp(tableDes->cols[counter].type, "nchar") + * == 0) { */ + /* pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length); */ + /* } */ + } + + pstr += sprintf(pstr, ");"); + + fprintf(fp, "%s\n", tmpBuf); + free(tmpBuf); +} + +int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon) { + /* char temp[MAX_COMMAND_SIZE] = "\0"; */ + int64_t totalRows = 0; + int count = 0; + char *pstr = NULL; + TAOS_ROW row = NULL; + int numFields = 0; + char *tbuf = NULL; + + char* tmpCommand = (char *)calloc(1, COMMAND_SIZE); + if (tmpCommand == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + return -1; + } + + int32_t sql_buf_len = arguments->max_sql_len; + char* tmpBuffer = (char *)calloc(1, sql_buf_len + 128); + if (tmpBuffer == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + free(tmpCommand); + return -1; + } + + pstr = tmpBuffer; + + if (arguments->schemaonly) { + free(tmpCommand); + free(tmpBuffer); + return 0; + } + + sprintf(tmpCommand, + "select * from %s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc", + tbname, + arguments->start_time, + arguments->end_time); + + TAOS_RES* tmpResult = taos_query(taosCon, tmpCommand); + int32_t code = taos_errno(tmpResult); + if (code != 0) { + fprintf(stderr, "failed to run command %s, reason: %s\n", tmpCommand, taos_errstr(taosCon)); + free(tmpCommand); + free(tmpBuffer); + taos_free_result(tmpResult); + return -1; + } + + numFields = 
taos_field_count(tmpResult); + assert(numFields > 0); + TAOS_FIELD *fields = taos_fetch_fields(tmpResult); + tbuf = (char *)malloc(COMMAND_SIZE); + if (tbuf == NULL) { + fprintf(stderr, "No enough memory\n"); + free(tmpCommand); + free(tmpBuffer); + taos_free_result(tmpResult); + return -1; + } + + int rowFlag = 0; + int32_t curr_sqlstr_len = 0; + int32_t total_sqlstr_len = 0; + count = 0; + while ((row = taos_fetch_row(tmpResult)) != NULL) { + pstr = tmpBuffer; + curr_sqlstr_len = 0; + + int32_t* length = taos_fetch_lengths(tmpResult); // act len + + if (count == 0) { + total_sqlstr_len = 0; + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "INSERT INTO %s VALUES (", tbname); + } else { + if (arguments->mysqlFlag) { + if (0 == rowFlag) { + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "("); + rowFlag++; + } else { + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", ("); + } + } else { + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "("); + } + } + + for (int col = 0; col < numFields; col++) { + if (col != 0) curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", "); + + if (row[col] == NULL) { + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "NULL"); + continue; + } + + switch (fields[col].type) { + case TSDB_DATA_TYPE_BOOL: + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", ((((int32_t)(*((char *)row[col]))) == 1) ? 1 : 0)); + break; + case TSDB_DATA_TYPE_TINYINT: + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int8_t *)row[col])); + break; + case TSDB_DATA_TYPE_SMALLINT: + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int16_t *)row[col])); + break; + case TSDB_DATA_TYPE_INT: + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int32_t *)row[col])); + break; + case TSDB_DATA_TYPE_BIGINT: + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%" PRId64 "", *((int64_t *)row[col])); + break; + case TSDB_DATA_TYPE_FLOAT: + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", GET_FLOAT_VAL(row[col])); + break; + case TSDB_DATA_TYPE_DOUBLE: + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", GET_DOUBLE_VAL(row[col])); + break; + case TSDB_DATA_TYPE_BINARY: + //*(pstr++) = '\''; + converStringToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE); + //pstr = stpcpy(pstr, tbuf); + //*(pstr++) = '\''; + pstr += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf); + break; + case TSDB_DATA_TYPE_NCHAR: + convertNCharToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE); + pstr += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + if (!arguments->mysqlFlag) { + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%" PRId64 "", *(int64_t *)row[col]); + } else { + char buf[64] = "\0"; + int64_t ts = *((int64_t *)row[col]); + time_t tt = (time_t)(ts / 1000); + struct tm *ptm = localtime(&tt); + strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm); + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s.%03d\'", buf, (int)(ts % 1000)); + } + break; + default: + break; + } + } + + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ") "); + + totalRows++; + count++; + fprintf(fp, "%s", tmpBuffer); + + total_sqlstr_len += curr_sqlstr_len; + + if ((count >= arguments->data_batch) || (sql_buf_len - total_sqlstr_len < TSDB_MAX_BYTES_PER_ROW)) { + fprintf(fp, ";\n"); + count = 0; + } //else { + //fprintf(fp, "\\\n"); + //} + } + + atomic_add_fetch_64(&totalDumpOutRows, totalRows); + + fprintf(fp, "\n"); + + if (tbuf) { + free(tbuf); + } + + taos_free_result(tmpResult); + tmpResult = NULL; + 
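  /* Each fetched row was rewritten as a "(...)" value group of an INSERT
     statement; a statement is closed with ";" once data_batch rows have
     accumulated or the remaining max_sql_len budget may no longer fit
     another row, and the row total has been added to totalDumpOutRows. */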
free(tmpCommand); + free(tmpBuffer); + return 0; +} + +int taosCheckParam(struct arguments *arguments) { + if (arguments->all_databases && arguments->databases) { + fprintf(stderr, "conflict option --all-databases and --databases\n"); + return -1; + } + + if (arguments->start_time > arguments->end_time) { + fprintf(stderr, "start time is larger than end time\n"); + return -1; + } + + if (arguments->arg_list_len == 0) { + if ((!arguments->all_databases) && (!arguments->isDumpIn)) { + fprintf(stderr, "taosdump requires parameters\n"); + return -1; + } + } +/* + if (arguments->isDumpIn && (strcmp(arguments->outpath, DEFAULT_DUMP_FILE) != 0)) { + fprintf(stderr, "duplicate parameter input and output file path\n"); + return -1; + } +*/ + if (!arguments->isDumpIn && arguments->encode != NULL) { + fprintf(stderr, "invalid option in dump out\n"); + return -1; + } + + if (arguments->table_batch <= 0) { + fprintf(stderr, "invalid option in dump out\n"); + return -1; + } + + return 0; +} + +bool isEmptyCommand(char *cmd) { + char *pchar = cmd; + + while (*pchar != '\0') { + if (*pchar != ' ') return false; + pchar++; + } + + return true; +} + +void taosReplaceCtrlChar(char *str) { + _Bool ctrlOn = false; + char *pstr = NULL; + + for (pstr = str; *str != '\0'; ++str) { + if (ctrlOn) { + switch (*str) { + case 'n': + *pstr = '\n'; + pstr++; + break; + case 'r': + *pstr = '\r'; + pstr++; + break; + case 't': + *pstr = '\t'; + pstr++; + break; + case '\\': + *pstr = '\\'; + pstr++; + break; + case '\'': + *pstr = '\''; + pstr++; + break; + default: + break; + } + ctrlOn = false; + } else { + if (*str == '\\') { + ctrlOn = true; + } else { + *pstr = *str; + pstr++; + } + } + } + + *pstr = '\0'; +} + +char *ascii_literal_list[] = { + "\\x00", "\\x01", "\\x02", "\\x03", "\\x04", "\\x05", "\\x06", "\\x07", "\\x08", "\\t", "\\n", "\\x0b", "\\x0c", + "\\r", "\\x0e", "\\x0f", "\\x10", "\\x11", "\\x12", "\\x13", "\\x14", "\\x15", "\\x16", "\\x17", "\\x18", "\\x19", + "\\x1a", "\\x1b", "\\x1c", "\\x1d", "\\x1e", "\\x1f", " ", "!", "\\\"", "#", "$", "%", "&", + "\\'", "(", ")", "*", "+", ",", "-", ".", "/", "0", "1", "2", "3", + "4", "5", "6", "7", "8", "9", ":", ";", "<", "=", ">", "?", "@", + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "[", "\\\\", "]", "^", "_", "`", "a", "b", "c", "d", "e", "f", "g", + "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", + "u", "v", "w", "x", "y", "z", "{", "|", "}", "~", "\\x7f", "\\x80", "\\x81", + "\\x82", "\\x83", "\\x84", "\\x85", "\\x86", "\\x87", "\\x88", "\\x89", "\\x8a", "\\x8b", "\\x8c", "\\x8d", "\\x8e", + "\\x8f", "\\x90", "\\x91", "\\x92", "\\x93", "\\x94", "\\x95", "\\x96", "\\x97", "\\x98", "\\x99", "\\x9a", "\\x9b", + "\\x9c", "\\x9d", "\\x9e", "\\x9f", "\\xa0", "\\xa1", "\\xa2", "\\xa3", "\\xa4", "\\xa5", "\\xa6", "\\xa7", "\\xa8", + "\\xa9", "\\xaa", "\\xab", "\\xac", "\\xad", "\\xae", "\\xaf", "\\xb0", "\\xb1", "\\xb2", "\\xb3", "\\xb4", "\\xb5", + "\\xb6", "\\xb7", "\\xb8", "\\xb9", "\\xba", "\\xbb", "\\xbc", "\\xbd", "\\xbe", "\\xbf", "\\xc0", "\\xc1", "\\xc2", + "\\xc3", "\\xc4", "\\xc5", "\\xc6", "\\xc7", "\\xc8", "\\xc9", "\\xca", "\\xcb", "\\xcc", "\\xcd", "\\xce", "\\xcf", + "\\xd0", "\\xd1", "\\xd2", "\\xd3", "\\xd4", "\\xd5", "\\xd6", "\\xd7", "\\xd8", "\\xd9", "\\xda", "\\xdb", "\\xdc", + "\\xdd", "\\xde", "\\xdf", "\\xe0", "\\xe1", "\\xe2", "\\xe3", "\\xe4", "\\xe5", "\\xe6", "\\xe7", "\\xe8", "\\xe9", + "\\xea", "\\xeb", "\\xec", "\\xed", 
"\\xee", "\\xef", "\\xf0", "\\xf1", "\\xf2", "\\xf3", "\\xf4", "\\xf5", "\\xf6", + "\\xf7", "\\xf8", "\\xf9", "\\xfa", "\\xfb", "\\xfc", "\\xfd", "\\xfe", "\\xff"}; + +int converStringToReadable(char *str, int size, char *buf, int bufsize) { + char *pstr = str; + char *pbuf = buf; + while (size > 0) { + if (*pstr == '\0') break; + pbuf = stpcpy(pbuf, ascii_literal_list[((uint8_t)(*pstr))]); + pstr++; + size--; + } + *pbuf = '\0'; + return 0; +} + +int convertNCharToReadable(char *str, int size, char *buf, int bufsize) { + char *pstr = str; + char *pbuf = buf; + // TODO + wchar_t wc; + while (size > 0) { + if (*pstr == '\0') break; + int byte_width = mbtowc(&wc, pstr, MB_CUR_MAX); + if (byte_width < 0) { + fprintf(stderr, "mbtowc() return fail.\n"); + exit(-1); + } + + if ((int)wc < 256) { + pbuf = stpcpy(pbuf, ascii_literal_list[(int)wc]); + } else { + memcpy(pbuf, pstr, byte_width); + pbuf += byte_width; + } + pstr += byte_width; + } + + *pbuf = '\0'; + + return 0; +} + +void taosDumpCharset(FILE *fp) { + char charsetline[256]; + + (void)fseek(fp, 0, SEEK_SET); + sprintf(charsetline, "#!%s\n", tsCharset); + (void)fwrite(charsetline, strlen(charsetline), 1, fp); +} + +void taosLoadFileCharset(FILE *fp, char *fcharset) { + char * line = NULL; + size_t line_size = 0; + + (void)fseek(fp, 0, SEEK_SET); + ssize_t size = getline(&line, &line_size, fp); + if (size <= 2) { + goto _exit_no_charset; + } + + if (strncmp(line, "#!", 2) != 0) { + goto _exit_no_charset; + } + if (line[size - 1] == '\n') { + line[size - 1] = '\0'; + size--; + } + strcpy(fcharset, line + 2); + + tfree(line); + return; + +_exit_no_charset: + (void)fseek(fp, 0, SEEK_SET); + *fcharset = '\0'; + tfree(line); + return; +} + +// ======== dumpIn support multi threads functions ================================// + +static char **tsDumpInSqlFiles = NULL; +static int32_t tsSqlFileNum = 0; +static char tsDbSqlFile[TSDB_FILENAME_LEN] = {0}; +static char tsfCharset[64] = {0}; +static int taosGetFilesNum(const char *directoryName, const char *prefix) +{ + char cmd[1024] = { 0 }; + sprintf(cmd, "ls %s/*.%s | wc -l ", directoryName, prefix); + + FILE *fp = popen(cmd, "r"); + if (fp == NULL) { + fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, strerror(errno)); + exit(0); + } + + int fileNum = 0; + if (fscanf(fp, "%d", &fileNum) != 1) { + fprintf(stderr, "ERROR: failed to execute:%s, parse result error\n", cmd); + exit(0); + } + + if (fileNum <= 0) { + fprintf(stderr, "ERROR: directory:%s is empry\n", directoryName); + exit(0); + } + + pclose(fp); + return fileNum; +} + +static void taosParseDirectory(const char *directoryName, const char *prefix, char **fileArray, int totalFiles) +{ + char cmd[1024] = { 0 }; + sprintf(cmd, "ls %s/*.%s | sort", directoryName, prefix); + + FILE *fp = popen(cmd, "r"); + if (fp == NULL) { + fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, strerror(errno)); + exit(0); + } + + int fileNum = 0; + while (fscanf(fp, "%128s", fileArray[fileNum++])) { + if (strcmp(fileArray[fileNum-1], tsDbSqlFile) == 0) { + fileNum--; + } + if (fileNum >= totalFiles) { + break; + } + } + + if (fileNum != totalFiles) { + fprintf(stderr, "ERROR: directory:%s changed while read\n", directoryName); + pclose(fp); + exit(0); + } + + pclose(fp); +} + +static void taosCheckTablesSQLFile(const char *directoryName) +{ + char cmd[1024] = { 0 }; + sprintf(cmd, "ls %s/dbs.sql", directoryName); + + FILE *fp = popen(cmd, "r"); + if (fp == NULL) { + fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, 
strerror(errno)); + exit(0); + } + + while (fscanf(fp, "%128s", tsDbSqlFile)) { + break; + } + + pclose(fp); +} + +static void taosMallocSQLFiles() +{ + tsDumpInSqlFiles = (char**)calloc(tsSqlFileNum, sizeof(char*)); + for (int i = 0; i < tsSqlFileNum; i++) { + tsDumpInSqlFiles[i] = calloc(1, TSDB_FILENAME_LEN); + } +} + +static void taosFreeSQLFiles() +{ + for (int i = 0; i < tsSqlFileNum; i++) { + tfree(tsDumpInSqlFiles[i]); + } + tfree(tsDumpInSqlFiles); +} + +static void taosGetDirectoryFileList(char *inputDir) +{ + struct stat fileStat; + if (stat(inputDir, &fileStat) < 0) { + fprintf(stderr, "ERROR: %s not exist\n", inputDir); + exit(0); + } + + if (fileStat.st_mode & S_IFDIR) { + taosCheckTablesSQLFile(inputDir); + tsSqlFileNum = taosGetFilesNum(inputDir, "sql"); + int totalSQLFileNum = tsSqlFileNum; + if (tsDbSqlFile[0] != 0) { + tsSqlFileNum--; + } + taosMallocSQLFiles(); + taosParseDirectory(inputDir, "sql", tsDumpInSqlFiles, tsSqlFileNum); + fprintf(stdout, "\nstart to dispose %d files in %s\n", totalSQLFileNum, inputDir); + } + else { + fprintf(stderr, "ERROR: %s is not a directory\n", inputDir); + exit(0); + } +} + +static FILE* taosOpenDumpInFile(char *fptr) { + wordexp_t full_path; + + if (wordexp(fptr, &full_path, 0) != 0) { + fprintf(stderr, "ERROR: illegal file name: %s\n", fptr); + return NULL; + } + + char *fname = full_path.we_wordv[0]; + + FILE *f = fopen(fname, "r"); + if (f == NULL) { + fprintf(stderr, "ERROR: failed to open file %s\n", fname); + wordfree(&full_path); + return NULL; + } + + wordfree(&full_path); + + return f; +} + +int taosDumpInOneFile_old(TAOS * taos, FILE* fp, char* fcharset, char* encode) { + char *command = NULL; + char *lcommand = NULL; + int tsize = 0; + char *line = NULL; + _Bool isRun = true; + size_t line_size = 0; + char *pstr = NULL; + char *lstr = NULL; + size_t inbytesleft = 0; + size_t outbytesleft = COMMAND_SIZE; + char *tcommand = NULL; + char *charsetOfFile = NULL; + iconv_t cd = (iconv_t)(-1); + + command = (char *)malloc(COMMAND_SIZE); + lcommand = (char *)malloc(COMMAND_SIZE); + if (command == NULL || lcommand == NULL) { + fprintf(stderr, "failed to connect to allocate memory\n"); + goto _dumpin_exit_failure; + } + + // Resolve locale + if (*fcharset != '\0') { + charsetOfFile = fcharset; + } else { + charsetOfFile = encode; + } + + if (charsetOfFile != NULL && strcasecmp(tsCharset, charsetOfFile) != 0) { + cd = iconv_open(tsCharset, charsetOfFile); + if (cd == ((iconv_t)(-1))) { + fprintf(stderr, "Failed to open iconv handle\n"); + goto _dumpin_exit_failure; + } + } + + pstr = command; + int64_t linenu = 0; + while (1) { + ssize_t size = getline(&line, &line_size, fp); + linenu++; + if (size <= 0) break; + if (size == 1) { + if (pstr != command) { + inbytesleft = pstr - command; + memset(lcommand, 0, COMMAND_SIZE); + pstr = command; + lstr = lcommand; + outbytesleft = COMMAND_SIZE; + if (cd != ((iconv_t)(-1))) { + iconv(cd, &pstr, &inbytesleft, &lstr, &outbytesleft); + tcommand = lcommand; + } else { + tcommand = command; + } + + taosReplaceCtrlChar(tcommand); + + if (queryDB(taos, tcommand) != 0) { + fprintf(stderr, "error sql: linenu: %" PRId64 " failed\n", linenu); + exit(0); + } + + pstr = command; + pstr[0] = '\0'; + tsize = 0; + isRun = true; + } + + continue; + } + + /* if (line[0] == '-' && line[1] == '-') continue; */ + + line[size - 1] = 0; + + if (tsize + size - 1 > COMMAND_SIZE) { + fprintf(stderr, "command is too long\n"); + goto _dumpin_exit_failure; + } + + if (line[size - 2] == '\\') { + line[size - 2] = ' '; + 
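        /* A trailing backslash marks a continued statement: the backslash is
           replaced by a space and isRun is cleared so the next line is
           appended to the same command buffer before it is executed. */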
isRun = false; + } else { + isRun = true; + } + + memcpy(pstr, line, size - 1); + pstr += (size - 1); + *pstr = '\0'; + + if (!isRun) continue; + + if (command != pstr && !isEmptyCommand(command)) { + inbytesleft = pstr - command; + memset(lcommand, 0, COMMAND_SIZE); + pstr = command; + lstr = lcommand; + outbytesleft = COMMAND_SIZE; + if (cd != ((iconv_t)(-1))) { + iconv(cd, &pstr, &inbytesleft, &lstr, &outbytesleft); + tcommand = lcommand; + } else { + tcommand = command; + } + taosReplaceCtrlChar(tcommand); + if (queryDB(taos, tcommand) != 0) { + fprintf(stderr, "error sql: linenu:%" PRId64 " failed\n", linenu); + exit(0); + } + } + + pstr = command; + *pstr = '\0'; + tsize = 0; + } + + if (pstr != command) { + inbytesleft = pstr - command; + memset(lcommand, 0, COMMAND_SIZE); + pstr = command; + lstr = lcommand; + outbytesleft = COMMAND_SIZE; + if (cd != ((iconv_t)(-1))) { + iconv(cd, &pstr, &inbytesleft, &lstr, &outbytesleft); + tcommand = lcommand; + } else { + tcommand = command; + } + taosReplaceCtrlChar(lcommand); + if (queryDB(taos, tcommand) != 0) + fprintf(stderr, "error sql: linenu:%" PRId64 " failed \n", linenu); + } + + if (cd != ((iconv_t)(-1))) iconv_close(cd); + tfree(line); + tfree(command); + tfree(lcommand); + taos_close(taos); + fclose(fp); + return 0; + +_dumpin_exit_failure: + if (cd != ((iconv_t)(-1))) iconv_close(cd); + tfree(command); + tfree(lcommand); + taos_close(taos); + fclose(fp); + return -1; +} + +int taosDumpInOneFile(TAOS * taos, FILE* fp, char* fcharset, char* encode, char* fileName) { + int read_len = 0; + char * cmd = NULL; + size_t cmd_len = 0; + char * line = NULL; + size_t line_len = 0; + + cmd = (char *)malloc(COMMAND_SIZE); + if (cmd == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + return -1; + } + + int lineNo = 0; + while ((read_len = getline(&line, &line_len, fp)) != -1) { + ++lineNo; + if (read_len >= COMMAND_SIZE) continue; + line[--read_len] = '\0'; + + //if (read_len == 0 || isCommentLine(line)) { // line starts with # + if (read_len == 0 ) { + continue; + } + + if (line[read_len - 1] == '\\') { + line[read_len - 1] = ' '; + memcpy(cmd + cmd_len, line, read_len); + cmd_len += read_len; + continue; + } + + memcpy(cmd + cmd_len, line, read_len); + cmd[read_len + cmd_len]= '\0'; + if (queryDB(taos, cmd)) { + fprintf(stderr, "error sql: linenu:%d, file:%s\n", lineNo, fileName); + } + + memset(cmd, 0, COMMAND_SIZE); + cmd_len = 0; + } + + tfree(cmd); + tfree(line); + fclose(fp); + return 0; +} + +void* taosDumpInWorkThreadFp(void *arg) +{ + SThreadParaObj *pThread = (SThreadParaObj*)arg; + for (int32_t f = 0; f < tsSqlFileNum; ++f) { + if (f % pThread->totalThreads == pThread->threadIndex) { + char *SQLFileName = tsDumpInSqlFiles[f]; + FILE* fp = taosOpenDumpInFile(SQLFileName); + if (NULL == fp) { + continue; + } + fprintf(stderr, "Success Open input file: %s\n", SQLFileName); + taosDumpInOneFile(pThread->taosCon, fp, tsfCharset, tsArguments.encode, SQLFileName); + } + } + + return NULL; +} + +static void taosStartDumpInWorkThreads(struct arguments *args) +{ + pthread_attr_t thattr; + SThreadParaObj *pThread; + int32_t totalThreads = args->thread_num; + + if (totalThreads > tsSqlFileNum) { + totalThreads = tsSqlFileNum; + } + + SThreadParaObj *threadObj = (SThreadParaObj *)calloc(totalThreads, sizeof(SThreadParaObj)); + for (int32_t t = 0; t < totalThreads; ++t) { + pThread = threadObj + t; + pThread->threadIndex = t; + pThread->totalThreads = totalThreads; + pThread->taosCon = taos_connect(args->host, args->user, 
args->password, NULL, args->port); + if (pThread->taosCon == NULL) { + fprintf(stderr, "ERROR: thread:%d failed connect to TDengine, error:%s\n", pThread->threadIndex, taos_errstr(pThread->taosCon)); + exit(0); + } + + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); + + if (pthread_create(&(pThread->threadID), &thattr, taosDumpInWorkThreadFp, (void*)pThread) != 0) { + fprintf(stderr, "ERROR: thread:%d failed to start\n", pThread->threadIndex); + exit(0); + } + } + + for (int t = 0; t < totalThreads; ++t) { + pthread_join(threadObj[t].threadID, NULL); + } + + for (int t = 0; t < totalThreads; ++t) { + taos_close(threadObj[t].taosCon); + } + free(threadObj); +} + + +int taosDumpIn(struct arguments *arguments) { + assert(arguments->isDumpIn); + + TAOS *taos = NULL; + FILE *fp = NULL; + + taos = taos_connect(arguments->host, arguments->user, arguments->password, NULL, arguments->port); + if (taos == NULL) { + fprintf(stderr, "failed to connect to TDengine server\n"); + return -1; + } + + taosGetDirectoryFileList(arguments->inpath); + + if (tsDbSqlFile[0] != 0) { + fp = taosOpenDumpInFile(tsDbSqlFile); + if (NULL == fp) { + fprintf(stderr, "failed to open input file %s\n", tsDbSqlFile); + return -1; + } + fprintf(stderr, "Success Open input file: %s\n", tsDbSqlFile); + + taosLoadFileCharset(fp, tsfCharset); + + taosDumpInOneFile(taos, fp, tsfCharset, arguments->encode, tsDbSqlFile); + } + + taosStartDumpInWorkThreads(arguments); + + taos_close(taos); + taosFreeSQLFiles(); + return 0; +} + + diff --git a/src/kit/taosdump/taosdump.sh b/src/kit/taosdump/taosdump.sh new file mode 100755 index 0000000000000000000000000000000000000000..6d32c090dbb0f538b0fc0abb4a9588ee08037a95 --- /dev/null +++ b/src/kit/taosdump/taosdump.sh @@ -0,0 +1,48 @@ +taos1_6="/root/mnt/work/test/td1.6/build/bin/taos" +taosdump1_6="/root/mnt/work/test/td1.6/build/bin/taosdump" +taoscfg1_6="/root/mnt/work/test/td1.6/test/cfg" + +taos2_0="/root/mnt/work/test/td2.0/build/bin/taos" +taosdump2_0="/root/mnt/work/test/td2.0/build/bin/taosdump" +taoscfg2_0="/root/mnt/work/test/td2.0/test/cfg" + +data_dir="/root/mnt/work/test/td1.6/output" +table_list="/root/mnt/work/test/td1.6/tables" + +DBNAME="test" +NTABLES=$(wc -l ${table_list} | awk '{print $1;}') +NTABLES_PER_DUMP=101 + +mkdir -p ${data_dir} +i=0 +round=0 +command="${taosdump1_6} -c ${taoscfg1_6} -o ${data_dir} -N 100 -T 20 ${DBNAME}" +while IFS= read -r line +do + i=$((i+1)) + + command="${command} ${line}" + + if [[ "$i" -eq ${NTABLES_PER_DUMP} ]]; then + round=$((round+1)) + echo "Starting round ${round} dump out..." + rm -f ${data_dir}/* + ${command} + echo "Starting round ${round} dump in..." + ${taosdump2_0} -c ${taoscfg2_0} -i ${data_dir} + + # Reset variables + # command="${taosdump1_6} -c ${taoscfg1_6} -o ${data_dir} -N 100 ${DBNAME}" + command="${taosdump1_6} -c ${taoscfg1_6} -o ${data_dir} -N 100 -T 20 ${DBNAME}" + i=0 + fi +done < "${table_list}" + +if [[ ${i} -ne "0" ]]; then + round=$((round+1)) + echo "Starting round ${round} dump out..." + rm -f ${data_dir}/* + ${command} + echo "Starting round ${round} dump in..." 
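  # import the remainder batch that was just dumped out into the TDengine 2.0 instance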
+ ${taosdump2_0} -c ${taoscfg2_0} -i ${data_dir} +fi diff --git a/src/mnode/inc/mnodeDef.h b/src/mnode/inc/mnodeDef.h index 4bc840f026d41dc4fe4e8d61c84d53140501bff6..6d3061c4269f54f0075a07ee75115f43e856479b 100644 --- a/src/mnode/inc/mnodeDef.h +++ b/src/mnode/inc/mnodeDef.h @@ -89,7 +89,7 @@ typedef struct STableObj { int8_t type; } STableObj; -typedef struct SSuperTableObj { +typedef struct SSTableObj { STableObj info; int8_t reserved0[9]; // for fill struct STableObj to 4byte align int16_t nextColId; @@ -104,7 +104,7 @@ typedef struct SSuperTableObj { int32_t numOfTables; SSchema * schema; void * vgHash; -} SSuperTableObj; +} SSTableObj; typedef struct { STableObj info; @@ -122,8 +122,8 @@ typedef struct { int32_t refCount; char* sql; //used by normal table SSchema* schema; //used by normal table - SSuperTableObj *superTable; -} SChildTableObj; + SSTableObj*superTable; +} SCTableObj; typedef struct { int32_t dnodeId; @@ -172,7 +172,8 @@ typedef struct { int8_t walLevel; int8_t replications; int8_t quorum; - int8_t reserved[12]; + int8_t update; + int8_t reserved[11]; } SDbCfg; typedef struct SDbObj { diff --git a/src/mnode/inc/mnodeProfile.h b/src/mnode/inc/mnodeProfile.h index 1e5b1c0f9c7c1c220a88339f00d46538c78cf938..ee57c5da1f66b620e8624a74ba774643c589e818 100644 --- a/src/mnode/inc/mnodeProfile.h +++ b/src/mnode/inc/mnodeProfile.h @@ -45,7 +45,7 @@ void mnodeCleanupProfile(); SConnObj *mnodeCreateConn(char *user, uint32_t ip, uint16_t port, int32_t pid, const char* app); SConnObj *mnodeAccquireConn(int32_t connId, char *user, uint32_t ip, uint16_t port); void mnodeReleaseConn(SConnObj *pConn); -int32_t mnodeSaveQueryStreamList(SConnObj *pConn, SCMHeartBeatMsg *pHBMsg); +int32_t mnodeSaveQueryStreamList(SConnObj *pConn, SHeartBeatMsg *pHBMsg); #ifdef __cplusplus } diff --git a/src/mnode/inc/mnodeSdb.h b/src/mnode/inc/mnodeSdb.h index f4854f69a0f315d06d402eef3a125b2a3e45ab0e..29d8cf1207c0cb82773780b5aa212bfa08791acd 100644 --- a/src/mnode/inc/mnodeSdb.h +++ b/src/mnode/inc/mnodeSdb.h @@ -20,7 +20,8 @@ extern "C" { #endif -struct SMnodeMsg; +#include "mnode.h" +#include "twal.h" typedef enum { SDB_TABLE_CLUSTER = 0, @@ -36,44 +37,46 @@ typedef enum { } ESdbTable; typedef enum { - SDB_KEY_STRING, - SDB_KEY_INT, - SDB_KEY_AUTO, - SDB_KEY_VAR_STRING, + SDB_KEY_STRING = 0, + SDB_KEY_INT = 1, + SDB_KEY_AUTO = 2, + SDB_KEY_VAR_STRING = 3, } ESdbKey; typedef enum { - SDB_OPER_GLOBAL, - SDB_OPER_LOCAL + SDB_OPER_GLOBAL = 0, + SDB_OPER_LOCAL = 1 } ESdbOper; -typedef struct SSdbOper { - ESdbOper type; - int32_t rowSize; - int32_t retCode; // for callback in sdb queue - int32_t processedCount; // for sync fwd callback - int32_t (*reqFp)(struct SMnodeMsg *pMsg); - int32_t (*writeCb)(struct SMnodeMsg *pMsg, int32_t code); - void * table; - void * pObj; - void * rowData; - struct SMnodeMsg *pMsg; -} SSdbOper; +typedef struct SSdbRow { + ESdbOper type; + int32_t processedCount; // for sync fwd callback + int32_t code; // for callback in sdb queue + int32_t rowSize; + void * rowData; + void * pObj; + void * pTable; + SMnodeMsg *pMsg; + int32_t (*fpReq)(SMnodeMsg *pMsg); + int32_t (*fpRsp)(SMnodeMsg *pMsg, int32_t code); + char reserveForSync[16]; + SWalHead pHead[]; +} SSdbRow; typedef struct { - char *tableName; - int32_t hashSessions; - int32_t maxRowSize; - int32_t refCountPos; - ESdbTable tableId; + char * name; + int32_t hashSessions; + int32_t maxRowSize; + int32_t refCountPos; + ESdbTable id; ESdbKey keyType; - int32_t (*insertFp)(SSdbOper *pOper); - int32_t (*deleteFp)(SSdbOper *pOper); - 
int32_t (*updateFp)(SSdbOper *pOper); - int32_t (*encodeFp)(SSdbOper *pOper); - int32_t (*decodeFp)(SSdbOper *pDesc); - int32_t (*destroyFp)(SSdbOper *pDesc); - int32_t (*restoredFp)(); + int32_t (*fpInsert)(SSdbRow *pRow); + int32_t (*fpDelete)(SSdbRow *pRow); + int32_t (*fpUpdate)(SSdbRow *pRow); + int32_t (*fpEncode)(SSdbRow *pRow); + int32_t (*fpDecode)(SSdbRow *pRow); + int32_t (*fpDestroy)(SSdbRow *pRow); + int32_t (*fpRestored)(); } SSdbTableDesc; int32_t sdbInit(); @@ -84,20 +87,20 @@ bool sdbIsMaster(); bool sdbIsServing(); void sdbUpdateMnodeRoles(); -int32_t sdbInsertRow(SSdbOper *pOper); -int32_t sdbDeleteRow(SSdbOper *pOper); -int32_t sdbUpdateRow(SSdbOper *pOper); -int32_t sdbInsertRowImp(SSdbOper *pOper); +int32_t sdbInsertRow(SSdbRow *pRow); +int32_t sdbDeleteRow(SSdbRow *pRow); +int32_t sdbUpdateRow(SSdbRow *pRow); +int32_t sdbInsertRowToQueue(SSdbRow *pRow); -void *sdbGetRow(void *handle, void *key); -void *sdbFetchRow(void *handle, void *pIter, void **ppRow); +void *sdbGetRow(void *pTable, void *key); +void *sdbFetchRow(void *pTable, void *pIter, void **ppRow); void sdbFreeIter(void *pIter); -void sdbIncRef(void *thandle, void *pRow); -void sdbDecRef(void *thandle, void *pRow); -int64_t sdbGetNumOfRows(void *handle); -int32_t sdbGetId(void *handle); +void sdbIncRef(void *pTable, void *pRow); +void sdbDecRef(void *pTable, void *pRow); +int64_t sdbGetNumOfRows(void *pTable); +int32_t sdbGetId(void *pTable); uint64_t sdbGetVersion(); -bool sdbCheckRowDeleted(void *thandle, void *pRow); +bool sdbCheckRowDeleted(void *pTable, void *pRow); #ifdef __cplusplus } diff --git a/src/mnode/inc/mnodeTable.h b/src/mnode/inc/mnodeTable.h index ed0dbe4ecfc4b344052c70487c3b740d81eaa0f3..7c0077aa609843aa99e9f5114573aea8f7446fc1 100644 --- a/src/mnode/inc/mnodeTable.h +++ b/src/mnode/inc/mnodeTable.h @@ -29,8 +29,8 @@ int64_t mnodeGetChildTableNum(); void * mnodeGetTable(char *tableId); void mnodeIncTableRef(void *pTable); void mnodeDecTableRef(void *pTable); -void * mnodeGetNextChildTable(void *pIter, SChildTableObj **pTable); -void * mnodeGetNextSuperTable(void *pIter, SSuperTableObj **pTable); +void * mnodeGetNextChildTable(void *pIter, SCTableObj **pTable); +void * mnodeGetNextSuperTable(void *pIter, SSTableObj **pTable); void mnodeDropAllChildTables(SDbObj *pDropDb); void mnodeDropAllSuperTables(SDbObj *pDropDb); void mnodeDropAllChildTablesInVgroups(SVgObj *pVgroup); diff --git a/src/mnode/inc/mnodeVgroup.h b/src/mnode/inc/mnodeVgroup.h index 7aa662b81cc08c90d6453ff5c73191ad81416073..0e6d9dfde463dc50810654921b2006fda212e496 100644 --- a/src/mnode/inc/mnodeVgroup.h +++ b/src/mnode/inc/mnodeVgroup.h @@ -43,8 +43,8 @@ void mnodeDropVgroup(SVgObj *pVgroup, void *ahandle); void mnodeAlterVgroup(SVgObj *pVgroup, void *ahandle); int32_t mnodeGetAvailableVgroup(struct SMnodeMsg *pMsg, SVgObj **pVgroup, int32_t *sid); -void mnodeAddTableIntoVgroup(SVgObj *pVgroup, SChildTableObj *pTable); -void mnodeRemoveTableFromVgroup(SVgObj *pVgroup, SChildTableObj *pTable); +void mnodeAddTableIntoVgroup(SVgObj *pVgroup, SCTableObj *pTable); +void mnodeRemoveTableFromVgroup(SVgObj *pVgroup, SCTableObj *pTable); void mnodeSendDropVnodeMsg(int32_t vgId, SRpcEpSet *epSet, void *ahandle); void mnodeSendCreateVgroupMsg(SVgObj *pVgroup, void *ahandle); void mnodeSendAlterVgroupMsg(SVgObj *pVgroup); diff --git a/src/mnode/src/mnodeAcct.c b/src/mnode/src/mnodeAcct.c index e161940a2b1a07572d9beff5222b8eb8d3829075..9fff2f02291d6b23163c0e58d3d2bfe545da6eb5 100644 --- a/src/mnode/src/mnodeAcct.c +++ 
b/src/mnode/src/mnodeAcct.c @@ -16,6 +16,7 @@ #define _DEFAULT_SOURCE #include "os.h" #include "taoserror.h" +#include "tglobal.h" #include "dnode.h" #include "mnodeDef.h" #include "mnodeInt.h" @@ -25,36 +26,34 @@ #include "mnodeUser.h" #include "mnodeVgroup.h" -#include "tglobal.h" - void * tsAcctSdb = NULL; static int32_t tsAcctUpdateSize; static int32_t mnodeCreateRootAcct(); -static int32_t mnodeAcctActionDestroy(SSdbOper *pOper) { - SAcctObj *pAcct = pOper->pObj; +static int32_t mnodeAcctActionDestroy(SSdbRow *pRow) { + SAcctObj *pAcct = pRow->pObj; pthread_mutex_destroy(&pAcct->mutex); - taosTFree(pOper->pObj); + tfree(pRow->pObj); return TSDB_CODE_SUCCESS; } -static int32_t mnodeAcctActionInsert(SSdbOper *pOper) { - SAcctObj *pAcct = pOper->pObj; +static int32_t mnodeAcctActionInsert(SSdbRow *pRow) { + SAcctObj *pAcct = pRow->pObj; memset(&pAcct->acctInfo, 0, sizeof(SAcctInfo)); pAcct->acctInfo.accessState = TSDB_VN_ALL_ACCCESS; pthread_mutex_init(&pAcct->mutex, NULL); return TSDB_CODE_SUCCESS; } -static int32_t mnodeAcctActionDelete(SSdbOper *pOper) { - SAcctObj *pAcct = pOper->pObj; +static int32_t mnodeAcctActionDelete(SSdbRow *pRow) { + SAcctObj *pAcct = pRow->pObj; mnodeDropAllUsers(pAcct); mnodeDropAllDbs(pAcct); return TSDB_CODE_SUCCESS; } -static int32_t mnodeAcctActionUpdate(SSdbOper *pOper) { - SAcctObj *pAcct = pOper->pObj; +static int32_t mnodeAcctActionUpdate(SSdbRow *pRow) { + SAcctObj *pAcct = pRow->pObj; SAcctObj *pSaved = mnodeGetAcct(pAcct->user); if (pAcct != pSaved) { memcpy(pSaved, pAcct, tsAcctUpdateSize); @@ -64,19 +63,19 @@ static int32_t mnodeAcctActionUpdate(SSdbOper *pOper) { return TSDB_CODE_SUCCESS; } -static int32_t mnodeAcctActionEncode(SSdbOper *pOper) { - SAcctObj *pAcct = pOper->pObj; - memcpy(pOper->rowData, pAcct, tsAcctUpdateSize); - pOper->rowSize = tsAcctUpdateSize; +static int32_t mnodeAcctActionEncode(SSdbRow *pRow) { + SAcctObj *pAcct = pRow->pObj; + memcpy(pRow->rowData, pAcct, tsAcctUpdateSize); + pRow->rowSize = tsAcctUpdateSize; return TSDB_CODE_SUCCESS; } -static int32_t mnodeAcctActionDecode(SSdbOper *pOper) { +static int32_t mnodeAcctActionDecode(SSdbRow *pRow) { SAcctObj *pAcct = (SAcctObj *) calloc(1, sizeof(SAcctObj)); if (pAcct == NULL) return TSDB_CODE_MND_OUT_OF_MEMORY; - memcpy(pAcct, pOper->rowData, tsAcctUpdateSize); - pOper->pObj = pAcct; + memcpy(pAcct, pRow->rowData, tsAcctUpdateSize); + pRow->pObj = pAcct; return TSDB_CODE_SUCCESS; } @@ -99,29 +98,29 @@ int32_t mnodeInitAccts() { SAcctObj tObj; tsAcctUpdateSize = (int8_t *)tObj.updateEnd - (int8_t *)&tObj; - SSdbTableDesc tableDesc = { - .tableId = SDB_TABLE_ACCOUNT, - .tableName = "accounts", + SSdbTableDesc desc = { + .id = SDB_TABLE_ACCOUNT, + .name = "accounts", .hashSessions = TSDB_DEFAULT_ACCOUNTS_HASH_SIZE, .maxRowSize = tsAcctUpdateSize, .refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj, .keyType = SDB_KEY_STRING, - .insertFp = mnodeAcctActionInsert, - .deleteFp = mnodeAcctActionDelete, - .updateFp = mnodeAcctActionUpdate, - .encodeFp = mnodeAcctActionEncode, - .decodeFp = mnodeAcctActionDecode, - .destroyFp = mnodeAcctActionDestroy, - .restoredFp = mnodeAcctActionRestored + .fpInsert = mnodeAcctActionInsert, + .fpDelete = mnodeAcctActionDelete, + .fpUpdate = mnodeAcctActionUpdate, + .fpEncode = mnodeAcctActionEncode, + .fpDecode = mnodeAcctActionDecode, + .fpDestroy = mnodeAcctActionDestroy, + .fpRestored = mnodeAcctActionRestored }; - tsAcctSdb = sdbOpenTable(&tableDesc); + tsAcctSdb = sdbOpenTable(&desc); if (tsAcctSdb == NULL) { - mError("table:%s, 
failed to create hash", tableDesc.tableName); + mError("table:%s, failed to create hash", desc.name); return -1; } - mDebug("table:%s, hash is created", tableDesc.tableName); + mDebug("table:%s, hash is created", desc.name); return TSDB_CODE_SUCCESS; } @@ -226,13 +225,13 @@ static int32_t mnodeCreateRootAcct() { pAcct->acctId = sdbGetId(tsAcctSdb); pAcct->createdTime = taosGetTimestampMs(); - SSdbOper oper = { - .type = SDB_OPER_GLOBAL, - .table = tsAcctSdb, - .pObj = pAcct, + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsAcctSdb, + .pObj = pAcct, }; - return sdbInsertRow(&oper); + return sdbInsertRow(&row); } #ifndef _ACCT diff --git a/src/mnode/src/mnodeCluster.c b/src/mnode/src/mnodeCluster.c index 35b6a67ab24d5ee50a4f4da7bb902ab0d7232b7e..5be67e4ad9b1b1746598331997e428f4e78607cf 100644 --- a/src/mnode/src/mnodeCluster.c +++ b/src/mnode/src/mnodeCluster.c @@ -32,36 +32,36 @@ static int32_t mnodeCreateCluster(); static int32_t mnodeGetClusterMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn); static int32_t mnodeRetrieveClusters(SShowObj *pShow, char *data, int32_t rows, void *pConn); -static int32_t mnodeClusterActionDestroy(SSdbOper *pOper) { - taosTFree(pOper->pObj); +static int32_t mnodeClusterActionDestroy(SSdbRow *pRow) { + tfree(pRow->pObj); return TSDB_CODE_SUCCESS; } -static int32_t mnodeClusterActionInsert(SSdbOper *pOper) { +static int32_t mnodeClusterActionInsert(SSdbRow *pRow) { return TSDB_CODE_SUCCESS; } -static int32_t mnodeClusterActionDelete(SSdbOper *pOper) { +static int32_t mnodeClusterActionDelete(SSdbRow *pRow) { return TSDB_CODE_SUCCESS; } -static int32_t mnodeClusterActionUpdate(SSdbOper *pOper) { +static int32_t mnodeClusterActionUpdate(SSdbRow *pRow) { return TSDB_CODE_SUCCESS; } -static int32_t mnodeClusterActionEncode(SSdbOper *pOper) { - SClusterObj *pCluster = pOper->pObj; - memcpy(pOper->rowData, pCluster, tsClusterUpdateSize); - pOper->rowSize = tsClusterUpdateSize; +static int32_t mnodeClusterActionEncode(SSdbRow *pRow) { + SClusterObj *pCluster = pRow->pObj; + memcpy(pRow->rowData, pCluster, tsClusterUpdateSize); + pRow->rowSize = tsClusterUpdateSize; return TSDB_CODE_SUCCESS; } -static int32_t mnodeClusterActionDecode(SSdbOper *pOper) { +static int32_t mnodeClusterActionDecode(SSdbRow *pRow) { SClusterObj *pCluster = (SClusterObj *) calloc(1, sizeof(SClusterObj)); if (pCluster == NULL) return TSDB_CODE_MND_OUT_OF_MEMORY; - memcpy(pCluster, pOper->rowData, tsClusterUpdateSize); - pOper->pObj = pCluster; + memcpy(pCluster, pRow->rowData, tsClusterUpdateSize); + pRow->pObj = pCluster; return TSDB_CODE_SUCCESS; } @@ -84,32 +84,32 @@ int32_t mnodeInitCluster() { SClusterObj tObj; tsClusterUpdateSize = (int8_t *)tObj.updateEnd - (int8_t *)&tObj; - SSdbTableDesc tableDesc = { - .tableId = SDB_TABLE_CLUSTER, - .tableName = "cluster", + SSdbTableDesc desc = { + .id = SDB_TABLE_CLUSTER, + .name = "cluster", .hashSessions = TSDB_DEFAULT_CLUSTER_HASH_SIZE, .maxRowSize = tsClusterUpdateSize, .refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj, .keyType = SDB_KEY_STRING, - .insertFp = mnodeClusterActionInsert, - .deleteFp = mnodeClusterActionDelete, - .updateFp = mnodeClusterActionUpdate, - .encodeFp = mnodeClusterActionEncode, - .decodeFp = mnodeClusterActionDecode, - .destroyFp = mnodeClusterActionDestroy, - .restoredFp = mnodeClusterActionRestored + .fpInsert = mnodeClusterActionInsert, + .fpDelete = mnodeClusterActionDelete, + .fpUpdate = mnodeClusterActionUpdate, + .fpEncode = mnodeClusterActionEncode, + .fpDecode = 
mnodeClusterActionDecode, + .fpDestroy = mnodeClusterActionDestroy, + .fpRestored = mnodeClusterActionRestored }; - tsClusterSdb = sdbOpenTable(&tableDesc); + tsClusterSdb = sdbOpenTable(&desc); if (tsClusterSdb == NULL) { - mError("table:%s, failed to create hash", tableDesc.tableName); + mError("table:%s, failed to create hash", desc.name); return -1; } mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_CLUSTER, mnodeGetClusterMeta); mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_CLUSTER, mnodeRetrieveClusters); - mDebug("table:%s, hash is created", tableDesc.tableName); + mDebug("table:%s, hash is created", desc.name); return TSDB_CODE_SUCCESS; } @@ -145,13 +145,13 @@ static int32_t mnodeCreateCluster() { mDebug("uid is %s", pCluster->uid); } - SSdbOper oper = { - .type = SDB_OPER_GLOBAL, - .table = tsClusterSdb, - .pObj = pCluster, + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsClusterSdb, + .pObj = pCluster, }; - return sdbInsertRow(&oper); + return sdbInsertRow(&row); } const char* mnodeGetClusterId() { diff --git a/src/mnode/src/mnodeDb.c b/src/mnode/src/mnodeDb.c index 8d7c267ab7cb328bb187518a88be3d344b0d53e2..d121208447c498280fb6f397e2181e2993ad1dbe 100644 --- a/src/mnode/src/mnodeDb.c +++ b/src/mnode/src/mnodeDb.c @@ -41,7 +41,7 @@ static void * tsDbSdb = NULL; static int32_t tsDbUpdateSize; -static int32_t mnodeCreateDb(SAcctObj *pAcct, SCMCreateDbMsg *pCreate, SMnodeMsg *pMsg); +static int32_t mnodeCreateDb(SAcctObj *pAcct, SCreateDbMsg *pCreate, SMnodeMsg *pMsg); static int32_t mnodeDropDb(SMnodeMsg *newMsg); static int32_t mnodeSetDbDropping(SDbObj *pDb); static int32_t mnodeGetDbMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn); @@ -52,12 +52,12 @@ static int32_t mnodeProcessDropDbMsg(SMnodeMsg *pMsg); static void mnodeDestroyDb(SDbObj *pDb) { pthread_mutex_destroy(&pDb->mutex); - taosTFree(pDb->vgList); - taosTFree(pDb); + tfree(pDb->vgList); + tfree(pDb); } -static int32_t mnodeDbActionDestroy(SSdbOper *pOper) { - mnodeDestroyDb(pOper->pObj); +static int32_t mnodeDbActionDestroy(SSdbRow *pRow) { + mnodeDestroyDb(pRow->pObj); return TSDB_CODE_SUCCESS; } @@ -65,8 +65,8 @@ int64_t mnodeGetDbNum() { return sdbGetNumOfRows(tsDbSdb); } -static int32_t mnodeDbActionInsert(SSdbOper *pOper) { - SDbObj *pDb = pOper->pObj; +static int32_t mnodeDbActionInsert(SSdbRow *pRow) { + SDbObj *pDb = pRow->pObj; SAcctObj *pAcct = mnodeGetAcct(pDb->acct); pthread_mutex_init(&pDb->mutex, NULL); @@ -91,8 +91,8 @@ static int32_t mnodeDbActionInsert(SSdbOper *pOper) { return TSDB_CODE_SUCCESS; } -static int32_t mnodeDbActionDelete(SSdbOper *pOper) { - SDbObj *pDb = pOper->pObj; +static int32_t mnodeDbActionDelete(SSdbRow *pRow) { + SDbObj *pDb = pRow->pObj; SAcctObj *pAcct = mnodeGetAcct(pDb->acct); mnodeDropAllChildTables(pDb); @@ -107,11 +107,11 @@ static int32_t mnodeDbActionDelete(SSdbOper *pOper) { return TSDB_CODE_SUCCESS; } -static int32_t mnodeDbActionUpdate(SSdbOper *pOper) { - SDbObj *pNew = pOper->pObj; +static int32_t mnodeDbActionUpdate(SSdbRow *pRow) { + SDbObj *pNew = pRow->pObj; SDbObj *pDb = mnodeGetDb(pNew->name); if (pDb != NULL && pNew != pDb) { - memcpy(pDb, pNew, pOper->rowSize); + memcpy(pDb, pNew, pRow->rowSize); free(pNew->vgList); free(pNew); } @@ -120,19 +120,19 @@ static int32_t mnodeDbActionUpdate(SSdbOper *pOper) { return TSDB_CODE_SUCCESS; } -static int32_t mnodeDbActionEncode(SSdbOper *pOper) { - SDbObj *pDb = pOper->pObj; - memcpy(pOper->rowData, pDb, tsDbUpdateSize); - pOper->rowSize = tsDbUpdateSize; +static int32_t mnodeDbActionEncode(SSdbRow *pRow) { + SDbObj 
*pDb = pRow->pObj; + memcpy(pRow->rowData, pDb, tsDbUpdateSize); + pRow->rowSize = tsDbUpdateSize; return TSDB_CODE_SUCCESS; } -static int32_t mnodeDbActionDecode(SSdbOper *pOper) { +static int32_t mnodeDbActionDecode(SSdbRow *pRow) { SDbObj *pDb = (SDbObj *) calloc(1, sizeof(SDbObj)); if (pDb == NULL) return TSDB_CODE_MND_OUT_OF_MEMORY; - memcpy(pDb, pOper->rowData, tsDbUpdateSize); - pOper->pObj = pDb; + memcpy(pDb, pRow->rowData, tsDbUpdateSize); + pRow->pObj = pDb; return TSDB_CODE_SUCCESS; } @@ -144,23 +144,23 @@ int32_t mnodeInitDbs() { SDbObj tObj; tsDbUpdateSize = (int8_t *)tObj.updateEnd - (int8_t *)&tObj; - SSdbTableDesc tableDesc = { - .tableId = SDB_TABLE_DB, - .tableName = "dbs", + SSdbTableDesc desc = { + .id = SDB_TABLE_DB, + .name = "dbs", .hashSessions = TSDB_DEFAULT_DBS_HASH_SIZE, .maxRowSize = tsDbUpdateSize, .refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj, .keyType = SDB_KEY_STRING, - .insertFp = mnodeDbActionInsert, - .deleteFp = mnodeDbActionDelete, - .updateFp = mnodeDbActionUpdate, - .encodeFp = mnodeDbActionEncode, - .decodeFp = mnodeDbActionDecode, - .destroyFp = mnodeDbActionDestroy, - .restoredFp = mnodeDbActionRestored + .fpInsert = mnodeDbActionInsert, + .fpDelete = mnodeDbActionDelete, + .fpUpdate = mnodeDbActionUpdate, + .fpEncode = mnodeDbActionEncode, + .fpDecode = mnodeDbActionDecode, + .fpDestroy = mnodeDbActionDestroy, + .fpRestored = mnodeDbActionRestored }; - tsDbSdb = sdbOpenTable(&tableDesc); + tsDbSdb = sdbOpenTable(&desc); if (tsDbSdb == NULL) { mError("failed to init db data"); return -1; @@ -319,6 +319,11 @@ static int32_t mnodeCheckDbCfg(SDbCfg *pCfg) { } #endif + if (pCfg->update < TSDB_MIN_DB_UPDATE || pCfg->update > TSDB_MAX_DB_UPDATE) { + mError("invalid db option update:%d valid range: [%d, %d]", pCfg->update, TSDB_MIN_DB_UPDATE, TSDB_MAX_DB_UPDATE); + return TSDB_CODE_MND_INVALID_DB_OPTION; + } + return TSDB_CODE_SUCCESS; } @@ -339,6 +344,7 @@ static void mnodeSetDefaultDbCfg(SDbCfg *pCfg) { if (pCfg->walLevel < 0) pCfg->walLevel = tsWAL; if (pCfg->replications < 0) pCfg->replications = tsReplications; if (pCfg->quorum < 0) pCfg->quorum = tsQuorum; + if (pCfg->update < 0) pCfg->update = tsUpdate; } static int32_t mnodeCreateDbCb(SMnodeMsg *pMsg, int32_t code) { @@ -352,7 +358,7 @@ static int32_t mnodeCreateDbCb(SMnodeMsg *pMsg, int32_t code) { return code; } -static int32_t mnodeCreateDb(SAcctObj *pAcct, SCMCreateDbMsg *pCreate, SMnodeMsg *pMsg) { +static int32_t mnodeCreateDb(SAcctObj *pAcct, SCreateDbMsg *pCreate, SMnodeMsg *pMsg) { int32_t code = acctCheck(pAcct, ACCT_GRANT_DB); if (code != 0) return code; @@ -391,30 +397,31 @@ static int32_t mnodeCreateDb(SAcctObj *pAcct, SCMCreateDbMsg *pCreate, SMnodeMsg .compression = pCreate->compression, .walLevel = pCreate->walLevel, .replications = pCreate->replications, - .quorum = pCreate->quorum + .quorum = pCreate->quorum, + .update = pCreate->update }; mnodeSetDefaultDbCfg(&pDb->cfg); code = mnodeCheckDbCfg(&pDb->cfg); if (code != TSDB_CODE_SUCCESS) { - taosTFree(pDb); + tfree(pDb); return code; } pMsg->pDb = pDb; mnodeIncDbRef(pDb); - SSdbOper oper = { - .type = SDB_OPER_GLOBAL, - .table = tsDbSdb, - .pObj = pDb, - .rowSize = sizeof(SDbObj), - .pMsg = pMsg, - .writeCb = mnodeCreateDbCb + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsDbSdb, + .pObj = pDb, + .rowSize = sizeof(SDbObj), + .pMsg = pMsg, + .fpRsp = mnodeCreateDbCb }; - code = sdbInsertRow(&oper); + code = sdbInsertRow(&row); if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { 
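    /* TSDB_CODE_MND_ACTION_IN_PROGRESS is excluded above because it indicates
       the sdb write is still pending rather than failed; any other non-success
       code is reported here as a create failure. */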
mError("db:%s, failed to create, reason:%s", pDb->name, tstrerror(code)); pMsg->pDb = NULL; @@ -433,8 +440,8 @@ bool mnodeCheckIsMonitorDB(char *db, char *monitordb) { } #if 0 -void mnodePrintVgroups(SDbObj *pDb, char *oper) { - mInfo("db:%s, vgroup link from head, oper:%s", pDb->name, oper); +void mnodePrintVgroups(SDbObj *pDb, char *row) { + mInfo("db:%s, vgroup link from head, row:%s", pDb->name, row); SVgObj *pVgroup = pDb->pHead; while (pVgroup != NULL) { mInfo("vgId:%d", pVgroup->vgId); @@ -610,6 +617,12 @@ static int32_t mnodeGetDbMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn pSchema[cols].bytes = htons(pShow->bytes[cols]); cols++; + pShow->bytes[cols] = 1; + pSchema[cols].type = TSDB_DATA_TYPE_TINYINT; + strcpy(pSchema[cols].name, "update"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + pShow->bytes[cols] = 10 + VARSTR_HEADER_SIZE; pSchema[cols].type = TSDB_DATA_TYPE_BINARY; strcpy(pSchema[cols].name, "status"); @@ -749,6 +762,10 @@ static int32_t mnodeRetrieveDbs(SShowObj *pShow, char *data, int32_t rows, void STR_WITH_SIZE_TO_VARSTR(pWrite, prec, 2); cols++; + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int8_t *)pWrite = pDb->cfg.update; + cols++; + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; if (pDb->status == TSDB_DB_STATUS_READY) { const char *src = "ready"; @@ -790,13 +807,13 @@ static int32_t mnodeSetDbDropping(SDbObj *pDb) { if (pDb->status) return TSDB_CODE_SUCCESS; pDb->status = true; - SSdbOper oper = { - .type = SDB_OPER_GLOBAL, - .table = tsDbSdb, - .pObj = pDb + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsDbSdb, + .pObj = pDb }; - int32_t code = sdbUpdateRow(&oper); + int32_t code = sdbUpdateRow(&row); if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { mError("db:%s, failed to set dropping state, reason:%s", pDb->name, tstrerror(code)); } @@ -805,7 +822,7 @@ static int32_t mnodeSetDbDropping(SDbObj *pDb) { } static int32_t mnodeProcessCreateDbMsg(SMnodeMsg *pMsg) { - SCMCreateDbMsg *pCreate = pMsg->rpcMsg.pCont; + SCreateDbMsg *pCreate = pMsg->rpcMsg.pCont; pCreate->maxTables = htonl(pCreate->maxTables); pCreate->cacheBlockSize = htonl(pCreate->cacheBlockSize); pCreate->totalBlocks = htonl(pCreate->totalBlocks); @@ -830,7 +847,7 @@ static int32_t mnodeProcessCreateDbMsg(SMnodeMsg *pMsg) { return code; } -static SDbCfg mnodeGetAlterDbOption(SDbObj *pDb, SCMAlterDbMsg *pAlter) { +static SDbCfg mnodeGetAlterDbOption(SDbObj *pDb, SAlterDbMsg *pAlter) { SDbCfg newCfg = pDb->cfg; int32_t maxTables = htonl(pAlter->maxTables); int32_t cacheBlockSize = htonl(pAlter->cacheBlockSize); @@ -848,6 +865,7 @@ static SDbCfg mnodeGetAlterDbOption(SDbObj *pDb, SCMAlterDbMsg *pAlter) { int8_t replications = pAlter->replications; int8_t quorum = pAlter->quorum; int8_t precision = pAlter->precision; + int8_t update = pAlter->update; terrno = TSDB_CODE_SUCCESS; @@ -950,6 +968,16 @@ static SDbCfg mnodeGetAlterDbOption(SDbObj *pDb, SCMAlterDbMsg *pAlter) { newCfg.quorum = quorum; } + if (update >= 0 && update != pDb->cfg.update) { +#if 0 + mDebug("db:%s, update:%d change to %d", pDb->name, pDb->cfg.update, update); + newCfg.update = update; +#else + mError("db:%s, can't alter update option", pDb->name); + terrno = TSDB_CODE_MND_INVALID_DB_OPTION; +#endif + } + return newCfg; } @@ -977,7 +1005,7 @@ static int32_t mnodeAlterDbCb(SMnodeMsg *pMsg, int32_t code) { return TSDB_CODE_SUCCESS; } -static int32_t mnodeAlterDb(SDbObj *pDb, SCMAlterDbMsg *pAlter, void 
*pMsg) { +static int32_t mnodeAlterDb(SDbObj *pDb, SAlterDbMsg *pAlter, void *pMsg) { SDbCfg newCfg = mnodeGetAlterDbOption(pDb, pAlter); if (terrno != TSDB_CODE_SUCCESS) { return terrno; @@ -991,15 +1019,15 @@ static int32_t mnodeAlterDb(SDbObj *pDb, SCMAlterDbMsg *pAlter, void *pMsg) { if (memcmp(&newCfg, &pDb->cfg, sizeof(SDbCfg)) != 0) { pDb->cfg = newCfg; pDb->cfgVersion++; - SSdbOper oper = { - .type = SDB_OPER_GLOBAL, - .table = tsDbSdb, - .pObj = pDb, - .pMsg = pMsg, - .writeCb = mnodeAlterDbCb + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsDbSdb, + .pObj = pDb, + .pMsg = pMsg, + .fpRsp = mnodeAlterDbCb }; - code = sdbUpdateRow(&oper); + code = sdbUpdateRow(&row); if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { mError("db:%s, failed to alter, reason:%s", pDb->name, tstrerror(code)); } @@ -1009,7 +1037,7 @@ static int32_t mnodeAlterDb(SDbObj *pDb, SCMAlterDbMsg *pAlter, void *pMsg) { } static int32_t mnodeProcessAlterDbMsg(SMnodeMsg *pMsg) { - SCMAlterDbMsg *pAlter = pMsg->rpcMsg.pCont; + SAlterDbMsg *pAlter = pMsg->rpcMsg.pCont; mDebug("db:%s, alter db msg is received from thandle:%p", pAlter->db, pMsg->rpcMsg.handle); if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDb(pAlter->db); @@ -1043,15 +1071,15 @@ static int32_t mnodeDropDb(SMnodeMsg *pMsg) { SDbObj *pDb = pMsg->pDb; mInfo("db:%s, drop db from sdb", pDb->name); - SSdbOper oper = { - .type = SDB_OPER_GLOBAL, - .table = tsDbSdb, - .pObj = pDb, - .pMsg = pMsg, - .writeCb = mnodeDropDbCb + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsDbSdb, + .pObj = pDb, + .pMsg = pMsg, + .fpRsp = mnodeDropDbCb }; - int32_t code = sdbDeleteRow(&oper); + int32_t code = sdbDeleteRow(&row); if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { mError("db:%s, failed to drop, reason:%s", pDb->name, tstrerror(code)); } @@ -1060,7 +1088,7 @@ static int32_t mnodeDropDb(SMnodeMsg *pMsg) { } static int32_t mnodeProcessDropDbMsg(SMnodeMsg *pMsg) { - SCMDropDbMsg *pDrop = pMsg->rpcMsg.pCont; + SDropDbMsg *pDrop = pMsg->rpcMsg.pCont; mDebug("db:%s, drop db msg is received from thandle:%p", pDrop->db, pMsg->rpcMsg.handle); if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDb(pDrop->db); @@ -1106,13 +1134,13 @@ void mnodeDropAllDbs(SAcctObj *pAcct) { if (pDb->pAcct == pAcct) { mInfo("db:%s, drop db from sdb for acct:%s is dropped", pDb->name, pAcct->user); - SSdbOper oper = { - .type = SDB_OPER_LOCAL, - .table = tsDbSdb, - .pObj = pDb + SSdbRow row = { + .type = SDB_OPER_LOCAL, + .pTable = tsDbSdb, + .pObj = pDb }; - sdbDeleteRow(&oper); + sdbDeleteRow(&row); numOfDbs++; } mnodeDecDbRef(pDb); diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c index 1cd861e22303d5f6f0dcb75c7ef83cf66696578f..f76533c76032907e627eea7c1d16bb99009e0ed3 100644 --- a/src/mnode/src/mnodeDnode.c +++ b/src/mnode/src/mnodeDnode.c @@ -39,11 +39,15 @@ #include "mnodeCluster.h" int32_t tsAccessSquence = 0; -static void *tsDnodeSdb = NULL; +static void * tsDnodeSdb = NULL; static int32_t tsDnodeUpdateSize = 0; extern void * tsMnodeSdb; extern void * tsVgroupSdb; +static SDnodeEps*tsDnodeEps; +static int32_t tsDnodeEpsSize; +static pthread_mutex_t tsDnodeEpsMutex; + static int32_t mnodeCreateDnode(char *ep, SMnodeMsg *pMsg); static int32_t mnodeProcessCreateDnodeMsg(SMnodeMsg *pMsg); static int32_t mnodeProcessDropDnodeMsg(SMnodeMsg *pMsg); @@ -59,6 +63,7 @@ static int32_t mnodeRetrieveVnodes(SShowObj *pShow, char *data, int32_t rows, vo static int32_t mnodeGetDnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, 
void *pConn); static int32_t mnodeRetrieveDnodes(SShowObj *pShow, char *data, int32_t rows, void *pConn); static char* mnodeGetDnodeAlternativeRoleStr(int32_t alternativeRole); +static void mnodeUpdateDnodeEps(); static char* offlineReason[] = { "", @@ -82,61 +87,66 @@ static char* offlineReason[] = { "unknown", }; -static int32_t mnodeDnodeActionDestroy(SSdbOper *pOper) { - taosTFree(pOper->pObj); +static int32_t mnodeDnodeActionDestroy(SSdbRow *pRow) { + tfree(pRow->pObj); return TSDB_CODE_SUCCESS; } -static int32_t mnodeDnodeActionInsert(SSdbOper *pOper) { - SDnodeObj *pDnode = pOper->pObj; +static int32_t mnodeDnodeActionInsert(SSdbRow *pRow) { + SDnodeObj *pDnode = pRow->pObj; if (pDnode->status != TAOS_DN_STATUS_DROPPING) { pDnode->status = TAOS_DN_STATUS_OFFLINE; pDnode->lastAccess = tsAccessSquence; pDnode->offlineReason = TAOS_DN_OFF_STATUS_NOT_RECEIVED; } + dnodeUpdateEp(pDnode->dnodeId, pDnode->dnodeEp, pDnode->dnodeFqdn, &pDnode->dnodePort); + mnodeUpdateDnodeEps(); + mInfo("dnode:%d, fqdn:%s ep:%s port:%d, do insert action", pDnode->dnodeId, pDnode->dnodeFqdn, pDnode->dnodeEp, pDnode->dnodePort); return TSDB_CODE_SUCCESS; } -static int32_t mnodeDnodeActionDelete(SSdbOper *pOper) { - SDnodeObj *pDnode = pOper->pObj; +static int32_t mnodeDnodeActionDelete(SSdbRow *pRow) { + SDnodeObj *pDnode = pRow->pObj; #ifndef _SYNC mnodeDropAllDnodeVgroups(pDnode); #endif mnodeDropMnodeLocal(pDnode->dnodeId); balanceAsyncNotify(); + mnodeUpdateDnodeEps(); mDebug("dnode:%d, all vgroups is dropped from sdb", pDnode->dnodeId); return TSDB_CODE_SUCCESS; } -static int32_t mnodeDnodeActionUpdate(SSdbOper *pOper) { - SDnodeObj *pNew = pOper->pObj; +static int32_t mnodeDnodeActionUpdate(SSdbRow *pRow) { + SDnodeObj *pNew = pRow->pObj; SDnodeObj *pDnode = mnodeGetDnode(pNew->dnodeId); if (pDnode != NULL && pNew != pDnode) { - memcpy(pDnode, pNew, pOper->rowSize); + memcpy(pDnode, pNew, pRow->rowSize); free(pNew); } mnodeDecDnodeRef(pDnode); + mnodeUpdateDnodeEps(); return TSDB_CODE_SUCCESS; } -static int32_t mnodeDnodeActionEncode(SSdbOper *pOper) { - SDnodeObj *pDnode = pOper->pObj; - memcpy(pOper->rowData, pDnode, tsDnodeUpdateSize); - pOper->rowSize = tsDnodeUpdateSize; +static int32_t mnodeDnodeActionEncode(SSdbRow *pRow) { + SDnodeObj *pDnode = pRow->pObj; + memcpy(pRow->rowData, pDnode, tsDnodeUpdateSize); + pRow->rowSize = tsDnodeUpdateSize; return TSDB_CODE_SUCCESS; } -static int32_t mnodeDnodeActionDecode(SSdbOper *pOper) { +static int32_t mnodeDnodeActionDecode(SSdbRow *pRow) { SDnodeObj *pDnode = (SDnodeObj *) calloc(1, sizeof(SDnodeObj)); if (pDnode == NULL) return TSDB_CODE_MND_OUT_OF_MEMORY; - memcpy(pDnode, pOper->rowData, tsDnodeUpdateSize); - pOper->pObj = pDnode; + memcpy(pDnode, pRow->rowData, tsDnodeUpdateSize); + pRow->pObj = pDnode; return TSDB_CODE_SUCCESS; } @@ -152,30 +162,32 @@ static int32_t mnodeDnodeActionRestored() { } } + mnodeUpdateDnodeEps(); return TSDB_CODE_SUCCESS; } int32_t mnodeInitDnodes() { SDnodeObj tObj; tsDnodeUpdateSize = (int8_t *)tObj.updateEnd - (int8_t *)&tObj; + pthread_mutex_init(&tsDnodeEpsMutex, NULL); - SSdbTableDesc tableDesc = { - .tableId = SDB_TABLE_DNODE, - .tableName = "dnodes", + SSdbTableDesc desc = { + .id = SDB_TABLE_DNODE, + .name = "dnodes", .hashSessions = TSDB_DEFAULT_DNODES_HASH_SIZE, .maxRowSize = tsDnodeUpdateSize, .refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj, .keyType = SDB_KEY_AUTO, - .insertFp = mnodeDnodeActionInsert, - .deleteFp = mnodeDnodeActionDelete, - .updateFp = mnodeDnodeActionUpdate, - .encodeFp = 
mnodeDnodeActionEncode, - .decodeFp = mnodeDnodeActionDecode, - .destroyFp = mnodeDnodeActionDestroy, - .restoredFp = mnodeDnodeActionRestored + .fpInsert = mnodeDnodeActionInsert, + .fpDelete = mnodeDnodeActionDelete, + .fpUpdate = mnodeDnodeActionUpdate, + .fpEncode = mnodeDnodeActionEncode, + .fpDecode = mnodeDnodeActionDecode, + .fpDestroy = mnodeDnodeActionDestroy, + .fpRestored = mnodeDnodeActionRestored }; - tsDnodeSdb = sdbOpenTable(&tableDesc); + tsDnodeSdb = sdbOpenTable(&desc); if (tsDnodeSdb == NULL) { mError("failed to init dnodes data"); return -1; @@ -201,6 +213,9 @@ int32_t mnodeInitDnodes() { void mnodeCleanupDnodes() { sdbCloseTable(tsDnodeSdb); + pthread_mutex_destroy(&tsDnodeEpsMutex); + free(tsDnodeEps); + tsDnodeEps = NULL; tsDnodeSdb = NULL; } @@ -281,13 +296,13 @@ void mnodeDecDnodeRef(SDnodeObj *pDnode) { } void mnodeUpdateDnode(SDnodeObj *pDnode) { - SSdbOper oper = { - .type = SDB_OPER_GLOBAL, - .table = tsDnodeSdb, - .pObj = pDnode + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsDnodeSdb, + .pObj = pDnode }; - int32_t code = sdbUpdateRow(&oper); + int32_t code = sdbUpdateRow(&row); if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { mError("dnodeId:%d, failed update", pDnode->dnodeId); } @@ -299,7 +314,7 @@ static int32_t mnodeProcessCfgDnodeMsg(SMnodeMsg *pMsg) { return TSDB_CODE_MND_NO_RIGHTS; } - SCMCfgDnodeMsg *pCmCfgDnode = pMsg->rpcMsg.pCont; + SCfgDnodeMsg *pCmCfgDnode = pMsg->rpcMsg.pCont; if (pCmCfgDnode->ep[0] == 0) { tstrncpy(pCmCfgDnode->ep, tsLocalEp, TSDB_EP_LEN); } @@ -334,7 +349,7 @@ static int32_t mnodeProcessCfgDnodeMsg(SMnodeMsg *pMsg) { mnodeDecDnodeRef(pDnode); return code; } else { - SMDCfgDnodeMsg *pMdCfgDnode = rpcMallocCont(sizeof(SMDCfgDnodeMsg)); + SCfgDnodeMsg *pMdCfgDnode = rpcMallocCont(sizeof(SCfgDnodeMsg)); strcpy(pMdCfgDnode->ep, pCmCfgDnode->ep); strcpy(pMdCfgDnode->config, pCmCfgDnode->config); @@ -343,7 +358,7 @@ static int32_t mnodeProcessCfgDnodeMsg(SMnodeMsg *pMsg) { .code = 0, .msgType = TSDB_MSG_TYPE_MD_CONFIG_DNODE, .pCont = pMdCfgDnode, - .contLen = sizeof(SMDCfgDnodeMsg) + .contLen = sizeof(SCfgDnodeMsg) }; mInfo("dnode:%s, is configured by %s", pCmCfgDnode->ep, pMsg->pUser->user); @@ -418,9 +433,54 @@ static int32_t mnodeCheckClusterCfgPara(const SClusterCfg *clusterCfg) { return 0; } +static int32_t mnodeGetDnodeEpsSize() { + pthread_mutex_lock(&tsDnodeEpsMutex); + int32_t size = tsDnodeEpsSize; + pthread_mutex_unlock(&tsDnodeEpsMutex); + return size; +} + +static void mnodeGetDnodeEpsData(SDnodeEps *pEps, int32_t epsSize) { + pthread_mutex_lock(&tsDnodeEpsMutex); + if (epsSize == tsDnodeEpsSize) { + memcpy(pEps, tsDnodeEps, tsDnodeEpsSize); + } + pthread_mutex_unlock(&tsDnodeEpsMutex); +} + +static void mnodeUpdateDnodeEps() { + pthread_mutex_lock(&tsDnodeEpsMutex); + + int32_t totalDnodes = mnodeGetDnodesNum(); + tsDnodeEpsSize = sizeof(SDnodeEps) + totalDnodes * sizeof(SDnodeEp); + free(tsDnodeEps); + tsDnodeEps = calloc(1, tsDnodeEpsSize); + tsDnodeEps->dnodeNum = htonl(totalDnodes); + + SDnodeObj *pDnode = NULL; + void * pIter = NULL; + int32_t dnodesNum = 0; + + while (1) { + pIter = mnodeGetNextDnode(pIter, &pDnode); + if (pDnode == NULL) break; + if (dnodesNum >= totalDnodes) break; + + SDnodeEp *pEp = &tsDnodeEps->dnodeEps[dnodesNum]; + dnodesNum++; + pEp->dnodeId = htonl(pDnode->dnodeId); + pEp->dnodePort = htons(pDnode->dnodePort); + tstrncpy(pEp->dnodeFqdn, pDnode->dnodeFqdn, TSDB_FQDN_LEN); + mnodeDecDnodeRef(pDnode); + } + + sdbFreeIter(pIter); + 
pthread_mutex_unlock(&tsDnodeEpsMutex); +} + static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) { SDnodeObj *pDnode = NULL; - SDMStatusMsg *pStatus = pMsg->rpcMsg.pCont; + SStatusMsg *pStatus = pMsg->rpcMsg.pCont; pStatus->dnodeId = htonl(pStatus->dnodeId); pStatus->moduleStatus = htonl(pStatus->moduleStatus); pStatus->lastReboot = htonl(pStatus->lastReboot); @@ -477,8 +537,11 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) { } int32_t openVnodes = htons(pStatus->openVnodes); - int32_t contLen = sizeof(SDMStatusRsp) + openVnodes * sizeof(SDMVgroupAccess); - SDMStatusRsp *pRsp = rpcMallocCont(contLen); + int32_t epsSize = mnodeGetDnodeEpsSize(); + int32_t vgAccessSize = openVnodes * sizeof(SVgroupAccess); + int32_t contLen = sizeof(SStatusRsp) + vgAccessSize + epsSize; + + SStatusRsp *pRsp = rpcMallocCont(contLen); if (pRsp == NULL) { mnodeDecDnodeRef(pDnode); return TSDB_CODE_MND_OUT_OF_MEMORY; @@ -488,8 +551,8 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) { pRsp->dnodeCfg.moduleStatus = htonl((int32_t)pDnode->isMgmt); pRsp->dnodeCfg.numOfVnodes = htonl(openVnodes); tstrncpy(pRsp->dnodeCfg.clusterId, mnodeGetClusterId(), TSDB_CLUSTER_ID_LEN); - SDMVgroupAccess *pAccess = (SDMVgroupAccess *)((char *)pRsp + sizeof(SDMStatusRsp)); - + SVgroupAccess *pAccess = (SVgroupAccess *)((char *)pRsp + sizeof(SStatusRsp)); + for (int32_t j = 0; j < openVnodes; ++j) { SVnodeLoad *pVload = &pStatus->load[j]; pVload->vgId = htonl(pVload->vgId); @@ -521,7 +584,7 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) { return TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT; } - mDebug("dnode:%d, from offline to online", pDnode->dnodeId); + mInfo("dnode:%d, from offline to online", pDnode->dnodeId); pDnode->status = TAOS_DN_STATUS_READY; pDnode->offlineReason = TAOS_DN_OFF_ONLINE; balanceSyncNotify(); @@ -539,6 +602,9 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) { mnodeDecDnodeRef(pDnode); + SDnodeEps *pEps = (SDnodeEps *)((char *)pRsp + sizeof(SStatusRsp) + vgAccessSize); + mnodeGetDnodeEpsData(pEps, epsSize); + pMsg->rpcRsp.len = contLen; pMsg->rpcRsp.rsp = pRsp; @@ -578,18 +644,18 @@ static int32_t mnodeCreateDnode(char *ep, SMnodeMsg *pMsg) { tstrncpy(pDnode->dnodeEp, ep, TSDB_EP_LEN); taosGetFqdnPortFromEp(ep, pDnode->dnodeFqdn, &pDnode->dnodePort); - SSdbOper oper = { - .type = SDB_OPER_GLOBAL, - .table = tsDnodeSdb, - .pObj = pDnode, + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsDnodeSdb, + .pObj = pDnode, .rowSize = sizeof(SDnodeObj), - .pMsg = pMsg + .pMsg = pMsg }; - int32_t code = sdbInsertRow(&oper); + int32_t code = sdbInsertRow(&row); if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { int dnodeId = pDnode->dnodeId; - taosTFree(pDnode); + tfree(pDnode); mError("failed to create dnode:%d, reason:%s", dnodeId, tstrerror(code)); } else { mLInfo("dnode:%d is created", pDnode->dnodeId); @@ -599,14 +665,14 @@ static int32_t mnodeCreateDnode(char *ep, SMnodeMsg *pMsg) { } int32_t mnodeDropDnode(SDnodeObj *pDnode, void *pMsg) { - SSdbOper oper = { - .type = SDB_OPER_GLOBAL, - .table = tsDnodeSdb, - .pObj = pDnode, - .pMsg = pMsg + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsDnodeSdb, + .pObj = pDnode, + .pMsg = pMsg }; - int32_t code = sdbDeleteRow(&oper); + int32_t code = sdbDeleteRow(&row); if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { mError("dnode:%d, failed to drop from cluster, result:%s", pDnode->dnodeId, tstrerror(code)); } else { @@ -645,7 +711,7 @@ static 
int32_t mnodeDropDnodeByEp(char *ep, SMnodeMsg *pMsg) { } static int32_t mnodeProcessCreateDnodeMsg(SMnodeMsg *pMsg) { - SCMCreateDnodeMsg *pCreate = pMsg->rpcMsg.pCont; + SCreateDnodeMsg *pCreate = pMsg->rpcMsg.pCont; if (strcmp(pMsg->pUser->user, TSDB_DEFAULT_USER) != 0) { return TSDB_CODE_MND_NO_RIGHTS; @@ -655,7 +721,7 @@ static int32_t mnodeProcessCreateDnodeMsg(SMnodeMsg *pMsg) { } static int32_t mnodeProcessDropDnodeMsg(SMnodeMsg *pMsg) { - SCMDropDnodeMsg *pDrop = pMsg->rpcMsg.pCont; + SDropDnodeMsg *pDrop = pMsg->rpcMsg.pCont; if (strcmp(pMsg->pUser->user, TSDB_DEFAULT_USER) != 0) { return TSDB_CODE_MND_NO_RIGHTS; @@ -1075,7 +1141,7 @@ static int32_t mnodeRetrieveVnodes(SShowObj *pShow, char *data, int32_t rows, vo cols++; pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; - strcpy(pWrite, mnodeGetMnodeRoleStr(pVgid->role)); + strcpy(pWrite, syncRole[pVgid->role]); cols++; } } diff --git a/src/mnode/src/mnodeInt.c b/src/mnode/src/mnodeInt.c index fb1b8741a9f8895c579ad0e549dc8f3c15f7618c..534deec09a821ce2aa6e5761d2d7242015713fd0 100644 --- a/src/mnode/src/mnodeInt.c +++ b/src/mnode/src/mnodeInt.c @@ -18,7 +18,7 @@ #include "taosmsg.h" #include "taoserror.h" #include "trpc.h" -#include "tcache.h" +#include "tqueue.h" #include "mnode.h" #include "dnode.h" #include "mnodeDef.h" @@ -34,13 +34,21 @@ #include "mnodeUser.h" #include "mnodeVgroup.h" -void mnodeCreateMsg(SMnodeMsg *pMsg, SRpcMsg *rpcMsg) { - pMsg->rpcMsg = *rpcMsg; +void *mnodeCreateMsg(SRpcMsg *pRpcMsg) { + int32_t size = sizeof(SMnodeMsg) + pRpcMsg->contLen; + SMnodeMsg *pMsg = taosAllocateQitem(size); + + pMsg->rpcMsg = *pRpcMsg; + pMsg->rpcMsg.pCont = pMsg->pCont; + pMsg->incomingTs = taosGetTimestampSec(); + memcpy(pMsg->pCont, pRpcMsg->pCont, pRpcMsg->contLen); + + return pMsg; } int32_t mnodeInitMsg(SMnodeMsg *pMsg) { if (pMsg->pUser != NULL) { - mDebug("app:%p:%p, user info already inited", pMsg->rpcMsg.ahandle, pMsg); + mTrace("msg:%p, app:%p user info already inited", pMsg, pMsg->rpcMsg.ahandle); return TSDB_CODE_SUCCESS; } @@ -54,7 +62,9 @@ int32_t mnodeInitMsg(SMnodeMsg *pMsg) { void mnodeCleanupMsg(SMnodeMsg *pMsg) { if (pMsg != NULL) { - if (pMsg->rpcMsg.pCont) rpcFreeCont(pMsg->rpcMsg.pCont); + if (pMsg->rpcMsg.pCont != pMsg->pCont) { + tfree(pMsg->rpcMsg.pCont); + } if (pMsg->pUser) mnodeDecUserRef(pMsg->pUser); if (pMsg->pDb) mnodeDecDbRef(pMsg->pDb); if (pMsg->pVgroup) mnodeDecVgroupRef(pMsg->pVgroup); diff --git a/src/mnode/src/mnodeMain.c b/src/mnode/src/mnodeMain.c index 2bb8a810566a676a4ae991bc79e5eb78234747c7..1f5ad42bdead75064c60f8626ce4916e7b08fd9f 100644 --- a/src/mnode/src/mnodeMain.c +++ b/src/mnode/src/mnodeMain.c @@ -96,9 +96,9 @@ int32_t mnodeStartSystem() { return -1; } - dnodeAllocateMnodeWqueue(); - dnodeAllocateMnodeRqueue(); - dnodeAllocateMnodePqueue(); + dnodeAllocMWritequeue(); + dnodeAllocMReadQueue(); + dnodeAllocateMPeerQueue(); if (mnodeInitComponents() != 0) { return -1; @@ -127,9 +127,9 @@ void mnodeCleanupSystem() { mInfo("starting to clean up mnode"); tsMgmtIsRunning = false; - dnodeFreeMnodeWqueue(); - dnodeFreeMnodeRqueue(); - dnodeFreeMnodePqueue(); + dnodeFreeMWritequeue(); + dnodeFreeMReadQueue(); + dnodeFreeMPeerQueue(); mnodeCleanupTimer(); mnodeCleanupComponents(sizeof(tsMnodeComponents) / sizeof(tsMnodeComponents[0]) - 1); diff --git a/src/mnode/src/mnodeMnode.c b/src/mnode/src/mnodeMnode.c index 89b2f50b731c90c1261957da93623ba16cebf2ab..205bfda4b9571b488ccb335ab98c677f94166aae 100644 --- a/src/mnode/src/mnodeMnode.c +++ 
b/src/mnode/src/mnodeMnode.c @@ -38,7 +38,7 @@ static void * tsMnodeSdb = NULL; static int32_t tsMnodeUpdateSize = 0; static SRpcEpSet tsMnodeEpSetForShell; static SRpcEpSet tsMnodeEpSetForPeer; -static SDMMnodeInfos tsMnodeInfos; +static SMnodeInfos tsMnodeInfos; static int32_t mnodeGetMnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn); static int32_t mnodeRetrieveMnodes(SShowObj *pShow, char *data, int32_t rows, void *pConn); @@ -58,25 +58,26 @@ static int32_t mnodeRetrieveMnodes(SShowObj *pShow, char *data, int32_t rows, vo #define mnodeMnodeDestroyLock() pthread_mutex_destroy(&tsMnodeLock) #endif -static int32_t mnodeMnodeActionDestroy(SSdbOper *pOper) { - taosTFree(pOper->pObj); +static int32_t mnodeMnodeActionDestroy(SSdbRow *pRow) { + tfree(pRow->pObj); return TSDB_CODE_SUCCESS; } -static int32_t mnodeMnodeActionInsert(SSdbOper *pOper) { - SMnodeObj *pMnode = pOper->pObj; +static int32_t mnodeMnodeActionInsert(SSdbRow *pRow) { + SMnodeObj *pMnode = pRow->pObj; SDnodeObj *pDnode = mnodeGetDnode(pMnode->mnodeId); if (pDnode == NULL) return TSDB_CODE_MND_DNODE_NOT_EXIST; pDnode->isMgmt = true; mnodeDecDnodeRef(pDnode); - - mInfo("mnode:%d, fqdn:%s ep:%s port:%d, do insert action", pMnode->mnodeId, pDnode->dnodeFqdn, pDnode->dnodeEp, pDnode->dnodePort); + + mInfo("mnode:%d, fqdn:%s ep:%s port:%u, do insert action", pMnode->mnodeId, pDnode->dnodeFqdn, pDnode->dnodeEp, + pDnode->dnodePort); return TSDB_CODE_SUCCESS; } -static int32_t mnodeMnodeActionDelete(SSdbOper *pOper) { - SMnodeObj *pMnode = pOper->pObj; +static int32_t mnodeMnodeActionDelete(SSdbRow *pRow) { + SMnodeObj *pMnode = pRow->pObj; SDnodeObj *pDnode = mnodeGetDnode(pMnode->mnodeId); if (pDnode == NULL) return TSDB_CODE_MND_DNODE_NOT_EXIST; @@ -87,30 +88,30 @@ static int32_t mnodeMnodeActionDelete(SSdbOper *pOper) { return TSDB_CODE_SUCCESS; } -static int32_t mnodeMnodeActionUpdate(SSdbOper *pOper) { - SMnodeObj *pMnode = pOper->pObj; +static int32_t mnodeMnodeActionUpdate(SSdbRow *pRow) { + SMnodeObj *pMnode = pRow->pObj; SMnodeObj *pSaved = mnodeGetMnode(pMnode->mnodeId); if (pMnode != pSaved) { - memcpy(pSaved, pMnode, pOper->rowSize); + memcpy(pSaved, pMnode, pRow->rowSize); free(pMnode); } mnodeDecMnodeRef(pSaved); return TSDB_CODE_SUCCESS; } -static int32_t mnodeMnodeActionEncode(SSdbOper *pOper) { - SMnodeObj *pMnode = pOper->pObj; - memcpy(pOper->rowData, pMnode, tsMnodeUpdateSize); - pOper->rowSize = tsMnodeUpdateSize; +static int32_t mnodeMnodeActionEncode(SSdbRow *pRow) { + SMnodeObj *pMnode = pRow->pObj; + memcpy(pRow->rowData, pMnode, tsMnodeUpdateSize); + pRow->rowSize = tsMnodeUpdateSize; return TSDB_CODE_SUCCESS; } -static int32_t mnodeMnodeActionDecode(SSdbOper *pOper) { +static int32_t mnodeMnodeActionDecode(SSdbRow *pRow) { SMnodeObj *pMnode = calloc(1, sizeof(SMnodeObj)); if (pMnode == NULL) return TSDB_CODE_MND_OUT_OF_MEMORY; - memcpy(pMnode, pOper->rowData, tsMnodeUpdateSize); - pOper->pObj = pMnode; + memcpy(pMnode, pRow->rowData, tsMnodeUpdateSize); + pRow->pObj = pMnode; return TSDB_CODE_SUCCESS; } @@ -136,23 +137,23 @@ int32_t mnodeInitMnodes() { SMnodeObj tObj; tsMnodeUpdateSize = (int8_t *)tObj.updateEnd - (int8_t *)&tObj; - SSdbTableDesc tableDesc = { - .tableId = SDB_TABLE_MNODE, - .tableName = "mnodes", + SSdbTableDesc desc = { + .id = SDB_TABLE_MNODE, + .name = "mnodes", .hashSessions = TSDB_DEFAULT_MNODES_HASH_SIZE, .maxRowSize = tsMnodeUpdateSize, .refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj, .keyType = SDB_KEY_INT, - .insertFp = mnodeMnodeActionInsert, - .deleteFp 
= mnodeMnodeActionDelete, - .updateFp = mnodeMnodeActionUpdate, - .encodeFp = mnodeMnodeActionEncode, - .decodeFp = mnodeMnodeActionDecode, - .destroyFp = mnodeMnodeActionDestroy, - .restoredFp = mnodeMnodeActionRestored + .fpInsert = mnodeMnodeActionInsert, + .fpDelete = mnodeMnodeActionDelete, + .fpUpdate = mnodeMnodeActionUpdate, + .fpEncode = mnodeMnodeActionEncode, + .fpDecode = mnodeMnodeActionDecode, + .fpDestroy = mnodeMnodeActionDestroy, + .fpRestored = mnodeMnodeActionRestored }; - tsMnodeSdb = sdbOpenTable(&tableDesc); + tsMnodeSdb = sdbOpenTable(&desc); if (tsMnodeSdb == NULL) { mError("failed to init mnodes data"); return -1; @@ -191,10 +192,6 @@ void *mnodeGetNextMnode(void *pIter, SMnodeObj **pMnode) { return sdbFetchRow(tsMnodeSdb, pIter, (void **)pMnode); } -char *mnodeGetMnodeRoleStr(int32_t role) { - return syncRole[role]; -} - void mnodeUpdateMnodeEpSet() { mInfo("update mnodes epSet, numOfEps:%d ", mnodeGetMnodesNum()); @@ -202,7 +199,7 @@ void mnodeUpdateMnodeEpSet() { memset(&tsMnodeEpSetForShell, 0, sizeof(SRpcEpSet)); memset(&tsMnodeEpSetForPeer, 0, sizeof(SRpcEpSet)); - memset(&tsMnodeInfos, 0, sizeof(SDMMnodeInfos)); + memset(&tsMnodeInfos, 0, sizeof(SMnodeInfos)); int32_t index = 0; void * pIter = NULL; @@ -221,8 +218,8 @@ void mnodeUpdateMnodeEpSet() { tsMnodeEpSetForPeer.port[index] = htons(pDnode->dnodePort + TSDB_PORT_DNODEDNODE); mDebug("mnode:%d, for peer fqdn:%s %d", pDnode->dnodeId, tsMnodeEpSetForPeer.fqdn[index], htons(tsMnodeEpSetForPeer.port[index])); - tsMnodeInfos.nodeInfos[index].nodeId = htonl(pMnode->mnodeId); - strcpy(tsMnodeInfos.nodeInfos[index].nodeEp, pDnode->dnodeEp); + tsMnodeInfos.mnodeInfos[index].mnodeId = htonl(pMnode->mnodeId); + strcpy(tsMnodeInfos.mnodeInfos[index].mnodeEp, pDnode->dnodeEp); if (pMnode->role == TAOS_SYNC_ROLE_MASTER) { tsMnodeEpSetForShell.inUse = index; @@ -238,7 +235,7 @@ void mnodeUpdateMnodeEpSet() { mnodeDecMnodeRef(pMnode); } - tsMnodeInfos.nodeNum = index; + tsMnodeInfos.mnodeNum = index; tsMnodeEpSetForShell.numOfEps = index; tsMnodeEpSetForPeer.numOfEps = index; @@ -260,19 +257,19 @@ void mnodeGetMnodeEpSetForShell(SRpcEpSet *epSet) { } char* mnodeGetMnodeMasterEp() { - return tsMnodeInfos.nodeInfos[tsMnodeInfos.inUse].nodeEp; + return tsMnodeInfos.mnodeInfos[tsMnodeInfos.inUse].mnodeEp; } void mnodeGetMnodeInfos(void *mnodeInfos) { mnodeMnodeRdLock(); - *(SDMMnodeInfos *)mnodeInfos = tsMnodeInfos; + *(SMnodeInfos *)mnodeInfos = tsMnodeInfos; mnodeMnodeUnLock(); } static int32_t mnodeSendCreateMnodeMsg(int32_t dnodeId, char *dnodeEp) { mDebug("dnode:%d, send create mnode msg to dnode %s", dnodeId, dnodeEp); - SMDCreateMnodeMsg *pCreate = rpcMallocCont(sizeof(SMDCreateMnodeMsg)); + SCreateMnodeMsg *pCreate = rpcMallocCont(sizeof(SCreateMnodeMsg)); if (pCreate == NULL) { return TSDB_CODE_MND_OUT_OF_MEMORY; } else { @@ -280,21 +277,21 @@ static int32_t mnodeSendCreateMnodeMsg(int32_t dnodeId, char *dnodeEp) { tstrncpy(pCreate->dnodeEp, dnodeEp, sizeof(pCreate->dnodeEp)); pCreate->mnodes = tsMnodeInfos; bool found = false; - for (int i = 0; i < pCreate->mnodes.nodeNum; ++i) { - if (pCreate->mnodes.nodeInfos[i].nodeId == htonl(dnodeId)) { + for (int i = 0; i < pCreate->mnodes.mnodeNum; ++i) { + if (pCreate->mnodes.mnodeInfos[i].mnodeId == htonl(dnodeId)) { found = true; } } if (!found) { - pCreate->mnodes.nodeInfos[pCreate->mnodes.nodeNum].nodeId = htonl(dnodeId); - tstrncpy(pCreate->mnodes.nodeInfos[pCreate->mnodes.nodeNum].nodeEp, dnodeEp, sizeof(pCreate->dnodeEp)); - pCreate->mnodes.nodeNum++; + 
pCreate->mnodes.mnodeInfos[pCreate->mnodes.mnodeNum].mnodeId = htonl(dnodeId); + tstrncpy(pCreate->mnodes.mnodeInfos[pCreate->mnodes.mnodeNum].mnodeEp, dnodeEp, sizeof(pCreate->dnodeEp)); + pCreate->mnodes.mnodeNum++; } } SRpcMsg rpcMsg = {0}; rpcMsg.pCont = pCreate; - rpcMsg.contLen = sizeof(SMDCreateMnodeMsg); + rpcMsg.contLen = sizeof(SCreateMnodeMsg); rpcMsg.msgType = TSDB_MSG_TYPE_MD_CREATE_MNODE; SRpcMsg rpcRsp = {0}; @@ -328,11 +325,11 @@ void mnodeCreateMnode(int32_t dnodeId, char *dnodeEp, bool needConfirm) { pMnode->mnodeId = dnodeId; pMnode->createdTime = taosGetTimestampMs(); - SSdbOper oper = { - .type = SDB_OPER_GLOBAL, - .table = tsMnodeSdb, - .pObj = pMnode, - .writeCb = mnodeCreateMnodeCb + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsMnodeSdb, + .pObj = pMnode, + .fpRsp = mnodeCreateMnodeCb }; int32_t code = TSDB_CODE_SUCCESS; @@ -341,22 +338,22 @@ void mnodeCreateMnode(int32_t dnodeId, char *dnodeEp, bool needConfirm) { } if (code != TSDB_CODE_SUCCESS) { - taosTFree(pMnode); + tfree(pMnode); return; } - code = sdbInsertRow(&oper); + code = sdbInsertRow(&row); if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { mError("dnode:%d, failed to create mnode, ep:%s reason:%s", dnodeId, dnodeEp, tstrerror(code)); - taosTFree(pMnode); + tfree(pMnode); } } void mnodeDropMnodeLocal(int32_t dnodeId) { SMnodeObj *pMnode = mnodeGetMnode(dnodeId); if (pMnode != NULL) { - SSdbOper oper = {.type = SDB_OPER_LOCAL, .table = tsMnodeSdb, .pObj = pMnode}; - sdbDeleteRow(&oper); + SSdbRow row = {.type = SDB_OPER_LOCAL, .pTable = tsMnodeSdb, .pObj = pMnode}; + sdbDeleteRow(&row); mnodeDecMnodeRef(pMnode); } @@ -370,13 +367,13 @@ int32_t mnodeDropMnode(int32_t dnodeId) { return TSDB_CODE_MND_DNODE_NOT_EXIST; } - SSdbOper oper = { - .type = SDB_OPER_GLOBAL, - .table = tsMnodeSdb, - .pObj = pMnode + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsMnodeSdb, + .pObj = pMnode }; - int32_t code = sdbDeleteRow(&oper); + int32_t code = sdbDeleteRow(&row); sdbDecRef(tsMnodeSdb, pMnode); @@ -468,7 +465,7 @@ static int32_t mnodeRetrieveMnodes(SShowObj *pShow, char *data, int32_t rows, vo cols++; pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; - char* roles = mnodeGetMnodeRoleStr(pMnode->role); + char* roles = syncRole[pMnode->role]; STR_WITH_MAXSIZE_TO_VARSTR(pWrite, roles, pShow->bytes[cols]); cols++; diff --git a/src/mnode/src/mnodePeer.c b/src/mnode/src/mnodePeer.c index 2a04f541c51483ccba93369a65061507f83ead23..f13ef7539800919e5f31d43b55def590b801e86d 100644 --- a/src/mnode/src/mnodePeer.c +++ b/src/mnode/src/mnodePeer.c @@ -47,7 +47,7 @@ void mnodeAddPeerRspHandle(uint8_t msgType, void (*fp)(SRpcMsg *rpcMsg)) { int32_t mnodeProcessPeerReq(SMnodeMsg *pMsg) { if (pMsg->rpcMsg.pCont == NULL) { - mError("%p, msg:%s in mpeer queue, content is null", pMsg->rpcMsg.ahandle, taosMsg[pMsg->rpcMsg.msgType]); + mError("msg:%p, ahandle:%p type:%s in mpeer queue, content is null", pMsg, pMsg->rpcMsg.ahandle, taosMsg[pMsg->rpcMsg.msgType]); return TSDB_CODE_MND_INVALID_MSG_LEN; } @@ -58,14 +58,14 @@ int32_t mnodeProcessPeerReq(SMnodeMsg *pMsg) { rpcRsp->rsp = epSet; rpcRsp->len = sizeof(SRpcEpSet); - mDebug("%p, msg:%s in mpeer queue will be redirected, numOfEps:%d inUse:%d", pMsg->rpcMsg.ahandle, - taosMsg[pMsg->rpcMsg.msgType], epSet->numOfEps, epSet->inUse); + mDebug("msg:%p, ahandle:%p type:%s in mpeer queue will be redirected, numOfEps:%d inUse:%d", pMsg, + pMsg->rpcMsg.ahandle, taosMsg[pMsg->rpcMsg.msgType], epSet->numOfEps, 
epSet->inUse); for (int32_t i = 0; i < epSet->numOfEps; ++i) { if (strcmp(epSet->fqdn[i], tsLocalFqdn) == 0 && htons(epSet->port[i]) == tsServerPort + TSDB_PORT_DNODEDNODE) { epSet->inUse = (i + 1) % epSet->numOfEps; - mDebug("mnode index:%d ep:%s:%u, set inUse to %d", i, epSet->fqdn[i], htons(epSet->port[i]), epSet->inUse); + mDebug("mpeer:%d ep:%s:%u, set inUse to %d", i, epSet->fqdn[i], htons(epSet->port[i]), epSet->inUse); } else { - mDebug("mnode index:%d ep:%s:%u", i, epSet->fqdn[i], htons(epSet->port[i])); + mDebug("mpeer:%d ep:%s:%u", i, epSet->fqdn[i], htons(epSet->port[i])); } } @@ -73,7 +73,8 @@ int32_t mnodeProcessPeerReq(SMnodeMsg *pMsg) { } if (tsMnodeProcessPeerMsgFp[pMsg->rpcMsg.msgType] == NULL) { - mError("%p, msg:%s in mpeer queue, not processed", pMsg->rpcMsg.ahandle, taosMsg[pMsg->rpcMsg.msgType]); + mError("msg:%p, ahandle:%p type:%s in mpeer queue, not processed", pMsg, pMsg->rpcMsg.ahandle, + taosMsg[pMsg->rpcMsg.msgType]); return TSDB_CODE_MND_MSG_NOT_PROCESSED; } @@ -82,13 +83,14 @@ int32_t mnodeProcessPeerReq(SMnodeMsg *pMsg) { void mnodeProcessPeerRsp(SRpcMsg *pMsg) { if (!sdbIsMaster()) { - mError("%p, msg:%s is not processed for it is not master", pMsg->ahandle, taosMsg[pMsg->msgType]); + mError("msg:%p, ahandle:%p type:%s is not processed for it is not master", pMsg, pMsg->ahandle, + taosMsg[pMsg->msgType]); return; } if (tsMnodeProcessPeerRspFp[pMsg->msgType]) { (*tsMnodeProcessPeerRspFp[pMsg->msgType])(pMsg); } else { - mError("%p, msg:%s is not processed", pMsg->ahandle, taosMsg[pMsg->msgType]); + mError("msg:%p, ahandle:%p type:%s is not processed", pMsg, pMsg->ahandle, taosMsg[pMsg->msgType]); } } diff --git a/src/mnode/src/mnodeProfile.c b/src/mnode/src/mnodeProfile.c index f8f99e22c6f437b1eece0d704c8c4551bc434110..12ac64854c6834ca0cd1509f9bfedd7b65a0900c 100644 --- a/src/mnode/src/mnodeProfile.c +++ b/src/mnode/src/mnodeProfile.c @@ -131,8 +131,8 @@ SConnObj *mnodeAccquireConn(int32_t connId, char *user, uint32_t ip, uint16_t po static void mnodeFreeConn(void *data) { SConnObj *pConn = data; - taosTFree(pConn->pQueries); - taosTFree(pConn->pStreams); + tfree(pConn->pQueries); + tfree(pConn->pStreams); mDebug("connId:%d, is destroyed", pConn->connId); } @@ -182,7 +182,7 @@ static int32_t mnodeGetConnsMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC // app name pShow->bytes[cols] = TSDB_APPNAME_LEN + VARSTR_HEADER_SIZE; pSchema[cols].type = TSDB_DATA_TYPE_BINARY; - strcpy(pSchema[cols].name, "app_name"); + strcpy(pSchema[cols].name, "program"); pSchema[cols].bytes = htons(pShow->bytes[cols]); cols++; @@ -280,7 +280,7 @@ static int32_t mnodeRetrieveConns(SShowObj *pShow, char *data, int32_t rows, voi } // not thread safe, need optimized -int32_t mnodeSaveQueryStreamList(SConnObj *pConn, SCMHeartBeatMsg *pHBMsg) { +int32_t mnodeSaveQueryStreamList(SConnObj *pConn, SHeartBeatMsg *pHBMsg) { pConn->numOfQueries = htonl(pHBMsg->numOfQueries); if (pConn->numOfQueries > 0) { if (pConn->pQueries == NULL) { @@ -561,7 +561,7 @@ static int32_t mnodeProcessKillQueryMsg(SMnodeMsg *pMsg) { SUserObj *pUser = pMsg->pUser; if (strcmp(pUser->user, TSDB_DEFAULT_USER) != 0) return TSDB_CODE_MND_NO_RIGHTS; - SCMKillQueryMsg *pKill = pMsg->rpcMsg.pCont; + SKillQueryMsg *pKill = pMsg->rpcMsg.pCont; mInfo("kill query msg is received, queryId:%s", pKill->queryId); const char delim = ':'; @@ -592,7 +592,7 @@ static int32_t mnodeProcessKillStreamMsg(SMnodeMsg *pMsg) { SUserObj *pUser = pMsg->pUser; if (strcmp(pUser->user, TSDB_DEFAULT_USER) != 0) return TSDB_CODE_MND_NO_RIGHTS; 
- SCMKillQueryMsg *pKill = pMsg->rpcMsg.pCont; + SKillQueryMsg *pKill = pMsg->rpcMsg.pCont; mInfo("kill stream msg is received, streamId:%s", pKill->queryId); const char delim = ':'; @@ -623,7 +623,7 @@ static int32_t mnodeProcessKillConnectionMsg(SMnodeMsg *pMsg) { SUserObj *pUser = pMsg->pUser; if (strcmp(pUser->user, TSDB_DEFAULT_USER) != 0) return TSDB_CODE_MND_NO_RIGHTS; - SCMKillConnMsg *pKill = pMsg->rpcMsg.pCont; + SKillConnMsg *pKill = pMsg->rpcMsg.pCont; int32_t connId = atoi(pKill->queryId); SConnObj *pConn = taosCacheAcquireByKey(tsMnodeConnCache, &connId, sizeof(int32_t)); if (pConn == NULL) { diff --git a/src/mnode/src/mnodeRead.c b/src/mnode/src/mnodeRead.c index 93b944febb42d3c7e7bac113b5447ddd1488369c..ea7ce783e8f0a018d3648257206cb6abcaf28e26 100644 --- a/src/mnode/src/mnodeRead.c +++ b/src/mnode/src/mnodeRead.c @@ -43,7 +43,7 @@ void mnodeAddReadMsgHandle(uint8_t msgType, int32_t (*fp)(SMnodeMsg *pMsg)) { int32_t mnodeProcessRead(SMnodeMsg *pMsg) { if (pMsg->rpcMsg.pCont == NULL) { - mError("%p, msg:%s in mread queue, content is null", pMsg->rpcMsg.ahandle, taosMsg[pMsg->rpcMsg.msgType]); + mError("msg:%p, app:%p type:%s in mread queue, content is null", pMsg, pMsg->rpcMsg.ahandle, taosMsg[pMsg->rpcMsg.msgType]); return TSDB_CODE_MND_INVALID_MSG_LEN; } @@ -52,7 +52,7 @@ int32_t mnodeProcessRead(SMnodeMsg *pMsg) { SRpcEpSet *epSet = rpcMallocCont(sizeof(SRpcEpSet)); mnodeGetMnodeEpSetForShell(epSet); - mDebug("%p, msg:%s in mread queue will be redirected, numOfEps:%d inUse:%d", pMsg->rpcMsg.ahandle, + mDebug("msg:%p, app:%p type:%s in mread queue will be redirected, numOfEps:%d inUse:%d", pMsg, pMsg->rpcMsg.ahandle, taosMsg[pMsg->rpcMsg.msgType], epSet->numOfEps, epSet->inUse); for (int32_t i = 0; i < epSet->numOfEps; ++i) { if (strcmp(epSet->fqdn[i], tsLocalFqdn) == 0 && htons(epSet->port[i]) == tsServerPort) { @@ -70,13 +70,15 @@ int32_t mnodeProcessRead(SMnodeMsg *pMsg) { } if (tsMnodeProcessReadMsgFp[pMsg->rpcMsg.msgType] == NULL) { - mError("%p, msg:%s in mread queue, not processed", pMsg->rpcMsg.ahandle, taosMsg[pMsg->rpcMsg.msgType]); + mError("msg:%p, app:%p type:%s in mread queue, not processed", pMsg, pMsg->rpcMsg.ahandle, + taosMsg[pMsg->rpcMsg.msgType]); return TSDB_CODE_MND_MSG_NOT_PROCESSED; } int32_t code = mnodeInitMsg(pMsg); if (code != TSDB_CODE_SUCCESS) { - mError("%p, msg:%s in mread queue, not processed reason:%s", pMsg->rpcMsg.ahandle, taosMsg[pMsg->rpcMsg.msgType], tstrerror(code)); + mError("msg:%p, app:%p type:%s in mread queue, not processed reason:%s", pMsg, pMsg->rpcMsg.ahandle, + taosMsg[pMsg->rpcMsg.msgType], tstrerror(code)); return code; } diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c index 14558485aa743a42bee016226dfc2e16a4f81d0c..1728bb7e3bc6587bc28f1cefd1b4e3c56c5ff4d5 100644 --- a/src/mnode/src/mnodeSdb.c +++ b/src/mnode/src/mnodeSdb.c @@ -34,23 +34,30 @@ #include "mnodeSdb.h" #define SDB_TABLE_LEN 12 -#define SDB_SYNC_HACK 16 +#define MAX_QUEUED_MSG_NUM 10000 typedef enum { - SDB_ACTION_INSERT, - SDB_ACTION_DELETE, - SDB_ACTION_UPDATE + SDB_ACTION_INSERT = 0, + SDB_ACTION_DELETE = 1, + SDB_ACTION_UPDATE = 2 } ESdbAction; typedef enum { - SDB_STATUS_OFFLINE, - SDB_STATUS_SERVING, - SDB_STATUS_CLOSING + SDB_STATUS_OFFLINE = 0, + SDB_STATUS_SERVING = 1, + SDB_STATUS_CLOSING = 2 } ESdbStatus; -typedef struct _SSdbTable { - char tableName[SDB_TABLE_LEN]; - ESdbTable tableId; +char *actStr[] = { + "insert", + "delete", + "update", + "invalid" +}; + +typedef struct SSdbTable { + char name[SDB_TABLE_LEN]; + ESdbTable id; 
ESdbKey keyType; int32_t hashSessions; int32_t maxRowSize; @@ -58,77 +65,78 @@ typedef struct _SSdbTable { int32_t autoIndex; int64_t numOfRows; void * iHandle; - int32_t (*insertFp)(SSdbOper *pDesc); - int32_t (*deleteFp)(SSdbOper *pOper); - int32_t (*updateFp)(SSdbOper *pOper); - int32_t (*decodeFp)(SSdbOper *pOper); - int32_t (*encodeFp)(SSdbOper *pOper); - int32_t (*destroyFp)(SSdbOper *pOper); - int32_t (*restoredFp)(); + int32_t (*fpInsert)(SSdbRow *pRow); + int32_t (*fpDelete)(SSdbRow *pRow); + int32_t (*fpUpdate)(SSdbRow *pRow); + int32_t (*fpDecode)(SSdbRow *pRow); + int32_t (*fpEncode)(SSdbRow *pRow); + int32_t (*fpDestroy)(SSdbRow *pRow); + int32_t (*fpRestored)(); pthread_mutex_t mutex; } SSdbTable; typedef struct { ESyncRole role; ESdbStatus status; - int64_t version; - void * sync; + uint64_t version; + int64_t sync; void * wal; SSyncCfg cfg; + int32_t queuedMsg; int32_t numOfTables; SSdbTable *tableList[SDB_TABLE_MAX]; pthread_mutex_t mutex; -} SSdbObject; +} SSdbMgmt; typedef struct { pthread_t thread; int32_t workerId; -} SSdbWriteWorker; +} SSdbWorker; typedef struct { int32_t num; - SSdbWriteWorker *writeWorker; -} SSdbWriteWorkerPool; + SSdbWorker *worker; +} SSdbWorkerPool; extern void * tsMnodeTmr; -static void * tsUpdateSyncTmr; -static SSdbObject tsSdbObj = {0}; -static taos_qset tsSdbWriteQset; -static taos_qall tsSdbWriteQall; -static taos_queue tsSdbWriteQueue; -static SSdbWriteWorkerPool tsSdbPool; - -static int sdbWrite(void *param, void *data, int type); -static int sdbWriteToQueue(void *param, void *data, int type); -static void * sdbWorkerFp(void *param); -static int32_t sdbInitWriteWorker(); -static void sdbCleanupWriteWorker(); -static int32_t sdbAllocWriteQueue(); -static void sdbFreeWritequeue(); -static int32_t sdbUpdateRowImp(SSdbOper *pOper); -static int32_t sdbDeleteRowImp(SSdbOper *pOper); -static int32_t sdbInsertHash(SSdbTable *pTable, SSdbOper *pOper); -static int32_t sdbUpdateHash(SSdbTable *pTable, SSdbOper *pOper); -static int32_t sdbDeleteHash(SSdbTable *pTable, SSdbOper *pOper); - -int32_t sdbGetId(void *handle) { - return ((SSdbTable *)handle)->autoIndex; +static void * tsSdbTmr; +static SSdbMgmt tsSdbMgmt = {0}; +static taos_qset tsSdbWQset; +static taos_qall tsSdbWQall; +static taos_queue tsSdbWQueue; +static SSdbWorkerPool tsSdbPool; + +static int32_t sdbProcessWrite(void *pRow, void *pHead, int32_t qtype, void *unused); +static int32_t sdbWriteWalToQueue(void *vparam, void *pHead, int32_t qtype, void *rparam); +static int32_t sdbWriteRowToQueue(SSdbRow *pRow, int32_t action); +static void sdbFreeFromQueue(SSdbRow *pRow); +static void * sdbWorkerFp(void *pWorker); +static int32_t sdbInitWorker(); +static void sdbCleanupWorker(); +static int32_t sdbAllocQueue(); +static void sdbFreeQueue(); +static int32_t sdbInsertHash(SSdbTable *pTable, SSdbRow *pRow); +static int32_t sdbUpdateHash(SSdbTable *pTable, SSdbRow *pRow); +static int32_t sdbDeleteHash(SSdbTable *pTable, SSdbRow *pRow); + +int32_t sdbGetId(void *pTable) { + return ((SSdbTable *)pTable)->autoIndex; } -int64_t sdbGetNumOfRows(void *handle) { - return ((SSdbTable *)handle)->numOfRows; +int64_t sdbGetNumOfRows(void *pTable) { + return ((SSdbTable *)pTable)->numOfRows; } uint64_t sdbGetVersion() { - return tsSdbObj.version; + return tsSdbMgmt.version; } bool sdbIsMaster() { - return tsSdbObj.role == TAOS_SYNC_ROLE_MASTER; + return tsSdbMgmt.role == TAOS_SYNC_ROLE_MASTER; } bool sdbIsServing() { - return tsSdbObj.status == SDB_STATUS_SERVING; + return tsSdbMgmt.status == 
SDB_STATUS_SERVING; } static void *sdbGetObjKey(SSdbTable *pTable, void *key) { @@ -139,18 +147,6 @@ static void *sdbGetObjKey(SSdbTable *pTable, void *key) { return key; } -static char *sdbGetActionStr(int32_t action) { - switch (action) { - case SDB_ACTION_INSERT: - return "insert"; - case SDB_ACTION_DELETE: - return "delete"; - case SDB_ACTION_UPDATE: - return "update"; - } - return "invalid"; -} - static char *sdbGetKeyStr(SSdbTable *pTable, void *key) { static char str[16]; switch (pTable->keyType) { @@ -166,28 +162,28 @@ static char *sdbGetKeyStr(SSdbTable *pTable, void *key) { } } -static char *sdbGetKeyStrFromObj(SSdbTable *pTable, void *key) { +static char *sdbGetRowStr(SSdbTable *pTable, void *key) { return sdbGetKeyStr(pTable, sdbGetObjKey(pTable, key)); } static void *sdbGetTableFromId(int32_t tableId) { - return tsSdbObj.tableList[tableId]; + return tsSdbMgmt.tableList[tableId]; } static int32_t sdbInitWal() { - SWalCfg walCfg = {.walLevel = 2, .wals = 2, .keep = 1, .fsyncPeriod = 0}; - char temp[TSDB_FILENAME_LEN]; + SWalCfg walCfg = {.vgId = 1, .walLevel = TAOS_WAL_FSYNC, .keep = TAOS_WAL_KEEP, .fsyncPeriod = 0}; + char temp[TSDB_FILENAME_LEN] = {0}; sprintf(temp, "%s/wal", tsMnodeDir); - tsSdbObj.wal = walOpen(temp, &walCfg); - if (tsSdbObj.wal == NULL) { - sdbError("failed to open sdb wal in %s", tsMnodeDir); + tsSdbMgmt.wal = walOpen(temp, &walCfg); + if (tsSdbMgmt.wal == NULL) { + sdbError("vgId:1, failed to open wal in %s", tsMnodeDir); return -1; } - sdbInfo("open sdb wal for restore"); - int code = walRestore(tsSdbObj.wal, NULL, sdbWrite); + sdbInfo("vgId:1, open wal for restore"); + int32_t code = walRestore(tsSdbMgmt.wal, NULL, sdbProcessWrite); if (code != TSDB_CODE_SUCCESS) { - sdbError("failed to open wal for restore, reason:%s", tstrerror(code)); + sdbError("vgId:1, failed to open wal for restore since %s", tstrerror(code)); return -1; } return 0; @@ -199,31 +195,31 @@ static void sdbRestoreTables() { for (int32_t tableId = 0; tableId < SDB_TABLE_MAX; ++tableId) { SSdbTable *pTable = sdbGetTableFromId(tableId); if (pTable == NULL) continue; - if (pTable->restoredFp) { - (*pTable->restoredFp)(); + if (pTable->fpRestored) { + (*pTable->fpRestored)(); } totalRows += pTable->numOfRows; numOfTables++; - sdbDebug("table:%s, is restored, numOfRows:%" PRId64, pTable->tableName, pTable->numOfRows); + sdbDebug("vgId:1, sdb:%s is restored, rows:%" PRId64, pTable->name, pTable->numOfRows); } - sdbInfo("sdb is restored, ver:%" PRId64 " totalRows:%d numOfTables:%d", tsSdbObj.version, totalRows, numOfTables); + sdbInfo("vgId:1, sdb is restored, mver:%" PRIu64 " rows:%d tables:%d", tsSdbMgmt.version, totalRows, numOfTables); } void sdbUpdateMnodeRoles() { - if (tsSdbObj.sync == NULL) return; + if (tsSdbMgmt.sync <= 0) return; SNodesRole roles = {0}; - syncGetNodesRole(tsSdbObj.sync, &roles); + syncGetNodesRole(tsSdbMgmt.sync, &roles); - sdbInfo("update mnodes sync roles, total:%d", tsSdbObj.cfg.replica); - for (int32_t i = 0; i < tsSdbObj.cfg.replica; ++i) { + sdbInfo("vgId:1, update mnodes role, replica:%d", tsSdbMgmt.cfg.replica); + for (int32_t i = 0; i < tsSdbMgmt.cfg.replica; ++i) { SMnodeObj *pMnode = mnodeGetMnode(roles.nodeId[i]); if (pMnode != NULL) { pMnode->role = roles.role[i]; - sdbInfo("mnode:%d, role:%s", pMnode->mnodeId, mnodeGetMnodeRoleStr(pMnode->role)); - if (pMnode->mnodeId == dnodeGetDnodeId()) tsSdbObj.role = pMnode->role; + sdbInfo("vgId:1, mnode:%d, role:%s", pMnode->mnodeId, syncRole[pMnode->role]); + if (pMnode->mnodeId == dnodeGetDnodeId()) 
tsSdbMgmt.role = pMnode->role; mnodeDecMnodeRef(pMnode); } } @@ -237,81 +233,75 @@ static uint32_t sdbGetFileInfo(void *ahandle, char *name, uint32_t *index, uint3 return 0; } -static int sdbGetWalInfo(void *ahandle, char *name, uint32_t *index) { - return walGetWalFile(tsSdbObj.wal, name, index); +static int32_t sdbGetWalInfo(void *ahandle, char *fileName, int64_t *fileId) { + return walGetWalFile(tsSdbMgmt.wal, fileName, fileId); } static void sdbNotifyRole(void *ahandle, int8_t role) { - sdbInfo("mnode role changed from %s to %s", mnodeGetMnodeRoleStr(tsSdbObj.role), mnodeGetMnodeRoleStr(role)); + sdbInfo("vgId:1, mnode role changed from %s to %s", syncRole[tsSdbMgmt.role], syncRole[role]); - if (role == TAOS_SYNC_ROLE_MASTER && tsSdbObj.role != TAOS_SYNC_ROLE_MASTER) { + if (role == TAOS_SYNC_ROLE_MASTER && tsSdbMgmt.role != TAOS_SYNC_ROLE_MASTER) { balanceReset(); } - tsSdbObj.role = role; + tsSdbMgmt.role = role; sdbUpdateMnodeRoles(); } -FORCE_INLINE -static void sdbConfirmForward(void *ahandle, void *param, int32_t code) { - assert(param); - SSdbOper * pOper = param; - SMnodeMsg *pMsg = pOper->pMsg; - if (code <= 0) pOper->retCode = code; - - int32_t processedCount = atomic_add_fetch_32(&pOper->processedCount, 1); - if (processedCount <= 1) { - if (pMsg != NULL) { - sdbDebug("app:%p:%p, waiting for confirm this operation, count:%d result:%s", pMsg->rpcMsg.ahandle, pMsg, - processedCount, tstrerror(code)); - } - return; - } +// failed to forward, need revert insert +static void sdbHandleFailedConfirm(SSdbRow *pRow) { + SWalHead *pHead = pRow->pHead; + int32_t action = pHead->msgType % 10; - if (pMsg != NULL) { - sdbDebug("app:%p:%p, is confirmed and will do callback func, result:%s", pMsg->rpcMsg.ahandle, pMsg, - tstrerror(code)); - } + sdbError("vgId:1, row:%p:%s hver:%" PRIu64 " action:%s, failed to foward since %s", pRow->pObj, + sdbGetKeyStr(pRow->pTable, pHead->cont), pHead->version, actStr[action], tstrerror(pRow->code)); - // failed to forward, need revert insert - if (pOper->retCode != TSDB_CODE_SUCCESS) { - SWalHead *pHead = (void *)pOper + sizeof(SSdbOper) + SDB_SYNC_HACK; - int32_t action = pHead->msgType % 10; - sdbError("table:%s record:%p:%s ver:%" PRIu64 ", action:%d failed to foward reason:%s", - ((SSdbTable *)pOper->table)->tableName, pOper->pObj, sdbGetKeyStr(pOper->table, pHead->cont), - pHead->version, action, tstrerror(pOper->retCode)); - if (action == SDB_ACTION_INSERT) { - sdbDeleteHash(pOper->table, pOper); - } + // It's better to create a table in two stages, create it first and then set it success + if (action == SDB_ACTION_INSERT) { + SSdbRow row = {.type = SDB_OPER_GLOBAL, .pTable = pRow->pTable, .pObj = pRow->pObj}; + sdbDeleteRow(&row); } +} - if (pOper->writeCb != NULL) { - pOper->retCode = (*pOper->writeCb)(pMsg, pOper->retCode); +FORCE_INLINE +static void sdbConfirmForward(void *ahandle, void *wparam, int32_t code) { + if (wparam == NULL) return; + SSdbRow *pRow = wparam; + SMnodeMsg * pMsg = pRow->pMsg; + + if (code <= 0) pRow->code = code; + int32_t count = atomic_add_fetch_32(&pRow->processedCount, 1); + if (count <= 1) { + if (pMsg != NULL) sdbTrace("vgId:1, msg:%p waiting for confirm, count:%d code:%x", pMsg, count, code); + return; + } else { + if (pMsg != NULL) sdbTrace("vgId:1, msg:%p is confirmed, code:%x", pMsg, code); } - dnodeSendRpcMnodeWriteRsp(pMsg, pOper->retCode); - // if ahandle, means this func is called by sdb write - if (ahandle == NULL) { - sdbDecRef(pOper->table, pOper->pObj); + if (pRow->code != TSDB_CODE_SUCCESS) 
sdbHandleFailedConfirm(pRow); + + if (pRow->fpRsp != NULL) { + pRow->code = (*pRow->fpRsp)(pMsg, pRow->code); } - taosFreeQitem(pOper); + dnodeSendRpcMWriteRsp(pMsg, pRow->code); + sdbFreeFromQueue(pRow); } static void sdbUpdateSyncTmrFp(void *param, void *tmrId) { sdbUpdateSync(NULL); } void sdbUpdateAsync() { - taosTmrReset(sdbUpdateSyncTmrFp, 200, NULL, tsMnodeTmr, &tsUpdateSyncTmr); + taosTmrReset(sdbUpdateSyncTmrFp, 200, NULL, tsMnodeTmr, &tsSdbTmr); } void sdbUpdateSync(void *pMnodes) { - SDMMnodeInfos *mnodes = pMnodes; + SMnodeInfos *mnodes = pMnodes; if (!mnodeIsRunning()) { - mDebug("mnode not start yet, update sync config later"); + mDebug("vgId:1, mnode not start yet, update sync config later"); return; } - mDebug("update sync config in sync module, mnodes:%p", pMnodes); + mDebug("vgId:1, update sync config in sync module, mnodes:%p", pMnodes); SSyncCfg syncCfg = {0}; int32_t index = 0; @@ -337,16 +327,16 @@ void sdbUpdateSync(void *pMnodes) { } sdbFreeIter(pIter); syncCfg.replica = index; - mDebug("mnodes info not input, use infos in sdb, numOfMnodes:%d", syncCfg.replica); + mDebug("vgId:1, mnodes info not input, use infos in sdb, numOfMnodes:%d", syncCfg.replica); } else { - for (index = 0; index < mnodes->nodeNum; ++index) { - SDMMnodeInfo *node = &mnodes->nodeInfos[index]; - syncCfg.nodeInfo[index].nodeId = node->nodeId; - taosGetFqdnPortFromEp(node->nodeEp, syncCfg.nodeInfo[index].nodeFqdn, &syncCfg.nodeInfo[index].nodePort); + for (index = 0; index < mnodes->mnodeNum; ++index) { + SMnodeInfo *node = &mnodes->mnodeInfos[index]; + syncCfg.nodeInfo[index].nodeId = node->mnodeId; + taosGetFqdnPortFromEp(node->mnodeEp, syncCfg.nodeInfo[index].nodeFqdn, &syncCfg.nodeInfo[index].nodePort); syncCfg.nodeInfo[index].nodePort += TSDB_PORT_SYNC; } syncCfg.replica = index; - mDebug("mnodes info input, numOfMnodes:%d", syncCfg.replica); + mDebug("vgId:1, mnodes info input, numOfMnodes:%d", syncCfg.replica); } syncCfg.quorum = (syncCfg.replica == 1) ? 
1 : 2; @@ -360,18 +350,19 @@ void sdbUpdateSync(void *pMnodes) { } if (!hasThisDnode) { - sdbDebug("update sync config, this dnode not exist"); + sdbDebug("vgId:1, update sync config, this dnode not exist"); return; } - if (memcmp(&syncCfg, &tsSdbObj.cfg, sizeof(SSyncCfg)) == 0) { - sdbDebug("update sync config, info not changed"); + if (memcmp(&syncCfg, &tsSdbMgmt.cfg, sizeof(SSyncCfg)) == 0) { + sdbDebug("vgId:1, update sync config, info not changed"); return; } - sdbInfo("work as mnode, replica:%d", syncCfg.replica); + sdbInfo("vgId:1, work as mnode, replica:%d", syncCfg.replica); for (int32_t i = 0; i < syncCfg.replica; ++i) { - sdbInfo("mnode:%d, %s:%d", syncCfg.nodeInfo[i].nodeId, syncCfg.nodeInfo[i].nodeFqdn, syncCfg.nodeInfo[i].nodePort); + sdbInfo("vgId:1, mnode:%d, %s:%d", syncCfg.nodeInfo[i].nodeId, syncCfg.nodeInfo[i].nodeFqdn, + syncCfg.nodeInfo[i].nodePort); } SSyncInfo syncInfo = {0}; @@ -382,23 +373,24 @@ void sdbUpdateSync(void *pMnodes) { syncInfo.ahandle = NULL; syncInfo.getWalInfo = sdbGetWalInfo; syncInfo.getFileInfo = sdbGetFileInfo; - syncInfo.writeToCache = sdbWriteToQueue; + syncInfo.writeToCache = sdbWriteWalToQueue; syncInfo.confirmForward = sdbConfirmForward; syncInfo.notifyRole = sdbNotifyRole; - tsSdbObj.cfg = syncCfg; + tsSdbMgmt.cfg = syncCfg; - if (tsSdbObj.sync) { - syncReconfig(tsSdbObj.sync, &syncCfg); + if (tsSdbMgmt.sync) { + syncReconfig(tsSdbMgmt.sync, &syncCfg); } else { - tsSdbObj.sync = syncStart(&syncInfo); + tsSdbMgmt.sync = syncStart(&syncInfo); } + sdbUpdateMnodeRoles(); } int32_t sdbInit() { - pthread_mutex_init(&tsSdbObj.mutex, NULL); + pthread_mutex_init(&tsSdbMgmt.mutex, NULL); - if (sdbInitWriteWorker() != 0) { + if (sdbInitWorker() != 0) { return -1; } @@ -409,56 +401,56 @@ int32_t sdbInit() { sdbRestoreTables(); if (mnodeGetMnodesNum() == 1) { - tsSdbObj.role = TAOS_SYNC_ROLE_MASTER; + tsSdbMgmt.role = TAOS_SYNC_ROLE_MASTER; } - tsSdbObj.status = SDB_STATUS_SERVING; + tsSdbMgmt.status = SDB_STATUS_SERVING; return TSDB_CODE_SUCCESS; } void sdbCleanUp() { - if (tsSdbObj.status != SDB_STATUS_SERVING) return; + if (tsSdbMgmt.status != SDB_STATUS_SERVING) return; - tsSdbObj.status = SDB_STATUS_CLOSING; - - sdbCleanupWriteWorker(); - sdbDebug("sdb will be closed, ver:%" PRId64, tsSdbObj.version); + tsSdbMgmt.status = SDB_STATUS_CLOSING; + + sdbCleanupWorker(); + sdbDebug("vgId:1, sdb will be closed, mver:%" PRIu64, tsSdbMgmt.version); - if (tsSdbObj.sync) { - syncStop(tsSdbObj.sync); - tsSdbObj.sync = NULL; + if (tsSdbMgmt.sync) { + syncStop(tsSdbMgmt.sync); + tsSdbMgmt.sync = -1; } - if (tsSdbObj.wal) { - walClose(tsSdbObj.wal); - tsSdbObj.wal = NULL; + if (tsSdbMgmt.wal) { + walClose(tsSdbMgmt.wal); + tsSdbMgmt.wal = NULL; } - pthread_mutex_destroy(&tsSdbObj.mutex); + pthread_mutex_destroy(&tsSdbMgmt.mutex); } -void sdbIncRef(void *handle, void *pObj) { - if (pObj == NULL || handle == NULL) return; +void sdbIncRef(void *tparam, void *pRow) { + if (pRow == NULL || tparam == NULL) return; - SSdbTable *pTable = handle; - int32_t * pRefCount = (int32_t *)(pObj + pTable->refCountPos); + SSdbTable *pTable = tparam; + int32_t * pRefCount = (int32_t *)(pRow + pTable->refCountPos); int32_t refCount = atomic_add_fetch_32(pRefCount, 1); - sdbTrace("add ref to table:%s record:%p:%s:%d", pTable->tableName, pObj, sdbGetKeyStrFromObj(pTable, pObj), refCount); + sdbTrace("vgId:1, sdb:%s, inc ref to row:%p:%s:%d", pTable->name, pRow, sdbGetRowStr(pTable, pRow), refCount); } -void sdbDecRef(void *handle, void *pObj) { - if (pObj == NULL || handle == NULL) 
return; +void sdbDecRef(void *tparam, void *pRow) { + if (pRow == NULL || tparam == NULL) return; - SSdbTable *pTable = handle; - int32_t * pRefCount = (int32_t *)(pObj + pTable->refCountPos); + SSdbTable *pTable = tparam; + int32_t * pRefCount = (int32_t *)(pRow + pTable->refCountPos); int32_t refCount = atomic_sub_fetch_32(pRefCount, 1); - sdbTrace("def ref of table:%s record:%p:%s:%d", pTable->tableName, pObj, sdbGetKeyStrFromObj(pTable, pObj), refCount); + sdbTrace("vgId:1, sdb:%s, dec ref to row:%p:%s:%d", pTable->name, pRow, sdbGetRowStr(pTable, pRow), refCount); - int32_t *updateEnd = pObj + pTable->refCountPos - 4; + int32_t *updateEnd = pRow + pTable->refCountPos - 4; if (refCount <= 0 && *updateEnd) { - sdbTrace("table:%s, record:%p:%s:%d is destroyed", pTable->tableName, pObj, sdbGetKeyStrFromObj(pTable, pObj), refCount); - SSdbOper oper = {.pObj = pObj}; - (*pTable->destroyFp)(&oper); + sdbTrace("vgId:1, sdb:%s, row:%p:%s:%d destroyed", pTable->name, pRow, sdbGetRowStr(pTable, pRow), refCount); + SSdbRow row = {.pObj = pRow}; + (*pTable->fpDestroy)(&row); } } @@ -480,12 +472,12 @@ static void *sdbGetRowMetaFromObj(SSdbTable *pTable, void *key) { return sdbGetRowMeta(pTable, sdbGetObjKey(pTable, key)); } -void *sdbGetRow(void *handle, void *key) { - SSdbTable *pTable = handle; +void *sdbGetRow(void *tparam, void *key) { + SSdbTable *pTable = tparam; pthread_mutex_lock(&pTable->mutex); - void *pRow = sdbGetRowMeta(handle, key); - if (pRow) sdbIncRef(handle, pRow); + void *pRow = sdbGetRowMeta(pTable, key); + if (pRow) sdbIncRef(pTable, pRow); pthread_mutex_unlock(&pTable->mutex); return pRow; @@ -495,8 +487,8 @@ static void *sdbGetRowFromObj(SSdbTable *pTable, void *key) { return sdbGetRow(pTable, sdbGetObjKey(pTable, key)); } -static int32_t sdbInsertHash(SSdbTable *pTable, SSdbOper *pOper) { - void * key = sdbGetObjKey(pTable, pOper->pObj); +static int32_t sdbInsertHash(SSdbTable *pTable, SSdbRow *pRow) { + void * key = sdbGetObjKey(pTable, pRow->pObj); int32_t keySize = sizeof(int32_t); if (pTable->keyType == SDB_KEY_STRING || pTable->keyType == SDB_KEY_VAR_STRING) { @@ -504,43 +496,43 @@ static int32_t sdbInsertHash(SSdbTable *pTable, SSdbOper *pOper) { } pthread_mutex_lock(&pTable->mutex); - taosHashPut(pTable->iHandle, key, keySize, &pOper->pObj, sizeof(int64_t)); + taosHashPut(pTable->iHandle, key, keySize, &pRow->pObj, sizeof(int64_t)); pthread_mutex_unlock(&pTable->mutex); - sdbIncRef(pTable, pOper->pObj); + sdbIncRef(pTable, pRow->pObj); atomic_add_fetch_32(&pTable->numOfRows, 1); if (pTable->keyType == SDB_KEY_AUTO) { - pTable->autoIndex = MAX(pTable->autoIndex, *((uint32_t *)pOper->pObj)); + pTable->autoIndex = MAX(pTable->autoIndex, *((uint32_t *)pRow->pObj)); } else { atomic_add_fetch_32(&pTable->autoIndex, 1); } - sdbDebug("table:%s, insert record:%s to hash, rowSize:%d numOfRows:%" PRId64 ", msg:%p", pTable->tableName, - sdbGetKeyStrFromObj(pTable, pOper->pObj), pOper->rowSize, pTable->numOfRows, pOper->pMsg); + sdbDebug("vgId:1, sdb:%s, insert key:%s to hash, rowSize:%d rows:%" PRId64 ", msg:%p", pTable->name, + sdbGetRowStr(pTable, pRow->pObj), pRow->rowSize, pTable->numOfRows, pRow->pMsg); - int32_t code = (*pTable->insertFp)(pOper); + int32_t code = (*pTable->fpInsert)(pRow); if (code != TSDB_CODE_SUCCESS) { - sdbError("table:%s, failed to insert record:%s to hash, remove it", pTable->tableName, - sdbGetKeyStrFromObj(pTable, pOper->pObj)); - sdbDeleteHash(pTable, pOper); + sdbError("vgId:1, sdb:%s, failed to insert key:%s to hash, remove it", pTable->name, + 
sdbGetRowStr(pTable, pRow->pObj)); + sdbDeleteHash(pTable, pRow); } return TSDB_CODE_SUCCESS; } -static int32_t sdbDeleteHash(SSdbTable *pTable, SSdbOper *pOper) { - int32_t *updateEnd = pOper->pObj + pTable->refCountPos - 4; +static int32_t sdbDeleteHash(SSdbTable *pTable, SSdbRow *pRow) { + int32_t *updateEnd = pRow->pObj + pTable->refCountPos - 4; bool set = atomic_val_compare_exchange_32(updateEnd, 0, 1) == 0; if (!set) { - sdbError("table:%s, failed to delete record:%s from hash, for it already removed", pTable->tableName, - sdbGetKeyStrFromObj(pTable, pOper->pObj)); + sdbError("vgId:1, sdb:%s, failed to delete key:%s from hash, for it already removed", pTable->name, + sdbGetRowStr(pTable, pRow->pObj)); return TSDB_CODE_MND_SDB_OBJ_NOT_THERE; } - (*pTable->deleteFp)(pOper); + (*pTable->fpDelete)(pRow); - void * key = sdbGetObjKey(pTable, pOper->pObj); + void * key = sdbGetObjKey(pTable, pRow->pObj); int32_t keySize = sizeof(int32_t); if (pTable->keyType == SDB_KEY_STRING || pTable->keyType == SDB_KEY_VAR_STRING) { keySize = strlen((char *)key); @@ -551,310 +543,221 @@ static int32_t sdbDeleteHash(SSdbTable *pTable, SSdbOper *pOper) { pthread_mutex_unlock(&pTable->mutex); atomic_sub_fetch_32(&pTable->numOfRows, 1); - - sdbDebug("table:%s, delete record:%s from hash, numOfRows:%" PRId64 ", msg:%p", pTable->tableName, - sdbGetKeyStrFromObj(pTable, pOper->pObj), pTable->numOfRows, pOper->pMsg); - sdbDecRef(pTable, pOper->pObj); + sdbDebug("vgId:1, sdb:%s, delete key:%s from hash, numOfRows:%" PRId64 ", msg:%p", pTable->name, + sdbGetRowStr(pTable, pRow->pObj), pTable->numOfRows, pRow->pMsg); + + sdbDecRef(pTable, pRow->pObj); return TSDB_CODE_SUCCESS; } -static int32_t sdbUpdateHash(SSdbTable *pTable, SSdbOper *pOper) { - sdbDebug("table:%s, update record:%s in hash, numOfRows:%" PRId64 ", msg:%p", pTable->tableName, - sdbGetKeyStrFromObj(pTable, pOper->pObj), pTable->numOfRows, pOper->pMsg); +static int32_t sdbUpdateHash(SSdbTable *pTable, SSdbRow *pRow) { + sdbDebug("vgId:1, sdb:%s, update key:%s in hash, numOfRows:%" PRId64 ", msg:%p", pTable->name, + sdbGetRowStr(pTable, pRow->pObj), pTable->numOfRows, pRow->pMsg); - (*pTable->updateFp)(pOper); + (*pTable->fpUpdate)(pRow); return TSDB_CODE_SUCCESS; } -static int sdbWrite(void *param, void *data, int type) { - SSdbOper *pOper = param; - SWalHead *pHead = data; +static int sdbProcessWrite(void *wparam, void *hparam, int32_t qtype, void *unused) { + SSdbRow *pRow = wparam; + SWalHead *pHead = hparam; int32_t tableId = pHead->msgType / 10; int32_t action = pHead->msgType % 10; SSdbTable *pTable = sdbGetTableFromId(tableId); assert(pTable != NULL); - pthread_mutex_lock(&tsSdbObj.mutex); + pthread_mutex_lock(&tsSdbMgmt.mutex); if (pHead->version == 0) { // assign version - tsSdbObj.version++; - pHead->version = tsSdbObj.version; + tsSdbMgmt.version++; + pHead->version = tsSdbMgmt.version; } else { // for data from WAL or forward, version may be smaller - if (pHead->version <= tsSdbObj.version) { - pthread_mutex_unlock(&tsSdbObj.mutex); - sdbDebug("table:%s, failed to restore %s record:%s from source(%d), ver:%" PRId64 " too large, sdb ver:%" PRId64, - pTable->tableName, sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), type, pHead->version, tsSdbObj.version); + if (pHead->version <= tsSdbMgmt.version) { + pthread_mutex_unlock(&tsSdbMgmt.mutex); + sdbDebug("vgId:1, sdb:%s, failed to restore %s key:%s from source(%d), hver:%" PRIu64 " too large, mver:%" PRIu64, + pTable->name, actStr[action], sdbGetKeyStr(pTable, pHead->cont), 
qtype, pHead->version, tsSdbMgmt.version); return TSDB_CODE_SUCCESS; - } else if (pHead->version != tsSdbObj.version + 1) { - pthread_mutex_unlock(&tsSdbObj.mutex); - sdbError("table:%s, failed to restore %s record:%s from source(%d), ver:%" PRId64 " too large, sdb ver:%" PRId64, - pTable->tableName, sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), type, pHead->version, tsSdbObj.version); + } else if (pHead->version != tsSdbMgmt.version + 1) { + pthread_mutex_unlock(&tsSdbMgmt.mutex); + sdbError("vgId:1, sdb:%s, failed to restore %s key:%s from source(%d), hver:%" PRIu64 " too large, mver:%" PRIu64, + pTable->name, actStr[action], sdbGetKeyStr(pTable, pHead->cont), qtype, pHead->version, tsSdbMgmt.version); return TSDB_CODE_SYN_INVALID_VERSION; } else { - tsSdbObj.version = pHead->version; + tsSdbMgmt.version = pHead->version; } } - int32_t code = walWrite(tsSdbObj.wal, pHead); + int32_t code = walWrite(tsSdbMgmt.wal, pHead); if (code < 0) { - pthread_mutex_unlock(&tsSdbObj.mutex); + pthread_mutex_unlock(&tsSdbMgmt.mutex); return code; } - pthread_mutex_unlock(&tsSdbObj.mutex); + pthread_mutex_unlock(&tsSdbMgmt.mutex); - // from app, oper is created - if (pOper != NULL) { + // from app, row is created + if (pRow != NULL) { // forward to peers - pOper->processedCount = 0; - int32_t syncCode = syncForwardToPeer(tsSdbObj.sync, pHead, pOper, TAOS_QTYPE_RPC); - if (syncCode <= 0) pOper->processedCount = 1; + pRow->processedCount = 0; + int32_t syncCode = syncForwardToPeer(tsSdbMgmt.sync, pHead, pRow, TAOS_QTYPE_RPC); + if (syncCode <= 0) pRow->processedCount = 1; if (syncCode < 0) { - sdbError("table:%s, failed to forward request, result:%s action:%s record:%s ver:%" PRId64 ", msg:%p", pTable->tableName, - tstrerror(syncCode), sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version, pOper->pMsg); + sdbError("vgId:1, sdb:%s, failed to forward req since %s action:%s key:%s hver:%" PRIu64 ", msg:%p", pTable->name, + tstrerror(syncCode), actStr[action], sdbGetKeyStr(pTable, pHead->cont), pHead->version, pRow->pMsg); } else if (syncCode > 0) { - sdbDebug("table:%s, forward request is sent, action:%s record:%s ver:%" PRId64 ", msg:%p", pTable->tableName, - sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version, pOper->pMsg); + sdbDebug("vgId:1, sdb:%s, forward req is sent, action:%s key:%s hver:%" PRIu64 ", msg:%p", pTable->name, + actStr[action], sdbGetKeyStr(pTable, pHead->cont), pHead->version, pRow->pMsg); } else { - sdbTrace("table:%s, no need to send fwd request, action:%s record:%s ver:%" PRId64 ", msg:%p", pTable->tableName, - sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version, pOper->pMsg); + sdbTrace("vgId:1, sdb:%s, no need to send fwd req, action:%s key:%s hver:%" PRIu64 ", msg:%p", pTable->name, + actStr[action], sdbGetKeyStr(pTable, pHead->cont), pHead->version, pRow->pMsg); } return syncCode; } - sdbDebug("table:%s, record from wal/fwd is disposed, action:%s record:%s ver:%" PRId64, pTable->tableName, - sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version); + sdbDebug("vgId:1, sdb:%s, record from wal/fwd is disposed, action:%s key:%s hver:%" PRIu64, pTable->name, + actStr[action], sdbGetKeyStr(pTable, pHead->cont), pHead->version); // even it is WAL/FWD, it shall be called to update version in sync - syncForwardToPeer(tsSdbObj.sync, pHead, pOper, TAOS_QTYPE_RPC); + syncForwardToPeer(tsSdbMgmt.sync, pHead, pRow, TAOS_QTYPE_RPC); - // from wal or forward msg, oper not created, should add into 
hash + // from wal or forward msg, row not created, should add into hash if (action == SDB_ACTION_INSERT) { - SSdbOper oper = {.rowSize = pHead->len, .rowData = pHead->cont, .table = pTable}; - code = (*pTable->decodeFp)(&oper); - return sdbInsertHash(pTable, &oper); + SSdbRow row = {.rowSize = pHead->len, .rowData = pHead->cont, .pTable = pTable}; + code = (*pTable->fpDecode)(&row); + return sdbInsertHash(pTable, &row); } else if (action == SDB_ACTION_DELETE) { - void *pRow = sdbGetRowMeta(pTable, pHead->cont); - if (pRow == NULL) { - sdbDebug("table:%s, object:%s not exist in hash, ignore delete action", pTable->tableName, + void *pObj = sdbGetRowMeta(pTable, pHead->cont); + if (pObj == NULL) { + sdbDebug("vgId:1, sdb:%s, object:%s not exist in hash, ignore delete action", pTable->name, sdbGetKeyStr(pTable, pHead->cont)); return TSDB_CODE_SUCCESS; } - SSdbOper oper = {.table = pTable, .pObj = pRow}; - return sdbDeleteHash(pTable, &oper); + SSdbRow row = {.pTable = pTable, .pObj = pObj}; + return sdbDeleteHash(pTable, &row); } else if (action == SDB_ACTION_UPDATE) { - void *pRow = sdbGetRowMeta(pTable, pHead->cont); - if (pRow == NULL) { - sdbDebug("table:%s, object:%s not exist in hash, ignore update action", pTable->tableName, + void *pObj = sdbGetRowMeta(pTable, pHead->cont); + if (pObj == NULL) { + sdbDebug("vgId:1, sdb:%s, object:%s not exist in hash, ignore update action", pTable->name, sdbGetKeyStr(pTable, pHead->cont)); return TSDB_CODE_SUCCESS; } - SSdbOper oper = {.rowSize = pHead->len, .rowData = pHead->cont, .table = pTable}; - code = (*pTable->decodeFp)(&oper); - return sdbUpdateHash(pTable, &oper); + SSdbRow row = {.rowSize = pHead->len, .rowData = pHead->cont, .pTable = pTable}; + code = (*pTable->fpDecode)(&row); + return sdbUpdateHash(pTable, &row); } else { return TSDB_CODE_MND_INVALID_MSG_TYPE; } } -int32_t sdbInsertRow(SSdbOper *pOper) { - SSdbTable *pTable = (SSdbTable *)pOper->table; +int32_t sdbInsertRow(SSdbRow *pRow) { + SSdbTable *pTable = pRow->pTable; if (pTable == NULL) return TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE; - if (sdbGetRowFromObj(pTable, pOper->pObj)) { - sdbError("table:%s, failed to insert record:%s, already exist", pTable->tableName, - sdbGetKeyStrFromObj(pTable, pOper->pObj)); - sdbDecRef(pTable, pOper->pObj); + if (sdbGetRowFromObj(pTable, pRow->pObj)) { + sdbError("vgId:1, sdb:%s, failed to insert:%s since it exist", pTable->name, sdbGetRowStr(pTable, pRow->pObj)); + sdbDecRef(pTable, pRow->pObj); return TSDB_CODE_MND_SDB_OBJ_ALREADY_THERE; } if (pTable->keyType == SDB_KEY_AUTO) { - *((uint32_t *)pOper->pObj) = atomic_add_fetch_32(&pTable->autoIndex, 1); + *((uint32_t *)pRow->pObj) = atomic_add_fetch_32(&pTable->autoIndex, 1); // let vgId increase from 2 - if (pTable->autoIndex == 1 && strcmp(pTable->tableName, "vgroups") == 0) { - *((uint32_t *)pOper->pObj) = atomic_add_fetch_32(&pTable->autoIndex, 1); + if (pTable->autoIndex == 1 && pTable->id == SDB_TABLE_VGROUP) { + *((uint32_t *)pRow->pObj) = atomic_add_fetch_32(&pTable->autoIndex, 1); } } - int32_t code = sdbInsertHash(pTable, pOper); + int32_t code = sdbInsertHash(pTable, pRow); if (code != TSDB_CODE_SUCCESS) { - sdbError("table:%s, failed to insert into hash", pTable->tableName); + sdbError("vgId:1, sdb:%s, failed to insert:%s into hash", pTable->name, sdbGetRowStr(pTable, pRow->pObj)); return code; } // just insert data into memory - if (pOper->type != SDB_OPER_GLOBAL) { + if (pRow->type != SDB_OPER_GLOBAL) { return TSDB_CODE_SUCCESS; } - if (pOper->reqFp) { - return 
(*pOper->reqFp)(pOper->pMsg); + if (pRow->fpReq) { + return (*pRow->fpReq)(pRow->pMsg); } else { - return sdbInsertRowImp(pOper); - } -} - -int32_t sdbInsertRowImp(SSdbOper *pOper) { - SSdbTable *pTable = (SSdbTable *)pOper->table; - if (pTable == NULL) return TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE; - - int32_t size = sizeof(SSdbOper) + sizeof(SWalHead) + pTable->maxRowSize + SDB_SYNC_HACK; - SSdbOper *pNewOper = taosAllocateQitem(size); - - SWalHead *pHead = (void *)pNewOper + sizeof(SSdbOper) + SDB_SYNC_HACK; - pHead->version = 0; - pHead->len = pOper->rowSize; - pHead->msgType = pTable->tableId * 10 + SDB_ACTION_INSERT; - - pOper->rowData = pHead->cont; - (*pTable->encodeFp)(pOper); - pHead->len = pOper->rowSize; - - memcpy(pNewOper, pOper, sizeof(SSdbOper)); - - if (pNewOper->pMsg != NULL) { - sdbDebug("app:%p:%p, table:%s record:%p:%s, insert action is add to sdb queue", pNewOper->pMsg->rpcMsg.ahandle, - pNewOper->pMsg, pTable->tableName, pOper->pObj, sdbGetKeyStrFromObj(pTable, pOper->pObj)); + return sdbWriteRowToQueue(pRow, SDB_ACTION_INSERT); } - - sdbIncRef(pNewOper->table, pNewOper->pObj); - taosWriteQitem(tsSdbWriteQueue, TAOS_QTYPE_RPC, pNewOper); - - return TSDB_CODE_MND_ACTION_IN_PROGRESS; } -bool sdbCheckRowDeleted(void *pTableInput, void *pRow) { - SSdbTable *pTable = pTableInput; +bool sdbCheckRowDeleted(void *tparam, void *pRow) { + SSdbTable *pTable = tparam; if (pTable == NULL) return false; int32_t *updateEnd = pRow + pTable->refCountPos - 4; return atomic_val_compare_exchange_32(updateEnd, 1, 1) == 1; } -int32_t sdbDeleteRow(SSdbOper *pOper) { - SSdbTable *pTable = (SSdbTable *)pOper->table; +int32_t sdbDeleteRow(SSdbRow *pRow) { + SSdbTable *pTable = pRow->pTable; if (pTable == NULL) return TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE; - void *pRow = sdbGetRowMetaFromObj(pTable, pOper->pObj); - if (pRow == NULL) { - sdbDebug("table:%s, record is not there, delete failed", pTable->tableName); + void *pObj = sdbGetRowMetaFromObj(pTable, pRow->pObj); + if (pObj == NULL) { + sdbDebug("vgId:1, sdb:%s, record is not there, delete failed", pTable->name); return TSDB_CODE_MND_SDB_OBJ_NOT_THERE; } - sdbIncRef(pTable, pOper->pObj); - - int32_t code = sdbDeleteHash(pTable, pOper); + int32_t code = sdbDeleteHash(pTable, pRow); if (code != TSDB_CODE_SUCCESS) { - sdbError("table:%s, failed to delete from hash", pTable->tableName); - sdbDecRef(pTable, pOper->pObj); + sdbError("vgId:1, sdb:%s, failed to delete from hash", pTable->name); return code; } // just delete data from memory - if (pOper->type != SDB_OPER_GLOBAL) { - sdbDecRef(pTable, pOper->pObj); + if (pRow->type != SDB_OPER_GLOBAL) { return TSDB_CODE_SUCCESS; } - if (pOper->reqFp) { - return (*pOper->reqFp)(pOper->pMsg); + if (pRow->fpReq) { + return (*pRow->fpReq)(pRow->pMsg); } else { - return sdbDeleteRowImp(pOper); - } -} - -int32_t sdbDeleteRowImp(SSdbOper *pOper) { - SSdbTable *pTable = (SSdbTable *)pOper->table; - if (pTable == NULL) return TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE; - - int32_t size = sizeof(SSdbOper) + sizeof(SWalHead) + pTable->maxRowSize + SDB_SYNC_HACK; - SSdbOper *pNewOper = taosAllocateQitem(size); - - SWalHead *pHead = (void *)pNewOper + sizeof(SSdbOper) + SDB_SYNC_HACK; - pHead->version = 0; - pHead->msgType = pTable->tableId * 10 + SDB_ACTION_DELETE; - - pOper->rowData = pHead->cont; - (*pTable->encodeFp)(pOper); - pHead->len = pOper->rowSize; - - memcpy(pNewOper, pOper, sizeof(SSdbOper)); - - if (pNewOper->pMsg != NULL) { - sdbDebug("app:%p:%p, table:%s record:%p:%s, delete action is add to sdb queue", 
pNewOper->pMsg->rpcMsg.ahandle, - pNewOper->pMsg, pTable->tableName, pOper->pObj, sdbGetKeyStrFromObj(pTable, pOper->pObj)); + return sdbWriteRowToQueue(pRow, SDB_ACTION_DELETE); } - - taosWriteQitem(tsSdbWriteQueue, TAOS_QTYPE_RPC, pNewOper); - - return TSDB_CODE_MND_ACTION_IN_PROGRESS; } -int32_t sdbUpdateRow(SSdbOper *pOper) { - SSdbTable *pTable = (SSdbTable *)pOper->table; +int32_t sdbUpdateRow(SSdbRow *pRow) { + SSdbTable *pTable = pRow->pTable; if (pTable == NULL) return TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE; - void *pRow = sdbGetRowMetaFromObj(pTable, pOper->pObj); - if (pRow == NULL) { - sdbDebug("table:%s, record is not there, update failed", pTable->tableName); + void *pObj = sdbGetRowMetaFromObj(pTable, pRow->pObj); + if (pObj == NULL) { + sdbDebug("vgId:1, sdb:%s, record is not there, update failed", pTable->name); return TSDB_CODE_MND_SDB_OBJ_NOT_THERE; } - int32_t code = sdbUpdateHash(pTable, pOper); + int32_t code = sdbUpdateHash(pTable, pRow); if (code != TSDB_CODE_SUCCESS) { - sdbError("table:%s, failed to update hash", pTable->tableName); + sdbError("vgId:1, sdb:%s, failed to update hash", pTable->name); return code; } // just update data in memory - if (pOper->type != SDB_OPER_GLOBAL) { + if (pRow->type != SDB_OPER_GLOBAL) { return TSDB_CODE_SUCCESS; } - if (pOper->reqFp) { - return (*pOper->reqFp)(pOper->pMsg); + if (pRow->fpReq) { + return (*pRow->fpReq)(pRow->pMsg); } else { - return sdbUpdateRowImp(pOper); + return sdbWriteRowToQueue(pRow, SDB_ACTION_UPDATE); } } -int32_t sdbUpdateRowImp(SSdbOper *pOper) { - SSdbTable *pTable = (SSdbTable *)pOper->table; - if (pTable == NULL) return TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE; - - int32_t size = sizeof(SSdbOper) + sizeof(SWalHead) + pTable->maxRowSize + SDB_SYNC_HACK; - SSdbOper *pNewOper = taosAllocateQitem(size); - - SWalHead *pHead = (void *)pNewOper + sizeof(SSdbOper) + SDB_SYNC_HACK; - pHead->version = 0; - pHead->msgType = pTable->tableId * 10 + SDB_ACTION_UPDATE; - - pOper->rowData = pHead->cont; - (*pTable->encodeFp)(pOper); - pHead->len = pOper->rowSize; - - memcpy(pNewOper, pOper, sizeof(SSdbOper)); - - if (pNewOper->pMsg != NULL) { - sdbDebug("app:%p:%p, table:%s record:%p:%s, update action is add to sdb queue", pNewOper->pMsg->rpcMsg.ahandle, - pNewOper->pMsg, pTable->tableName, pOper->pObj, sdbGetKeyStrFromObj(pTable, pOper->pObj)); - } - - sdbIncRef(pNewOper->table, pNewOper->pObj); - taosWriteQitem(tsSdbWriteQueue, TAOS_QTYPE_RPC, pNewOper); - - return TSDB_CODE_MND_ACTION_IN_PROGRESS; -} - -void *sdbFetchRow(void *handle, void *pNode, void **ppRow) { - SSdbTable *pTable = (SSdbTable *)handle; +void *sdbFetchRow(void *tparam, void *pNode, void **ppRow) { + SSdbTable *pTable = tparam; *ppRow = NULL; if (pTable == NULL) return NULL; @@ -875,7 +778,7 @@ void *sdbFetchRow(void *handle, void *pNode, void **ppRow) { } *ppRow = *ppMetaRow; - sdbIncRef(handle, *ppMetaRow); + sdbIncRef(pTable, *ppMetaRow); return pIter; } @@ -892,19 +795,19 @@ void *sdbOpenTable(SSdbTableDesc *pDesc) { if (pTable == NULL) return NULL; pthread_mutex_init(&pTable->mutex, NULL); - tstrncpy(pTable->tableName, pDesc->tableName, SDB_TABLE_LEN); + tstrncpy(pTable->name, pDesc->name, SDB_TABLE_LEN); pTable->keyType = pDesc->keyType; - pTable->tableId = pDesc->tableId; + pTable->id = pDesc->id; pTable->hashSessions = pDesc->hashSessions; pTable->maxRowSize = pDesc->maxRowSize; pTable->refCountPos = pDesc->refCountPos; - pTable->insertFp = pDesc->insertFp; - pTable->deleteFp = pDesc->deleteFp; - pTable->updateFp = pDesc->updateFp; - 
pTable->encodeFp = pDesc->encodeFp; - pTable->decodeFp = pDesc->decodeFp; - pTable->destroyFp = pDesc->destroyFp; - pTable->restoredFp = pDesc->restoredFp; + pTable->fpInsert = pDesc->fpInsert; + pTable->fpDelete = pDesc->fpDelete; + pTable->fpUpdate = pDesc->fpUpdate; + pTable->fpEncode = pDesc->fpEncode; + pTable->fpDecode = pDesc->fpDecode; + pTable->fpDestroy = pDesc->fpDestroy; + pTable->fpRestored = pDesc->fpRestored; _hash_fn_t hashFp = taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT); if (pTable->keyType == SDB_KEY_STRING || pTable->keyType == SDB_KEY_VAR_STRING) { @@ -912,8 +815,8 @@ void *sdbOpenTable(SSdbTableDesc *pDesc) { } pTable->iHandle = taosHashInit(pTable->hashSessions, hashFp, true, true); - tsSdbObj.numOfTables++; - tsSdbObj.tableList[pTable->tableId] = pTable; + tsSdbMgmt.numOfTables++; + tsSdbMgmt.tableList[pTable->id] = pTable; return pTable; } @@ -921,87 +824,87 @@ void sdbCloseTable(void *handle) { SSdbTable *pTable = (SSdbTable *)handle; if (pTable == NULL) return; - tsSdbObj.numOfTables--; - tsSdbObj.tableList[pTable->tableId] = NULL; + tsSdbMgmt.numOfTables--; + tsSdbMgmt.tableList[pTable->id] = NULL; SHashMutableIterator *pIter = taosHashCreateIter(pTable->iHandle); while (taosHashIterNext(pIter)) { void **ppRow = taosHashIterGet(pIter); if (ppRow == NULL) continue; - SSdbOper oper = { + SSdbRow row = { .pObj = *ppRow, - .table = pTable, + .pTable = pTable, }; - (*pTable->destroyFp)(&oper); + (*pTable->fpDestroy)(&row); } taosHashDestroyIter(pIter); taosHashCleanup(pTable->iHandle); pthread_mutex_destroy(&pTable->mutex); - sdbDebug("table:%s, is closed, numOfTables:%d", pTable->tableName, tsSdbObj.numOfTables); + sdbDebug("vgId:1, sdb:%s, is closed, numOfTables:%d", pTable->name, tsSdbMgmt.numOfTables); free(pTable); } -int32_t sdbInitWriteWorker() { +static int32_t sdbInitWorker() { tsSdbPool.num = 1; - tsSdbPool.writeWorker = (SSdbWriteWorker *)calloc(sizeof(SSdbWriteWorker), tsSdbPool.num); + tsSdbPool.worker = calloc(sizeof(SSdbWorker), tsSdbPool.num); - if (tsSdbPool.writeWorker == NULL) return -1; + if (tsSdbPool.worker == NULL) return -1; for (int32_t i = 0; i < tsSdbPool.num; ++i) { - SSdbWriteWorker *pWorker = tsSdbPool.writeWorker + i; + SSdbWorker *pWorker = tsSdbPool.worker + i; pWorker->workerId = i; } - sdbAllocWriteQueue(); + sdbAllocQueue(); - mInfo("sdb write is opened"); + mInfo("vgId:1, sdb write is opened"); return 0; } -void sdbCleanupWriteWorker() { +static void sdbCleanupWorker() { for (int32_t i = 0; i < tsSdbPool.num; ++i) { - SSdbWriteWorker *pWorker = tsSdbPool.writeWorker + i; + SSdbWorker *pWorker = tsSdbPool.worker + i; if (pWorker->thread) { - taosQsetThreadResume(tsSdbWriteQset); + taosQsetThreadResume(tsSdbWQset); } } for (int32_t i = 0; i < tsSdbPool.num; ++i) { - SSdbWriteWorker *pWorker = tsSdbPool.writeWorker + i; + SSdbWorker *pWorker = tsSdbPool.worker + i; if (pWorker->thread) { pthread_join(pWorker->thread, NULL); } } - sdbFreeWritequeue(); - taosTFree(tsSdbPool.writeWorker); + sdbFreeQueue(); + tfree(tsSdbPool.worker); - mInfo("sdb write is closed"); + mInfo("vgId:1, sdb write is closed"); } -int32_t sdbAllocWriteQueue() { - tsSdbWriteQueue = taosOpenQueue(); - if (tsSdbWriteQueue == NULL) return TSDB_CODE_MND_OUT_OF_MEMORY; +static int32_t sdbAllocQueue() { + tsSdbWQueue = taosOpenQueue(); + if (tsSdbWQueue == NULL) return TSDB_CODE_MND_OUT_OF_MEMORY; - tsSdbWriteQset = taosOpenQset(); - if (tsSdbWriteQset == NULL) { - taosCloseQueue(tsSdbWriteQueue); + tsSdbWQset = taosOpenQset(); + if (tsSdbWQset == NULL) { + 
taosCloseQueue(tsSdbWQueue); return TSDB_CODE_MND_OUT_OF_MEMORY; } - taosAddIntoQset(tsSdbWriteQset, tsSdbWriteQueue, NULL); + taosAddIntoQset(tsSdbWQset, tsSdbWQueue, NULL); - tsSdbWriteQall = taosAllocateQall(); - if (tsSdbWriteQall == NULL) { - taosCloseQset(tsSdbWriteQset); - taosCloseQueue(tsSdbWriteQueue); + tsSdbWQall = taosAllocateQall(); + if (tsSdbWQall == NULL) { + taosCloseQset(tsSdbWQset); + taosCloseQueue(tsSdbWQueue); return TSDB_CODE_MND_OUT_OF_MEMORY; } for (int32_t i = 0; i < tsSdbPool.num; ++i) { - SSdbWriteWorker *pWorker = tsSdbPool.writeWorker + i; + SSdbWorker *pWorker = tsSdbPool.worker + i; pWorker->workerId = i; pthread_attr_t thAttr; @@ -1010,9 +913,9 @@ int32_t sdbAllocWriteQueue() { if (pthread_create(&pWorker->thread, &thAttr, sdbWorkerFp, pWorker) != 0) { mError("failed to create thread to process sdb write queue, reason:%s", strerror(errno)); - taosFreeQall(tsSdbWriteQall); - taosCloseQset(tsSdbWriteQset); - taosCloseQueue(tsSdbWriteQueue); + taosFreeQall(tsSdbWQall); + taosCloseQset(tsSdbWQset); + taosCloseQueue(tsSdbWQueue); return TSDB_CODE_MND_OUT_OF_MEMORY; } @@ -1020,85 +923,128 @@ mDebug("sdb write worker:%d is launched, total:%d", pWorker->workerId, tsSdbPool.num); } - mDebug("sdb write queue:%p is allocated", tsSdbWriteQueue); + mDebug("sdb write queue:%p is allocated", tsSdbWQueue); return TSDB_CODE_SUCCESS; } -void sdbFreeWritequeue() { - taosCloseQueue(tsSdbWriteQueue); - taosFreeQall(tsSdbWriteQall); - taosCloseQset(tsSdbWriteQset); - tsSdbWriteQall = NULL; - tsSdbWriteQset = NULL; - tsSdbWriteQueue = NULL; +static void sdbFreeQueue() { + taosCloseQueue(tsSdbWQueue); + taosFreeQall(tsSdbWQall); + taosCloseQset(tsSdbWQset); + tsSdbWQall = NULL; + tsSdbWQset = NULL; + tsSdbWQueue = NULL; } -int sdbWriteToQueue(void *param, void *data, int type) { - SWalHead *pHead = data; - int size = sizeof(SWalHead) + pHead->len; - SWalHead *pWal = (SWalHead *)taosAllocateQitem(size); - memcpy(pWal, pHead, size); +static int32_t sdbWriteToQueue(SSdbRow *pRow, int32_t qtype) { + SWalHead *pHead = pRow->pHead; - taosWriteQitem(tsSdbWriteQueue, type, pWal); - return 0; + if (pHead->len > TSDB_MAX_WAL_SIZE) { + sdbError("vgId:1, wal len:%d exceeds limit, hver:%" PRIu64, pHead->len, pHead->version); + taosFreeQitem(pRow); + return TSDB_CODE_WAL_SIZE_LIMIT; + } + + int32_t queued = atomic_add_fetch_32(&tsSdbMgmt.queuedMsg, 1); + if (queued > MAX_QUEUED_MSG_NUM) { + sdbDebug("vgId:1, too many msg:%d in sdb queue, flow control", queued); + taosMsleep(1); + } + + sdbIncRef(pRow->pTable, pRow->pObj); + + sdbTrace("vgId:1, msg:%p qtype:%s write into sdb queue, queued:%d", pRow->pMsg, qtypeStr[qtype], queued); + taosWriteQitem(tsSdbWQueue, qtype, pRow); + + return TSDB_CODE_MND_ACTION_IN_PROGRESS; +} + +static void sdbFreeFromQueue(SSdbRow *pRow) { + int32_t queued = atomic_sub_fetch_32(&tsSdbMgmt.queuedMsg, 1); + sdbTrace("vgId:1, msg:%p free from sdb queue, queued:%d", pRow->pMsg, queued); + + sdbDecRef(pRow->pTable, pRow->pObj); + taosFreeQitem(pRow); } -static void *sdbWorkerFp(void *param) { - SWalHead *pHead; - SSdbOper *pOper; - int32_t type; - int32_t numOfMsgs; - void * item; - void * unUsed; +static int32_t sdbWriteWalToQueue(void *vparam, void *wparam, int32_t qtype, void *rparam) { + SWalHead *pHead = wparam; + + int32_t size = sizeof(SSdbRow) + sizeof(SWalHead) + pHead->len; + SSdbRow *pRow = taosAllocateQitem(size); + if (pRow == NULL) { + return TSDB_CODE_VND_OUT_OF_MEMORY; + } + + memcpy(pRow->pHead, pHead, sizeof(SWalHead) 
+ pHead->len); + pRow->rowData = pRow->pHead->cont; + + return sdbWriteToQueue(pRow, qtype); +} + +static int32_t sdbWriteRowToQueue(SSdbRow *pInputRow, int32_t action) { + SSdbTable *pTable = pInputRow->pTable; + if (pTable == NULL) return TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE; + + int32_t size = sizeof(SSdbRow) + sizeof(SWalHead) + pTable->maxRowSize; + SSdbRow *pRow = taosAllocateQitem(size); + if (pRow == NULL) { + return TSDB_CODE_VND_OUT_OF_MEMORY; + } + + memcpy(pRow, pInputRow, sizeof(SSdbRow)); + pRow->processedCount = 1; + + SWalHead *pHead = pRow->pHead; + pRow->rowData = pHead->cont; + (*pTable->fpEncode)(pRow); + + pHead->len = pRow->rowSize; + pHead->version = 0; + pHead->msgType = pTable->id * 10 + action; + + return sdbWriteToQueue(pRow, TAOS_QTYPE_RPC); +} + +int32_t sdbInsertRowToQueue(SSdbRow *pRow) { return sdbWriteRowToQueue(pRow, SDB_ACTION_INSERT); } + +static void *sdbWorkerFp(void *pWorker) { + SSdbRow *pRow; + int32_t qtype; + void * unUsed; while (1) { - numOfMsgs = taosReadAllQitemsFromQset(tsSdbWriteQset, tsSdbWriteQall, &unUsed); + int32_t numOfMsgs = taosReadAllQitemsFromQset(tsSdbWQset, tsSdbWQall, &unUsed); if (numOfMsgs == 0) { - sdbDebug("qset:%p, sdb got no message from qset, exiting", tsSdbWriteQset); + sdbDebug("qset:%p, sdb got no message from qset, exiting", tsSdbWQset); break; } for (int32_t i = 0; i < numOfMsgs; ++i) { - taosGetQitem(tsSdbWriteQall, &type, &item); - if (type == TAOS_QTYPE_RPC) { - pOper = (SSdbOper *)item; - pOper->processedCount = 1; - pHead = (void *)pOper + sizeof(SSdbOper) + SDB_SYNC_HACK; - if (pOper->pMsg != NULL) { - sdbDebug("app:%p:%p, table:%s record:%p:%s ver:%" PRIu64 ", will be processed in sdb queue", - pOper->pMsg->rpcMsg.ahandle, pOper->pMsg, ((SSdbTable *)pOper->table)->tableName, pOper->pObj, - sdbGetKeyStr(pOper->table, pHead->cont), pHead->version); - } - } else { - pHead = (SWalHead *)item; - pOper = NULL; - } + taosGetQitem(tsSdbWQall, &qtype, (void **)&pRow); + sdbTrace("vgId:1, msg:%p, row:%p hver:%" PRIu64 ", will be processed in sdb queue", pRow->pMsg, pRow->pObj, + pRow->pHead->version); - int32_t code = sdbWrite(pOper, pHead, type); - if (code > 0) code = 0; - if (pOper) { - pOper->retCode = code; - } else { - pHead->len = code; // hackway - } + pRow->code = sdbProcessWrite((qtype == TAOS_QTYPE_RPC) ? 
pRow : NULL, pRow->pHead, qtype, NULL); + if (pRow->code > 0) pRow->code = 0; + + sdbTrace("vgId:1, msg:%p is processed in sdb queue, code:%x", pRow->pMsg, pRow->code); } - walFsync(tsSdbObj.wal); + walFsync(tsSdbMgmt.wal, true); // browse all items, and process them one by one - taosResetQitems(tsSdbWriteQall); + taosResetQitems(tsSdbWQall); for (int32_t i = 0; i < numOfMsgs; ++i) { - taosGetQitem(tsSdbWriteQall, &type, &item); - - if (type == TAOS_QTYPE_RPC) { - pOper = (SSdbOper *)item; - sdbConfirmForward(NULL, pOper, pOper->retCode); - } else if (type == TAOS_QTYPE_FWD) { - pHead = (SWalHead *)item; - syncConfirmForward(tsSdbObj.sync, pHead->version, pHead->len); - taosFreeQitem(item); + taosGetQitem(tsSdbWQall, &qtype, (void **)&pRow); + + if (qtype == TAOS_QTYPE_RPC) { + sdbConfirmForward(NULL, pRow, pRow->code); } else { - taosFreeQitem(item); + if (qtype == TAOS_QTYPE_FWD) { + syncConfirmForward(tsSdbMgmt.sync, pRow->pHead->version, pRow->code); + } + sdbFreeFromQueue(pRow); } } } diff --git a/src/mnode/src/mnodeShow.c b/src/mnode/src/mnodeShow.c index 80909e99aec6d752d35042ca2d761a6e8b923441..0d53fa9617b500d61457889e2228e5e756a244ae 100644 --- a/src/mnode/src/mnodeShow.c +++ b/src/mnode/src/mnodeShow.c @@ -110,7 +110,7 @@ static char *mnodeGetShowType(int32_t showType) { } static int32_t mnodeProcessShowMsg(SMnodeMsg *pMsg) { - SCMShowMsg *pShowMsg = pMsg->rpcMsg.pCont; + SShowMsg *pShowMsg = pMsg->rpcMsg.pCont; if (pShowMsg->type >= TSDB_MGMT_TABLE_MAX) { return TSDB_CODE_MND_INVALID_MSG_TYPE; } @@ -132,8 +132,8 @@ static int32_t mnodeProcessShowMsg(SMnodeMsg *pMsg) { return TSDB_CODE_MND_OUT_OF_MEMORY; } - int32_t size = sizeof(SCMShowRsp) + sizeof(SSchema) * TSDB_MAX_COLUMNS + TSDB_EXTRA_PAYLOAD_SIZE; - SCMShowRsp *pShowRsp = rpcMallocCont(size); + int32_t size = sizeof(SShowRsp) + sizeof(SSchema) * TSDB_MAX_COLUMNS + TSDB_EXTRA_PAYLOAD_SIZE; + SShowRsp *pShowRsp = rpcMallocCont(size); if (pShowRsp == NULL) { mnodeReleaseShowObj(pShow, true); return TSDB_CODE_MND_OUT_OF_MEMORY; @@ -146,7 +146,7 @@ static int32_t mnodeProcessShowMsg(SMnodeMsg *pMsg) { if (code == TSDB_CODE_SUCCESS) { pMsg->rpcRsp.rsp = pShowRsp; - pMsg->rpcRsp.len = sizeof(SCMShowRsp) + sizeof(SSchema) * pShow->numOfColumns; + pMsg->rpcRsp.len = sizeof(SShowRsp) + sizeof(SSchema) * pShow->numOfColumns; mnodeReleaseShowObj(pShow, false); return TSDB_CODE_SUCCESS; } else { @@ -232,12 +232,17 @@ static int32_t mnodeProcessRetrieveMsg(SMnodeMsg *pMsg) { } static int32_t mnodeProcessHeartBeatMsg(SMnodeMsg *pMsg) { - SCMHeartBeatRsp *pHBRsp = (SCMHeartBeatRsp *) rpcMallocCont(sizeof(SCMHeartBeatRsp)); - if (pHBRsp == NULL) { + SHeartBeatRsp *pRsp = (SHeartBeatRsp *)rpcMallocCont(sizeof(SHeartBeatRsp)); + if (pRsp == NULL) { return TSDB_CODE_MND_OUT_OF_MEMORY; } - SCMHeartBeatMsg *pHBMsg = pMsg->rpcMsg.pCont; + SHeartBeatMsg *pHBMsg = pMsg->rpcMsg.pCont; + if (taosCheckVersion(pHBMsg->clientVer, version, 3) != TSDB_CODE_SUCCESS) { + rpcFreeCont(pRsp); + return TSDB_CODE_TSC_INVALID_VERSION; // todo change the error code + } + SRpcConnInfo connInfo = {0}; rpcGetConnInfo(pMsg->rpcMsg.handle, &connInfo); @@ -251,40 +256,40 @@ static int32_t mnodeProcessHeartBeatMsg(SMnodeMsg *pMsg) { if (pConn == NULL) { // do not close existing links, otherwise // mError("failed to create connId, close connect"); - // pHBRsp->killConnection = 1; + // pRsp->killConnection = 1; } else { - pHBRsp->connId = htonl(pConn->connId); + pRsp->connId = htonl(pConn->connId); mnodeSaveQueryStreamList(pConn, pHBMsg); if (pConn->killed != 0) { - 
pHBRsp->killConnection = 1; + pRsp->killConnection = 1; } if (pConn->streamId != 0) { - pHBRsp->streamId = htonl(pConn->streamId); + pRsp->streamId = htonl(pConn->streamId); pConn->streamId = 0; } if (pConn->queryId != 0) { - pHBRsp->queryId = htonl(pConn->queryId); + pRsp->queryId = htonl(pConn->queryId); pConn->queryId = 0; } } - pHBRsp->onlineDnodes = htonl(mnodeGetOnlineDnodesNum()); - pHBRsp->totalDnodes = htonl(mnodeGetDnodesNum()); - mnodeGetMnodeEpSetForShell(&pHBRsp->epSet); + pRsp->onlineDnodes = htonl(mnodeGetOnlineDnodesNum()); + pRsp->totalDnodes = htonl(mnodeGetDnodesNum()); + mnodeGetMnodeEpSetForShell(&pRsp->epSet); + + pMsg->rpcRsp.rsp = pRsp; + pMsg->rpcRsp.len = sizeof(SHeartBeatRsp); - pMsg->rpcRsp.rsp = pHBRsp; - pMsg->rpcRsp.len = sizeof(SCMHeartBeatRsp); - mnodeReleaseConn(pConn); return TSDB_CODE_SUCCESS; } static int32_t mnodeProcessConnectMsg(SMnodeMsg *pMsg) { - SCMConnectMsg *pConnectMsg = pMsg->rpcMsg.pCont; - SCMConnectRsp *pConnectRsp = NULL; + SConnectMsg *pConnectMsg = pMsg->rpcMsg.pCont; + SConnectRsp *pConnectRsp = NULL; int32_t code = TSDB_CODE_SUCCESS; SRpcConnInfo connInfo = {0}; @@ -320,7 +325,7 @@ static int32_t mnodeProcessConnectMsg(SMnodeMsg *pMsg) { mnodeDecDbRef(pDb); } - pConnectRsp = rpcMallocCont(sizeof(SCMConnectRsp)); + pConnectRsp = rpcMallocCont(sizeof(SConnectRsp)); if (pConnectRsp == NULL) { code = TSDB_CODE_MND_OUT_OF_MEMORY; goto connect_over; @@ -349,14 +354,14 @@ connect_over: } else { mLInfo("user:%s login from %s, result:%s", connInfo.user, taosIpStr(connInfo.clientIp), tstrerror(code)); pMsg->rpcRsp.rsp = pConnectRsp; - pMsg->rpcRsp.len = sizeof(SCMConnectRsp); + pMsg->rpcRsp.len = sizeof(SConnectRsp); } return code; } static int32_t mnodeProcessUseMsg(SMnodeMsg *pMsg) { - SCMUseDbMsg *pUseDbMsg = pMsg->rpcMsg.pCont; + SUseDbMsg *pUseDbMsg = pMsg->rpcMsg.pCont; int32_t code = TSDB_CODE_SUCCESS; if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDb(pUseDbMsg->db); @@ -410,7 +415,7 @@ static void mnodeFreeShowObj(void *data) { sdbFreeIter(pShow->pIter); mDebug("%p, show is destroyed, data:%p index:%d", pShow, data, pShow->index); - taosTFree(pShow); + tfree(pShow); } static void mnodeReleaseShowObj(SShowObj *pShow, bool forceRemove) { diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 82a062169a591d26d665437273183a4c5e1f27b4..4d5f0808f55a3592b2ea9833f12df85ed252c9e2 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -45,6 +45,10 @@ #include "mnodeRead.h" #include "mnodePeer.h" +#define ALTER_CTABLE_RETRY_TIMES 3 +#define CREATE_CTABLE_RETRY_TIMES 10 +#define CREATE_CTABLE_RETRY_SEC 14 + static void * tsChildTableSdb; static void * tsSuperTableSdb; static int32_t tsChildTableUpdateSize; @@ -52,9 +56,9 @@ static int32_t tsSuperTableUpdateSize; static void * mnodeGetChildTable(char *tableId); static void * mnodeGetSuperTable(char *tableId); static void * mnodeGetSuperTableByUid(uint64_t uid); -static void mnodeDropAllChildTablesInStable(SSuperTableObj *pStable); -static void mnodeAddTableIntoStable(SSuperTableObj *pStable, SChildTableObj *pCtable); -static void mnodeRemoveTableFromStable(SSuperTableObj *pStable, SChildTableObj *pCtable); +static void mnodeDropAllChildTablesInStable(SSTableObj *pStable); +static void mnodeAddTableIntoStable(SSTableObj *pStable, SCTableObj *pCtable); +static void mnodeRemoveTableFromStable(SSTableObj *pStable, SCTableObj *pCtable); static int32_t mnodeGetShowTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn); static int32_t 
mnodeRetrieveShowTables(SShowObj *pShow, char *data, int32_t rows, void *pConn); @@ -86,22 +90,22 @@ static int32_t mnodeAutoCreateChildTable(SMnodeMsg *pMsg); static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg); static void mnodeProcessAlterTableRsp(SRpcMsg *rpcMsg); -static int32_t mnodeFindSuperTableColumnIndex(SSuperTableObj *pStable, char *colName); +static int32_t mnodeFindSuperTableColumnIndex(SSTableObj *pStable, char *colName); -static void mnodeDestroyChildTable(SChildTableObj *pTable) { - taosTFree(pTable->info.tableId); - taosTFree(pTable->schema); - taosTFree(pTable->sql); - taosTFree(pTable); +static void mnodeDestroyChildTable(SCTableObj *pTable) { + tfree(pTable->info.tableId); + tfree(pTable->schema); + tfree(pTable->sql); + tfree(pTable); } -static int32_t mnodeChildTableActionDestroy(SSdbOper *pOper) { - mnodeDestroyChildTable(pOper->pObj); +static int32_t mnodeChildTableActionDestroy(SSdbRow *pRow) { + mnodeDestroyChildTable(pRow->pObj); return TSDB_CODE_SUCCESS; } -static int32_t mnodeChildTableActionInsert(SSdbOper *pOper) { - SChildTableObj *pTable = pOper->pObj; +static int32_t mnodeChildTableActionInsert(SSdbRow *pRow) { + SCTableObj *pTable = pRow->pObj; SVgObj *pVgroup = mnodeGetVgroup(pTable->vgId); if (pVgroup == NULL) { @@ -149,8 +153,8 @@ static int32_t mnodeChildTableActionInsert(SSdbOper *pOper) { return TSDB_CODE_SUCCESS; } -static int32_t mnodeChildTableActionDelete(SSdbOper *pOper) { - SChildTableObj *pTable = pOper->pObj; +static int32_t mnodeChildTableActionDelete(SSdbRow *pRow) { + SCTableObj *pTable = pRow->pObj; if (pTable->vgId == 0) { return TSDB_CODE_MND_VGROUP_NOT_EXIST; } @@ -185,9 +189,9 @@ static int32_t mnodeChildTableActionDelete(SSdbOper *pOper) { return TSDB_CODE_SUCCESS; } -static int32_t mnodeChildTableActionUpdate(SSdbOper *pOper) { - SChildTableObj *pNew = pOper->pObj; - SChildTableObj *pTable = mnodeGetChildTable(pNew->info.tableId); +static int32_t mnodeChildTableActionUpdate(SSdbRow *pRow) { + SCTableObj *pNew = pRow->pObj; + SCTableObj *pTable = mnodeGetChildTable(pNew->info.tableId); if (pTable != pNew) { void *oldTableId = pTable->info.tableId; void *oldSql = pTable->sql; @@ -195,7 +199,7 @@ static int32_t mnodeChildTableActionUpdate(SSdbOper *pOper) { void *oldSTable = pTable->superTable; int32_t oldRefCount = pTable->refCount; - memcpy(pTable, pNew, sizeof(SChildTableObj)); + memcpy(pTable, pNew, sizeof(SCTableObj)); pTable->refCount = oldRefCount; pTable->sql = pNew->sql; @@ -212,50 +216,50 @@ static int32_t mnodeChildTableActionUpdate(SSdbOper *pOper) { return TSDB_CODE_SUCCESS; } -static int32_t mnodeChildTableActionEncode(SSdbOper *pOper) { - SChildTableObj *pTable = pOper->pObj; - assert(pTable != NULL && pOper->rowData != NULL); +static int32_t mnodeChildTableActionEncode(SSdbRow *pRow) { + SCTableObj *pTable = pRow->pObj; + assert(pTable != NULL && pRow->rowData != NULL); int32_t len = strlen(pTable->info.tableId); if (len >= TSDB_TABLE_FNAME_LEN) return TSDB_CODE_MND_INVALID_TABLE_ID; - memcpy(pOper->rowData, pTable->info.tableId, len); - memset(pOper->rowData + len, 0, 1); + memcpy(pRow->rowData, pTable->info.tableId, len); + memset(pRow->rowData + len, 0, 1); len++; - memcpy(pOper->rowData + len, (char*)pTable + sizeof(char *), tsChildTableUpdateSize); + memcpy(pRow->rowData + len, (char*)pTable + sizeof(char *), tsChildTableUpdateSize); len += tsChildTableUpdateSize; if (pTable->info.type != TSDB_CHILD_TABLE) { int32_t schemaSize = pTable->numOfColumns * sizeof(SSchema); - memcpy(pOper->rowData + len, 
pTable->schema, schemaSize); + memcpy(pRow->rowData + len, pTable->schema, schemaSize); len += schemaSize; if (pTable->sqlLen != 0) { - memcpy(pOper->rowData + len, pTable->sql, pTable->sqlLen); + memcpy(pRow->rowData + len, pTable->sql, pTable->sqlLen); len += pTable->sqlLen; } } - pOper->rowSize = len; + pRow->rowSize = len; return TSDB_CODE_SUCCESS; } -static int32_t mnodeChildTableActionDecode(SSdbOper *pOper) { - assert(pOper->rowData != NULL); - SChildTableObj *pTable = calloc(1, sizeof(SChildTableObj)); +static int32_t mnodeChildTableActionDecode(SSdbRow *pRow) { + assert(pRow->rowData != NULL); + SCTableObj *pTable = calloc(1, sizeof(SCTableObj)); if (pTable == NULL) return TSDB_CODE_MND_OUT_OF_MEMORY; - int32_t len = strlen(pOper->rowData); + int32_t len = strlen(pRow->rowData); if (len >= TSDB_TABLE_FNAME_LEN) { free(pTable); return TSDB_CODE_MND_INVALID_TABLE_ID; } - pTable->info.tableId = strdup(pOper->rowData); + pTable->info.tableId = strdup(pRow->rowData); len++; - memcpy((char*)pTable + sizeof(char *), pOper->rowData + len, tsChildTableUpdateSize); + memcpy((char*)pTable + sizeof(char *), pRow->rowData + len, tsChildTableUpdateSize); len += tsChildTableUpdateSize; if (pTable->info.type != TSDB_CHILD_TABLE) { @@ -265,7 +269,7 @@ static int32_t mnodeChildTableActionDecode(SSdbOper *pOper) { mnodeDestroyChildTable(pTable); return TSDB_CODE_MND_INVALID_TABLE_TYPE; } - memcpy(pTable->schema, pOper->rowData + len, schemaSize); + memcpy(pTable->schema, pRow->rowData + len, schemaSize); len += schemaSize; if (pTable->sqlLen != 0) { @@ -274,17 +278,17 @@ static int32_t mnodeChildTableActionDecode(SSdbOper *pOper) { mnodeDestroyChildTable(pTable); return TSDB_CODE_MND_OUT_OF_MEMORY; } - memcpy(pTable->sql, pOper->rowData + len, pTable->sqlLen); + memcpy(pTable->sql, pRow->rowData + len, pTable->sqlLen); } } - pOper->pObj = pTable; + pRow->pObj = pTable; return TSDB_CODE_SUCCESS; } static int32_t mnodeChildTableActionRestored() { void *pIter = NULL; - SChildTableObj *pTable = NULL; + SCTableObj *pTable = NULL; while (1) { pIter = mnodeGetNextChildTable(pIter, &pTable); @@ -293,7 +297,7 @@ static int32_t mnodeChildTableActionRestored() { SDbObj *pDb = mnodeGetDbByTableId(pTable->info.tableId); if (pDb == NULL || pDb->status != TSDB_DB_STATUS_READY) { mError("ctable:%s, failed to get db or db in dropping, discard it", pTable->info.tableId); - SSdbOper desc = {.type = SDB_OPER_LOCAL, .pObj = pTable, .table = tsChildTableSdb}; + SSdbRow desc = {.type = SDB_OPER_LOCAL, .pObj = pTable, .pTable = tsChildTableSdb}; sdbDeleteRow(&desc); mnodeDecTableRef(pTable); mnodeDecDbRef(pDb); @@ -305,7 +309,7 @@ static int32_t mnodeChildTableActionRestored() { if (pVgroup == NULL) { mError("ctable:%s, failed to get vgId:%d tid:%d, discard it", pTable->info.tableId, pTable->vgId, pTable->tid); pTable->vgId = 0; - SSdbOper desc = {.type = SDB_OPER_LOCAL, .pObj = pTable, .table = tsChildTableSdb}; + SSdbRow desc = {.type = SDB_OPER_LOCAL, .pObj = pTable, .pTable = tsChildTableSdb}; sdbDeleteRow(&desc); mnodeDecTableRef(pTable); continue; @@ -316,18 +320,18 @@ static int32_t mnodeChildTableActionRestored() { mError("ctable:%s, db:%s not match with vgId:%d db:%s sid:%d, discard it", pTable->info.tableId, pDb->name, pTable->vgId, pVgroup->dbName, pTable->tid); pTable->vgId = 0; - SSdbOper desc = {.type = SDB_OPER_LOCAL, .pObj = pTable, .table = tsChildTableSdb}; + SSdbRow desc = {.type = SDB_OPER_LOCAL, .pObj = pTable, .pTable = tsChildTableSdb}; sdbDeleteRow(&desc); mnodeDecTableRef(pTable); continue; } if 
(pTable->info.type == TSDB_CHILD_TABLE) { - SSuperTableObj *pSuperTable = mnodeGetSuperTableByUid(pTable->suid); + SSTableObj *pSuperTable = mnodeGetSuperTableByUid(pTable->suid); if (pSuperTable == NULL) { mError("ctable:%s, stable:%" PRIu64 " not exist", pTable->info.tableId, pTable->suid); pTable->vgId = 0; - SSdbOper desc = {.type = SDB_OPER_LOCAL, .pObj = pTable, .table = tsChildTableSdb}; + SSdbRow desc = {.type = SDB_OPER_LOCAL, .pObj = pTable, .pTable = tsChildTableSdb}; sdbDeleteRow(&desc); mnodeDecTableRef(pTable); continue; @@ -344,23 +348,23 @@ static int32_t mnodeChildTableActionRestored() { } static int32_t mnodeInitChildTables() { - SChildTableObj tObj; + SCTableObj tObj; tsChildTableUpdateSize = (int8_t *)tObj.updateEnd - (int8_t *)&tObj.info.type; SSdbTableDesc tableDesc = { - .tableId = SDB_TABLE_CTABLE, - .tableName = "ctables", + .id = SDB_TABLE_CTABLE, + .name = "ctables", .hashSessions = TSDB_DEFAULT_CTABLES_HASH_SIZE, - .maxRowSize = sizeof(SChildTableObj) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16) + TSDB_TABLE_FNAME_LEN + TSDB_CQ_SQL_SIZE, + .maxRowSize = sizeof(SCTableObj) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16) + TSDB_TABLE_FNAME_LEN + TSDB_CQ_SQL_SIZE, .refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj, .keyType = SDB_KEY_VAR_STRING, - .insertFp = mnodeChildTableActionInsert, - .deleteFp = mnodeChildTableActionDelete, - .updateFp = mnodeChildTableActionUpdate, - .encodeFp = mnodeChildTableActionEncode, - .decodeFp = mnodeChildTableActionDecode, - .destroyFp = mnodeChildTableActionDestroy, - .restoredFp = mnodeChildTableActionRestored + .fpInsert = mnodeChildTableActionInsert, + .fpDelete = mnodeChildTableActionDelete, + .fpUpdate = mnodeChildTableActionUpdate, + .fpEncode = mnodeChildTableActionEncode, + .fpDecode = mnodeChildTableActionDecode, + .fpDestroy = mnodeChildTableActionDestroy, + .fpRestored = mnodeChildTableActionRestored }; tsChildTableSdb = sdbOpenTable(&tableDesc); @@ -386,7 +390,7 @@ int64_t mnodeGetChildTableNum() { return sdbGetNumOfRows(tsChildTableSdb); } -static void mnodeAddTableIntoStable(SSuperTableObj *pStable, SChildTableObj *pCtable) { +static void mnodeAddTableIntoStable(SSTableObj *pStable, SCTableObj *pCtable) { atomic_add_fetch_32(&pStable->numOfTables, 1); if (pStable->vgHash == NULL) { @@ -402,7 +406,7 @@ static void mnodeAddTableIntoStable(SSuperTableObj *pStable, SChildTableObj *pCt } } -static void mnodeRemoveTableFromStable(SSuperTableObj *pStable, SChildTableObj *pCtable) { +static void mnodeRemoveTableFromStable(SSTableObj *pStable, SCTableObj *pCtable) { atomic_sub_fetch_32(&pStable->numOfTables, 1); if (pStable->vgHash == NULL) return; @@ -416,23 +420,23 @@ static void mnodeRemoveTableFromStable(SSuperTableObj *pStable, SChildTableObj * mnodeDecVgroupRef(pVgroup); } -static void mnodeDestroySuperTable(SSuperTableObj *pStable) { +static void mnodeDestroySuperTable(SSTableObj *pStable) { if (pStable->vgHash != NULL) { taosHashCleanup(pStable->vgHash); pStable->vgHash = NULL; } - taosTFree(pStable->info.tableId); - taosTFree(pStable->schema); - taosTFree(pStable); + tfree(pStable->info.tableId); + tfree(pStable->schema); + tfree(pStable); } -static int32_t mnodeSuperTableActionDestroy(SSdbOper *pOper) { - mnodeDestroySuperTable(pOper->pObj); +static int32_t mnodeSuperTableActionDestroy(SSdbRow *pRow) { + mnodeDestroySuperTable(pRow->pObj); return TSDB_CODE_SUCCESS; } -static int32_t mnodeSuperTableActionInsert(SSdbOper *pOper) { - SSuperTableObj *pStable = pOper->pObj; +static 
int32_t mnodeSuperTableActionInsert(SSdbRow *pRow) { + SSTableObj *pStable = pRow->pObj; SDbObj *pDb = mnodeGetDbByTableId(pStable->info.tableId); if (pDb != NULL && pDb->status == TSDB_DB_STATUS_READY) { mnodeAddSuperTableIntoDb(pDb); @@ -442,21 +446,21 @@ static int32_t mnodeSuperTableActionInsert(SSdbOper *pOper) { return TSDB_CODE_SUCCESS; } -static int32_t mnodeSuperTableActionDelete(SSdbOper *pOper) { - SSuperTableObj *pStable = pOper->pObj; +static int32_t mnodeSuperTableActionDelete(SSdbRow *pRow) { + SSTableObj *pStable = pRow->pObj; SDbObj *pDb = mnodeGetDbByTableId(pStable->info.tableId); if (pDb != NULL) { mnodeRemoveSuperTableFromDb(pDb); - mnodeDropAllChildTablesInStable((SSuperTableObj *)pStable); + mnodeDropAllChildTablesInStable((SSTableObj *)pStable); } mnodeDecDbRef(pDb); return TSDB_CODE_SUCCESS; } -static int32_t mnodeSuperTableActionUpdate(SSdbOper *pOper) { - SSuperTableObj *pNew = pOper->pObj; - SSuperTableObj *pTable = mnodeGetSuperTable(pNew->info.tableId); +static int32_t mnodeSuperTableActionUpdate(SSdbRow *pRow) { + SSTableObj *pNew = pRow->pObj; + SSTableObj *pTable = mnodeGetSuperTable(pNew->info.tableId); if (pTable != NULL && pTable != pNew) { void *oldTableId = pTable->info.tableId; void *oldSchema = pTable->schema; @@ -464,7 +468,7 @@ static int32_t mnodeSuperTableActionUpdate(SSdbOper *pOper) { int32_t oldRefCount = pTable->refCount; int32_t oldNumOfTables = pTable->numOfTables; - memcpy(pTable, pNew, sizeof(SSuperTableObj)); + memcpy(pTable, pNew, sizeof(SSTableObj)); pTable->vgHash = oldVgHash; pTable->refCount = oldRefCount; @@ -479,43 +483,43 @@ static int32_t mnodeSuperTableActionUpdate(SSdbOper *pOper) { return TSDB_CODE_SUCCESS; } -static int32_t mnodeSuperTableActionEncode(SSdbOper *pOper) { - SSuperTableObj *pStable = pOper->pObj; - assert(pOper->pObj != NULL && pOper->rowData != NULL); +static int32_t mnodeSuperTableActionEncode(SSdbRow *pRow) { + SSTableObj *pStable = pRow->pObj; + assert(pRow->pObj != NULL && pRow->rowData != NULL); int32_t len = strlen(pStable->info.tableId); if (len >= TSDB_TABLE_FNAME_LEN) len = TSDB_CODE_MND_INVALID_TABLE_ID; - memcpy(pOper->rowData, pStable->info.tableId, len); - memset(pOper->rowData + len, 0, 1); + memcpy(pRow->rowData, pStable->info.tableId, len); + memset(pRow->rowData + len, 0, 1); len++; - memcpy(pOper->rowData + len, (char*)pStable + sizeof(char *), tsSuperTableUpdateSize); + memcpy(pRow->rowData + len, (char*)pStable + sizeof(char *), tsSuperTableUpdateSize); len += tsSuperTableUpdateSize; int32_t schemaSize = sizeof(SSchema) * (pStable->numOfColumns + pStable->numOfTags); - memcpy(pOper->rowData + len, pStable->schema, schemaSize); + memcpy(pRow->rowData + len, pStable->schema, schemaSize); len += schemaSize; - pOper->rowSize = len; + pRow->rowSize = len; return TSDB_CODE_SUCCESS; } -static int32_t mnodeSuperTableActionDecode(SSdbOper *pOper) { - assert(pOper->rowData != NULL); - SSuperTableObj *pStable = (SSuperTableObj *) calloc(1, sizeof(SSuperTableObj)); +static int32_t mnodeSuperTableActionDecode(SSdbRow *pRow) { + assert(pRow->rowData != NULL); + SSTableObj *pStable = (SSTableObj *) calloc(1, sizeof(SSTableObj)); if (pStable == NULL) return TSDB_CODE_MND_OUT_OF_MEMORY; - int32_t len = strlen(pOper->rowData); + int32_t len = strlen(pRow->rowData); if (len >= TSDB_TABLE_FNAME_LEN){ free(pStable); return TSDB_CODE_MND_INVALID_TABLE_ID; } - pStable->info.tableId = strdup(pOper->rowData); + pStable->info.tableId = strdup(pRow->rowData); len++; - memcpy((char*)pStable + sizeof(char *), 
pOper->rowData + len, tsSuperTableUpdateSize); + memcpy((char*)pStable + sizeof(char *), pRow->rowData + len, tsSuperTableUpdateSize); len += tsSuperTableUpdateSize; int32_t schemaSize = sizeof(SSchema) * (pStable->numOfColumns + pStable->numOfTags); @@ -525,9 +529,9 @@ static int32_t mnodeSuperTableActionDecode(SSdbOper *pOper) { return TSDB_CODE_MND_NOT_SUPER_TABLE; } - memcpy(pStable->schema, pOper->rowData + len, schemaSize); + memcpy(pStable->schema, pRow->rowData + len, schemaSize); - pOper->pObj = pStable; + pRow->pObj = pStable; return TSDB_CODE_SUCCESS; } @@ -537,23 +541,23 @@ static int32_t mnodeSuperTableActionRestored() { } static int32_t mnodeInitSuperTables() { - SSuperTableObj tObj; + SSTableObj tObj; tsSuperTableUpdateSize = (int8_t *)tObj.updateEnd - (int8_t *)&tObj.info.type; SSdbTableDesc tableDesc = { - .tableId = SDB_TABLE_STABLE, - .tableName = "stables", + .id = SDB_TABLE_STABLE, + .name = "stables", .hashSessions = TSDB_DEFAULT_STABLES_HASH_SIZE, - .maxRowSize = sizeof(SSuperTableObj) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16) + TSDB_TABLE_FNAME_LEN, + .maxRowSize = sizeof(SSTableObj) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16) + TSDB_TABLE_FNAME_LEN, .refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj, .keyType = SDB_KEY_VAR_STRING, - .insertFp = mnodeSuperTableActionInsert, - .deleteFp = mnodeSuperTableActionDelete, - .updateFp = mnodeSuperTableActionUpdate, - .encodeFp = mnodeSuperTableActionEncode, - .decodeFp = mnodeSuperTableActionDecode, - .destroyFp = mnodeSuperTableActionDestroy, - .restoredFp = mnodeSuperTableActionRestored + .fpInsert = mnodeSuperTableActionInsert, + .fpDelete = mnodeSuperTableActionDelete, + .fpUpdate = mnodeSuperTableActionUpdate, + .fpEncode = mnodeSuperTableActionEncode, + .fpDecode = mnodeSuperTableActionDecode, + .fpDestroy = mnodeSuperTableActionDestroy, + .fpRestored = mnodeSuperTableActionRestored }; tsSuperTableSdb = sdbOpenTable(&tableDesc); @@ -615,7 +619,7 @@ static void *mnodeGetSuperTable(char *tableId) { } static void *mnodeGetSuperTableByUid(uint64_t uid) { - SSuperTableObj *pStable = NULL; + SSTableObj *pStable = NULL; void *pIter = NULL; while (1) { @@ -647,11 +651,11 @@ void *mnodeGetTable(char *tableId) { return NULL; } -void *mnodeGetNextChildTable(void *pIter, SChildTableObj **pTable) { +void *mnodeGetNextChildTable(void *pIter, SCTableObj **pTable) { return sdbFetchRow(tsChildTableSdb, pIter, (void **)pTable); } -void *mnodeGetNextSuperTable(void *pIter, SSuperTableObj **pTable) { +void *mnodeGetNextSuperTable(void *pIter, SSTableObj **pTable) { return sdbFetchRow(tsSuperTableSdb, pIter, (void **)pTable); } @@ -699,7 +703,7 @@ static int32_t mnodeProcessCreateTableMsg(SMnodeMsg *pMsg) { if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDb(pCreate->db); if (pMsg->pDb == NULL) { - mError("app:%p:%p, table:%s, failed to create, db not selected", pMsg->rpcMsg.ahandle, pMsg, pCreate->tableId); + mError("msg:%p, app:%p table:%s, failed to create, db not selected", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableId); return TSDB_CODE_MND_DB_NOT_SELECTED; } @@ -711,24 +715,24 @@ static int32_t mnodeProcessCreateTableMsg(SMnodeMsg *pMsg) { if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pCreate->tableId); if (pMsg->pTable != NULL && pMsg->retry == 0) { if (pCreate->getMeta) { - mDebug("app:%p:%p, table:%s, continue to get meta", pMsg->rpcMsg.ahandle, pMsg, pCreate->tableId); + mDebug("msg:%p, app:%p table:%s, continue to get meta", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableId); return 
mnodeGetChildTableMeta(pMsg); } else if (pCreate->igExists) { - mDebug("app:%p:%p, table:%s, is already exist", pMsg->rpcMsg.ahandle, pMsg, pCreate->tableId); + mDebug("msg:%p, app:%p table:%s, is already exist", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableId); return TSDB_CODE_SUCCESS; } else { - mError("app:%p:%p, table:%s, failed to create, table already exist", pMsg->rpcMsg.ahandle, pMsg, + mError("msg:%p, app:%p table:%s, failed to create, table already exist", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableId); return TSDB_CODE_MND_TABLE_ALREADY_EXIST; } } if (pCreate->numOfTags != 0) { - mDebug("app:%p:%p, table:%s, create stable msg is received from thandle:%p", pMsg->rpcMsg.ahandle, pMsg, + mDebug("msg:%p, app:%p table:%s, create stable msg is received from thandle:%p", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableId, pMsg->rpcMsg.handle); return mnodeProcessCreateSuperTableMsg(pMsg); } else { - mDebug("app:%p:%p, table:%s, create ctable msg is received from thandle:%p", pMsg->rpcMsg.ahandle, pMsg, + mDebug("msg:%p, app:%p table:%s, create ctable msg is received from thandle:%p", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableId, pMsg->rpcMsg.handle); return mnodeProcessCreateChildTableMsg(pMsg); } @@ -738,17 +742,18 @@ static int32_t mnodeProcessDropTableMsg(SMnodeMsg *pMsg) { SCMDropTableMsg *pDrop = pMsg->rpcMsg.pCont; if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDbByTableId(pDrop->tableId); if (pMsg->pDb == NULL) { - mError("app:%p:%p, table:%s, failed to drop table, db not selected or db in dropping", pMsg->rpcMsg.ahandle, pMsg, pDrop->tableId); + mError("msg:%p, app:%p table:%s, failed to drop table, db not selected or db in dropping", pMsg, + pMsg->rpcMsg.ahandle, pDrop->tableId); return TSDB_CODE_MND_DB_NOT_SELECTED; } - + if (pMsg->pDb->status != TSDB_DB_STATUS_READY) { mError("db:%s, status:%d, in dropping", pMsg->pDb->name, pMsg->pDb->status); return TSDB_CODE_MND_DB_IN_DROPPING; } if (mnodeCheckIsMonitorDB(pMsg->pDb->name, tsMonitorDbName)) { - mError("app:%p:%p, table:%s, failed to drop table, in monitor database", pMsg->rpcMsg.ahandle, pMsg, + mError("msg:%p, app:%p table:%s, failed to drop table, in monitor database", pMsg, pMsg->rpcMsg.ahandle, pDrop->tableId); return TSDB_CODE_MND_MONITOR_DB_FORBIDDEN; } @@ -756,36 +761,36 @@ static int32_t mnodeProcessDropTableMsg(SMnodeMsg *pMsg) { if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pDrop->tableId); if (pMsg->pTable == NULL) { if (pDrop->igNotExists) { - mDebug("app:%p:%p, table:%s, table is not exist, treat as success", pMsg->rpcMsg.ahandle, pMsg, pDrop->tableId); + mDebug("msg:%p, app:%p table:%s is not exist, treat as success", pMsg, pMsg->rpcMsg.ahandle, pDrop->tableId); return TSDB_CODE_SUCCESS; } else { - mError("app:%p:%p, table:%s, failed to drop table, table not exist", pMsg->rpcMsg.ahandle, pMsg, pDrop->tableId); + mError("msg:%p, app:%p table:%s, failed to drop, table not exist", pMsg, pMsg->rpcMsg.ahandle, pDrop->tableId); return TSDB_CODE_MND_INVALID_TABLE_NAME; } } if (pMsg->pTable->type == TSDB_SUPER_TABLE) { - SSuperTableObj *pSTable = (SSuperTableObj *)pMsg->pTable; - mInfo("app:%p:%p, table:%s, start to drop stable, uid:%" PRIu64 ", numOfChildTables:%d, sizeOfVgList:%d", - pMsg->rpcMsg.ahandle, pMsg, pDrop->tableId, pSTable->uid, pSTable->numOfTables, (int32_t)taosHashGetSize(pSTable->vgHash)); + SSTableObj *pSTable = (SSTableObj *)pMsg->pTable; + mInfo("msg:%p, app:%p table:%s, start to drop stable, uid:%" PRIu64 ", numOfChildTables:%d, sizeOfVgList:%d", + pMsg, pMsg->rpcMsg.ahandle, pDrop->tableId, 
pSTable->uid, pSTable->numOfTables, (int32_t)taosHashGetSize(pSTable->vgHash)); return mnodeProcessDropSuperTableMsg(pMsg); } else { - SChildTableObj *pCTable = (SChildTableObj *)pMsg->pTable; - mInfo("app:%p:%p, table:%s, start to drop ctable, vgId:%d tid:%d uid:%" PRIu64, pMsg->rpcMsg.ahandle, pMsg, + SCTableObj *pCTable = (SCTableObj *)pMsg->pTable; + mInfo("msg:%p, app:%p table:%s, start to drop ctable, vgId:%d tid:%d uid:%" PRIu64, pMsg, pMsg->rpcMsg.ahandle, pDrop->tableId, pCTable->vgId, pCTable->tid, pCTable->uid); return mnodeProcessDropChildTableMsg(pMsg); } } static int32_t mnodeProcessTableMetaMsg(SMnodeMsg *pMsg) { - SCMTableInfoMsg *pInfo = pMsg->rpcMsg.pCont; + STableInfoMsg *pInfo = pMsg->rpcMsg.pCont; pInfo->createFlag = htons(pInfo->createFlag); - mDebug("app:%p:%p, table:%s, table meta msg is received from thandle:%p, createFlag:%d", pMsg->rpcMsg.ahandle, pMsg, + mDebug("msg:%p, app:%p table:%s, table meta msg is received from thandle:%p, createFlag:%d", pMsg, pMsg->rpcMsg.ahandle, pInfo->tableId, pMsg->rpcMsg.handle, pInfo->createFlag); if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDbByTableId(pInfo->tableId); if (pMsg->pDb == NULL) { - mError("app:%p:%p, table:%s, failed to get table meta, db not selected", pMsg->rpcMsg.ahandle, pMsg, + mError("msg:%p, app:%p table:%s, failed to get table meta, db not selected", pMsg, pMsg->rpcMsg.ahandle, pInfo->tableId); return TSDB_CODE_MND_DB_NOT_SELECTED; } @@ -798,11 +803,11 @@ static int32_t mnodeProcessTableMetaMsg(SMnodeMsg *pMsg) { if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pInfo->tableId); if (pMsg->pTable == NULL) { if (!pInfo->createFlag) { - mError("app:%p:%p, table:%s, failed to get table meta, table not exist", pMsg->rpcMsg.ahandle, pMsg, + mError("msg:%p, app:%p table:%s, failed to get table meta, table not exist", pMsg, pMsg->rpcMsg.ahandle, pInfo->tableId); return TSDB_CODE_MND_INVALID_TABLE_NAME; } else { - mDebug("app:%p:%p, table:%s, failed to get table meta, start auto create table ", pMsg->rpcMsg.ahandle, pMsg, + mDebug("msg:%p, app:%p table:%s, failed to get table meta, start auto create table ", pMsg, pMsg->rpcMsg.ahandle, pInfo->tableId); return mnodeAutoCreateChildTable(pMsg); } @@ -816,15 +821,15 @@ static int32_t mnodeProcessTableMetaMsg(SMnodeMsg *pMsg) { } static int32_t mnodeCreateSuperTableCb(SMnodeMsg *pMsg, int32_t code) { - SSuperTableObj *pTable = (SSuperTableObj *)pMsg->pTable; + SSTableObj *pTable = (SSTableObj *)pMsg->pTable; assert(pTable); if (code == TSDB_CODE_SUCCESS) { mLInfo("stable:%s, is created in sdb, uid:%" PRIu64, pTable->info.tableId, pTable->uid); } else { - mError("app:%p:%p, stable:%s, failed to create in sdb, reason:%s", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId, + mError("msg:%p, app:%p stable:%s, failed to create in sdb, reason:%s", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId, tstrerror(code)); - SSdbOper desc = {.type = SDB_OPER_GLOBAL, .pObj = pTable, .table = tsSuperTableSdb}; + SSdbRow desc = {.type = SDB_OPER_GLOBAL, .pObj = pTable, .pTable = tsSuperTableSdb}; sdbDeleteRow(&desc); } @@ -835,9 +840,9 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) { if (pMsg == NULL) return TSDB_CODE_MND_APP_ERROR; SCMCreateTableMsg *pCreate = pMsg->rpcMsg.pCont; - SSuperTableObj * pStable = calloc(1, sizeof(SSuperTableObj)); + SSTableObj * pStable = calloc(1, sizeof(SSTableObj)); if (pStable == NULL) { - mError("app:%p:%p, table:%s, failed to create, no enough memory", pMsg->rpcMsg.ahandle, pMsg, pCreate->tableId); + mError("msg:%p, app:%p table:%s, 
failed to create, no enough memory", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableId); return TSDB_CODE_MND_OUT_OF_MEMORY; } @@ -856,7 +861,7 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) { pStable->schema = (SSchema *)calloc(1, schemaSize); if (pStable->schema == NULL) { free(pStable); - mError("app:%p:%p, table:%s, failed to create, no schema input", pMsg->rpcMsg.ahandle, pMsg, pCreate->tableId); + mError("msg:%p, app:%p table:%s, failed to create, no schema input", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableId); return TSDB_CODE_MND_INVALID_TABLE_NAME; } memcpy(pStable->schema, pCreate->schema, numOfCols * sizeof(SSchema)); @@ -874,31 +879,31 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) { pMsg->pTable = (STableObj *)pStable; mnodeIncTableRef(pMsg->pTable); - SSdbOper oper = { - .type = SDB_OPER_GLOBAL, - .table = tsSuperTableSdb, - .pObj = pStable, - .rowSize = sizeof(SSuperTableObj) + schemaSize, - .pMsg = pMsg, - .writeCb = mnodeCreateSuperTableCb + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsSuperTableSdb, + .pObj = pStable, + .rowSize = sizeof(SSTableObj) + schemaSize, + .pMsg = pMsg, + .fpRsp = mnodeCreateSuperTableCb }; - int32_t code = sdbInsertRow(&oper); + int32_t code = sdbInsertRow(&row); if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { mnodeDestroySuperTable(pStable); pMsg->pTable = NULL; - mError("app:%p:%p, table:%s, failed to create, sdb error", pMsg->rpcMsg.ahandle, pMsg, pCreate->tableId); + mError("msg:%p, app:%p table:%s, failed to create, sdb error", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableId); } return code; } static int32_t mnodeDropSuperTableCb(SMnodeMsg *pMsg, int32_t code) { - SSuperTableObj *pTable = (SSuperTableObj *)pMsg->pTable; + SSTableObj *pTable = (SSTableObj *)pMsg->pTable; if (code != TSDB_CODE_SUCCESS) { - mError("app:%p:%p, stable:%s, failed to drop, sdb error", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId); + mError("msg:%p, app:%p stable:%s, failed to drop, sdb error", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId); } else { - mLInfo("app:%p:%p, stable:%s, is dropped from sdb", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId); + mLInfo("msg:%p, app:%p stable:%s, is dropped from sdb", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId); } return code; @@ -907,7 +912,7 @@ static int32_t mnodeDropSuperTableCb(SMnodeMsg *pMsg, int32_t code) { static int32_t mnodeProcessDropSuperTableMsg(SMnodeMsg *pMsg) { if (pMsg == NULL) return TSDB_CODE_MND_APP_ERROR; - SSuperTableObj *pStable = (SSuperTableObj *)pMsg->pTable; + SSTableObj *pStable = (SSTableObj *)pMsg->pTable; if (pStable->vgHash != NULL /*pStable->numOfTables != 0*/) { SHashMutableIterator *pIter = taosHashCreateIter(pStable->vgHash); while (taosHashIterNext(pIter)) { @@ -915,16 +920,16 @@ static int32_t mnodeProcessDropSuperTableMsg(SMnodeMsg *pMsg) { SVgObj *pVgroup = mnodeGetVgroup(*pVgId); if (pVgroup == NULL) break; - SMDDropSTableMsg *pDrop = rpcMallocCont(sizeof(SMDDropSTableMsg)); - pDrop->contLen = htonl(sizeof(SMDDropSTableMsg)); + SDropSTableMsg *pDrop = rpcMallocCont(sizeof(SDropSTableMsg)); + pDrop->contLen = htonl(sizeof(SDropSTableMsg)); pDrop->vgId = htonl(pVgroup->vgId); pDrop->uid = htobe64(pStable->uid); mnodeExtractTableName(pStable->info.tableId, pDrop->tableId); - mInfo("app:%p:%p, stable:%s, send drop stable msg to vgId:%d", pMsg->rpcMsg.ahandle, pMsg, pStable->info.tableId, - pVgroup->vgId); + mInfo("msg:%p, app:%p stable:%s, send drop stable msg to vgId:%d", pMsg, pMsg->rpcMsg.ahandle, 
+ pStable->info.tableId, pVgroup->vgId); SRpcEpSet epSet = mnodeGetEpSetFromVgroup(pVgroup); - SRpcMsg rpcMsg = {.pCont = pDrop, .contLen = sizeof(SMDDropSTableMsg), .msgType = TSDB_MSG_TYPE_MD_DROP_STABLE}; + SRpcMsg rpcMsg = {.pCont = pDrop, .contLen = sizeof(SDropSTableMsg), .msgType = TSDB_MSG_TYPE_MD_DROP_STABLE}; dnodeSendMsgToDnode(&epSet, &rpcMsg); mnodeDecVgroupRef(pVgroup); } @@ -933,24 +938,24 @@ static int32_t mnodeProcessDropSuperTableMsg(SMnodeMsg *pMsg) { mnodeDropAllChildTablesInStable(pStable); } - SSdbOper oper = { - .type = SDB_OPER_GLOBAL, - .table = tsSuperTableSdb, - .pObj = pStable, - .pMsg = pMsg, - .writeCb = mnodeDropSuperTableCb + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsSuperTableSdb, + .pObj = pStable, + .pMsg = pMsg, + .fpRsp = mnodeDropSuperTableCb }; - int32_t code = sdbDeleteRow(&oper); + int32_t code = sdbDeleteRow(&row); if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { - mError("app:%p:%p, table:%s, failed to drop, reason:%s", pMsg->rpcMsg.ahandle, pMsg, pStable->info.tableId, + mError("msg:%p, app:%p table:%s, failed to drop, reason:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, tstrerror(code)); } return code; } -static int32_t mnodeFindSuperTableTagIndex(SSuperTableObj *pStable, const char *tagName) { +static int32_t mnodeFindSuperTableTagIndex(SSTableObj *pStable, const char *tagName) { SSchema *schema = (SSchema *) pStable->schema; for (int32_t tag = 0; tag < pStable->numOfTags; tag++) { if (strcasecmp(schema[pStable->numOfColumns + tag].name, tagName) == 0) { @@ -962,30 +967,30 @@ static int32_t mnodeFindSuperTableTagIndex(SSuperTableObj *pStable, const char * } static int32_t mnodeAddSuperTableTagCb(SMnodeMsg *pMsg, int32_t code) { - SSuperTableObj *pStable = (SSuperTableObj *)pMsg->pTable; - mLInfo("app:%p:%p, stable %s, add tag result:%s", pMsg->rpcMsg.ahandle, pMsg, pStable->info.tableId, + SSTableObj *pStable = (SSTableObj *)pMsg->pTable; + mLInfo("msg:%p, app:%p stable %s, add tag result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, tstrerror(code)); return code; } static int32_t mnodeAddSuperTableTag(SMnodeMsg *pMsg, SSchema schema[], int32_t ntags) { - SSuperTableObj *pStable = (SSuperTableObj *)pMsg->pTable; + SSTableObj *pStable = (SSTableObj *)pMsg->pTable; if (pStable->numOfTags + ntags > TSDB_MAX_TAGS) { - mError("app:%p:%p, stable:%s, add tag, too many tags", pMsg->rpcMsg.ahandle, pMsg, pStable->info.tableId); + mError("msg:%p, app:%p stable:%s, add tag, too many tags", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId); return TSDB_CODE_MND_TOO_MANY_TAGS; } for (int32_t i = 0; i < ntags; i++) { if (mnodeFindSuperTableColumnIndex(pStable, schema[i].name) > 0) { - mError("app:%p:%p, stable:%s, add tag, column:%s already exist", pMsg->rpcMsg.ahandle, pMsg, + mError("msg:%p, app:%p stable:%s, add tag, column:%s already exist", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, schema[i].name); return TSDB_CODE_MND_TAG_ALREAY_EXIST; } if (mnodeFindSuperTableTagIndex(pStable, schema[i].name) > 0) { - mError("app:%p:%p, stable:%s, add tag, tag:%s already exist", pMsg->rpcMsg.ahandle, pMsg, pStable->info.tableId, - schema[i].name); + mError("msg:%p, app:%p stable:%s, add tag, tag:%s already exist", pMsg, pMsg->rpcMsg.ahandle, + pStable->info.tableId, schema[i].name); return TSDB_CODE_MND_FIELD_ALREAY_EXIST; } } @@ -1003,32 +1008,32 @@ static int32_t mnodeAddSuperTableTag(SMnodeMsg *pMsg, SSchema schema[], int32_t pStable->numOfTags += ntags; pStable->tversion++; - 
mInfo("app:%p:%p, stable %s, start to add tag %s", pMsg->rpcMsg.ahandle, pMsg, pStable->info.tableId, + mInfo("msg:%p, app:%p stable %s, start to add tag %s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, schema[0].name); - SSdbOper oper = { - .type = SDB_OPER_GLOBAL, - .table = tsSuperTableSdb, - .pObj = pStable, - .pMsg = pMsg, - .writeCb = mnodeAddSuperTableTagCb + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsSuperTableSdb, + .pObj = pStable, + .pMsg = pMsg, + .fpRsp = mnodeAddSuperTableTagCb }; - return sdbUpdateRow(&oper); + return sdbUpdateRow(&row); } static int32_t mnodeDropSuperTableTagCb(SMnodeMsg *pMsg, int32_t code) { - SSuperTableObj *pStable = (SSuperTableObj *)pMsg->pTable; - mLInfo("app:%p:%p, stable %s, drop tag result:%s", pMsg->rpcMsg.ahandle, pMsg, pStable->info.tableId, + SSTableObj *pStable = (SSTableObj *)pMsg->pTable; + mLInfo("msg:%p, app:%p stable %s, drop tag result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, tstrerror(code)); return code; } static int32_t mnodeDropSuperTableTag(SMnodeMsg *pMsg, char *tagName) { - SSuperTableObj *pStable = (SSuperTableObj *)pMsg->pTable; + SSTableObj *pStable = (SSTableObj *)pMsg->pTable; int32_t col = mnodeFindSuperTableTagIndex(pStable, tagName); if (col < 0) { - mError("app:%p:%p, stable:%s, drop tag, tag:%s not exist", pMsg->rpcMsg.ahandle, pMsg, pStable->info.tableId, + mError("msg:%p, app:%p stable:%s, drop tag, tag:%s not exist", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, tagName); return TSDB_CODE_MND_TAG_NOT_EXIST; } @@ -1038,31 +1043,31 @@ static int32_t mnodeDropSuperTableTag(SMnodeMsg *pMsg, char *tagName) { pStable->numOfTags--; pStable->tversion++; - mInfo("app:%p:%p, stable %s, start to drop tag %s", pMsg->rpcMsg.ahandle, pMsg, pStable->info.tableId, tagName); + mInfo("msg:%p, app:%p stable %s, start to drop tag %s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, tagName); - SSdbOper oper = { - .type = SDB_OPER_GLOBAL, - .table = tsSuperTableSdb, - .pObj = pStable, - .pMsg = pMsg, - .writeCb = mnodeDropSuperTableTagCb + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsSuperTableSdb, + .pObj = pStable, + .pMsg = pMsg, + .fpRsp = mnodeDropSuperTableTagCb }; - return sdbUpdateRow(&oper); + return sdbUpdateRow(&row); } static int32_t mnodeModifySuperTableTagNameCb(SMnodeMsg *pMsg, int32_t code) { - SSuperTableObj *pStable = (SSuperTableObj *)pMsg->pTable; - mLInfo("app:%p:%p, stable %s, modify tag result:%s", pMsg->rpcMsg.ahandle, pMsg, pStable->info.tableId, + SSTableObj *pStable = (SSTableObj *)pMsg->pTable; + mLInfo("msg:%p, app:%p stable %s, modify tag result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, tstrerror(code)); return code; } static int32_t mnodeModifySuperTableTagName(SMnodeMsg *pMsg, char *oldTagName, char *newTagName) { - SSuperTableObj *pStable = (SSuperTableObj *)pMsg->pTable; + SSTableObj *pStable = (SSTableObj *)pMsg->pTable; int32_t col = mnodeFindSuperTableTagIndex(pStable, oldTagName); if (col < 0) { - mError("app:%p:%p, stable:%s, failed to modify table tag, oldName: %s, newName: %s", pMsg->rpcMsg.ahandle, pMsg, + mError("msg:%p, app:%p stable:%s, failed to modify table tag, oldName: %s, newName: %s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, oldTagName, newTagName); return TSDB_CODE_MND_TAG_NOT_EXIST; } @@ -1081,21 +1086,21 @@ static int32_t mnodeModifySuperTableTagName(SMnodeMsg *pMsg, char *oldTagName, c SSchema *schema = (SSchema *) (pStable->schema + pStable->numOfColumns + col); tstrncpy(schema->name, newTagName, 
sizeof(schema->name)); - mInfo("app:%p:%p, stable %s, start to modify tag %s to %s", pMsg->rpcMsg.ahandle, pMsg, pStable->info.tableId, + mInfo("msg:%p, app:%p stable %s, start to modify tag %s to %s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, oldTagName, newTagName); - SSdbOper oper = { - .type = SDB_OPER_GLOBAL, - .table = tsSuperTableSdb, - .pObj = pStable, - .pMsg = pMsg, - .writeCb = mnodeModifySuperTableTagNameCb + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsSuperTableSdb, + .pObj = pStable, + .pMsg = pMsg, + .fpRsp = mnodeModifySuperTableTagNameCb }; - return sdbUpdateRow(&oper); + return sdbUpdateRow(&row); } -static int32_t mnodeFindSuperTableColumnIndex(SSuperTableObj *pStable, char *colName) { +static int32_t mnodeFindSuperTableColumnIndex(SSTableObj *pStable, char *colName) { SSchema *schema = (SSchema *) pStable->schema; for (int32_t col = 0; col < pStable->numOfColumns; col++) { if (strcasecmp(schema[col].name, colName) == 0) { @@ -1107,29 +1112,29 @@ static int32_t mnodeFindSuperTableColumnIndex(SSuperTableObj *pStable, char *col } static int32_t mnodeAddSuperTableColumnCb(SMnodeMsg *pMsg, int32_t code) { - SSuperTableObj *pStable = (SSuperTableObj *)pMsg->pTable; - mLInfo("app:%p:%p, stable %s, add column result:%s", pMsg->rpcMsg.ahandle, pMsg, pStable->info.tableId, + SSTableObj *pStable = (SSTableObj *)pMsg->pTable; + mLInfo("msg:%p, app:%p stable %s, add column result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, tstrerror(code)); return code; } static int32_t mnodeAddSuperTableColumn(SMnodeMsg *pMsg, SSchema schema[], int32_t ncols) { SDbObj *pDb = pMsg->pDb; - SSuperTableObj *pStable = (SSuperTableObj *)pMsg->pTable; + SSTableObj *pStable = (SSTableObj *)pMsg->pTable; if (ncols <= 0) { - mError("app:%p:%p, stable:%s, add column, ncols:%d <= 0", pMsg->rpcMsg.ahandle, pMsg, pStable->info.tableId, ncols); + mError("msg:%p, app:%p stable:%s, add column, ncols:%d <= 0", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, ncols); return TSDB_CODE_MND_APP_ERROR; } for (int32_t i = 0; i < ncols; i++) { if (mnodeFindSuperTableColumnIndex(pStable, schema[i].name) > 0) { - mError("app:%p:%p, stable:%s, add column, column:%s already exist", pMsg->rpcMsg.ahandle, pMsg, + mError("msg:%p, app:%p stable:%s, add column, column:%s already exist", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, schema[i].name); return TSDB_CODE_MND_FIELD_ALREAY_EXIST; } if (mnodeFindSuperTableTagIndex(pStable, schema[i].name) > 0) { - mError("app:%p:%p, stable:%s, add column, tag:%s already exist", pMsg->rpcMsg.ahandle, pMsg, + mError("msg:%p, app:%p stable:%s, add column, tag:%s already exist", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, schema[i].name); return TSDB_CODE_MND_TAG_ALREAY_EXIST; } @@ -1156,32 +1161,32 @@ static int32_t mnodeAddSuperTableColumn(SMnodeMsg *pMsg, SSchema schema[], int32 mnodeDecAcctRef(pAcct); } - mInfo("app:%p:%p, stable %s, start to add column", pMsg->rpcMsg.ahandle, pMsg, pStable->info.tableId); + mInfo("msg:%p, app:%p stable %s, start to add column", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId); - SSdbOper oper = { - .type = SDB_OPER_GLOBAL, - .table = tsSuperTableSdb, - .pObj = pStable, - .pMsg = pMsg, - .writeCb = mnodeAddSuperTableColumnCb + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsSuperTableSdb, + .pObj = pStable, + .pMsg = pMsg, + .fpRsp = mnodeAddSuperTableColumnCb }; - return sdbUpdateRow(&oper); + return sdbUpdateRow(&row); } static int32_t mnodeDropSuperTableColumnCb(SMnodeMsg *pMsg, int32_t code) { - 
SSuperTableObj *pStable = (SSuperTableObj *)pMsg->pTable; - mLInfo("app:%p:%p, stable %s, delete column result:%s", pMsg->rpcMsg.ahandle, pMsg, pStable->info.tableId, + SSTableObj *pStable = (SSTableObj *)pMsg->pTable; + mLInfo("msg:%p, app:%p stable %s, delete column result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, tstrerror(code)); return code; } static int32_t mnodeDropSuperTableColumn(SMnodeMsg *pMsg, char *colName) { SDbObj *pDb = pMsg->pDb; - SSuperTableObj *pStable = (SSuperTableObj *)pMsg->pTable; + SSTableObj *pStable = (SSTableObj *)pMsg->pTable; int32_t col = mnodeFindSuperTableColumnIndex(pStable, colName); if (col <= 0) { - mError("app:%p:%p, stable:%s, drop column, column:%s not exist", pMsg->rpcMsg.ahandle, pMsg, pStable->info.tableId, + mError("msg:%p, app:%p stable:%s, drop column, column:%s not exist", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, colName); return TSDB_CODE_MND_FIELD_NOT_EXIST; } @@ -1201,31 +1206,31 @@ static int32_t mnodeDropSuperTableColumn(SMnodeMsg *pMsg, char *colName) { mnodeDecAcctRef(pAcct); } - mInfo("app:%p:%p, stable %s, start to delete column", pMsg->rpcMsg.ahandle, pMsg, pStable->info.tableId); + mInfo("msg:%p, app:%p stable %s, start to delete column", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId); - SSdbOper oper = { - .type = SDB_OPER_GLOBAL, - .table = tsSuperTableSdb, - .pObj = pStable, - .pMsg = pMsg, - .writeCb = mnodeDropSuperTableColumnCb + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsSuperTableSdb, + .pObj = pStable, + .pMsg = pMsg, + .fpRsp = mnodeDropSuperTableColumnCb }; - return sdbUpdateRow(&oper); + return sdbUpdateRow(&row); } static int32_t mnodeChangeSuperTableColumnCb(SMnodeMsg *pMsg, int32_t code) { - SSuperTableObj *pStable = (SSuperTableObj *)pMsg->pTable; - mLInfo("app:%p:%p, stable %s, change column result:%s", pMsg->rpcMsg.ahandle, pMsg, pStable->info.tableId, + SSTableObj *pStable = (SSTableObj *)pMsg->pTable; + mLInfo("msg:%p, app:%p stable %s, change column result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, tstrerror(code)); return code; } static int32_t mnodeChangeSuperTableColumn(SMnodeMsg *pMsg, char *oldName, char *newName) { - SSuperTableObj *pStable = (SSuperTableObj *)pMsg->pTable; + SSTableObj *pStable = (SSTableObj *)pMsg->pTable; int32_t col = mnodeFindSuperTableColumnIndex(pStable, oldName); if (col < 0) { - mError("app:%p:%p, stable:%s, change column, oldName: %s, newName: %s", pMsg->rpcMsg.ahandle, pMsg, + mError("msg:%p, app:%p stable:%s, change column, oldName:%s, newName:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, oldName, newName); return TSDB_CODE_MND_FIELD_NOT_EXIST; } @@ -1244,18 +1249,18 @@ static int32_t mnodeChangeSuperTableColumn(SMnodeMsg *pMsg, char *oldName, char SSchema *schema = (SSchema *) (pStable->schema + col); tstrncpy(schema->name, newName, sizeof(schema->name)); - mInfo("app:%p:%p, stable %s, start to modify column %s to %s", pMsg->rpcMsg.ahandle, pMsg, pStable->info.tableId, + mInfo("msg:%p, app:%p stable %s, start to modify column %s to %s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, oldName, newName); - SSdbOper oper = { - .type = SDB_OPER_GLOBAL, - .table = tsSuperTableSdb, - .pObj = pStable, - .pMsg = pMsg, - .writeCb = mnodeChangeSuperTableColumnCb + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsSuperTableSdb, + .pObj = pStable, + .pMsg = pMsg, + .fpRsp = mnodeChangeSuperTableColumnCb }; - return sdbUpdateRow(&oper); + return sdbUpdateRow(&row); } // show super tables @@ -1321,7 +1326,7 @@ 
int32_t mnodeRetrieveShowSuperTables(SShowObj *pShow, char *data, int32_t rows, int32_t numOfRows = 0; char * pWrite; int32_t cols = 0; - SSuperTableObj *pTable = NULL; + SSTableObj *pTable = NULL; char prefix[64] = {0}; int32_t prefixLen; @@ -1399,7 +1404,7 @@ int32_t mnodeRetrieveShowSuperTables(SShowObj *pShow, char *data, int32_t rows, void mnodeDropAllSuperTables(SDbObj *pDropDb) { void * pIter= NULL; int32_t numOfTables = 0; - SSuperTableObj *pTable = NULL; + SSTableObj *pTable = NULL; char prefix[64] = {0}; tstrncpy(prefix, pDropDb->name, 64); @@ -1413,12 +1418,12 @@ void mnodeDropAllSuperTables(SDbObj *pDropDb) { if (pTable == NULL) break; if (strncmp(prefix, pTable->info.tableId, prefixLen) == 0) { - SSdbOper oper = { - .type = SDB_OPER_LOCAL, - .table = tsSuperTableSdb, - .pObj = pTable, + SSdbRow row = { + .type = SDB_OPER_LOCAL, + .pTable = tsSuperTableSdb, + .pObj = pTable, }; - sdbDeleteRow(&oper); + sdbDeleteRow(&row); numOfTables ++; } @@ -1430,7 +1435,7 @@ void mnodeDropAllSuperTables(SDbObj *pDropDb) { mInfo("db:%s, all super tables:%d is dropped from sdb", pDropDb->name, numOfTables); } -static int32_t mnodeSetSchemaFromSuperTable(SSchema *pSchema, SSuperTableObj *pTable) { +static int32_t mnodeSetSchemaFromSuperTable(SSchema *pSchema, SSTableObj *pTable) { int32_t numOfCols = pTable->numOfColumns + pTable->numOfTags; assert(numOfCols <= TSDB_MAX_COLUMNS); @@ -1446,7 +1451,7 @@ static int32_t mnodeSetSchemaFromSuperTable(SSchema *pSchema, SSuperTableObj *pT } static int32_t mnodeGetSuperTableMeta(SMnodeMsg *pMsg) { - SSuperTableObj *pTable = (SSuperTableObj *)pMsg->pTable; + SSTableObj *pTable = (SSTableObj *)pMsg->pTable; STableMetaMsg *pMeta = rpcMallocCont(sizeof(STableMetaMsg) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16)); if (pMeta == NULL) { return TSDB_CODE_MND_OUT_OF_MEMORY; @@ -1466,45 +1471,45 @@ static int32_t mnodeGetSuperTableMeta(SMnodeMsg *pMsg) { pMsg->rpcRsp.rsp = pMeta; - mDebug("app:%p:%p, stable:%s, uid:%" PRIu64 " table meta is retrieved", pMsg->rpcMsg.ahandle, pMsg, + mDebug("msg:%p, app:%p stable:%s, uid:%" PRIu64 " table meta is retrieved", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId, pTable->uid); return TSDB_CODE_SUCCESS; } static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *pMsg) { - SCMSTableVgroupMsg *pInfo = pMsg->rpcMsg.pCont; + SSTableVgroupMsg *pInfo = pMsg->rpcMsg.pCont; int32_t numOfTable = htonl(pInfo->numOfTables); // reserve space - int32_t contLen = sizeof(SCMSTableVgroupRspMsg) + 32 * sizeof(SCMVgroupMsg) + sizeof(SVgroupsMsg); + int32_t contLen = sizeof(SSTableVgroupRspMsg) + 32 * sizeof(SVgroupMsg) + sizeof(SVgroupsMsg); for (int32_t i = 0; i < numOfTable; ++i) { - char *stableName = (char*)pInfo + sizeof(SCMSTableVgroupMsg) + (TSDB_TABLE_FNAME_LEN) * i; - SSuperTableObj *pTable = mnodeGetSuperTable(stableName); + char *stableName = (char *)pInfo + sizeof(SSTableVgroupMsg) + (TSDB_TABLE_FNAME_LEN)*i; + SSTableObj *pTable = mnodeGetSuperTable(stableName); if (pTable != NULL && pTable->vgHash != NULL) { - contLen += (taosHashGetSize(pTable->vgHash) * sizeof(SCMVgroupMsg) + sizeof(SVgroupsMsg)); - } - + contLen += (taosHashGetSize(pTable->vgHash) * sizeof(SVgroupMsg) + sizeof(SVgroupsMsg)); + } + mnodeDecTableRef(pTable); } - SCMSTableVgroupRspMsg *pRsp = rpcMallocCont(contLen); + SSTableVgroupRspMsg *pRsp = rpcMallocCont(contLen); if (pRsp == NULL) { return TSDB_CODE_MND_OUT_OF_MEMORY; } pRsp->numOfTables = 0; - char *msg = (char *)pRsp + sizeof(SCMSTableVgroupRspMsg); + char *msg = (char *)pRsp + 
sizeof(SSTableVgroupRspMsg); for (int32_t i = 0; i < numOfTable; ++i) { - char * stableName = (char *)pInfo + sizeof(SCMSTableVgroupMsg) + (TSDB_TABLE_FNAME_LEN)*i; - SSuperTableObj *pTable = mnodeGetSuperTable(stableName); + char * stableName = (char *)pInfo + sizeof(SSTableVgroupMsg) + (TSDB_TABLE_FNAME_LEN)*i; + SSTableObj *pTable = mnodeGetSuperTable(stableName); if (pTable == NULL) { - mError("app:%p:%p, stable:%s, not exist while get stable vgroup info", pMsg->rpcMsg.ahandle, pMsg, stableName); + mError("msg:%p, app:%p stable:%s, not exist while get stable vgroup info", pMsg, pMsg->rpcMsg.ahandle, stableName); mnodeDecTableRef(pTable); continue; } if (pTable->vgHash == NULL) { - mDebug("app:%p:%p, stable:%s, no vgroup exist while get stable vgroup info", pMsg->rpcMsg.ahandle, pMsg, + mDebug("msg:%p, app:%p stable:%s, no vgroup exist while get stable vgroup info", pMsg, pMsg->rpcMsg.ahandle, stableName); mnodeDecTableRef(pTable); @@ -1548,7 +1553,7 @@ static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *pMsg) { pVgroupMsg->numOfVgroups = htonl(vgSize); // one table is done, try the next table - msg += sizeof(SVgroupsMsg) + vgSize * sizeof(SCMVgroupMsg); + msg += sizeof(SVgroupsMsg) + vgSize * sizeof(SVgroupMsg); pRsp->numOfTables++; } } @@ -1569,7 +1574,7 @@ static void mnodeProcessDropSuperTableRsp(SRpcMsg *rpcMsg) { mInfo("drop stable rsp received, result:%s", tstrerror(rpcMsg->code)); } -static void *mnodeBuildCreateChildTableMsg(SCMCreateTableMsg *pMsg, SChildTableObj *pTable) { +static void *mnodeBuildCreateChildTableMsg(SCMCreateTableMsg *pMsg, SCTableObj *pTable) { STagData * pTagData = NULL; int32_t tagDataLen = 0; int32_t totalCols = 0; @@ -1643,10 +1648,10 @@ static void *mnodeBuildCreateChildTableMsg(SCMCreateTableMsg *pMsg, SChildTableO } static int32_t mnodeDoCreateChildTableFp(SMnodeMsg *pMsg) { - SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable; + SCTableObj *pTable = (SCTableObj *)pMsg->pTable; assert(pTable); - mDebug("app:%p:%p, table:%s, created in mnode, vgId:%d sid:%d, uid:%" PRIu64, pMsg->rpcMsg.ahandle, pMsg, + mDebug("msg:%p, app:%p table:%s, created in mnode, vgId:%d sid:%d, uid:%" PRIu64, pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId, pTable->vgId, pTable->tid, pTable->uid); SCMCreateTableMsg *pCreate = pMsg->rpcMsg.pCont; @@ -1669,28 +1674,28 @@ static int32_t mnodeDoCreateChildTableFp(SMnodeMsg *pMsg) { } static int32_t mnodeDoCreateChildTableCb(SMnodeMsg *pMsg, int32_t code) { - SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable; + SCTableObj *pTable = (SCTableObj *)pMsg->pTable; SCMCreateTableMsg *pCreate = pMsg->rpcMsg.pCont; assert(pTable); if (code == TSDB_CODE_SUCCESS) { if (pCreate->getMeta) { - mDebug("app:%p:%p, table:%s, created in dnode and continue to get meta, thandle:%p", pMsg->rpcMsg.ahandle, pMsg, - pTable->info.tableId, pMsg->rpcMsg.handle); + mDebug("msg:%p, app:%p table:%s, created in dnode and continue to get meta, thandle:%p", pMsg, + pMsg->rpcMsg.ahandle, pTable->info.tableId, pMsg->rpcMsg.handle); pMsg->retry = 0; - dnodeReprocessMnodeWriteMsg(pMsg); + dnodeReprocessMWriteMsg(pMsg); } else { - mDebug("app:%p:%p, table:%s, created in dnode, thandle:%p", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId, + mDebug("msg:%p, app:%p table:%s, created in dnode, thandle:%p", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId, pMsg->rpcMsg.handle); - dnodeSendRpcMnodeWriteRsp(pMsg, TSDB_CODE_SUCCESS); + dnodeSendRpcMWriteRsp(pMsg, TSDB_CODE_SUCCESS); } return TSDB_CODE_MND_ACTION_IN_PROGRESS; } else { - mError("app:%p:%p, 
table:%s, failed to create table sid:%d, uid:%" PRIu64 ", reason:%s", pMsg->rpcMsg.ahandle, pMsg, - pTable->info.tableId, pTable->tid, pTable->uid, tstrerror(code)); - SSdbOper desc = {.type = SDB_OPER_GLOBAL, .pObj = pTable, .table = tsChildTableSdb}; + mError("msg:%p, app:%p table:%s, failed to create table sid:%d, uid:%" PRIu64 ", reason:%s", pMsg, + pMsg->rpcMsg.ahandle, pTable->info.tableId, pTable->tid, pTable->uid, tstrerror(code)); + SSdbRow desc = {.type = SDB_OPER_GLOBAL, .pObj = pTable, .pTable = tsChildTableSdb}; sdbDeleteRow(&desc); return code; } @@ -1699,9 +1704,9 @@ static int32_t mnodeDoCreateChildTableCb(SMnodeMsg *pMsg, int32_t code) { static int32_t mnodeDoCreateChildTable(SMnodeMsg *pMsg, int32_t tid) { SVgObj *pVgroup = pMsg->pVgroup; SCMCreateTableMsg *pCreate = pMsg->rpcMsg.pCont; - SChildTableObj *pTable = calloc(1, sizeof(SChildTableObj)); + SCTableObj *pTable = calloc(1, sizeof(SCTableObj)); if (pTable == NULL) { - mError("app:%p:%p, table:%s, failed to alloc memory", pMsg->rpcMsg.ahandle, pMsg, pCreate->tableId); + mError("msg:%p, app:%p table:%s, failed to alloc memory", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableId); return TSDB_CODE_MND_OUT_OF_MEMORY; } @@ -1720,7 +1725,7 @@ static int32_t mnodeDoCreateChildTable(SMnodeMsg *pMsg, int32_t tid) { STagData *pTagData = (STagData *)pCreate->schema; // it is a tag key if (pMsg->pSTable == NULL) pMsg->pSTable = mnodeGetSuperTable(pTagData->name); if (pMsg->pSTable == NULL) { - mError("app:%p:%p, table:%s, corresponding super table:%s does not exist", pMsg->rpcMsg.ahandle, pMsg, + mError("msg:%p, app:%p table:%s, corresponding super table:%s does not exist", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableId, pTagData->name); mnodeDestroyChildTable(pTable); return TSDB_CODE_MND_INVALID_TABLE_NAME; @@ -1768,7 +1773,7 @@ static int32_t mnodeDoCreateChildTable(SMnodeMsg *pMsg, int32_t tid) { } memcpy(pTable->sql, (char *) (pCreate->schema) + numOfCols * sizeof(SSchema), pTable->sqlLen); pTable->sql[pTable->sqlLen - 1] = 0; - mDebug("app:%p:%p, table:%s, stream sql len:%d sql:%s", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId, + mDebug("msg:%p, app:%p table:%s, stream sql len:%d sql:%s", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId, pTable->sqlLen, pTable->sql); } } @@ -1776,22 +1781,22 @@ static int32_t mnodeDoCreateChildTable(SMnodeMsg *pMsg, int32_t tid) { pMsg->pTable = (STableObj *)pTable; mnodeIncTableRef(pMsg->pTable); - SSdbOper desc = { - .type = SDB_OPER_GLOBAL, - .pObj = pTable, - .table = tsChildTableSdb, - .pMsg = pMsg, - .reqFp = mnodeDoCreateChildTableFp + SSdbRow desc = { + .type = SDB_OPER_GLOBAL, + .pObj = pTable, + .pTable = tsChildTableSdb, + .pMsg = pMsg, + .fpReq = mnodeDoCreateChildTableFp }; int32_t code = sdbInsertRow(&desc); if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { mnodeDestroyChildTable(pTable); pMsg->pTable = NULL; - mError("app:%p:%p, table:%s, failed to create, reason:%s", pMsg->rpcMsg.ahandle, pMsg, pCreate->tableId, + mError("msg:%p, app:%p table:%s, failed to create, reason:%s", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableId, tstrerror(code)); } else { - mDebug("app:%p:%p, table:%s, allocated in vgroup, vgId:%d sid:%d uid:%" PRIu64, pMsg->rpcMsg.ahandle, pMsg, + mDebug("msg:%p, app:%p table:%s, allocated in vgroup, vgId:%d sid:%d uid:%" PRIu64, pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId, pVgroup->vgId, pTable->tid, pTable->uid); } @@ -1802,7 +1807,7 @@ static int32_t mnodeProcessCreateChildTableMsg(SMnodeMsg *pMsg) { SCMCreateTableMsg *pCreate = 
pMsg->rpcMsg.pCont; int32_t code = grantCheck(TSDB_GRANT_TIMESERIES); if (code != TSDB_CODE_SUCCESS) { - mError("app:%p:%p, table:%s, failed to create, grant timeseries failed", pMsg->rpcMsg.ahandle, pMsg, + mError("msg:%p, app:%p table:%s, failed to create, grant timeseries failed", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableId); return code; } @@ -1813,7 +1818,7 @@ static int32_t mnodeProcessCreateChildTableMsg(SMnodeMsg *pMsg) { int32_t tid = 0; code = mnodeGetAvailableVgroup(pMsg, &pVgroup, &tid); if (code != TSDB_CODE_SUCCESS) { - mDebug("app:%p:%p, table:%s, failed to get available vgroup, reason:%s", pMsg->rpcMsg.ahandle, pMsg, + mDebug("msg:%p, app:%p table:%s, failed to get available vgroup, reason:%s", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableId, tstrerror(code)); return code; } @@ -1832,22 +1837,22 @@ static int32_t mnodeProcessCreateChildTableMsg(SMnodeMsg *pMsg) { } if (pMsg->pTable == NULL) { - mError("app:%p:%p, table:%s, object not found, retry:%d reason:%s", pMsg->rpcMsg.ahandle, pMsg, pCreate->tableId, pMsg->retry, + mError("msg:%p, app:%p table:%s, object not found, retry:%d reason:%s", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableId, pMsg->retry, tstrerror(terrno)); return terrno; } else { - mDebug("app:%p:%p, table:%s, send create msg to vnode again", pMsg->rpcMsg.ahandle, pMsg, pCreate->tableId); + mDebug("msg:%p, app:%p table:%s, send create msg to vnode again", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableId); return mnodeDoCreateChildTableFp(pMsg); } } static int32_t mnodeSendDropChildTableMsg(SMnodeMsg *pMsg, bool needReturn) { - SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable; - mLInfo("app:%p:%p, ctable:%s, is dropped from sdb", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId); + SCTableObj *pTable = (SCTableObj *)pMsg->pTable; + mLInfo("msg:%p, app:%p ctable:%s, is dropped from sdb", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId); SMDDropTableMsg *pDrop = rpcMallocCont(sizeof(SMDDropTableMsg)); if (pDrop == NULL) { - mError("app:%p:%p, ctable:%s, failed to drop ctable, no enough memory", pMsg->rpcMsg.ahandle, pMsg, + mError("msg:%p, app:%p ctable:%s, failed to drop ctable, no enough memory", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId); return TSDB_CODE_MND_OUT_OF_MEMORY; } @@ -1860,7 +1865,7 @@ static int32_t mnodeSendDropChildTableMsg(SMnodeMsg *pMsg, bool needReturn) { SRpcEpSet epSet = mnodeGetEpSetFromVgroup(pMsg->pVgroup); - mInfo("app:%p:%p, ctable:%s, send drop ctable msg, vgId:%d sid:%d uid:%" PRIu64, pMsg->rpcMsg.ahandle, pMsg, + mInfo("msg:%p, app:%p ctable:%s, send drop ctable msg, vgId:%d sid:%d uid:%" PRIu64, pMsg, pMsg->rpcMsg.ahandle, pDrop->tableId, pTable->vgId, pTable->tid, pTable->uid); SRpcMsg rpcMsg = { @@ -1880,8 +1885,8 @@ static int32_t mnodeSendDropChildTableMsg(SMnodeMsg *pMsg, bool needReturn) { static int32_t mnodeDropChildTableCb(SMnodeMsg *pMsg, int32_t code) { if (code != TSDB_CODE_SUCCESS) { - SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable; - mError("app:%p:%p, ctable:%s, failed to drop, sdb error", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId); + SCTableObj *pTable = (SCTableObj *)pMsg->pTable; + mError("msg:%p, app:%p ctable:%s, failed to drop, sdb error", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId); return code; } @@ -1889,32 +1894,32 @@ static int32_t mnodeDropChildTableCb(SMnodeMsg *pMsg, int32_t code) { } static int32_t mnodeProcessDropChildTableMsg(SMnodeMsg *pMsg) { - SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable; + SCTableObj *pTable = (SCTableObj *)pMsg->pTable; if (pMsg->pVgroup 
== NULL) pMsg->pVgroup = mnodeGetVgroup(pTable->vgId); if (pMsg->pVgroup == NULL) { - mError("app:%p:%p, table:%s, failed to drop ctable, vgroup not exist", pMsg->rpcMsg.ahandle, pMsg, + mError("msg:%p, app:%p table:%s, failed to drop ctable, vgroup not exist", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId); return TSDB_CODE_MND_APP_ERROR; } - SSdbOper oper = { - .type = SDB_OPER_GLOBAL, - .table = tsChildTableSdb, - .pObj = pTable, - .pMsg = pMsg, - .writeCb = mnodeDropChildTableCb + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsChildTableSdb, + .pObj = pTable, + .pMsg = pMsg, + .fpRsp = mnodeDropChildTableCb }; - int32_t code = sdbDeleteRow(&oper); + int32_t code = sdbDeleteRow(&row); if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { - mError("app:%p:%p, ctable:%s, failed to drop, reason:%s", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId, + mError("msg:%p, app:%p ctable:%s, failed to drop, reason:%s", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId, tstrerror(code)); } return code; } -static int32_t mnodeFindNormalTableColumnIndex(SChildTableObj *pTable, char *colName) { +static int32_t mnodeFindNormalTableColumnIndex(SCTableObj *pTable, char *colName) { SSchema *schema = (SSchema *) pTable->schema; for (int32_t col = 0; col < pTable->numOfColumns; col++) { if (strcasecmp(schema[col].name, colName) == 0) { @@ -1926,9 +1931,9 @@ static int32_t mnodeFindNormalTableColumnIndex(SChildTableObj *pTable, char *col } static int32_t mnodeAlterNormalTableColumnCb(SMnodeMsg *pMsg, int32_t code) { - SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable; + SCTableObj *pTable = (SCTableObj *)pMsg->pTable; if (code != TSDB_CODE_SUCCESS) { - mError("app:%p:%p, ctable %s, failed to alter column, reason:%s", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId, + mError("msg:%p, app:%p ctable %s, failed to alter column, reason:%s", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId, tstrerror(code)); return code; } @@ -1942,7 +1947,7 @@ static int32_t mnodeAlterNormalTableColumnCb(SMnodeMsg *pMsg, int32_t code) { pMsg->pVgroup = mnodeGetVgroup(pTable->vgId); if (pMsg->pVgroup == NULL) { rpcFreeCont(pMDCreate); - mError("app:%p:%p, ctable %s, vgId:%d not exist in mnode", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId, + mError("msg:%p, app:%p ctable %s, vgId:%d not exist in mnode", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId, pTable->vgId); return TSDB_CODE_MND_VGROUP_NOT_EXIST; } @@ -1957,7 +1962,7 @@ static int32_t mnodeAlterNormalTableColumnCb(SMnodeMsg *pMsg, int32_t code) { .msgType = TSDB_MSG_TYPE_MD_ALTER_TABLE }; - mDebug("app:%p:%p, ctable %s, send alter column msg to vgId:%d", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId, + mDebug("msg:%p, app:%p ctable %s, send alter column msg to vgId:%d", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId, pMsg->pVgroup->vgId); dnodeSendMsgToDnode(&epSet, &rpcMsg); @@ -1965,16 +1970,16 @@ static int32_t mnodeAlterNormalTableColumnCb(SMnodeMsg *pMsg, int32_t code) { } static int32_t mnodeAddNormalTableColumn(SMnodeMsg *pMsg, SSchema schema[], int32_t ncols) { - SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable; + SCTableObj *pTable = (SCTableObj *)pMsg->pTable; SDbObj *pDb = pMsg->pDb; if (ncols <= 0) { - mError("app:%p:%p, ctable:%s, add column, ncols:%d <= 0", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId, ncols); + mError("msg:%p, app:%p ctable:%s, add column, ncols:%d <= 0", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId, ncols); return TSDB_CODE_MND_APP_ERROR; } for (int32_t i = 0; i < ncols; 
i++) { if (mnodeFindNormalTableColumnIndex(pTable, schema[i].name) > 0) { - mError("app:%p:%p, ctable:%s, add column, column:%s already exist", pMsg->rpcMsg.ahandle, pMsg, + mError("msg:%p, app:%p ctable:%s, add column, column:%s already exist", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId, schema[i].name); return TSDB_CODE_MND_FIELD_ALREAY_EXIST; } @@ -1999,26 +2004,26 @@ static int32_t mnodeAddNormalTableColumn(SMnodeMsg *pMsg, SSchema schema[], int3 mnodeDecAcctRef(pAcct); } - mInfo("app:%p:%p, ctable %s, start to add column", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId); + mInfo("msg:%p, app:%p ctable %s, start to add column", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId); - SSdbOper oper = { - .type = SDB_OPER_GLOBAL, - .table = tsChildTableSdb, - .pObj = pTable, - .pMsg = pMsg, - .writeCb = mnodeAlterNormalTableColumnCb + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsChildTableSdb, + .pObj = pTable, + .pMsg = pMsg, + .fpRsp = mnodeAlterNormalTableColumnCb }; - return sdbUpdateRow(&oper); + return sdbUpdateRow(&row); } static int32_t mnodeDropNormalTableColumn(SMnodeMsg *pMsg, char *colName) { SDbObj *pDb = pMsg->pDb; - SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable; + SCTableObj *pTable = (SCTableObj *)pMsg->pTable; int32_t col = mnodeFindNormalTableColumnIndex(pTable, colName); if (col <= 0) { - mError("app:%p:%p, ctable:%s, drop column, column:%s not exist", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId, - colName); + mError("msg:%p, app:%p ctable:%s, drop column, column:%s not exist", pMsg, pMsg->rpcMsg.ahandle, + pTable->info.tableId, colName); return TSDB_CODE_MND_FIELD_NOT_EXIST; } @@ -2032,24 +2037,24 @@ static int32_t mnodeDropNormalTableColumn(SMnodeMsg *pMsg, char *colName) { mnodeDecAcctRef(pAcct); } - mInfo("app:%p:%p, ctable %s, start to drop column %s", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId, colName); + mInfo("msg:%p, app:%p ctable %s, start to drop column %s", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId, colName); - SSdbOper oper = { - .type = SDB_OPER_GLOBAL, - .table = tsChildTableSdb, - .pObj = pTable, - .pMsg = pMsg, - .writeCb = mnodeAlterNormalTableColumnCb + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsChildTableSdb, + .pObj = pTable, + .pMsg = pMsg, + .fpRsp = mnodeAlterNormalTableColumnCb }; - return sdbUpdateRow(&oper); + return sdbUpdateRow(&row); } static int32_t mnodeChangeNormalTableColumn(SMnodeMsg *pMsg, char *oldName, char *newName) { - SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable; + SCTableObj *pTable = (SCTableObj *)pMsg->pTable; int32_t col = mnodeFindNormalTableColumnIndex(pTable, oldName); if (col < 0) { - mError("app:%p:%p, ctable:%s, change column, oldName: %s, newName: %s", pMsg->rpcMsg.ahandle, pMsg, + mError("msg:%p, app:%p ctable:%s, change column, oldName: %s, newName: %s", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId, oldName, newName); return TSDB_CODE_MND_FIELD_NOT_EXIST; } @@ -2068,21 +2073,21 @@ static int32_t mnodeChangeNormalTableColumn(SMnodeMsg *pMsg, char *oldName, char SSchema *schema = (SSchema *) (pTable->schema + col); tstrncpy(schema->name, newName, sizeof(schema->name)); - mInfo("app:%p:%p, ctable %s, start to modify column %s to %s", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId, + mInfo("msg:%p, app:%p ctable %s, start to modify column %s to %s", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId, oldName, newName); - SSdbOper oper = { - .type = SDB_OPER_GLOBAL, - .table = tsChildTableSdb, - .pObj = pTable, - .pMsg = pMsg, - .writeCb = 
mnodeAlterNormalTableColumnCb + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsChildTableSdb, + .pObj = pTable, + .pMsg = pMsg, + .fpRsp = mnodeAlterNormalTableColumnCb }; - return sdbUpdateRow(&oper); + return sdbUpdateRow(&row); } -static int32_t mnodeSetSchemaFromNormalTable(SSchema *pSchema, SChildTableObj *pTable) { +static int32_t mnodeSetSchemaFromNormalTable(SSchema *pSchema, SCTableObj *pTable) { int32_t numOfCols = pTable->numOfColumns; for (int32_t i = 0; i < numOfCols; ++i) { strcpy(pSchema->name, pTable->schema[i].name); @@ -2097,7 +2102,7 @@ static int32_t mnodeSetSchemaFromNormalTable(SSchema *pSchema, SChildTableObj *p static int32_t mnodeDoGetChildTableMeta(SMnodeMsg *pMsg, STableMetaMsg *pMeta) { SDbObj *pDb = pMsg->pDb; - SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable; + SCTableObj *pTable = (SCTableObj *)pMsg->pTable; pMeta->uid = htobe64(pTable->uid); pMeta->tid = htonl(pTable->tid); @@ -2124,7 +2129,7 @@ static int32_t mnodeDoGetChildTableMeta(SMnodeMsg *pMsg, STableMetaMsg *pMeta) { if (pMsg->pVgroup == NULL) pMsg->pVgroup = mnodeGetVgroup(pTable->vgId); if (pMsg->pVgroup == NULL) { - mError("app:%p:%p, table:%s, failed to get table meta, vgroup not exist", pMsg->rpcMsg.ahandle, pMsg, + mError("msg:%p, app:%p table:%s, failed to get table meta, vgroup not exist", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId); return TSDB_CODE_MND_VGROUP_NOT_EXIST; } @@ -2139,27 +2144,27 @@ static int32_t mnodeDoGetChildTableMeta(SMnodeMsg *pMsg, STableMetaMsg *pMeta) { } pMeta->vgroup.vgId = htonl(pMsg->pVgroup->vgId); - mDebug("app:%p:%p, table:%s, uid:%" PRIu64 " table meta is retrieved, vgId:%d sid:%d", pMsg->rpcMsg.ahandle, pMsg, + mDebug("msg:%p, app:%p table:%s, uid:%" PRIu64 " table meta is retrieved, vgId:%d sid:%d", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId, pTable->uid, pTable->vgId, pTable->tid); return TSDB_CODE_SUCCESS; } static int32_t mnodeAutoCreateChildTable(SMnodeMsg *pMsg) { - SCMTableInfoMsg *pInfo = pMsg->rpcMsg.pCont; + STableInfoMsg *pInfo = pMsg->rpcMsg.pCont; STagData *pTags = (STagData *)pInfo->tags; int32_t tagLen = htonl(pTags->dataLen); if (pTags->name[0] == 0) { - mError("app:%p:%p, table:%s, failed to create table on demand for stable is empty, tagLen:%d", pMsg->rpcMsg.ahandle, - pMsg, pInfo->tableId, tagLen); - return TSDB_CODE_MND_INVALID_STABLE_NAME; + mError("msg:%p, app:%p table:%s, failed to create table on demand for stable is empty, tagLen:%d", pMsg, + pMsg->rpcMsg.ahandle, pInfo->tableId, tagLen); + return TSDB_CODE_MND_INVALID_STABLE_NAME; } int32_t contLen = sizeof(SCMCreateTableMsg) + offsetof(STagData, data) + tagLen; - SCMCreateTableMsg *pCreateMsg = rpcMallocCont(contLen); + SCMCreateTableMsg *pCreateMsg = calloc(1, contLen); if (pCreateMsg == NULL) { - mError("app:%p:%p, table:%s, failed to create table while get meta info, no enough memory", pMsg->rpcMsg.ahandle, - pMsg, pInfo->tableId); + mError("msg:%p, app:%p table:%s, failed to create table while get meta info, no enough memory", pMsg, + pMsg->rpcMsg.ahandle, pInfo->tableId); return TSDB_CODE_MND_OUT_OF_MEMORY; } @@ -2171,14 +2176,16 @@ static int32_t mnodeAutoCreateChildTable(SMnodeMsg *pMsg) { pCreateMsg->contLen = htonl(contLen); memcpy(pCreateMsg->schema, pTags, contLen - sizeof(SCMCreateTableMsg)); - mDebug("app:%p:%p, table:%s, start to create on demand, tagLen:%d stable:%s", - pMsg->rpcMsg.ahandle, pMsg, pInfo->tableId, tagLen, pTags->name); + mDebug("msg:%p, app:%p table:%s, start to create on demand, tagLen:%d stable:%s", pMsg, 
pMsg->rpcMsg.ahandle, + pInfo->tableId, tagLen, pTags->name); - rpcFreeCont(pMsg->rpcMsg.pCont); + if (pMsg->rpcMsg.pCont != pMsg->pCont) { + tfree(pMsg->rpcMsg.pCont); + } pMsg->rpcMsg.msgType = TSDB_MSG_TYPE_CM_CREATE_TABLE; pMsg->rpcMsg.pCont = pCreateMsg; pMsg->rpcMsg.contLen = contLen; - + return TSDB_CODE_MND_ACTION_NEED_REPROCESSED; } @@ -2186,7 +2193,7 @@ static int32_t mnodeGetChildTableMeta(SMnodeMsg *pMsg) { STableMetaMsg *pMeta = rpcMallocCont(sizeof(STableMetaMsg) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16)); if (pMeta == NULL) { - mError("app:%p:%p, table:%s, failed to get table meta, no enough memory", pMsg->rpcMsg.ahandle, pMsg, + mError("msg:%p, app:%p table:%s, failed to get table meta, no enough memory", pMsg, pMsg->rpcMsg.ahandle, pMsg->pTable->tableId); return TSDB_CODE_MND_OUT_OF_MEMORY; } @@ -2203,7 +2210,7 @@ static int32_t mnodeGetChildTableMeta(SMnodeMsg *pMsg) { void mnodeDropAllChildTablesInVgroups(SVgObj *pVgroup) { void * pIter = NULL; int32_t numOfTables = 0; - SChildTableObj *pTable = NULL; + SCTableObj *pTable = NULL; mInfo("vgId:%d, all child tables will be dropped from sdb", pVgroup->vgId); @@ -2212,12 +2219,12 @@ void mnodeDropAllChildTablesInVgroups(SVgObj *pVgroup) { if (pTable == NULL) break; if (pTable->vgId == pVgroup->vgId) { - SSdbOper oper = { - .type = SDB_OPER_LOCAL, - .table = tsChildTableSdb, - .pObj = pTable, + SSdbRow row = { + .type = SDB_OPER_LOCAL, + .pTable = tsChildTableSdb, + .pObj = pTable, }; - sdbDeleteRow(&oper); + sdbDeleteRow(&row); numOfTables++; } mnodeDecTableRef(pTable); @@ -2231,7 +2238,7 @@ void mnodeDropAllChildTablesInVgroups(SVgObj *pVgroup) { void mnodeDropAllChildTables(SDbObj *pDropDb) { void * pIter = NULL; int32_t numOfTables = 0; - SChildTableObj *pTable = NULL; + SCTableObj *pTable = NULL; char prefix[64] = {0}; tstrncpy(prefix, pDropDb->name, 64); @@ -2245,12 +2252,12 @@ void mnodeDropAllChildTables(SDbObj *pDropDb) { if (pTable == NULL) break; if (strncmp(prefix, pTable->info.tableId, prefixLen) == 0) { - SSdbOper oper = { - .type = SDB_OPER_LOCAL, - .table = tsChildTableSdb, - .pObj = pTable, + SSdbRow row = { + .type = SDB_OPER_LOCAL, + .pTable = tsChildTableSdb, + .pObj = pTable, }; - sdbDeleteRow(&oper); + sdbDeleteRow(&row); numOfTables++; } mnodeDecTableRef(pTable); @@ -2261,10 +2268,10 @@ void mnodeDropAllChildTables(SDbObj *pDropDb) { mInfo("db:%s, all child tables:%d is dropped from sdb", pDropDb->name, numOfTables); } -static void mnodeDropAllChildTablesInStable(SSuperTableObj *pStable) { +static void mnodeDropAllChildTablesInStable(SSTableObj *pStable) { void * pIter = NULL; int32_t numOfTables = 0; - SChildTableObj *pTable = NULL; + SCTableObj *pTable = NULL; mInfo("stable:%s uid:%" PRIu64 ", all child tables:%d will be dropped from sdb", pStable->info.tableId, pStable->uid, pStable->numOfTables); @@ -2274,12 +2281,12 @@ static void mnodeDropAllChildTablesInStable(SSuperTableObj *pStable) { if (pTable == NULL) break; if (pTable->superTable == pStable) { - SSdbOper oper = { - .type = SDB_OPER_LOCAL, - .table = tsChildTableSdb, - .pObj = pTable, + SSdbRow row = { + .type = SDB_OPER_LOCAL, + .pTable = tsChildTableSdb, + .pObj = pTable, }; - sdbDeleteRow(&oper); + sdbDeleteRow(&row); numOfTables++; } @@ -2292,11 +2299,11 @@ static void mnodeDropAllChildTablesInStable(SSuperTableObj *pStable) { } #if 0 -static SChildTableObj* mnodeGetTableByPos(int32_t vnode, int32_t tid) { +static SCTableObj* mnodeGetTableByPos(int32_t vnode, int32_t tid) { SVgObj *pVgroup = mnodeGetVgroup(vnode); if 
(pVgroup == NULL) return NULL; - SChildTableObj *pTable = pVgroup->tableList[tid - 1]; + SCTableObj *pTable = pVgroup->tableList[tid - 1]; mnodeIncTableRef((STableObj *)pTable); mnodeDecVgroupRef(pVgroup); @@ -2307,22 +2314,22 @@ static SChildTableObj* mnodeGetTableByPos(int32_t vnode, int32_t tid) { static int32_t mnodeProcessTableCfgMsg(SMnodeMsg *pMsg) { return TSDB_CODE_COM_OPS_NOT_SUPPORT; #if 0 - SDMConfigTableMsg *pCfg = pMsg->rpcMsg.pCont; + SConfigTableMsg *pCfg = pMsg->rpcMsg.pCont; pCfg->dnodeId = htonl(pCfg->dnodeId); pCfg->vgId = htonl(pCfg->vgId); pCfg->sid = htonl(pCfg->sid); - mDebug("app:%p:%p, dnode:%d, vgId:%d sid:%d, receive table config msg", pMsg->rpcMsg.ahandle, pMsg, pCfg->dnodeId, + mDebug("msg:%p, app:%p dnode:%d, vgId:%d sid:%d, receive table config msg", pMsg, pMsg->rpcMsg.ahandle, pCfg->dnodeId, pCfg->vgId, pCfg->sid); - SChildTableObj *pTable = mnodeGetTableByPos(pCfg->vgId, pCfg->sid); + SCTableObj *pTable = mnodeGetTableByPos(pCfg->vgId, pCfg->sid); if (pTable == NULL) { - mError("app:%p:%p, dnode:%d, vgId:%d sid:%d, table not found", pMsg->rpcMsg.ahandle, pMsg, pCfg->dnodeId, + mError("msg:%p, app:%p dnode:%d, vgId:%d sid:%d, table not found", pMsg, pMsg->rpcMsg.ahandle, pCfg->dnodeId, pCfg->vgId, pCfg->sid); return TSDB_CODE_MND_INVALID_TABLE_ID; } SMDCreateTableMsg *pCreate = NULL; - pCreate = mnodeBuildCreateChildTableMsg(NULL, (SChildTableObj *)pTable); + pCreate = mnodeBuildCreateChildTableMsg(NULL, (SCTableObj *)pTable); mnodeDecTableRef(pTable); if (pCreate == NULL) return terrno; @@ -2337,38 +2344,37 @@ static int32_t mnodeProcessTableCfgMsg(SMnodeMsg *pMsg) { static void mnodeProcessDropChildTableRsp(SRpcMsg *rpcMsg) { if (rpcMsg->ahandle == NULL) return; - SMnodeMsg *mnodeMsg = rpcMsg->ahandle; - mnodeMsg->received++; + SMnodeMsg *pMsg = rpcMsg->ahandle; + pMsg->received++; - SChildTableObj *pTable = (SChildTableObj *)mnodeMsg->pTable; + SCTableObj *pTable = (SCTableObj *)pMsg->pTable; assert(pTable); - mInfo("app:%p:%p, table:%s, drop table rsp received, vgId:%d sid:%d uid:%" PRIu64 ", thandle:%p result:%s", - mnodeMsg->rpcMsg.ahandle, mnodeMsg, pTable->info.tableId, pTable->vgId, pTable->tid, pTable->uid, - mnodeMsg->rpcMsg.handle, tstrerror(rpcMsg->code)); + mInfo("msg:%p, app:%p table:%s, drop table rsp received, vgId:%d sid:%d uid:%" PRIu64 ", thandle:%p result:%s", pMsg, + pMsg->rpcMsg.ahandle, pTable->info.tableId, pTable->vgId, pTable->tid, pTable->uid, pMsg->rpcMsg.handle, + tstrerror(rpcMsg->code)); if (rpcMsg->code != TSDB_CODE_SUCCESS) { - mError("app:%p:%p, table:%s, failed to drop in dnode, vgId:%d sid:%d uid:%" PRIu64 ", reason:%s", - mnodeMsg->rpcMsg.ahandle, mnodeMsg, pTable->info.tableId, pTable->vgId, pTable->tid, pTable->uid, - tstrerror(rpcMsg->code)); - dnodeSendRpcMnodeWriteRsp(mnodeMsg, rpcMsg->code); + mError("msg:%p, app:%p table:%s, failed to drop in dnode, vgId:%d sid:%d uid:%" PRIu64 ", reason:%s", pMsg, + pMsg->rpcMsg.ahandle, pTable->info.tableId, pTable->vgId, pTable->tid, pTable->uid, tstrerror(rpcMsg->code)); + dnodeSendRpcMWriteRsp(pMsg, rpcMsg->code); return; } - if (mnodeMsg->pVgroup == NULL) mnodeMsg->pVgroup = mnodeGetVgroup(pTable->vgId); - if (mnodeMsg->pVgroup == NULL) { - mError("app:%p:%p, table:%s, failed to get vgroup", mnodeMsg->rpcMsg.ahandle, mnodeMsg, pTable->info.tableId); - dnodeSendRpcMnodeWriteRsp(mnodeMsg, TSDB_CODE_MND_VGROUP_NOT_EXIST); + if (pMsg->pVgroup == NULL) pMsg->pVgroup = mnodeGetVgroup(pTable->vgId); + if (pMsg->pVgroup == NULL) { + mError("msg:%p, app:%p table:%s, failed to get 
vgroup", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId); + dnodeSendRpcMWriteRsp(pMsg, TSDB_CODE_MND_VGROUP_NOT_EXIST); return; } - if (mnodeMsg->pVgroup->numOfTables <= 0) { - mInfo("app:%p:%p, vgId:%d, all tables is dropped, drop vgroup", mnodeMsg->rpcMsg.ahandle, mnodeMsg, - mnodeMsg->pVgroup->vgId); - mnodeDropVgroup(mnodeMsg->pVgroup, NULL); + if (pMsg->pVgroup->numOfTables <= 0) { + mInfo("msg:%p, app:%p vgId:%d, all tables is dropped, drop vgroup", pMsg, pMsg->rpcMsg.ahandle, + pMsg->pVgroup->vgId); + mnodeDropVgroup(pMsg->pVgroup, NULL); } - dnodeSendRpcMnodeWriteRsp(mnodeMsg, TSDB_CODE_SUCCESS); + dnodeSendRpcMWriteRsp(pMsg, TSDB_CODE_SUCCESS); } /* @@ -2378,63 +2384,70 @@ static void mnodeProcessDropChildTableRsp(SRpcMsg *rpcMsg) { static void mnodeProcessCreateChildTableRsp(SRpcMsg *rpcMsg) { if (rpcMsg->ahandle == NULL) return; - SMnodeMsg *mnodeMsg = rpcMsg->ahandle; - mnodeMsg->received++; + SMnodeMsg *pMsg = rpcMsg->ahandle; + pMsg->received++; - SChildTableObj *pTable = (SChildTableObj *)mnodeMsg->pTable; + SCTableObj *pTable = (SCTableObj *)pMsg->pTable; assert(pTable); // If the table is deleted by another thread during creation, stop creating and send drop msg to vnode if (sdbCheckRowDeleted(tsChildTableSdb, pTable)) { - mDebug("app:%p:%p, table:%s, create table rsp received, but a deleting opertion incoming, vgId:%d sid:%d uid:%" PRIu64, - mnodeMsg->rpcMsg.ahandle, mnodeMsg, pTable->info.tableId, pTable->vgId, pTable->tid, pTable->uid); + mDebug("msg:%p, app:%p table:%s, create table rsp received, but a deleting opertion incoming, vgId:%d sid:%d uid:%" PRIu64, + pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId, pTable->vgId, pTable->tid, pTable->uid); // if the vgroup is already dropped from hash, it can't be accquired by pTable->vgId // so the refCount of vgroup can not be decreased // SVgObj *pVgroup = mnodeGetVgroup(pTable->vgId); // if (pVgroup == NULL) { - // mnodeRemoveTableFromVgroup(mnodeMsg->pVgroup, pTable); + // mnodeRemoveTableFromVgroup(pMsg->pVgroup, pTable); // } // mnodeDecVgroupRef(pVgroup); - mnodeSendDropChildTableMsg(mnodeMsg, false); + mnodeSendDropChildTableMsg(pMsg, false); rpcMsg->code = TSDB_CODE_SUCCESS; - dnodeSendRpcMnodeWriteRsp(mnodeMsg, rpcMsg->code); + dnodeSendRpcMWriteRsp(pMsg, rpcMsg->code); return; } if (rpcMsg->code == TSDB_CODE_SUCCESS || rpcMsg->code == TSDB_CODE_TDB_TABLE_ALREADY_EXIST) { - SSdbOper desc = { - .type = SDB_OPER_GLOBAL, - .pObj = pTable, - .table = tsChildTableSdb, - .pMsg = mnodeMsg, - .writeCb = mnodeDoCreateChildTableCb + SSdbRow desc = { + .type = SDB_OPER_GLOBAL, + .pObj = pTable, + .pTable = tsChildTableSdb, + .pMsg = pMsg, + .fpRsp = mnodeDoCreateChildTableCb }; - int32_t code = sdbInsertRowImp(&desc); + int32_t code = sdbInsertRowToQueue(&desc); if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { - mnodeMsg->pTable = NULL; + pMsg->pTable = NULL; mnodeDestroyChildTable(pTable); - dnodeSendRpcMnodeWriteRsp(mnodeMsg, code); + dnodeSendRpcMWriteRsp(pMsg, code); } } else { - if (mnodeMsg->retry++ < 10) { - mDebug("app:%p:%p, table:%s, create table rsp received, need retry, times:%d vgId:%d sid:%d uid:%" PRIu64 + pMsg->retry++; + int32_t sec = taosGetTimestampSec(); + if (pMsg->retry < CREATE_CTABLE_RETRY_TIMES && ABS(sec - pMsg->incomingTs) < CREATE_CTABLE_RETRY_SEC) { + mDebug("msg:%p, app:%p table:%s, create table rsp received, need retry, times:%d vgId:%d sid:%d uid:%" PRIu64 " result:%s thandle:%p", - mnodeMsg->rpcMsg.ahandle, mnodeMsg, pTable->info.tableId, mnodeMsg->retry, 
pTable->vgId, pTable->tid, - pTable->uid, tstrerror(rpcMsg->code), mnodeMsg->rpcMsg.handle); + pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId, pMsg->retry, pTable->vgId, pTable->tid, pTable->uid, + tstrerror(rpcMsg->code), pMsg->rpcMsg.handle); - dnodeDelayReprocessMnodeWriteMsg(mnodeMsg); + dnodeDelayReprocessMWriteMsg(pMsg); } else { - mError("app:%p:%p, table:%s, failed to create in dnode, vgId:%d sid:%d uid:%" PRIu64 ", result:%s thandle:%p", - mnodeMsg->rpcMsg.ahandle, mnodeMsg, pTable->info.tableId, pTable->vgId, pTable->tid, pTable->uid, - tstrerror(rpcMsg->code), mnodeMsg->rpcMsg.handle); + mError("msg:%p, app:%p table:%s, failed to create in dnode, vgId:%d sid:%d uid:%" PRIu64 + ", result:%s thandle:%p incomingTs:%d curTs:%d retryTimes:%d", + pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId, pTable->vgId, pTable->tid, pTable->uid, + tstrerror(rpcMsg->code), pMsg->rpcMsg.handle, pMsg->incomingTs, sec, pMsg->retry); - SSdbOper oper = {.type = SDB_OPER_GLOBAL, .table = tsChildTableSdb, .pObj = pTable}; - sdbDeleteRow(&oper); + SSdbRow row = {.type = SDB_OPER_GLOBAL, .pTable = tsChildTableSdb, .pObj = pTable}; + sdbDeleteRow(&row); - dnodeSendRpcMnodeWriteRsp(mnodeMsg, rpcMsg->code); + if (rpcMsg->code == TSDB_CODE_APP_NOT_READY) { + //Avoid retry again in client + rpcMsg->code = TSDB_CODE_MND_VGROUP_NOT_READY; + } + dnodeSendRpcMWriteRsp(pMsg, rpcMsg->code); } } } @@ -2442,34 +2455,34 @@ static void mnodeProcessCreateChildTableRsp(SRpcMsg *rpcMsg) { static void mnodeProcessAlterTableRsp(SRpcMsg *rpcMsg) { if (rpcMsg->ahandle == NULL) return; - SMnodeMsg *mnodeMsg = rpcMsg->ahandle; - mnodeMsg->received++; + SMnodeMsg *pMsg = rpcMsg->ahandle; + pMsg->received++; - SChildTableObj *pTable = (SChildTableObj *)mnodeMsg->pTable; + SCTableObj *pTable = (SCTableObj *)pMsg->pTable; assert(pTable); if (rpcMsg->code == TSDB_CODE_SUCCESS || rpcMsg->code == TSDB_CODE_TDB_TABLE_ALREADY_EXIST) { - mDebug("app:%p:%p, ctable:%s, altered in dnode, thandle:%p result:%s", mnodeMsg->rpcMsg.ahandle, mnodeMsg, - pTable->info.tableId, mnodeMsg->rpcMsg.handle, tstrerror(rpcMsg->code)); + mDebug("msg:%p, app:%p ctable:%s, altered in dnode, thandle:%p result:%s", pMsg, pMsg->rpcMsg.ahandle, + pTable->info.tableId, pMsg->rpcMsg.handle, tstrerror(rpcMsg->code)); - dnodeSendRpcMnodeWriteRsp(mnodeMsg, TSDB_CODE_SUCCESS); + dnodeSendRpcMWriteRsp(pMsg, TSDB_CODE_SUCCESS); } else { - if (mnodeMsg->retry++ < 3) { - mDebug("app:%p:%p, table:%s, alter table rsp received, need retry, times:%d result:%s thandle:%p", - mnodeMsg->rpcMsg.ahandle, mnodeMsg, pTable->info.tableId, mnodeMsg->retry, tstrerror(rpcMsg->code), - mnodeMsg->rpcMsg.handle); + if (pMsg->retry++ < ALTER_CTABLE_RETRY_TIMES) { + mDebug("msg:%p, app:%p table:%s, alter table rsp received, need retry, times:%d result:%s thandle:%p", + pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId, pMsg->retry, tstrerror(rpcMsg->code), + pMsg->rpcMsg.handle); - dnodeDelayReprocessMnodeWriteMsg(mnodeMsg); + dnodeDelayReprocessMWriteMsg(pMsg); } else { - mError("app:%p:%p, table:%s, failed to alter in dnode, result:%s thandle:%p", mnodeMsg->rpcMsg.ahandle, mnodeMsg, - pTable->info.tableId, tstrerror(rpcMsg->code), mnodeMsg->rpcMsg.handle); - dnodeSendRpcMnodeWriteRsp(mnodeMsg, rpcMsg->code); + mError("msg:%p, app:%p table:%s, failed to alter in dnode, result:%s thandle:%p", pMsg, pMsg->rpcMsg.ahandle, + pTable->info.tableId, tstrerror(rpcMsg->code), pMsg->rpcMsg.handle); + dnodeSendRpcMWriteRsp(pMsg, rpcMsg->code); } } } static int32_t 
mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) { - SCMMultiTableInfoMsg *pInfo = pMsg->rpcMsg.pCont; + SMultiTableInfoMsg *pInfo = pMsg->rpcMsg.pCont; pInfo->numOfTables = htonl(pInfo->numOfTables); int32_t totalMallocLen = 4 * 1024 * 1024; // first malloc 4 MB, subsequent reallocation as twice @@ -2483,7 +2496,7 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) { for (int32_t t = 0; t < pInfo->numOfTables; ++t) { char * tableId = (char *)(pInfo->tableIds + t * TSDB_TABLE_FNAME_LEN); - SChildTableObj *pTable = mnodeGetChildTable(tableId); + SCTableObj *pTable = mnodeGetChildTable(tableId); if (pTable == NULL) continue; if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDbByTableId(tableId); @@ -2607,7 +2620,7 @@ static int32_t mnodeRetrieveShowTables(SShowObj *pShow, char *data, int32_t rows int32_t cols = 0; int32_t numOfRows = 0; - SChildTableObj *pTable = NULL; + SCTableObj *pTable = NULL; SPatternCompareInfo info = PATTERN_COMPARE_INFO_INITIALIZER; char prefix[64] = {0}; @@ -2705,13 +2718,13 @@ static int32_t mnodeRetrieveShowTables(SShowObj *pShow, char *data, int32_t rows } static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) { - SCMAlterTableMsg *pAlter = pMsg->rpcMsg.pCont; - mDebug("app:%p:%p, table:%s, alter table msg is received from thandle:%p", pMsg->rpcMsg.ahandle, pMsg, + SAlterTableMsg *pAlter = pMsg->rpcMsg.pCont; + mDebug("msg:%p, app:%p table:%s, alter table msg is received from thandle:%p", pMsg, pMsg->rpcMsg.ahandle, pAlter->tableId, pMsg->rpcMsg.handle); if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDbByTableId(pAlter->tableId); if (pMsg->pDb == NULL) { - mError("app:%p:%p, table:%s, failed to alter table, db not selected", pMsg->rpcMsg.ahandle, pMsg, pAlter->tableId); + mError("msg:%p, app:%p table:%s, failed to alter table, db not selected", pMsg, pMsg->rpcMsg.ahandle, pAlter->tableId); return TSDB_CODE_MND_DB_NOT_SELECTED; } @@ -2721,13 +2734,13 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) { } if (mnodeCheckIsMonitorDB(pMsg->pDb->name, tsMonitorDbName)) { - mError("app:%p:%p, table:%s, failed to alter table, its log db", pMsg->rpcMsg.ahandle, pMsg, pAlter->tableId); + mError("msg:%p, app:%p table:%s, failed to alter table, its log db", pMsg, pMsg->rpcMsg.ahandle, pAlter->tableId); return TSDB_CODE_MND_MONITOR_DB_FORBIDDEN; } if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pAlter->tableId); if (pMsg->pTable == NULL) { - mError("app:%p:%p, table:%s, failed to alter table, table not exist", pMsg->rpcMsg.ahandle, pMsg, pAlter->tableId); + mError("msg:%p, app:%p table:%s, failed to alter table, table not exist", pMsg, pMsg->rpcMsg.ahandle, pAlter->tableId); return TSDB_CODE_MND_INVALID_TABLE_NAME; } @@ -2736,7 +2749,7 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) { pAlter->tagValLen = htonl(pAlter->tagValLen); if (pAlter->numOfCols > 2) { - mError("app:%p:%p, table:%s, error numOfCols:%d in alter table", pMsg->rpcMsg.ahandle, pMsg, pAlter->tableId, + mError("msg:%p, app:%p table:%s, error numOfCols:%d in alter table", pMsg, pMsg->rpcMsg.ahandle, pAlter->tableId, pAlter->numOfCols); return TSDB_CODE_MND_APP_ERROR; } @@ -2747,7 +2760,7 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) { int32_t code = TSDB_CODE_COM_OPS_NOT_SUPPORT; if (pMsg->pTable->type == TSDB_SUPER_TABLE) { - mDebug("app:%p:%p, table:%s, start to alter stable", pMsg->rpcMsg.ahandle, pMsg, pAlter->tableId); + mDebug("msg:%p, app:%p table:%s, start to alter stable", pMsg, pMsg->rpcMsg.ahandle, pAlter->tableId); if (pAlter->type == 
TSDB_ALTER_TABLE_ADD_TAG_COLUMN) { code = mnodeAddSuperTableTag(pMsg, pAlter->schema, 1); } else if (pAlter->type == TSDB_ALTER_TABLE_DROP_TAG_COLUMN) { @@ -2763,7 +2776,7 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) { } else { } } else { - mDebug("app:%p:%p, table:%s, start to alter ctable", pMsg->rpcMsg.ahandle, pMsg, pAlter->tableId); + mDebug("msg:%p, app:%p table:%s, start to alter ctable", pMsg, pMsg->rpcMsg.ahandle, pAlter->tableId); if (pAlter->type == TSDB_ALTER_TABLE_UPDATE_TAG_VAL) { return TSDB_CODE_COM_OPS_NOT_SUPPORT; } else if (pAlter->type == TSDB_ALTER_TABLE_ADD_COLUMN) { @@ -2843,7 +2856,7 @@ static int32_t mnodeRetrieveStreamTables(SShowObj *pShow, char *data, int32_t ro } int32_t numOfRows = 0; - SChildTableObj *pTable = NULL; + SCTableObj *pTable = NULL; SPatternCompareInfo info = PATTERN_COMPARE_INFO_INITIALIZER; char prefix[64] = {0}; diff --git a/src/mnode/src/mnodeUser.c b/src/mnode/src/mnodeUser.c index 779e25254897ff37f1ca884e83469233d7197b8c..dc76d92eb8c2c01396705b33cc9ea55ccc4275df 100644 --- a/src/mnode/src/mnodeUser.c +++ b/src/mnode/src/mnodeUser.c @@ -42,13 +42,13 @@ static int32_t mnodeProcessAlterUserMsg(SMnodeMsg *pMsg); static int32_t mnodeProcessDropUserMsg(SMnodeMsg *pMsg); static int32_t mnodeProcessAuthMsg(SMnodeMsg *pMsg); -static int32_t mnodeUserActionDestroy(SSdbOper *pOper) { - taosTFree(pOper->pObj); +static int32_t mnodeUserActionDestroy(SSdbRow *pRow) { + tfree(pRow->pObj); return TSDB_CODE_SUCCESS; } -static int32_t mnodeUserActionInsert(SSdbOper *pOper) { - SUserObj *pUser = pOper->pObj; +static int32_t mnodeUserActionInsert(SSdbRow *pRow) { + SUserObj *pUser = pRow->pObj; SAcctObj *pAcct = mnodeGetAcct(pUser->acct); if (pAcct != NULL) { @@ -62,8 +62,8 @@ static int32_t mnodeUserActionInsert(SSdbOper *pOper) { return TSDB_CODE_SUCCESS; } -static int32_t mnodeUserActionDelete(SSdbOper *pOper) { - SUserObj *pUser = pOper->pObj; +static int32_t mnodeUserActionDelete(SSdbRow *pRow) { + SUserObj *pUser = pRow->pObj; SAcctObj *pAcct = mnodeGetAcct(pUser->acct); if (pAcct != NULL) { @@ -74,8 +74,8 @@ static int32_t mnodeUserActionDelete(SSdbOper *pOper) { return TSDB_CODE_SUCCESS; } -static int32_t mnodeUserActionUpdate(SSdbOper *pOper) { - SUserObj *pUser = pOper->pObj; +static int32_t mnodeUserActionUpdate(SSdbRow *pRow) { + SUserObj *pUser = pRow->pObj; SUserObj *pSaved = mnodeGetUser(pUser->user); if (pUser != pSaved) { memcpy(pSaved, pUser, tsUserUpdateSize); @@ -85,19 +85,19 @@ static int32_t mnodeUserActionUpdate(SSdbOper *pOper) { return TSDB_CODE_SUCCESS; } -static int32_t mnodeUserActionEncode(SSdbOper *pOper) { - SUserObj *pUser = pOper->pObj; - memcpy(pOper->rowData, pUser, tsUserUpdateSize); - pOper->rowSize = tsUserUpdateSize; +static int32_t mnodeUserActionEncode(SSdbRow *pRow) { + SUserObj *pUser = pRow->pObj; + memcpy(pRow->rowData, pUser, tsUserUpdateSize); + pRow->rowSize = tsUserUpdateSize; return TSDB_CODE_SUCCESS; } -static int32_t mnodeUserActionDecode(SSdbOper *pOper) { +static int32_t mnodeUserActionDecode(SSdbRow *pRow) { SUserObj *pUser = (SUserObj *)calloc(1, sizeof(SUserObj)); if (pUser == NULL) return TSDB_CODE_MND_OUT_OF_MEMORY; - memcpy(pUser, pOper->rowData, tsUserUpdateSize); - pOper->pObj = pUser; + memcpy(pUser, pRow->rowData, tsUserUpdateSize); + pRow->pObj = pUser; return TSDB_CODE_SUCCESS; } @@ -150,25 +150,25 @@ int32_t mnodeInitUsers() { SUserObj tObj; tsUserUpdateSize = (int8_t *)tObj.updateEnd - (int8_t *)&tObj; - SSdbTableDesc tableDesc = { - .tableId = SDB_TABLE_USER, - .tableName = 
"users", + SSdbTableDesc desc = { + .id = SDB_TABLE_USER, + .name = "users", .hashSessions = TSDB_DEFAULT_USERS_HASH_SIZE, .maxRowSize = tsUserUpdateSize, .refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj, .keyType = SDB_KEY_STRING, - .insertFp = mnodeUserActionInsert, - .deleteFp = mnodeUserActionDelete, - .updateFp = mnodeUserActionUpdate, - .encodeFp = mnodeUserActionEncode, - .decodeFp = mnodeUserActionDecode, - .destroyFp = mnodeUserActionDestroy, - .restoredFp = mnodeUserActionRestored + .fpInsert = mnodeUserActionInsert, + .fpDelete = mnodeUserActionDelete, + .fpUpdate = mnodeUserActionUpdate, + .fpEncode = mnodeUserActionEncode, + .fpDecode = mnodeUserActionDecode, + .fpDestroy = mnodeUserActionDestroy, + .fpRestored = mnodeUserActionRestored }; - tsUserSdb = sdbOpenTable(&tableDesc); + tsUserSdb = sdbOpenTable(&desc); if (tsUserSdb == NULL) { - mError("table:%s, failed to create hash", tableDesc.tableName); + mError("table:%s, failed to create hash", desc.name); return -1; } @@ -179,7 +179,7 @@ int32_t mnodeInitUsers() { mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_USER, mnodeRetrieveUsers); mnodeAddPeerMsgHandle(TSDB_MSG_TYPE_DM_AUTH, mnodeProcessAuthMsg); - mDebug("table:%s, hash is created", tableDesc.tableName); + mDebug("table:%s, hash is created", desc.name); return 0; } @@ -205,14 +205,14 @@ void mnodeDecUserRef(SUserObj *pUser) { } static int32_t mnodeUpdateUser(SUserObj *pUser, void *pMsg) { - SSdbOper oper = { - .type = SDB_OPER_GLOBAL, - .table = tsUserSdb, - .pObj = pUser, - .pMsg = pMsg + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsUserSdb, + .pObj = pUser, + .pMsg = pMsg }; - int32_t code = sdbUpdateRow(&oper); + int32_t code = sdbUpdateRow(&row); if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { mError("user:%s, failed to alter by %s, reason:%s", pUser->user, mnodeGetUserFromMsg(pMsg), tstrerror(code)); } else { @@ -259,18 +259,18 @@ int32_t mnodeCreateUser(SAcctObj *pAcct, char *name, char *pass, void *pMsg) { pUser->superAuth = 1; } - SSdbOper oper = { - .type = SDB_OPER_GLOBAL, - .table = tsUserSdb, - .pObj = pUser, - .rowSize = sizeof(SUserObj), - .pMsg = pMsg + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsUserSdb, + .pObj = pUser, + .rowSize = sizeof(SUserObj), + .pMsg = pMsg }; - code = sdbInsertRow(&oper); + code = sdbInsertRow(&row); if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { mError("user:%s, failed to create by %s, reason:%s", pUser->user, mnodeGetUserFromMsg(pMsg), tstrerror(code)); - taosTFree(pUser); + tfree(pUser); } else { mLInfo("user:%s, is created by %s", pUser->user, mnodeGetUserFromMsg(pMsg)); } @@ -279,14 +279,14 @@ int32_t mnodeCreateUser(SAcctObj *pAcct, char *name, char *pass, void *pMsg) { } static int32_t mnodeDropUser(SUserObj *pUser, void *pMsg) { - SSdbOper oper = { - .type = SDB_OPER_GLOBAL, - .table = tsUserSdb, - .pObj = pUser, - .pMsg = pMsg + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsUserSdb, + .pObj = pUser, + .pMsg = pMsg }; - int32_t code = sdbDeleteRow(&oper); + int32_t code = sdbDeleteRow(&row); if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { mError("user:%s, failed to drop by %s, reason:%s", pUser->user, mnodeGetUserFromMsg(pMsg), tstrerror(code)); } else { @@ -414,7 +414,7 @@ static int32_t mnodeProcessCreateUserMsg(SMnodeMsg *pMsg) { SUserObj *pOperUser = pMsg->pUser; if (pOperUser->superAuth) { - SCMCreateUserMsg *pCreate = pMsg->rpcMsg.pCont; + SCreateUserMsg *pCreate = 
pMsg->rpcMsg.pCont; return mnodeCreateUser(pOperUser->pAcct, pCreate->user, pCreate->pass, pMsg); } else { mError("user:%s, no rights to create user", pOperUser->user); @@ -426,7 +426,7 @@ static int32_t mnodeProcessAlterUserMsg(SMnodeMsg *pMsg) { int32_t code; SUserObj *pOperUser = pMsg->pUser; - SCMAlterUserMsg *pAlter = pMsg->rpcMsg.pCont; + SAlterUserMsg *pAlter = pMsg->rpcMsg.pCont; SUserObj *pUser = mnodeGetUser(pAlter->user); if (pUser == NULL) { return TSDB_CODE_MND_INVALID_USER; @@ -514,7 +514,7 @@ static int32_t mnodeProcessDropUserMsg(SMnodeMsg *pMsg) { int32_t code; SUserObj *pOperUser = pMsg->pUser; - SCMDropUserMsg *pDrop = pMsg->rpcMsg.pCont; + SDropUserMsg *pDrop = pMsg->rpcMsg.pCont; SUserObj *pUser = mnodeGetUser(pDrop->user); if (pUser == NULL) { return TSDB_CODE_MND_INVALID_USER; @@ -562,12 +562,12 @@ void mnodeDropAllUsers(SAcctObj *pAcct) { if (pUser == NULL) break; if (strncmp(pUser->acct, pAcct->user, acctNameLen) == 0) { - SSdbOper oper = { - .type = SDB_OPER_LOCAL, - .table = tsUserSdb, - .pObj = pUser, + SSdbRow row = { + .type = SDB_OPER_LOCAL, + .pTable = tsUserSdb, + .pObj = pUser, }; - sdbDeleteRow(&oper); + sdbDeleteRow(&row); numOfUsers++; } @@ -604,11 +604,11 @@ int32_t mnodeRetriveAuth(char *user, char *spi, char *encrypt, char *secret, cha } static int32_t mnodeProcessAuthMsg(SMnodeMsg *pMsg) { - SDMAuthMsg *pAuthMsg = pMsg->rpcMsg.pCont; - SDMAuthRsp *pAuthRsp = rpcMallocCont(sizeof(SDMAuthRsp)); + SAuthMsg *pAuthMsg = pMsg->rpcMsg.pCont; + SAuthRsp *pAuthRsp = rpcMallocCont(sizeof(SAuthRsp)); pMsg->rpcRsp.rsp = pAuthRsp; - pMsg->rpcRsp.len = sizeof(SDMAuthRsp); + pMsg->rpcRsp.len = sizeof(SAuthRsp); return mnodeRetriveAuth(pAuthMsg->user, &pAuthRsp->spi, &pAuthRsp->encrypt, pAuthRsp->secret, pAuthRsp->ckey); } diff --git a/src/mnode/src/mnodeVgroup.c b/src/mnode/src/mnodeVgroup.c index 5084f1276ae883289939bd74c50a0502f6b3c71d..f9a49e5ec20d611415023535f3ba2e8fce311054 100644 --- a/src/mnode/src/mnodeVgroup.c +++ b/src/mnode/src/mnodeVgroup.c @@ -69,16 +69,16 @@ static void mnodeDestroyVgroup(SVgObj *pVgroup) { pVgroup->idPool = NULL; } - taosTFree(pVgroup); + tfree(pVgroup); } -static int32_t mnodeVgroupActionDestroy(SSdbOper *pOper) { - mnodeDestroyVgroup(pOper->pObj); +static int32_t mnodeVgroupActionDestroy(SSdbRow *pRow) { + mnodeDestroyVgroup(pRow->pObj); return TSDB_CODE_SUCCESS; } -static int32_t mnodeVgroupActionInsert(SSdbOper *pOper) { - SVgObj *pVgroup = pOper->pObj; +static int32_t mnodeVgroupActionInsert(SSdbRow *pRow) { + SVgObj *pVgroup = pRow->pObj; // refer to db SDbObj *pDb = mnodeGetDb(pVgroup->dbName); @@ -115,8 +115,8 @@ static int32_t mnodeVgroupActionInsert(SSdbOper *pOper) { return TSDB_CODE_SUCCESS; } -static int32_t mnodeVgroupActionDelete(SSdbOper *pOper) { - SVgObj *pVgroup = pOper->pObj; +static int32_t mnodeVgroupActionDelete(SSdbRow *pRow) { + SVgObj *pVgroup = pRow->pObj; if (pVgroup->pDb == NULL) { mError("vgId:%d, db:%s is not exist while insert into hash", pVgroup->vgId, pVgroup->dbName); @@ -137,8 +137,8 @@ static int32_t mnodeVgroupActionDelete(SSdbOper *pOper) { return TSDB_CODE_SUCCESS; } -static int32_t mnodeVgroupActionUpdate(SSdbOper *pOper) { - SVgObj *pNew = pOper->pObj; +static int32_t mnodeVgroupActionUpdate(SSdbRow *pRow) { + SVgObj *pNew = pRow->pObj; SVgObj *pVgroup = mnodeGetVgroup(pNew->vgId); if (pVgroup != pNew) { @@ -176,25 +176,25 @@ static int32_t mnodeVgroupActionUpdate(SSdbOper *pOper) { return TSDB_CODE_SUCCESS; } -static int32_t mnodeVgroupActionEncode(SSdbOper *pOper) { - SVgObj *pVgroup = 
pOper->pObj; - memcpy(pOper->rowData, pVgroup, tsVgUpdateSize); - SVgObj *pTmpVgroup = pOper->rowData; +static int32_t mnodeVgroupActionEncode(SSdbRow *pRow) { + SVgObj *pVgroup = pRow->pObj; + memcpy(pRow->rowData, pVgroup, tsVgUpdateSize); + SVgObj *pTmpVgroup = pRow->rowData; for (int32_t i = 0; i < TSDB_MAX_REPLICA; ++i) { pTmpVgroup->vnodeGid[i].pDnode = NULL; pTmpVgroup->vnodeGid[i].role = 0; } - pOper->rowSize = tsVgUpdateSize; + pRow->rowSize = tsVgUpdateSize; return TSDB_CODE_SUCCESS; } -static int32_t mnodeVgroupActionDecode(SSdbOper *pOper) { +static int32_t mnodeVgroupActionDecode(SSdbRow *pRow) { SVgObj *pVgroup = (SVgObj *) calloc(1, sizeof(SVgObj)); if (pVgroup == NULL) return TSDB_CODE_MND_OUT_OF_MEMORY; - memcpy(pVgroup, pOper->rowData, tsVgUpdateSize); - pOper->pObj = pVgroup; + memcpy(pVgroup, pRow->rowData, tsVgUpdateSize); + pRow->pObj = pVgroup; return TSDB_CODE_SUCCESS; } @@ -206,23 +206,23 @@ int32_t mnodeInitVgroups() { SVgObj tObj; tsVgUpdateSize = (int8_t *)tObj.updateEnd - (int8_t *)&tObj; - SSdbTableDesc tableDesc = { - .tableId = SDB_TABLE_VGROUP, - .tableName = "vgroups", + SSdbTableDesc desc = { + .id = SDB_TABLE_VGROUP, + .name = "vgroups", .hashSessions = TSDB_DEFAULT_VGROUPS_HASH_SIZE, .maxRowSize = tsVgUpdateSize, .refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj, .keyType = SDB_KEY_AUTO, - .insertFp = mnodeVgroupActionInsert, - .deleteFp = mnodeVgroupActionDelete, - .updateFp = mnodeVgroupActionUpdate, - .encodeFp = mnodeVgroupActionEncode, - .decodeFp = mnodeVgroupActionDecode, - .destroyFp = mnodeVgroupActionDestroy, - .restoredFp = mnodeVgroupActionRestored, + .fpInsert = mnodeVgroupActionInsert, + .fpDelete = mnodeVgroupActionDelete, + .fpUpdate = mnodeVgroupActionUpdate, + .fpEncode = mnodeVgroupActionEncode, + .fpDecode = mnodeVgroupActionDecode, + .fpDestroy = mnodeVgroupActionDestroy, + .fpRestored = mnodeVgroupActionRestored, }; - tsVgroupSdb = sdbOpenTable(&tableDesc); + tsVgroupSdb = sdbOpenTable(&desc); if (tsVgroupSdb == NULL) { mError("failed to init vgroups data"); return -1; @@ -253,13 +253,13 @@ SVgObj *mnodeGetVgroup(int32_t vgId) { } void mnodeUpdateVgroup(SVgObj *pVgroup) { - SSdbOper oper = { - .type = SDB_OPER_GLOBAL, - .table = tsVgroupSdb, - .pObj = pVgroup + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsVgroupSdb, + .pObj = pVgroup }; - int32_t code = sdbUpdateRow(&oper); + int32_t code = sdbUpdateRow(&row); if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { mError("vgId:%d, failed to update vgroup", pVgroup->vgId); } @@ -421,7 +421,7 @@ int32_t mnodeGetAvailableVgroup(SMnodeMsg *pMsg, SVgObj **ppVgroup, int32_t *pSi int32_t sid = taosAllocateId(pVgroup->idPool); if (sid <= 0) { - mDebug("app:%p:%p, db:%s, no enough sid in vgId:%d", pMsg->rpcMsg.ahandle, pMsg, pDb->name, pVgroup->vgId); + mDebug("msg:%p, app:%p db:%s, no enough sid in vgId:%d", pMsg, pMsg->rpcMsg.ahandle, pDb->name, pVgroup->vgId); continue; } @@ -442,8 +442,8 @@ int32_t mnodeGetAvailableVgroup(SMnodeMsg *pMsg, SVgObj **ppVgroup, int32_t *pSi int32_t code = TSDB_CODE_MND_NO_ENOUGH_DNODES; if (pDb->numOfVgroups < maxVgroupsPerDb) { - mDebug("app:%p:%p, db:%s, try to create a new vgroup, numOfVgroups:%d maxVgroupsPerDb:%d", pMsg->rpcMsg.ahandle, - pMsg, pDb->name, pDb->numOfVgroups, maxVgroupsPerDb); + mDebug("msg:%p, app:%p db:%s, try to create a new vgroup, numOfVgroups:%d maxVgroupsPerDb:%d", pMsg, + pMsg->rpcMsg.ahandle, pDb->name, pDb->numOfVgroups, maxVgroupsPerDb); pthread_mutex_unlock(&pDb->mutex); code = 
mnodeCreateVgroup(pMsg); if (code == TSDB_CODE_MND_ACTION_IN_PROGRESS) { @@ -455,8 +455,8 @@ int32_t mnodeGetAvailableVgroup(SMnodeMsg *pMsg, SVgObj **ppVgroup, int32_t *pSi if (pDb->numOfVgroups < 1) { pthread_mutex_unlock(&pDb->mutex); - mDebug("app:%p:%p, db:%s, failed create new vgroup since:%s, numOfVgroups:%d maxVgroupsPerDb:%d ", - pMsg->rpcMsg.ahandle, pMsg, pDb->name, tstrerror(code), pDb->numOfVgroups, maxVgroupsPerDb); + mDebug("msg:%p, app:%p db:%s, failed create new vgroup since:%s, numOfVgroups:%d maxVgroupsPerDb:%d ", pMsg, + pMsg->rpcMsg.ahandle, pDb->name, tstrerror(code), pDb->numOfVgroups, maxVgroupsPerDb); return code; } @@ -474,7 +474,7 @@ int32_t mnodeGetAvailableVgroup(SMnodeMsg *pMsg, SVgObj **ppVgroup, int32_t *pSi int32_t sid = taosAllocateId(pVgroup->idPool); if (sid <= 0) { - mError("app:%p:%p, db:%s, no enough sid in vgId:%d", pMsg->rpcMsg.ahandle, pMsg, pDb->name, pVgroup->vgId); + mError("msg:%p, app:%p db:%s, no enough sid in vgId:%d", pMsg, pMsg->rpcMsg.ahandle, pDb->name, pVgroup->vgId); pthread_mutex_unlock(&pDb->mutex); return TSDB_CODE_MND_NO_ENOUGH_DNODES; } @@ -496,10 +496,10 @@ static int32_t mnodeCreateVgroupFp(SMnodeMsg *pMsg) { SDbObj *pDb = pMsg->pDb; assert(pVgroup); - mInfo("app:%p:%p, vgId:%d, is created in mnode, db:%s replica:%d", pMsg->rpcMsg.ahandle, pMsg, pVgroup->vgId, + mInfo("msg:%p, app:%p vgId:%d, is created in mnode, db:%s replica:%d", pMsg, pMsg->rpcMsg.ahandle, pVgroup->vgId, pDb->name, pVgroup->numOfVnodes); for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) { - mInfo("app:%p:%p, vgId:%d, index:%d, dnode:%d", pMsg->rpcMsg.ahandle, pMsg, pVgroup->vgId, i, + mInfo("msg:%p, app:%p vgId:%d, index:%d, dnode:%d", pMsg, pMsg->rpcMsg.ahandle, pVgroup->vgId, i, pVgroup->vnodeGid[i].dnodeId); } @@ -517,30 +517,30 @@ static int32_t mnodeCreateVgroupCb(SMnodeMsg *pMsg, int32_t code) { assert(pVgroup); if (code != TSDB_CODE_SUCCESS) { - mError("app:%p:%p, vgId:%d, failed to create in sdb, reason:%s", pMsg->rpcMsg.ahandle, pMsg, pVgroup->vgId, + mError("msg:%p, app:%p vgId:%d, failed to create in sdb, reason:%s", pMsg, pMsg->rpcMsg.ahandle, pVgroup->vgId, tstrerror(code)); - SSdbOper desc = {.type = SDB_OPER_GLOBAL, .pObj = pVgroup, .table = tsVgroupSdb}; + SSdbRow desc = {.type = SDB_OPER_GLOBAL, .pObj = pVgroup, .pTable = tsVgroupSdb}; sdbDeleteRow(&desc); return code; } else { - mInfo("app:%p:%p, vgId:%d, is created in sdb, db:%s replica:%d", pMsg->rpcMsg.ahandle, pMsg, pVgroup->vgId, - pDb->name, pVgroup->numOfVnodes); + mInfo("msg:%p, app:%p vgId:%d, is created in sdb, db:%s replica:%d", pMsg, pMsg->rpcMsg.ahandle, pVgroup->vgId, + pDb->name, pVgroup->numOfVnodes); pVgroup->status = TAOS_VG_STATUS_READY; - SSdbOper desc = {.type = SDB_OPER_GLOBAL, .pObj = pVgroup, .table = tsVgroupSdb}; + SSdbRow desc = {.type = SDB_OPER_GLOBAL, .pObj = pVgroup, .pTable = tsVgroupSdb}; (void)sdbUpdateRow(&desc); - dnodeReprocessMnodeWriteMsg(pMsg); + dnodeReprocessMWriteMsg(pMsg); return TSDB_CODE_MND_ACTION_IN_PROGRESS; // if (pVgroup->status == TAOS_VG_STATUS_CREATING || pVgroup->status == TAOS_VG_STATUS_READY) { - // mInfo("app:%p:%p, vgId:%d, is created in sdb, db:%s replica:%d", pMsg->rpcMsg.ahandle, pMsg, pVgroup->vgId, + // mInfo("msg:%p, app:%p vgId:%d, is created in sdb, db:%s replica:%d", pMsg, pMsg->rpcMsg.ahandle, pVgroup->vgId, // pDb->name, pVgroup->numOfVnodes); // pVgroup->status = TAOS_VG_STATUS_READY; - // SSdbOper desc = {.type = SDB_OPER_GLOBAL, .pObj = pVgroup, .table = tsVgroupSdb}; + // SSdbRow desc = {.type = SDB_OPER_GLOBAL, .pObj 
= pVgroup, .pTable = tsVgroupSdb}; // (void)sdbUpdateRow(&desc); - // dnodeReprocessMnodeWriteMsg(pMsg); + // dnodeReprocessMWriteMsg(pMsg); // return TSDB_CODE_MND_ACTION_IN_PROGRESS; // } else { - // mError("app:%p:%p, vgId:%d, is created in sdb, db:%s replica:%d, but vgroup is dropping", pMsg->rpcMsg.ahandle, + // mError("msg:%p, app:%p vgId:%d, is created in sdb, db:%s replica:%d, but vgroup is dropping", pMsg->rpcMsg.ahandle, // pMsg, pVgroup->vgId, pDb->name, pVgroup->numOfVnodes); // return TSDB_CODE_MND_VGROUP_NOT_EXIST; // } @@ -571,16 +571,16 @@ int32_t mnodeCreateVgroup(SMnodeMsg *pMsg) { pMsg->pVgroup = pVgroup; mnodeIncVgroupRef(pVgroup); - SSdbOper oper = { - .type = SDB_OPER_GLOBAL, - .table = tsVgroupSdb, - .pObj = pVgroup, - .rowSize = sizeof(SVgObj), - .pMsg = pMsg, - .reqFp = mnodeCreateVgroupFp + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsVgroupSdb, + .pObj = pVgroup, + .rowSize = sizeof(SVgObj), + .pMsg = pMsg, + .fpReq = mnodeCreateVgroupFp }; - code = sdbInsertRow(&oper); + code = sdbInsertRow(&row); if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { pMsg->pVgroup = NULL; mnodeDestroyVgroup(pVgroup); @@ -595,12 +595,12 @@ void mnodeDropVgroup(SVgObj *pVgroup, void *ahandle) { } else { mDebug("vgId:%d, replica:%d is deleting from sdb", pVgroup->vgId, pVgroup->numOfVnodes); mnodeSendDropVgroupMsg(pVgroup, NULL); - SSdbOper oper = { - .type = SDB_OPER_GLOBAL, - .table = tsVgroupSdb, - .pObj = pVgroup + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsVgroupSdb, + .pObj = pVgroup }; - sdbDeleteRow(&oper); + sdbDeleteRow(&row); } } @@ -663,13 +663,13 @@ static int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *p for (int32_t i = 0; i < pShow->maxReplica; ++i) { pShow->bytes[cols] = 2; pSchema[cols].type = TSDB_DATA_TYPE_SMALLINT; - snprintf(pSchema[cols].name, TSDB_COL_NAME_LEN, "dnode%d", i + 1); + snprintf(pSchema[cols].name, TSDB_COL_NAME_LEN, "v%dDnode", i + 1); pSchema[cols].bytes = htons(pShow->bytes[cols]); cols++; pShow->bytes[cols] = 9 + VARSTR_HEADER_SIZE; pSchema[cols].type = TSDB_DATA_TYPE_BINARY; - snprintf(pSchema[cols].name, TSDB_COL_NAME_LEN, "v%dstatus", i + 1); + snprintf(pSchema[cols].name, TSDB_COL_NAME_LEN, "v%dStatus", i + 1); pSchema[cols].bytes = htons(pShow->bytes[cols]); cols++; } @@ -694,7 +694,7 @@ static bool mnodeFilterVgroups(SVgObj *pVgroup, STableObj *pTable) { return true; } - SChildTableObj *pCTable = (SChildTableObj *)pTable; + SCTableObj *pCTable = (SCTableObj *)pTable; if (pVgroup->vgId == pCTable->vgId) { return true; } else { @@ -770,7 +770,7 @@ static int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, v SDnodeObj * pDnode = pVgroup->vnodeGid[i].pDnode; const char *role = "NULL"; if (pDnode != NULL) { - role = mnodeGetMnodeRoleStr(pVgroup->vnodeGid[i].role); + role = syncRole[pVgroup->vnodeGid[i].role]; } pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; @@ -791,7 +791,7 @@ static int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, v return numOfRows; } -void mnodeAddTableIntoVgroup(SVgObj *pVgroup, SChildTableObj *pTable) { +void mnodeAddTableIntoVgroup(SVgObj *pVgroup, SCTableObj *pTable) { int32_t idPoolSize = taosIdPoolMaxSize(pVgroup->idPool); if (pTable->tid > idPoolSize) { mnodeAllocVgroupIdPool(pVgroup); @@ -807,7 +807,7 @@ void mnodeAddTableIntoVgroup(SVgObj *pVgroup, SChildTableObj *pTable) { } } -void mnodeRemoveTableFromVgroup(SVgObj *pVgroup, SChildTableObj *pTable) { +void 
mnodeRemoveTableFromVgroup(SVgObj *pVgroup, SCTableObj *pTable) { if (pTable->tid >= 1) { taosFreeId(pVgroup->idPool, pTable->tid); pVgroup->numOfTables--; @@ -818,11 +818,11 @@ void mnodeRemoveTableFromVgroup(SVgObj *pVgroup, SChildTableObj *pTable) { } } -static SMDCreateVnodeMsg *mnodeBuildVnodeMsg(SVgObj *pVgroup) { +static SCreateVnodeMsg *mnodeBuildVnodeMsg(SVgObj *pVgroup) { SDbObj *pDb = pVgroup->pDb; if (pDb == NULL) return NULL; - SMDCreateVnodeMsg *pVnode = rpcMallocCont(sizeof(SMDCreateVnodeMsg)); + SCreateVnodeMsg *pVnode = rpcMallocCont(sizeof(SCreateVnodeMsg)); if (pVnode == NULL) return NULL; strcpy(pVnode->db, pVgroup->dbName); @@ -830,7 +830,7 @@ static SMDCreateVnodeMsg *mnodeBuildVnodeMsg(SVgObj *pVgroup) { //TODO: dynamic alloc tables in tsdb maxTables = MAX(10000, tsMaxTablePerVnode); - SMDVnodeCfg *pCfg = &pVnode->cfg; + SVnodeCfg *pCfg = &pVnode->cfg; pCfg->vgId = htonl(pVgroup->vgId); pCfg->cfgVersion = htonl(pDb->cfgVersion); pCfg->cacheBlockSize = htonl(pDb->cfg.cacheBlockSize); @@ -850,8 +850,9 @@ static SMDCreateVnodeMsg *mnodeBuildVnodeMsg(SVgObj *pVgroup) { pCfg->replications = (int8_t) pVgroup->numOfVnodes; pCfg->wals = 3; pCfg->quorum = pDb->cfg.quorum; + pCfg->update = pDb->cfg.update; - SMDVnodeDesc *pNodes = pVnode->nodes; + SVnodeDesc *pNodes = pVnode->nodes; for (int32_t j = 0; j < pVgroup->numOfVnodes; ++j) { SDnodeObj *pDnode = pVgroup->vnodeGid[j].pDnode; if (pDnode != NULL) { @@ -886,11 +887,11 @@ SRpcEpSet mnodeGetEpSetFromIp(char *ep) { } static void mnodeSendAlterVnodeMsg(SVgObj *pVgroup, SRpcEpSet *epSet) { - SMDAlterVnodeMsg *pAlter = mnodeBuildVnodeMsg(pVgroup); + SAlterVnodeMsg *pAlter = mnodeBuildVnodeMsg(pVgroup); SRpcMsg rpcMsg = { .ahandle = NULL, .pCont = pAlter, - .contLen = pAlter ? sizeof(SMDAlterVnodeMsg) : 0, + .contLen = pAlter ? sizeof(SAlterVnodeMsg) : 0, .code = 0, .msgType = TSDB_MSG_TYPE_MD_ALTER_VNODE }; @@ -909,11 +910,11 @@ void mnodeSendAlterVgroupMsg(SVgObj *pVgroup) { } static void mnodeSendCreateVnodeMsg(SVgObj *pVgroup, SRpcEpSet *epSet, void *ahandle) { - SMDCreateVnodeMsg *pCreate = mnodeBuildVnodeMsg(pVgroup); + SCreateVnodeMsg *pCreate = mnodeBuildVnodeMsg(pVgroup); SRpcMsg rpcMsg = { .ahandle = ahandle, .pCont = pCreate, - .contLen = pCreate ? sizeof(SMDCreateVnodeMsg) : 0, + .contLen = pCreate ? 
sizeof(SCreateVnodeMsg) : 0, .code = 0, .msgType = TSDB_MSG_TYPE_MD_CREATE_VNODE }; @@ -956,34 +957,34 @@ static void mnodeProcessCreateVnodeRsp(SRpcMsg *rpcMsg) { if (mnodeMsg->received != mnodeMsg->expected) return; if (mnodeMsg->received == mnodeMsg->successed) { - SSdbOper oper = { + SSdbRow row = { .type = SDB_OPER_GLOBAL, - .table = tsVgroupSdb, + .pTable = tsVgroupSdb, .pObj = pVgroup, .rowSize = sizeof(SVgObj), .pMsg = mnodeMsg, - .writeCb = mnodeCreateVgroupCb + .fpRsp = mnodeCreateVgroupCb }; - int32_t code = sdbInsertRowImp(&oper); + int32_t code = sdbInsertRowToQueue(&row); if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { mnodeMsg->pVgroup = NULL; mnodeDestroyVgroup(pVgroup); - dnodeSendRpcMnodeWriteRsp(mnodeMsg, code); + dnodeSendRpcMWriteRsp(mnodeMsg, code); } } else { - SSdbOper oper = { - .type = SDB_OPER_GLOBAL, - .table = tsVgroupSdb, - .pObj = pVgroup + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsVgroupSdb, + .pObj = pVgroup }; - sdbDeleteRow(&oper); - dnodeSendRpcMnodeWriteRsp(mnodeMsg, mnodeMsg->code); + sdbDeleteRow(&row); + dnodeSendRpcMWriteRsp(mnodeMsg, mnodeMsg->code); } } -static SMDDropVnodeMsg *mnodeBuildDropVnodeMsg(int32_t vgId) { - SMDDropVnodeMsg *pDrop = rpcMallocCont(sizeof(SMDDropVnodeMsg)); +static SDropVnodeMsg *mnodeBuildDropVnodeMsg(int32_t vgId) { + SDropVnodeMsg *pDrop = rpcMallocCont(sizeof(SDropVnodeMsg)); if (pDrop == NULL) return NULL; pDrop->vgId = htonl(vgId); @@ -991,11 +992,11 @@ static SMDDropVnodeMsg *mnodeBuildDropVnodeMsg(int32_t vgId) { } void mnodeSendDropVnodeMsg(int32_t vgId, SRpcEpSet *epSet, void *ahandle) { - SMDDropVnodeMsg *pDrop = mnodeBuildDropVnodeMsg(vgId); + SDropVnodeMsg *pDrop = mnodeBuildDropVnodeMsg(vgId); SRpcMsg rpcMsg = { .ahandle = ahandle, .pCont = pDrop, - .contLen = pDrop ? sizeof(SMDDropVnodeMsg) : 0, + .contLen = pDrop ? 
sizeof(SDropVnodeMsg) : 0, .code = 0, .msgType = TSDB_MSG_TYPE_MD_DROP_VNODE }; @@ -1030,21 +1031,21 @@ static void mnodeProcessDropVnodeRsp(SRpcMsg *rpcMsg) { if (mnodeMsg->received != mnodeMsg->expected) return; - SSdbOper oper = { - .type = SDB_OPER_GLOBAL, - .table = tsVgroupSdb, - .pObj = pVgroup + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsVgroupSdb, + .pObj = pVgroup }; - int32_t code = sdbDeleteRow(&oper); + int32_t code = sdbDeleteRow(&row); if (code != 0) { code = TSDB_CODE_MND_SDB_ERROR; } - dnodeReprocessMnodeWriteMsg(mnodeMsg); + dnodeReprocessMWriteMsg(mnodeMsg); } static int32_t mnodeProcessVnodeCfgMsg(SMnodeMsg *pMsg) { - SDMConfigVnodeMsg *pCfg = pMsg->rpcMsg.pCont; + SConfigVnodeMsg *pCfg = pMsg->rpcMsg.pCont; pCfg->dnodeId = htonl(pCfg->dnodeId); pCfg->vgId = htonl(pCfg->vgId); @@ -1083,12 +1084,12 @@ void mnodeDropAllDnodeVgroups(SDnodeObj *pDropDnode) { if (pVgroup->vnodeGid[0].dnodeId == pDropDnode->dnodeId) { mnodeDropAllChildTablesInVgroups(pVgroup); - SSdbOper oper = { - .type = SDB_OPER_LOCAL, - .table = tsVgroupSdb, - .pObj = pVgroup, + SSdbRow row = { + .type = SDB_OPER_LOCAL, + .pTable = tsVgroupSdb, + .pObj = pVgroup, }; - sdbDeleteRow(&oper); + sdbDeleteRow(&row); numOfVgroups++; } mnodeDecVgroupRef(pVgroup); @@ -1134,12 +1135,12 @@ void mnodeDropAllDbVgroups(SDbObj *pDropDb) { if (pVgroup == NULL) break; if (pVgroup->pDb == pDropDb) { - SSdbOper oper = { - .type = SDB_OPER_LOCAL, - .table = tsVgroupSdb, - .pObj = pVgroup, + SSdbRow row = { + .type = SDB_OPER_LOCAL, + .pTable = tsVgroupSdb, + .pObj = pVgroup, }; - sdbDeleteRow(&oper); + sdbDeleteRow(&row); numOfVgroups++; } diff --git a/src/mnode/src/mnodeWrite.c b/src/mnode/src/mnodeWrite.c index d021745d2b754c21e422f87438d18539a15fd908..8893316ffc2e9ac3291a1411d7ea5054e198ff9b 100644 --- a/src/mnode/src/mnodeWrite.c +++ b/src/mnode/src/mnodeWrite.c @@ -43,7 +43,7 @@ void mnodeAddWriteMsgHandle(uint8_t msgType, int32_t (*fp)(SMnodeMsg *mnodeMsg)) int32_t mnodeProcessWrite(SMnodeMsg *pMsg) { if (pMsg->rpcMsg.pCont == NULL) { - mError("app:%p:%p, msg:%s content is null", pMsg->rpcMsg.ahandle, pMsg, taosMsg[pMsg->rpcMsg.msgType]); + mError("msg:%p, app:%p type:%s content is null", pMsg, pMsg->rpcMsg.ahandle, taosMsg[pMsg->rpcMsg.msgType]); return TSDB_CODE_MND_INVALID_MSG_LEN; } @@ -54,15 +54,15 @@ int32_t mnodeProcessWrite(SMnodeMsg *pMsg) { rpcRsp->rsp = epSet; rpcRsp->len = sizeof(SRpcEpSet); - mDebug("app:%p:%p, msg:%s in write queue, will be redirected, numOfEps:%d inUse:%d", pMsg->rpcMsg.ahandle, pMsg, + mDebug("msg:%p, app:%p type:%s in write queue, will be redirected, numOfEps:%d inUse:%d", pMsg, pMsg->rpcMsg.ahandle, taosMsg[pMsg->rpcMsg.msgType], epSet->numOfEps, epSet->inUse); for (int32_t i = 0; i < epSet->numOfEps; ++i) { if (strcmp(epSet->fqdn[i], tsLocalFqdn) == 0 && htons(epSet->port[i]) == tsServerPort) { epSet->inUse = (i + 1) % epSet->numOfEps; - mDebug("app:%p:%p, mnode index:%d ep:%s:%d, set inUse to %d", pMsg->rpcMsg.ahandle, pMsg, i, epSet->fqdn[i], + mDebug("msg:%p, app:%p mnode index:%d ep:%s:%d, set inUse to %d", pMsg, pMsg->rpcMsg.ahandle, i, epSet->fqdn[i], htons(epSet->port[i]), epSet->inUse); } else { - mDebug("app:%p:%p, mnode index:%d ep:%s:%d", pMsg->rpcMsg.ahandle, pMsg, i, epSet->fqdn[i], + mDebug("msg:%p, app:%p mnode index:%d ep:%s:%d", pMsg, pMsg->rpcMsg.ahandle, i, epSet->fqdn[i], htons(epSet->port[i])); } } @@ -71,19 +71,19 @@ int32_t mnodeProcessWrite(SMnodeMsg *pMsg) { } if (tsMnodeProcessWriteMsgFp[pMsg->rpcMsg.msgType] == NULL) { - mError("app:%p:%p, 
msg:%s not processed", pMsg->rpcMsg.ahandle, pMsg, taosMsg[pMsg->rpcMsg.msgType]); + mError("msg:%p, app:%p type:%s not processed", pMsg, pMsg->rpcMsg.ahandle, taosMsg[pMsg->rpcMsg.msgType]); return TSDB_CODE_MND_MSG_NOT_PROCESSED; } int32_t code = mnodeInitMsg(pMsg); if (code != TSDB_CODE_SUCCESS) { - mError("app:%p:%p, msg:%s not processed, reason:%s", pMsg->rpcMsg.ahandle, pMsg, taosMsg[pMsg->rpcMsg.msgType], + mError("msg:%p, app:%p type:%s not processed, reason:%s", pMsg, pMsg->rpcMsg.ahandle, taosMsg[pMsg->rpcMsg.msgType], tstrerror(code)); return code; } if (!pMsg->pUser->writeAuth) { - mError("app:%p:%p, msg:%s not processed, no write auth", pMsg->rpcMsg.ahandle, pMsg, + mError("msg:%p, app:%p type:%s not processed, no write auth", pMsg, pMsg->rpcMsg.ahandle, taosMsg[pMsg->rpcMsg.msgType]); return TSDB_CODE_MND_NO_RIGHTS; } diff --git a/src/os/inc/os.h b/src/os/inc/os.h index 86e16db8b1446308060945d3a7db2531287c62ec..9383ae48dc5c6151aea0d9e8a2641603b63da144 100644 --- a/src/os/inc/os.h +++ b/src/os/inc/os.h @@ -52,9 +52,9 @@ extern "C" { #include "osWindows.h" #endif +#include "osDef.h" #include "osAtomic.h" #include "osCommon.h" -#include "osDef.h" #include "osDir.h" #include "osFile.h" #include "osLz4.h" diff --git a/src/os/inc/osDarwin.h b/src/os/inc/osDarwin.h index c1a950fbe6375b14e3cf25277eb933b96e518f10..7bb844831e03dd1e21abd389a61976e14927ebbd 100644 --- a/src/os/inc/osDarwin.h +++ b/src/os/inc/osDarwin.h @@ -72,8 +72,6 @@ extern "C" { #include #define TAOS_OS_FUNC_FILE_SENDIFLE - #define taosFSendFile(outfile, infile, offset, count) taosFSendFileImp(outfile, infile, offset, size) - #define taosTSendFile(dfd, sfd, offset, size) taosTSendFileImp(dfd, sfd, offset, size) #define TAOS_OS_FUNC_SEMPHONE #define tsem_t dispatch_semaphore_t diff --git a/src/os/inc/osFile.h b/src/os/inc/osFile.h index dc19c8177c62307ca110486c130f6a6b56047ef4..62e44d8eb0b70fb1526693895847637daa72247a 100644 --- a/src/os/inc/osFile.h +++ b/src/os/inc/osFile.h @@ -20,46 +20,52 @@ extern "C" { #endif -ssize_t taosTReadImp(int fd, void *buf, size_t count); -ssize_t taosTWriteImp(int fd, void *buf, size_t count); +#define tread(fd, buf, count) read(fd, buf, count) +#define twrite(fd, buf, count) write(fd, buf, count) +#define tlseek(fd, offset, whence) lseek(fd, offset, whence) +#define tclose(fd) \ + { \ + if (FD_VALID(fd)) { \ + close(fd); \ + fd = FD_INITIALIZER; \ + } \ + } -ssize_t taosTSendFileImp(int dfd, int sfd, off_t *offset, size_t size); -int taosFSendFileImp(FILE* out_file, FILE* in_file, int64_t* offset, int32_t count); +int64_t taosReadImp(int32_t fd, void *buf, int64_t count); +int64_t taosWriteImp(int32_t fd, void *buf, int64_t count); +int64_t taosLSeekImp(int32_t fd, int64_t offset, int32_t whence); +int32_t taosRenameFile(char *fullPath, char *suffix, char delimiter, char **dstPath); -#ifndef TAOS_OS_FUNC_FILE_SENDIFLE - #define taosTSendFile(dfd, sfd, offset, size) taosTSendFileImp(dfd, sfd, offset, size) - #define taosFSendFile(outfile, infile, offset, count) taosTSendFileImp(fileno(outfile), fileno(infile), offset, size) -#endif +#define taosRead(fd, buf, count) taosReadImp(fd, buf, count) +#define taosWrite(fd, buf, count) taosWriteImp(fd, buf, count) +#define taosLSeek(fd, offset, whence) taosLSeekImp(fd, offset, whence) +#define taosClose(x) tclose(x) -#define taosTRead(fd, buf, count) taosTReadImp(fd, buf, count) -#define taosTWrite(fd, buf, count) taosTWriteImp(fd, buf, count) -#define taosLSeek(fd, offset, whence) lseek(fd, offset, whence) +// TAOS_OS_FUNC_FILE_SENDIFLE 
+int64_t taosSendFile(int32_t dfd, int32_t sfd, int64_t *offset, int64_t size); +int64_t taosFSendFile(FILE *outfile, FILE *infile, int64_t *offset, int64_t size); #ifdef TAOS_RANDOM_FILE_FAIL - void taosSetRandomFileFailFactor(int factor); + void taosSetRandomFileFailFactor(int32_t factor); void taosSetRandomFileFailOutput(const char *path); #ifdef TAOS_RANDOM_FILE_FAIL_TEST - ssize_t taosReadFileRandomFail(int fd, void *buf, size_t count, const char *file, uint32_t line); - ssize_t taosWriteFileRandomFail(int fd, void *buf, size_t count, const char *file, uint32_t line); - off_t taosLSeekRandomFail(int fd, off_t offset, int whence, const char *file, uint32_t line); - #undef taosTRead - #undef taosTWrite + int64_t taosReadFileRandomFail(int32_t fd, void *buf, int32_t count, const char *file, uint32_t line); + int64_t taosWriteFileRandomFail(int32_t fd, void *buf, int32_t count, const char *file, uint32_t line); + int64_t taosLSeekRandomFail(int32_t fd, int64_t offset, int32_t whence, const char *file, uint32_t line); + #undef taosRead + #undef taosWrite #undef taosLSeek - #define taosTRead(fd, buf, count) taosReadFileRandomFail(fd, buf, count, __FILE__, __LINE__) - #define taosTWrite(fd, buf, count) taosWriteFileRandomFail(fd, buf, count, __FILE__, __LINE__) + #define taosRead(fd, buf, count) taosReadFileRandomFail(fd, buf, count, __FILE__, __LINE__) + #define taosWrite(fd, buf, count) taosWriteFileRandomFail(fd, buf, count, __FILE__, __LINE__) #define taosLSeek(fd, offset, whence) taosLSeekRandomFail(fd, offset, whence, __FILE__, __LINE__) #endif #endif -int32_t taosFileRename(char *fullPath, char *suffix, char delimiter, char **dstPath); - // TAOS_OS_FUNC_FILE_GETTMPFILEPATH void taosGetTmpfilePath(const char *fileNamePrefix, char *dstPath); -#ifndef TAOS_OS_FUNC_FILE_FTRUNCATE - #define taosFtruncate ftruncate -#endif - +// TAOS_OS_FUNC_FILE_FTRUNCATE +int32_t taosFtruncate(int32_t fd, int64_t length); #ifdef __cplusplus } #endif diff --git a/src/os/inc/osMemory.h b/src/os/inc/osMemory.h index 37d9dc9828ec1f51f36542e6c297ec3df8709f7e..0616006650eb2d53cdf7bd70b68c9f60748deac5 100644 --- a/src/os/inc/osMemory.h +++ b/src/os/inc/osMemory.h @@ -31,6 +31,7 @@ typedef enum { void taosSetAllocMode(int mode, const char *path, bool autoDump); void taosDumpMemoryLeak(); +// used in tsdb module void * taosTMalloc(size_t size); void * taosTCalloc(size_t nmemb, size_t size); void * taosTRealloc(void *ptr, size_t size); @@ -38,7 +39,14 @@ void taosTZfree(void *ptr); size_t taosTSizeof(void *ptr); void taosTMemset(void *ptr, int c); -#define taosTFree(x) \ +// used in other module +#define tmalloc(size) malloc(size) +#define tcalloc(num, size) calloc(num, size) +#define trealloc(ptr, size) realloc(ptr, size) +#define tstrdup(str) taosStrdupImp(str) +#define tstrndup(str, size) taosStrndupImp(str, size) +#define tgetline(lineptr, n, stream) taosGetlineImp(lineptr, n, stream) +#define tfree(x) \ do { \ if (x) { \ free((void *)(x)); \ @@ -46,37 +54,30 @@ void taosTMemset(void *ptr, int c); } \ } while (0); -#define taosMalloc(size) malloc(size) -#define taosCalloc(num, size) calloc(num, size) -#define taosRealloc(ptr, size) realloc(ptr, size) -#define taosFree(ptr) free(ptr) -#define taosStrdup(str) taosStrdupImp(str) -#define taosStrndup(str, size) taosStrndupImp(str, size) -#define taosGetline(lineptr, n, stream) taosGetlineImp(lineptr, n, stream) - #ifdef TAOS_MEM_CHECK #ifdef TAOS_MEM_CHECK_TEST - void * taos_malloc(size_t size, const char *file, uint32_t line); - void * taos_calloc(size_t num, 
size_t size, const char *file, uint32_t line); - void * taos_realloc(void *ptr, size_t size, const char *file, uint32_t line); - void taos_free(void *ptr, const char *file, uint32_t line); - char * taos_strdup(const char *str, const char *file, uint32_t line); - char * taos_strndup(const char *str, size_t size, const char *file, uint32_t line); - ssize_t taos_getline(char **lineptr, size_t *n, FILE *stream, const char *file, uint32_t line); - #undef taosMalloc - #undef taosCalloc - #undef taosRealloc - #undef taosFree - #undef taosStrdup - #undef taosStrndup - #undef taosGetline - #define taosMalloc(size) taos_malloc(size, __FILE__, __LINE__) - #define taosCalloc(num, size) taos_calloc(num, size, __FILE__, __LINE__) - #define taosRealloc(ptr, size) taos_realloc(ptr, size, __FILE__, __LINE__) - #define taosFree(ptr) taos_free(ptr, __FILE__, __LINE__) - //#define taosStrdup(str) taos_strdup(str, __FILE__, __LINE__) - //#define taosStrndup(str, size) taos_strndup(str, size, __FILE__, __LINE__) - //#define taosGetline(lineptr, n, stream) taos_getline(lineptr, n, stream, __FILE__, __LINE__) + void * taosMallocMem(size_t size, const char *file, uint32_t line); + void * taosCallocMem(size_t num, size_t size, const char *file, uint32_t line); + void * taosReallocMem(void *ptr, size_t size, const char *file, uint32_t line); + void taosFreeMem(void *ptr, const char *file, uint32_t line); + char * taosStrdupMem(const char *str, const char *file, uint32_t line); + char * taosStrndupMem(const char *str, size_t size, const char *file, uint32_t line); + ssize_t taosGetlineMem(char **lineptr, size_t *n, FILE *stream, const char *file, uint32_t line); + #undef tmalloc + #undef tcalloc + #undef trealloc + #undef tfree + #define tmalloc(size) taosMallocMem(size, __FILE__, __LINE__) + #define tcalloc(num, size) taosCallocMem(num, size, __FILE__, __LINE__) + #define trealloc(ptr, size) taosReallocMem(ptr, size, __FILE__, __LINE__) + #define tfree(ptr) taosFreeMem(ptr, __FILE__, __LINE__) + + // #undef tstrdup + // #undef tstrndup + // #undef tgetline + // #define taosStrdup(str) taos_strdup(str, __FILE__, __LINE__) + // #define taosStrndup(str, size) taos_strndup(str, size, __FILE__, __LINE__) + // #define tgetline(lineptr, n, stream) taos_getline(lineptr, n, stream, __FILE__, __LINE__) #endif #endif diff --git a/src/os/inc/osSocket.h b/src/os/inc/osSocket.h index 0ab3ff0fcafa43a404533f145a0bb1595bb29061..baf7687dd03e1ee4f6dd92e3204244b3d31b7a1f 100644 --- a/src/os/inc/osSocket.h +++ b/src/os/inc/osSocket.h @@ -33,21 +33,19 @@ extern "C" { x = FD_INITIALIZER; \ } \ } - typedef int SOCKET; + typedef int32_t SOCKET; #endif #ifndef TAOS_OS_DEF_EPOLL - #define TAOS_EPOLL_WAIT_TIME -1 + #define TAOS_EPOLL_WAIT_TIME 500 #endif -#define taosClose(x) taosCloseSocket(x) - #ifdef TAOS_RANDOM_NETWORK_FAIL #ifdef TAOS_RANDOM_NETWORK_FAIL_TEST - ssize_t taosSendRandomFail(int sockfd, const void *buf, size_t len, int flags); - ssize_t taosSendToRandomFail(int sockfd, const void *buf, size_t len, int flags, const struct sockaddr *dest_addr, socklen_t addrlen); - ssize_t taosReadSocketRandomFail(int fd, void *buf, size_t count); - ssize_t taosWriteSocketRandomFail(int fd, const void *buf, size_t count); + int64_t taosSendRandomFail(int32_t sockfd, const void *buf, size_t len, int32_t flags); + int64_t taosSendToRandomFail(int32_t sockfd, const void *buf, size_t len, int32_t flags, const struct sockaddr *dest_addr, socklen_t addrlen); + int64_t taosReadSocketRandomFail(int32_t fd, void *buf, size_t count); + int64_t 
taosWriteSocketRandomFail(int32_t fd, const void *buf, size_t count); #undef taosSend #undef taosSendto #undef taosReadSocket @@ -60,14 +58,14 @@ extern "C" { #endif // TAOS_OS_FUNC_SOCKET -int taosSetNonblocking(SOCKET sock, int on); -void taosBlockSIGPIPE(); +int32_t taosSetNonblocking(SOCKET sock, int32_t on); +void taosBlockSIGPIPE(); // TAOS_OS_FUNC_SOCKET_SETSOCKETOPT -int taosSetSockOpt(SOCKET socketfd, int level, int optname, void *optval, int optlen); +int32_t taosSetSockOpt(SOCKET socketfd, int32_t level, int32_t optname, void *optval, int32_t optlen); // TAOS_OS_FUNC_SOCKET_INET -uint32_t taosInetAddr(char *ipAddr); +uint32_t taosInetAddr(char *ipAddr); const char *taosInetNtoa(struct in_addr ipInt); #ifdef __cplusplus diff --git a/src/os/inc/osTime.h b/src/os/inc/osTime.h index 6b209219c6c736ed95c69659a78d14037987be00..b20ccadadb22a04733d97bd19b919660ee677d0d 100644 --- a/src/os/inc/osTime.h +++ b/src/os/inc/osTime.h @@ -38,14 +38,14 @@ int32_t taosGetTimestampSec(); static FORCE_INLINE int64_t taosGetTimestampMs() { struct timeval systemTime; gettimeofday(&systemTime, NULL); - return (int64_t)systemTime.tv_sec * 1000L + (uint64_t)systemTime.tv_usec / 1000; + return (int64_t)systemTime.tv_sec * 1000L + (int64_t)systemTime.tv_usec / 1000; } //@return timestamp in microsecond static FORCE_INLINE int64_t taosGetTimestampUs() { struct timeval systemTime; gettimeofday(&systemTime, NULL); - return (int64_t)systemTime.tv_sec * 1000000L + (uint64_t)systemTime.tv_usec; + return (int64_t)systemTime.tv_sec * 1000000L + (int64_t)systemTime.tv_usec; } /* diff --git a/src/os/inc/osWindows.h b/src/os/inc/osWindows.h index dc1da35037dc06c258856a1a143146492eeed0e5..5003e48c44a75011a0006a923a6e53d3d7639195 100644 --- a/src/os/inc/osWindows.h +++ b/src/os/inc/osWindows.h @@ -43,6 +43,7 @@ #include "msvcProcess.h" #include "msvcDirect.h" #include "msvcFcntl.h" +#include "msvcLibgen.h" #include "msvcStdio.h" #include "sys/msvcStat.h" #include "sys/msvcTypes.h" @@ -62,11 +63,8 @@ extern "C" { #define TAOS_OS_FUNC_FILE_ISDIR #define TAOS_OS_FUNC_FILE_ISLNK #define TAOS_OS_FUNC_FILE_SENDIFLE - #define taosFSendFile(outfile, infile, offset, count) taosFSendFileImp(outfile, infile, offset, size) - #define taosTSendFile(dfd, sfd, offset, size) taosTSendFileImp(dfd, sfd, offset, size) #define TAOS_OS_FUNC_FILE_GETTMPFILEPATH -#define TAOS_OS_FUNC_FILE_FTRUNCATE - extern int taosFtruncate(int fd, int64_t length); +#define TAOS_OS_FUNC_FILE_FTRUNCATE #define TAOS_OS_FUNC_MATH #define SWAP(a, b, c) \ @@ -139,7 +137,6 @@ typedef int (*__compar_fn_t)(const void *, const void *); #define in_addr_t unsigned long #define socklen_t int #define htobe64 htonll -#define twrite write #define getpid _getpid struct tm *localtime_r(const time_t *timep, struct tm *result); diff --git a/src/os/src/darwin/darwinFile.c b/src/os/src/darwin/darwinFile.c index 66bdb5b939d77ab03b30f024466fe3e3ff4e9388..dacf4db74137b8cc58db2b390ba971cf9dfbc012 100644 --- a/src/os/src/darwin/darwinFile.c +++ b/src/os/src/darwin/darwinFile.c @@ -19,21 +19,19 @@ #define _SEND_FILE_STEP_ 1000 -int taosFSendFileImp(FILE* out_file, FILE* in_file, int64_t* offset, int32_t count) { +int64_t taosFSendFile(FILE *out_file, FILE *in_file, int64_t *offset, int64_t count) { fseek(in_file, (int32_t)(*offset), 0); - int writeLen = 0; - uint8_t buffer[_SEND_FILE_STEP_] = { 0 }; + int writeLen = 0; + uint8_t buffer[_SEND_FILE_STEP_] = {0}; for (int len = 0; len < (count - _SEND_FILE_STEP_); len += _SEND_FILE_STEP_) { size_t rlen = fread(buffer, 1, 
_SEND_FILE_STEP_, in_file); if (rlen <= 0) { return writeLen; - } - else if (rlen < _SEND_FILE_STEP_) { + } else if (rlen < _SEND_FILE_STEP_) { fwrite(buffer, 1, rlen, out_file); return (int)(writeLen + rlen); - } - else { + } else { fwrite(buffer, 1, _SEND_FILE_STEP_, in_file); writeLen += _SEND_FILE_STEP_; } @@ -44,8 +42,7 @@ int taosFSendFileImp(FILE* out_file, FILE* in_file, int64_t* offset, int32_t cou size_t rlen = fread(buffer, 1, remain, in_file); if (rlen <= 0) { return writeLen; - } - else { + } else { fwrite(buffer, 1, remain, out_file); writeLen += remain; } @@ -54,7 +51,7 @@ int taosFSendFileImp(FILE* out_file, FILE* in_file, int64_t* offset, int32_t cou return writeLen; } -ssize_t taosTSendFileImp(int dfd, int sfd, off_t *offset, size_t size) { - uError("not implemented yet"); +int64_t taosSendFile(int32_t dfd, int32_t sfd, int64_t* offset, int64_t size) { + uError("taosSendFile not implemented yet"); return -1; } \ No newline at end of file diff --git a/src/os/src/detail/osFail.c b/src/os/src/detail/osFail.c index e0eb200851f976e1c9dad47afbc152bfb021e4e5..a99bcd01dbccd0290de1fc38a1220b573ab74b22 100644 --- a/src/os/src/detail/osFail.c +++ b/src/os/src/detail/osFail.c @@ -20,7 +20,7 @@ #ifdef TAOS_RANDOM_NETWORK_FAIL -ssize_t taosSendRandomFail(int sockfd, const void *buf, size_t len, int flags) { +int64_t taosSendRandomFail(int32_t sockfd, const void *buf, size_t len, int32_t flags) { if (rand() % RANDOM_NETWORK_FAIL_FACTOR == 0) { errno = ECONNRESET; return -1; @@ -29,8 +29,8 @@ ssize_t taosSendRandomFail(int sockfd, const void *buf, size_t len, int flags) { return send(sockfd, buf, len, flags); } -ssize_t taosSendToRandomFail(int sockfd, const void *buf, size_t len, int flags, const struct sockaddr *dest_addr, - socklen_t addrlen) { +int64_t taosSendToRandomFail(int32_t sockfd, const void *buf, size_t len, int32_t flags, + const struct sockaddr *dest_addr, socklen_t addrlen) { if (rand() % RANDOM_NETWORK_FAIL_FACTOR == 0) { errno = ECONNRESET; return -1; @@ -39,7 +39,7 @@ ssize_t taosSendToRandomFail(int sockfd, const void *buf, size_t len, int flags, return sendto(sockfd, buf, len, flags, dest_addr, addrlen); } -ssize_t taosReadSocketRandomFail(int fd, void *buf, size_t count) { +int64_t taosReadSocketRandomFail(int32_t fd, void *buf, size_t count) { if (rand() % RANDOM_NETWORK_FAIL_FACTOR == 0) { errno = ECONNRESET; return -1; @@ -48,7 +48,7 @@ ssize_t taosReadSocketRandomFail(int fd, void *buf, size_t count) { return read(fd, buf, count); } -ssize_t taosWriteSocketRandomFail(int fd, const void *buf, size_t count) { +int64_t taosWriteSocketRandomFail(int32_t fd, const void *buf, size_t count) { if (rand() % RANDOM_NETWORK_FAIL_FACTOR == 0) { errno = EINTR; return -1; @@ -61,10 +61,10 @@ ssize_t taosWriteSocketRandomFail(int fd, const void *buf, size_t count) { #ifdef TAOS_RANDOM_FILE_FAIL -static int random_file_fail_factor = 20; +static int32_t random_file_fail_factor = 20; static FILE *fpRandomFileFailOutput = NULL; -void taosSetRandomFileFailFactor(int factor) { +void taosSetRandomFileFailFactor(int32_t factor) { random_file_fail_factor = factor; } @@ -77,7 +77,7 @@ static void close_random_file_fail_output() { } } -static void random_file_fail_output_sig(int sig) { +static void random_file_fail_output_sig(int32_t sig) { fprintf(fpRandomFileFailOutput, "signal %d received.\n", sig); struct sigaction act = {0}; @@ -105,7 +105,7 @@ void taosSetRandomFileFailOutput(const char *path) { sigaction(SIGILL, &act, NULL); } -ssize_t taosReadFileRandomFail(int fd, void *buf, 
size_t count, const char *file, uint32_t line) { +int64_t taosReadFileRandomFail(int32_t fd, void *buf, int32_t count, const char *file, uint32_t line) { if (random_file_fail_factor > 0) { if (rand() % random_file_fail_factor == 0) { errno = EIO; @@ -113,10 +113,10 @@ ssize_t taosReadFileRandomFail(int fd, void *buf, size_t count, const char *file } } - return taosTReadImp(fd, buf, count); + return taosReadImp(fd, buf, count); } -ssize_t taosWriteFileRandomFail(int fd, void *buf, size_t count, const char *file, uint32_t line) { +int64_t taosWriteFileRandomFail(int32_t fd, void *buf, int32_t count, const char *file, uint32_t line) { if (random_file_fail_factor > 0) { if (rand() % random_file_fail_factor == 0) { errno = EIO; @@ -124,10 +124,10 @@ ssize_t taosWriteFileRandomFail(int fd, void *buf, size_t count, const char *fil } } - return taosTWriteImp(fd, buf, count); + return taosWriteImp(fd, buf, count); } -off_t taosLSeekRandomFail(int fd, off_t offset, int whence, const char *file, uint32_t line) { +int64_t taosLSeekRandomFail(int32_t fd, int64_t offset, int32_t whence, const char *file, uint32_t line) { if (random_file_fail_factor > 0) { if (rand() % random_file_fail_factor == 0) { errno = EIO; @@ -135,7 +135,7 @@ off_t taosLSeekRandomFail(int fd, off_t offset, int whence, const char *file, ui } } - return lseek(fd, offset, whence); + return taosLSeekImp(fd, offset, whence); } #endif //TAOS_RANDOM_FILE_FAIL diff --git a/src/os/src/detail/osFile.c b/src/os/src/detail/osFile.c index 8f055dd8129f340f267e64cdd905505a7b675a2d..23fc88b8e1f5d4c0e2fe6b2140278b1e32d35abb 100644 --- a/src/os/src/detail/osFile.c +++ b/src/os/src/detail/osFile.c @@ -15,16 +15,22 @@ #define _DEFAULT_SOURCE #include "os.h" +#include "tglobal.h" #ifndef TAOS_OS_FUNC_FILE_GETTMPFILEPATH + void taosGetTmpfilePath(const char *fileNamePrefix, char *dstPath) { const char *tdengineTmpFileNamePrefix = "tdengine-"; char tmpPath[PATH_MAX]; - char *tmpDir = "/tmp/"; + int32_t len = strlen(tsTempDir); + memcpy(tmpPath, tsTempDir, len); + + if (tmpPath[len - 1] != '/') { + tmpPath[len++] = '/'; + } - strcpy(tmpPath, tmpDir); - strcat(tmpPath, tdengineTmpFileNamePrefix); + strcpy(tmpPath + len, tdengineTmpFileNamePrefix); if (strlen(tmpPath) + strlen(fileNamePrefix) + strlen("-%d-%s") < PATH_MAX) { strcat(tmpPath, fileNamePrefix); strcat(tmpPath, "-%d-%s"); @@ -34,10 +40,10 @@ void taosGetTmpfilePath(const char *fileNamePrefix, char *dstPath) { taosRandStr(rand, tListLen(rand) - 1); snprintf(dstPath, PATH_MAX, tmpPath, getpid(), rand); } + #endif -// rename file name -int32_t taosFileRename(char *fullPath, char *suffix, char delimiter, char **dstPath) { +int32_t taosRenameFile(char *fullPath, char *suffix, char delimiter, char **dstPath) { int32_t ts = taosGetTimestampSec(); char fname[PATH_MAX] = {0}; // max file name length must be less than 255 @@ -46,12 +52,13 @@ int32_t taosFileRename(char *fullPath, char *suffix, char delimiter, char **dstP if (delimiterPos == NULL) return -1; int32_t fileNameLen = 0; - if (suffix) + if (suffix) { fileNameLen = snprintf(fname, PATH_MAX, "%s.%d.%s", delimiterPos + 1, ts, suffix); - else + } else { fileNameLen = snprintf(fname, PATH_MAX, "%s.%d", delimiterPos + 1, ts); + } - size_t len = (size_t)((delimiterPos - fullPath) + fileNameLen + 1); + int32_t len = (int32_t)((delimiterPos - fullPath) + fileNameLen + 1); if (*dstPath == NULL) { *dstPath = calloc(1, len + 1); if (*dstPath == NULL) return -1; @@ -64,9 +71,9 @@ int32_t taosFileRename(char *fullPath, char *suffix, char delimiter, char 
**dstP return rename(fullPath, *dstPath); } -ssize_t taosTReadImp(int fd, void *buf, size_t count) { - size_t leftbytes = count; - ssize_t readbytes; +int64_t taosReadImp(int32_t fd, void *buf, int64_t count) { + int64_t leftbytes = count; + int64_t readbytes; char * tbuf = (char *)buf; while (leftbytes > 0) { @@ -78,19 +85,19 @@ ssize_t taosTReadImp(int fd, void *buf, size_t count) { return -1; } } else if (readbytes == 0) { - return (ssize_t)(count - leftbytes); + return (int64_t)(count - leftbytes); } leftbytes -= readbytes; tbuf += readbytes; } - return (ssize_t)count; + return count; } -ssize_t taosTWriteImp(int fd, void *buf, size_t n) { - size_t nleft = n; - ssize_t nwritten = 0; +int64_t taosWriteImp(int32_t fd, void *buf, int64_t n) { + int64_t nleft = n; + int64_t nwritten = 0; char * tbuf = (char *)buf; while (nleft > 0) { @@ -105,13 +112,18 @@ ssize_t taosTWriteImp(int fd, void *buf, size_t n) { tbuf += nwritten; } - return (ssize_t)n; + return n; +} + +int64_t taosLSeekImp(int32_t fd, int64_t offset, int32_t whence) { + return (int64_t)tlseek(fd, (long)offset, whence); } #ifndef TAOS_OS_FUNC_FILE_SENDIFLE -ssize_t taosTSendFileImp(int dfd, int sfd, off_t *offset, size_t size) { - size_t leftbytes = size; - ssize_t sentbytes; + +int64_t taosSendFile(int32_t dfd, int32_t sfd, int64_t *offset, int64_t size) { + int64_t leftbytes = size; + int64_t sentbytes; while (leftbytes > 0) { /* @@ -120,13 +132,13 @@ ssize_t taosTSendFileImp(int dfd, int sfd, off_t *offset, size_t size) { // if (leftbytes > 1000000000) leftbytes = 1000000000; sentbytes = sendfile(dfd, sfd, offset, leftbytes); if (sentbytes == -1) { - if (errno == EINTR) { + if (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK) { continue; } else { return -1; } } else if (sentbytes == 0) { - return (ssize_t)(size - leftbytes); + return (int64_t)(size - leftbytes); } leftbytes -= sentbytes; @@ -134,4 +146,17 @@ ssize_t taosTSendFileImp(int dfd, int sfd, off_t *offset, size_t size) { return size; } + +int64_t taosFSendFile(FILE *outfile, FILE *infile, int64_t *offset, int64_t size) { + return taosSendFile(fileno(outfile), fileno(infile), offset, size); +} + +#endif + +#ifndef TAOS_OS_FUNC_FILE_FTRUNCATE + +int32_t taosFtruncate(int32_t fd, int64_t length) { + return ftruncate(fd, length); +} + #endif \ No newline at end of file diff --git a/src/os/src/detail/osMemory.c b/src/os/src/detail/osMemory.c index dfd320be89a75a4700a4918f68d56ee66faa3135..53310d179c0090382e009de949e5158146dc282a 100644 --- a/src/os/src/detail/osMemory.c +++ b/src/os/src/detail/osMemory.c @@ -28,7 +28,7 @@ static FILE* fpAllocLog = NULL; extern int32_t taosGetTimestampSec(); static int32_t startTime = INT32_MAX; -static bool random_alloc_fail(size_t size, const char* file, uint32_t line) { +static bool taosRandomAllocFail(size_t size, const char* file, uint32_t line) { if (taosGetTimestampSec() < startTime) { return false; } @@ -48,33 +48,33 @@ static bool random_alloc_fail(size_t size, const char* file, uint32_t line) { return true; } -static void* malloc_random(size_t size, const char* file, uint32_t line) { - return random_alloc_fail(size, file, line) ? NULL : malloc(size); +static void* taosRandmoMalloc(size_t size, const char* file, uint32_t line) { + return taosRandomAllocFail(size, file, line) ? NULL : malloc(size); } -static void* calloc_random(size_t num, size_t size, const char* file, uint32_t line) { - return random_alloc_fail(num * size, file, line) ? 
NULL : calloc(num, size); +static void* taosRandomCalloc(size_t num, size_t size, const char* file, uint32_t line) { + return taosRandomAllocFail(num * size, file, line) ? NULL : calloc(num, size); } -static void* realloc_random(void* ptr, size_t size, const char* file, uint32_t line) { - return random_alloc_fail(size, file, line) ? NULL : realloc(ptr, size); +static void* taosRandomRealloc(void* ptr, size_t size, const char* file, uint32_t line) { + return taosRandomAllocFail(size, file, line) ? NULL : realloc(ptr, size); } -static char* strdup_random(const char* str, const char* file, uint32_t line) { +static char* taosRandomStrdup(const char* str, const char* file, uint32_t line) { size_t len = strlen(str); - return random_alloc_fail(len + 1, file, line) ? NULL : taosStrdupImp(str); + return taosRandomAllocFail(len + 1, file, line) ? NULL : taosStrdupImp(str); } -static char* strndup_random(const char* str, size_t size, const char* file, uint32_t line) { +static char* taosRandomStrndup(const char* str, size_t size, const char* file, uint32_t line) { size_t len = strlen(str); if (len > size) { len = size; } - return random_alloc_fail(len + 1, file, line) ? NULL : taosStrndupImp(str, len); + return taosRandomAllocFail(len + 1, file, line) ? NULL : taosStrndupImp(str, len); } -static ssize_t getline_random(char **lineptr, size_t *n, FILE *stream, const char* file, uint32_t line) { - return random_alloc_fail(*n, file, line) ? -1 : taosGetlineImp(lineptr, n, stream); +static ssize_t taosRandomGetline(char **lineptr, size_t *n, FILE *stream, const char* file, uint32_t line) { + return taosRandomAllocFail(*n, file, line) ? -1 : taosGetlineImp(lineptr, n, stream); } //////////////////////////////////////////////////////////////////////////////// @@ -96,7 +96,7 @@ typedef struct SMemBlock { static SMemBlock *blocks = NULL; static uintptr_t lock = 0; -static void add_mem_block(SMemBlock* blk) { +static void taosAddMemBlock(SMemBlock* blk) { blk->prev = NULL; while (atomic_val_compare_exchange_ptr(&lock, 0, 1) != 0); blk->next = blocks; @@ -107,7 +107,7 @@ static void add_mem_block(SMemBlock* blk) { atomic_store_ptr(&lock, 0); } -static void remove_mem_block(SMemBlock* blk) { +static void taosRemoveMemBlock(SMemBlock* blk) { while (atomic_val_compare_exchange_ptr(&lock, 0, 1) != 0); if (blocks == blk) { @@ -126,7 +126,7 @@ static void remove_mem_block(SMemBlock* blk) { blk->next = NULL; } -static void free_detect_leak(void* ptr, const char* file, uint32_t line) { +static void taosFreeDetectLeak(void* ptr, const char* file, uint32_t line) { if (ptr == NULL) { return; } @@ -140,11 +140,11 @@ static void free_detect_leak(void* ptr, const char* file, uint32_t line) { return; } - remove_mem_block(blk); + taosRemoveMemBlock(blk); free(blk); } -static void* malloc_detect_leak(size_t size, const char* file, uint32_t line) { +static void* taosMallocDetectLeak(size_t size, const char* file, uint32_t line) { if (size == 0) { return NULL; } @@ -166,28 +166,28 @@ static void* malloc_detect_leak(size_t size, const char* file, uint32_t line) { blk->line = (uint16_t)line; blk->magic = MEMBLK_MAGIC; blk->size = size; - add_mem_block(blk); + taosAddMemBlock(blk); return blk->data; } -static void* calloc_detect_leak(size_t num, size_t size, const char* file, uint32_t line) { +static void* taosCallocDetectLeak(size_t num, size_t size, const char* file, uint32_t line) { size *= num; - void* p = malloc_detect_leak(size, file, line); + void* p = taosMallocDetectLeak(size, file, line); if (p != NULL) { memset(p, 0, 
size); } return p; } -static void* realloc_detect_leak(void* ptr, size_t size, const char* file, uint32_t line) { +static void* taosReallocDetectLeak(void* ptr, size_t size, const char* file, uint32_t line) { if (size == 0) { - free_detect_leak(ptr, file, line); + taosFreeDetectLeak(ptr, file, line); return NULL; } if (ptr == NULL) { - return malloc_detect_leak(size, file, line); + return taosMallocDetectLeak(size, file, line); } SMemBlock* blk = (SMemBlock *)((char*)ptr) - sizeof(SMemBlock); @@ -198,11 +198,11 @@ static void* realloc_detect_leak(void* ptr, size_t size, const char* file, uint3 return realloc(ptr, size); } - remove_mem_block(blk); + taosRemoveMemBlock(blk); void* p = realloc(blk, size + sizeof(SMemBlock)); if (p == NULL) { - add_mem_block(blk); + taosAddMemBlock(blk); return NULL; } @@ -212,13 +212,13 @@ static void* realloc_detect_leak(void* ptr, size_t size, const char* file, uint3 blk = (SMemBlock*)p; blk->size = size; - add_mem_block(blk); + taosAddMemBlock(blk); return blk->data; } -static char* strdup_detect_leak(const char* str, const char* file, uint32_t line) { +static char* taosStrdupDetectLeak(const char* str, const char* file, uint32_t line) { size_t len = strlen(str); - char *p = malloc_detect_leak(len + 1, file, line); + char *p = taosMallocDetectLeak(len + 1, file, line); if (p != NULL) { memcpy(p, str, len); p[len] = 0; @@ -226,12 +226,12 @@ static char* strdup_detect_leak(const char* str, const char* file, uint32_t line return p; } -static char* strndup_detect_leak(const char* str, size_t size, const char* file, uint32_t line) { +static char* taosStrndupDetectLeak(const char* str, size_t size, const char* file, uint32_t line) { size_t len = strlen(str); if (len > size) { len = size; } - char *p = malloc_detect_leak(len + 1, file, line); + char *p = taosMallocDetectLeak(len + 1, file, line); if (p != NULL) { memcpy(p, str, len); p[len] = 0; @@ -239,13 +239,13 @@ static char* strndup_detect_leak(const char* str, size_t size, const char* file, return p; } -static ssize_t getline_detect_leak(char **lineptr, size_t *n, FILE *stream, const char* file, uint32_t line) { +static ssize_t taosGetlineDetectLeak(char **lineptr, size_t *n, FILE *stream, const char* file, uint32_t line) { char* buf = NULL; size_t bufSize = 0; ssize_t size = taosGetlineImp(&buf, &bufSize, stream); if (size != -1) { if (*n < size + 1) { - void* p = realloc_detect_leak(*lineptr, size + 1, file, line); + void* p = taosReallocDetectLeak(*lineptr, size + 1, file, line); if (p == NULL) { free(buf); return -1; @@ -260,7 +260,7 @@ static ssize_t getline_detect_leak(char **lineptr, size_t *n, FILE *stream, cons return size; } -static void dump_memory_leak() { +static void taosDumpMemoryLeakImp() { const char* hex = "0123456789ABCDEF"; const char* fmt = ":%d: addr=%p, size=%d, content(first 16 bytes)="; size_t numOfBlk = 0, totalSize = 0; @@ -299,7 +299,7 @@ static void dump_memory_leak() { fflush(fpAllocLog); } -static void dump_memory_leak_on_sig(int sig) { +static void taosDumpMemoryLeakOnSig(int sig) { fprintf(fpAllocLog, "signal %d received.\n", sig); // restore default signal handler @@ -307,55 +307,55 @@ static void dump_memory_leak_on_sig(int sig) { act.sa_handler = SIG_DFL; sigaction(sig, &act, NULL); - dump_memory_leak(); + taosDumpMemoryLeakImp(); } //////////////////////////////////////////////////////////////////////////////// // interface functions -void* taos_malloc(size_t size, const char* file, uint32_t line) { +void* taosMallocMem(size_t size, const char* file, uint32_t line) { 
switch (allocMode) { case TAOS_ALLOC_MODE_DEFAULT: return malloc(size); case TAOS_ALLOC_MODE_RANDOM_FAIL: - return malloc_random(size, file, line); + return taosRandmoMalloc(size, file, line); case TAOS_ALLOC_MODE_DETECT_LEAK: - return malloc_detect_leak(size, file, line); + return taosMallocDetectLeak(size, file, line); } return malloc(size); } -void* taos_calloc(size_t num, size_t size, const char* file, uint32_t line) { +void* taosCallocMem(size_t num, size_t size, const char* file, uint32_t line) { switch (allocMode) { case TAOS_ALLOC_MODE_DEFAULT: return calloc(num, size); case TAOS_ALLOC_MODE_RANDOM_FAIL: - return calloc_random(num, size, file, line); + return taosRandomCalloc(num, size, file, line); case TAOS_ALLOC_MODE_DETECT_LEAK: - return calloc_detect_leak(num, size, file, line); + return taosCallocDetectLeak(num, size, file, line); } return calloc(num, size); } -void* taos_realloc(void* ptr, size_t size, const char* file, uint32_t line) { +void* taosReallocMem(void* ptr, size_t size, const char* file, uint32_t line) { switch (allocMode) { case TAOS_ALLOC_MODE_DEFAULT: return realloc(ptr, size); case TAOS_ALLOC_MODE_RANDOM_FAIL: - return realloc_random(ptr, size, file, line); + return taosRandomRealloc(ptr, size, file, line); case TAOS_ALLOC_MODE_DETECT_LEAK: - return realloc_detect_leak(ptr, size, file, line); + return taosReallocDetectLeak(ptr, size, file, line); } return realloc(ptr, size); } -void taos_free(void* ptr, const char* file, uint32_t line) { +void taosFreeMem(void* ptr, const char* file, uint32_t line) { switch (allocMode) { case TAOS_ALLOC_MODE_DEFAULT: return free(ptr); @@ -364,54 +364,54 @@ void taos_free(void* ptr, const char* file, uint32_t line) { return free(ptr); case TAOS_ALLOC_MODE_DETECT_LEAK: - return free_detect_leak(ptr, file, line); + return taosFreeDetectLeak(ptr, file, line); } return free(ptr); } -char* taos_strdup(const char* str, const char* file, uint32_t line) { +char* taosStrdupMem(const char* str, const char* file, uint32_t line) { switch (allocMode) { case TAOS_ALLOC_MODE_DEFAULT: return taosStrdupImp(str); case TAOS_ALLOC_MODE_RANDOM_FAIL: - return strdup_random(str, file, line); + return taosRandomStrdup(str, file, line); case TAOS_ALLOC_MODE_DETECT_LEAK: - return strdup_detect_leak(str, file, line); + return taosStrdupDetectLeak(str, file, line); } return taosStrdupImp(str); } -char* taos_strndup(const char* str, size_t size, const char* file, uint32_t line) { +char* taosStrndupMem(const char* str, size_t size, const char* file, uint32_t line) { switch (allocMode) { case TAOS_ALLOC_MODE_DEFAULT: return taosStrndupImp(str, size); case TAOS_ALLOC_MODE_RANDOM_FAIL: - return strndup_random(str, size, file, line); + return taosRandomStrndup(str, size, file, line); case TAOS_ALLOC_MODE_DETECT_LEAK: - return strndup_detect_leak(str, size, file, line); + return taosStrndupDetectLeak(str, size, file, line); } return taosStrndupImp(str, size); } -ssize_t taos_getline(char **lineptr, size_t *n, FILE *stream, const char* file, uint32_t line) { +ssize_t taosGetlineMem(char **lineptr, size_t *n, FILE *stream, const char* file, uint32_t line) { switch (allocMode) { case TAOS_ALLOC_MODE_DEFAULT: return taosGetlineImp(lineptr, n, stream); case TAOS_ALLOC_MODE_RANDOM_FAIL: - return getline_random(lineptr, n, stream, file, line); + return taosRandomGetline(lineptr, n, stream, file, line); case TAOS_ALLOC_MODE_DETECT_LEAK: - return getline_detect_leak(lineptr, n, stream, file, line); + return taosGetlineDetectLeak(lineptr, n, stream, file, line); } return 
taosGetlineImp(lineptr, n, stream); } -static void close_alloc_log() { +static void taosCloseAllocLog() { if (fpAllocLog != NULL) { if (fpAllocLog != stdout) { fclose(fpAllocLog); @@ -432,7 +432,7 @@ void taosSetAllocMode(int mode, const char* path, bool autoDump) { if (path == NULL || path[0] == 0) { fpAllocLog = stdout; } else if ((fpAllocLog = fopen(path, "w")) != NULL) { - atexit(close_alloc_log); + atexit(taosCloseAllocLog); } else { printf("failed to open memory allocation log file '%s', errno=%d\n", path, errno); return; @@ -446,10 +446,10 @@ void taosSetAllocMode(int mode, const char* path, bool autoDump) { } if (autoDump && mode == TAOS_ALLOC_MODE_DETECT_LEAK) { - atexit(dump_memory_leak); + atexit(taosDumpMemoryLeakImp); struct sigaction act = {0}; - act.sa_handler = dump_memory_leak_on_sig; + act.sa_handler = taosDumpMemoryLeakOnSig; sigaction(SIGFPE, &act, NULL); sigaction(SIGSEGV, &act, NULL); sigaction(SIGILL, &act, NULL); @@ -457,8 +457,8 @@ void taosSetAllocMode(int mode, const char* path, bool autoDump) { } void taosDumpMemoryLeak() { - dump_memory_leak(); - close_alloc_log(); + taosDumpMemoryLeakImp(); + taosCloseAllocLog(); } #else // 'TAOS_MEM_CHECK' not defined diff --git a/src/os/src/detail/osSocket.c b/src/os/src/detail/osSocket.c index 8a51c389e9f2efdcf3eaad97d434269ff3c0061f..c7c9d774271555ae5989aa18b7e00d325a0eddde 100644 --- a/src/os/src/detail/osSocket.c +++ b/src/os/src/detail/osSocket.c @@ -19,8 +19,8 @@ #ifndef TAOS_OS_FUNC_SOCKET -int taosSetNonblocking(SOCKET sock, int on) { - int flags = 0; +int32_t taosSetNonblocking(SOCKET sock, int32_t on) { + int32_t flags = 0; if ((flags = fcntl(sock, F_GETFL, 0)) < 0) { uError("fcntl(F_GETFL) error: %d (%s)\n", errno, strerror(errno)); return 1; @@ -43,7 +43,7 @@ void taosBlockSIGPIPE() { sigset_t signal_mask; sigemptyset(&signal_mask); sigaddset(&signal_mask, SIGPIPE); - int rc = pthread_sigmask(SIG_BLOCK, &signal_mask, NULL); + int32_t rc = pthread_sigmask(SIG_BLOCK, &signal_mask, NULL); if (rc != 0) { uError("failed to block SIGPIPE"); } @@ -53,7 +53,7 @@ void taosBlockSIGPIPE() { #ifndef TAOS_OS_FUNC_SOCKET_SETSOCKETOPT -int taosSetSockOpt(SOCKET socketfd, int level, int optname, void *optval, int optlen) { +int32_t taosSetSockOpt(SOCKET socketfd, int32_t level, int32_t optname, void *optval, int32_t optlen) { return setsockopt(socketfd, level, optname, optval, (socklen_t)optlen); } diff --git a/src/os/src/detail/osSysinfo.c b/src/os/src/detail/osSysinfo.c index 8df671f9c8974ecf77e5bf0943950772e3f9d192..b0ca6139ed9461b12ab2845d0060b51dc388bcab 100644 --- a/src/os/src/detail/osSysinfo.c +++ b/src/os/src/detail/osSysinfo.c @@ -61,7 +61,7 @@ bool taosGetProcMemory(float *memoryUsedMB) { size_t len; char * line = NULL; while (!feof(fp)) { - taosTFree(line); + tfree(line); len = 0; getline(&line, &len, fp); if (line == NULL) { @@ -83,7 +83,7 @@ bool taosGetProcMemory(float *memoryUsedMB) { sscanf(line, "%s %" PRId64, tmp, &memKB); *memoryUsedMB = (float)((double)memKB / 1024); - taosTFree(line); + tfree(line); fclose(fp); return true; } @@ -107,7 +107,7 @@ static bool taosGetSysCpuInfo(SysCpuInfo *cpuInfo) { char cpu[10] = {0}; sscanf(line, "%s %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64, cpu, &cpuInfo->user, &cpuInfo->nice, &cpuInfo->system, &cpuInfo->idle); - taosTFree(line); + tfree(line); fclose(fp); return true; } @@ -136,7 +136,7 @@ static bool taosGetProcCpuInfo(ProcCpuInfo *cpuInfo) { } } - taosTFree(line); + tfree(line); fclose(fp); return true; } @@ -377,7 +377,7 @@ static bool taosGetCardInfo(int64_t 
*bytes) {
     *bytes += (rbytes + tbytes);
   }
 
-  taosTFree(line);
+  tfree(line);
   fclose(fp);
 
   return true;
@@ -432,7 +432,7 @@ static bool taosReadProcIO(int64_t *readbyte, int64_t *writebyte) {
   int readIndex = 0;
 
   while (!feof(fp)) {
-    taosTFree(line);
+    tfree(line);
     len = 0;
     getline(&line, &len, fp);
     if (line == NULL) {
@@ -450,7 +450,7 @@ static bool taosReadProcIO(int64_t *readbyte, int64_t *writebyte) {
     if (readIndex >= 2) break;
   }
 
-  taosTFree(line);
+  tfree(line);
   fclose(fp);
 
   if (readIndex < 2) {
diff --git a/src/os/src/detail/osTimer.c b/src/os/src/detail/osTimer.c
index 22f7b94c3aea55432b3e59c4ad8664cb3ef6020e..9883a03a0933075616b69800fe6e34a36fc6c746 100644
--- a/src/os/src/detail/osTimer.c
+++ b/src/os/src/detail/osTimer.c
@@ -111,6 +111,9 @@ void taosUninitTimer() {
   pthread_sigmask(SIG_BLOCK, &set, NULL);
 */
 
 void taosMsleep(int mseconds) {
+#if 1
+  usleep(mseconds * 1000);
+#else
   struct timeval timeout;
   int seconds, useconds;
@@ -126,7 +129,8 @@ void taosMsleep(int mseconds) {
 
   select(0, NULL, NULL, NULL, &timeout);
 
-  /* pthread_sigmask(SIG_UNBLOCK, &set, NULL); */
+/* pthread_sigmask(SIG_UNBLOCK, &set, NULL); */
+#endif
 }
 
 #endif
\ No newline at end of file
diff --git a/src/os/src/windows/wAtomic.c b/src/os/src/windows/wAtomic.c
index a025cb8f0e67ff65ceb0cfadd420fd10cb99891b..b645893030b852428bff13744050b1f6f8ef1c42 100644
--- a/src/os/src/windows/wAtomic.c
+++ b/src/os/src/windows/wAtomic.c
@@ -44,7 +44,7 @@ long interlocked_add_fetch_32(long volatile* ptr, long val) {
 
 __int64 interlocked_add_fetch_64(__int64 volatile* ptr, __int64 val) {
 //#ifdef _WIN64
-  return _InterlockedExchangeAdd64(ptr, val) + val;
+  return InterlockedExchangeAdd64(ptr, val) + val;
 //#else
 //  return _InterlockedExchangeAdd(ptr, val) + val;
 //#endif
diff --git a/src/os/src/windows/wEnv.c b/src/os/src/windows/wEnv.c
index 8110a194904bbbc1166ac57de187de284db34a6b..19351eb7c964a4c2a8a4d1d5d4d1c8ec669908dc 100644
--- a/src/os/src/windows/wEnv.c
+++ b/src/os/src/windows/wEnv.c
@@ -46,5 +46,16 @@ void osInit() {
   strcpy(tsDnodeDir, "");
   strcpy(tsMnodeDir, "");
   strcpy(tsOsName, "Windows");
+
+  const char *tmpDir = getenv("tmp");
+  if (tmpDir == NULL) {
+    tmpDir = getenv("temp");
+  }
+  if (tmpDir != NULL) {
+    strcpy(tsTempDir, tmpDir);
+  } else {
+    strcpy(tsTempDir, "C:\\Windows\\Temp");
+  }
+
   taosWinSocketInit();
 }
diff --git a/src/os/src/windows/wFile.c b/src/os/src/windows/wFile.c
index 5549c078a5220d3e493ce21e2c19e38fb9611cbb..2204135ae657eb813144dc2bd1ae637d9510842e 100644
--- a/src/os/src/windows/wFile.c
+++ b/src/os/src/windows/wFile.c
@@ -16,17 +16,20 @@
 #define _DEFAULT_SOURCE
 #include "os.h"
 #include "tulog.h"
+#include "tglobal.h"
 
 void taosGetTmpfilePath(const char *fileNamePrefix, char *dstPath) {
   const char* tdengineTmpFileNamePrefix = "tdengine-";
-  char tmpPath[PATH_MAX];
+  char  tmpPath[PATH_MAX];
 
-  char *tmpDir = getenv("tmp");
-  if (tmpDir == NULL) {
-    tmpDir = "";
+  int32_t len = (int32_t)strlen(tsTempDir);
+  memcpy(tmpPath, tsTempDir, len);
+
+  if (tmpPath[len - 1] != '/' && tmpPath[len - 1] != '\\') {
+    tmpPath[len++] = '\\';
   }
-
-  strcpy(tmpPath, tmpDir);
+
+  strcpy(tmpPath + len, tdengineTmpFileNamePrefix);
   strcat(tmpPath, tdengineTmpFileNamePrefix);
   if (strlen(tmpPath) + strlen(fileNamePrefix) + strlen("-%d-%s") < PATH_MAX) {
     strcat(tmpPath, fileNamePrefix);
@@ -40,19 +43,19 @@ void taosGetTmpfilePath(const char *fileNamePrefix, char *dstPath) {
 
 #define _SEND_FILE_STEP_ 1000
 
-int taosFSendFileImp(FILE* out_file, FILE* in_file, int64_t* offset, int32_t count) {
+int64_t taosFSendFile(FILE *out_file, 
FILE *in_file, int64_t *offset, int64_t count) { fseek(in_file, (int32_t)(*offset), 0); - int writeLen = 0; + int64_t writeLen = 0; uint8_t buffer[_SEND_FILE_STEP_] = { 0 }; - for (int len = 0; len < (count - _SEND_FILE_STEP_); len += _SEND_FILE_STEP_) { + for (int64_t len = 0; len < (count - _SEND_FILE_STEP_); len += _SEND_FILE_STEP_) { size_t rlen = fread(buffer, 1, _SEND_FILE_STEP_, in_file); if (rlen <= 0) { return writeLen; } else if (rlen < _SEND_FILE_STEP_) { fwrite(buffer, 1, rlen, out_file); - return (int)(writeLen + rlen); + return (int64_t)(writeLen + rlen); } else { fwrite(buffer, 1, _SEND_FILE_STEP_, in_file); @@ -60,14 +63,14 @@ int taosFSendFileImp(FILE* out_file, FILE* in_file, int64_t* offset, int32_t cou } } - int remain = count - writeLen; + int64_t remain = count - writeLen; if (remain > 0) { - size_t rlen = fread(buffer, 1, remain, in_file); + size_t rlen = fread(buffer, 1, (size_t) remain, in_file); if (rlen <= 0) { return writeLen; } else { - fwrite(buffer, 1, remain, out_file); + fwrite(buffer, 1, (size_t) remain, out_file); writeLen += remain; } } @@ -75,12 +78,12 @@ int taosFSendFileImp(FILE* out_file, FILE* in_file, int64_t* offset, int32_t cou return writeLen; } -ssize_t taosTSendFileImp(int dfd, int sfd, off_t *offset, size_t size) { - uError("taosTSendFileImp no implemented yet"); +int64_t taosSendFile(int32_t dfd, int32_t sfd, int64_t* offset, int64_t size) { + uError("taosSendFile no implemented yet"); return 0; } -int taosFtruncate(int fd, int64_t length) { +int32_t taosFtruncate(int32_t fd, int64_t length) { uError("taosFtruncate no implemented yet"); return 0; } \ No newline at end of file diff --git a/src/os/src/windows/wSocket.c b/src/os/src/windows/wSocket.c index da9242d6a3bff471c4e749f0faf7c95b6c3a9d8c..3b091b269931e644d9f8c2c01ba3c9cb9ddc520c 100644 --- a/src/os/src/windows/wSocket.c +++ b/src/os/src/windows/wSocket.c @@ -34,7 +34,7 @@ void taosWinSocketInit() { } } -int taosSetNonblocking(SOCKET sock, int on) { +int32_t taosSetNonblocking(SOCKET sock, int32_t on) { u_long mode; if (on) { mode = 1; @@ -48,7 +48,7 @@ int taosSetNonblocking(SOCKET sock, int on) { void taosBlockSIGPIPE() {} -int taosSetSockOpt(SOCKET socketfd, int level, int optname, void *optval, int optlen) { +int32_t taosSetSockOpt(SOCKET socketfd, int32_t level, int32_t optname, void *optval, int32_t optlen) { if (level == SOL_SOCKET && optname == TCP_KEEPCNT) { return 0; } @@ -72,7 +72,7 @@ int taosSetSockOpt(SOCKET socketfd, int level, int optname, void *optval, int op uint32_t taosInetAddr(char *ipAddr) { uint32_t value; - int ret = inet_pton(AF_INET, ipAddr, &value); + int32_t ret = inet_pton(AF_INET, ipAddr, &value); if (ret <= 0) { return INADDR_NONE; } else { diff --git a/src/os/src/windows/wSysinfo.c b/src/os/src/windows/wSysinfo.c index 61adc3ee14ed4a815117d306559970f67a961ea8..1bfee25c4a6a1851bdfbc16e795185412f007df2 100644 --- a/src/os/src/windows/wSysinfo.c +++ b/src/os/src/windows/wSysinfo.c @@ -31,7 +31,10 @@ #pragma comment(lib, "Mswsock.lib ") #endif +#pragma warning(push) +#pragma warning(disable:4091) #include +#pragma warning(pop) static void taosGetSystemTimezone() { // get and set default timezone diff --git a/src/plugins/http/inc/httpInt.h b/src/plugins/http/inc/httpInt.h index affc0e838ea97f25dcb2e53947ecbb7b025dffb1..ebdfabf3101001d020e24bf63c3d699492ff21d0 100644 --- a/src/plugins/http/inc/httpInt.h +++ b/src/plugins/http/inc/httpInt.h @@ -118,7 +118,7 @@ typedef struct { typedef struct { char *module; - bool (*decodeFp)(struct HttpContext *pContext); + 
bool (*fpDecode)(struct HttpContext *pContext); } HttpDecodeMethod; typedef struct { diff --git a/src/plugins/http/inc/httpQueue.h b/src/plugins/http/inc/httpQueue.h index a4590719ff24d48eee875b2f2c4ff2f28a0a31f6..1ffbd5148164ab4d1f74bdac781c8fcf731ad772 100644 --- a/src/plugins/http/inc/httpQueue.h +++ b/src/plugins/http/inc/httpQueue.h @@ -22,9 +22,11 @@ extern "C" { #include +typedef void (*FHttpResultFp)(void *param, void *result, int32_t code, int32_t rows); + bool httpInitResultQueue(); void httpCleanupResultQueue(); -void httpDispatchToResultQueue(); +void httpDispatchToResultQueue(void *param, TAOS_RES *result, int32_t code, int32_t rows, FHttpResultFp fp); #ifdef __cplusplus } diff --git a/src/plugins/http/src/httpContext.c b/src/plugins/http/src/httpContext.c index ec60b984b2b0fcc72367295cef9d956165e678d5..22f464924e2c21cdeccb0c152457fad11a10294a 100644 --- a/src/plugins/http/src/httpContext.c +++ b/src/plugins/http/src/httpContext.c @@ -63,7 +63,7 @@ static void httpDestroyContext(void *data) { pContext->parser = NULL; } - taosTFree(pContext); + tfree(pContext); } bool httpInitContexts() { diff --git a/src/plugins/http/src/httpHandle.c b/src/plugins/http/src/httpHandle.c index b50217cfc4bffe0835de8a744a78997426db6d3b..7c565075147c03cc2d65055c5d4d4773424ab927 100644 --- a/src/plugins/http/src/httpHandle.c +++ b/src/plugins/http/src/httpHandle.c @@ -21,11 +21,11 @@ #include "httpHandle.h" bool httpDecodeRequest(HttpContext* pContext) { - if (pContext->decodeMethod->decodeFp == NULL) { + if (pContext->decodeMethod->fpDecode == NULL) { return false; } - return (*pContext->decodeMethod->decodeFp)(pContext); + return (*pContext->decodeMethod->fpDecode)(pContext); } /** diff --git a/src/plugins/http/src/httpQueue.c b/src/plugins/http/src/httpQueue.c index 43a8ddbd1a0d207addf0576951b2af9b2be16555..1c039abb4d4cd546f6673648217a2410f57ae8e7 100644 --- a/src/plugins/http/src/httpQueue.c +++ b/src/plugins/http/src/httpQueue.c @@ -25,6 +25,7 @@ #include "httpResp.h" #include "httpAuth.h" #include "httpSession.h" +#include "httpQueue.h" typedef struct { pthread_t thread; @@ -37,42 +38,45 @@ typedef struct { } SHttpWorkerPool; typedef struct { - void *param; - void *result; - int32_t numOfRows; - void (*fp)(void *param, void *result, int32_t numOfRows); + void * param; + void * result; + int32_t code; + int32_t rows; + FHttpResultFp fp; } SHttpResult; static SHttpWorkerPool tsHttpPool; static taos_qset tsHttpQset; static taos_queue tsHttpQueue; -void httpDispatchToResultQueue(void *param, TAOS_RES *result, int32_t numOfRows, void (*fp)(void *param, void *result, int32_t numOfRows)) { +void httpDispatchToResultQueue(void *param, TAOS_RES *result, int32_t code, int32_t rows, FHttpResultFp fp) { if (tsHttpQueue != NULL) { - SHttpResult *pMsg = (SHttpResult *)taosAllocateQitem(sizeof(SHttpResult)); + SHttpResult *pMsg = taosAllocateQitem(sizeof(SHttpResult)); pMsg->param = param; pMsg->result = result; - pMsg->numOfRows = numOfRows; + pMsg->code = code; + pMsg->rows = rows; pMsg->fp = fp; taosWriteQitem(tsHttpQueue, TAOS_QTYPE_RPC, pMsg); } else { - (*fp)(param, result, numOfRows); + (*fp)(param, result, code, rows); } } static void *httpProcessResultQueue(void *param) { SHttpResult *pMsg; - int32_t type; - void *unUsed; - + int32_t type; + void * unUsed; + while (1) { if (taosReadQitemFromQset(tsHttpQset, &type, (void **)&pMsg, &unUsed) == 0) { httpDebug("qset:%p, http queue got no message from qset, exiting", tsHttpQset); break; } - httpTrace("context:%p, res:%p will be processed in result 
queue", pMsg->param, pMsg->result); - (*pMsg->fp)(pMsg->param, pMsg->result, pMsg->numOfRows); + httpTrace("context:%p, res:%p will be processed in result queue, code:%d rows:%d", pMsg->param, pMsg->result, + pMsg->code, pMsg->rows); + (*pMsg->fp)(pMsg->param, pMsg->result, pMsg->code, pMsg->rows); taosFreeQitem(pMsg); } diff --git a/src/plugins/http/src/httpServer.c b/src/plugins/http/src/httpServer.c index f0a7249b512d2c9161592abe91822aefba786c20..4896d50c6cabf6d924161c97f7fe020fe81e2ebf 100644 --- a/src/plugins/http/src/httpServer.c +++ b/src/plugins/http/src/httpServer.c @@ -85,7 +85,7 @@ static void httpProcessHttpData(void *param) { while (1) { struct epoll_event events[HTTP_MAX_EVENTS]; //-1 means uncertainty, 0-nowait, 1-wait 1 ms, set it from -1 to 1 - fdNum = epoll_wait(pThread->pollFd, events, HTTP_MAX_EVENTS, 1); + fdNum = epoll_wait(pThread->pollFd, events, HTTP_MAX_EVENTS, TAOS_EPOLL_WAIT_TIME); if (pThread->stop) { httpDebug("%p, http thread get stop event, exiting...", pThread); break; diff --git a/src/plugins/http/src/httpSql.c b/src/plugins/http/src/httpSql.c index 70d644146cf5ebe15a42c8f00a2e6e4bd603d081..564f555c403b6f5b3deb5832da6505722104eff8 100644 --- a/src/plugins/http/src/httpSql.c +++ b/src/plugins/http/src/httpSql.c @@ -29,9 +29,9 @@ void httpProcessMultiSql(HttpContext *pContext); -void httpProcessMultiSqlRetrieveCallBack(void *param, TAOS_RES *result, int numOfRows); +void httpProcessMultiSqlRetrieveCallBack(void *param, TAOS_RES *result, int32_t numOfRows); -void httpProcessMultiSqlRetrieveCallBackImp(void *param, TAOS_RES *result, int numOfRows) { +void httpProcessMultiSqlRetrieveCallBackImp(void *param, TAOS_RES *result, int32_t code, int32_t numOfRows) { HttpContext *pContext = (HttpContext *)param; if (pContext == NULL) return; @@ -43,7 +43,7 @@ void httpProcessMultiSqlRetrieveCallBackImp(void *param, TAOS_RES *result, int n bool isContinue = false; - if (numOfRows > 0) { + if (code == TSDB_CODE_SUCCESS && numOfRows > 0) { if (singleCmd->cmdReturnType == HTTP_CMD_RETURN_TYPE_WITH_RETURN && encode->buildQueryJsonFp) { isContinue = (encode->buildQueryJsonFp)(pContext, singleCmd, result, numOfRows); } @@ -58,9 +58,9 @@ void httpProcessMultiSqlRetrieveCallBackImp(void *param, TAOS_RES *result, int n httpDebug("context:%p, fd:%d, user:%s, process pos:%d, stop retrieve, numOfRows:%d, sql:%s", pContext, pContext->fd, pContext->user, multiCmds->pos, numOfRows, sql); - if (numOfRows < 0) { + if (code < 0) { httpError("context:%p, fd:%d, user:%s, process pos:%d, retrieve failed code:%s, sql:%s", pContext, pContext->fd, - pContext->user, multiCmds->pos, tstrerror(numOfRows), sql); + pContext->user, multiCmds->pos, tstrerror(code), sql); } taos_free_result(result); @@ -73,15 +73,15 @@ void httpProcessMultiSqlRetrieveCallBackImp(void *param, TAOS_RES *result, int n } } -void httpProcessMultiSqlRetrieveCallBack(void *param, TAOS_RES *result, int numOfRows) { - httpDispatchToResultQueue(param, result, numOfRows, httpProcessMultiSqlRetrieveCallBackImp); +void httpProcessMultiSqlRetrieveCallBack(void *param, TAOS_RES *result, int32_t numOfRows) { + int32_t code = taos_errno(result); + httpDispatchToResultQueue(param, result, code, numOfRows, httpProcessMultiSqlRetrieveCallBackImp); } -void httpProcessMultiSqlCallBackImp(void *param, TAOS_RES *result, int code) { +void httpProcessMultiSqlCallBackImp(void *param, TAOS_RES *result, int32_t code, int32_t affectRowsInput) { HttpContext *pContext = (HttpContext *)param; if (pContext == NULL) return; - code = taos_errno(result); 
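+  // 'code' and 'affectRowsInput' are resolved with taos_errno()/taos_affected_rows() in
+  // httpProcessMultiSqlCallBack before the result is queued, so this callback already
+  // receives the final error code when it runs on the http result queue thread.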
HttpSqlCmds *multiCmds = pContext->multiCmds; HttpEncodeMethod *encode = pContext->encodeMethod; @@ -94,7 +94,7 @@ void httpProcessMultiSqlCallBackImp(void *param, TAOS_RES *result, int code) { return; } - if (code < 0) { + if (code != TSDB_CODE_SUCCESS) { if (encode->checkFinishedFp != NULL && !encode->checkFinishedFp(pContext, singleCmd, code)) { singleCmd->code = code; httpDebug("context:%p, fd:%d, user:%s, process pos jump to:%d, last code:%s, last sql:%s", pContext, pContext->fd, @@ -119,7 +119,7 @@ void httpProcessMultiSqlCallBackImp(void *param, TAOS_RES *result, int code) { bool isUpdate = tscIsUpdateQuery(result); if (isUpdate) { // not select or show commands - int affectRows = taos_affected_rows(result); + int32_t affectRows = taos_affected_rows(result); httpDebug("context:%p, fd:%d, user:%s, process pos:%d, affect rows:%d, sql:%s", pContext, pContext->fd, pContext->user, multiCmds->pos, affectRows, sql); @@ -156,8 +156,10 @@ void httpProcessMultiSqlCallBackImp(void *param, TAOS_RES *result, int code) { } } -void httpProcessMultiSqlCallBack(void *param, TAOS_RES *result, int unUsedCode) { - httpDispatchToResultQueue(param, result, unUsedCode, httpProcessMultiSqlCallBackImp); +void httpProcessMultiSqlCallBack(void *param, TAOS_RES *result, int32_t unUsedCode) { + int32_t code = taos_errno(result); + int32_t affectRows = taos_affected_rows(result); + httpDispatchToResultQueue(param, result, code, affectRows, httpProcessMultiSqlCallBackImp); } void httpProcessMultiSql(HttpContext *pContext) { @@ -202,9 +204,9 @@ void httpProcessMultiSqlCmd(HttpContext *pContext) { httpProcessMultiSql(pContext); } -void httpProcessSingleSqlRetrieveCallBack(void *param, TAOS_RES *result, int numOfRows); +void httpProcessSingleSqlRetrieveCallBack(void *param, TAOS_RES *result, int32_t numOfRows); -void httpProcessSingleSqlRetrieveCallBackImp(void *param, TAOS_RES *result, int numOfRows) { +void httpProcessSingleSqlRetrieveCallBackImp(void *param, TAOS_RES *result, int32_t code, int32_t numOfRows) { HttpContext *pContext = (HttpContext *)param; if (pContext == NULL) return; @@ -212,7 +214,7 @@ void httpProcessSingleSqlRetrieveCallBackImp(void *param, TAOS_RES *result, int bool isContinue = false; - if (numOfRows > 0) { + if (code == TSDB_CODE_SUCCESS && numOfRows > 0) { if (encode->buildQueryJsonFp) { isContinue = (encode->buildQueryJsonFp)(pContext, &pContext->singleCmd, result, numOfRows); } @@ -227,9 +229,9 @@ void httpProcessSingleSqlRetrieveCallBackImp(void *param, TAOS_RES *result, int httpDebug("context:%p, fd:%d, user:%s, stop retrieve, numOfRows:%d", pContext, pContext->fd, pContext->user, numOfRows); - if (numOfRows < 0) { + if (code < 0) { httpError("context:%p, fd:%d, user:%s, retrieve failed, code:%s", pContext, pContext->fd, pContext->user, - tstrerror(numOfRows)); + tstrerror(code)); } taos_free_result(result); @@ -242,30 +244,30 @@ void httpProcessSingleSqlRetrieveCallBackImp(void *param, TAOS_RES *result, int } } -void httpProcessSingleSqlRetrieveCallBack(void *param, TAOS_RES *result, int numOfRows) { - httpDispatchToResultQueue(param, result, numOfRows, httpProcessSingleSqlRetrieveCallBackImp); +void httpProcessSingleSqlRetrieveCallBack(void *param, TAOS_RES *result, int32_t numOfRows) { + int32_t code = taos_errno(result); + httpDispatchToResultQueue(param, result, code, numOfRows, httpProcessSingleSqlRetrieveCallBackImp); } -void httpProcessSingleSqlCallBackImp(void *param, TAOS_RES *result, int unUsedCode) { +void httpProcessSingleSqlCallBackImp(void *param, TAOS_RES *result, 
int32_t code, int32_t affectRowsInput) { HttpContext *pContext = (HttpContext *)param; if (pContext == NULL) return; - int32_t code = taos_errno(result); - HttpEncodeMethod *encode = pContext->encodeMethod; if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { httpError("context:%p, fd:%d, user:%s, query error, code:%s:inprogress, sqlObj:%p", pContext, pContext->fd, - pContext->user, tstrerror(code), (SSqlObj *)result); + pContext->user, tstrerror(code), result); return; } - if (code < 0) { + if (code != TSDB_CODE_SUCCESS) { SSqlObj *pObj = (SSqlObj *)result; if (code == TSDB_CODE_TSC_INVALID_SQL) { - httpError("context:%p, fd:%d, user:%s, query error, code:%s, sqlObj:%p, error:%s", pContext, - pContext->fd, pContext->user, tstrerror(code), pObj, pObj->cmd.payload); - httpSendTaosdInvalidSqlErrorResp(pContext, pObj->cmd.payload); + terrno = code; + httpError("context:%p, fd:%d, user:%s, query error, code:%s, sqlObj:%p, error:%s", pContext, pContext->fd, + pContext->user, tstrerror(code), pObj, taos_errstr(pObj)); + httpSendTaosdInvalidSqlErrorResp(pContext, taos_errstr(pObj)); } else { httpError("context:%p, fd:%d, user:%s, query error, code:%s, sqlObj:%p", pContext, pContext->fd, pContext->user, tstrerror(code), pObj); @@ -278,7 +280,8 @@ void httpProcessSingleSqlCallBackImp(void *param, TAOS_RES *result, int unUsedCo bool isUpdate = tscIsUpdateQuery(result); if (isUpdate) { // not select or show commands - int affectRows = taos_affected_rows(result); + int32_t affectRows = taos_affected_rows(result); + assert(affectRows == affectRowsInput); httpDebug("context:%p, fd:%d, user:%s, affect rows:%d, stop query, sqlObj:%p", pContext, pContext->fd, pContext->user, affectRows, result); @@ -308,8 +311,10 @@ void httpProcessSingleSqlCallBackImp(void *param, TAOS_RES *result, int unUsedCo } } -void httpProcessSingleSqlCallBack(void *param, TAOS_RES *result, int unUsedCode) { - httpDispatchToResultQueue(param, result, unUsedCode, httpProcessSingleSqlCallBackImp); +void httpProcessSingleSqlCallBack(void *param, TAOS_RES *result, int32_t unUsedCode) { + int32_t code = taos_errno(result); + int32_t affectRows = taos_affected_rows(result); + httpDispatchToResultQueue(param, result, code, affectRows, httpProcessSingleSqlCallBackImp); } void httpProcessSingleSqlCmd(HttpContext *pContext) { @@ -373,7 +378,7 @@ void httpExecCmd(HttpContext *pContext) { } } -void httpProcessRequestCb(void *param, TAOS_RES *result, int code) { +void httpProcessRequestCb(void *param, TAOS_RES *result, int32_t code) { HttpContext *pContext = param; taos_free_result(result); diff --git a/src/plugins/http/src/httpSystem.c b/src/plugins/http/src/httpSystem.c index 8993b233dd1dde9283679d337845d42f5d649715..3b8858b62e1297f0db5ece60c59a26e376b546f3 100644 --- a/src/plugins/http/src/httpSystem.c +++ b/src/plugins/http/src/httpSystem.c @@ -107,7 +107,7 @@ void httpCleanUpSystem() { httpCleanupResultQueue(); pthread_mutex_destroy(&tsHttpServer.serverMutex); - taosTFree(tsHttpServer.pThreads); + tfree(tsHttpServer.pThreads); tsHttpServer.pThreads = NULL; tsHttpServer.status = HTTP_SERVER_CLOSED; diff --git a/src/plugins/monitor/src/monitorMain.c b/src/plugins/monitor/src/monitorMain.c index 048f839b728fd7ed2a74d59744ebff5d1223cc33..24998b54cd1a0114697c411fa145465f52e1e48a 100644 --- a/src/plugins/monitor/src/monitorMain.c +++ b/src/plugins/monitor/src/monitorMain.c @@ -27,12 +27,12 @@ #include "monitor.h" #include "taoserror.h" -#define monitorFatal(...) 
{ if (monitorDebugFlag & DEBUG_FATAL) { taosPrintLog("MON FATAL ", 255, __VA_ARGS__); }} -#define monitorError(...) { if (monitorDebugFlag & DEBUG_ERROR) { taosPrintLog("MON ERROR ", 255, __VA_ARGS__); }} -#define monitorWarn(...) { if (monitorDebugFlag & DEBUG_WARN) { taosPrintLog("MON WARN ", 255, __VA_ARGS__); }} -#define monitorInfo(...) { if (monitorDebugFlag & DEBUG_INFO) { taosPrintLog("MON ", 255, __VA_ARGS__); }} -#define monitorDebug(...) { if (monitorDebugFlag & DEBUG_DEBUG) { taosPrintLog("MON ", monitorDebugFlag, __VA_ARGS__); }} -#define monitorTrace(...) { if (monitorDebugFlag & DEBUG_TRACE) { taosPrintLog("MON ", monitorDebugFlag, __VA_ARGS__); }} +#define mnFatal(...) { if (monitorDebugFlag & DEBUG_FATAL) { taosPrintLog("MON FATAL ", 255, __VA_ARGS__); }} +#define mnError(...) { if (monitorDebugFlag & DEBUG_ERROR) { taosPrintLog("MON ERROR ", 255, __VA_ARGS__); }} +#define mnWarn(...) { if (monitorDebugFlag & DEBUG_WARN) { taosPrintLog("MON WARN ", 255, __VA_ARGS__); }} +#define mnInfo(...) { if (monitorDebugFlag & DEBUG_INFO) { taosPrintLog("MON ", 255, __VA_ARGS__); }} +#define mnDebug(...) { if (monitorDebugFlag & DEBUG_DEBUG) { taosPrintLog("MON ", monitorDebugFlag, __VA_ARGS__); }} +#define mnTrace(...) { if (monitorDebugFlag & DEBUG_TRACE) { taosPrintLog("MON ", monitorDebugFlag, __VA_ARGS__); }} #define SQL_LENGTH 1030 #define LOG_LEN_STR 100 @@ -91,12 +91,12 @@ int32_t monitorInitSystem() { pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE); if (pthread_create(&tsMonitor.thread, &thAttr, monitorThreadFunc, NULL)) { - monitorError("failed to create thread to for monitor module, reason:%s", strerror(errno)); + mnError("failed to create thread to for monitor module, reason:%s", strerror(errno)); return -1; } pthread_attr_destroy(&thAttr); - monitorDebug("monitor thread is launched"); + mnDebug("monitor thread is launched"); monitorStartSystemFp = monitorStartSystem; monitorStopSystemFp = monitorStopSystem; @@ -107,12 +107,12 @@ int32_t monitorStartSystem() { taos_init(); tsMonitor.start = 1; monitorExecuteSQLFp = monitorExecuteSQL; - monitorInfo("monitor module start"); + mnInfo("monitor module start"); return 0; } static void *monitorThreadFunc(void *param) { - monitorDebug("starting to initialize monitor module ..."); + mnDebug("starting to initialize monitor module ..."); while (1) { static int32_t accessTimes = 0; @@ -121,7 +121,7 @@ static void *monitorThreadFunc(void *param) { if (tsMonitor.quiting) { tsMonitor.state = MON_STATE_NOT_INIT; - monitorInfo("monitor thread will quit, for taosd is quiting"); + mnInfo("monitor thread will quit, for taosd is quiting"); break; } else { taosGetDisk(); @@ -132,7 +132,7 @@ static void *monitorThreadFunc(void *param) { } if (dnodeGetDnodeId() <= 0) { - monitorDebug("dnode not initialized, waiting for 3000 ms to start monitor module"); + mnDebug("dnode not initialized, waiting for 3000 ms to start monitor module"); continue; } @@ -140,29 +140,31 @@ static void *monitorThreadFunc(void *param) { tsMonitor.state = MON_STATE_NOT_INIT; tsMonitor.conn = taos_connect(NULL, "monitor", tsInternalPass, "", 0); if (tsMonitor.conn == NULL) { - monitorError("failed to connect to database, reason:%s", tstrerror(terrno)); + mnError("failed to connect to database, reason:%s", tstrerror(terrno)); continue; } else { - monitorDebug("connect to database success"); + mnDebug("connect to database success"); } } if (tsMonitor.state == MON_STATE_NOT_INIT) { + int code = 0; + for (; tsMonitor.cmdIndex < MON_CMD_MAX; ++tsMonitor.cmdIndex) { 
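+        // Build and run each monitor initialization statement in turn; a failing statement
+        // breaks out with a non-zero code, which keeps the state at MON_STATE_NOT_INIT so
+        // the remaining statements are retried on a later pass of the monitor loop.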
monitorBuildMonitorSql(tsMonitor.sql, tsMonitor.cmdIndex); void *res = taos_query(tsMonitor.conn, tsMonitor.sql); - int code = taos_errno(res); + code = taos_errno(res); taos_free_result(res); if (code != 0) { - monitorError("failed to exec sql:%s, reason:%s", tsMonitor.sql, tstrerror(code)); + mnError("failed to exec sql:%s, reason:%s", tsMonitor.sql, tstrerror(code)); break; } else { - monitorDebug("successfully to exec sql:%s", tsMonitor.sql); + mnDebug("successfully to exec sql:%s", tsMonitor.sql); } } - if (tsMonitor.start) { + if (tsMonitor.start && code == 0) { tsMonitor.state = MON_STATE_INITED; } } @@ -174,7 +176,7 @@ static void *monitorThreadFunc(void *param) { } } - monitorInfo("monitor thread is stopped"); + mnInfo("monitor thread is stopped"); return NULL; } @@ -238,7 +240,7 @@ void monitorStopSystem() { tsMonitor.start = 0; tsMonitor.state = MON_STATE_NOT_INIT; monitorExecuteSQLFp = NULL; - monitorInfo("monitor module stopped"); + mnInfo("monitor module stopped"); } void monitorCleanUpSystem() { @@ -249,7 +251,7 @@ void monitorCleanUpSystem() { taos_close(tsMonitor.conn); tsMonitor.conn = NULL; } - monitorInfo("monitor module is cleaned up"); + mnInfo("monitor module is cleaned up"); } // unit is MB @@ -257,13 +259,13 @@ static int32_t monitorBuildMemorySql(char *sql) { float sysMemoryUsedMB = 0; bool suc = taosGetSysMemory(&sysMemoryUsedMB); if (!suc) { - monitorDebug("failed to get sys memory info"); + mnDebug("failed to get sys memory info"); } float procMemoryUsedMB = 0; suc = taosGetProcMemory(&procMemoryUsedMB); if (!suc) { - monitorDebug("failed to get proc memory info"); + mnDebug("failed to get proc memory info"); } return sprintf(sql, ", %f, %f, %d", procMemoryUsedMB, sysMemoryUsedMB, tsTotalMemoryMB); @@ -274,7 +276,7 @@ static int32_t monitorBuildCpuSql(char *sql) { float sysCpuUsage = 0, procCpuUsage = 0; bool suc = taosGetCpuUsage(&sysCpuUsage, &procCpuUsage); if (!suc) { - monitorDebug("failed to get cpu usage"); + mnDebug("failed to get cpu usage"); } if (sysCpuUsage <= procCpuUsage) { @@ -294,14 +296,14 @@ static int32_t monitorBuildBandSql(char *sql) { float bandSpeedKb = 0; bool suc = taosGetBandSpeed(&bandSpeedKb); if (!suc) { - monitorDebug("failed to get bandwidth speed"); + mnDebug("failed to get bandwidth speed"); } return sprintf(sql, ", %f", bandSpeedKb); } static int32_t monitorBuildReqSql(char *sql) { - SDnodeStatisInfo info = dnodeGetStatisInfo(); + SStatisInfo info = dnodeGetStatisInfo(); return sprintf(sql, ", %d, %d, %d)", info.httpReqNum, info.queryReqNum, info.submitReqNum); } @@ -309,7 +311,7 @@ static int32_t monitorBuildIoSql(char *sql) { float readKB = 0, writeKB = 0; bool suc = taosGetProcIO(&readKB, &writeKB); if (!suc) { - monitorDebug("failed to get io info"); + mnDebug("failed to get io info"); } return sprintf(sql, ", %f, %f", readKB, writeKB); @@ -332,19 +334,19 @@ static void monitorSaveSystemInfo() { taos_free_result(res); if (code != 0) { - monitorError("failed to save system info, reason:%s, sql:%s", tstrerror(code), tsMonitor.sql); + mnError("failed to save system info, reason:%s, sql:%s", tstrerror(code), tsMonitor.sql); } else { - monitorDebug("successfully to save system info, sql:%s", tsMonitor.sql); + mnDebug("successfully to save system info, sql:%s", tsMonitor.sql); } } static void montiorExecSqlCb(void *param, TAOS_RES *result, int32_t code) { int32_t c = taos_errno(result); if (c != TSDB_CODE_SUCCESS) { - monitorError("save %s failed, reason:%s", (char *)param, tstrerror(c)); + mnError("save %s failed, reason:%s", 
(char *)param, tstrerror(c)); } else { int32_t rows = taos_affected_rows(result); - monitorDebug("save %s succ, rows:%d", (char *)param, rows); + mnDebug("save %s succ, rows:%d", (char *)param, rows); } taos_free_result(result); @@ -380,7 +382,7 @@ void monitorSaveAcctLog(SAcctMonitorObj *pMon) { pMon->totalConns, pMon->maxConns, pMon->accessState); - monitorDebug("save account info, sql:%s", sql); + mnDebug("save account info, sql:%s", sql); taos_query_a(tsMonitor.conn, sql, montiorExecSqlCb, "account info"); } @@ -401,13 +403,13 @@ void monitorSaveLog(int32_t level, const char *const format, ...) { len += sprintf(sql + len, "', '%s')", tsLocalEp); sql[len++] = 0; - monitorDebug("save log, sql: %s", sql); + mnDebug("save log, sql: %s", sql); taos_query_a(tsMonitor.conn, sql, montiorExecSqlCb, "log"); } void monitorExecuteSQL(char *sql) { if (tsMonitor.state != MON_STATE_INITED) return; - monitorDebug("execute sql:%s", sql); + mnDebug("execute sql:%s", sql); taos_query_a(tsMonitor.conn, sql, montiorExecSqlCb, "sql"); } diff --git a/src/query/inc/qAst.h b/src/query/inc/qAst.h index d3e60c21dc376e6a1e88f959fbf4b43bf47ab4b0..28c1c7b838236d10a3f67fa7bbfc15a9b36c4612 100644 --- a/src/query/inc/qAst.h +++ b/src/query/inc/qAst.h @@ -74,20 +74,18 @@ typedef struct tExprNode { }; } tExprNode; -void tExprTreeDestroy(tExprNode **pExprs, void (*fp)(void*)); - void tExprTreeTraverse(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, SExprTraverseSupp *param); void tExprTreeCalcTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput, void *param, int32_t order, char *(*cb)(void *, const char*, int32_t)); -uint8_t getBinaryExprOptr(SStrToken *pToken); +tExprNode* exprTreeFromBinary(const void* data, size_t size); +tExprNode* exprTreeFromTableName(const char* tbnameCond); -void tExprNodeDestroy(tExprNode *pNode, void (*fp)(void *)); void exprTreeToBinary(SBufferWriter* bw, tExprNode* pExprTree); -tExprNode* exprTreeFromBinary(const void* data, size_t size); -tExprNode* exprTreeFromTableName(const char* tbnameCond); +void tExprNodeDestroy(tExprNode *pNode, void (*fp)(void *)); +void tExprTreeDestroy(tExprNode **pExprs, void (*fp)(void*)); #ifdef __cplusplus } diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index f392644e6736f528fa75f21ee2857c570c0c22c9..895b414a56287bb6e35bcc0c50adcc338edbee1b 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -33,17 +33,26 @@ struct SColumnFilterElem; typedef bool (*__filter_func_t)(struct SColumnFilterElem* pFilter, char* val1, char* val2); typedef int32_t (*__block_search_fn_t)(char* data, int32_t num, int64_t key, int32_t order); -typedef struct SPosInfo { - int32_t pageId:20; - int32_t rowId:12; -} SPosInfo; - typedef struct SGroupResInfo { int32_t groupId; int32_t numOfDataPages; - SPosInfo pos; + int32_t pageId; + int32_t rowId; } SGroupResInfo; +typedef struct SResultRowPool { + int32_t elemSize; + int32_t blockSize; + int32_t numOfElemPerBlock; + + struct { + int32_t blockIndex; + int32_t pos; + } position; + + SArray* pData; // SArray +} SResultRowPool; + typedef struct SSqlGroupbyExpr { int16_t tableIndex; SArray* columnInfo; // SArray, group by columns information @@ -52,13 +61,14 @@ typedef struct SSqlGroupbyExpr { int16_t orderType; // order by type: asc/desc } SSqlGroupbyExpr; -typedef struct SWindowResult { - SPosInfo pos; // Position of current result in disk-based output buffer +typedef struct SResultRow { + int32_t pageId; // pageId & rowId is the position of current result in disk-based output buffer + 
int32_t rowId:15; + bool closed:1; // this result status: closed or opened uint16_t numOfRows; // number of rows of current time window - bool closed; // this result status: closed or opened - SResultInfo* resultInfo; // For each result column, there is a resultInfo + SResultRowCellInfo* pCellInfo; // For each result column, there is a resultInfo union {STimeWindow win; char* key;}; // start key of current time window -} SWindowResult; +} SResultRow; /** * If the number of generated results is greater than this value, @@ -72,16 +82,14 @@ typedef struct SResultRec { } SResultRec; typedef struct SWindowResInfo { - SWindowResult* pResult; // result list - SHashObj* hashList; // hash list for quick access - int16_t type; // data type for hash key + SResultRow** pResult; // result list + int16_t type:8; // data type for hash key + int32_t size:24; // number of result set + int32_t threshold; // threshold to halt query and return the generated results. int32_t capacity; // max capacity int32_t curIndex; // current start active index - int32_t size; // number of result set int64_t startTime; // start time of the first time window for sliding query int64_t prevSKey; // previous (not completed) sliding window start key - int64_t threshold; // threshold to halt query and return the generated results. - int64_t interval; // time window interval } SWindowResInfo; typedef struct SColumnFilterElem { @@ -97,7 +105,7 @@ typedef struct SSingleColumnFilterInfo { SColumnFilterElem* pFilters; } SSingleColumnFilterInfo; -typedef struct STableQueryInfo { // todo merge with the STableQueryInfo struct +typedef struct STableQueryInfo { TSKEY lastKey; int32_t groupIndex; // group id in table list int16_t queryRangeSet; // denote if the query range is set, only available for interval query @@ -125,7 +133,9 @@ typedef struct SQueryCostInfo { uint32_t discardBlocks; uint64_t elapsedTime; uint64_t firstStageMergeTime; - uint64_t internalSupSize; + uint64_t winInfoSize; + uint64_t tableInfoSize; + uint64_t hashSize; uint64_t numOfTimeWindows; } SQueryCostInfo; @@ -142,7 +152,10 @@ typedef struct SQuery { SLimitVal limit; int32_t rowSize; SSqlGroupbyExpr* pGroupbyExpr; - SExprInfo* pSelectExpr; + SExprInfo* pExpr1; + SExprInfo* pExpr2; + int32_t numOfExpr2; + SColumnInfo* colList; SColumnInfo* tagColList; int32_t numOfFilterCols; @@ -158,11 +171,10 @@ typedef struct SQuery { typedef struct SQueryRuntimeEnv { jmp_buf env; - SResultInfo* resultInfo; // todo refactor to merge with SWindowResInfo SQuery* pQuery; SQLFunctionCtx* pCtx; int32_t numOfRowsPerPage; - int16_t offset[TSDB_MAX_COLUMNS]; + uint16_t* offset; uint16_t scanFlag; // denotes reversed scan of data or not SFillInfo* pFillInfo; SWindowResInfo windowResInfo; @@ -178,6 +190,11 @@ typedef struct SQueryRuntimeEnv { int32_t interBufSize; // intermediate buffer sizse int32_t prevGroupId; // previous executed group id SDiskbasedResultBuf* pResultBuf; // query result buffer based on blocked-wised disk file + SHashObj* pResultRowHashTable; // quick locate the window object for each result + char* keyBuf; // window key buffer + SResultRowPool* pool; // window result object pool + + int32_t* rowCellInfoOffset;// offset value for each row result cell info } SQueryRuntimeEnv; enum { @@ -185,12 +202,6 @@ enum { QUERY_RESULT_READY = 2, }; -typedef struct SMemRef { - int32_t ref; - void *mem; - void *imem; -} SMemRef; - typedef struct SQInfo { void* signature; int32_t code; // error code to returned to client diff --git a/src/query/inc/qFill.h b/src/query/inc/qFill.h index 
6d44fee09557fa8b8d73527677a2c7b238ecb42c..385ae8854327b036cbb6ee46de70e97ae56ce25b 100644 --- a/src/query/inc/qFill.h +++ b/src/query/inc/qFill.h @@ -28,6 +28,7 @@ typedef struct { STColumn col; // column info int16_t functionId; // sql function id int16_t flag; // column flag: TAG COLUMN|NORMAL COLUMN + int16_t tagIndex; // index of current tag in SFillTagColInfo array list union {int64_t i; double d;} fillVal; } SFillColInfo; @@ -37,27 +38,29 @@ typedef struct { } SFillTagColInfo; typedef struct SFillInfo { - TSKEY start; // start timestamp - TSKEY endKey; // endKey for fill - int32_t order; // order [TSDB_ORDER_ASC|TSDB_ORDER_DESC] - int32_t fillType; // fill type - int32_t numOfRows; // number of rows in the input data block - int32_t rowIdx; // rowIdx - int32_t numOfTotal; // number of filled rows in one round - int32_t numOfCurrent; // number of filled rows in current results - - int32_t numOfTags; // number of tags - int32_t numOfCols; // number of columns, including the tags columns - int32_t rowSize; // size of each row -// char ** pTags; // tags value for current interpolation - SFillTagColInfo* pTags; // tags value for filling gap + TSKEY start; // start timestamp + TSKEY end; // endKey for fill + TSKEY currentKey; // current active timestamp, the value may be changed during the fill procedure. + int32_t order; // order [TSDB_ORDER_ASC|TSDB_ORDER_DESC] + int32_t type; // fill type + int32_t numOfRows; // number of rows in the input data block + int32_t index; // active row index + int32_t numOfTotal; // number of filled rows in one round + int32_t numOfCurrent; // number of filled rows in current results + + int32_t numOfTags; // number of tags + int32_t numOfCols; // number of columns, including the tags columns + int32_t rowSize; // size of each row SInterval interval; - char * prevValues; // previous row of data, to generate the interpolation results - char * nextValues; // next row of data - char** pData; // original result data block involved in filling data - int32_t capacityInRows; // data buffer size in rows - int8_t precision; // time resoluation - SFillColInfo* pFillCol; // column info for fill operations + char * prevValues; // previous row of data, to generate the interpolation results + char * nextValues; // next row of data + char** pData; // original result data block involved in filling data + int32_t alloc; // data buffer size in rows + int8_t precision; // time resoluation + + SFillColInfo* pFillCol; // column info for fill operations + SFillTagColInfo* pTags; // tags value for filling gap + void* handle; // for dubug purpose } SFillInfo; typedef struct SPoint { @@ -67,25 +70,25 @@ typedef struct SPoint { SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols, int64_t slidingTime, int8_t slidingUnit, int8_t precision, int32_t fillType, - SFillColInfo* pFillCol); + SFillColInfo* pFillCol, void* handle); void taosResetFillInfo(SFillInfo* pFillInfo, TSKEY startTimestamp); -void* taosDestoryFillInfo(SFillInfo *pFillInfo); +void* taosDestroyFillInfo(SFillInfo *pFillInfo); void taosFillSetStartInfo(SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey); -void taosFillCopyInputDataFromFilePage(SFillInfo* pFillInfo, tFilePage** pInput); +void taosFillCopyInputDataFromFilePage(SFillInfo* pFillInfo, const tFilePage** pInput); -void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, tFilePage* pInput); +void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, const tFilePage* pInput); -int64_t 
getFilledNumOfRes(SFillInfo* pFillInfo, int64_t ekey, int32_t maxNumOfRows); +int64_t getNumOfResWithFill(SFillInfo* pFillInfo, int64_t ekey, int32_t maxNumOfRows); int32_t taosNumOfRemainRows(SFillInfo *pFillInfo); -int taosDoLinearInterpolation(int32_t type, SPoint *point1, SPoint *point2, SPoint *point); +int32_t taosGetLinearInterpolationVal(int32_t type, SPoint *point1, SPoint *point2, SPoint *point); -int64_t taosGenerateDataBlock(SFillInfo* pFillInfo, tFilePage** output, int32_t capacity); +int64_t taosFillResultDataBlock(SFillInfo* pFillInfo, tFilePage** output, int32_t capacity); #ifdef __cplusplus } diff --git a/src/query/inc/qHistogram.h b/src/query/inc/qHistogram.h index bb058449e806c8270dbf141ca3a81103f63c6e5c..442e61750b06d269ef48c5e74ede7646f7ac62e1 100644 --- a/src/query/inc/qHistogram.h +++ b/src/query/inc/qHistogram.h @@ -43,7 +43,8 @@ typedef struct SHistogramInfo { int32_t numOfElems; int32_t numOfEntries; int32_t maxEntries; - + double min; + double max; #if defined(USE_ARRAYLIST) SHistBin* elems; #else @@ -52,9 +53,6 @@ typedef struct SHistogramInfo { int32_t maxIndex; bool ordered; #endif - - double min; - double max; } SHistogramInfo; SHistogramInfo* tHistogramCreate(int32_t numOfBins); diff --git a/src/query/inc/qSqlparser.h b/src/query/inc/qSqlparser.h index bc8f9a5e23df72f82bcb3bb5140bec42fe426268..513ab090f921b15ca18e384643ab79f51a119a8d 100644 --- a/src/query/inc/qSqlparser.h +++ b/src/query/inc/qSqlparser.h @@ -20,10 +20,10 @@ extern "C" { #endif -#include #include "taos.h" #include "taosmsg.h" #include "tstoken.h" +#include "tstrbuild.h" #include "tvariant.h" #define ParseTOKENTYPE SStrToken @@ -37,12 +37,6 @@ extern char tTokenTypeSwitcher[13]; (x) = tTokenTypeSwitcher[(x)]; \ } \ } while (0) - -typedef struct tFieldList { - int32_t nField; - int32_t nAlloc; - TAOS_FIELD *p; -} tFieldList; typedef struct SLimitVal { int64_t limit; @@ -59,12 +53,6 @@ typedef struct tVariantListItem { uint8_t sortOrder; } tVariantListItem; -typedef struct tVariantList { - int32_t nExpr; /* Number of expressions on the list */ - int32_t nAlloc; /* Number of entries allocated below */ - tVariantListItem *a; /* One entry for each expression */ -} tVariantList; - typedef struct SIntervalVal { SStrToken interval; SStrToken offset; @@ -72,16 +60,16 @@ typedef struct SIntervalVal { typedef struct SQuerySQL { struct tSQLExprList *pSelection; // select clause - tVariantList * from; // from clause + SArray * from; // from clause SArray struct tSQLExpr * pWhere; // where clause [optional] - tVariantList * pGroupby; // groupby clause, only for tags[optional] - tVariantList * pSortOrder; // orderby [optional] + SArray * pGroupby; // groupby clause, only for tags[optional], SArray + SArray * pSortOrder; // orderby [optional], SArray SStrToken interval; // interval [optional] SStrToken offset; // offset window [optional] SStrToken sliding; // sliding window [optional] SLimitVal limit; // limit offset [optional] SLimitVal slimit; // group limit offset [optional] - tVariantList * fillType; // fill type[optional] + SArray * fillType; // fill type[optional], SArray SStrToken selectToken; // sql string } SQuerySQL; @@ -91,26 +79,25 @@ typedef struct SCreateTableSQL { int8_t type; // create normal table/from super table/ stream struct { - tFieldList *pTagColumns; // for normal table, pTagColumns = NULL; - tFieldList *pColumns; + SArray *pTagColumns; // SArray + SArray *pColumns; // SArray } colInfo; struct { - SStrToken stableName; // super table name, for using clause - tVariantList *pTagVals; // 
create by using metric, tag value - STagData tagdata; + SStrToken stableName; // super table name, for using clause + SArray *pTagVals; // create by using metric, tag value + STagData tagdata; } usingInfo; - SQuerySQL *pSelect; + SQuerySQL *pSelect; } SCreateTableSQL; typedef struct SAlterTableSQL { SStrToken name; int16_t type; STagData tagData; - - tFieldList * pAddColumns; - tVariantList *varList; // set t=val or: change src dst + SArray *pAddColumns; // SArray + SArray *varList; // set t=val or: change src dst, SArray } SAlterTableSQL; typedef struct SCreateDBInfo { @@ -129,8 +116,9 @@ typedef struct SCreateDBInfo { int32_t compressionLevel; SStrToken precision; bool ignoreExists; + int8_t update; - tVariantList *keep; + SArray *keep; } SCreateDBInfo; typedef struct SCreateAcctSQL { @@ -168,7 +156,7 @@ typedef struct tDCLSQL { SCreateDBInfo dbOpt; SCreateAcctSQL acctOpt; SShowInfo showOpt; - SStrToken ip; + SStrToken ip; }; SUserInfo user; @@ -181,33 +169,32 @@ typedef struct SSubclauseInfo { // "UNION" multiple select sub-clause } SSubclauseInfo; typedef struct SSqlInfo { - int32_t type; - bool valid; + int32_t type; + bool valid; union { SCreateTableSQL *pCreateTableInfo; - SAlterTableSQL * pAlterInfo; - tDCLSQL * pDCLInfo; + SAlterTableSQL *pAlterInfo; + tDCLSQL *pDCLInfo; }; - SSubclauseInfo subclauseInfo; - char pzErrMsg[256]; + SSubclauseInfo subclauseInfo; + char pzErrMsg[256]; } SSqlInfo; typedef struct tSQLExpr { - // TK_FUNCTION: sql function, TK_LE: less than(binary expr) - uint32_t nSQLOptr; + uint32_t nSQLOptr; // TK_FUNCTION: sql function, TK_LE: less than(binary expr) // the full sql string of function(col, param), which is actually the raw // field name, since the function name is kept in nSQLOptr already - SStrToken operand; - SStrToken colInfo; // field id - tVariant val; // value only for string, float, int - + SStrToken operand; + SStrToken colInfo; // field id + tVariant val; // value only for string, float, int + SStrToken token; // original sql expr string + struct tSQLExpr *pLeft; // left child struct tSQLExpr *pRight; // right child struct tSQLExprList *pParam; // function parameters - SStrToken token; // original sql expr string } tSQLExpr; // used in select clause. 
select from xxx @@ -223,13 +210,6 @@ typedef struct tSQLExprList { tSQLExprItem *a; /* One entry for each expression */ } tSQLExprList; -typedef struct tSQLExprListList { - int32_t nList; /* Number of expressions on the list */ - int32_t nAlloc; /* Number of entries allocated below */ - tSQLExprList **a; /* one entry for each row */ -} tSQLExprListList; - - /** * * @param yyp The parser @@ -245,16 +225,9 @@ void Parse(void *yyp, int yymajor, ParseTOKENTYPE yyminor, SSqlInfo *); */ void ParseFree(void *p, void (*freeProc)(void *)); -tVariantList *tVariantListAppend(tVariantList *pList, tVariant *pVar, uint8_t sortOrder); - -tVariantList *tVariantListInsert(tVariantList *pList, tVariant *pVar, uint8_t sortOrder, int32_t index); - -tVariantList *tVariantListAppendToken(tVariantList *pList, SStrToken *pAliasToken, uint8_t sortOrder); -void tVariantListDestroy(tVariantList *pList); - -tFieldList *tFieldListAppend(tFieldList *pList, TAOS_FIELD *pField); - -void tFieldListDestroy(tFieldList *pList); +SArray *tVariantListAppend(SArray *pList, tVariant *pVar, uint8_t sortOrder); +SArray *tVariantListInsert(SArray *pList, tVariant *pVar, uint8_t sortOrder, int32_t index); +SArray *tVariantListAppendToken(SArray *pList, SStrToken *pAliasToken, uint8_t sortOrder); tSQLExpr *tSQLExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optType); @@ -264,17 +237,16 @@ tSQLExprList *tSQLExprListAppend(tSQLExprList *pList, tSQLExpr *pNode, SStrToken void tSQLExprListDestroy(tSQLExprList *pList); -SQuerySQL *tSetQuerySQLElems(SStrToken *pSelectToken, tSQLExprList *pSelection, tVariantList *pFrom, tSQLExpr *pWhere, - tVariantList *pGroupby, tVariantList *pSortOrder, SIntervalVal *pInterval, - SStrToken *pSliding, tVariantList *pFill, SLimitVal *pLimit, SLimitVal *pGLimit); +SQuerySQL *tSetQuerySQLElems(SStrToken *pSelectToken, tSQLExprList *pSelection, SArray *pFrom, tSQLExpr *pWhere, + SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval, + SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pGLimit); -SCreateTableSQL *tSetCreateSQLElems(tFieldList *pCols, tFieldList *pTags, SStrToken *pMetricName, - tVariantList *pTagVals, SQuerySQL *pSelect, int32_t type); +SCreateTableSQL *tSetCreateSQLElems(SArray *pCols, SArray *pTags, SStrToken *pMetricName, + SArray *pTagVals, SQuerySQL *pSelect, int32_t type); -void tSQLExprNodeDestroy(tSQLExpr *pExpr); -tSQLExpr *tSQLExprNodeClone(tSQLExpr *pExpr); +void tSQLExprNodeDestroy(tSQLExpr *pExpr); -SAlterTableSQL *tAlterTableSQLElems(SStrToken *pMeterName, tFieldList *pCols, tVariantList *pVals, int32_t type); +SAlterTableSQL *tAlterTableSQLElems(SStrToken *pMeterName, SArray *pCols, SArray *pVals, int32_t type); void destroyAllSelectClause(SSubclauseInfo *pSql); void doDestroyQuerySql(SQuerySQL *pSql); @@ -316,9 +288,6 @@ void tSQLSetColumnType(TAOS_FIELD *pField, SStrToken *pToken); void *ParseAlloc(void *(*mallocProc)(size_t)); -// convert the sql filter expression into binary data -int32_t tSQLExprToBinary(tSQLExpr* pExpr, SStringBuilder* sb); - enum { TSQL_NODE_TYPE_EXPR = 0x1, TSQL_NODE_TYPE_ID = 0x2, diff --git a/src/query/inc/qTsbuf.h b/src/query/inc/qTsbuf.h index 46e6f79014f32c9b3052824bdb702556f5c4f060..90bd64336fdeed91deb68b9b490224a7fb29bc80 100644 --- a/src/query/inc/qTsbuf.h +++ b/src/query/inc/qTsbuf.h @@ -26,7 +26,7 @@ extern "C" { #define MEM_BUF_SIZE (1 << 20) #define TS_COMP_FILE_MAGIC 0x87F5EC4C -#define TS_COMP_FILE_VNODE_MAX 512 +#define TS_COMP_FILE_GROUP_MAX 512 typedef struct STSList { char* rawBuf; @@ -35,17 +35,10 @@ 
typedef struct STSList { int32_t len; } STSList; -typedef struct STSRawBlock { - int32_t vnode; - int64_t tag; - TSKEY* ts; - int32_t len; -} STSRawBlock; - typedef struct STSElem { TSKEY ts; - tVariant tag; - int32_t vnode; + tVariant* tag; + int32_t id; } STSElem; typedef struct STSCursor { @@ -67,26 +60,27 @@ typedef struct STSBlock { * The size of buffer file should not be greater than 2G, * and the offset of int32_t type is enough */ -typedef struct STSVnodeBlockInfo { - int32_t vnode; // vnode id +typedef struct STSGroupBlockInfo { + int32_t id; // group id int32_t offset; // offset set value in file int32_t numOfBlocks; // number of total blocks int32_t compLen; // compressed size -} STSVnodeBlockInfo; +} STSGroupBlockInfo; -typedef struct STSVnodeBlockInfoEx { - STSVnodeBlockInfo info; +typedef struct STSGroupBlockInfoEx { + STSGroupBlockInfo info; int32_t len; // length before compress -} STSVnodeBlockInfoEx; +} STSGroupBlockInfoEx; typedef struct STSBuf { FILE* f; char path[PATH_MAX]; uint32_t fileSize; - STSVnodeBlockInfoEx* pData; + // todo use array + STSGroupBlockInfoEx* pData; uint32_t numOfAlloc; - uint32_t numOfVnodes; + uint32_t numOfGroups; char* assistBuf; int32_t bufSize; @@ -100,30 +94,31 @@ typedef struct STSBuf { typedef struct STSBufFileHeader { uint32_t magic; // file magic number - uint32_t numOfVnode; // number of vnode stored in current file + uint32_t numOfGroup; // number of group stored in current file int32_t tsOrder; // timestamp order in current file } STSBufFileHeader; STSBuf* tsBufCreate(bool autoDelete, int32_t order); STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete); -STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_t len, int32_t tsOrder); +STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_t len, int32_t tsOrder, int32_t id); void* tsBufDestroy(STSBuf* pTSBuf); -void tsBufAppend(STSBuf* pTSBuf, int32_t vnodeId, tVariant* tag, const char* pData, int32_t len); -int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf, int32_t vnodeIdx); +void tsBufAppend(STSBuf* pTSBuf, int32_t id, tVariant* tag, const char* pData, int32_t len); +int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf); STSBuf* tsBufClone(STSBuf* pTSBuf); -STSVnodeBlockInfo* tsBufGetVnodeBlockInfo(STSBuf* pTSBuf, int32_t vnodeId); +STSGroupBlockInfo* tsBufGetGroupBlockInfo(STSBuf* pTSBuf, int32_t id); void tsBufFlush(STSBuf* pTSBuf); void tsBufResetPos(STSBuf* pTSBuf); STSElem tsBufGetElem(STSBuf* pTSBuf); + bool tsBufNextPos(STSBuf* pTSBuf); -STSElem tsBufGetElemStartPos(STSBuf* pTSBuf, int32_t vnodeId, tVariant* tag); +STSElem tsBufGetElemStartPos(STSBuf* pTSBuf, int32_t id, tVariant* tag); STSCursor tsBufGetCursor(STSBuf* pTSBuf); void tsBufSetTraverseOrder(STSBuf* pTSBuf, int32_t order); @@ -136,6 +131,16 @@ void tsBufSetCursor(STSBuf* pTSBuf, STSCursor* pCur); */ void tsBufDisplay(STSBuf* pTSBuf); +int32_t tsBufGetNumOfGroup(STSBuf* pTSBuf); + +void tsBufGetGroupIdList(STSBuf* pTSBuf, int32_t* num, int32_t** id); + +int32_t dumpFileBlockByGroupId(STSBuf* pTSBuf, int32_t id, void* buf, int32_t* len, int32_t* numOfBlocks); + +STSElem tsBufFindElemStartPosByTag(STSBuf* pTSBuf, tVariant* pTag); + +bool tsBufIsValidElem(STSElem* pElem); + #ifdef __cplusplus } #endif diff --git a/src/query/inc/qUtil.h b/src/query/inc/qUtil.h index 5320e5622e70a93c06edb4a1e5a3fe568498ef21..4cd0e60ebed3bc01c40fa8bee1a255fe9d0b132f 100644 --- a/src/query/inc/qUtil.h +++ b/src/query/inc/qUtil.h @@ -15,17 +15,26 @@ #ifndef 
TDENGINE_QUERYUTIL_H #define TDENGINE_QUERYUTIL_H +#define SET_RES_WINDOW_KEY(_k, _ori, _len, _uid) \ + do { \ + assert(sizeof(_uid) == sizeof(uint64_t)); \ + *(uint64_t *)(_k) = (_uid); \ + memcpy((_k) + sizeof(uint64_t), (_ori), (_len)); \ + } while (0) + +#define GET_RES_WINDOW_KEY_LEN(_l) ((_l) + sizeof(uint64_t)) + int32_t getOutputInterResultBufSize(SQuery* pQuery); -void clearTimeWindowResBuf(SQueryRuntimeEnv* pRuntimeEnv, SWindowResult* pOneOutputRes); -void copyTimeWindowResBuf(SQueryRuntimeEnv* pRuntimeEnv, SWindowResult* dst, const SWindowResult* src); +void clearResultRow(SQueryRuntimeEnv* pRuntimeEnv, SResultRow* pRow); +void copyResultRow(SQueryRuntimeEnv* pRuntimeEnv, SResultRow* dst, const SResultRow* src); +SResultRowCellInfo* getResultCell(SQueryRuntimeEnv* pRuntimeEnv, const SResultRow* pRow, int32_t index); -int32_t initWindowResInfo(SWindowResInfo* pWindowResInfo, SQueryRuntimeEnv* pRuntimeEnv, int32_t size, - int32_t threshold, int16_t type); +int32_t initWindowResInfo(SWindowResInfo* pWindowResInfo, int32_t size, int32_t threshold, int16_t type); void cleanupTimeWindowInfo(SWindowResInfo* pWindowResInfo); void resetTimeWindowInfo(SQueryRuntimeEnv* pRuntimeEnv, SWindowResInfo* pWindowResInfo); -void clearFirstNTimeWindow(SQueryRuntimeEnv *pRuntimeEnv, int32_t num); +void clearFirstNWindowRes(SQueryRuntimeEnv *pRuntimeEnv, int32_t num); void clearClosedTimeWindow(SQueryRuntimeEnv* pRuntimeEnv); int32_t numOfClosedTimeWindow(SWindowResInfo* pWindowResInfo); @@ -33,27 +42,27 @@ void closeTimeWindow(SWindowResInfo* pWindowResInfo, int32_t slot); void closeAllTimeWindow(SWindowResInfo* pWindowResInfo); void removeRedundantWindow(SWindowResInfo *pWindowResInfo, TSKEY lastKey, int32_t order); -static FORCE_INLINE SWindowResult *getWindowResult(SWindowResInfo *pWindowResInfo, int32_t slot) { +static FORCE_INLINE SResultRow *getResultRow(SWindowResInfo *pWindowResInfo, int32_t slot) { assert(pWindowResInfo != NULL && slot >= 0 && slot < pWindowResInfo->size); - return &pWindowResInfo->pResult[slot]; + return pWindowResInfo->pResult[slot]; } #define curTimeWindowIndex(_winres) ((_winres)->curIndex) -#define GET_ROW_PARAM_FOR_MULTIOUTPUT(_q, tbq, sq) (((tbq) && (!sq))? (_q)->pSelectExpr[1].base.arg->argValue.i64:1) +#define GET_ROW_PARAM_FOR_MULTIOUTPUT(_q, tbq, sq) (((tbq) && (!sq))? 
(_q)->pExpr1[1].base.arg->argValue.i64:1) bool isWindowResClosed(SWindowResInfo *pWindowResInfo, int32_t slot); -int32_t createQueryResultInfo(SQuery *pQuery, SWindowResult *pResultRow, bool isSTableQuery, size_t interBufSize); +int32_t initResultRow(SResultRow *pResultRow); -static FORCE_INLINE char *getPosInResultPage(SQueryRuntimeEnv *pRuntimeEnv, int32_t columnIndex, SWindowResult *pResult, +static FORCE_INLINE char *getPosInResultPage(SQueryRuntimeEnv *pRuntimeEnv, int32_t columnIndex, SResultRow *pResult, tFilePage* page) { assert(pResult != NULL && pRuntimeEnv != NULL); SQuery *pQuery = pRuntimeEnv->pQuery; - int32_t realRowId = (int32_t)(pResult->pos.rowId * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pRuntimeEnv->topBotQuery, pRuntimeEnv->stableQuery)); + int32_t realRowId = (int32_t)(pResult->rowId * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pRuntimeEnv->topBotQuery, pRuntimeEnv->stableQuery)); return ((char *)page->data) + pRuntimeEnv->offset[columnIndex] * pRuntimeEnv->numOfRowsPerPage + - pQuery->pSelectExpr[columnIndex].bytes * realRowId; + pQuery->pExpr1[columnIndex].bytes * realRowId; } bool isNull_filter(SColumnFilterElem *pFilter, char* minval, char* maxval); @@ -62,4 +71,14 @@ bool notNull_filter(SColumnFilterElem *pFilter, char* minval, char* maxval); __filter_func_t *getRangeFilterFuncArray(int32_t type); __filter_func_t *getValueFilterFuncArray(int32_t type); +size_t getWindowResultSize(SQueryRuntimeEnv* pRuntimeEnv); + +SResultRowPool* initResultRowPool(size_t size); +SResultRow* getNewResultRow(SResultRowPool* p); +int64_t getResultRowPoolMemSize(SResultRowPool* p); +void* destroyResultRowPool(SResultRowPool* p); +int32_t getNumOfAllocatedResultRows(SResultRowPool* p); +int32_t getNumOfUsedResultRows(SResultRowPool* p); + + #endif // TDENGINE_QUERYUTIL_H diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y index e5d1185330a47b79fec1becd74a01ed779db8c80..09b1e1592aa53dcec817472a068e0378b7f4adf1 100644 --- a/src/query/inc/sql.y +++ b/src/query/inc/sql.y @@ -223,8 +223,8 @@ acct_optr(Y) ::= pps(C) tseries(D) storage(P) streams(F) qtime(Q) dbs(E) users(K Y.stat = M; } -%type keep {tVariantList*} -%destructor keep {tVariantListDestroy($$);} +%type keep {SArray*} +%destructor keep {taosArrayDestroy($$);} keep(Y) ::= KEEP tagitemlist(X). { Y = X; } cache(Y) ::= CACHE INTEGER(X). { Y = X; } @@ -239,6 +239,7 @@ wal(Y) ::= WAL INTEGER(X). { Y = X; } fsync(Y) ::= FSYNC INTEGER(X). { Y = X; } comp(Y) ::= COMP INTEGER(X). { Y = X; } prec(Y) ::= PRECISION STRING(X). { Y = X; } +update(Y) ::= UPDATE INTEGER(X). { Y = X; } %type db_optr {SCreateDBInfo} db_optr(Y) ::= . {setDefaultCreateDbOption(&Y);} @@ -256,6 +257,7 @@ db_optr(Y) ::= db_optr(Z) fsync(X). { Y = Z; Y.fsyncPeriod = strtol(X.z db_optr(Y) ::= db_optr(Z) comp(X). { Y = Z; Y.compressionLevel = strtol(X.z, NULL, 10); } db_optr(Y) ::= db_optr(Z) prec(X). { Y = Z; Y.precision = X; } db_optr(Y) ::= db_optr(Z) keep(X). { Y = Z; Y.keep = X; } +db_optr(Y) ::= db_optr(Z) update(X). { Y = Z; Y.update = strtol(X.z, NULL, 10); } %type alter_db_optr {SCreateDBInfo} alter_db_optr(Y) ::= . { setDefaultCreateDbOption(&Y);} @@ -267,6 +269,7 @@ alter_db_optr(Y) ::= alter_db_optr(Z) blocks(X). { Y = Z; Y.numOfBlocks = s alter_db_optr(Y) ::= alter_db_optr(Z) comp(X). { Y = Z; Y.compressionLevel = strtol(X.z, NULL, 10); } alter_db_optr(Y) ::= alter_db_optr(Z) wal(X). { Y = Z; Y.walLevel = strtol(X.z, NULL, 10); } alter_db_optr(Y) ::= alter_db_optr(Z) fsync(X). 
{ Y = Z; Y.fsyncPeriod = strtol(X.z, NULL, 10); } +alter_db_optr(Y) ::= alter_db_optr(Z) update(X). { Y = Z; Y.update = strtol(X.z, NULL, 10); } %type typename {TAOS_FIELD} typename(A) ::= ids(X). { @@ -324,10 +327,10 @@ create_table_args(A) ::= AS select(S). { } %type column{TAOS_FIELD} -%type columnlist{tFieldList*} -%destructor columnlist {tFieldListDestroy($$);} -columnlist(A) ::= columnlist(X) COMMA column(Y). {A = tFieldListAppend(X, &Y); } -columnlist(A) ::= column(X). {A = tFieldListAppend(NULL, &X);} +%type columnlist{SArray*} +%destructor columnlist {taosArrayDestroy($$);} +columnlist(A) ::= columnlist(X) COMMA column(Y). {taosArrayPush(X, &Y); A = X; } +columnlist(A) ::= column(X). {A = taosArrayInit(4, sizeof(TAOS_FIELD)); taosArrayPush(A, &X);} // The information used for a column is the name and type of column: // tinyint smallint int bigint float double bool timestamp binary(x) nchar(x) @@ -335,8 +338,8 @@ column(A) ::= ids(X) typename(Y). { tSQLSetColumnInfo(&A, &X, &Y); } -%type tagitemlist {tVariantList*} -%destructor tagitemlist {tVariantListDestroy($$);} +%type tagitemlist {SArray*} +%destructor tagitemlist {taosArrayDestroy($$);} %type tagitem {tVariant} tagitemlist(A) ::= tagitemlist(X) COMMA tagitem(Y). { A = tVariantListAppend(X, &Y, -1); } @@ -429,11 +432,11 @@ as(X) ::= ids(Y). { X = Y; } as(X) ::= . { X.n = 0; } // A complete FROM clause. -%type from {tVariantList*} +%type from {SArray*} // current not support query from no-table from(A) ::= FROM tablelist(X). {A = X;} -%type tablelist {tVariantList*} +%type tablelist {SArray*} tablelist(A) ::= ids(X) cpxName(Y). { toTSDBType(X.type); X.n += Y.n; @@ -473,8 +476,8 @@ interval_opt(N) ::= INTERVAL LP tmvar(E) RP. {N.interval = E; N.offset.n = 0; interval_opt(N) ::= INTERVAL LP tmvar(E) COMMA tmvar(O) RP. {N.interval = E; N.offset = O;} interval_opt(N) ::= . {memset(&N, 0, sizeof(N));} -%type fill_opt {tVariantList*} -%destructor fill_opt {tVariantListDestroy($$);} +%type fill_opt {SArray*} +%destructor fill_opt {taosArrayDestroy($$);} fill_opt(N) ::= . {N = 0; } fill_opt(N) ::= FILL LP ID(Y) COMMA tagitemlist(X) RP. { tVariant A = {0}; @@ -494,11 +497,11 @@ fill_opt(N) ::= FILL LP ID(Y) RP. { sliding_opt(K) ::= SLIDING LP tmvar(E) RP. {K = E; } sliding_opt(K) ::= . {K.n = 0; K.z = NULL; K.type = 0; } -%type orderby_opt {tVariantList*} -%destructor orderby_opt {tVariantListDestroy($$);} +%type orderby_opt {SArray*} +%destructor orderby_opt {taosArrayDestroy($$);} -%type sortlist {tVariantList*} -%destructor sortlist {tVariantListDestroy($$);} +%type sortlist {SArray*} +%destructor sortlist {taosArrayDestroy($$);} %type sortitem {tVariant} %destructor sortitem {tVariantDestroy(&$$);} @@ -528,10 +531,10 @@ sortorder(A) ::= DESC. {A = TSDB_ORDER_DESC;} sortorder(A) ::= . {A = TSDB_ORDER_ASC;} //default is descend order //group by clause -%type groupby_opt {tVariantList*} -%destructor groupby_opt {tVariantListDestroy($$);} -%type grouplist {tVariantList*} -%destructor grouplist {tVariantListDestroy($$);} +%type groupby_opt {SArray*} +%destructor groupby_opt {taosArrayDestroy($$);} +%type grouplist {SArray*} +%destructor grouplist {taosArrayDestroy($$);} groupby_opt(A) ::= . {A = 0;} groupby_opt(A) ::= GROUP BY grouplist(X). {A = X;} @@ -553,11 +556,11 @@ having_opt(A) ::= HAVING expr(X). {A = X;} //limit-offset subclause %type limit_opt {SLimitVal} limit_opt(A) ::= . {A.limit = -1; A.offset = 0;} -limit_opt(A) ::= LIMIT signed(X). {A.limit = X; A.offset = 0;} +limit_opt(A) ::= LIMIT signed(X). 
{printf("aa1, %d\n", X); A.limit = X; A.offset = 0;} limit_opt(A) ::= LIMIT signed(X) OFFSET signed(Y). - {A.limit = X; A.offset = Y;} + {printf("aa2\n, %d\n", X); A.limit = X; A.offset = Y;} limit_opt(A) ::= LIMIT signed(X) COMMA signed(Y). - {A.limit = Y; A.offset = X;} + {printf("aa3\n, %d\n", X); A.limit = Y; A.offset = X;} %type slimit_opt {SLimitVal} slimit_opt(A) ::= . {A.limit = -1; A.offset = 0;} @@ -654,7 +657,7 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) DROP COLUMN ids(A). { X.n += F.n; toTSDBType(A.type); - tVariantList* K = tVariantListAppendToken(NULL, &A, -1); + SArray* K = tVariantListAppendToken(NULL, &A, -1); SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN); setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); @@ -670,7 +673,7 @@ cmd ::= ALTER TABLE ids(X) cpxName(Z) DROP TAG ids(Y). { X.n += Z.n; toTSDBType(Y.type); - tVariantList* A = tVariantListAppendToken(NULL, &Y, -1); + SArray* A = tVariantListAppendToken(NULL, &Y, -1); SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN); setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); @@ -680,7 +683,7 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) CHANGE TAG ids(Y) ids(Z). { X.n += F.n; toTSDBType(Y.type); - tVariantList* A = tVariantListAppendToken(NULL, &Y, -1); + SArray* A = tVariantListAppendToken(NULL, &Y, -1); toTSDBType(Z.type); A = tVariantListAppendToken(A, &Z, -1); @@ -693,7 +696,7 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) SET TAG ids(Y) EQ tagitem(Z). { X.n += F.n; toTSDBType(Y.type); - tVariantList* A = tVariantListAppendToken(NULL, &Y, -1); + SArray* A = tVariantListAppendToken(NULL, &Y, -1); A = tVariantListAppend(A, &Z, -1); SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL); diff --git a/src/query/inc/tsqlfunction.h b/src/query/inc/tsqlfunction.h index 28b9a60102d41a11fa02fee1ca51cbf8e263bf3b..32cbb56c62514001ea5314e7e5dce8549daddb97 100644 --- a/src/query/inc/tsqlfunction.h +++ b/src/query/inc/tsqlfunction.h @@ -128,7 +128,7 @@ typedef struct SArithmeticSupport { SExprInfo *pArithExpr; int32_t numOfCols; SColumnInfo *colList; - SArray* exprList; // client side used + void *exprList; // client side used int32_t offset; char** data; } SArithmeticSupport; @@ -145,15 +145,14 @@ typedef struct SInterpInfoDetail { int8_t primaryCol; } SInterpInfoDetail; -typedef struct SResultInfo { +typedef struct SResultRowCellInfo { int8_t hasResult; // result generated, not NULL value - bool initialized; // output buffer has been initialized - bool complete; // query has completed - bool superTableQ; // is super table query - uint32_t bufLen; // buffer size - uint64_t numOfRes; // num of output result in current buffer - void* interResultBuf; // output result buffer -} SResultInfo; + bool initialized; // output buffer has been initialized + bool complete; // query has completed + uint32_t numOfRes; // num of output result in current buffer +} SResultRowCellInfo; + +#define GET_ROWCELL_INTERBUF(_c) ((void*) ((char*)(_c) + sizeof(SResultRowCellInfo))) struct SQLFunctionCtx; @@ -175,9 +174,11 @@ typedef struct SQLFunctionCtx { int16_t inputBytes; int16_t outputType; - int16_t outputBytes; // size of results, determined by function and input column data type - bool hasNull; // null value exist in current block + int16_t outputBytes; // size of results, determined by function and input column data type + int32_t interBufBytes; // internal buffer size + bool hasNull; // null value exist in current 
block bool requireNull; // require null in some function + bool stableQuery; int16_t functionId; // function id void * aInputElemBuf; char * aOutputBuf; // final result output buffer, point to sdata->data @@ -189,7 +190,8 @@ typedef struct SQLFunctionCtx { void * ptsOutputBuf; // corresponding output buffer for timestamp of each result, e.g., top/bottom*/ SQLPreAggVal preAggVals; tVariant tag; - SResultInfo *resultInfo; + + SResultRowCellInfo *resultInfo; SExtTagsInfo tagInfo; } SQLFunctionCtx; @@ -274,16 +276,16 @@ bool topbot_datablock_filter(SQLFunctionCtx *pCtx, int32_t functionId, const cha (_r)->initialized = false; \ } while (0) -void setResultInfoBuf(SResultInfo *pResInfo, int32_t size, bool superTable, char* buf); +//void setResultInfoBuf(SResultRowCellInfo *pResInfo, char* buf); -static FORCE_INLINE void initResultInfo(SResultInfo *pResInfo) { +static FORCE_INLINE void initResultInfo(SResultRowCellInfo *pResInfo, uint32_t bufLen) { pResInfo->initialized = true; // the this struct has been initialized flag pResInfo->complete = false; pResInfo->hasResult = false; pResInfo->numOfRes = 0; - memset(pResInfo->interResultBuf, 0, (size_t)pResInfo->bufLen); + memset(GET_ROWCELL_INTERBUF(pResInfo), 0, (size_t)bufLen); } #ifdef __cplusplus diff --git a/src/query/src/qAst.c b/src/query/src/qAst.c index 893105e44ac4eb82843514cda20928d3e0dcdaf9..e813688d8408bd124149352b337f1c0bef71d2af 100644 --- a/src/query/src/qAst.c +++ b/src/query/src/qAst.c @@ -13,12 +13,10 @@ * along with this program. If not, see . */ - #include "os.h" #include "exception.h" #include "qAst.h" -#include "qSqlparser.h" #include "qSyntaxtreefunction.h" #include "taosdef.h" #include "taosmsg.h" @@ -30,201 +28,19 @@ #include "tskiplist.h" #include "tsqlfunction.h" #include "tstoken.h" -#include "ttokendef.h" -#include "tulog.h" -#include "tutil.h" - -/* - * - * @date 2018-2-15 - * @version 0.2 operation for column filter - * - * @Description parse tag query expression to build ast - * ver 0.2, filter the result on first column with high priority to limit the candidate set - * ver 0.3, pipeline filter in the form of: (a+2)/9 > 14 - * - */ -static tExprNode *tExprNodeCreate(SSchema *pSchema, int32_t numOfCols, SStrToken *pToken); - -static tExprNode *createSyntaxTree(SSchema *pSchema, int32_t numOfCols, char *str, int32_t *i); -static void destroySyntaxTree(tExprNode *); - -static uint8_t isQueryOnPrimaryKey(const char *primaryColumnName, const tExprNode *pLeft, const tExprNode *pRight); - -/* - * Check the filter value type on the right hand side based on the column id on the left hand side, - * the filter value type must be identical to field type for relational operation - * As for binary arithmetic operation, it is not necessary to do so. 
- */ -static void reviseBinaryExprIfNecessary(tExprNode **pLeft, tExprNode **pRight, uint8_t *optr) { - if (*optr >= TSDB_RELATION_LESS && *optr <= TSDB_RELATION_LIKE) { - // make sure that the type of data on both sides of relational comparision are identical - if ((*pLeft)->nodeType == TSQL_NODE_VALUE) { - tVariantTypeSetType((*pLeft)->pVal, (*pRight)->pSchema->type); - } else if ((*pRight)->nodeType == TSQL_NODE_VALUE) { - tVariantTypeSetType((*pRight)->pVal, (*pLeft)->pSchema->type); - } - - } else if (*optr >= TSDB_BINARY_OP_ADD && *optr <= TSDB_BINARY_OP_REMAINDER) { - if ((*pLeft)->nodeType == TSQL_NODE_VALUE) { - /* convert to int/bigint may cause the precision loss */ - tVariantTypeSetType((*pLeft)->pVal, TSDB_DATA_TYPE_DOUBLE); - } else if ((*pRight)->nodeType == TSQL_NODE_VALUE) { - /* convert to int/bigint may cause the precision loss */ - tVariantTypeSetType((*pRight)->pVal, TSDB_DATA_TYPE_DOUBLE); - } - } - - /* - * for expressions that are suitable for switch principle, - * switch left and left and right hand side in expr if possible - */ - if ((*pLeft)->nodeType == TSQL_NODE_VALUE && (*pRight)->nodeType == TSQL_NODE_COL) { - if (*optr >= TSDB_RELATION_GREATER && *optr <= TSDB_RELATION_GREATER_EQUAL && *optr != TSDB_RELATION_EQUAL) { - SWAP(*pLeft, *pRight, tExprNode *); - } - - switch (*optr) { - case TSDB_RELATION_GREATER: - (*optr) = TSDB_RELATION_LESS; - break; - case TSDB_RELATION_LESS: - (*optr) = TSDB_RELATION_GREATER; - break; - case TSDB_RELATION_GREATER_EQUAL: - (*optr) = TSDB_RELATION_LESS_EQUAL; - break; - case TSDB_RELATION_LESS_EQUAL: - (*optr) = TSDB_RELATION_GREATER_EQUAL; - break; - default:; - // for other type of operations, do nothing - } - } -} - -static tExprNode *tExprNodeCreate(SSchema *pSchema, int32_t numOfCols, SStrToken *pToken) { - /* if the token is not a value, return false */ - if (pToken->type == TK_RP || (pToken->type != TK_INTEGER && pToken->type != TK_FLOAT && pToken->type != TK_ID && - pToken->type != TK_TBNAME && pToken->type != TK_STRING && pToken->type != TK_BOOL)) { - return NULL; - } - - size_t nodeSize = sizeof(tExprNode); - tExprNode *pNode = NULL; - - if (pToken->type == TK_ID || pToken->type == TK_TBNAME) { - int32_t i = 0; - if (pToken->type == TK_ID) { - do { - SStrToken tableToken = {0}; - extractTableNameFromToken(pToken, &tableToken); - - size_t len = strlen(pSchema[i].name); - if (strncmp(pToken->z, pSchema[i].name, pToken->n) == 0 && pToken->n == len) break; - } while (++i < numOfCols); - - if (i == numOfCols) { // column name is not valid, parse the expression failed - return NULL; - } - } - - nodeSize += sizeof(SSchema); - - pNode = calloc(1, nodeSize); - pNode->pSchema = (struct SSchema *)((char *)pNode + sizeof(tExprNode)); - pNode->nodeType = TSQL_NODE_COL; - - if (pToken->type == TK_ID) { - memcpy(pNode->pSchema, &pSchema[i], sizeof(SSchema)); - } else { - pNode->pSchema->type = TSDB_DATA_TYPE_BINARY; - pNode->pSchema->bytes = TSDB_TABLE_NAME_LEN - 1; - strcpy(pNode->pSchema->name, TSQL_TBNAME_L); - pNode->pSchema->colId = -1; - } +#include "tschemautil.h" - } else { - nodeSize += sizeof(tVariant); - pNode = calloc(1, nodeSize); - pNode->pVal = (tVariant *)((char *)pNode + sizeof(tExprNode)); - - toTSDBType(pToken->type); - tVariantCreate(pNode->pVal, pToken); - pNode->nodeType = TSQL_NODE_VALUE; - } - - return pNode; -} - -uint8_t getBinaryExprOptr(SStrToken *pToken) { - switch (pToken->type) { - case TK_LT: - return TSDB_RELATION_LESS; - case TK_LE: - return TSDB_RELATION_LESS_EQUAL; - case TK_GT: - return 
TSDB_RELATION_GREATER; - case TK_GE: - return TSDB_RELATION_GREATER_EQUAL; - case TK_NE: - return TSDB_RELATION_NOT_EQUAL; - case TK_AND: - return TSDB_RELATION_AND; - case TK_OR: - return TSDB_RELATION_OR; - case TK_EQ: - return TSDB_RELATION_EQUAL; - case TK_PLUS: - return TSDB_BINARY_OP_ADD; - case TK_MINUS: - return TSDB_BINARY_OP_SUBTRACT; - case TK_STAR: - return TSDB_BINARY_OP_MULTIPLY; - case TK_SLASH: - case TK_DIVIDE: - return TSDB_BINARY_OP_DIVIDE; - case TK_REM: - return TSDB_BINARY_OP_REMAINDER; - case TK_LIKE: - return TSDB_RELATION_LIKE; - case TK_ISNULL: - return TSDB_RELATION_ISNULL; - case TK_NOTNULL: - return TSDB_RELATION_NOTNULL; - default: { return 0; } - } -} - -// previous generated expr is reduced as the left child -static tExprNode *parseRemainStr(char *pstr, tExprNode *pExpr, SSchema *pSchema, int32_t optr, - int32_t numOfCols, int32_t *i) { - // set the previous generated node as the left child of new root - pExpr->nodeType = TSQL_NODE_EXPR; - - // remain is the right child - tExprNode *pRight = createSyntaxTree(pSchema, numOfCols, pstr, i); - if (pRight == NULL || (pRight->nodeType == TSQL_NODE_COL && pExpr->nodeType != TSQL_NODE_VALUE) || - (pExpr->nodeType == TSQL_NODE_VALUE && pRight->nodeType != TSQL_NODE_COL)) { - tExprNodeDestroy(pExpr, NULL); - tExprNodeDestroy(pRight, NULL); - return NULL; - } - - tExprNode *pNewExpr = (tExprNode *)calloc(1, sizeof(tExprNode)); - uint8_t k = optr; - reviseBinaryExprIfNecessary(&pExpr, &pRight, &k); - pNewExpr->_node.pLeft = pExpr; - pNewExpr->_node.pRight = pRight; - pNewExpr->_node.optr = k; - - pNewExpr->_node.hasPK = isQueryOnPrimaryKey(pSchema[0].name, pExpr, pRight); - pNewExpr->nodeType = TSQL_NODE_EXPR; +typedef struct { + char* v; + int32_t optr; +} SEndPoint; - return pNewExpr; -} +typedef struct { + SEndPoint* start; + SEndPoint* end; +} SQueryCond; -uint8_t isQueryOnPrimaryKey(const char *primaryColumnName, const tExprNode *pLeft, const tExprNode *pRight) { +static uint8_t UNUSED_FUNC isQueryOnPrimaryKey(const char *primaryColumnName, const tExprNode *pLeft, const tExprNode *pRight) { if (pLeft->nodeType == TSQL_NODE_COL) { // if left node is the primary column,return true return (strcmp(primaryColumnName, pLeft->pSchema->name) == 0) ? 
1 : 0; @@ -237,103 +53,6 @@ uint8_t isQueryOnPrimaryKey(const char *primaryColumnName, const tExprNode *pLef } } -static tExprNode *createSyntaxTree(SSchema *pSchema, int32_t numOfCols, char *str, int32_t *i) { - SStrToken t0 = tStrGetToken(str, i, false, 0, NULL); - if (t0.n == 0) { - return NULL; - } - - tExprNode *pLeft = NULL; - if (t0.type == TK_LP) { // start new left child branch - pLeft = createSyntaxTree(pSchema, numOfCols, str, i); - } else { - if (t0.type == TK_RP) { - return NULL; - } - - pLeft = tExprNodeCreate(pSchema, numOfCols, &t0); - } - - if (pLeft == NULL) { - return NULL; - } - - t0 = tStrGetToken(str, i, false, 0, NULL); - if (t0.n == 0 || t0.type == TK_RP) { - if (pLeft->nodeType != TSQL_NODE_EXPR) { // if left is not the expr, it is not a legal expr - tExprNodeDestroy(pLeft, NULL); - return NULL; - } - - return pLeft; - } - - // get the operator of expr - uint8_t optr = getBinaryExprOptr(&t0); - if (optr == 0) { - uError("not support binary operator:%d", t0.type); - tExprNodeDestroy(pLeft, NULL); - return NULL; - } - - assert(pLeft != NULL); - tExprNode *pRight = NULL; - - if (t0.type == TK_AND || t0.type == TK_OR || t0.type == TK_LP) { - pRight = createSyntaxTree(pSchema, numOfCols, str, i); - } else { - /* - * In case that pLeft is a field identification, - * we parse the value in expression according to queried field type, - * if we do not get the information, in case of value of field presented first, - * we revised the value after the binary expression is completed. - */ - t0 = tStrGetToken(str, i, true, 0, NULL); - if (t0.n == 0) { - tExprNodeDestroy(pLeft, NULL); // illegal expression - return NULL; - } - - if (t0.type == TK_LP) { - pRight = createSyntaxTree(pSchema, numOfCols, str, i); - } else { - pRight = tExprNodeCreate(pSchema, numOfCols, &t0); - } - } - - if (pRight == NULL) { - tExprNodeDestroy(pLeft, NULL); - return NULL; - } - - /* create binary expr as the child of new parent node */ - tExprNode *pExpr = (tExprNode *)calloc(1, sizeof(tExprNode)); - reviseBinaryExprIfNecessary(&pLeft, &pRight, &optr); - - pExpr->_node.hasPK = isQueryOnPrimaryKey(pSchema[0].name, pLeft, pRight); - pExpr->_node.pLeft = pLeft; - pExpr->_node.pRight = pRight; - pExpr->_node.optr = optr; - - t0 = tStrGetToken(str, i, true, 0, NULL); - - if (t0.n == 0 || t0.type == TK_RP) { - pExpr->nodeType = TSQL_NODE_EXPR; - return pExpr; - } else { - uint8_t localOptr = getBinaryExprOptr(&t0); - if (localOptr == 0) { - uError("not support binary operator:%d", t0.type); - free(pExpr); - return NULL; - } - - return parseRemainStr(str, pExpr, pSchema, localOptr, numOfCols, i); - } -} - -static void UNUSED_FUNC destroySyntaxTree(tExprNode *pNode) { tExprNodeDestroy(pNode, NULL); } - void tExprNodeDestroy(tExprNode *pNode, void (*fp)(void *)) { if (pNode == NULL) { return; @@ -373,16 +92,6 @@ void tExprTreeDestroy(tExprNode **pExpr, void (*fp)(void *)) { *pExpr = NULL; } -typedef struct { - char* v; - int32_t optr; -} SEndPoint; - -typedef struct { - SEndPoint* start; - SEndPoint* end; -} SQueryCond; - // todo check for malloc failure static int32_t setQueryCond(tQueryInfo *queryColInfo, SQueryCond* pCond) { int32_t optr = queryColInfo->optr; @@ -396,13 +105,10 @@ static int32_t setQueryCond(tQueryInfo *queryColInfo, SQueryCond* pCond) { pCond->end = calloc(1, sizeof(SEndPoint)); pCond->end->optr = queryColInfo->optr; pCond->end->v = queryColInfo->q; - } else if (optr == TSDB_RELATION_IN) { - printf("relation is in\n"); - assert(0); - } else if (optr == TSDB_RELATION_LIKE) { - 
printf("relation is like\n"); + } else if (optr == TSDB_RELATION_IN || optr == TSDB_RELATION_LIKE) { assert(0); } + return TSDB_CODE_SUCCESS; } @@ -415,9 +121,9 @@ static void tQueryIndexColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArr } if (cond.start != NULL) { - iter = tSkipListCreateIterFromVal(pSkipList, (char*) cond.start->v, pSkipList->keyInfo.type, TSDB_ORDER_ASC); + iter = tSkipListCreateIterFromVal(pSkipList, (char*) cond.start->v, pSkipList->type, TSDB_ORDER_ASC); } else { - iter = tSkipListCreateIterFromVal(pSkipList, (char*)(cond.end ? cond.end->v: NULL), pSkipList->keyInfo.type, TSDB_ORDER_DESC); + iter = tSkipListCreateIterFromVal(pSkipList, (char*)(cond.end ? cond.end->v: NULL), pSkipList->type, TSDB_ORDER_DESC); } if (cond.start != NULL) { @@ -432,7 +138,7 @@ static void tQueryIndexColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArr break; } - STableKeyInfo info = {.pTable = *(void**)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; + STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; taosArrayPush(result, &info); } } else if (optr == TSDB_RELATION_GREATER || optr == TSDB_RELATION_GREATER_EQUAL) { // greater equal @@ -450,7 +156,7 @@ static void tQueryIndexColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArr if (ret == 0 && optr == TSDB_RELATION_GREATER) { continue; } else { - STableKeyInfo info = {.pTable = *(void**)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; + STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; taosArrayPush(result, &info); comp = false; } @@ -465,22 +171,22 @@ static void tQueryIndexColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArr continue; } - STableKeyInfo info = {.pTable = *(void**)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; + STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; taosArrayPush(result, &info); } tSkipListDestroyIter(iter); comp = true; - iter = tSkipListCreateIterFromVal(pSkipList, (char*) cond.start->v, pSkipList->keyInfo.type, TSDB_ORDER_DESC); + iter = tSkipListCreateIterFromVal(pSkipList, (char*) cond.start->v, pSkipList->type, TSDB_ORDER_DESC); while(tSkipListIterNext(iter)) { SSkipListNode* pNode = tSkipListIterGet(iter); comp = comp && (pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v) == 0); - if (comp) { + if (comp) { continue; } - STableKeyInfo info = {.pTable = *(void**)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; + STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; taosArrayPush(result, &info); } @@ -504,7 +210,7 @@ static void tQueryIndexColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArr if (ret == 0 && optr == TSDB_RELATION_LESS) { continue; } else { - STableKeyInfo info = {.pTable = *(void **)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; + STableKeyInfo info = {.pTable = (void *)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; taosArrayPush(result, &info); comp = false; // no need to compare anymore } @@ -518,7 +224,7 @@ static void tQueryIndexColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArr bool isnull = isNull(SL_GET_NODE_KEY(pSkipList, pNode), pQueryInfo->sch.type); if ((pQueryInfo->optr == TSDB_RELATION_ISNULL && isnull) || (pQueryInfo->optr == TSDB_RELATION_NOTNULL && (!isnull))) { - STableKeyInfo info = {.pTable = *(void **)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; + STableKeyInfo info = {.pTable = (void 
*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; taosArrayPush(result, &info); } } @@ -530,99 +236,6 @@ static void tQueryIndexColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArr tSkipListDestroyIter(iter); } -int32_t merge(SArray *pLeft, SArray *pRight, SArray *pFinalRes) { -// assert(pFinalRes->pRes == 0); -// -// pFinalRes->pRes = calloc((size_t)(pLeft->num + pRight->num), POINTER_BYTES); -// pFinalRes->num = 0; -// -// // sort according to address -// tSkipListNode **pLeftNodes = (tSkipListNode **)pLeft->pRes; -// qsort(pLeftNodes, pLeft->num, sizeof(pLeft->pRes[0]), compareByAddr); -// -// tSkipListNode **pRightNodes = (tSkipListNode **)pRight->pRes; -// qsort(pRightNodes, pRight->num, sizeof(pRight->pRes[0]), compareByAddr); -// -// int32_t i = 0, j = 0; -// -// // merge two sorted arrays in O(n) time -// while (i < pLeft->num && j < pRight->num) { -// int64_t ret = (int64_t)pLeftNodes[i] - (int64_t)pRightNodes[j]; -// -// if (ret < 0) { -// pFinalRes->pRes[pFinalRes->num++] = pLeftNodes[i++]; -// } else if (ret > 0) { -// pFinalRes->pRes[pFinalRes->num++] = pRightNodes[j++]; -// } else { // pNode->key > pkey[i] -// pFinalRes->pRes[pFinalRes->num++] = pRightNodes[j++]; -// i++; -// } -// } -// -// while (i < pLeft->num) { -// pFinalRes->pRes[pFinalRes->num++] = pLeftNodes[i++]; -// } -// -// while (j < pRight->num) { -// pFinalRes->pRes[pFinalRes->num++] = pRightNodes[j++]; -// } -// -// return pFinalRes->num; - return 0; -} - -int32_t intersect(SArray *pLeft, SArray *pRight, SArray *pFinalRes) { -// int64_t num = MIN(pLeft->num, pRight->num); -// -// assert(pFinalRes->pRes == 0); -// -// pFinalRes->pRes = calloc(num, POINTER_BYTES); -// pFinalRes->num = 0; -// -// // sort according to address -// tSkipListNode **pLeftNodes = (tSkipListNode **)pLeft->pRes; -// qsort(pLeftNodes, pLeft->num, sizeof(pLeft->pRes[0]), compareByAddr); -// -// tSkipListNode **pRightNodes = (tSkipListNode **)pRight->pRes; -// qsort(pRightNodes, pRight->num, sizeof(pRight->pRes[0]), compareByAddr); -// -// int32_t i = 0, j = 0; -// // merge two sorted arrays in O(n) time -// while (i < pLeft->num && j < pRight->num) { -// int64_t ret = (int64_t)pLeftNodes[i] - (int64_t)pRightNodes[j]; -// -// if (ret < 0) { -// i++; -// } else if (ret > 0) { -// j++; -// } else { // pNode->key > pkey[i] -// pFinalRes->pRes[pFinalRes->num++] = pRightNodes[j]; -// i++; -// j++; -// } -// } -// -// return pFinalRes->num; - return 0; -} - -/* - * traverse the result and apply the function to each item to check if the item is qualified or not - */ -static void tArrayTraverse(tExprNode *pExpr, __result_filter_fn_t fp, SArray *pResult) { - assert(pExpr->_node.pLeft->nodeType == TSQL_NODE_COL && pExpr->_node.pRight->nodeType == TSQL_NODE_VALUE && fp != NULL); - - // scan the result array list and check for each item in the list - for (int32_t i = 0; i < taosArrayGetSize(pResult); ++i) { - void* item = taosArrayGet(pResult, i); - if (fp(item, pExpr->_node.info)) { - i++; - } else { - taosArrayRemove(pResult, i); - } - } -} - static bool filterItem(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *param) { tExprNode *pLeft = pExpr->_node.pLeft; tExprNode *pRight = pExpr->_node.pRight; @@ -650,39 +263,13 @@ static bool filterItem(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *p return param->nodeFilterFn(pItem, pExpr->_node.info); } -/** - * Apply the filter expression on non-indexed tag columns to each element in the result list, which is generated - * by filtering on indexed tag column. 
So the whole result set only needs to be iterated once to generate - * result that is satisfied to the filter expression, no matter how the filter expression consisting of. - * - * @param pExpr filter expression on non-indexed tag columns. - * @param pResult results from filter on the indexed tag column, which is usually the first tag column - * @param pSchema tag schemas - * @param fp filter callback function - */ -static void exprTreeTraverseImpl(tExprNode *pExpr, SArray *pResult, SExprTraverseSupp *param) { - size_t size = taosArrayGetSize(pResult); - - SArray* array = taosArrayInit(size, POINTER_BYTES); - for (int32_t i = 0; i < size; ++i) { - void *pItem = taosArrayGetP(pResult, i); - - if (filterItem(pExpr, pItem, param)) { - taosArrayPush(array, &pItem); - } - } - - taosArrayCopy(pResult, array); - taosArrayDestroy(array); -} - static void tSQLBinaryTraverseOnSkipList(tExprNode *pExpr, SArray *pResult, SSkipList *pSkipList, SExprTraverseSupp *param ) { SSkipListIterator* iter = tSkipListCreateIter(pSkipList); while (tSkipListIterNext(iter)) { SSkipListNode *pNode = tSkipListIterGet(iter); if (filterItem(pExpr, pNode, param)) { - taosArrayPush(pResult, SL_GET_NODE_DATA(pNode)); + taosArrayPush(pResult, &(SL_GET_NODE_DATA(pNode))); } } tSkipListDestroyIter(iter); @@ -697,7 +284,7 @@ static void tQueryIndexlessColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SSkipListNode *pNode = tSkipListIterGet(iter); char * pData = SL_GET_NODE_DATA(pNode); - tstr *name = (tstr*) tsdbGetTableName(*(void**) pData); + tstr *name = (tstr*) tsdbGetTableName((void*) pData); // todo speed up by using hash if (pQueryInfo->sch.colId == TSDB_TBNAME_COLUMN_INDEX) { @@ -711,7 +298,7 @@ static void tQueryIndexlessColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, } if (addToResult) { - STableKeyInfo info = {.pTable = *(void**)pData, .lastKey = TSKEY_INITIAL_VAL}; + STableKeyInfo info = {.pTable = (void*)pData, .lastKey = TSKEY_INITIAL_VAL}; taosArrayPush(res, &info); } } @@ -733,10 +320,6 @@ void tExprTreeTraverse(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, S assert(pLeft->nodeType == TSQL_NODE_COL && (pRight->nodeType == TSQL_NODE_VALUE || pRight->nodeType == TSQL_NODE_DUMMY)); param->setupInfoFn(pExpr, param->pExtInfo); - if (pSkipList == NULL) { - tArrayTraverse(pExpr, param->nodeFilterFn, result); - return; - } tQueryInfo *pQueryInfo = pExpr->_node.info; if (pQueryInfo->indexed && pQueryInfo->optr != TSDB_RELATION_LIKE) { @@ -748,49 +331,14 @@ void tExprTreeTraverse(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, S return; } - // recursive traverse left child branch + // The value of hasPK is always 0. uint8_t weight = pLeft->_node.hasPK + pRight->_node.hasPK; + assert(weight == 0 && pSkipList != NULL && taosArrayGetSize(result) == 0); - if (weight == 0 ) { - if (taosArrayGetSize(result) > 0 && pSkipList == NULL) { - /** - * Perform the filter operation based on the initial filter result, which is obtained from filtering from index. - * Since no index presented, the filter operation is done by scan all elements in the result set. - * - * if the query is a high selectivity filter, only small portion of meters are retrieved. 
- */ - exprTreeTraverseImpl(pExpr, result, param); - } else { - /** - * apply the hierarchical expression to every node in skiplist for find the qualified nodes - */ - assert(taosArrayGetSize(result) == 0); - tSQLBinaryTraverseOnSkipList(pExpr, result, pSkipList, param); - } - - return; - } - - if (weight == 2 || (weight == 1 && pExpr->_node.optr == TSDB_RELATION_OR)) { - SArray* rLeft = taosArrayInit(10, POINTER_BYTES); - SArray* rRight = taosArrayInit(10, POINTER_BYTES); - - tExprTreeTraverse(pLeft, pSkipList, rLeft, param); - tExprTreeTraverse(pRight, pSkipList, rRight, param); - - if (pExpr->_node.optr == TSDB_RELATION_AND) { // CROSS - intersect(rLeft, rRight, result); - } else if (pExpr->_node.optr == TSDB_RELATION_OR) { // or - merge(rLeft, rRight, result); - } else { - assert(false); - } - - taosArrayDestroy(rLeft); - taosArrayDestroy(rRight); - return; - } + //apply the hierarchical expression to every node in skiplist for find the qualified nodes + tSQLBinaryTraverseOnSkipList(pExpr, result, pSkipList, param); +#if 0 /* * (weight == 1 && pExpr->nSQLBinaryOptr == TSDB_RELATION_AND) is handled here * @@ -819,6 +367,7 @@ void tExprTreeTraverse(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, S * So, we do not set the skip list index as a parameter */ tExprTreeTraverse(pSecond, NULL, result, param); +#endif } void tExprTreeCalcTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput, void *param, int32_t order, @@ -1011,6 +560,7 @@ tExprNode* exprTreeFromBinary(const void* data, size_t size) { if (size == 0) { return NULL; } + SBufferReader br = tbufInitReader(data, size, false); return exprTreeFromBinaryImpl(&br); } @@ -1034,10 +584,7 @@ tExprNode* exprTreeFromTableName(const char* tbnameCond) { SSchema* pSchema = exception_calloc(1, sizeof(SSchema)); left->pSchema = pSchema; - pSchema->type = TSDB_DATA_TYPE_BINARY; - pSchema->bytes = TSDB_TABLE_NAME_LEN - 1; - strcpy(pSchema->name, TSQL_TBNAME_L); - pSchema->colId = -1; + *pSchema = tscGetTbnameColumnSchema(); tExprNode* right = exception_calloc(1, sizeof(tExprNode)); expr->_node.pRight = right; diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 5ad52ef29e6ba0a4b47129db891c0d1318e76c5f..d0874c36bc1783242c8536ebeec1fbce8559389f 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -170,21 +170,19 @@ static void getNextTimeWindow(SQuery* pQuery, STimeWindow* tw) { tw->ekey -= 1; } -#define GET_NEXT_TIMEWINDOW(_q, tw) getNextTimeWindow((_q), (tw)) - #define SET_STABLE_QUERY_OVER(_q) ((_q)->tableIndex = (int32_t)((_q)->tableqinfoGroupInfo.numOfTables)) #define IS_STASBLE_QUERY_OVER(_q) ((_q)->tableIndex >= (int32_t)((_q)->tableqinfoGroupInfo.numOfTables)) // todo move to utility static int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *group); -static void setWindowResOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pResult); -static void setWindowResOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pResult); -static void resetMergeResultBuf(SQuery *pQuery, SQLFunctionCtx *pCtx, SResultInfo *pResultInfo); +static void setResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult); +static void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult); +static void resetMergeResultBuf(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx *pCtx, SResultRow *pRow); static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId); static void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, 
void* inputData, TSKEY *tsCol, SDataBlockInfo* pBlockInfo, - SDataStatis *pStatis, void *param, int32_t colIndex); + SDataStatis *pStatis, void *param, int32_t colIndex, int32_t vgId); static void initCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv); static void destroyTableQueryInfoImpl(STableQueryInfo *pTableQueryInfo); @@ -194,6 +192,8 @@ static void buildTagQueryResult(SQInfo *pQInfo); static int32_t setAdditionalInfo(SQInfo *pQInfo, void *pTable, STableQueryInfo *pTableQueryInfo); static int32_t flushFromResultBuf(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* pGroupResInfo); +static int32_t checkForQueryBuf(size_t numOfTables); +static void releaseQueryBuf(size_t numOfTables); bool doFilterData(SQuery *pQuery, int32_t elemPos) { for (int32_t k = 0; k < pQuery->numOfFilterCols; ++k) { @@ -242,7 +242,7 @@ int64_t getNumOfResult(SQueryRuntimeEnv *pRuntimeEnv) { int64_t maxOutput = 0; for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { - int32_t functionId = pQuery->pSelectExpr[j].base.functionId; + int32_t functionId = pQuery->pExpr1[j].base.functionId; /* * ts, tag, tagprj function can not decide the output number of current query @@ -253,7 +253,7 @@ int64_t getNumOfResult(SQueryRuntimeEnv *pRuntimeEnv) { continue; } - SResultInfo *pResInfo = GET_RES_INFO(&pRuntimeEnv->pCtx[j]); + SResultRowCellInfo *pResInfo = GET_RES_INFO(&pRuntimeEnv->pCtx[j]); if (pResInfo != NULL && maxOutput < pResInfo->numOfRes) { maxOutput = pResInfo->numOfRes; } @@ -270,7 +270,7 @@ void updateNumOfResult(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOfRes) { SQuery *pQuery = pRuntimeEnv->pQuery; for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { - SResultInfo *pResInfo = GET_RES_INFO(&pRuntimeEnv->pCtx[j]); + SResultRowCellInfo *pResInfo = GET_RES_INFO(&pRuntimeEnv->pCtx[j]); int16_t functionId = pRuntimeEnv->pCtx[j].functionId; if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAGPRJ || @@ -337,7 +337,7 @@ bool isSelectivityWithTagsQuery(SQuery *pQuery) { int32_t numOfSelectivity = 0; for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functId = pQuery->pSelectExpr[i].base.functionId; + int32_t functId = pQuery->pExpr1[i].base.functionId; if (functId == TSDB_FUNC_TAG_DUMMY || functId == TSDB_FUNC_TS_DUMMY) { hasTags = true; continue; @@ -357,7 +357,7 @@ bool isSelectivityWithTagsQuery(SQuery *pQuery) { bool isProjQuery(SQuery *pQuery) { for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functId = pQuery->pSelectExpr[i].base.functionId; + int32_t functId = pQuery->pExpr1[i].base.functionId; if (functId != TSDB_FUNC_PRJ && functId != TSDB_FUNC_TAGPRJ) { return false; } @@ -366,7 +366,7 @@ bool isProjQuery(SQuery *pQuery) { return true; } -bool isTSCompQuery(SQuery *pQuery) { return pQuery->pSelectExpr[0].base.functionId == TSDB_FUNC_TS_COMP; } +bool isTSCompQuery(SQuery *pQuery) { return pQuery->pExpr1[0].base.functionId == TSDB_FUNC_TS_COMP; } static bool limitResults(SQueryRuntimeEnv* pRuntimeEnv) { SQInfo* pQInfo = GET_QINFO_ADDR(pRuntimeEnv); @@ -387,7 +387,7 @@ static bool limitResults(SQueryRuntimeEnv* pRuntimeEnv) { static bool isTopBottomQuery(SQuery *pQuery) { for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functionId = pQuery->pSelectExpr[i].base.functionId; + int32_t functionId = pQuery->pExpr1[i].base.functionId; if (functionId == TSDB_FUNC_TS) { continue; } @@ -401,12 +401,12 @@ static bool isTopBottomQuery(SQuery *pQuery) { } static bool hasTagValOutput(SQuery* pQuery) { - SExprInfo *pExprInfo = &pQuery->pSelectExpr[0]; + SExprInfo 
*pExprInfo = &pQuery->pExpr1[0]; if (pQuery->numOfOutput == 1 && pExprInfo->base.functionId == TSDB_FUNC_TS_COMP) { return true; } else { // set tag value, by which the results are aggregated. for (int32_t idx = 0; idx < pQuery->numOfOutput; ++idx) { - SExprInfo *pLocalExprInfo = &pQuery->pSelectExpr[idx]; + SExprInfo *pLocalExprInfo = &pQuery->pExpr1[idx]; // ts_comp column required the tag value for join filter if (TSDB_COL_IS_TAG(pLocalExprInfo->base.colInfo.flag)) { @@ -445,11 +445,11 @@ static bool hasNullValue(SColIndex* pColIndex, SDataStatis *pStatis, SDataStatis return true; } -static SWindowResult *doSetTimeWindowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SWindowResInfo *pWindowResInfo, char *pData, - int16_t bytes, bool masterscan) { - SQuery *pQuery = pRuntimeEnv->pQuery; - - int32_t *p1 = (int32_t *) taosHashGet(pWindowResInfo->hashList, pData, bytes); +static SResultRow *doPrepareResultRowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SWindowResInfo *pWindowResInfo, char *pData, + int16_t bytes, bool masterscan, uint64_t uid) { + SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, uid); + int32_t *p1 = + (int32_t *)taosHashGet(pRuntimeEnv->pResultRowHashTable, pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes)); if (p1 != NULL) { pWindowResInfo->curIndex = *p1; } else { @@ -459,41 +459,37 @@ static SWindowResult *doSetTimeWindowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SWin // more than the capacity, reallocate the resources if (pWindowResInfo->size >= pWindowResInfo->capacity) { - int64_t newCap = 0; + int64_t newCapacity = 0; if (pWindowResInfo->capacity > 10000) { - newCap = (int64_t)(pWindowResInfo->capacity * 1.25); + newCapacity = (int64_t)(pWindowResInfo->capacity * 1.25); } else { - newCap = (int64_t)(pWindowResInfo->capacity * 1.5); + newCapacity = (int64_t)(pWindowResInfo->capacity * 1.5); } - char *t = realloc(pWindowResInfo->pResult, (size_t)(newCap * sizeof(SWindowResult))); - pRuntimeEnv->summary.internalSupSize += (newCap - pWindowResInfo->capacity) * sizeof(SWindowResult); - pRuntimeEnv->summary.numOfTimeWindows += (newCap - pWindowResInfo->capacity); - + char *t = realloc(pWindowResInfo->pResult, (size_t)(newCapacity * POINTER_BYTES)); if (t == NULL) { longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } - pWindowResInfo->pResult = (SWindowResult *)t; - - int32_t inc = (int32_t)newCap - pWindowResInfo->capacity; - memset(&pWindowResInfo->pResult[pWindowResInfo->capacity], 0, sizeof(SWindowResult) * inc); + pWindowResInfo->pResult = (SResultRow **)t; - pRuntimeEnv->summary.internalSupSize += (pQuery->numOfOutput * sizeof(SResultInfo) + pRuntimeEnv->interBufSize) * inc; + int32_t inc = (int32_t)newCapacity - pWindowResInfo->capacity; + memset(&pWindowResInfo->pResult[pWindowResInfo->capacity], 0, POINTER_BYTES * inc); - for (int32_t i = pWindowResInfo->capacity; i < newCap; ++i) { - int32_t ret = createQueryResultInfo(pQuery, &pWindowResInfo->pResult[i], pRuntimeEnv->stableQuery, pRuntimeEnv->interBufSize); - if (ret != TSDB_CODE_SUCCESS) { - longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); - } - } + pWindowResInfo->capacity = (int32_t)newCapacity; + } - pWindowResInfo->capacity = (int32_t)newCap; + SResultRow *pResult = getNewResultRow(pRuntimeEnv->pool); + pWindowResInfo->pResult[pWindowResInfo->size] = pResult; + int32_t ret = initResultRow(pResult); + if (ret != TSDB_CODE_SUCCESS) { + longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } // add a new result set for a new group pWindowResInfo->curIndex = pWindowResInfo->size++; - 
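doPrepareResultRowFromKey replaces the per-window hash list with a runtime-wide pResultRowHashTable whose key is built by SET_RES_WINDOW_KEY from qUtil.h: an 8-byte uid prefix followed by the raw window key. A small illustration of that key layout, with the concrete values made up (the real code writes into pRuntimeEnv->keyBuf):

    static void sketchResultRowKey(void) {
      char     keyBuf[GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE)];
      TSKEY    skey = 1500000000000LL;   // window start key (illustrative)
      uint64_t uid  = 7;                 // table uid, or the group index for group-by rows

      SET_RES_WINDOW_KEY(keyBuf, (char *)&skey, TSDB_KEYSIZE, uid);
      // layout: | uid, 8 bytes | original key, TSDB_KEYSIZE bytes |
      size_t keyLen = GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE);
      // keyBuf/keyLen are what taosHashGet()/taosHashPut() receive for pResultRowHashTable
      (void)keyLen;
    }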
taosHashPut(pWindowResInfo->hashList, pData, bytes, (char *)&pWindowResInfo->curIndex, sizeof(int32_t)); + taosHashPut(pRuntimeEnv->pResultRowHashTable, pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes), + (char *)&pWindowResInfo->curIndex, sizeof(int32_t)); } // too many time window in query @@ -501,7 +497,7 @@ static SWindowResult *doSetTimeWindowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SWin longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW); } - return getWindowResult(pWindowResInfo, pWindowResInfo->curIndex); + return getResultRow(pWindowResInfo, pWindowResInfo->curIndex); } // get the correct time window according to the handled timestamp @@ -517,7 +513,7 @@ static STimeWindow getActiveTimeWindow(SWindowResInfo *pWindowResInfo, int64_t t } } else { int32_t slot = curTimeWindowIndex(pWindowResInfo); - SWindowResult* pWindowRes = getWindowResult(pWindowResInfo, slot); + SResultRow* pWindowRes = getResultRow(pWindowResInfo, slot); w = pWindowRes->win; } @@ -553,9 +549,9 @@ static STimeWindow getActiveTimeWindow(SWindowResInfo *pWindowResInfo, int64_t t return w; } -static int32_t addNewWindowResultBuf(SWindowResult *pWindowRes, SDiskbasedResultBuf *pResultBuf, int32_t sid, +static int32_t addNewWindowResultBuf(SResultRow *pWindowRes, SDiskbasedResultBuf *pResultBuf, int32_t tid, int32_t numOfRowsPerPage) { - if (pWindowRes->pos.pageId != -1) { + if (pWindowRes->pageId != -1) { return 0; } @@ -563,10 +559,10 @@ static int32_t addNewWindowResultBuf(SWindowResult *pWindowRes, SDiskbasedResult // in the first scan, new space needed for results int32_t pageId = -1; - SIDList list = getDataBufPagesIdList(pResultBuf, sid); + SIDList list = getDataBufPagesIdList(pResultBuf, tid); if (taosArrayGetSize(list) == 0) { - pData = getNewDataBuf(pResultBuf, sid, &pageId); + pData = getNewDataBuf(pResultBuf, tid, &pageId); } else { SPageInfo* pi = getLastPageInfo(list); pData = getResBufPage(pResultBuf, pi->pageId); @@ -576,7 +572,7 @@ static int32_t addNewWindowResultBuf(SWindowResult *pWindowRes, SDiskbasedResult // release current page first, and prepare the next one releaseResBufPageInfo(pResultBuf, pi); - pData = getNewDataBuf(pResultBuf, sid, &pageId); + pData = getNewDataBuf(pResultBuf, tid, &pageId); if (pData != NULL) { assert(pData->num == 0); // number of elements must be 0 for new allocated buffer } @@ -588,24 +584,23 @@ static int32_t addNewWindowResultBuf(SWindowResult *pWindowRes, SDiskbasedResult } // set the number of rows in current disk page - if (pWindowRes->pos.pageId == -1) { // not allocated yet, allocate new buffer - pWindowRes->pos.pageId = pageId; - pWindowRes->pos.rowId = (int32_t)(pData->num++); + if (pWindowRes->pageId == -1) { // not allocated yet, allocate new buffer + pWindowRes->pageId = pageId; + pWindowRes->rowId = (int32_t)(pData->num++); - assert(pWindowRes->pos.pageId >= 0); + assert(pWindowRes->pageId >= 0); } return 0; } -static int32_t setWindowOutputBufByKey(SQueryRuntimeEnv *pRuntimeEnv, SWindowResInfo *pWindowResInfo, int32_t sid, +static int32_t setWindowOutputBufByKey(SQueryRuntimeEnv *pRuntimeEnv, SWindowResInfo *pWindowResInfo, SDataBlockInfo* pBockInfo, STimeWindow *win, bool masterscan, bool* newWind) { assert(win->skey <= win->ekey); SDiskbasedResultBuf *pResultBuf = pRuntimeEnv->pResultBuf; - SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, pWindowResInfo, (char *)&win->skey, - TSDB_KEYSIZE, masterscan); - if (pWindowRes == NULL) { + SResultRow *pResultRow = doPrepareResultRowFromKey(pRuntimeEnv, pWindowResInfo, (char 
*)&win->skey, TSDB_KEYSIZE, masterscan, pBockInfo->uid); + if (pResultRow == NULL) { *newWind = false; return masterscan? -1:0; @@ -614,23 +609,22 @@ static int32_t setWindowOutputBufByKey(SQueryRuntimeEnv *pRuntimeEnv, SWindowRes *newWind = true; // not assign result buffer yet, add new result buffer - if (pWindowRes->pos.pageId == -1) { - int32_t ret = addNewWindowResultBuf(pWindowRes, pResultBuf, sid, pRuntimeEnv->numOfRowsPerPage); + if (pResultRow->pageId == -1) { + int32_t ret = addNewWindowResultBuf(pResultRow, pResultBuf, pBockInfo->tid, pRuntimeEnv->numOfRowsPerPage); if (ret != TSDB_CODE_SUCCESS) { return -1; } } // set time window for current result - pWindowRes->win = (*win); - - setWindowResOutputBufInitCtx(pRuntimeEnv, pWindowRes); + pResultRow->win = (*win); + setResultRowOutputBufInitCtx(pRuntimeEnv, pResultRow); return TSDB_CODE_SUCCESS; } static bool getTimeWindowResStatus(SWindowResInfo *pWindowResInfo, int32_t slot) { assert(slot >= 0 && slot < pWindowResInfo->size); - return pWindowResInfo->pResult[slot].closed; + return pWindowResInfo->pResult[slot]->closed; } static FORCE_INLINE int32_t getForwardStepsInBlock(int32_t numOfRows, __block_search_fn_t searchFn, TSKEY ekey, int16_t pos, @@ -666,7 +660,13 @@ static FORCE_INLINE int32_t getForwardStepsInBlock(int32_t numOfRows, __block_se */ static int32_t doCheckQueryCompleted(SQueryRuntimeEnv *pRuntimeEnv, TSKEY lastKey, SWindowResInfo *pWindowResInfo) { SQuery *pQuery = pRuntimeEnv->pQuery; - if (pRuntimeEnv->scanFlag != MASTER_SCAN || (!QUERY_IS_INTERVAL_QUERY(pQuery))) { + if (pRuntimeEnv->scanFlag != MASTER_SCAN) { + return pWindowResInfo->size; + } + + // for group by normal column query, close time window and return. + if (!QUERY_IS_INTERVAL_QUERY(pQuery)) { + closeAllTimeWindow(pWindowResInfo); return pWindowResInfo->size; } @@ -689,7 +689,7 @@ static int32_t doCheckQueryCompleted(SQueryRuntimeEnv *pRuntimeEnv, TSKEY lastKe int64_t skey = TSKEY_INITIAL_VAL; for (i = 0; i < pWindowResInfo->size; ++i) { - SWindowResult *pResult = &pWindowResInfo->pResult[i]; + SResultRow *pResult = pWindowResInfo->pResult[i]; if (pResult->closed) { numOfClosed += 1; continue; @@ -713,7 +713,7 @@ static int32_t doCheckQueryCompleted(SQueryRuntimeEnv *pRuntimeEnv, TSKEY lastKe pWindowResInfo->curIndex = i; } - pWindowResInfo->prevSKey = pWindowResInfo->pResult[pWindowResInfo->curIndex].win.skey; + pWindowResInfo->prevSKey = pWindowResInfo->pResult[pWindowResInfo->curIndex]->win.skey; // the number of completed slots are larger than the threshold, return current generated results to client. if (numOfClosed > pWindowResInfo->threshold) { @@ -790,7 +790,7 @@ static void doBlockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, bool closed pCtx[k].size = forwardStep; pCtx[k].startOffset = (QUERY_IS_ASC_QUERY(pQuery)) ? 
offset : offset - (forwardStep - 1); - int32_t functionId = pQuery->pSelectExpr[k].base.functionId; + int32_t functionId = pQuery->pExpr1[k].base.functionId; if ((aAggs[functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) != 0) { pCtx[k].ptsList = &tsCol[pCtx[k].startOffset]; } @@ -819,7 +819,7 @@ static void doRowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, bool closed, for (int32_t k = 0; k < pQuery->numOfOutput; ++k) { pCtx[k].nStartQueryTimestamp = pWin->skey; - int32_t functionId = pQuery->pSelectExpr[k].base.functionId; + int32_t functionId = pQuery->pExpr1[k].base.functionId; if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) { aAggs[functionId].xFunctionF(&pCtx[k], offset); } @@ -831,7 +831,7 @@ static int32_t getNextQualifiedWindow(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow TSKEY *primaryKeys, __block_search_fn_t searchFn, int32_t prevPosition) { SQuery *pQuery = pRuntimeEnv->pQuery; - GET_NEXT_TIMEWINDOW(pQuery, pNext); + getNextTimeWindow(pQuery, pNext); // next time window is not in current block if ((pNext->skey > pDataBlockInfo->window.ekey && QUERY_IS_ASC_QUERY(pQuery)) || @@ -853,35 +853,50 @@ static int32_t getNextQualifiedWindow(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow } int32_t startPos = 0; + // tumbling time window query, a special case of sliding time window query if (pQuery->interval.sliding == pQuery->interval.interval && prevPosition != -1) { int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); startPos = prevPosition + factor; } else { - startPos = searchFn((char *)primaryKeys, pDataBlockInfo->rows, startKey, pQuery->order.order); + if (startKey <= pDataBlockInfo->window.skey && QUERY_IS_ASC_QUERY(pQuery)) { + startPos = 0; + } else if (startKey >= pDataBlockInfo->window.ekey && !QUERY_IS_ASC_QUERY(pQuery)) { + startPos = pDataBlockInfo->rows - 1; + } else { + startPos = searchFn((char *)primaryKeys, pDataBlockInfo->rows, startKey, pQuery->order.order); + } } /* * This time window does not cover any data, try next time window, * this case may happen when the time window is too small */ - if (QUERY_IS_ASC_QUERY(pQuery) && primaryKeys[startPos] > pNext->ekey) { - TSKEY next = primaryKeys[startPos]; - if (pQuery->interval.intervalUnit == 'n' || pQuery->interval.intervalUnit == 'y') { - pNext->skey = taosTimeTruncate(next, &pQuery->interval, pQuery->precision); - pNext->ekey = taosTimeAdd(pNext->skey, pQuery->interval.interval, pQuery->interval.intervalUnit, pQuery->precision) - 1; + if (primaryKeys == NULL) { + if (QUERY_IS_ASC_QUERY(pQuery)) { + assert(pDataBlockInfo->window.skey <= pNext->ekey); } else { - pNext->ekey += ((next - pNext->ekey + pQuery->interval.sliding - 1)/pQuery->interval.sliding) * pQuery->interval.sliding; - pNext->skey = pNext->ekey - pQuery->interval.interval + 1; + assert(pDataBlockInfo->window.ekey >= pNext->skey); } - } else if ((!QUERY_IS_ASC_QUERY(pQuery)) && primaryKeys[startPos] < pNext->skey) { - TSKEY next = primaryKeys[startPos]; - if (pQuery->interval.intervalUnit == 'n' || pQuery->interval.intervalUnit == 'y') { - pNext->skey = taosTimeTruncate(next, &pQuery->interval, pQuery->precision); - pNext->ekey = taosTimeAdd(pNext->skey, pQuery->interval.interval, pQuery->interval.intervalUnit, pQuery->precision) - 1; - } else { - pNext->skey -= ((pNext->skey - next + pQuery->interval.sliding - 1) / pQuery->interval.sliding) * pQuery->interval.sliding; - pNext->ekey = pNext->skey + pQuery->interval.interval - 1; + } else { + if (QUERY_IS_ASC_QUERY(pQuery) && primaryKeys[startPos] > pNext->ekey) { + TSKEY next = 
primaryKeys[startPos]; + if (pQuery->interval.intervalUnit == 'n' || pQuery->interval.intervalUnit == 'y') { + pNext->skey = taosTimeTruncate(next, &pQuery->interval, pQuery->precision); + pNext->ekey = taosTimeAdd(pNext->skey, pQuery->interval.interval, pQuery->interval.intervalUnit, pQuery->precision) - 1; + } else { + pNext->ekey += ((next - pNext->ekey + pQuery->interval.sliding - 1)/pQuery->interval.sliding) * pQuery->interval.sliding; + pNext->skey = pNext->ekey - pQuery->interval.interval + 1; + } + } else if ((!QUERY_IS_ASC_QUERY(pQuery)) && primaryKeys[startPos] < pNext->skey) { + TSKEY next = primaryKeys[startPos]; + if (pQuery->interval.intervalUnit == 'n' || pQuery->interval.intervalUnit == 'y') { + pNext->skey = taosTimeTruncate(next, &pQuery->interval, pQuery->precision); + pNext->ekey = taosTimeAdd(pNext->skey, pQuery->interval.interval, pQuery->interval.intervalUnit, pQuery->precision) - 1; + } else { + pNext->skey -= ((pNext->skey - next + pQuery->interval.sliding - 1) / pQuery->interval.sliding) * pQuery->interval.sliding; + pNext->ekey = pNext->skey + pQuery->interval.interval - 1; + } } } @@ -928,9 +943,9 @@ static char *getDataBlock(SQueryRuntimeEnv *pRuntimeEnv, SArithmeticSupport *sas char *dataBlock = NULL; SQuery *pQuery = pRuntimeEnv->pQuery; - int32_t functionId = pQuery->pSelectExpr[col].base.functionId; + int32_t functionId = pQuery->pExpr1[col].base.functionId; if (functionId == TSDB_FUNC_ARITHM) { - sas->pArithExpr = &pQuery->pSelectExpr[col]; + sas->pArithExpr = &pQuery->pExpr1[col]; sas->offset = 0; sas->colList = pQuery->colList; @@ -938,7 +953,6 @@ static char *getDataBlock(SQueryRuntimeEnv *pRuntimeEnv, SArithmeticSupport *sas sas->data = calloc(pQuery->numOfCols, POINTER_BYTES); if (sas->data == NULL) { - finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } @@ -961,9 +975,9 @@ static char *getDataBlock(SQueryRuntimeEnv *pRuntimeEnv, SArithmeticSupport *sas } } else { // other type of query function - SColIndex *pCol = &pQuery->pSelectExpr[col].base.colInfo; + SColIndex *pCol = &pQuery->pExpr1[col].base.colInfo; if (TSDB_COL_IS_NORMAL_COL(pCol->flag)) { - SColIndex* pColIndex = &pQuery->pSelectExpr[col].base.colInfo; + SColIndex* pColIndex = &pQuery->pExpr1[col].base.colInfo; SColumnInfoData *p = taosArrayGet(pDataBlock, pColIndex->colIndex); assert(p->info.colId == pColIndex->colId); @@ -1001,13 +1015,13 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis * SArithmeticSupport *sasArray = calloc((size_t)pQuery->numOfOutput, sizeof(SArithmeticSupport)); if (sasArray == NULL) { - finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } + SQInfo* pQInfo = GET_QINFO_ADDR(pRuntimeEnv); for (int32_t k = 0; k < pQuery->numOfOutput; ++k) { char *dataBlock = getDataBlock(pRuntimeEnv, &sasArray[k], k, pDataBlockInfo->rows, pDataBlock); - setExecParams(pQuery, &pCtx[k], dataBlock, tsCols, pDataBlockInfo, pStatis, &sasArray[k], k); + setExecParams(pQuery, &pCtx[k], dataBlock, tsCols, pDataBlockInfo, pStatis, &sasArray[k], k, pQInfo->vgId); } int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); @@ -1023,9 +1037,8 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis * bool hasTimeWindow = false; STimeWindow win = getActiveTimeWindow(pWindowResInfo, ts, pQuery); - if (setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, 
pDataBlockInfo->tid, &win, masterScan, &hasTimeWindow) != - TSDB_CODE_SUCCESS) { - taosTFree(sasArray); + if (setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo, &win, masterScan, &hasTimeWindow) != TSDB_CODE_SUCCESS) { + tfree(sasArray); return; } @@ -1052,8 +1065,7 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis * // null data, failed to allocate more memory buffer hasTimeWindow = false; - if (setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo->tid, &nextWin, masterScan, - &hasTimeWindow) != TSDB_CODE_SUCCESS) { + if (setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo, &nextWin, masterScan, &hasTimeWindow) != TSDB_CODE_SUCCESS) { break; } @@ -1076,7 +1088,7 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis * * tag_prj function are changed to be TSDB_FUNC_TAG_DUMMY */ for (int32_t k = 0; k < pQuery->numOfOutput; ++k) { - int32_t functionId = pQuery->pSelectExpr[k].base.functionId; + int32_t functionId = pQuery->pExpr1[k].base.functionId; if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) { aAggs[functionId].xFunction(&pCtx[k]); } @@ -1084,17 +1096,17 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis * } for(int32_t i = 0; i < pQuery->numOfOutput; ++i) { - if (pQuery->pSelectExpr[i].base.functionId != TSDB_FUNC_ARITHM) { + if (pQuery->pExpr1[i].base.functionId != TSDB_FUNC_ARITHM) { continue; } - taosTFree(sasArray[i].data); + tfree(sasArray[i].data); } - taosTFree(sasArray); + tfree(sasArray); } -static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, char *pData, int16_t type, int16_t bytes) { +static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, char *pData, int16_t type, int16_t bytes, int32_t groupIndex) { if (isNull(pData, type)) { // ignore the null value return -1; } @@ -1111,13 +1123,14 @@ static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, char *pDat len = varDataLen(pData); } else if (type == TSDB_DATA_TYPE_FLOAT || type == TSDB_DATA_TYPE_DOUBLE) { SQInfo* pQInfo = GET_QINFO_ADDR(pRuntimeEnv); - qError("QInfo:%p group by not supported on double/float/binary/nchar columns, abort", pQInfo); + qError("QInfo:%p group by not supported on double/float columns, abort", pQInfo); longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR); } - SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, &pRuntimeEnv->windowResInfo, d, len, true); - if (pWindowRes == NULL) { + uint64_t uid = groupIndex; // for group by normal columns, the table group index is used as the uid of the result row
+ SResultRow *pResultRow = doPrepareResultRowFromKey(pRuntimeEnv, &pRuntimeEnv->windowResInfo, d, len, true, uid); + if (pResultRow == NULL) { return -1; } @@ -1131,23 +1144,21 @@ static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, char *pDat } if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { - pWindowRes->key = malloc(varDataTLen(pData)); - varDataCopy(pWindowRes->key, pData); + pResultRow->key = malloc(varDataTLen(pData)); + varDataCopy(pResultRow->key, pData); } else { - pWindowRes->win.skey = v; - pWindowRes->win.ekey = v; + pResultRow->win.skey = v; + pResultRow->win.ekey = v; } - assert(pRuntimeEnv->windowResInfo.interval == 0); - - if (pWindowRes->pos.pageId == -1) { - int32_t ret = addNewWindowResultBuf(pWindowRes, pResultBuf, GROUPRESULTID, pRuntimeEnv->numOfRowsPerPage); + if (pResultRow->pageId == -1) { + int32_t ret = addNewWindowResultBuf(pResultRow, pResultBuf, GROUPRESULTID, pRuntimeEnv->numOfRowsPerPage); if (ret != 0) { return -1; } } - setWindowResOutputBuf(pRuntimeEnv, pWindowRes); + setResultOutputBuf(pRuntimeEnv, pResultRow); initCtxOutputBuf(pRuntimeEnv); return TSDB_CODE_SUCCESS; } @@ -1200,7 +1211,7 @@ static int32_t doTSJoinFilter(SQueryRuntimeEnv *pRuntimeEnv, int32_t offset) { SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; // compare tag first - if (tVariantCompare(&pCtx[0].tag, &elem.tag) != 0) { + if (tVariantCompare(&pCtx[0].tag, elem.tag) != 0) { return TS_JOIN_TAG_NOT_EQUALS; } @@ -1230,7 +1241,7 @@ static int32_t doTSJoinFilter(SQueryRuntimeEnv *pRuntimeEnv, int32_t offset) { } static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId) { - SResultInfo *pResInfo = GET_RES_INFO(pCtx); + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); SQuery* pQuery = pRuntimeEnv->pQuery; // in case of timestamp column, always generated results. 
@@ -1246,7 +1257,7 @@ static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx return QUERY_IS_ASC_QUERY(pQuery); } - // todo add comments + // denote the order type if ((functionId == TSDB_FUNC_LAST_DST || functionId == TSDB_FUNC_LAST)) { return pCtx->param[0].i64Key == pQuery->order.order; } @@ -1274,7 +1285,6 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS SArithmeticSupport *sasArray = calloc((size_t)pQuery->numOfOutput, sizeof(SArithmeticSupport)); if (sasArray == NULL) { - finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } @@ -1286,9 +1296,10 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS groupbyColumnData = getGroupbyColumnData(pQuery, &type, &bytes, pDataBlock); } + SQInfo* pQInfo = GET_QINFO_ADDR(pRuntimeEnv); for (int32_t k = 0; k < pQuery->numOfOutput; ++k) { char *dataBlock = getDataBlock(pRuntimeEnv, &sasArray[k], k, pDataBlockInfo->rows, pDataBlock); - setExecParams(pQuery, &pCtx[k], dataBlock, tsCols, pDataBlockInfo, pStatis, &sasArray[k], k); + setExecParams(pQuery, &pCtx[k], dataBlock, tsCols, pDataBlockInfo, pStatis, &sasArray[k], k, pQInfo->vgId); } // set the input column data @@ -1303,7 +1314,6 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS // from top to bottom in desc // from bottom to top in asc order if (pRuntimeEnv->pTSBuf != NULL) { - SQInfo *pQInfo = (SQInfo *)GET_QINFO_ADDR(pRuntimeEnv); qDebug("QInfo:%p process data rows, numOfRows:%d, query order:%d, ts comp order:%d", pQInfo, pDataBlockInfo->rows, pQuery->order.order, pRuntimeEnv->pTSBuf->cur.order); } @@ -1335,7 +1345,7 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS STimeWindow win = getActiveTimeWindow(pWindowResInfo, ts, pQuery); bool hasTimeWindow = false; - int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo->tid, &win, masterScan, &hasTimeWindow); + int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo, &win, masterScan, &hasTimeWindow); if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code continue; } @@ -1351,9 +1361,9 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS int32_t index = pWindowResInfo->curIndex; while (1) { - GET_NEXT_TIMEWINDOW(pQuery, &nextWin); + getNextTimeWindow(pQuery, &nextWin); if ((nextWin.skey > pQuery->window.ekey && QUERY_IS_ASC_QUERY(pQuery)) || - (nextWin.skey < pQuery->window.ekey && !QUERY_IS_ASC_QUERY(pQuery))) { + (nextWin.ekey < pQuery->window.ekey && !QUERY_IS_ASC_QUERY(pQuery))) { break; } @@ -1363,7 +1373,7 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS // null data, failed to allocate more memory buffer hasTimeWindow = false; - if (setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo->tid, &nextWin, masterScan, &hasTimeWindow) != TSDB_CODE_SUCCESS) { + if (setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo, &nextWin, masterScan, &hasTimeWindow) != TSDB_CODE_SUCCESS) { break; } @@ -1379,14 +1389,14 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS if (groupbyColumnValue) { char *val = groupbyColumnData + bytes * offset; - int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, val, type, bytes); + int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, val, type, bytes, item->groupIndex); if (ret != TSDB_CODE_SUCCESS) { 
// null data, too many state code continue; } } for (int32_t k = 0; k < pQuery->numOfOutput; ++k) { - int32_t functionId = pQuery->pSelectExpr[k].base.functionId; + int32_t functionId = pQuery->pExpr1[k].base.functionId; if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) { aAggs[functionId].xFunctionF(&pCtx[k], offset); } @@ -1409,13 +1419,17 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS item->lastKey = (QUERY_IS_ASC_QUERY(pQuery)? pDataBlockInfo->window.ekey:pDataBlockInfo->window.skey) + step; } + if (pRuntimeEnv->pTSBuf != NULL) { + item->cur = tsBufGetCursor(pRuntimeEnv->pTSBuf); + } + // todo refactor: extract method for(int32_t i = 0; i < pQuery->numOfOutput; ++i) { - if (pQuery->pSelectExpr[i].base.functionId != TSDB_FUNC_ARITHM) { + if (pQuery->pExpr1[i].base.functionId != TSDB_FUNC_ARITHM) { continue; } - taosTFree(sasArray[i].data); + tfree(sasArray[i].data); } free(sasArray); @@ -1440,7 +1454,7 @@ static int32_t tableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBl // interval query with limit applied int32_t numOfRes = 0; - if (QUERY_IS_INTERVAL_QUERY(pQuery)) { + if (QUERY_IS_INTERVAL_QUERY(pQuery) || pRuntimeEnv->groupbyNormalCol) { numOfRes = doCheckQueryCompleted(pRuntimeEnv, lastKey, pWindowResInfo); } else { numOfRes = (int32_t)getNumOfResult(pRuntimeEnv); @@ -1469,13 +1483,13 @@ static int32_t tableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBl } void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void* inputData, TSKEY *tsCol, SDataBlockInfo* pBlockInfo, - SDataStatis *pStatis, void *param, int32_t colIndex) { + SDataStatis *pStatis, void *param, int32_t colIndex, int32_t vgId) { - int32_t functionId = pQuery->pSelectExpr[colIndex].base.functionId; - int32_t colId = pQuery->pSelectExpr[colIndex].base.colInfo.colId; + int32_t functionId = pQuery->pExpr1[colIndex].base.functionId; + int32_t colId = pQuery->pExpr1[colIndex].base.colInfo.colId; SDataStatis *tpField = NULL; - pCtx->hasNull = hasNullValue(&pQuery->pSelectExpr[colIndex].base.colInfo, pStatis, &tpField); + pCtx->hasNull = hasNullValue(&pQuery->pExpr1[colIndex].base.colInfo, pStatis, &tpField); pCtx->aInputElemBuf = inputData; if (tpField != NULL) { @@ -1508,13 +1522,14 @@ void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void* inputData, TSKEY functionId == TSDB_FUNC_DIFF || (functionId >= TSDB_FUNC_RATE && functionId <= TSDB_FUNC_AVG_IRATE)) { /* * least squares function needs two columns of input, currently, the x value of linear equation is set to - * timestamp column, and the y-value is the column specified in pQuery->pSelectExpr[i].colIdxInBuffer + * timestamp column, and the y-value is the column specified in pQuery->pExpr1[i].colIdxInBuffer * * top/bottom function needs timestamp to indicate when the * top/bottom values emerge, so does diff function */ if (functionId == TSDB_FUNC_TWA) { - STwaInfo *pTWAInfo = GET_RES_INFO(pCtx)->interResultBuf; + SResultRowCellInfo* pInfo = GET_RES_INFO(pCtx); + STwaInfo *pTWAInfo = (STwaInfo*) GET_ROWCELL_INTERBUF(pInfo); pTWAInfo->SKey = pQuery->window.skey; pTWAInfo->EKey = pQuery->window.ekey; } @@ -1528,7 +1543,9 @@ void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void* inputData, TSKEY pCtx->preAggVals.statis.max = pBlockInfo->window.ekey; } } else if (functionId == TSDB_FUNC_INTERP) { - SInterpInfoDetail *pInterpInfo = GET_RES_INFO(pCtx)->interResultBuf; + SResultRowCellInfo* pInfo = GET_RES_INFO(pCtx); + + SInterpInfoDetail *pInterpInfo = (SInterpInfoDetail 
*)GET_ROWCELL_INTERBUF(pInfo); pInterpInfo->type = (int8_t)pQuery->fillType; pInterpInfo->ts = pQuery->window.skey; pInterpInfo->primaryCol = (colId == PRIMARYKEY_TIMESTAMP_COL_INDEX); @@ -1542,6 +1559,9 @@ void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void* inputData, TSKEY } } } + } else if (functionId == TSDB_FUNC_TS_COMP) { + pCtx->param[0].i64Key = vgId; + pCtx->param[0].nType = TSDB_DATA_TYPE_BIGINT; } #if defined(_DEBUG_VIEW) @@ -1575,7 +1595,7 @@ static int32_t setCtxTagColumnInfo(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx } for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - SSqlFuncMsg *pSqlFuncMsg = &pQuery->pSelectExpr[i].base; + SSqlFuncMsg *pSqlFuncMsg = &pQuery->pExpr1[i].base; if (pSqlFuncMsg->functionId == TSDB_FUNC_TAG_DUMMY || pSqlFuncMsg->functionId == TSDB_FUNC_TS_DUMMY) { tagLen += pCtx[i].outputBytes; @@ -1595,39 +1615,28 @@ static int32_t setCtxTagColumnInfo(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx p->tagInfo.numOfTagCols = num; p->tagInfo.tagsLen = tagLen; } else { - taosTFree(pTagCtx); + tfree(pTagCtx); } } return TSDB_CODE_SUCCESS; } -static FORCE_INLINE void setWindowResultInfo(SResultInfo *pResultInfo, SQuery *pQuery, bool isStableQuery, char* buf) { - char* p = buf; - for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t size = pQuery->pSelectExpr[i].interBytes; - setResultInfoBuf(&pResultInfo[i], size, isStableQuery, p); - - p += size; - } -} - static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int16_t order) { qDebug("QInfo:%p setup runtime env", GET_QINFO_ADDR(pRuntimeEnv)); SQuery *pQuery = pRuntimeEnv->pQuery; - size_t size = pRuntimeEnv->interBufSize + pQuery->numOfOutput * sizeof(SResultInfo); - - pRuntimeEnv->resultInfo = calloc(1, size); pRuntimeEnv->pCtx = (SQLFunctionCtx *)calloc(pQuery->numOfOutput, sizeof(SQLFunctionCtx)); + pRuntimeEnv->offset = calloc(pQuery->numOfOutput, sizeof(int16_t)); + pRuntimeEnv->rowCellInfoOffset = calloc(pQuery->numOfOutput, sizeof(int32_t)); - if (pRuntimeEnv->resultInfo == NULL || pRuntimeEnv->pCtx == NULL) { + if (pRuntimeEnv->offset == NULL || pRuntimeEnv->pCtx == NULL || pRuntimeEnv->rowCellInfoOffset == NULL) { goto _clean; } pRuntimeEnv->offset[0] = 0; for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - SSqlFuncMsg *pSqlFuncMsg = &pQuery->pSelectExpr[i].base; + SSqlFuncMsg *pSqlFuncMsg = &pQuery->pExpr1[i].base; SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i]; SColIndex* pIndex = &pSqlFuncMsg->colInfo; @@ -1658,17 +1667,18 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int16_t order pCtx->inputType = pQuery->colList[index].type; } - assert(isValidDataType(pCtx->inputType)); pCtx->ptsOutputBuf = NULL; - pCtx->outputBytes = pQuery->pSelectExpr[i].bytes; - pCtx->outputType = pQuery->pSelectExpr[i].type; + pCtx->outputBytes = pQuery->pExpr1[i].bytes; + pCtx->outputType = pQuery->pExpr1[i].type; - pCtx->order = pQuery->order.order; - pCtx->functionId = pSqlFuncMsg->functionId; + pCtx->order = pQuery->order.order; + pCtx->functionId = pSqlFuncMsg->functionId; + pCtx->stableQuery = pRuntimeEnv->stableQuery; + pCtx->interBufBytes = pQuery->pExpr1[i].interBytes; - pCtx->numOfParams = pSqlFuncMsg->numOfParams; + pCtx->numOfParams = pSqlFuncMsg->numOfParams; for (int32_t j = 0; j < pCtx->numOfParams; ++j) { int16_t type = pSqlFuncMsg->arg[j].argType; int16_t bytes = pSqlFuncMsg->arg[j].argBytes; @@ -1683,7 +1693,7 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int16_t order int32_t functionId = pCtx->functionId; if (functionId == 
TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) { - int32_t f = pQuery->pSelectExpr[0].base.functionId; + int32_t f = pQuery->pExpr1[0].base.functionId; assert(f == TSDB_FUNC_TS || f == TSDB_FUNC_TS_DUMMY); pCtx->param[2].i64Key = order; @@ -1696,16 +1706,14 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int16_t order if (i > 0) { pRuntimeEnv->offset[i] = pRuntimeEnv->offset[i - 1] + pRuntimeEnv->pCtx[i - 1].outputBytes; + pRuntimeEnv->rowCellInfoOffset[i] = pRuntimeEnv->rowCellInfoOffset[i - 1] + sizeof(SResultRowCellInfo) + pQuery->pExpr1[i - 1].interBytes; } - } - - char* buf = (char*) pRuntimeEnv->resultInfo + sizeof(SResultInfo) * pQuery->numOfOutput; - // set the intermediate result output buffer - setWindowResultInfo(pRuntimeEnv->resultInfo, pQuery, pRuntimeEnv->stableQuery, buf); + } // if it is group by normal column, do not set output buffer, the output buffer is pResult - if (!pRuntimeEnv->groupbyNormalCol && !pRuntimeEnv->stableQuery) { + // fixed output query/multi-output query for normal table + if (!pRuntimeEnv->groupbyNormalCol && !pRuntimeEnv->stableQuery && !QUERY_IS_INTERVAL_QUERY(pRuntimeEnv->pQuery)) { resetCtxOutputBuf(pRuntimeEnv); } @@ -1717,12 +1725,26 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int16_t order return TSDB_CODE_SUCCESS; _clean: - taosTFree(pRuntimeEnv->resultInfo); - taosTFree(pRuntimeEnv->pCtx); + tfree(pRuntimeEnv->pCtx); + tfree(pRuntimeEnv->offset); + tfree(pRuntimeEnv->rowCellInfoOffset); return TSDB_CODE_QRY_OUT_OF_MEMORY; } +static void doFreeQueryHandle(SQInfo* pQInfo) { + SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv; + + tsdbCleanupQueryHandle(pRuntimeEnv->pQueryHandle); + tsdbCleanupQueryHandle(pRuntimeEnv->pSecQueryHandle); + + pRuntimeEnv->pQueryHandle = NULL; + pRuntimeEnv->pSecQueryHandle = NULL; + + SMemRef* pMemRef = &pQInfo->memRef; + assert(pMemRef->ref == 0 && pMemRef->imem == NULL && pMemRef->mem == NULL); +} + static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { if (pRuntimeEnv->pQuery == NULL) { return; @@ -1743,20 +1765,27 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { } tVariantDestroy(&pCtx->tag); - taosTFree(pCtx->tagInfo.pTagCtxList); + tfree(pCtx->tagInfo.pTagCtxList); } - taosTFree(pRuntimeEnv->resultInfo); - taosTFree(pRuntimeEnv->pCtx); + tfree(pRuntimeEnv->pCtx); } - pRuntimeEnv->pFillInfo = taosDestoryFillInfo(pRuntimeEnv->pFillInfo); + pRuntimeEnv->pFillInfo = taosDestroyFillInfo(pRuntimeEnv->pFillInfo); destroyResultBuf(pRuntimeEnv->pResultBuf); - tsdbCleanupQueryHandle(pRuntimeEnv->pQueryHandle); - tsdbCleanupQueryHandle(pRuntimeEnv->pSecQueryHandle); + doFreeQueryHandle(pQInfo); pRuntimeEnv->pTSBuf = tsBufDestroy(pRuntimeEnv->pTSBuf); + + tfree(pRuntimeEnv->offset); + tfree(pRuntimeEnv->keyBuf); + tfree(pRuntimeEnv->rowCellInfoOffset); + + taosHashCleanup(pRuntimeEnv->pResultRowHashTable); + pRuntimeEnv->pResultRowHashTable = NULL; + + pRuntimeEnv->pool = destroyResultRowPool(pRuntimeEnv->pool); } #define IS_QUERY_KILLED(_q) ((_q)->code == TSDB_CODE_TSC_QUERY_CANCELLED) @@ -1775,7 +1804,7 @@ static bool isFixedOutputQuery(SQueryRuntimeEnv* pRuntimeEnv) { } for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - SSqlFuncMsg *pExprMsg = &pQuery->pSelectExpr[i].base; + SSqlFuncMsg *pExprMsg = &pQuery->pExpr1[i].base; // ignore the ts_comp function if (i == 0 && pExprMsg->functionId == TSDB_FUNC_PRJ && pExprMsg->numOfParams == 1 && @@ -1798,7 +1827,7 @@ static bool 
isFixedOutputQuery(SQueryRuntimeEnv* pRuntimeEnv) { // todo refactor with isLastRowQuery static bool isPointInterpoQuery(SQuery *pQuery) { for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functionID = pQuery->pSelectExpr[i].base.functionId; + int32_t functionID = pQuery->pExpr1[i].base.functionId; if (functionID == TSDB_FUNC_INTERP) { return true; } @@ -1810,7 +1839,7 @@ static bool isPointInterpoQuery(SQuery *pQuery) { // TODO REFACTOR:MERGE WITH CLIENT-SIDE FUNCTION static bool isSumAvgRateQuery(SQuery *pQuery) { for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functionId = pQuery->pSelectExpr[i].base.functionId; + int32_t functionId = pQuery->pExpr1[i].base.functionId; if (functionId == TSDB_FUNC_TS) { continue; } @@ -1826,7 +1855,7 @@ static bool isSumAvgRateQuery(SQuery *pQuery) { static bool isFirstLastRowQuery(SQuery *pQuery) { for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functionID = pQuery->pSelectExpr[i].base.functionId; + int32_t functionID = pQuery->pExpr1[i].base.functionId; if (functionID == TSDB_FUNC_LAST_ROW) { return true; } @@ -1837,7 +1866,7 @@ static bool isFirstLastRowQuery(SQuery *pQuery) { static bool needReverseScan(SQuery *pQuery) { for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functionId = pQuery->pSelectExpr[i].base.functionId; + int32_t functionId = pQuery->pExpr1[i].base.functionId; if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_TAG) { continue; } @@ -1847,8 +1876,11 @@ static bool needReverseScan(SQuery *pQuery) { } if (functionId == TSDB_FUNC_LAST || functionId == TSDB_FUNC_LAST_DST) { - int32_t order = (int32_t)pQuery->pSelectExpr[i].base.arg->argValue.i64; - return order != pQuery->order.order; + // the scan order to acquire the last result of the specified column + int32_t order = (int32_t)pQuery->pExpr1[i].base.arg->argValue.i64; + if (order != pQuery->order.order) { + return true; + } } } @@ -1861,7 +1893,7 @@ static bool needReverseScan(SQuery *pQuery) { */ static bool onlyQueryTags(SQuery* pQuery) { for(int32_t i = 0; i < pQuery->numOfOutput; ++i) { - SExprInfo* pExprInfo = &pQuery->pSelectExpr[i]; + SExprInfo* pExprInfo = &pQuery->pExpr1[i]; int32_t functionId = pExprInfo->base.functionId; @@ -1904,7 +1936,7 @@ static void setScanLimitationByResultBuffer(SQuery *pQuery) { } else { bool hasMultioutput = false; for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - SSqlFuncMsg *pExprMsg = &pQuery->pSelectExpr[i].base; + SSqlFuncMsg *pExprMsg = &pQuery->pExpr1[i].base; if (pExprMsg->functionId == TSDB_FUNC_TS || pExprMsg->functionId == TSDB_FUNC_TS_DUMMY) { continue; } @@ -1938,7 +1970,7 @@ bool colIdCheck(SQuery *pQuery) { // the scan order is not matter static bool onlyOneQueryType(SQuery *pQuery, int32_t functId, int32_t functIdDst) { for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functionId = pQuery->pSelectExpr[i].base.functionId; + int32_t functionId = pQuery->pExpr1[i].base.functionId; if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAG_DUMMY) { @@ -2168,7 +2200,7 @@ static bool needToLoadDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SDataStatis *pDat if (pRuntimeEnv->topBotQuery) { for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functionId = pQuery->pSelectExpr[i].base.functionId; + int32_t functionId = pQuery->pExpr1[i].base.functionId; if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) { return 
topbot_datablock_filter(&pCtx[i], functionId, (char *)&pDataStatis[i].min, (char *)&pDataStatis[i].max); } @@ -2193,7 +2225,7 @@ static bool overlapWithTimeWindow(SQuery* pQuery, SDataBlockInfo* pBlockInfo) { } while(1) { - GET_NEXT_TIMEWINDOW(pQuery, &w); + getNextTimeWindow(pQuery, &w); if (w.skey > pBlockInfo->window.ekey) { break; } @@ -2212,7 +2244,7 @@ static bool overlapWithTimeWindow(SQuery* pQuery, SDataBlockInfo* pBlockInfo) { } while(1) { - GET_NEXT_TIMEWINDOW(pQuery, &w); + getNextTimeWindow(pQuery, &w); if (w.ekey < pBlockInfo->window.skey) { break; } @@ -2252,14 +2284,14 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, SWindowResInfo * pW TSKEY k = QUERY_IS_ASC_QUERY(pQuery)? pBlockInfo->window.skey:pBlockInfo->window.ekey; STimeWindow win = getActiveTimeWindow(pWindowResInfo, k, pQuery); - if (setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pBlockInfo->tid, &win, masterScan, &hasTimeWindow) != + if (setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pBlockInfo, &win, masterScan, &hasTimeWindow) != TSDB_CODE_SUCCESS) { // todo handle error in set result for timewindow } } for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - SSqlFuncMsg* pSqlFunc = &pQuery->pSelectExpr[i].base; + SSqlFuncMsg* pSqlFunc = &pQuery->pExpr1[i].base; int32_t functionId = pSqlFunc->functionId; int32_t colId = pSqlFunc->colInfo.colId; @@ -2383,11 +2415,11 @@ static void ensureOutputBufferSimple(SQueryRuntimeEnv* pRuntimeEnv, int32_t capa } for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t bytes = pQuery->pSelectExpr[i].bytes; + int32_t bytes = pQuery->pExpr1[i].bytes; assert(bytes > 0 && capacity > 0); char *tmp = realloc(pQuery->sdata[i], bytes * capacity + sizeof(tFilePage)); - if (tmp == NULL) { // todo handle the oom + if (tmp == NULL) { longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } else { pQuery->sdata[i] = (tFilePage *)tmp; @@ -2414,11 +2446,11 @@ static void ensureOutputBuffer(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pB int32_t newSize = (int32_t)(pRec->capacity + (pBlockInfo->rows - remain)); for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t bytes = pQuery->pSelectExpr[i].bytes; + int32_t bytes = pQuery->pExpr1[i].bytes; assert(bytes > 0 && newSize > 0); char *tmp = realloc(pQuery->sdata[i], bytes * newSize + sizeof(tFilePage)); - if (tmp == NULL) { // todo handle the oom + if (tmp == NULL) { longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } else { memset(tmp + sizeof(tFilePage) + bytes * pRec->rows, 0, (size_t)((newSize - pRec->rows) * bytes)); @@ -2428,7 +2460,7 @@ static void ensureOutputBuffer(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pB // set the pCtx output buffer position pRuntimeEnv->pCtx[i].aOutputBuf = pQuery->sdata[i]->data + pRec->rows * bytes; - int32_t functionId = pQuery->pSelectExpr[i].base.functionId; + int32_t functionId = pQuery->pExpr1[i].base.functionId; if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) { pRuntimeEnv->pCtx[i].ptsOutputBuf = pRuntimeEnv->pCtx[0].aOutputBuf; } @@ -2527,7 +2559,7 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { setQueryStatus(pQuery, QUERY_COMPLETED); } - if (QUERY_IS_INTERVAL_QUERY(pQuery) && IS_MASTER_SCAN(pRuntimeEnv)) { + if (QUERY_IS_INTERVAL_QUERY(pQuery) && (IS_MASTER_SCAN(pRuntimeEnv)|| pRuntimeEnv->scanFlag == REPEAT_SCAN)) { if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) { closeAllTimeWindow(&pRuntimeEnv->windowResInfo); pRuntimeEnv->windowResInfo.curIndex = 
pRuntimeEnv->windowResInfo.size - 1; // point to the last time window @@ -2592,7 +2624,7 @@ void setTagVal(SQueryRuntimeEnv *pRuntimeEnv, void *pTable, void *tsdb) { SQuery *pQuery = pRuntimeEnv->pQuery; SQInfo* pQInfo = GET_QINFO_ADDR(pRuntimeEnv); - SExprInfo *pExprInfo = &pQuery->pSelectExpr[0]; + SExprInfo *pExprInfo = &pQuery->pExpr1[0]; if (pQuery->numOfOutput == 1 && pExprInfo->base.functionId == TSDB_FUNC_TS_COMP) { assert(pExprInfo->base.numOfParams == 1); @@ -2603,7 +2635,7 @@ void setTagVal(SQueryRuntimeEnv *pRuntimeEnv, void *pTable, void *tsdb) { } else { // set tag value, by which the results are aggregated. for (int32_t idx = 0; idx < pQuery->numOfOutput; ++idx) { - SExprInfo* pLocalExprInfo = &pQuery->pSelectExpr[idx]; + SExprInfo* pLocalExprInfo = &pQuery->pExpr1[idx]; // ts_comp column required the tag value for join filter if (!TSDB_COL_IS_TAG(pLocalExprInfo->base.colInfo.flag)) { @@ -2621,24 +2653,31 @@ void setTagVal(SQueryRuntimeEnv *pRuntimeEnv, void *pTable, void *tsdb) { pFuncMsg->colInfo.colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) { assert(pFuncMsg->numOfParams == 1); - int16_t tagColId = (int16_t)pExprInfo->base.arg->argValue.i64; - SColumnInfo* pColInfo = doGetTagColumnInfoById(pQuery->tagColList, pQuery->numOfTags, tagColId); + int16_t tagColId = (int16_t)pExprInfo->base.arg->argValue.i64; + SColumnInfo *pColInfo = doGetTagColumnInfoById(pQuery->tagColList, pQuery->numOfTags, tagColId); doSetTagValueInParam(tsdb, pTable, tagColId, &pRuntimeEnv->pCtx[0].tag, pColInfo->type, pColInfo->bytes); - qDebug("QInfo:%p set tag value for join comparison, colId:%" PRId64 ", val:%"PRId64, pQInfo, pExprInfo->base.arg->argValue.i64, - pRuntimeEnv->pCtx[0].tag.i64Key) + + int16_t tagType = pRuntimeEnv->pCtx[0].tag.nType; + if (tagType == TSDB_DATA_TYPE_BINARY || tagType == TSDB_DATA_TYPE_NCHAR) { + qDebug("QInfo:%p set tag value for join comparison, colId:%" PRId64 ", val:%s", pQInfo, + pExprInfo->base.arg->argValue.i64, pRuntimeEnv->pCtx[0].tag.pz); + } else { + qDebug("QInfo:%p set tag value for join comparison, colId:%" PRId64 ", val:%" PRId64, pQInfo, + pExprInfo->base.arg->argValue.i64, pRuntimeEnv->pCtx[0].tag.i64Key); + } } } } -static void doMerge(SQueryRuntimeEnv *pRuntimeEnv, int64_t timestamp, SWindowResult *pWindowRes, bool mergeFlag) { +static void doMerge(SQueryRuntimeEnv *pRuntimeEnv, int64_t timestamp, SResultRow *pWindowRes, bool mergeFlag) { SQuery * pQuery = pRuntimeEnv->pQuery; SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; - tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pWindowRes->pos.pageId); + tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pWindowRes->pageId); for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functionId = pQuery->pSelectExpr[i].base.functionId; + int32_t functionId = pQuery->pExpr1[i].base.functionId; if (!mergeFlag) { pCtx[i].aOutputBuf = pCtx[i].aOutputBuf + pCtx[i].outputBytes; pCtx[i].currentStage = FIRST_STAGE_MERGE; @@ -2666,7 +2705,7 @@ static void doMerge(SQueryRuntimeEnv *pRuntimeEnv, int64_t timestamp, SWindowRes } for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functionId = pQuery->pSelectExpr[i].base.functionId; + int32_t functionId = pQuery->pExpr1[i].base.functionId; if (functionId == TSDB_FUNC_TAG_DUMMY) { continue; } @@ -2752,25 +2791,25 @@ void UNUSED_FUNC displayInterResult(tFilePage **pdata, SQueryRuntimeEnv* pRuntim for (int32_t j = 0; j < numOfRows; ++j) { for (int32_t i = 0; i < numOfCols; ++i) { - switch (pQuery->pSelectExpr[i].type) { + switch (pQuery->pExpr1[i].type) { 
case TSDB_DATA_TYPE_BINARY: { - int32_t type = pQuery->pSelectExpr[i].type; - printBinaryData(pQuery->pSelectExpr[i].base.functionId, pdata[i]->data + pQuery->pSelectExpr[i].bytes * j, + int32_t type = pQuery->pExpr1[i].type; + printBinaryData(pQuery->pExpr1[i].base.functionId, pdata[i]->data + pQuery->pExpr1[i].bytes * j, type); break; } case TSDB_DATA_TYPE_TIMESTAMP: case TSDB_DATA_TYPE_BIGINT: - printf("%" PRId64 "\t", *(int64_t *)(pdata[i]->data + pQuery->pSelectExpr[i].bytes * j)); + printf("%" PRId64 "\t", *(int64_t *)(pdata[i]->data + pQuery->pExpr1[i].bytes * j)); break; case TSDB_DATA_TYPE_INT: - printf("%d\t", *(int32_t *)(pdata[i]->data + pQuery->pSelectExpr[i].bytes * j)); + printf("%d\t", *(int32_t *)(pdata[i]->data + pQuery->pExpr1[i].bytes * j)); break; case TSDB_DATA_TYPE_FLOAT: - printf("%f\t", *(float *)(pdata[i]->data + pQuery->pSelectExpr[i].bytes * j)); + printf("%f\t", *(float *)(pdata[i]->data + pQuery->pExpr1[i].bytes * j)); break; case TSDB_DATA_TYPE_DOUBLE: - printf("%lf\t", *(double *)(pdata[i]->data + pQuery->pSelectExpr[i].bytes * j)); + printf("%lf\t", *(double *)(pdata[i]->data + pQuery->pExpr1[i].bytes * j)); break; } } @@ -2805,15 +2844,15 @@ int32_t tableResultComparFn(const void *pLeft, const void *pRight, void *param) } SWindowResInfo *pWindowResInfo1 = &supporter->pTableQueryInfo[left]->windowResInfo; - SWindowResult * pWindowRes1 = getWindowResult(pWindowResInfo1, leftPos); - tFilePage *page1 = getResBufPage(pRuntimeEnv->pResultBuf, pWindowRes1->pos.pageId); + SResultRow * pWindowRes1 = getResultRow(pWindowResInfo1, leftPos); + tFilePage *page1 = getResBufPage(pRuntimeEnv->pResultBuf, pWindowRes1->pageId); char *b1 = getPosInResultPage(pRuntimeEnv, PRIMARYKEY_TIMESTAMP_COL_INDEX, pWindowRes1, page1); TSKEY leftTimestamp = GET_INT64_VAL(b1); SWindowResInfo *pWindowResInfo2 = &supporter->pTableQueryInfo[right]->windowResInfo; - SWindowResult * pWindowRes2 = getWindowResult(pWindowResInfo2, rightPos); - tFilePage *page2 = getResBufPage(pRuntimeEnv->pResultBuf, pWindowRes2->pos.pageId); + SResultRow * pWindowRes2 = getResultRow(pWindowResInfo2, rightPos); + tFilePage *page2 = getResBufPage(pRuntimeEnv->pResultBuf, pWindowRes2->pageId); char *b2 = getPosInResultPage(pRuntimeEnv, PRIMARYKEY_TIMESTAMP_COL_INDEX, pWindowRes2, page2); TSKEY rightTimestamp = GET_INT64_VAL(b2); @@ -2850,7 +2889,7 @@ int32_t mergeIntoGroupResult(SQInfo *pQInfo) { } SGroupResInfo* info = &pQInfo->groupResInfo; - if (pQInfo->groupIndex == numOfGroups && info->pos.pageId == info->numOfDataPages) { + if (pQInfo->groupIndex == numOfGroups && info->pageId == info->numOfDataPages) { SET_STABLE_QUERY_OVER(pQInfo); } @@ -2866,10 +2905,10 @@ void copyResToQueryResultBuf(SQInfo *pQInfo, SQuery *pQuery) { SGroupResInfo* pGroupResInfo = &pQInfo->groupResInfo; // all results have been return to client, try next group - if (pGroupResInfo->pos.pageId == pGroupResInfo->numOfDataPages) { + if (pGroupResInfo->pageId == pGroupResInfo->numOfDataPages) { pGroupResInfo->numOfDataPages = 0; - pGroupResInfo->pos.pageId = 0; - pGroupResInfo->pos.rowId = 0; + pGroupResInfo->pageId = 0; + pGroupResInfo->rowId = 0; // current results of group has been sent to client, try next group if (mergeIntoGroupResult(pQInfo) != TSDB_CODE_SUCCESS) { @@ -2897,22 +2936,22 @@ void copyResToQueryResultBuf(SQInfo *pQInfo, SQuery *pQuery) { assert(size == pGroupResInfo->numOfDataPages); bool done = false; - for (int32_t j = pGroupResInfo->pos.pageId; j < size; ++j) { + for (int32_t j = pGroupResInfo->pageId; j < size; ++j) { 
SPageInfo* pi = *(SPageInfo**) taosArrayGet(list, j); tFilePage* pData = getResBufPage(pResultBuf, pi->pageId); - assert(pData->num > 0 && pData->num <= pRuntimeEnv->numOfRowsPerPage && pGroupResInfo->pos.rowId < pData->num); - int32_t numOfRes = (int32_t)(pData->num - pGroupResInfo->pos.rowId); + assert(pData->num > 0 && pData->num <= pRuntimeEnv->numOfRowsPerPage && pGroupResInfo->rowId < pData->num); + int32_t numOfRes = (int32_t)(pData->num - pGroupResInfo->rowId); if (numOfRes > pQuery->rec.capacity - offset) { numOfCopiedRows = (int32_t)(pQuery->rec.capacity - offset); - pGroupResInfo->pos.rowId += numOfCopiedRows; + pGroupResInfo->rowId += numOfCopiedRows; done = true; } else { numOfCopiedRows = (int32_t)pData->num; - pGroupResInfo->pos.pageId += 1; - pGroupResInfo->pos.rowId = 0; + pGroupResInfo->pageId += 1; + pGroupResInfo->rowId = 0; } for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { @@ -2933,9 +2972,11 @@ void copyResToQueryResultBuf(SQInfo *pQInfo, SQuery *pQuery) { pQuery->rec.rows += offset; } -int64_t getNumOfResultWindowRes(SQuery *pQuery, SWindowResult *pWindowRes) { +int64_t getNumOfResultWindowRes(SQueryRuntimeEnv* pRuntimeEnv, SResultRow *pResultRow) { + SQuery* pQuery = pRuntimeEnv->pQuery; + for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { - int32_t functionId = pQuery->pSelectExpr[j].base.functionId; + int32_t functionId = pQuery->pExpr1[j].base.functionId; /* * ts, tag, tagprj function can not decide the output number of current query @@ -2945,7 +2986,7 @@ int64_t getNumOfResultWindowRes(SQuery *pQuery, SWindowResult *pWindowRes) { continue; } - SResultInfo *pResultInfo = &pWindowRes->resultInfo[j]; + SResultRowCellInfo *pResultInfo = getResultCell(pRuntimeEnv, pResultRow, j); assert(pResultInfo != NULL); if (pResultInfo->numOfRes > 0) { @@ -2967,8 +3008,8 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) { STableQueryInfo **pTableList = malloc(POINTER_BYTES * size); if (pTableList == NULL || posList == NULL) { - taosTFree(posList); - taosTFree(pTableList); + tfree(posList); + tfree(pTableList); qError("QInfo:%p failed alloc memory", pQInfo); longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); @@ -2992,19 +3033,19 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) { // there is no data in current group if (numOfTables == 0) { - taosTFree(posList); - taosTFree(pTableList); + tfree(posList); + tfree(pTableList); return 0; } else if (numOfTables == 1) { // no need to merge results since only one table in each group - taosTFree(posList); - taosTFree(pTableList); + tfree(posList); + tfree(pTableList); SGroupResInfo* pGroupResInfo = &pQInfo->groupResInfo; pGroupResInfo->numOfDataPages = (int32_t)taosArrayGetSize(pageList); pGroupResInfo->groupId = tid; - pGroupResInfo->pos.pageId = 0; - pGroupResInfo->pos.rowId = 0; + pGroupResInfo->pageId = 0; + pGroupResInfo->rowId = 0; return pGroupResInfo->numOfDataPages; } @@ -3014,18 +3055,8 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) { SLoserTreeInfo *pTree = NULL; tLoserTreeCreate(&pTree, numOfTables, &cs, tableResultComparFn); - SResultInfo *pResultInfo = calloc(pQuery->numOfOutput, sizeof(SResultInfo)); - if (pResultInfo == NULL) { - longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - char* buf = calloc(1, pRuntimeEnv->interBufSize); - if (buf == NULL) { - longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - setWindowResultInfo(pResultInfo, pQuery, pRuntimeEnv->stableQuery, buf); - resetMergeResultBuf(pQuery, pRuntimeEnv->pCtx, 
pResultInfo); + SResultRow* pRow = getNewResultRow(pRuntimeEnv->pool); + resetMergeResultBuf(pRuntimeEnv, pRuntimeEnv->pCtx, pRow); pQInfo->groupResInfo.groupId = getGroupResultId(pQInfo->groupIndex); @@ -3037,26 +3068,23 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) { if (IS_QUERY_KILLED(pQInfo)) { qDebug("QInfo:%p it is already killed, abort", pQInfo); - taosTFree(pTableList); - taosTFree(posList); - taosTFree(pTree); - taosTFree(pResultInfo); - taosTFree(buf); - + tfree(pTableList); + tfree(posList); + tfree(pTree); longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } int32_t pos = pTree->pNode[0].index; SWindowResInfo *pWindowResInfo = &pTableList[pos]->windowResInfo; - SWindowResult *pWindowRes = getWindowResult(pWindowResInfo, cs.position[pos]); - tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pWindowRes->pos.pageId); + SResultRow *pWindowRes = getResultRow(pWindowResInfo, cs.position[pos]); + tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pWindowRes->pageId); char *b = getPosInResultPage(pRuntimeEnv, PRIMARYKEY_TIMESTAMP_COL_INDEX, pWindowRes, page); TSKEY ts = GET_INT64_VAL(b); assert(ts == pWindowRes->win.skey); - int64_t num = getNumOfResultWindowRes(pQuery, pWindowRes); + int64_t num = getNumOfResultWindowRes(pRuntimeEnv, pWindowRes); if (num <= 0) { cs.position[pos] += 1; @@ -3077,7 +3105,7 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) { return -1; } - resetMergeResultBuf(pQuery, pRuntimeEnv->pCtx, pResultInfo); + resetMergeResultBuf(pRuntimeEnv, pRuntimeEnv->pCtx, pRow); } doMerge(pRuntimeEnv, ts, pWindowRes, false); @@ -3087,7 +3115,7 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) { lastTimestamp = ts; // move to the next element of current entry - int32_t currentPageId = pWindowRes->pos.pageId; + int32_t currentPageId = pWindowRes->pageId; cs.position[pos] += 1; if (cs.position[pos] >= pWindowResInfo->size) { @@ -3099,8 +3127,8 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) { } } else { // current page is not needed anymore - SWindowResult *pNextWindowRes = getWindowResult(pWindowResInfo, cs.position[pos]); - if (pNextWindowRes->pos.pageId != currentPageId) { + SResultRow *pNextWindowRes = getResultRow(pWindowResInfo, cs.position[pos]); + if (pNextWindowRes->pageId != currentPageId) { releaseResBufPage(pRuntimeEnv->pResultBuf, page); } } @@ -3113,11 +3141,9 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) { if (flushFromResultBuf(pRuntimeEnv, &pQInfo->groupResInfo) != TSDB_CODE_SUCCESS) { qError("QInfo:%p failed to flush data into temp file, abort query", pQInfo); - taosTFree(pTree); - taosTFree(pTableList); - taosTFree(posList); - taosTFree(pResultInfo); - + tfree(pTree); + tfree(pTableList); + tfree(posList); return -1; } } @@ -3130,12 +3156,12 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) { qDebug("QInfo:%p result merge completed for group:%d, elapsed time:%" PRId64 " ms", pQInfo, pQInfo->groupIndex, endt - startt); - taosTFree(pTableList); - taosTFree(posList); - taosTFree(pTree); + tfree(pTableList); + tfree(posList); + tfree(pTree); - taosTFree(pResultInfo); - taosTFree(buf); +// tfree(pResultInfo); +// tfree(buf); return pQInfo->groupResInfo.numOfDataPages; } @@ -3178,12 +3204,14 @@ int32_t flushFromResultBuf(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* pGroupR return TSDB_CODE_SUCCESS; } -void resetMergeResultBuf(SQuery *pQuery, SQLFunctionCtx *pCtx, SResultInfo *pResultInfo) { +void 
resetMergeResultBuf(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx *pCtx, SResultRow *pRow) { + SQuery* pQuery = pRuntimeEnv->pQuery; + for (int32_t k = 0; k < pQuery->numOfOutput; ++k) { pCtx[k].aOutputBuf = pQuery->sdata[k]->data - pCtx[k].outputBytes; pCtx[k].size = 1; pCtx[k].startOffset = 0; - pCtx[k].resultInfo = &pResultInfo[k]; + pCtx[k].resultInfo = getResultCell(pRuntimeEnv, pRow, k); pQuery->sdata[k]->num = 0; } @@ -3220,8 +3248,8 @@ static void updateTableQueryInfoForReverseScan(SQuery *pQuery, STableQueryInfo * pTableQueryInfo->windowResInfo.curIndex = pTableQueryInfo->windowResInfo.size - 1; } -static void disableFuncInReverseScanImpl(SQInfo* pQInfo, SWindowResInfo *pWindowResInfo, int32_t order) { - SQuery* pQuery = pQInfo->runtimeEnv.pQuery; +static void disableFuncInReverseScanImpl(SQueryRuntimeEnv* pRuntimeEnv, SWindowResInfo *pWindowResInfo, int32_t order) { + SQuery* pQuery = pRuntimeEnv->pQuery; for (int32_t i = 0; i < pWindowResInfo->size; ++i) { bool closed = getTimeWindowResStatus(pWindowResInfo, i); @@ -3229,17 +3257,18 @@ static void disableFuncInReverseScanImpl(SQInfo* pQInfo, SWindowResInfo *pWindow continue; } - SWindowResult *buf = getWindowResult(pWindowResInfo, i); + SResultRow *pRow = getResultRow(pWindowResInfo, i); // open/close the specified query for each group result for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { - int32_t functId = pQuery->pSelectExpr[j].base.functionId; + int32_t functId = pQuery->pExpr1[j].base.functionId; + SResultRowCellInfo* pInfo = getResultCell(pRuntimeEnv, pRow, j); if (((functId == TSDB_FUNC_FIRST || functId == TSDB_FUNC_FIRST_DST) && order == TSDB_ORDER_ASC) || ((functId == TSDB_FUNC_LAST || functId == TSDB_FUNC_LAST_DST) && order == TSDB_ORDER_DESC)) { - buf->resultInfo[j].complete = false; + pInfo->complete = false; } else if (functId != TSDB_FUNC_TS && functId != TSDB_FUNC_TAG) { - buf->resultInfo[j].complete = true; + pInfo->complete = true; } } } @@ -3253,10 +3282,10 @@ void disableFuncInReverseScan(SQInfo *pQInfo) { // group by normal columns and interval query on normal table SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; if (pRuntimeEnv->groupbyNormalCol || QUERY_IS_INTERVAL_QUERY(pQuery)) { - disableFuncInReverseScanImpl(pQInfo, pWindowResInfo, order); + disableFuncInReverseScanImpl(pRuntimeEnv, pWindowResInfo, order); } else { // for simple result of table query, for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { // todo refactor - int32_t functId = pQuery->pSelectExpr[j].base.functionId; + int32_t functId = pQuery->pExpr1[j].base.functionId; SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[j]; if (pCtx->resultInfo == NULL) { @@ -3303,27 +3332,20 @@ void switchCtxOrder(SQueryRuntimeEnv *pRuntimeEnv) { } } -int32_t createQueryResultInfo(SQuery *pQuery, SWindowResult *pResultRow, bool isSTableQuery, size_t interBufSize) { - int32_t numOfCols = pQuery->numOfOutput; - - size_t size = numOfCols * sizeof(SResultInfo) + interBufSize; - pResultRow->resultInfo = calloc(1, size); - if (pResultRow->resultInfo == NULL) { - return TSDB_CODE_QRY_OUT_OF_MEMORY; - } - - pResultRow->pos = (SPosInfo) {-1, -1}; - - char* buf = (char*) pResultRow->resultInfo + numOfCols * sizeof(SResultInfo); - - // set the intermediate result output buffer - setWindowResultInfo(pResultRow->resultInfo, pQuery, isSTableQuery, buf); +int32_t initResultRow(SResultRow *pResultRow) { + pResultRow->pCellInfo = (SResultRowCellInfo*)((char*)pResultRow + sizeof(SResultRow)); + pResultRow->pageId = -1; + pResultRow->rowId = -1; return 
TSDB_CODE_SUCCESS; } void resetCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv) { SQuery *pQuery = pRuntimeEnv->pQuery; + int32_t groupIndex = 0; + int32_t uid = 0; + SResultRow* pRow = doPrepareResultRowFromKey(pRuntimeEnv, &pRuntimeEnv->windowResInfo, (char *)&groupIndex, sizeof(groupIndex), true, uid); + for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i]; pCtx->aOutputBuf = pQuery->sdata[i]->data; @@ -3332,16 +3354,17 @@ void resetCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv) { * set the output buffer information and intermediate buffer * not all queries require the interResultBuf, such as COUNT/TAGPRJ/PRJ/TAG etc. */ - RESET_RESULT_INFO(&pRuntimeEnv->resultInfo[i]); - pCtx->resultInfo = &pRuntimeEnv->resultInfo[i]; + SResultRowCellInfo* pCellInfo = getResultCell(pRuntimeEnv, pRow, i); + RESET_RESULT_INFO(pCellInfo); + pCtx->resultInfo = pCellInfo; // set the timestamp output buffer for top/bottom/diff query - int32_t functionId = pQuery->pSelectExpr[i].base.functionId; + int32_t functionId = pQuery->pExpr1[i].base.functionId; if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) { pCtx->ptsOutputBuf = pRuntimeEnv->pCtx[0].aOutputBuf; } - memset(pQuery->sdata[i]->data, 0, (size_t)(pQuery->pSelectExpr[i].bytes * pQuery->rec.capacity)); + memset(pQuery->sdata[i]->data, 0, (size_t)(pQuery->pExpr1[i].bytes * pQuery->rec.capacity)); } initCtxOutputBuf(pRuntimeEnv); @@ -3352,7 +3375,7 @@ void forwardCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, int64_t output) { // reset the execution contexts for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { - int32_t functionId = pQuery->pSelectExpr[j].base.functionId; + int32_t functionId = pQuery->pExpr1[j].base.functionId; assert(functionId != TSDB_FUNC_DIFF); // set next output position @@ -3379,10 +3402,10 @@ void initCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv) { SQuery *pQuery = pRuntimeEnv->pQuery; for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { - int32_t functionId = pQuery->pSelectExpr[j].base.functionId; + int32_t functionId = pQuery->pExpr1[j].base.functionId; pRuntimeEnv->pCtx[j].currentStage = 0; - SResultInfo* pResInfo = GET_RES_INFO(&pRuntimeEnv->pCtx[j]); + SResultRowCellInfo* pResInfo = GET_RES_INFO(&pRuntimeEnv->pCtx[j]); if (pResInfo->initialized) { continue; } @@ -3417,7 +3440,7 @@ void skipResults(SQueryRuntimeEnv *pRuntimeEnv) { 0, pQuery->rec.rows); for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functionId = pQuery->pSelectExpr[i].base.functionId; + int32_t functionId = pQuery->pExpr1[i].base.functionId; int32_t bytes = pRuntimeEnv->pCtx[i].outputBytes; memmove(pQuery->sdata[i]->data, (char*)pQuery->sdata[i]->data + bytes * numOfSkip, (size_t)(pQuery->rec.rows * bytes)); @@ -3451,34 +3474,34 @@ bool needScanDataBlocksAgain(SQueryRuntimeEnv *pRuntimeEnv) { SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; for (int32_t i = 0; i < pWindowResInfo->size; ++i) { - SWindowResult *pResult = getWindowResult(pWindowResInfo, i); + SResultRow *pResult = getResultRow(pWindowResInfo, i); if (!pResult->closed) { continue; } - setWindowResOutputBuf(pRuntimeEnv, pResult); + setResultOutputBuf(pRuntimeEnv, pResult); for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { - int16_t functId = pQuery->pSelectExpr[j].base.functionId; + int16_t functId = pQuery->pExpr1[j].base.functionId; if (functId == TSDB_FUNC_TS) { continue; } aAggs[functId].xNextStep(&pRuntimeEnv->pCtx[j]); - SResultInfo *pResInfo = GET_RES_INFO(&pRuntimeEnv->pCtx[j]); + 
SResultRowCellInfo *pResInfo = GET_RES_INFO(&pRuntimeEnv->pCtx[j]); toContinue |= (!pResInfo->complete); } } } else { for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { - int16_t functId = pQuery->pSelectExpr[j].base.functionId; + int16_t functId = pQuery->pExpr1[j].base.functionId; if (functId == TSDB_FUNC_TS) { continue; } aAggs[functId].xNextStep(&pRuntimeEnv->pCtx[j]); - SResultInfo *pResInfo = GET_RES_INFO(&pRuntimeEnv->pCtx[j]); + SResultRowCellInfo *pResInfo = GET_RES_INFO(&pRuntimeEnv->pCtx[j]); toContinue |= (!pResInfo->complete); } @@ -3551,7 +3574,7 @@ static void setEnvBeforeReverseScan(SQueryRuntimeEnv *pRuntimeEnv, SQueryStatusI tsdbCleanupQueryHandle(pRuntimeEnv->pSecQueryHandle); } - pRuntimeEnv->pSecQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &pQInfo->tableGroupInfo, pQInfo); + pRuntimeEnv->pSecQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &pQInfo->tableGroupInfo, pQInfo, &pQInfo->memRef); if (pRuntimeEnv->pSecQueryHandle == NULL) { longjmp(pRuntimeEnv->env, terrno); } @@ -3635,7 +3658,7 @@ void scanOneTableDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) { } restoreTimeWindow(&pQInfo->tableGroupInfo, &cond); - pRuntimeEnv->pSecQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &pQInfo->tableGroupInfo, pQInfo); + pRuntimeEnv->pSecQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &pQInfo->tableGroupInfo, pQInfo, &pQInfo->memRef); if (pRuntimeEnv->pSecQueryHandle == NULL) { longjmp(pRuntimeEnv->env, terrno); } @@ -3649,7 +3672,6 @@ void scanOneTableDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) { // check if query is killed or not if (IS_QUERY_KILLED(pQInfo)) { - finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } } @@ -3678,15 +3700,15 @@ void finalizeQueryResult(SQueryRuntimeEnv *pRuntimeEnv) { } for (int32_t i = 0; i < pWindowResInfo->size; ++i) { - SWindowResult *buf = &pWindowResInfo->pResult[i]; + SResultRow *buf = pWindowResInfo->pResult[i]; if (!isWindowResClosed(pWindowResInfo, i)) { continue; } - setWindowResOutputBuf(pRuntimeEnv, buf); + setResultOutputBuf(pRuntimeEnv, buf); for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { - aAggs[pQuery->pSelectExpr[j].base.functionId].xFinalize(&pRuntimeEnv->pCtx[j]); + aAggs[pQuery->pExpr1[j].base.functionId].xFinalize(&pRuntimeEnv->pCtx[j]); } /* @@ -3698,14 +3720,14 @@ void finalizeQueryResult(SQueryRuntimeEnv *pRuntimeEnv) { } else { for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { - aAggs[pQuery->pSelectExpr[j].base.functionId].xFinalize(&pRuntimeEnv->pCtx[j]); + aAggs[pQuery->pExpr1[j].base.functionId].xFinalize(&pRuntimeEnv->pCtx[j]); } } } static bool hasMainOutput(SQuery *pQuery) { for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functionId = pQuery->pSelectExpr[i].base.functionId; + int32_t functionId = pQuery->pExpr1[i].base.functionId; if (functionId != TSDB_FUNC_TS && functionId != TSDB_FUNC_TAG && functionId != TSDB_FUNC_TAGPRJ) { return true; @@ -3730,7 +3752,7 @@ static STableQueryInfo *createTableQueryInfo(SQueryRuntimeEnv *pRuntimeEnv, void if (QUERY_IS_INTERVAL_QUERY(pQuery) || pRuntimeEnv->groupbyNormalCol) { int32_t initialSize = 16; int32_t initialThreshold = 100; - int32_t code = initWindowResInfo(&pTableQueryInfo->windowResInfo, pRuntimeEnv, initialSize, initialThreshold, TSDB_DATA_TYPE_INT); + int32_t code = initWindowResInfo(&pTableQueryInfo->windowResInfo, initialSize, initialThreshold, TSDB_DATA_TYPE_INT); if (code != TSDB_CODE_SUCCESS) { return NULL; } @@ -3745,6 
+3767,7 @@ void destroyTableQueryInfoImpl(STableQueryInfo *pTableQueryInfo) { return; } + tVariantDestroy(&pTableQueryInfo->tag); cleanupTimeWindowInfo(&pTableQueryInfo->windowResInfo); } @@ -3769,9 +3792,10 @@ void setExecutionContext(SQInfo *pQInfo, int32_t groupIndex, TSKEY nextKey) { return; } - SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, pWindowResInfo, (char *)&groupIndex, - sizeof(groupIndex), true); - if (pWindowRes == NULL) { + uint64_t uid = 0; // uid is always set to be 0 + SResultRow *pResultRow = doPrepareResultRowFromKey(pRuntimeEnv, pWindowResInfo, (char *)&groupIndex, + sizeof(groupIndex), true, uid); + if (pResultRow == NULL) { return; } @@ -3779,8 +3803,8 @@ void setExecutionContext(SQInfo *pQInfo, int32_t groupIndex, TSKEY nextKey) { * not assign result buffer yet, add new result buffer * all group belong to one result set, and each group result has different group id so set the id to be one */ - if (pWindowRes->pos.pageId == -1) { - if (addNewWindowResultBuf(pWindowRes, pRuntimeEnv->pResultBuf, groupIndex, pRuntimeEnv->numOfRowsPerPage) != + if (pResultRow->pageId == -1) { + if (addNewWindowResultBuf(pResultRow, pRuntimeEnv->pResultBuf, groupIndex, pRuntimeEnv->numOfRowsPerPage) != TSDB_CODE_SUCCESS) { return; } @@ -3788,47 +3812,43 @@ void setExecutionContext(SQInfo *pQInfo, int32_t groupIndex, TSKEY nextKey) { // record the current active group id pRuntimeEnv->prevGroupId = groupIndex; - setWindowResOutputBuf(pRuntimeEnv, pWindowRes); + setResultOutputBuf(pRuntimeEnv, pResultRow); initCtxOutputBuf(pRuntimeEnv); } -void setWindowResOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pResult) { +void setResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult) { SQuery *pQuery = pRuntimeEnv->pQuery; // Note: pResult->pos[i]->num == 0, there is only fixed number of results for each group - tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pResult->pos.pageId); + tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pResult->pageId); for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i]; pCtx->aOutputBuf = getPosInResultPage(pRuntimeEnv, i, pResult, page); - int32_t functionId = pQuery->pSelectExpr[i].base.functionId; + int32_t functionId = pQuery->pExpr1[i].base.functionId; if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) { pCtx->ptsOutputBuf = pRuntimeEnv->pCtx[0].aOutputBuf; } /* - * set the output buffer information and intermediate buffer + * set the output buffer information and intermediate buffer, * not all queries require the interResultBuf, such as COUNT */ - pCtx->resultInfo = &pResult->resultInfo[i]; - - // set super table query flag - SResultInfo *pResInfo = GET_RES_INFO(pCtx); - pResInfo->superTableQ = pRuntimeEnv->stableQuery; + pCtx->resultInfo = getResultCell(pRuntimeEnv, pResult, i); } } -void setWindowResOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pResult) { +void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult) { SQuery *pQuery = pRuntimeEnv->pQuery; // Note: pResult->pos[i]->num == 0, there is only fixed number of results for each group - tFilePage* bufPage = getResBufPage(pRuntimeEnv->pResultBuf, pResult->pos.pageId); + tFilePage* bufPage = getResBufPage(pRuntimeEnv->pResultBuf, pResult->pageId); for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i]; - pCtx->resultInfo = &pResult->resultInfo[i]; + pCtx->resultInfo = 
getResultCell(pRuntimeEnv, pResult, i); if (pCtx->resultInfo->initialized && pCtx->resultInfo->complete) { continue; } @@ -3841,12 +3861,6 @@ void setWindowResOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult * pCtx->ptsOutputBuf = pRuntimeEnv->pCtx[0].aOutputBuf; } - /* - * set the output buffer information and intermediate buffer - * not all queries require the interResultBuf, such as COUNT - */ - pCtx->resultInfo->superTableQ = pRuntimeEnv->stableQuery; // set super table query flag - if (!pCtx->resultInfo->initialized) { aAggs[functionId].init(pCtx); } @@ -3860,14 +3874,40 @@ int32_t setAdditionalInfo(SQInfo *pQInfo, void* pTable, STableQueryInfo *pTableQ // both the master and supplement scan needs to set the correct ts comp start position if (pRuntimeEnv->pTSBuf != NULL) { + tVariant* pTag = &pRuntimeEnv->pCtx[0].tag; + if (pTableQueryInfo->cur.vgroupIndex == -1) { - tVariantAssign(&pTableQueryInfo->tag, &pRuntimeEnv->pCtx[0].tag); - tsBufGetElemStartPos(pRuntimeEnv->pTSBuf, 0, &pTableQueryInfo->tag); + tVariantAssign(&pTableQueryInfo->tag, pTag); + + STSElem elem = tsBufGetElemStartPos(pRuntimeEnv->pTSBuf, pQInfo->vgId, &pTableQueryInfo->tag); + + // failed to find data with the specified tag value and vnodeId + if (!tsBufIsValidElem(&elem)) { + if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { + qError("QInfo:%p failed to find tag:%s in ts_comp", pQInfo, pTag->pz); + } else { + qError("QInfo:%p failed to find tag:%" PRId64 " in ts_comp", pQInfo, pTag->i64Key); + } + + return false; + } // keep the cursor info of current meter - pTableQueryInfo->cur = pRuntimeEnv->pTSBuf->cur; + pTableQueryInfo->cur = tsBufGetCursor(pRuntimeEnv->pTSBuf); + if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { + qDebug("QInfo:%p find tag:%s start pos in ts_comp, blockIndex:%d, tsIndex:%d", pQInfo, pTag->pz, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); + } else { + qDebug("QInfo:%p find tag:%"PRId64" start pos in ts_comp, blockIndex:%d, tsIndex:%d", pQInfo, pTag->i64Key, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); + } + } else { tsBufSetCursor(pRuntimeEnv->pTSBuf, &pTableQueryInfo->cur); + + if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { + qDebug("QInfo:%p find tag:%s start pos in ts_comp, blockIndex:%d, tsIndex:%d", pQInfo, pTag->pz, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); + } else { + qDebug("QInfo:%p find tag:%"PRId64" start pos in ts_comp, blockIndex:%d, tsIndex:%d", pQInfo, pTag->i64Key, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); + } } } @@ -3929,7 +3969,7 @@ void setIntervalQueryRange(SQInfo *pQInfo, TSKEY key) { bool requireTimestamp(SQuery *pQuery) { for (int32_t i = 0; i < pQuery->numOfOutput; i++) { - int32_t functionId = pQuery->pSelectExpr[i].base.functionId; + int32_t functionId = pQuery->pExpr1[i].base.functionId; if ((aAggs[functionId].nStatus & TSDB_FUNCSTATE_NEED_TS) != 0) { return true; } @@ -3961,7 +4001,7 @@ static int32_t doCopyToSData(SQInfo *pQInfo, SWindowResInfo *pResultInfo, int32_ qDebug("QInfo:%p start to copy data from windowResInfo to query buf", pQInfo); int32_t totalSet = numOfClosedTimeWindow(pResultInfo); - SWindowResult* result = pResultInfo->pResult; + SResultRow** result = pResultInfo->pResult; if (orderType == TSDB_ORDER_ASC) { startIdx = pQInfo->groupIndex; @@ -3974,14 +4014,14 @@ static int32_t doCopyToSData(SQInfo *pQInfo, SWindowResInfo *pResultInfo, int32_ 
SGroupResInfo* pGroupResInfo = &pQInfo->groupResInfo; for (int32_t i = startIdx; (i < totalSet) && (i >= 0); i += step) { - if (result[i].numOfRows == 0) { + if (result[i]->numOfRows == 0) { pQInfo->groupIndex += 1; - pGroupResInfo->pos.rowId = 0; + pGroupResInfo->rowId = 0; continue; } - int32_t numOfRowsToCopy = result[i].numOfRows - pGroupResInfo->pos.rowId; - int32_t oldOffset = pGroupResInfo->pos.rowId; + int32_t numOfRowsToCopy = result[i]->numOfRows - pGroupResInfo->rowId; + int32_t oldOffset = pGroupResInfo->rowId; /* * current output space is not enough to accommodate all data of this page, only partial results @@ -3989,19 +4029,19 @@ static int32_t doCopyToSData(SQInfo *pQInfo, SWindowResInfo *pResultInfo, int32_ */ if (numOfRowsToCopy > pQuery->rec.capacity - numOfResult) { numOfRowsToCopy = (int32_t) pQuery->rec.capacity - numOfResult; - pGroupResInfo->pos.rowId += numOfRowsToCopy; + pGroupResInfo->rowId += numOfRowsToCopy; } else { - pGroupResInfo->pos.rowId = 0; + pGroupResInfo->rowId = 0; pQInfo->groupIndex += 1; } - tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, result[i].pos.pageId); + tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, result[i]->pageId); for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { int32_t size = pRuntimeEnv->pCtx[j].outputBytes; char *out = pQuery->sdata[j]->data + numOfResult * size; - char *in = getPosInResultPage(pRuntimeEnv, j, &result[i], page); + char *in = getPosInResultPage(pRuntimeEnv, j, result[i], page); memcpy(out, in + oldOffset * size, size * numOfRowsToCopy); } @@ -4048,7 +4088,7 @@ static void updateWindowResNumOfRes(SQueryRuntimeEnv *pRuntimeEnv) { } for (int32_t i = 0; i < pRuntimeEnv->windowResInfo.size; ++i) { - SWindowResult *pResult = &pRuntimeEnv->windowResInfo.pResult[i]; + SResultRow *pResult = pRuntimeEnv->windowResInfo.pResult[i]; for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { int32_t functionId = pRuntimeEnv->pCtx[j].functionId; @@ -4056,7 +4096,8 @@ static void updateWindowResNumOfRes(SQueryRuntimeEnv *pRuntimeEnv) { continue; } - pResult->numOfRows = (uint16_t)(MAX(pResult->numOfRows, pResult->resultInfo[j].numOfRes)); + SResultRowCellInfo* pCell = getResultCell(pRuntimeEnv, pResult, j); + pResult->numOfRows = (uint16_t)(MAX(pResult->numOfRows, pCell->numOfRes)); } } } @@ -4101,7 +4142,7 @@ bool queryHasRemainResForTableQuery(SQueryRuntimeEnv* pRuntimeEnv) { * first result row in the actual result set will fill nothing. */ if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) { - int32_t numOfTotal = (int32_t)getFilledNumOfRes(pFillInfo, pQuery->window.ekey, (int32_t)pQuery->rec.capacity); + int32_t numOfTotal = (int32_t)getNumOfResWithFill(pFillInfo, pQuery->window.ekey, (int32_t)pQuery->rec.capacity); return numOfTotal > 0; } @@ -4117,14 +4158,27 @@ bool queryHasRemainResForTableQuery(SQueryRuntimeEnv* pRuntimeEnv) { return false; } +static int16_t getNumOfFinalResCol(SQuery* pQuery) { + return pQuery->pExpr2 == NULL? 
pQuery->numOfOutput:pQuery->numOfExpr2; +} + static void doCopyQueryResultToMsg(SQInfo *pQInfo, int32_t numOfRows, char *data) { SQuery *pQuery = pQInfo->runtimeEnv.pQuery; - for (int32_t col = 0; col < pQuery->numOfOutput; ++col) { - int32_t bytes = pQuery->pSelectExpr[col].bytes; + if (pQuery->pExpr2 == NULL) { + for (int32_t col = 0; col < pQuery->numOfOutput; ++col) { + int32_t bytes = pQuery->pExpr1[col].bytes; - memmove(data, pQuery->sdata[col]->data, bytes * numOfRows); - data += bytes * numOfRows; + memmove(data, pQuery->sdata[col]->data, bytes * numOfRows); + data += bytes * numOfRows; + } + } else { + for (int32_t col = 0; col < pQuery->numOfExpr2; ++col) { + int32_t bytes = pQuery->pExpr2[col].bytes; + + memmove(data, pQuery->sdata[col]->data, bytes * numOfRows); + data += bytes * numOfRows; + } } int32_t numOfTables = (int32_t)taosArrayGetSize(pQInfo->arrTableIdInfo); @@ -4159,7 +4213,7 @@ int32_t doFillGapsInResults(SQueryRuntimeEnv* pRuntimeEnv, tFilePage **pDst, int SFillInfo* pFillInfo = pRuntimeEnv->pFillInfo; while (1) { - int32_t ret = (int32_t)taosGenerateDataBlock(pFillInfo, (tFilePage**)pQuery->sdata, (int32_t)pQuery->rec.capacity); + int32_t ret = (int32_t)taosFillResultDataBlock(pFillInfo, (tFilePage**)pQuery->sdata, (int32_t)pQuery->rec.capacity); // todo apply limit output function /* reached the start position of according to offset value, return immediately */ @@ -4174,10 +4228,9 @@ int32_t doFillGapsInResults(SQueryRuntimeEnv* pRuntimeEnv, tFilePage **pDst, int ret -= (int32_t)pQuery->limit.offset; // todo !!!!there exactly number of interpo is not valid. - // todo refactor move to the beginning of buffer for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - memmove(pDst[i]->data, pDst[i]->data + pQuery->pSelectExpr[i].bytes * pQuery->limit.offset, - ret * pQuery->pSelectExpr[i].bytes); + memmove(pDst[i]->data, pDst[i]->data + pQuery->pExpr1[i].bytes * pQuery->limit.offset, + ret * pQuery->pExpr1[i].bytes); } pQuery->limit.offset = 0; @@ -4202,16 +4255,24 @@ static void queryCostStatis(SQInfo *pQInfo) { SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; SQueryCostInfo *pSummary = &pRuntimeEnv->summary; + uint64_t hashSize = taosHashGetMemSize(pQInfo->runtimeEnv.pResultRowHashTable); + hashSize += taosHashGetMemSize(pQInfo->tableqinfoGroupInfo.map); + pSummary->hashSize = hashSize; + // add the merge time pSummary->elapsedTime += pSummary->firstStageMergeTime; + SResultRowPool* p = pQInfo->runtimeEnv.pool; + pSummary->winInfoSize = getResultRowPoolMemSize(p); + pSummary->numOfTimeWindows = getNumOfAllocatedResultRows(p); + qDebug("QInfo:%p :cost summary: elapsed time:%"PRId64" us, first merge:%"PRId64" us, total blocks:%d, " "load block statis:%d, load data block:%d, total rows:%"PRId64 ", check rows:%"PRId64, pQInfo, pSummary->elapsedTime, pSummary->firstStageMergeTime, pSummary->totalBlocks, pSummary->loadBlockStatis, pSummary->loadBlocks, pSummary->totalRows, pSummary->totalCheckedRows); - qDebug("QInfo:%p :cost summary: internal size:%"PRId64"B, numOfWin:%"PRId64, pQInfo, pSummary->internalSupSize, - pSummary->numOfTimeWindows); + qDebug("QInfo:%p :cost summary: winResPool size:%.2f Kb, numOfWin:%"PRId64", tableInfoSize:%.2f Kb, hashTable:%.2f Kb", pQInfo, pSummary->winInfoSize/1024.0, + pSummary->numOfTimeWindows, pSummary->tableInfoSize/1024.0, pSummary->hashSize/1024.0); } static void updateOffsetVal(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo *pBlockInfo) { @@ -4266,7 +4327,6 @@ void skipBlocks(SQueryRuntimeEnv *pRuntimeEnv) { SDataBlockInfo 
blockInfo = SDATA_BLOCK_INITIALIZER; while (tsdbNextDataBlock(pQueryHandle)) { if (IS_QUERY_KILLED(GET_QINFO_ADDR(pRuntimeEnv))) { - finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } @@ -4290,6 +4350,56 @@ void skipBlocks(SQueryRuntimeEnv *pRuntimeEnv) { } } +static TSKEY doSkipIntervalProcess(SQueryRuntimeEnv* pRuntimeEnv, STimeWindow* win, SDataBlockInfo* pBlockInfo, STableQueryInfo* pTableQueryInfo) { + SQuery *pQuery = pRuntimeEnv->pQuery; + SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; + + assert(pQuery->limit.offset == 0); + STimeWindow tw = *win; + getNextTimeWindow(pQuery, &tw); + + if ((tw.skey <= pBlockInfo->window.ekey && QUERY_IS_ASC_QUERY(pQuery)) || + (tw.ekey >= pBlockInfo->window.skey && !QUERY_IS_ASC_QUERY(pQuery))) { + + // load the data block and check data remaining in current data block + // TODO optimize performance + SArray * pDataBlock = tsdbRetrieveDataBlock(pRuntimeEnv->pQueryHandle, NULL); + SColumnInfoData *pColInfoData = taosArrayGet(pDataBlock, 0); + + tw = *win; + int32_t startPos = + getNextQualifiedWindow(pRuntimeEnv, &tw, pBlockInfo, pColInfoData->pData, binarySearchForKey, -1); + assert(startPos >= 0); + + // set the abort info + pQuery->pos = startPos; + + // reset the query start timestamp + pTableQueryInfo->win.skey = ((TSKEY *)pColInfoData->pData)[startPos]; + pQuery->window.skey = pTableQueryInfo->win.skey; + TSKEY key = pTableQueryInfo->win.skey; + + pWindowResInfo->prevSKey = tw.skey; + int32_t index = pRuntimeEnv->windowResInfo.curIndex; + + int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, pBlockInfo, NULL, binarySearchForKey, pDataBlock); + pRuntimeEnv->windowResInfo.curIndex = index; // restore the window index + + qDebug("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, numOfRes:%d, lastKey:%" PRId64, + GET_QINFO_ADDR(pRuntimeEnv), pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows, numOfRes, + pQuery->current->lastKey); + + return key; + } else { // do nothing + pQuery->window.skey = tw.skey; + pWindowResInfo->prevSKey = tw.skey; + + return tw.skey; + } + + return true; +} + static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) { SQuery *pQuery = pRuntimeEnv->pQuery; *start = pQuery->current->lastKey; @@ -4332,52 +4442,19 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) { STimeWindow win = getActiveTimeWindow(pWindowResInfo, pWindowResInfo->prevSKey, pQuery); while (pQuery->limit.offset > 0) { + STimeWindow tw = win; + if ((win.ekey <= blockInfo.window.ekey && QUERY_IS_ASC_QUERY(pQuery)) || (win.ekey >= blockInfo.window.skey && !QUERY_IS_ASC_QUERY(pQuery))) { pQuery->limit.offset -= 1; pWindowResInfo->prevSKey = win.skey; } - STimeWindow tw = win; - GET_NEXT_TIMEWINDOW(pQuery, &tw); - + // current window does not ended in current data block, try next data block + getNextTimeWindow(pQuery, &tw); if (pQuery->limit.offset == 0) { - if ((tw.skey <= blockInfo.window.ekey && QUERY_IS_ASC_QUERY(pQuery)) || - (tw.ekey >= blockInfo.window.skey && !QUERY_IS_ASC_QUERY(pQuery))) { - // load the data block and check data remaining in current data block - // TODO optimize performance - SArray * pDataBlock = tsdbRetrieveDataBlock(pRuntimeEnv->pQueryHandle, NULL); - SColumnInfoData *pColInfoData = taosArrayGet(pDataBlock, 0); - - tw = win; - int32_t startPos = - getNextQualifiedWindow(pRuntimeEnv, &tw, &blockInfo, pColInfoData->pData, binarySearchForKey, -1); - 
assert(startPos >= 0); - - // set the abort info - pQuery->pos = startPos; - - // reset the query start timestamp - pTableQueryInfo->win.skey = ((TSKEY *)pColInfoData->pData)[startPos]; - pQuery->window.skey = pTableQueryInfo->win.skey; - *start = pTableQueryInfo->win.skey; - - pWindowResInfo->prevSKey = tw.skey; - int32_t index = pRuntimeEnv->windowResInfo.curIndex; - - int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, &blockInfo, NULL, binarySearchForKey, pDataBlock); - pRuntimeEnv->windowResInfo.curIndex = index; // restore the window index - - qDebug("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, numOfRes:%d, lastKey:%"PRId64, - GET_QINFO_ADDR(pRuntimeEnv), blockInfo.window.skey, blockInfo.window.ekey, blockInfo.rows, numOfRes, pQuery->current->lastKey); - - return true; - } else { // do nothing - *start = tw.skey; - pQuery->window.skey = tw.skey; - pWindowResInfo->prevSKey = tw.skey; - return true; - } + *start = doSkipIntervalProcess(pRuntimeEnv, &win, &blockInfo, pTableQueryInfo); + return true; } /* @@ -4392,16 +4469,26 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) { SArray * pDataBlock = tsdbRetrieveDataBlock(pRuntimeEnv->pQueryHandle, NULL); SColumnInfoData *pColInfoData = taosArrayGet(pDataBlock, 0); - tw = win; - int32_t startPos = - getNextQualifiedWindow(pRuntimeEnv, &tw, &blockInfo, pColInfoData->pData, binarySearchForKey, -1); - assert(startPos >= 0); + if ((win.ekey > blockInfo.window.ekey && QUERY_IS_ASC_QUERY(pQuery)) || + (win.ekey < blockInfo.window.skey && !QUERY_IS_ASC_QUERY(pQuery))) { + pQuery->limit.offset -= 1; + } + + if (pQuery->limit.offset == 0) { + *start = doSkipIntervalProcess(pRuntimeEnv, &win, &blockInfo, pTableQueryInfo); + return true; + } else { + tw = win; + int32_t startPos = + getNextQualifiedWindow(pRuntimeEnv, &tw, &blockInfo, pColInfoData->pData, binarySearchForKey, -1); + assert(startPos >= 0); - // set the abort info - pQuery->pos = startPos; - pTableQueryInfo->lastKey = ((TSKEY *)pColInfoData->pData)[startPos]; - pWindowResInfo->prevSKey = tw.skey; - win = tw; + // set the abort info + pQuery->pos = startPos; + pTableQueryInfo->lastKey = ((TSKEY *)pColInfoData->pData)[startPos]; + pWindowResInfo->prevSKey = tw.skey; + win = tw; + } } else { break; // offset is not 0, and next time window begins or ends in the next block. 
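
The offset-skipping logic above advances over whole time windows without loading their data blocks. A minimal standalone sketch of that idea, assuming fixed-length, non-overlapping, ascending windows; skipWholeWindows and its arguments are illustrative names, not TDengine functions:

#include <stdint.h>
#include <stdio.h>

typedef int64_t TSKEY;

/* Count how many whole windows of length `interval` (starting at `origin`,
 * ascending order) end before `firstBlockKey`, consume them from `*offset`,
 * and return the start key of the first window that still must be scanned. */
static TSKEY skipWholeWindows(TSKEY origin, int64_t interval, TSKEY firstBlockKey, int64_t *offset) {
  if (firstBlockKey <= origin || *offset == 0) {
    return origin;
  }
  int64_t fullWindows = (firstBlockKey - origin) / interval;   /* windows ending before the block */
  int64_t skipped = (fullWindows < *offset) ? fullWindows : *offset;
  *offset -= skipped;
  return origin + skipped * interval;                          /* new query start key */
}

int main(void) {
  int64_t offset = 5;                                          /* LIMIT ... OFFSET 5 */
  TSKEY start = skipWholeWindows(1000, 100, 1430, &offset);
  printf("new start=%lld, remaining offset=%lld\n", (long long)start, (long long)offset);
  /* prints: new start=1400, remaining offset=1 */
  return 0;
}

The real skipTimeInterval above additionally handles descending scans and offsets that end inside a loaded block, which this sketch deliberately leaves out.
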
} @@ -4452,7 +4539,7 @@ static int32_t setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery) terrno = TSDB_CODE_SUCCESS; if (isFirstLastRowQuery(pQuery)) { - pRuntimeEnv->pQueryHandle = tsdbQueryLastRow(tsdb, &cond, &pQInfo->tableGroupInfo, pQInfo); + pRuntimeEnv->pQueryHandle = tsdbQueryLastRow(tsdb, &cond, &pQInfo->tableGroupInfo, pQInfo, &pQInfo->memRef); // update the query time window pQuery->window = cond.twindow; @@ -4474,16 +4561,16 @@ static int32_t setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery) } } } else if (isPointInterpoQuery(pQuery)) { - pRuntimeEnv->pQueryHandle = tsdbQueryRowsInExternalWindow(tsdb, &cond, &pQInfo->tableGroupInfo, pQInfo); + pRuntimeEnv->pQueryHandle = tsdbQueryRowsInExternalWindow(tsdb, &cond, &pQInfo->tableGroupInfo, pQInfo, &pQInfo->memRef); } else { - pRuntimeEnv->pQueryHandle = tsdbQueryTables(tsdb, &cond, &pQInfo->tableGroupInfo, pQInfo); + pRuntimeEnv->pQueryHandle = tsdbQueryTables(tsdb, &cond, &pQInfo->tableGroupInfo, pQInfo, &pQInfo->memRef); } return terrno; } -static SFillColInfo* taosCreateFillColInfo(SQuery* pQuery) { - int32_t numOfCols = pQuery->numOfOutput; +static SFillColInfo* createFillColInfo(SQuery* pQuery) { + int32_t numOfCols = getNumOfFinalResCol(pQuery); int32_t offset = 0; SFillColInfo* pFillCol = calloc(numOfCols, sizeof(SFillColInfo)); @@ -4491,12 +4578,14 @@ static SFillColInfo* taosCreateFillColInfo(SQuery* pQuery) { return NULL; } + // TODO refactor for(int32_t i = 0; i < numOfCols; ++i) { - SExprInfo* pExprInfo = &pQuery->pSelectExpr[i]; + SExprInfo* pExprInfo = (pQuery->pExpr2 == NULL)? &pQuery->pExpr1[i]:&pQuery->pExpr2[i]; pFillCol[i].col.bytes = pExprInfo->bytes; pFillCol[i].col.type = (int8_t)pExprInfo->type; pFillCol[i].col.offset = offset; + pFillCol[i].tagIndex = -2; pFillCol[i].flag = TSDB_COL_NORMAL; // always be ta normal column for table query pFillCol[i].functionId = pExprInfo->base.functionId; pFillCol[i].fillVal.i = pQuery->fillVal[i]; @@ -4510,7 +4599,6 @@ static SFillColInfo* taosCreateFillColInfo(SQuery* pQuery) { int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bool isSTableQuery) { SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; - int32_t code = TSDB_CODE_SUCCESS; SQuery *pQuery = pQInfo->runtimeEnv.pQuery; pRuntimeEnv->topBotQuery = isTopBottomQuery(pQuery); @@ -4518,7 +4606,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo setScanLimitationByResultBuffer(pQuery); - code = setupQueryHandle(tsdb, pQInfo, isSTableQuery); + int32_t code = setupQueryHandle(tsdb, pQInfo, isSTableQuery); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -4538,12 +4626,6 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo tsBufSetTraverseOrder(pRuntimeEnv->pTSBuf, order); } - // create runtime environment - code = setupQueryRuntimeEnv(pRuntimeEnv, pQuery->order.order); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - int32_t ps = DEFAULT_PAGE_SIZE; int32_t rowsize = 0; getIntermediateBufInfo(pRuntimeEnv, &ps, &rowsize); @@ -4570,12 +4652,12 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo } } - code = initWindowResInfo(&pRuntimeEnv->windowResInfo, pRuntimeEnv, 8, threshold, type); + code = initWindowResInfo(&pRuntimeEnv->windowResInfo, 8, threshold, type); if (code != TSDB_CODE_SUCCESS) { return code; } } - } else if (pRuntimeEnv->groupbyNormalCol || QUERY_IS_INTERVAL_QUERY(pQuery)) { + } else if (pRuntimeEnv->groupbyNormalCol || 
QUERY_IS_INTERVAL_QUERY(pQuery) || (!isSTableQuery)) { int32_t numOfResultRows = getInitialPageNum(pQInfo); getIntermediateBufInfo(pRuntimeEnv, &ps, &rowsize); code = createDiskbasedResultBuffer(&pRuntimeEnv->pResultBuf, rowsize, ps, TWOMB, pQInfo); @@ -4590,23 +4672,30 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo type = TSDB_DATA_TYPE_TIMESTAMP; } - code = initWindowResInfo(&pRuntimeEnv->windowResInfo, pRuntimeEnv, numOfResultRows, 4096, type); + code = initWindowResInfo(&pRuntimeEnv->windowResInfo, numOfResultRows, 1024, type); if (code != TSDB_CODE_SUCCESS) { return code; } } + // create runtime environment + code = setupQueryRuntimeEnv(pRuntimeEnv, pQuery->order.order); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + if (pQuery->fillType != TSDB_FILL_NONE && !isPointInterpoQuery(pQuery)) { - SFillColInfo* pColInfo = taosCreateFillColInfo(pQuery); + SFillColInfo* pColInfo = createFillColInfo(pQuery); STimeWindow w = TSWINDOW_INITIALIZER; TSKEY sk = MIN(pQuery->window.skey, pQuery->window.ekey); TSKEY ek = MAX(pQuery->window.skey, pQuery->window.ekey); getAlignQueryTimeWindow(pQuery, pQuery->window.skey, sk, ek, &w); - pRuntimeEnv->pFillInfo = taosInitFillInfo(pQuery->order.order, w.skey, 0, (int32_t)pQuery->rec.capacity, pQuery->numOfOutput, + int32_t numOfCols = getNumOfFinalResCol(pQuery); + pRuntimeEnv->pFillInfo = taosInitFillInfo(pQuery->order.order, w.skey, 0, (int32_t)pQuery->rec.capacity, numOfCols, pQuery->interval.sliding, pQuery->interval.slidingUnit, (int8_t)pQuery->precision, - pQuery->fillType, pColInfo); + pQuery->fillType, pColInfo, pQInfo); } setQueryStatus(pQuery, QUERY_NOT_COMPLETED); @@ -4617,7 +4706,7 @@ static void enableExecutionForNextTable(SQueryRuntimeEnv *pRuntimeEnv) { SQuery *pQuery = pRuntimeEnv->pQuery; for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - SResultInfo *pResInfo = GET_RES_INFO(&pRuntimeEnv->pCtx[i]); + SResultRowCellInfo *pResInfo = GET_RES_INFO(&pRuntimeEnv->pCtx[i]); if (pResInfo != NULL) { pResInfo->complete = false; } @@ -4755,7 +4844,7 @@ static bool multiTableMultioutputHelper(SQInfo *pQInfo, int32_t index) { pRuntimeEnv->pQueryHandle = NULL; } - pRuntimeEnv->pQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &gp, pQInfo); + pRuntimeEnv->pQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &gp, pQInfo, &pQInfo->memRef); taosArrayDestroy(tx); taosArrayDestroy(g1); if (pRuntimeEnv->pQueryHandle == NULL) { @@ -4763,15 +4852,62 @@ static bool multiTableMultioutputHelper(SQInfo *pQInfo, int32_t index) { } if (pRuntimeEnv->pTSBuf != NULL) { + tVariant* pTag = &pRuntimeEnv->pCtx[0].tag; + if (pRuntimeEnv->cur.vgroupIndex == -1) { - STSElem elem = tsBufGetElemStartPos(pRuntimeEnv->pTSBuf, 0, &pRuntimeEnv->pCtx[0].tag); + STSElem elem = tsBufGetElemStartPos(pRuntimeEnv->pTSBuf, pQInfo->vgId, pTag); + // failed to find data with the specified tag value and vnodeId + if (!tsBufIsValidElem(&elem)) { + if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { + qError("QInfo:%p failed to find tag:%s in ts_comp", pQInfo, pTag->pz); + } else { + qError("QInfo:%p failed to find tag:%"PRId64" in ts_comp", pQInfo, pTag->i64Key); + } - // failed to find data with the specified tag value - if (elem.vnode < 0) { return false; + } else { + STSCursor cur = tsBufGetCursor(pRuntimeEnv->pTSBuf); + + if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { + qDebug("QInfo:%p find tag:%s start pos in ts_comp, blockIndex:%d, tsIndex:%d", pQInfo, pTag->pz, + 
cur.blockIndex, cur.tsIndex); + } else { + qDebug("QInfo:%p find tag:%"PRId64" start pos in ts_comp, blockIndex:%d, tsIndex:%d", pQInfo, pTag->i64Key, + cur.blockIndex, cur.tsIndex); + } } } else { - tsBufSetCursor(pRuntimeEnv->pTSBuf, &pRuntimeEnv->cur); + STSElem elem = tsBufGetElem(pRuntimeEnv->pTSBuf); + if (tVariantCompare(elem.tag, &pRuntimeEnv->pCtx[0].tag) != 0) { + + STSElem elem1 = tsBufGetElemStartPos(pRuntimeEnv->pTSBuf, pQInfo->vgId, pTag); + // failed to find data with the specified tag value and vnodeId + if (!tsBufIsValidElem(&elem1)) { + if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { + qError("QInfo:%p failed to find tag:%s in ts_comp", pQInfo, pTag->pz); + } else { + qError("QInfo:%p failed to find tag:%"PRId64" in ts_comp", pQInfo, pTag->i64Key); + } + + return false; + } else { + STSCursor cur = tsBufGetCursor(pRuntimeEnv->pTSBuf); + if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { + qDebug("QInfo:%p find tag:%s start pos in ts_comp, blockIndex:%d, tsIndex:%d", pQInfo, pTag->pz, cur.blockIndex, cur.tsIndex); + } else { + qDebug("QInfo:%p find tag:%"PRId64" start pos in ts_comp, blockIndex:%d, tsIndex:%d", pQInfo, pTag->i64Key, cur.blockIndex, cur.tsIndex); + } + } + + } else { + tsBufSetCursor(pRuntimeEnv->pTSBuf, &pRuntimeEnv->cur); + STSCursor cur = tsBufGetCursor(pRuntimeEnv->pTSBuf); + if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { + qDebug("QInfo:%p continue scan ts_comp file, tag:%s blockIndex:%d, tsIndex:%d", pQInfo, pTag->pz, cur.blockIndex, cur.tsIndex); + } else { + qDebug("QInfo:%p continue scan ts_comp file, tag:%"PRId64" blockIndex:%d, tsIndex:%d", pQInfo, pTag->i64Key, cur.blockIndex, cur.tsIndex); + } + } } } @@ -4793,7 +4929,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { size_t numOfGroups = GET_NUM_OF_TABLEGROUP(pQInfo); - if (isPointInterpoQuery(pQuery) || isFirstLastRowQuery(pQuery)) { + if (isPointInterpoQuery(pQuery)) { resetCtxOutputBuf(pRuntimeEnv); assert(pQuery->limit.offset == 0 && pQuery->limit.limit != 0); @@ -4823,11 +4959,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { pRuntimeEnv->pQueryHandle = NULL; } - if (isFirstLastRowQuery(pQuery)) { - assert(0); // last_row query switch to other routine to handle - } else { - pRuntimeEnv->pQueryHandle = tsdbQueryRowsInExternalWindow(pQInfo->tsdb, &cond, &gp, pQInfo); - } + pRuntimeEnv->pQueryHandle = tsdbQueryRowsInExternalWindow(pQInfo->tsdb, &cond, &gp, pQInfo, &pQInfo->memRef); taosArrayDestroy(tx); taosArrayDestroy(g1); @@ -4841,10 +4973,6 @@ static void sequentialTableProcess(SQInfo *pQInfo) { assert(taosArrayGetSize(s) >= 1); setTagVal(pRuntimeEnv, taosArrayGetP(s, 0), pQInfo->tsdb); - if (isFirstLastRowQuery(pQuery)) { - assert(taosArrayGetSize(s) == 1); - } - taosArrayDestroy(s); // here we simply set the first table as current table @@ -4897,7 +5025,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { } // no need to update the lastkey for each table - pRuntimeEnv->pQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &gp, pQInfo); + pRuntimeEnv->pQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &gp, pQInfo, &pQInfo->memRef); taosArrayDestroy(g1); taosArrayDestroy(tx); @@ -4923,11 +5051,12 @@ static void sequentialTableProcess(SQInfo *pQInfo) { } for (int32_t i = 0; i < pWindowResInfo->size; ++i) { - pWindowResInfo->pResult[i].closed = true; // enable return all results for group by normal columns + pWindowResInfo->pResult[i]->closed = true; // enable return 
all results for group by normal columns - SWindowResult *pResult = &pWindowResInfo->pResult[i]; + SResultRow *pResult = pWindowResInfo->pResult[i]; for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { - pResult->numOfRows = (uint16_t)(MAX(pResult->numOfRows, pResult->resultInfo[j].numOfRes)); + SResultRowCellInfo* pCell = getResultCell(pRuntimeEnv, pResult, j); + pResult->numOfRows = (uint16_t)(MAX(pResult->numOfRows, pCell->numOfRes)); } } @@ -5027,6 +5156,10 @@ static void sequentialTableProcess(SQInfo *pQInfo) { break; } + if (pRuntimeEnv->pTSBuf != NULL) { + pRuntimeEnv->cur = pRuntimeEnv->pTSBuf->cur; + } + } else { // all data in the result buffer are skipped due to the offset, continue to retrieve data from current meter if (pQuery->rec.rows == 0) { @@ -5079,7 +5212,7 @@ static void doSaveContext(SQInfo *pQInfo) { SWITCH_ORDER(pQuery->order.order); if (pRuntimeEnv->pTSBuf != NULL) { - pRuntimeEnv->pTSBuf->cur.order = pQuery->order.order; + SWITCH_ORDER(pRuntimeEnv->pTSBuf->cur.order); } STsdbQueryCond cond = { @@ -5101,7 +5234,7 @@ static void doSaveContext(SQInfo *pQInfo) { setupQueryRangeForReverseScan(pQInfo); pRuntimeEnv->prevGroupId = INT32_MIN; - pRuntimeEnv->pSecQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &pQInfo->tableGroupInfo, pQInfo); + pRuntimeEnv->pSecQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &pQInfo->tableGroupInfo, pQInfo, &pQInfo->memRef); if (pRuntimeEnv->pSecQueryHandle == NULL) { longjmp(pRuntimeEnv->env, terrno); } @@ -5172,7 +5305,6 @@ static void multiTableQueryProcess(SQInfo *pQInfo) { // query error occurred or query is killed, abort current execution if (pQInfo->code != TSDB_CODE_SUCCESS || IS_QUERY_KILLED(pQInfo)) { qDebug("QInfo:%p query killed or error occurred, code:%s, abort", pQInfo, tstrerror(pQInfo->code)); - finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } @@ -5194,7 +5326,7 @@ static void multiTableQueryProcess(SQInfo *pQInfo) { if (pQInfo->code != TSDB_CODE_SUCCESS || IS_QUERY_KILLED(pQInfo)) { qDebug("QInfo:%p query killed or error occurred, code:%s, abort", pQInfo, tstrerror(pQInfo->code)); - finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query + //TODO finalizeQueryResult may cause SEGSEV, since the memory may not allocated yet, add a cleanup function instead longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } @@ -5214,6 +5346,77 @@ static void multiTableQueryProcess(SQInfo *pQInfo) { qDebug("QInfo:%p points returned:%" PRId64 ", total:%" PRId64, pQInfo, pQuery->rec.rows, pQuery->rec.total + pQuery->rec.rows); } + +static char *getArithemicInputSrc(void *param, const char *name, int32_t colId) { + SArithmeticSupport *pSupport = (SArithmeticSupport *) param; + SExprInfo* pExprInfo = (SExprInfo*) pSupport->exprList; + + int32_t index = -1; + for (int32_t i = 0; i < pSupport->numOfCols; ++i) { + if (colId == pExprInfo[i].base.resColId) { + index = i; + break; + } + } + + assert(index >= 0 && index < pSupport->numOfCols); + return pSupport->data[index] + pSupport->offset * pExprInfo[index].bytes; +} + +static void doSecondaryArithmeticProcess(SQuery* pQuery) { + if (pQuery->numOfExpr2 == 0) { + return; + } + + SArithmeticSupport arithSup = {0}; + tFilePage **data = calloc(pQuery->numOfExpr2, POINTER_BYTES); + for (int32_t i = 0; i < pQuery->numOfExpr2; ++i) { + int32_t bytes = pQuery->pExpr2[i].bytes; + data[i] = (tFilePage *)malloc(bytes * pQuery->rec.rows + sizeof(tFilePage)); + } + + arithSup.offset = 0; + 
arithSup.numOfCols = (int32_t)pQuery->numOfOutput; + arithSup.exprList = pQuery->pExpr1; + arithSup.data = calloc(arithSup.numOfCols, POINTER_BYTES); + + for (int32_t k = 0; k < arithSup.numOfCols; ++k) { + arithSup.data[k] = pQuery->sdata[k]->data; + } + + for (int i = 0; i < pQuery->numOfExpr2; ++i) { + SExprInfo *pExpr = &pQuery->pExpr2[i]; + + // calculate the result from several other columns + SSqlFuncMsg* pSqlFunc = &pExpr->base; + if (pSqlFunc->functionId != TSDB_FUNC_ARITHM) { + + for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { + if (pSqlFunc->functionId == pQuery->pExpr1[j].base.functionId && + pSqlFunc->colInfo.colId == pQuery->pExpr1[j].base.colInfo.colId) { + memcpy(data[i]->data, pQuery->sdata[j]->data, pQuery->pExpr1[j].bytes * pQuery->rec.rows); + break; + } + } + } else { + arithSup.pArithExpr = pExpr; + tExprTreeCalcTraverse(arithSup.pArithExpr->pExpr, (int32_t)pQuery->rec.rows, data[i]->data, &arithSup, TSDB_ORDER_ASC, + getArithemicInputSrc); + } + } + + for (int32_t i = 0; i < pQuery->numOfExpr2; ++i) { + memcpy(pQuery->sdata[i]->data, data[i]->data, pQuery->pExpr2[i].bytes * pQuery->rec.rows); + } + + for (int32_t i = 0; i < pQuery->numOfExpr2; ++i) { + tfree(data[i]); + } + + tfree(data); + tfree(arithSup.data); +} + /* * in each query, this function will be called only once, no retry for further result. * @@ -5233,14 +5436,14 @@ static void tableFixedOutputProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) scanOneTableDataBlocks(pRuntimeEnv, pTableInfo->lastKey); finalizeQueryResult(pRuntimeEnv); + // since the numOfRows must be identical for all sql functions that are allowed to be executed simutaneously. + pQuery->rec.rows = getNumOfResult(pRuntimeEnv); + doSecondaryArithmeticProcess(pQuery); + if (IS_QUERY_KILLED(pQInfo)) { - finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } - // since the numOfRows must be identical for all sql functions that are allowed to be executed simutaneously. 
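
doSecondaryArithmeticProcess above evaluates scalar expressions over columns the first-stage functions already produced, writing into scratch pages and then copying the finished columns into the output buffer. A simplified, self-contained sketch of that two-stage pattern; the column layout and values are invented for the example:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* First stage already produced per-row aggregate columns (maxCol, minCol);
 * the second stage derives a new column (max - min) in a scratch buffer and
 * then overwrites the output area with the final result. */
int main(void) {
  double maxCol[] = {10.0, 7.5, 3.0};
  double minCol[] = { 2.0, 7.5, 1.0};
  int rows = 3;

  double *scratch = malloc(sizeof(double) * rows);
  for (int i = 0; i < rows; ++i) {
    scratch[i] = maxCol[i] - minCol[i];             /* evaluate the derived expression */
  }

  double output[3];
  memcpy(output, scratch, sizeof(double) * rows);   /* move final results into the output buffer */
  free(scratch);

  for (int i = 0; i < rows; ++i) {
    printf("row %d: range = %.1f\n", i, output[i]);
  }
  return 0;
}
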
- pQuery->rec.rows = getNumOfResult(pRuntimeEnv); - skipResults(pRuntimeEnv); limitResults(pRuntimeEnv); } @@ -5322,7 +5525,7 @@ static void tableIntervalProcessImpl(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) int32_t numOfClosed = numOfClosedTimeWindow(&pRuntimeEnv->windowResInfo); int32_t c = (int32_t)(MIN(numOfClosed, pQuery->limit.offset)); - clearFirstNTimeWindow(pRuntimeEnv, c); + clearFirstNWindowRes(pRuntimeEnv, c); pQuery->limit.offset -= c; } @@ -5343,10 +5546,12 @@ static void tableIntervalProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) { TSKEY newStartKey = TSKEY_INITIAL_VAL; // skip blocks without load the actual data block from file if no filter condition present - skipTimeInterval(pRuntimeEnv, &newStartKey); - if (pQuery->limit.offset > 0 && pQuery->numOfFilterCols == 0 && pRuntimeEnv->pFillInfo == NULL) { - setQueryStatus(pQuery, QUERY_COMPLETED); - return; + if (!pRuntimeEnv->groupbyNormalCol) { + skipTimeInterval(pRuntimeEnv, &newStartKey); + if (pQuery->limit.offset > 0 && pQuery->numOfFilterCols == 0 && pRuntimeEnv->pFillInfo == NULL) { + setQueryStatus(pQuery, QUERY_COMPLETED); + return; + } } while (1) { @@ -5357,16 +5562,23 @@ static void tableIntervalProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) { pQuery->rec.rows = 0; copyFromWindowResToSData(pQInfo, &pRuntimeEnv->windowResInfo); - clearFirstNTimeWindow(pRuntimeEnv, pQInfo->groupIndex); + clearFirstNWindowRes(pRuntimeEnv, pQInfo->groupIndex); + } + + // no result generated, abort + if (pQuery->rec.rows == 0 || pRuntimeEnv->groupbyNormalCol) { + break; } + doSecondaryArithmeticProcess(pQuery); + // the offset is handled at prepare stage if no interpolation involved - if (pQuery->fillType == TSDB_FILL_NONE || pQuery->rec.rows == 0) { + if (pQuery->fillType == TSDB_FILL_NONE) { limitResults(pRuntimeEnv); break; } else { taosFillSetStartInfo(pRuntimeEnv->pFillInfo, (int32_t)pQuery->rec.rows, pQuery->window.ekey); - taosFillCopyInputDataFromFilePage(pRuntimeEnv->pFillInfo, (tFilePage**) pQuery->sdata); + taosFillCopyInputDataFromFilePage(pRuntimeEnv->pFillInfo, (const tFilePage**) pQuery->sdata); numOfFilled = 0; pQuery->rec.rows = doFillGapsInResults(pRuntimeEnv, (tFilePage **)pQuery->sdata, &numOfFilled); @@ -5382,10 +5594,21 @@ static void tableIntervalProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) { // all data scanned, the group by normal column can return if (pRuntimeEnv->groupbyNormalCol) { // todo refactor with merge interval time result - pQInfo->groupIndex = 0; - pQuery->rec.rows = 0; - copyFromWindowResToSData(pQInfo, &pRuntimeEnv->windowResInfo); - clearFirstNTimeWindow(pRuntimeEnv, pQInfo->groupIndex); + // maxOutput <= 0, means current query does not generate any results + int32_t numOfClosed = numOfClosedTimeWindow(&pRuntimeEnv->windowResInfo); + + if ((pQuery->limit.offset > 0 && pQuery->limit.offset < numOfClosed) || pQuery->limit.offset == 0) { + // skip offset result rows + clearFirstNWindowRes(pRuntimeEnv, (int32_t) pQuery->limit.offset); + + pQuery->rec.rows = 0; + pQInfo->groupIndex = 0; + copyFromWindowResToSData(pQInfo, &pRuntimeEnv->windowResInfo); + clearFirstNWindowRes(pRuntimeEnv, pQInfo->groupIndex); + + doSecondaryArithmeticProcess(pQuery); + limitResults(pRuntimeEnv); + } } } @@ -5415,7 +5638,7 @@ static void tableQueryImpl(SQInfo *pQInfo) { if (pRuntimeEnv->windowResInfo.size > 0) { copyFromWindowResToSData(pQInfo, &pRuntimeEnv->windowResInfo); - clearFirstNTimeWindow(pRuntimeEnv, pQInfo->groupIndex); + clearFirstNWindowRes(pRuntimeEnv, pQInfo->groupIndex); if 
(pQuery->rec.rows > 0) { qDebug("QInfo:%p %"PRId64" rows returned from group results, total:%"PRId64"", pQInfo, pQuery->rec.rows, pQuery->rec.total); @@ -5584,7 +5807,7 @@ static char *createTableIdList(SQueryTableMsg *pQueryMsg, char *pMsg, SArray **p * @param pExpr * @return */ -static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList, SSqlFuncMsg ***pExpr, +static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList, SSqlFuncMsg ***pExpr, SSqlFuncMsg ***pSecStageExpr, char **tagCond, char** tbnameCond, SColIndex **groupbyCols, SColumnInfo** tagCols) { int32_t code = TSDB_CODE_SUCCESS; @@ -5615,6 +5838,7 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList, pQueryMsg->tsNumOfBlocks = htonl(pQueryMsg->tsNumOfBlocks); pQueryMsg->tsOrder = htonl(pQueryMsg->tsOrder); pQueryMsg->numOfTags = htonl(pQueryMsg->numOfTags); + pQueryMsg->secondStageOutput = htonl(pQueryMsg->secondStageOutput); // query msg safety check if (!validateQueryMsg(pQueryMsg)) { @@ -5684,9 +5908,10 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList, pExprMsg->colInfo.colIndex = htons(pExprMsg->colInfo.colIndex); pExprMsg->colInfo.colId = htons(pExprMsg->colInfo.colId); - pExprMsg->colInfo.flag = htons(pExprMsg->colInfo.flag); - pExprMsg->functionId = htons(pExprMsg->functionId); - pExprMsg->numOfParams = htons(pExprMsg->numOfParams); + pExprMsg->colInfo.flag = htons(pExprMsg->colInfo.flag); + pExprMsg->functionId = htons(pExprMsg->functionId); + pExprMsg->numOfParams = htons(pExprMsg->numOfParams); + pExprMsg->resColId = htons(pExprMsg->resColId); pMsg += sizeof(SSqlFuncMsg); @@ -5722,6 +5947,49 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList, goto _cleanup; } + if (pQueryMsg->secondStageOutput) { + pExprMsg = (SSqlFuncMsg *)pMsg; + *pSecStageExpr = calloc(pQueryMsg->secondStageOutput, POINTER_BYTES); + + for (int32_t i = 0; i < pQueryMsg->secondStageOutput; ++i) { + (*pSecStageExpr)[i] = pExprMsg; + + pExprMsg->colInfo.colIndex = htons(pExprMsg->colInfo.colIndex); + pExprMsg->colInfo.colId = htons(pExprMsg->colInfo.colId); + pExprMsg->colInfo.flag = htons(pExprMsg->colInfo.flag); + pExprMsg->functionId = htons(pExprMsg->functionId); + pExprMsg->numOfParams = htons(pExprMsg->numOfParams); + + pMsg += sizeof(SSqlFuncMsg); + + for (int32_t j = 0; j < pExprMsg->numOfParams; ++j) { + pExprMsg->arg[j].argType = htons(pExprMsg->arg[j].argType); + pExprMsg->arg[j].argBytes = htons(pExprMsg->arg[j].argBytes); + + if (pExprMsg->arg[j].argType == TSDB_DATA_TYPE_BINARY) { + pExprMsg->arg[j].argValue.pz = pMsg; + pMsg += pExprMsg->arg[j].argBytes; // one more for the string terminated char. + } else { + pExprMsg->arg[j].argValue.i64 = htobe64(pExprMsg->arg[j].argValue.i64); + } + } + + int16_t functionId = pExprMsg->functionId; + if (functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAGPRJ || functionId == TSDB_FUNC_TAG_DUMMY) { + if (!TSDB_COL_IS_TAG(pExprMsg->colInfo.flag)) { // ignore the column index check for arithmetic expression. 
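
The message-decoding loop above converts every multi-byte field of the serialized expression from network to host byte order before using it. A small standalone illustration of the same technique; WireExpr and its fields are made up for this sketch and are not the real message format:

#include <arpa/inet.h>   /* ntohs, ntohl */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

typedef struct {
  uint16_t functionId;
  uint16_t numOfParams;
  uint32_t colId;
} WireExpr;

int main(void) {
  /* pretend these 8 bytes arrived from the network in big-endian order */
  unsigned char buf[8] = {0x01, 0x02, 0x00, 0x03, 0x00, 0x00, 0x00, 0x10};

  WireExpr msg;
  memcpy(&msg, buf, sizeof(msg));            /* copy raw bytes, still network order */

  msg.functionId  = ntohs(msg.functionId);   /* convert each field in place */
  msg.numOfParams = ntohs(msg.numOfParams);
  msg.colId       = ntohl(msg.colId);

  printf("functionId=%u numOfParams=%u colId=%u\n", msg.functionId, msg.numOfParams, msg.colId);
  /* prints: functionId=258 numOfParams=3 colId=16 on any host */
  return 0;
}
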
+ code = TSDB_CODE_QRY_INVALID_MSG; + goto _cleanup; + } + } else { +// if (!validateExprColumnInfo(pQueryMsg, pExprMsg)) { +// return TSDB_CODE_QRY_INVALID_MSG; +// } + } + + pExprMsg = (SSqlFuncMsg *)pMsg; + } + } + pMsg = createTableIdList(pQueryMsg, pMsg, pTableIdList); if (pQueryMsg->numOfGroupCols > 0) { // group by tag columns @@ -5816,19 +6084,19 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList, return TSDB_CODE_SUCCESS; _cleanup: - taosTFree(*pExpr); + tfree(*pExpr); taosArrayDestroy(*pTableIdList); *pTableIdList = NULL; - taosTFree(*tbnameCond); - taosTFree(*groupbyCols); - taosTFree(*tagCols); - taosTFree(*tagCond); + tfree(*tbnameCond); + tfree(*groupbyCols); + tfree(*tagCols); + tfree(*tagCond); return code; } -static int32_t buildAirthmeticExprFromMsg(SExprInfo *pArithExprInfo, SQueryTableMsg *pQueryMsg) { - qDebug("qmsg:%p create arithmetic expr from binary string: %s", pQueryMsg, pArithExprInfo->base.arg[0].argValue.pz); +static int32_t buildArithmeticExprFromMsg(SExprInfo *pArithExprInfo, SQueryTableMsg *pQueryMsg) { + qDebug("qmsg:%p create arithmetic expr from binary", pQueryMsg); tExprNode* pExprNode = NULL; TRY(TSDB_MAX_TAG_CONDITIONS) { @@ -5848,7 +6116,7 @@ static int32_t buildAirthmeticExprFromMsg(SExprInfo *pArithExprInfo, SQueryTable return TSDB_CODE_SUCCESS; } -static int32_t createQFunctionExprFromMsg(SQueryTableMsg *pQueryMsg, SExprInfo **pExprInfo, SSqlFuncMsg **pExprMsg, +static int32_t createQueryFuncExprFromMsg(SQueryTableMsg *pQueryMsg, int32_t numOfOutput, SExprInfo **pExprInfo, SSqlFuncMsg **pExprMsg, SColumnInfo* pTagCols) { *pExprInfo = NULL; int32_t code = TSDB_CODE_SUCCESS; @@ -5861,7 +6129,7 @@ static int32_t createQFunctionExprFromMsg(SQueryTableMsg *pQueryMsg, SExprInfo * bool isSuperTable = QUERY_IS_STABLE_QUERY(pQueryMsg->queryType); int16_t tagLen = 0; - for (int32_t i = 0; i < pQueryMsg->numOfOutput; ++i) { + for (int32_t i = 0; i < numOfOutput; ++i) { pExprs[i].base = *pExprMsg[i]; pExprs[i].bytes = 0; @@ -5870,10 +6138,10 @@ static int32_t createQFunctionExprFromMsg(SQueryTableMsg *pQueryMsg, SExprInfo * // parse the arithmetic expression if (pExprs[i].base.functionId == TSDB_FUNC_ARITHM) { - code = buildAirthmeticExprFromMsg(&pExprs[i], pQueryMsg); + code = buildArithmeticExprFromMsg(&pExprs[i], pQueryMsg); if (code != TSDB_CODE_SUCCESS) { - taosTFree(pExprs); + tfree(pExprs); return code; } @@ -5912,7 +6180,7 @@ static int32_t createQFunctionExprFromMsg(SQueryTableMsg *pQueryMsg, SExprInfo * int32_t param = (int32_t)pExprs[i].base.arg[0].argValue.i64; if (getResultDataInfo(type, bytes, pExprs[i].base.functionId, param, &pExprs[i].type, &pExprs[i].bytes, &pExprs[i].interBytes, 0, isSuperTable) != TSDB_CODE_SUCCESS) { - taosTFree(pExprs); + tfree(pExprs); return TSDB_CODE_QRY_INVALID_MSG; } @@ -5923,7 +6191,7 @@ static int32_t createQFunctionExprFromMsg(SQueryTableMsg *pQueryMsg, SExprInfo * } // TODO refactor - for (int32_t i = 0; i < pQueryMsg->numOfOutput; ++i) { + for (int32_t i = 0; i < numOfOutput; ++i) { pExprs[i].base = *pExprMsg[i]; int16_t functId = pExprs[i].base.functionId; @@ -6063,10 +6331,10 @@ static int32_t createFilterInfo(void *pQInfo, SQuery *pQuery) { } static void doUpdateExprColumnIndex(SQuery *pQuery) { - assert(pQuery->pSelectExpr != NULL && pQuery != NULL); + assert(pQuery->pExpr1 != NULL && pQuery != NULL); for (int32_t k = 0; k < pQuery->numOfOutput; ++k) { - SSqlFuncMsg *pSqlExprMsg = &pQuery->pSelectExpr[k].base; + SSqlFuncMsg *pSqlExprMsg = &pQuery->pExpr1[k].base; if 
(pSqlExprMsg->functionId == TSDB_FUNC_ARITHM) { continue; } @@ -6121,7 +6389,7 @@ static void calResultBufSize(SQuery* pQuery) { } static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGroupbyExpr, SExprInfo *pExprs, - STableGroupInfo *pTableGroupInfo, SColumnInfo* pTagCols, bool stableQuery) { + SExprInfo *pSecExprs, STableGroupInfo *pTableGroupInfo, SColumnInfo* pTagCols, bool stableQuery) { int16_t numOfCols = pQueryMsg->numOfCols; int16_t numOfOutput = pQueryMsg->numOfOutput; @@ -6147,7 +6415,9 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGrou pQuery->limit.offset = pQueryMsg->offset; pQuery->order.order = pQueryMsg->order; pQuery->order.orderColId = pQueryMsg->orderColId; - pQuery->pSelectExpr = pExprs; + pQuery->pExpr1 = pExprs; + pQuery->pExpr2 = pSecExprs; + pQuery->numOfExpr2 = pQueryMsg->secondStageOutput; pQuery->pGroupbyExpr = pGroupbyExpr; memcpy(&pQuery->interval, &pQueryMsg->interval, sizeof(pQuery->interval)); pQuery->fillType = pQueryMsg->fillType; @@ -6186,10 +6456,16 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGrou calResultBufSize(pQuery); for (int32_t col = 0; col < pQuery->numOfOutput; ++col) { - assert(pExprs[col].interBytes >= pExprs[col].bytes); - // allocate additional memory for interResults that are usually larger then final results - size_t size = (size_t)((pQuery->rec.capacity + 1) * pExprs[col].bytes + pExprs[col].interBytes + sizeof(tFilePage)); + // TODO refactor + int16_t bytes = 0; + if (pQuery->pExpr2 == NULL || col > pQuery->numOfExpr2) { + bytes = pExprs[col].bytes; + } else { + bytes = MAX(pQuery->pExpr2[col].bytes, pExprs[col].bytes); + } + + size_t size = (size_t)((pQuery->rec.capacity + 1) * bytes + pExprs[col].interBytes + sizeof(tFilePage)); pQuery->sdata[col] = (tFilePage *)calloc(1, size); if (pQuery->sdata[col] == NULL) { goto _cleanup; @@ -6213,12 +6489,18 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGrou pQInfo->tableqinfoGroupInfo.pGroupList = taosArrayInit(numOfGroups, POINTER_BYTES); pQInfo->tableqinfoGroupInfo.numOfTables = pTableGroupInfo->numOfTables; pQInfo->tableqinfoGroupInfo.map = taosHashInit(pTableGroupInfo->numOfTables, - taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, false); + taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); } int tableIndex = 0; pQInfo->runtimeEnv.interBufSize = getOutputInterResultBufSize(pQuery); + pQInfo->runtimeEnv.summary.tableInfoSize += (pTableGroupInfo->numOfTables * sizeof(STableQueryInfo)); + + pQInfo->runtimeEnv.pResultRowHashTable = taosHashInit(pTableGroupInfo->numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + pQInfo->runtimeEnv.keyBuf = malloc(TSDB_MAX_BYTES_PER_ROW); + pQInfo->runtimeEnv.pool = initResultRowPool(getWindowResultSize(&pQInfo->runtimeEnv)); + pQInfo->pBuf = calloc(pTableGroupInfo->numOfTables, sizeof(STableQueryInfo)); if (pQInfo->pBuf == NULL) { goto _cleanup; @@ -6253,7 +6535,7 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGrou for(int32_t j = 0; j < s; ++j) { STableKeyInfo* info = taosArrayGet(pa, j); - void* buf = (char*)pQInfo->pBuf + index * sizeof(STableQueryInfo); + void* buf = (char*) pQInfo->pBuf + index * sizeof(STableQueryInfo); window.skey = info->lastKey; STableQueryInfo* item = createTableQueryInfo(&pQInfo->runtimeEnv, info->pTable, window, buf); @@ -6284,7 +6566,7 @@ _cleanup_query: free(pGroupbyExpr); } - taosTFree(pTagCols); + tfree(pTagCols); 
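
createQInfoImpl above sizes each output column buffer from the wider of the first-stage and second-stage column widths, plus one extra row of head-room, the intermediate-result area, and a page header. A tiny sketch of that arithmetic with invented sizes; resultColBufSize is not a real helper:

#include <stddef.h>
#include <stdio.h>

static size_t resultColBufSize(size_t capacity, size_t stage1Bytes, size_t stage2Bytes,
                               size_t interBytes, size_t pageHeader) {
  size_t bytes = (stage2Bytes > stage1Bytes) ? stage2Bytes : stage1Bytes;  /* widest representation wins */
  return (capacity + 1) * bytes + interBytes + pageHeader;
}

int main(void) {
  size_t size = resultColBufSize(4096, 8, 16, 64, 16);
  printf("allocate %zu bytes for this output column\n", size);  /* (4096+1)*16 + 64 + 16 = 65632 */
  return 0;
}
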
for (int32_t i = 0; i < numOfOutput; ++i) { SExprInfo* pExprInfo = &pExprs[i]; if (pExprInfo->pExpr != NULL) { @@ -6292,7 +6574,7 @@ _cleanup_query: } } - taosTFree(pExprs); + tfree(pExprs); _cleanup: freeQInfo(pQInfo); @@ -6318,12 +6600,13 @@ static int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQ SQuery *pQuery = pQInfo->runtimeEnv.pQuery; STSBuf *pTSBuf = NULL; - if (pQueryMsg->tsLen > 0) { // open new file to save the result + if (pQueryMsg->tsLen > 0) { // open new file to save the result char *tsBlock = (char *) pQueryMsg + pQueryMsg->tsOffset; - pTSBuf = tsBufCreateFromCompBlocks(tsBlock, pQueryMsg->tsNumOfBlocks, pQueryMsg->tsLen, pQueryMsg->tsOrder); + pTSBuf = tsBufCreateFromCompBlocks(tsBlock, pQueryMsg->tsNumOfBlocks, pQueryMsg->tsLen, pQueryMsg->tsOrder, vgId); tsBufResetPos(pTSBuf); bool ret = tsBufNextPos(pTSBuf); + UNUSED(ret); } @@ -6395,6 +6678,22 @@ static void doDestroyTableQueryInfo(STableGroupInfo* pTableqinfoGroupInfo) { pTableqinfoGroupInfo->numOfTables = 0; } +static void* destroyQueryFuncExpr(SExprInfo* pExprInfo, int32_t numOfExpr) { + if (pExprInfo == NULL) { + assert(numOfExpr == 0); + return NULL; + } + + for (int32_t i = 0; i < numOfExpr; ++i) { + if (pExprInfo[i].pExpr != NULL) { + tExprNodeDestroy(pExprInfo[i].pExpr, NULL); + } + } + + tfree(pExprInfo); + return NULL; +} + static void freeQInfo(SQInfo *pQInfo) { if (!isValidQInfo(pQInfo)) { return; @@ -6402,62 +6701,55 @@ static void freeQInfo(SQInfo *pQInfo) { qDebug("QInfo:%p start to free QInfo", pQInfo); + releaseQueryBuf(pQInfo->tableqinfoGroupInfo.numOfTables); + teardownQueryRuntimeEnv(&pQInfo->runtimeEnv); SQuery *pQuery = pQInfo->runtimeEnv.pQuery; if (pQuery != NULL) { if (pQuery->sdata != NULL) { for (int32_t col = 0; col < pQuery->numOfOutput; ++col) { - taosTFree(pQuery->sdata[col]); + tfree(pQuery->sdata[col]); } - taosTFree(pQuery->sdata); + tfree(pQuery->sdata); } if (pQuery->fillVal != NULL) { - taosTFree(pQuery->fillVal); + tfree(pQuery->fillVal); } for (int32_t i = 0; i < pQuery->numOfFilterCols; ++i) { SSingleColumnFilterInfo *pColFilter = &pQuery->pFilterInfo[i]; if (pColFilter->numOfFilters > 0) { - taosTFree(pColFilter->pFilters); - } - } - - if (pQuery->pSelectExpr != NULL) { - for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - SExprInfo *pExprInfo = &pQuery->pSelectExpr[i]; - - if (pExprInfo->pExpr != NULL) { - tExprTreeDestroy(&pExprInfo->pExpr, NULL); - } + tfree(pColFilter->pFilters); } - - taosTFree(pQuery->pSelectExpr); } - if (pQuery->pGroupbyExpr != NULL) { - taosArrayDestroy(pQuery->pGroupbyExpr->columnInfo); - taosTFree(pQuery->pGroupbyExpr); - } + pQuery->pExpr1 = destroyQueryFuncExpr(pQuery->pExpr1, pQuery->numOfOutput); + pQuery->pExpr2 = destroyQueryFuncExpr(pQuery->pExpr2, pQuery->numOfExpr2); - taosTFree(pQuery->tagColList); - taosTFree(pQuery->pFilterInfo); + tfree(pQuery->tagColList); + tfree(pQuery->pFilterInfo); if (pQuery->colList != NULL) { for (int32_t i = 0; i < pQuery->numOfCols; i++) { SColumnInfo *column = pQuery->colList + i; freeColumnFilterInfo(column->filters, column->numOfFilters); } - taosTFree(pQuery->colList); + tfree(pQuery->colList); } - taosTFree(pQuery); + if (pQuery->pGroupbyExpr != NULL) { + taosArrayDestroy(pQuery->pGroupbyExpr->columnInfo); + tfree(pQuery->pGroupbyExpr); + } + + tfree(pQuery); } doDestroyTableQueryInfo(&pQInfo->tableqinfoGroupInfo); - taosTFree(pQInfo->pBuf); + tfree(pQInfo->pBuf); tsdbDestroyTableGroup(&pQInfo->tableGroupInfo); taosArrayDestroy(pQInfo->arrTableIdInfo); @@ -6465,7 +6757,7 @@ 
static void freeQInfo(SQInfo *pQInfo) { qDebug("QInfo:%p QInfo is freed", pQInfo); - taosTFree(pQInfo); + tfree(pQInfo); } static size_t getResultSize(SQInfo *pQInfo, int64_t *numOfRows) { @@ -6554,16 +6846,19 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi int32_t code = TSDB_CODE_SUCCESS; - char *tagCond = NULL; - char *tbnameCond = NULL; + char *tagCond = NULL; + char *tbnameCond = NULL; SArray *pTableIdList = NULL; - SSqlFuncMsg **pExprMsg = NULL; - SExprInfo *pExprs = NULL; + SSqlFuncMsg **pExprMsg = NULL; + SSqlFuncMsg **pSecExprMsg = NULL; + SExprInfo *pExprs = NULL; + SExprInfo *pSecExprs = NULL; + SColIndex *pGroupColIndex = NULL; SColumnInfo *pTagColumnInfo = NULL; SSqlGroupbyExpr *pGroupbyExpr = NULL; - code = convertQueryMsg(pQueryMsg, &pTableIdList, &pExprMsg, &tagCond, &tbnameCond, &pGroupColIndex, &pTagColumnInfo); + code = convertQueryMsg(pQueryMsg, &pTableIdList, &pExprMsg, &pSecExprMsg, &tagCond, &tbnameCond, &pGroupColIndex, &pTagColumnInfo); if (code != TSDB_CODE_SUCCESS) { goto _over; } @@ -6580,10 +6875,16 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi goto _over; } - if ((code = createQFunctionExprFromMsg(pQueryMsg, &pExprs, pExprMsg, pTagColumnInfo)) != TSDB_CODE_SUCCESS) { + if ((code = createQueryFuncExprFromMsg(pQueryMsg, pQueryMsg->numOfOutput, &pExprs, pExprMsg, pTagColumnInfo)) != TSDB_CODE_SUCCESS) { goto _over; } + if (pSecExprMsg != NULL) { + if ((code = createQueryFuncExprFromMsg(pQueryMsg, pQueryMsg->secondStageOutput, &pSecExprs, pSecExprMsg, pTagColumnInfo)) != TSDB_CODE_SUCCESS) { + goto _over; + } + } + pGroupbyExpr = createGroupbyExprFromMsg(pQueryMsg, pGroupColIndex, &code); if ((pGroupbyExpr == NULL && pQueryMsg->numOfGroupCols != 0) || code != TSDB_CODE_SUCCESS) { goto _over; @@ -6636,8 +6937,15 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi assert(0); } - (*pQInfo) = createQInfoImpl(pQueryMsg, pGroupbyExpr, pExprs, &tableGroupInfo, pTagColumnInfo, isSTableQuery); + code = checkForQueryBuf(tableGroupInfo.numOfTables); + if (code != TSDB_CODE_SUCCESS) { // not enough query buffer, abort + goto _over; + } + + (*pQInfo) = createQInfoImpl(pQueryMsg, pGroupbyExpr, pExprs, pSecExprs, &tableGroupInfo, pTagColumnInfo, isSTableQuery); + pExprs = NULL; + pSecExprs = NULL; pGroupbyExpr = NULL; pTagColumnInfo = NULL; @@ -6652,13 +6960,19 @@ _over: free(tagCond); free(tbnameCond); free(pGroupColIndex); + if (pGroupbyExpr != NULL) { taosArrayDestroy(pGroupbyExpr->columnInfo); free(pGroupbyExpr); } + free(pTagColumnInfo); free(pExprs); + free(pSecExprs); + free(pExprMsg); + free(pSecExprMsg); + taosArrayDestroy(pTableIdList); for (int32_t i = 0; i < pQueryMsg->numOfCols; i++) { @@ -6777,7 +7091,7 @@ int32_t qRetrieveQueryResultInfo(qinfo_t qinfo, bool* buildRes, void* pRspContex int32_t code = TSDB_CODE_SUCCESS; -#if 0 +#if _NON_BLOCKING_RETRIEVE SQuery *pQuery = pQInfo->runtimeEnv.pQuery; pthread_mutex_lock(&pQInfo->lock); @@ -6849,6 +7163,7 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co pQInfo->dataReady = QUERY_RESULT_NOT_READY; if (IS_QUERY_KILLED(pQInfo) || Q_STATUS_EQUAL(pQuery->status, QUERY_OVER)) { + // here current thread hold the refcount, so it is safe to free tsdbQueryHandle. 
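
The comment above relies on reference counting: the retrieve path holds a count on the query handle, so freeing is safe only for whoever drops the last reference. A generic C11 sketch of that pattern, not TDengine's own atomic helpers:

#include <stdatomic.h>
#include <stdlib.h>
#include <stdio.h>

typedef struct {
  atomic_int refCount;
  int payload;
} Handle;

static Handle *handleAcquire(Handle *h) {
  atomic_fetch_add(&h->refCount, 1);
  return h;
}

static void handleRelease(Handle *h) {
  if (atomic_fetch_sub(&h->refCount, 1) == 1) {   /* we held the last reference */
    printf("freeing handle with payload %d\n", h->payload);
    free(h);
  }
}

int main(void) {
  Handle *h = malloc(sizeof(Handle));
  atomic_init(&h->refCount, 1);
  h->payload = 42;

  Handle *borrowed = handleAcquire(h);  /* e.g. the retrieve path takes a reference */
  handleRelease(borrowed);              /* retrieve path done */
  handleRelease(h);                     /* original owner done: object is freed here */
  return 0;
}
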
*continueExec = false; (*pRsp)->completed = 1; // notify no more result to client } else { @@ -6921,11 +7236,11 @@ static void buildTagQueryResult(SQInfo* pQInfo) { assert(num == pQInfo->tableqinfoGroupInfo.numOfTables); int32_t count = 0; - int32_t functionId = pQuery->pSelectExpr[0].base.functionId; + int32_t functionId = pQuery->pExpr1[0].base.functionId; if (functionId == TSDB_FUNC_TID_TAG) { // return the tags & table Id assert(pQuery->numOfOutput == 1); - SExprInfo* pExprInfo = &pQuery->pSelectExpr[0]; + SExprInfo* pExprInfo = &pQuery->pExpr1[0]; int32_t rsize = pExprInfo->bytes; count = 0; @@ -6999,7 +7314,7 @@ static void buildTagQueryResult(SQInfo* pQInfo) { continue; } - SExprInfo* pExprInfo = pQuery->pSelectExpr; + SExprInfo* pExprInfo = pQuery->pExpr1; STableQueryInfo* item = taosArrayGetP(pa, i); char *data = NULL, *dst = NULL; @@ -7037,6 +7352,48 @@ static void buildTagQueryResult(SQInfo* pQInfo) { setQueryStatus(pQuery, QUERY_COMPLETED); } +static int64_t getQuerySupportBufSize(size_t numOfTables) { + size_t s1 = sizeof(STableQueryInfo); + size_t s2 = sizeof(SHashNode); + +// size_t s3 = sizeof(STableCheckInfo); buffer consumption in tsdb + return (int64_t)((s1 + s2) * 1.5 * numOfTables); +} + +int32_t checkForQueryBuf(size_t numOfTables) { + int64_t t = getQuerySupportBufSize(numOfTables); + if (tsQueryBufferSize < 0) { + return TSDB_CODE_SUCCESS; + } else if (tsQueryBufferSize > 0) { + + while(1) { + int64_t s = tsQueryBufferSize; + int64_t remain = s - t; + if (remain >= 0) { + if (atomic_val_compare_exchange_64(&tsQueryBufferSize, s, remain) == s) { + return TSDB_CODE_SUCCESS; + } + } else { + return TSDB_CODE_QRY_NOT_ENOUGH_BUFFER; + } + } + } + + // disable query processing if the value of tsQueryBufferSize is zero. + return TSDB_CODE_QRY_NOT_ENOUGH_BUFFER; +} + +void releaseQueryBuf(size_t numOfTables) { + if (tsQueryBufferSize <= 0) { + return; + } + + int64_t t = getQuerySupportBufSize(numOfTables); + + // restore value is not enough buffer available + atomic_add_fetch_64(&tsQueryBufferSize, t); +} + void* qGetResultRetrieveMsg(qinfo_t qinfo) { SQInfo* pQInfo = (SQInfo*) qinfo; assert(pQInfo != NULL); @@ -7111,9 +7468,9 @@ void qCleanupQueryMgmt(void* pQMgmt) { taosCacheCleanup(pqinfoPool); pthread_mutex_destroy(&pQueryMgmt->lock); - taosTFree(pQueryMgmt); + tfree(pQueryMgmt); - qDebug("vgId:%d queryMgmt cleanup completed", vgId); + qDebug("vgId:%d, queryMgmt cleanup completed", vgId); } void** qRegisterQInfo(void* pMgmt, uint64_t qInfo) { diff --git a/src/query/src/qExtbuffer.c b/src/query/src/qExtbuffer.c index fc9c60b39b0cfa4b591cc77c1efcdac4e6647ce9..fa3fe285a857cc0d146e25f52710d0ebc0a69193 100644 --- a/src/query/src/qExtbuffer.c +++ b/src/query/src/qExtbuffer.c @@ -64,7 +64,7 @@ void* destoryExtMemBuffer(tExtMemBuffer *pMemBuffer) { // release flush out info link SExtFileInfo *pFileMeta = &pMemBuffer->fileMeta; if (pFileMeta->flushoutData.nAllocSize != 0 && pFileMeta->flushoutData.pFlushoutInfo != NULL) { - taosTFree(pFileMeta->flushoutData.pFlushoutInfo); + tfree(pFileMeta->flushoutData.pFlushoutInfo); } // release all in-memory buffer pages @@ -72,7 +72,7 @@ void* destoryExtMemBuffer(tExtMemBuffer *pMemBuffer) { while (pFilePages != NULL) { tFilePagesItem *pTmp = pFilePages; pFilePages = pFilePages->pNext; - taosTFree(pTmp); + tfree(pTmp); } // close temp file @@ -87,8 +87,8 @@ void* destoryExtMemBuffer(tExtMemBuffer *pMemBuffer) { destroyColumnModel(pMemBuffer->pColumnModel); - taosTFree(pMemBuffer->path); - taosTFree(pMemBuffer); + tfree(pMemBuffer->path); 
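
checkForQueryBuf and releaseQueryBuf above reserve memory from a global budget with a compare-and-swap loop and give it back with an atomic add when the query finishes. A standalone sketch of the same admission-control idea using C11 atomics; the per-table estimate and the 4 MB budget below are illustrative numbers, not the server defaults:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic int64_t queryBudget = 4 * 1024 * 1024;   /* global byte budget for the example */

static int64_t estimateBytes(size_t numOfTables) {
  return (int64_t)(numOfTables * 96 * 1.5);             /* made-up per-table footprint */
}

static bool reserveQueryBuf(size_t numOfTables) {
  int64_t need = estimateBytes(numOfTables);
  int64_t cur = atomic_load(&queryBudget);
  while (cur >= need) {
    if (atomic_compare_exchange_weak(&queryBudget, &cur, cur - need)) {
      return true;                                      /* reservation succeeded */
    }
    /* cur was refreshed by the failed CAS; retry with the new value */
  }
  return false;                                         /* not enough budget, reject the query */
}

static void giveBackQueryBuf(size_t numOfTables) {
  atomic_fetch_add(&queryBudget, estimateBytes(numOfTables));
}

int main(void) {
  if (reserveQueryBuf(1000)) {
    printf("query admitted, remaining budget %lld\n", (long long)atomic_load(&queryBudget));
    giveBackQueryBuf(1000);
  } else {
    printf("query rejected: buffer budget exhausted\n");
  }
  return 0;
}
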
+ tfree(pMemBuffer); return NULL; } @@ -275,7 +275,7 @@ int32_t tExtMemBufferFlush(tExtMemBuffer *pMemBuffer) { tFilePagesItem *ptmp = first; first = first->pNext; - taosTFree(ptmp); // release all data in memory buffer + tfree(ptmp); // release all data in memory buffer } fflush(pMemBuffer->file); // flush to disk @@ -300,7 +300,7 @@ void tExtMemBufferClear(tExtMemBuffer *pMemBuffer) { while (first != NULL) { tFilePagesItem *ptmp = first; first = first->pNext; - taosTFree(ptmp); + tfree(ptmp); } pMemBuffer->fileMeta.numOfElemsInFile = 0; @@ -344,8 +344,6 @@ static FORCE_INLINE int32_t primaryKeyComparator(int64_t f1, int64_t f2, int32_t return 0; } - assert(colIdx == 0); - if (tsOrder == TSDB_ORDER_DESC) { // primary column desc order return (f1 < f2) ? 1 : -1; } else { // asc @@ -804,7 +802,7 @@ void destroyColumnModel(SColumnModel *pModel) { return; } - taosTFree(pModel); + tfree(pModel); } static void printBinaryData(char *data, int32_t len) { @@ -1089,5 +1087,5 @@ void tOrderDescDestroy(tOrderDescriptor *pDesc) { } destroyColumnModel(pDesc->pColumnModel); - taosTFree(pDesc); + tfree(pDesc); } diff --git a/src/query/src/qFill.c b/src/query/src/qFill.c index f186726c0120f2e2e34580fec0da00c2083e5da9..ca1203cb17da52b8ffe638e29913b06bdfcade29 100644 --- a/src/query/src/qFill.c +++ b/src/query/src/qFill.c @@ -13,46 +13,23 @@ * along with this program. If not, see . */ -#include "qFill.h" #include "os.h" -#include "qExtbuffer.h" + #include "taosdef.h" #include "taosmsg.h" #include "tsqlfunction.h" +#include "ttype.h" -#define FILL_IS_ASC_FILL(_f) ((_f)->order == TSDB_ORDER_ASC) - -SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols, - int64_t slidingTime, int8_t slidingUnit, int8_t precision, int32_t fillType, SFillColInfo* pFillCol) { - if (fillType == TSDB_FILL_NONE) { - return NULL; - } - - SFillInfo* pFillInfo = calloc(1, sizeof(SFillInfo)); - - taosResetFillInfo(pFillInfo, skey); - - pFillInfo->order = order; - pFillInfo->fillType = fillType; - pFillInfo->pFillCol = pFillCol; - pFillInfo->numOfTags = numOfTags; - pFillInfo->numOfCols = numOfCols; - pFillInfo->precision = precision; - - pFillInfo->interval.interval = slidingTime; - pFillInfo->interval.intervalUnit = slidingUnit; - pFillInfo->interval.sliding = slidingTime; - pFillInfo->interval.slidingUnit = slidingUnit; +#include "qFill.h" +#include "qExtbuffer.h" +#include "queryLog.h" - pFillInfo->pData = malloc(POINTER_BYTES * numOfCols); - if (numOfTags > 0) { - pFillInfo->pTags = calloc(pFillInfo->numOfTags, sizeof(SFillTagColInfo)); - for(int32_t i = 0; i < numOfTags; ++i) { - pFillInfo->pTags[i].col.colId = -2; - } - } +#define FILL_IS_ASC_FILL(_f) ((_f)->order == TSDB_ORDER_ASC) +// there are no duplicated tags in the SFillTagColInfo list +static int32_t setTagColumnInfo(SFillInfo* pFillInfo, int32_t numOfCols, int32_t capacity) { int32_t rowsize = 0; + int32_t k = 0; for (int32_t i = 0; i < numOfCols; ++i) { SFillColInfo* pColInfo = &pFillInfo->pFillCol[i]; @@ -60,72 +37,120 @@ SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_ if (TSDB_COL_IS_TAG(pColInfo->flag)) { bool exists = false; - for(int32_t j = 0; j < k; ++j) { + int32_t index = -1; + for (int32_t j = 0; j < k; ++j) { if (pFillInfo->pTags[j].col.colId == pColInfo->col.colId) { exists = true; + index = j; break; } } if (!exists) { - pFillInfo->pTags[k].col.colId = pColInfo->col.colId; + SSchema* pSchema = &pFillInfo->pTags[k].col; + pSchema->colId = pColInfo->col.colId; + 
pSchema->type = pColInfo->col.type; + pSchema->bytes = pColInfo->col.bytes; + pFillInfo->pTags[k].tagVal = calloc(1, pColInfo->col.bytes); + pColInfo->tagIndex = k; k += 1; + } else { + pColInfo->tagIndex = index; } } + rowsize += pColInfo->col.bytes; } - pFillInfo->rowSize = rowsize; - pFillInfo->capacityInRows = capacity; - + assert(k <= pFillInfo->numOfTags); + return rowsize; +} + +SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols, + int64_t slidingTime, int8_t slidingUnit, int8_t precision, int32_t fillType, + SFillColInfo* pCol, void* handle) { + if (fillType == TSDB_FILL_NONE) { + return NULL; + } + + SFillInfo* pFillInfo = calloc(1, sizeof(SFillInfo)); + + taosResetFillInfo(pFillInfo, skey); + + pFillInfo->order = order; + pFillInfo->type = fillType; + pFillInfo->pFillCol = pCol; + pFillInfo->numOfTags = numOfTags; + pFillInfo->numOfCols = numOfCols; + pFillInfo->precision = precision; + pFillInfo->alloc = capacity; + pFillInfo->handle = handle; + + pFillInfo->interval.interval = slidingTime; + pFillInfo->interval.intervalUnit = slidingUnit; + pFillInfo->interval.sliding = slidingTime; + pFillInfo->interval.slidingUnit = slidingUnit; + + pFillInfo->pData = malloc(POINTER_BYTES * numOfCols); + if (numOfTags > 0) { + pFillInfo->pTags = calloc(pFillInfo->numOfTags, sizeof(SFillTagColInfo)); + for (int32_t i = 0; i < numOfTags; ++i) { + pFillInfo->pTags[i].col.colId = -2; // TODO + } + } + + pFillInfo->rowSize = setTagColumnInfo(pFillInfo, pFillInfo->numOfCols, pFillInfo->alloc); + assert(pFillInfo->rowSize > 0); + return pFillInfo; } void taosResetFillInfo(SFillInfo* pFillInfo, TSKEY startTimestamp) { pFillInfo->start = startTimestamp; - pFillInfo->rowIdx = -1; + pFillInfo->currentKey = startTimestamp; + pFillInfo->index = -1; pFillInfo->numOfRows = 0; pFillInfo->numOfCurrent = 0; pFillInfo->numOfTotal = 0; } -void* taosDestoryFillInfo(SFillInfo* pFillInfo) { +void* taosDestroyFillInfo(SFillInfo* pFillInfo) { if (pFillInfo == NULL) { return NULL; } - taosTFree(pFillInfo->prevValues); - taosTFree(pFillInfo->nextValues); - taosTFree(pFillInfo->pTags); + tfree(pFillInfo->prevValues); + tfree(pFillInfo->nextValues); + tfree(pFillInfo->pTags); for(int32_t i = 0; i < pFillInfo->numOfCols; ++i) { - taosTFree(pFillInfo->pData[i]); + tfree(pFillInfo->pData[i]); } - taosTFree(pFillInfo->pData); - taosTFree(pFillInfo->pFillCol); + tfree(pFillInfo->pData); + tfree(pFillInfo->pFillCol); - taosTFree(pFillInfo); + tfree(pFillInfo); return NULL; } void taosFillSetStartInfo(SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey) { - if (pFillInfo->fillType == TSDB_FILL_NONE) { + if (pFillInfo->type == TSDB_FILL_NONE) { return; } - pFillInfo->endKey = endKey; - if (pFillInfo->order != TSDB_ORDER_ASC) { - pFillInfo->endKey = taosTimeTruncate(endKey, &pFillInfo->interval, pFillInfo->precision); + pFillInfo->end = endKey; + if (!FILL_IS_ASC_FILL(pFillInfo)) { + pFillInfo->end = taosTimeTruncate(endKey, &pFillInfo->interval, pFillInfo->precision); } - pFillInfo->rowIdx = 0; + pFillInfo->index = 0; pFillInfo->numOfRows = numOfRows; // ensure the space - if (pFillInfo->capacityInRows < numOfRows) { + if (pFillInfo->alloc < numOfRows) { for(int32_t i = 0; i < pFillInfo->numOfCols; ++i) { char* tmp = realloc(pFillInfo->pData[i], numOfRows*pFillInfo->pFillCol[i].col.bytes); assert(tmp != NULL); // todo handle error @@ -136,42 +161,38 @@ void taosFillSetStartInfo(SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey) } } -void 
taosFillCopyInputDataFromFilePage(SFillInfo* pFillInfo, tFilePage** pInput) { - // copy the data into source data buffer +// copy the data into source data buffer +void taosFillCopyInputDataFromFilePage(SFillInfo* pFillInfo, const tFilePage** pInput) { for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) { memcpy(pFillInfo->pData[i], pInput[i]->data, pFillInfo->numOfRows * pFillInfo->pFillCol[i].col.bytes); } } -void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, tFilePage* pInput) { +void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, const tFilePage* pInput) { assert(pFillInfo->numOfRows == pInput->num); for(int32_t i = 0; i < pFillInfo->numOfCols; ++i) { SFillColInfo* pCol = &pFillInfo->pFillCol[i]; - char* data = pInput->data + pCol->col.offset * pInput->num; + const char* data = pInput->data + pCol->col.offset * pInput->num; memcpy(pFillInfo->pData[i], data, (size_t)(pInput->num * pCol->col.bytes)); if (TSDB_COL_IS_TAG(pCol->flag)) { // copy the tag value to tag value buffer - for (int32_t j = 0; j < pFillInfo->numOfTags; ++j) { - SFillTagColInfo* pTag = &pFillInfo->pTags[j]; - if (pTag->col.colId == pCol->col.colId) { - memcpy(pTag->tagVal, data, pCol->col.bytes); - break; - } - } + SFillTagColInfo* pTag = &pFillInfo->pTags[pCol->tagIndex]; + assert (pTag->col.colId == pCol->col.colId); + memcpy(pTag->tagVal, data, pCol->col.bytes); } } } -int64_t getFilledNumOfRes(SFillInfo* pFillInfo, TSKEY ekey, int32_t maxNumOfRows) { +int64_t getNumOfResWithFill(SFillInfo* pFillInfo, TSKEY ekey, int32_t maxNumOfRows) { int64_t* tsList = (int64_t*) pFillInfo->pData[0]; int32_t numOfRows = taosNumOfRemainRows(pFillInfo); TSKEY ekey1 = ekey; - if (pFillInfo->order != TSDB_ORDER_ASC) { - pFillInfo->endKey = taosTimeTruncate(ekey, &pFillInfo->interval, pFillInfo->precision); + if (!FILL_IS_ASC_FILL(pFillInfo)) { + pFillInfo->end = taosTimeTruncate(ekey, &pFillInfo->interval, pFillInfo->precision); } int64_t numOfRes = -1; @@ -179,20 +200,20 @@ int64_t getFilledNumOfRes(SFillInfo* pFillInfo, TSKEY ekey, int32_t maxNumOfRows TSKEY lastKey = tsList[pFillInfo->numOfRows - 1]; numOfRes = taosTimeCountInterval( lastKey, - pFillInfo->start, + pFillInfo->currentKey, pFillInfo->interval.sliding, pFillInfo->interval.slidingUnit, pFillInfo->precision); numOfRes += 1; assert(numOfRes >= numOfRows); } else { // reach the end of data - if ((ekey1 < pFillInfo->start && FILL_IS_ASC_FILL(pFillInfo)) || - (ekey1 > pFillInfo->start && !FILL_IS_ASC_FILL(pFillInfo))) { + if ((ekey1 < pFillInfo->currentKey && FILL_IS_ASC_FILL(pFillInfo)) || + (ekey1 > pFillInfo->currentKey && !FILL_IS_ASC_FILL(pFillInfo))) { return 0; } numOfRes = taosTimeCountInterval( ekey1, - pFillInfo->start, + pFillInfo->currentKey, pFillInfo->interval.sliding, pFillInfo->interval.slidingUnit, pFillInfo->precision); @@ -203,315 +224,283 @@ int64_t getFilledNumOfRes(SFillInfo* pFillInfo, TSKEY ekey, int32_t maxNumOfRows } int32_t taosNumOfRemainRows(SFillInfo* pFillInfo) { - if (pFillInfo->rowIdx == -1 || pFillInfo->numOfRows == 0) { + if (pFillInfo->numOfRows == 0 || (pFillInfo->numOfRows > 0 && pFillInfo->index >= pFillInfo->numOfRows)) { return 0; } - return pFillInfo->numOfRows - pFillInfo->rowIdx; + return pFillInfo->numOfRows - pFillInfo->index; } -// todo: refactor -static double linearInterpolationImpl(double v1, double v2, double k1, double k2, double k) { - return v1 + (v2 - v1) * (k - k1) / (k2 - k1); -} +#define DO_INTERPOLATION(_v1, _v2, _k1, _k2, _k) ((_v1) + ((_v2) - (_v1)) * (((double)(_k)) - ((double)(_k1))) / 
(((double)(_k2)) - ((double)(_k1)))) -int taosDoLinearInterpolation(int32_t type, SPoint* point1, SPoint* point2, SPoint* point) { - switch (type) { - case TSDB_DATA_TYPE_INT: { - *(int32_t*)point->val = (int32_t)linearInterpolationImpl(*(int32_t*)point1->val, *(int32_t*)point2->val, (double)point1->key, - (double)point2->key, (double)point->key); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - *(float*)point->val = (float) - linearInterpolationImpl(*(float*)point1->val, *(float*)point2->val, (double)point1->key, (double)point2->key, (double)point->key); - break; - }; - case TSDB_DATA_TYPE_DOUBLE: { - *(double*)point->val = - linearInterpolationImpl(*(double*)point1->val, *(double*)point2->val, (double)point1->key, (double)point2->key, (double)point->key); - break; - }; - case TSDB_DATA_TYPE_TIMESTAMP: - case TSDB_DATA_TYPE_BIGINT: { - *(int64_t*)point->val = (int64_t)linearInterpolationImpl((double)(*(int64_t*)point1->val), (double)(*(int64_t*)point2->val), (double)point1->key, - (double)point2->key, (double)point->key); - break; - }; - case TSDB_DATA_TYPE_SMALLINT: { - *(int16_t*)point->val = (int16_t)linearInterpolationImpl(*(int16_t*)point1->val, *(int16_t*)point2->val, (double)point1->key, - (double)point2->key, (double)point->key); - break; - }; - case TSDB_DATA_TYPE_TINYINT: { - *(int8_t*) point->val = (int8_t) - linearInterpolationImpl(*(int8_t*)point1->val, *(int8_t*)point2->val, (double)point1->key, (double)point2->key, (double)point->key); - break; - }; - default: { - // TODO: Deal with interpolation with bool and strings and timestamp - return -1; - } +int32_t taosGetLinearInterpolationVal(int32_t type, SPoint* point1, SPoint* point2, SPoint* point) { + double v1 = -1; + double v2 = -1; + + GET_TYPED_DATA(v1, double, type, point1->val); + GET_TYPED_DATA(v2, double, type, point2->val); + + double r = DO_INTERPOLATION(v1, v2, point1->key, point2->key, point->key); + + switch(type) { + case TSDB_DATA_TYPE_TINYINT: *(int8_t*) point->val = (int8_t) r;break; + case TSDB_DATA_TYPE_SMALLINT: *(int16_t*) point->val = (int16_t) r;break; + case TSDB_DATA_TYPE_INT: *(int32_t*) point->val = (int32_t) r;break; + case TSDB_DATA_TYPE_BIGINT: *(int64_t*) point->val = (int64_t) r;break; + case TSDB_DATA_TYPE_DOUBLE: *(double*) point->val = (double) r;break; + case TSDB_DATA_TYPE_FLOAT: *(float*) point->val = (float) r;break; + default: + assert(0); } - return 0; + return TSDB_CODE_SUCCESS; } -static void setTagsValue(SFillInfo* pFillInfo, tFilePage** data, int32_t num) { +static void setTagsValue(SFillInfo* pFillInfo, tFilePage** data, int32_t genRows) { for(int32_t j = 0; j < pFillInfo->numOfCols; ++j) { SFillColInfo* pCol = &pFillInfo->pFillCol[j]; if (TSDB_COL_IS_NORMAL_COL(pCol->flag)) { continue; } - char* val1 = elePtrAt(data[j]->data, pCol->col.bytes, num); + char* val1 = elePtrAt(data[j]->data, pCol->col.bytes, genRows); - for(int32_t i = 0; i < pFillInfo->numOfTags; ++i) { - SFillTagColInfo* pTag = &pFillInfo->pTags[i]; - if (pTag->col.colId == pCol->col.colId) { - assignVal(val1, pTag->tagVal, pCol->col.bytes, pCol->col.type); - break; - } - } + assert(pCol->tagIndex >= 0 && pCol->tagIndex < pFillInfo->numOfTags); + SFillTagColInfo* pTag = &pFillInfo->pTags[pCol->tagIndex]; + + assert (pTag->col.colId == pCol->col.colId); + assignVal(val1, pTag->tagVal, pCol->col.bytes, pCol->col.type); } } -static void doFillResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t* num, char** srcData, int64_t ts, - bool outOfBound) { - char* prevValues = pFillInfo->prevValues; - char* nextValues = 
pFillInfo->nextValues; +static void setNullValueForRow(SFillInfo* pFillInfo, tFilePage** data, int32_t numOfCol, int32_t rowIndex) { + // the first are always the timestamp column, so start from the second column. + for (int32_t i = 1; i < numOfCol; ++i) { + SFillColInfo* pCol = &pFillInfo->pFillCol[i]; + + char* output = elePtrAt(data[i]->data, pCol->col.bytes, rowIndex); + setNull(output, pCol->col.type, pCol->col.bytes); + } +} + +static void doFillOneRowResult(SFillInfo* pFillInfo, tFilePage** data, char** srcData, int64_t ts, bool outOfBound) { + char* prev = pFillInfo->prevValues; + char* next = pFillInfo->nextValues; SPoint point1, point2, point; int32_t step = GET_FORWARD_DIRECTION_FACTOR(pFillInfo->order); - char* val = elePtrAt(data[0]->data, TSDB_KEYSIZE, *num); - *(TSKEY*) val = pFillInfo->start; - - int32_t numOfValCols = pFillInfo->numOfCols - pFillInfo->numOfTags; + // set the primary timestamp column value + int32_t index = pFillInfo->numOfCurrent; + char* val = elePtrAt(data[0]->data, TSDB_KEYSIZE, index); + *(TSKEY*) val = pFillInfo->currentKey; // set the other values - if (pFillInfo->fillType == TSDB_FILL_PREV) { - char* p = FILL_IS_ASC_FILL(pFillInfo) ? prevValues : nextValues; + if (pFillInfo->type == TSDB_FILL_PREV) { + char* p = FILL_IS_ASC_FILL(pFillInfo) ? prev : next; if (p != NULL) { - for (int32_t i = 1; i < numOfValCols; ++i) { + for (int32_t i = 1; i < pFillInfo->numOfCols; ++i) { SFillColInfo* pCol = &pFillInfo->pFillCol[i]; - - char* val1 = elePtrAt(data[i]->data, pCol->col.bytes, *num); - if (isNull(p + pCol->col.offset, pCol->col.type)) { - if (pCol->col.type == TSDB_DATA_TYPE_BINARY || pCol->col.type == TSDB_DATA_TYPE_NCHAR) { - setVardataNull(val1, pCol->col.type); - } else { - setNull(val1, pCol->col.type, pCol->col.bytes); - } - } else { - assignVal(val1, p + pCol->col.offset, pCol->col.bytes, pCol->col.type); + if (TSDB_COL_IS_TAG(pCol->flag)) { + continue; } - } - } else { // no prev value yet, set the value for NULL - for (int32_t i = 1; i < numOfValCols; ++i) { - SFillColInfo* pCol = &pFillInfo->pFillCol[i]; - char* val1 = elePtrAt(data[i]->data, pCol->col.bytes, *num); - if (pCol->col.type == TSDB_DATA_TYPE_BINARY||pCol->col.type == TSDB_DATA_TYPE_NCHAR) { - setVardataNull(val1, pCol->col.type); - } else { - setNull(val1, pCol->col.type, pCol->col.bytes); - } + char* output = elePtrAt(data[i]->data, pCol->col.bytes, index); + assignVal(output, p + pCol->col.offset, pCol->col.bytes, pCol->col.type); } + } else { // no prev value yet, set the value for NULL + setNullValueForRow(pFillInfo, data, pFillInfo->numOfCols, index); } - - setTagsValue(pFillInfo, data, *num); - } else if (pFillInfo->fillType == TSDB_FILL_LINEAR) { + } else if (pFillInfo->type == TSDB_FILL_LINEAR) { // TODO : linear interpolation supports NULL value - if (prevValues != NULL && !outOfBound) { - for (int32_t i = 1; i < numOfValCols; ++i) { + if (prev != NULL && !outOfBound) { + for (int32_t i = 1; i < pFillInfo->numOfCols; ++i) { SFillColInfo* pCol = &pFillInfo->pFillCol[i]; - + if (TSDB_COL_IS_TAG(pCol->flag)) { + continue; + } + int16_t type = pCol->col.type; int16_t bytes = pCol->col.bytes; - char *val1 = elePtrAt(data[i]->data, pCol->col.bytes, *num); - if (type == TSDB_DATA_TYPE_BINARY|| type == TSDB_DATA_TYPE_NCHAR) { - setVardataNull(val1, pCol->col.type); - continue; - } else if (type == TSDB_DATA_TYPE_BOOL) { + char *val1 = elePtrAt(data[i]->data, pCol->col.bytes, index); + if (type == TSDB_DATA_TYPE_BINARY|| type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BOOL) 
{ setNull(val1, pCol->col.type, bytes); continue; } - point1 = (SPoint){.key = *(TSKEY*)(prevValues), .val = prevValues + pCol->col.offset}; - point2 = (SPoint){.key = ts, .val = srcData[i] + pFillInfo->rowIdx * bytes}; - point = (SPoint){.key = pFillInfo->start, .val = val1}; - taosDoLinearInterpolation(type, &point1, &point2, &point); + point1 = (SPoint){.key = *(TSKEY*)(prev), .val = prev + pCol->col.offset}; + point2 = (SPoint){.key = ts, .val = srcData[i] + pFillInfo->index * bytes}; + point = (SPoint){.key = pFillInfo->currentKey, .val = val1}; + taosGetLinearInterpolationVal(type, &point1, &point2, &point); } - - setTagsValue(pFillInfo, data, *num); - } else { - for (int32_t i = 1; i < numOfValCols; ++i) { - SFillColInfo* pCol = &pFillInfo->pFillCol[i]; - - char* val1 = elePtrAt(data[i]->data, pCol->col.bytes, *num); - - if (pCol->col.type == TSDB_DATA_TYPE_BINARY || pCol->col.type == TSDB_DATA_TYPE_NCHAR) { - setVardataNull(val1, pCol->col.type); - } else { - setNull(val1, pCol->col.type, pCol->col.bytes); - } - } - - setTagsValue(pFillInfo, data, *num); - + setNullValueForRow(pFillInfo, data, pFillInfo->numOfCols, index); } } else { /* fill the default value */ - for (int32_t i = 1; i < numOfValCols; ++i) { + for (int32_t i = 1; i < pFillInfo->numOfCols; ++i) { SFillColInfo* pCol = &pFillInfo->pFillCol[i]; - - char* val1 = elePtrAt(data[i]->data, pCol->col.bytes, *num); + if (TSDB_COL_IS_TAG(pCol->flag)) { + continue; + } + + char* val1 = elePtrAt(data[i]->data, pCol->col.bytes, index); assignVal(val1, (char*)&pCol->fillVal.i, pCol->col.bytes, pCol->col.type); } - - setTagsValue(pFillInfo, data, *num); } - pFillInfo->start = taosTimeAdd(pFillInfo->start, pFillInfo->interval.sliding * step, pFillInfo->interval.slidingUnit, pFillInfo->precision); + setTagsValue(pFillInfo, data, index); + pFillInfo->currentKey = taosTimeAdd(pFillInfo->currentKey, pFillInfo->interval.sliding * step, pFillInfo->interval.slidingUnit, pFillInfo->precision); pFillInfo->numOfCurrent++; - - (*num) += 1; } -static void initBeforeAfterDataBuf(SFillInfo* pFillInfo, char** nextValues) { - if (*nextValues != NULL) { +static void initBeforeAfterDataBuf(SFillInfo* pFillInfo, char** next) { + if (*next != NULL) { return; } - *nextValues = calloc(1, pFillInfo->rowSize); + *next = calloc(1, pFillInfo->rowSize); for (int i = 1; i < pFillInfo->numOfCols; i++) { SFillColInfo* pCol = &pFillInfo->pFillCol[i]; - - if (pCol->col.type == TSDB_DATA_TYPE_BINARY||pCol->col.type == TSDB_DATA_TYPE_NCHAR) { - setVardataNull(*nextValues + pCol->col.offset, pCol->col.type); - } else { - setNull(*nextValues + pCol->col.offset, pCol->col.type, pCol->col.bytes); - } + setNull(*next + pCol->col.offset, pCol->col.type, pCol->col.bytes); + } +} + +static void copyCurrentRowIntoBuf(SFillInfo* pFillInfo, char** srcData, char* buf) { + int32_t rowIndex = pFillInfo->index; + for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) { + SFillColInfo* pCol = &pFillInfo->pFillCol[i]; + memcpy(buf + pCol->col.offset, srcData[i] + rowIndex * pCol->col.bytes, pCol->col.bytes); } } -int32_t generateDataBlockImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t numOfRows, int32_t outputRows, char** srcData) { - int32_t num = 0; +static int32_t fillResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t outputRows) { pFillInfo->numOfCurrent = 0; - char** prevValues = &pFillInfo->prevValues; - char** nextValues = &pFillInfo->nextValues; + char** srcData = pFillInfo->pData; + char** prev = &pFillInfo->prevValues; + char** next = &pFillInfo->nextValues; - 
int32_t numOfTags = pFillInfo->numOfTags; int32_t step = GET_FORWARD_DIRECTION_FACTOR(pFillInfo->order); - if (numOfRows == 0) { - /* - * These data are generated according to fill strategy, since the current timestamp is out of time window of - * real result set. Note that we need to keep the direct previous result rows, to generated the filled data. - */ - while (num < outputRows) { - doFillResultImpl(pFillInfo, data, &num, srcData, pFillInfo->start, true); - } - - pFillInfo->numOfTotal += pFillInfo->numOfCurrent; - return outputRows; + if (FILL_IS_ASC_FILL(pFillInfo)) { + assert(pFillInfo->currentKey >= pFillInfo->start); } else { - while (1) { - int64_t ts = ((int64_t*)pFillInfo->pData[0])[pFillInfo->rowIdx]; + assert(pFillInfo->currentKey <= pFillInfo->start); + } - if ((pFillInfo->start < ts && FILL_IS_ASC_FILL(pFillInfo)) || - (pFillInfo->start > ts && !FILL_IS_ASC_FILL(pFillInfo))) { - /* set the next value for interpolation */ - initBeforeAfterDataBuf(pFillInfo, nextValues); - - int32_t offset = pFillInfo->rowIdx; - for (int32_t i = 0; i < pFillInfo->numOfCols - numOfTags; ++i) { - SFillColInfo* pCol = &pFillInfo->pFillCol[i]; - memcpy(*nextValues + pCol->col.offset, srcData[i] + offset * pCol->col.bytes, pCol->col.bytes); - } + while (pFillInfo->numOfCurrent < outputRows) { + int64_t ts = ((int64_t*)pFillInfo->pData[0])[pFillInfo->index]; + + if ((pFillInfo->currentKey < ts && FILL_IS_ASC_FILL(pFillInfo)) || + (pFillInfo->currentKey > ts && !FILL_IS_ASC_FILL(pFillInfo))) { + /* set the next value for interpolation */ + initBeforeAfterDataBuf(pFillInfo, next); + copyCurrentRowIntoBuf(pFillInfo, srcData, *next); + } + + if (((pFillInfo->currentKey < ts && FILL_IS_ASC_FILL(pFillInfo)) || (pFillInfo->currentKey > ts && !FILL_IS_ASC_FILL(pFillInfo))) && + pFillInfo->numOfCurrent < outputRows) { + + // fill the gap between two actual input rows + while (((pFillInfo->currentKey < ts && FILL_IS_ASC_FILL(pFillInfo)) || + (pFillInfo->currentKey > ts && !FILL_IS_ASC_FILL(pFillInfo))) && + pFillInfo->numOfCurrent < outputRows) { + doFillOneRowResult(pFillInfo, data, srcData, ts, false); } - if (((pFillInfo->start < ts && FILL_IS_ASC_FILL(pFillInfo)) || - (pFillInfo->start > ts && !FILL_IS_ASC_FILL(pFillInfo))) && num < outputRows) { - - while (((pFillInfo->start < ts && FILL_IS_ASC_FILL(pFillInfo)) || - (pFillInfo->start > ts && !FILL_IS_ASC_FILL(pFillInfo))) && num < outputRows) { - doFillResultImpl(pFillInfo, data, &num, srcData, ts, false); - } + // output buffer is full, abort + if (pFillInfo->numOfCurrent == outputRows) { + pFillInfo->numOfTotal += pFillInfo->numOfCurrent; + return outputRows; + } + } else { + assert(pFillInfo->currentKey == ts); + initBeforeAfterDataBuf(pFillInfo, prev); - /* output buffer is full, abort */ - if ((num == outputRows && FILL_IS_ASC_FILL(pFillInfo)) || (num < 0 && !FILL_IS_ASC_FILL(pFillInfo))) { - pFillInfo->numOfTotal += pFillInfo->numOfCurrent; - return outputRows; + // assign rows to dst buffer + for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) { + SFillColInfo* pCol = &pFillInfo->pFillCol[i]; + if (TSDB_COL_IS_TAG(pCol->flag)) { + continue; } - } else { - assert(pFillInfo->start == ts); - initBeforeAfterDataBuf(pFillInfo, prevValues); - - // assign rows to dst buffer - for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) { - SFillColInfo* pCol = &pFillInfo->pFillCol[i]; - if (TSDB_COL_IS_TAG(pCol->flag)) { - continue; - } - char* val1 = elePtrAt(data[i]->data, pCol->col.bytes, num); - char* src = elePtrAt(srcData[i], pCol->col.bytes, 
pFillInfo->rowIdx); - - if (i == 0 || - (pCol->functionId != TSDB_FUNC_COUNT && !isNull(src, pCol->col.type)) || - (pCol->functionId == TSDB_FUNC_COUNT && GET_INT64_VAL(src) != 0)) { - assignVal(val1, src, pCol->col.bytes, pCol->col.type); - memcpy(*prevValues + pCol->col.offset, src, pCol->col.bytes); - } else { // i > 0 and data is null , do interpolation - if (pFillInfo->fillType == TSDB_FILL_PREV) { - assignVal(val1, *prevValues + pCol->col.offset, pCol->col.bytes, pCol->col.type); - } else if (pFillInfo->fillType == TSDB_FILL_LINEAR) { - assignVal(val1, src, pCol->col.bytes, pCol->col.type); - memcpy(*prevValues + pCol->col.offset, src, pCol->col.bytes); - } else { - assignVal(val1, (char*) &pCol->fillVal.i, pCol->col.bytes, pCol->col.type); - } + char* output = elePtrAt(data[i]->data, pCol->col.bytes, pFillInfo->numOfCurrent); + char* src = elePtrAt(srcData[i], pCol->col.bytes, pFillInfo->index); + + if (i == 0 || (pCol->functionId != TSDB_FUNC_COUNT && !isNull(src, pCol->col.type)) || + (pCol->functionId == TSDB_FUNC_COUNT && GET_INT64_VAL(src) != 0)) { + assignVal(output, src, pCol->col.bytes, pCol->col.type); + memcpy(*prev + pCol->col.offset, src, pCol->col.bytes); + } else { // i > 0 and data is null , do interpolation + if (pFillInfo->type == TSDB_FILL_PREV) { + assignVal(output, *prev + pCol->col.offset, pCol->col.bytes, pCol->col.type); + } else if (pFillInfo->type == TSDB_FILL_LINEAR) { + assignVal(output, src, pCol->col.bytes, pCol->col.type); + memcpy(*prev + pCol->col.offset, src, pCol->col.bytes); + } else { + assignVal(output, (char*)&pCol->fillVal.i, pCol->col.bytes, pCol->col.type); } } + } - // set the tag value for final result - setTagsValue(pFillInfo, data, num); + // set the tag value for final result + setTagsValue(pFillInfo, data, pFillInfo->numOfCurrent); - pFillInfo->start = taosTimeAdd(pFillInfo->start, pFillInfo->interval.sliding*step, pFillInfo->interval.slidingUnit, pFillInfo->precision); - pFillInfo->rowIdx += 1; + pFillInfo->currentKey = taosTimeAdd(pFillInfo->currentKey, pFillInfo->interval.sliding * step, + pFillInfo->interval.slidingUnit, pFillInfo->precision); + pFillInfo->index += 1; + pFillInfo->numOfCurrent += 1; + } - pFillInfo->numOfCurrent +=1; - num += 1; + if (pFillInfo->index >= pFillInfo->numOfRows || pFillInfo->numOfCurrent >= outputRows) { + /* the raw data block is exhausted, next value does not exists */ + if (pFillInfo->index >= pFillInfo->numOfRows) { + tfree(*next); } - if ((pFillInfo->rowIdx >= pFillInfo->numOfRows && FILL_IS_ASC_FILL(pFillInfo)) || - (pFillInfo->rowIdx < 0 && !FILL_IS_ASC_FILL(pFillInfo)) || num >= outputRows) { - if (pFillInfo->rowIdx >= pFillInfo->numOfRows || pFillInfo->rowIdx < 0) { - pFillInfo->rowIdx = -1; - pFillInfo->numOfRows = 0; + pFillInfo->numOfTotal += pFillInfo->numOfCurrent; + return pFillInfo->numOfCurrent; + } + } - /* the raw data block is exhausted, next value does not exists */ - taosTFree(*nextValues); - } + return pFillInfo->numOfCurrent; +} - pFillInfo->numOfTotal += pFillInfo->numOfCurrent; - return num; - } - } +static int64_t fillExternalResults(SFillInfo* pFillInfo, tFilePage** output, int64_t resultCapacity) { + /* + * These data are generated according to fill strategy, since the current timestamp is out of the time window of + * real result set. Note that we need to keep the direct previous result rows, to generated the filled data. 
+ */ + pFillInfo->numOfCurrent = 0; + while (pFillInfo->numOfCurrent < resultCapacity) { + doFillOneRowResult(pFillInfo, output, pFillInfo->pData, pFillInfo->start, true); } + + pFillInfo->numOfTotal += pFillInfo->numOfCurrent; + + assert(pFillInfo->numOfCurrent == resultCapacity); + return resultCapacity; } -int64_t taosGenerateDataBlock(SFillInfo* pFillInfo, tFilePage** output, int32_t capacity) { - int32_t remain = taosNumOfRemainRows(pFillInfo); // todo use iterator? +int64_t taosFillResultDataBlock(SFillInfo* pFillInfo, tFilePage** output, int32_t capacity) { + int32_t remain = taosNumOfRemainRows(pFillInfo); + + int64_t numOfRes = getNumOfResWithFill(pFillInfo, pFillInfo->end, capacity); + assert(numOfRes <= capacity); + + // no data existed for fill operation now, append result according to the fill strategy + if (remain == 0) { + fillExternalResults(pFillInfo, output, numOfRes); + } else { + fillResultImpl(pFillInfo, output, (int32_t) numOfRes); + assert(numOfRes == pFillInfo->numOfCurrent); + } + + qDebug("fill:%p, generated fill result, src block:%d, index:%d, brange:%"PRId64"-%"PRId64", currentKey:%"PRId64", current:%d, total:%d, %p", + pFillInfo, pFillInfo->numOfRows, pFillInfo->index, pFillInfo->start, pFillInfo->end, pFillInfo->currentKey, pFillInfo->numOfCurrent, + pFillInfo->numOfTotal, pFillInfo->handle); - int32_t rows = (int32_t)getFilledNumOfRes(pFillInfo, pFillInfo->endKey, capacity); - int32_t numOfRes = generateDataBlockImpl(pFillInfo, output, remain, rows, pFillInfo->pData); - assert(numOfRes == rows); - return numOfRes; } diff --git a/src/query/src/qHistogram.c b/src/query/src/qHistogram.c index 703ee2c52129a067d1070cfbe567087ec48a7a6e..35e5906d1f797c2f53f3906bf93c6797ac766c3e 100644 --- a/src/query/src/qHistogram.c +++ b/src/query/src/qHistogram.c @@ -168,7 +168,7 @@ int32_t tHistogramAdd(SHistogramInfo** pHisto, double val) { (*pHisto)->numOfEntries += 1; } } else { /* insert a new slot */ - if ((*pHisto)->numOfElems > 1 && idx < (*pHisto)->numOfEntries) { + if ((*pHisto)->numOfElems >= 1 && idx < (*pHisto)->numOfEntries) { if (idx > 0) { assert((*pHisto)->elems[idx - 1].val <= val); } @@ -661,4 +661,4 @@ SHistogramInfo* tHistogramMerge(SHistogramInfo* pHisto1, SHistogramInfo* pHisto2 free(pHistoBins); return pResHistogram; -} \ No newline at end of file +} diff --git a/src/query/src/qParserImpl.c b/src/query/src/qParserImpl.c index 7e8128f200658af282bab89bd73dc2c27ddb25c1..0853565fc633d9ed32eb4d562dd4f2901c059d04 100644 --- a/src/query/src/qParserImpl.c +++ b/src/query/src/qParserImpl.c @@ -15,11 +15,9 @@ #include "os.h" #include "qSqlparser.h" -#include "queryLog.h" #include "taosdef.h" #include "taosmsg.h" #include "tcmdtype.h" -#include "tglobal.h" #include "tstoken.h" #include "tstrbuild.h" #include "ttokendef.h" @@ -227,13 +225,13 @@ tSQLExpr *tSQLExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optrType) { tSQLExprDestroy(pLeft); tSQLExprDestroy(pRight); - } else if ((pLeft->val.nType == TSDB_DATA_TYPE_DOUBLE && pRight->val.nType == TSDB_DATA_TYPE_BIGINT) || - (pRight->val.nType == TSDB_DATA_TYPE_DOUBLE && pLeft->val.nType == TSDB_DATA_TYPE_BIGINT)) { + } else if ((pLeft->nSQLOptr == TK_FLOAT && pRight->nSQLOptr == TK_INTEGER) || (pLeft->nSQLOptr == TK_INTEGER && pRight->nSQLOptr == TK_FLOAT) || + (pLeft->nSQLOptr == TK_FLOAT && pRight->nSQLOptr == TK_FLOAT)) { pExpr->val.nType = TSDB_DATA_TYPE_DOUBLE; - pExpr->nSQLOptr = TK_FLOAT; + pExpr->nSQLOptr = TK_FLOAT; - double left = pLeft->val.nType == TSDB_DATA_TYPE_DOUBLE ? 
pLeft->val.dKey : pLeft->val.i64Key; - double right = pRight->val.nType == TSDB_DATA_TYPE_DOUBLE ? pRight->val.dKey : pRight->val.i64Key; + double left = (pLeft->val.nType == TSDB_DATA_TYPE_DOUBLE) ? pLeft->val.dKey : pLeft->val.i64Key; + double right = (pRight->val.nType == TSDB_DATA_TYPE_DOUBLE) ? pRight->val.dKey : pRight->val.i64Key; switch (optrType) { case TK_PLUS: { @@ -314,130 +312,57 @@ void tSQLExprDestroy(tSQLExpr *pExpr) { tSQLExprNodeDestroy(pExpr); } -static void *tVariantListExpand(tVariantList *pList) { - if (pList->nAlloc <= pList->nExpr) { // - int32_t newSize = (pList->nAlloc << 1) + 4; - - void *ptr = realloc(pList->a, newSize * sizeof(pList->a[0])); - if (ptr == 0) { - return NULL; - } - - pList->nAlloc = newSize; - pList->a = ptr; - } - - assert(pList->a != 0); - return pList; -} - -tVariantList *tVariantListAppend(tVariantList *pList, tVariant *pVar, uint8_t sortOrder) { +SArray *tVariantListAppendToken(SArray *pList, SStrToken *pToken, uint8_t order) { if (pList == NULL) { - pList = calloc(1, sizeof(tVariantList)); - } - - if (tVariantListExpand(pList) == NULL) { - return pList; + pList = taosArrayInit(4, sizeof(tVariantListItem)); } - if (pVar) { - tVariantListItem *pItem = &pList->a[pList->nExpr++]; - /* - * Here we do not employ the assign function, since we need the pz attribute of structure - * , which is the point to char string, to free it! - * - * Otherwise, the original pointer may be lost, which causes memory leak. - */ - memcpy(pItem, pVar, sizeof(tVariant)); - pItem->sortOrder = sortOrder; - } - return pList; -} - -tVariantList *tVariantListInsert(tVariantList *pList, tVariant *pVar, uint8_t sortOrder, int32_t index) { - if (pList == NULL || index >= pList->nExpr) { - return tVariantListAppend(NULL, pVar, sortOrder); - } - - if (tVariantListExpand(pList) == NULL) { - return pList; - } - - if (pVar) { - memmove(&pList->a[index + 1], &pList->a[index], sizeof(tVariantListItem) * (pList->nExpr - index)); - - tVariantListItem *pItem = &pList->a[index]; - /* - * Here we do not employ the assign function, since we need the pz attribute of structure - * , which is the point to char string, to free it! - * - * Otherwise, the original pointer may be lost, which causes memory leak. - */ - memcpy(pItem, pVar, sizeof(tVariant)); - pItem->sortOrder = sortOrder; + if (pToken) { + tVariantListItem item; + tVariantCreate(&item.pVar, pToken); + item.sortOrder = order; - pList->nExpr++; + taosArrayPush(pList, &item); } return pList; } -void tVariantListDestroy(tVariantList *pList) { - if (pList == NULL) return; - - for (int32_t i = 0; i < pList->nExpr; ++i) { - tVariantDestroy(&pList->a[i].pVar); - } - - free(pList->a); - free(pList); -} - -tVariantList *tVariantListAppendToken(tVariantList *pList, SStrToken *pToken, uint8_t sortOrder) { +SArray *tVariantListAppend(SArray *pList, tVariant *pVar, uint8_t sortOrder) { if (pList == NULL) { - pList = calloc(1, sizeof(tVariantList)); + pList = taosArrayInit(4, sizeof(tVariantListItem)); } - if (tVariantListExpand(pList) == NULL) { + if (pVar == NULL) { return pList; } - if (pToken) { - tVariant t = {0}; - tVariantCreate(&t, pToken); + /* + * Here we do not employ the assign function, since we need the pz attribute of structure + * , which is the point to char string, to free it! + * + * Otherwise, the original pointer may be lost, which causes memory leak. 
+ */ + tVariantListItem item; + item.pVar = *pVar; + item.sortOrder = sortOrder; - tVariantListItem *pItem = &pList->a[pList->nExpr++]; - memcpy(pItem, &t, sizeof(tVariant)); - pItem->sortOrder = sortOrder; - } + taosArrayPush(pList, &item); return pList; } -tFieldList *tFieldListAppend(tFieldList *pList, TAOS_FIELD *pField) { - if (pList == NULL) pList = calloc(1, sizeof(tFieldList)); - - if (pList->nAlloc <= pList->nField) { // - pList->nAlloc = (pList->nAlloc << 1) + 4; - pList->p = realloc(pList->p, pList->nAlloc * sizeof(pList->p[0])); - if (pList->p == 0) { - pList->nField = pList->nAlloc = 0; - return pList; - } +SArray *tVariantListInsert(SArray *pList, tVariant *pVar, uint8_t sortOrder, int32_t index) { + if (pList == NULL || pVar == NULL || index >= taosArrayGetSize(pList)) { + return tVariantListAppend(NULL, pVar, sortOrder); } - assert(pList->p != 0); - if (pField) { - struct TAOS_FIELD *pItem = (struct TAOS_FIELD *)&pList->p[pList->nField++]; - memcpy(pItem, pField, sizeof(TAOS_FIELD)); - } - return pList; -} + tVariantListItem item; -void tFieldListDestroy(tFieldList *pList) { - if (pList == NULL) return; + item.pVar = *pVar; + item.sortOrder = sortOrder; - free(pList->p); - free(pList); + taosArrayInsert(pList, index, &item); + return pList; } void setDBName(SStrToken *pCpxName, SStrToken *pDB) { @@ -464,8 +389,6 @@ void tSQLSetColumnInfo(TAOS_FIELD *pField, SStrToken *pName, TAOS_FIELD *pType) void tSQLSetColumnType(TAOS_FIELD *pField, SStrToken *type) { pField->type = -1; - int32_t LENGTH_SIZE_OF_STR = 2; // in case of nchar and binary, there two bytes to keep the length of binary|nchar. - for (int8_t i = 0; i < tListLen(tDataTypeDesc); ++i) { if ((strncasecmp(type->z, tDataTypeDesc[i].aName, tDataTypeDesc[i].nameLen) == 0) && (type->n == tDataTypeDesc[i].nameLen)) { @@ -481,14 +404,14 @@ void tSQLSetColumnType(TAOS_FIELD *pField, SStrToken *type) { if (type->type == 0) { pField->bytes = 0; } else { - pField->bytes = -(int32_t)type->type * TSDB_NCHAR_SIZE + LENGTH_SIZE_OF_STR; + pField->bytes = (int16_t)(-(int32_t)type->type * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE); } } else if (i == TSDB_DATA_TYPE_BINARY) { /* for binary, the TOKENTYPE is the length of binary */ if (type->type == 0) { pField->bytes = 0; } else { - pField->bytes = -(int32_t) type->type + LENGTH_SIZE_OF_STR; + pField->bytes = (int16_t) (-(int32_t) type->type + VARSTR_HEADER_SIZE); } } break; @@ -499,9 +422,9 @@ void tSQLSetColumnType(TAOS_FIELD *pField, SStrToken *type) { /* * extract the select info out of sql string */ -SQuerySQL *tSetQuerySQLElems(SStrToken *pSelectToken, tSQLExprList *pSelection, tVariantList *pFrom, tSQLExpr *pWhere, - tVariantList *pGroupby, tVariantList *pSortOrder, SIntervalVal *pInterval, - SStrToken *pSliding, tVariantList *pFill, SLimitVal *pLimit, SLimitVal *pGLimit) { +SQuerySQL *tSetQuerySQLElems(SStrToken *pSelectToken, tSQLExprList *pSelection, SArray *pFrom, tSQLExpr *pWhere, + SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval, + SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pGLimit) { assert(pSelection != NULL); SQuerySQL *pQuery = calloc(1, sizeof(SQuerySQL)); @@ -535,6 +458,11 @@ SQuerySQL *tSetQuerySQLElems(SStrToken *pSelectToken, tSQLExprList *pSelection, return pQuery; } +void freeVariant(void *pItem) { + tVariantListItem* p = (tVariantListItem*) pItem; + tVariantDestroy(&p->pVar); +} + void doDestroyQuerySql(SQuerySQL *pQuerySql) { if (pQuerySql == NULL) { return; @@ -547,17 +475,18 @@ void doDestroyQuerySql(SQuerySQL *pQuerySql) { 
tSQLExprDestroy(pQuerySql->pWhere); pQuerySql->pWhere = NULL; - tVariantListDestroy(pQuerySql->pSortOrder); + taosArrayDestroyEx(pQuerySql->pSortOrder, freeVariant); pQuerySql->pSortOrder = NULL; - - tVariantListDestroy(pQuerySql->pGroupby); + + taosArrayDestroyEx(pQuerySql->pGroupby, freeVariant); pQuerySql->pGroupby = NULL; - - tVariantListDestroy(pQuerySql->from); + + taosArrayDestroyEx(pQuerySql->from, freeVariant); pQuerySql->from = NULL; - - tVariantListDestroy(pQuerySql->fillType); - + + taosArrayDestroyEx(pQuerySql->fillType, freeVariant); + pQuerySql->fillType = NULL; + free(pQuerySql); } @@ -571,11 +500,11 @@ void destroyAllSelectClause(SSubclauseInfo *pClause) { doDestroyQuerySql(pQuerySql); } - taosTFree(pClause->pClause); + tfree(pClause->pClause); } -SCreateTableSQL *tSetCreateSQLElems(tFieldList *pCols, tFieldList *pTags, SStrToken *pStableName, - tVariantList *pTagVals, SQuerySQL *pSelect, int32_t type) { +SCreateTableSQL *tSetCreateSQLElems(SArray *pCols, SArray *pTags, SStrToken *pStableName, + SArray *pTagVals, SQuerySQL *pSelect, int32_t type) { SCreateTableSQL *pCreate = calloc(1, sizeof(SCreateTableSQL)); switch (type) { @@ -607,7 +536,7 @@ SCreateTableSQL *tSetCreateSQLElems(tFieldList *pCols, tFieldList *pTags, SStrTo return pCreate; } -SAlterTableSQL *tAlterTableSQLElems(SStrToken *pMeterName, tFieldList *pCols, tVariantList *pVals, int32_t type) { +SAlterTableSQL *tAlterTableSQLElems(SStrToken *pMeterName, SArray *pCols, SArray *pVals, int32_t type) { SAlterTableSQL *pAlterTable = calloc(1, sizeof(SAlterTableSQL)); pAlterTable->name = *pMeterName; @@ -637,26 +566,26 @@ void SQLInfoDestroy(SSqlInfo *pInfo) { SCreateTableSQL *pCreateTableInfo = pInfo->pCreateTableInfo; doDestroyQuerySql(pCreateTableInfo->pSelect); - tFieldListDestroy(pCreateTableInfo->colInfo.pColumns); - tFieldListDestroy(pCreateTableInfo->colInfo.pTagColumns); + taosArrayDestroy(pCreateTableInfo->colInfo.pColumns); + taosArrayDestroy(pCreateTableInfo->colInfo.pTagColumns); - tVariantListDestroy(pCreateTableInfo->usingInfo.pTagVals); - taosTFree(pInfo->pCreateTableInfo); + taosArrayDestroyEx(pCreateTableInfo->usingInfo.pTagVals, freeVariant); + tfree(pInfo->pCreateTableInfo); } else if (pInfo->type == TSDB_SQL_ALTER_TABLE) { - tVariantListDestroy(pInfo->pAlterInfo->varList); - tFieldListDestroy(pInfo->pAlterInfo->pAddColumns); + taosArrayDestroyEx(pInfo->pAlterInfo->varList, freeVariant); + taosArrayDestroy(pInfo->pAlterInfo->pAddColumns); - taosTFree(pInfo->pAlterInfo); + tfree(pInfo->pAlterInfo); } else { if (pInfo->pDCLInfo != NULL && pInfo->pDCLInfo->nAlloc > 0) { free(pInfo->pDCLInfo->a); } if (pInfo->pDCLInfo != NULL && pInfo->type == TSDB_SQL_CREATE_DB) { - tVariantListDestroy(pInfo->pDCLInfo->dbOpt.keep); + taosArrayDestroyEx(pInfo->pDCLInfo->dbOpt.keep, freeVariant); } - taosTFree(pInfo->pDCLInfo); + tfree(pInfo->pDCLInfo); } } @@ -872,5 +801,6 @@ void setDefaultCreateDbOption(SCreateDBInfo *pDBInfo) { pDBInfo->quorum = -1; pDBInfo->keep = NULL; + pDBInfo->update = -1; memset(&pDBInfo->precision, 0, sizeof(SStrToken)); } diff --git a/src/query/src/qPercentile.c b/src/query/src/qPercentile.c index ab9ffb7bcb458129b7f170e7020cee904d2dfda6..3bdc0d477f777faa71a8e5d2264021932dddeea4 100644 --- a/src/query/src/qPercentile.c +++ b/src/query/src/qPercentile.c @@ -361,8 +361,8 @@ void tMemBucketDestroy(tMemBucket *pBucket) { } destroyResultBuf(pBucket->pBuffer); - taosTFree(pBucket->pSlots); - taosTFree(pBucket); + tfree(pBucket->pSlots); + tfree(pBucket); } void 
tMemBucketUpdateBoundingBox(MinMaxEntry *r, const char *data, int32_t dataType) { @@ -680,7 +680,7 @@ double getPercentileImpl(tMemBucket *pMemBucket, int32_t count, double fraction) } double val = (1 - fraction) * td + fraction * nd; - taosTFree(buffer); + tfree(buffer); return val; } else { // incur a second round bucket split diff --git a/src/query/src/qResultbuf.c b/src/query/src/qResultbuf.c index b3e97459d36c68d48c04447b159473bd179a5dbb..c5ba551f204d72424e11f3e0ef29029a03e1c207 100644 --- a/src/query/src/qResultbuf.c +++ b/src/query/src/qResultbuf.c @@ -165,7 +165,7 @@ static char* doFlushPageToDisk(SDiskbasedResultBuf* pResultBuf, SPageInfo* pg) { static char* flushPageToDisk(SDiskbasedResultBuf* pResultBuf, SPageInfo* pg) { int32_t ret = TSDB_CODE_SUCCESS; - assert(pResultBuf->numOfPages * pResultBuf->pageSize == pResultBuf->totalBufSize && pResultBuf->numOfPages >= pResultBuf->inMemPages); + assert(((int64_t) pResultBuf->numOfPages * pResultBuf->pageSize) == pResultBuf->totalBufSize && pResultBuf->numOfPages >= pResultBuf->inMemPages); if (pResultBuf->file == NULL) { if ((ret = createDiskFile(pResultBuf)) != TSDB_CODE_SUCCESS) { @@ -267,7 +267,7 @@ static char* evicOneDataPage(SDiskbasedResultBuf* pResultBuf) { assert(d->pn == pn); d->pn = NULL; - taosTFree(pn); + tfree(pn); bufPage = flushPageToDisk(pResultBuf, d); } @@ -407,18 +407,18 @@ void destroyResultBuf(SDiskbasedResultBuf* pResultBuf) { } if (pResultBuf->file != NULL) { - qDebug("QInfo:%p res output buffer closed, total:%" PRId64 " bytes, inmem size:%dbytes, file size:%"PRId64" bytes", - pResultBuf->handle, pResultBuf->totalBufSize, listNEles(pResultBuf->lruList) * pResultBuf->pageSize, - pResultBuf->fileSize); + qDebug("QInfo:%p res output buffer closed, total:%.2f Kb, inmem size:%.2f Kb, file size:%.2f", + pResultBuf->handle, pResultBuf->totalBufSize/1024.0, listNEles(pResultBuf->lruList) * pResultBuf->pageSize / 1024.0, + pResultBuf->fileSize/1024.0); fclose(pResultBuf->file); } else { - qDebug("QInfo:%p res output buffer closed, total:%" PRId64 " bytes, no file created", pResultBuf->handle, - pResultBuf->totalBufSize); + qDebug("QInfo:%p res output buffer closed, total:%.2f Kb, no file created", pResultBuf->handle, + pResultBuf->totalBufSize/1024.0); } unlink(pResultBuf->path); - taosTFree(pResultBuf->path); + tfree(pResultBuf->path); SHashMutableIterator* iter = taosHashCreateIter(pResultBuf->groupSet); while(taosHashIterNext(iter)) { @@ -426,8 +426,8 @@ void destroyResultBuf(SDiskbasedResultBuf* pResultBuf) { size_t n = taosArrayGetSize(*p); for(int32_t i = 0; i < n; ++i) { SPageInfo* pi = taosArrayGetP(*p, i); - taosTFree(pi->pData); - taosTFree(pi); + tfree(pi->pData); + tfree(pi); } taosArrayDestroy(*p); @@ -440,8 +440,8 @@ void destroyResultBuf(SDiskbasedResultBuf* pResultBuf) { taosHashCleanup(pResultBuf->groupSet); taosHashCleanup(pResultBuf->all); - taosTFree(pResultBuf->assistBuf); - taosTFree(pResultBuf); + tfree(pResultBuf->assistBuf); + tfree(pResultBuf); } SPageInfo* getLastPageInfo(SIDList pList) { diff --git a/src/query/src/qTokenizer.c b/src/query/src/qTokenizer.c index 0c9f92786fdf837e2453fe70f594c10e7a093421..98545c8ef3930587b6f7bd2c5b8d40ded0066d52 100644 --- a/src/query/src/qTokenizer.c +++ b/src/query/src/qTokenizer.c @@ -155,6 +155,7 @@ static SKeyword keywordTable[] = { {"INSERT", TK_INSERT}, {"INTO", TK_INTO}, {"VALUES", TK_VALUES}, + {"UPDATE", TK_UPDATE}, {"RESET", TK_RESET}, {"QUERY", TK_QUERY}, {"ADD", TK_ADD}, @@ -251,16 +252,16 @@ static const char isIdChar[] = { 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 0, 0, 0, 0, 0, /* 7x */ }; -static void* KeywordHashTable = NULL; +static void* keywordHashTable = NULL; static void doInitKeywordsTable(void) { int numOfEntries = tListLen(keywordTable); - KeywordHashTable = taosHashInit(numOfEntries, MurmurHash3_32, true, false); + keywordHashTable = taosHashInit(numOfEntries, MurmurHash3_32, true, false); for (int32_t i = 0; i < numOfEntries; i++) { keywordTable[i].len = (uint8_t)strlen(keywordTable[i].name); void* ptr = &keywordTable[i]; - taosHashPut(KeywordHashTable, keywordTable[i].name, keywordTable[i].len, (void*)&ptr, POINTER_BYTES); + taosHashPut(keywordHashTable, keywordTable[i].name, keywordTable[i].len, (void*)&ptr, POINTER_BYTES); } } @@ -282,7 +283,7 @@ int tSQLKeywordCode(const char* z, int n) { } } - SKeyword** pKey = (SKeyword**)taosHashGet(KeywordHashTable, key, n); + SKeyword** pKey = (SKeyword**)taosHashGet(keywordHashTable, key, n); return (pKey != NULL)? (*pKey)->type:TK_ID; } @@ -660,5 +661,8 @@ SStrToken tStrGetToken(char* str, int32_t* i, bool isPrevOptr, uint32_t numOfIgn bool isKeyWord(const char* z, int32_t len) { return (tSQLKeywordCode((char*)z, len) != TK_ID); } void taosCleanupKeywordsTable() { - taosHashCleanup(KeywordHashTable); -} \ No newline at end of file + void* m = keywordHashTable; + if (m != NULL && atomic_val_compare_exchange_ptr(&keywordHashTable, m, 0) == m) { + taosHashCleanup(m); + } +} diff --git a/src/query/src/qTsbuf.c b/src/query/src/qTsbuf.c index b264f6cdc9d815a12cc5a3ab0e5c09c0d670bcdb..8a5a87baab5abbfd04f1c97dbe91a7226606263e 100644 --- a/src/query/src/qTsbuf.c +++ b/src/query/src/qTsbuf.c @@ -4,7 +4,7 @@ #include "tutil.h" static int32_t getDataStartOffset(); -static void TSBufUpdateVnodeInfo(STSBuf* pTSBuf, int32_t index, STSVnodeBlockInfo* pBlockInfo); +static void TSBufUpdateGroupInfo(STSBuf* pTSBuf, int32_t index, STSGroupBlockInfo* pBlockInfo); static STSBuf* allocResForTSBuf(STSBuf* pTSBuf); static int32_t STSBufUpdateHeader(STSBuf* pTSBuf, STSBufFileHeader* pHeader); @@ -32,7 +32,7 @@ STSBuf* tsBufCreate(bool autoDelete, int32_t order) { } // update the header info - STSBufFileHeader header = {.magic = TS_COMP_FILE_MAGIC, .numOfVnode = pTSBuf->numOfVnodes, .tsOrder = TSDB_ORDER_ASC}; + STSBufFileHeader header = {.magic = TS_COMP_FILE_MAGIC, .numOfGroup = pTSBuf->numOfGroups, .tsOrder = TSDB_ORDER_ASC}; STSBufUpdateHeader(pTSBuf, &header); tsBufResetPos(pTSBuf); @@ -75,9 +75,9 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) { return NULL; } - if (header.numOfVnode > pTSBuf->numOfAlloc) { - pTSBuf->numOfAlloc = header.numOfVnode; - STSVnodeBlockInfoEx* tmp = realloc(pTSBuf->pData, sizeof(STSVnodeBlockInfoEx) * pTSBuf->numOfAlloc); + if (header.numOfGroup > pTSBuf->numOfAlloc) { + pTSBuf->numOfAlloc = header.numOfGroup; + STSGroupBlockInfoEx* tmp = realloc(pTSBuf->pData, sizeof(STSGroupBlockInfoEx) * pTSBuf->numOfAlloc); if (tmp == NULL) { tsBufDestroy(pTSBuf); return NULL; @@ -86,7 +86,7 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) { pTSBuf->pData = tmp; } - pTSBuf->numOfVnodes = header.numOfVnode; + pTSBuf->numOfGroups = header.numOfGroup; // check the ts order pTSBuf->tsOrder = header.tsOrder; @@ -96,9 +96,9 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) { return NULL; } - size_t infoSize = sizeof(STSVnodeBlockInfo) * pTSBuf->numOfVnodes; + size_t infoSize = sizeof(STSGroupBlockInfo) * pTSBuf->numOfGroups; - STSVnodeBlockInfo* buf = (STSVnodeBlockInfo*)calloc(1, infoSize); + STSGroupBlockInfo* buf = 
(STSGroupBlockInfo*)calloc(1, infoSize); if (buf == NULL) { tsBufDestroy(pTSBuf); return NULL; @@ -109,9 +109,9 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) { UNUSED(sz); // the length value for each vnode is not kept in file, so does not set the length value - for (int32_t i = 0; i < pTSBuf->numOfVnodes; ++i) { - STSVnodeBlockInfoEx* pBlockList = &pTSBuf->pData[i]; - memcpy(&pBlockList->info, &buf[i], sizeof(STSVnodeBlockInfo)); + for (int32_t i = 0; i < pTSBuf->numOfGroups; ++i) { + STSGroupBlockInfoEx* pBlockList = &pTSBuf->pData[i]; + memcpy(&pBlockList->info, &buf[i], sizeof(STSGroupBlockInfo)); } free(buf); @@ -131,8 +131,8 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) { pTSBuf->cur.order = TSDB_ORDER_ASC; pTSBuf->autoDelete = autoDelete; -// tscDebug("create tsBuf from file:%s, fd:%d, size:%d, numOfVnode:%d, autoDelete:%d", pTSBuf->path, fileno(pTSBuf->f), -// pTSBuf->fileSize, pTSBuf->numOfVnodes, pTSBuf->autoDelete); +// tscDebug("create tsBuf from file:%s, fd:%d, size:%d, numOfGroups:%d, autoDelete:%d", pTSBuf->path, fileno(pTSBuf->f), +// pTSBuf->fileSize, pTSBuf->numOfGroups, pTSBuf->autoDelete); return pTSBuf; } @@ -142,11 +142,11 @@ void* tsBufDestroy(STSBuf* pTSBuf) { return NULL; } - taosTFree(pTSBuf->assistBuf); - taosTFree(pTSBuf->tsData.rawBuf); + tfree(pTSBuf->assistBuf); + tfree(pTSBuf->tsData.rawBuf); - taosTFree(pTSBuf->pData); - taosTFree(pTSBuf->block.payload); + tfree(pTSBuf->pData); + tfree(pTSBuf->block.payload); fclose(pTSBuf->f); @@ -156,58 +156,59 @@ void* tsBufDestroy(STSBuf* pTSBuf) { } else { // tscDebug("tsBuf %p destroyed, tmp file:%s, remains", pTSBuf, pTSBuf->path); } - + + tVariantDestroy(&pTSBuf->block.tag); free(pTSBuf); return NULL; } -static STSVnodeBlockInfoEx* tsBufGetLastVnodeInfo(STSBuf* pTSBuf) { - int32_t last = pTSBuf->numOfVnodes - 1; +static STSGroupBlockInfoEx* tsBufGetLastGroupInfo(STSBuf* pTSBuf) { + int32_t last = pTSBuf->numOfGroups - 1; assert(last >= 0); return &pTSBuf->pData[last]; } -static STSVnodeBlockInfoEx* addOneVnodeInfo(STSBuf* pTSBuf, int32_t vnodeId) { - if (pTSBuf->numOfAlloc <= pTSBuf->numOfVnodes) { +static STSGroupBlockInfoEx* addOneGroupInfo(STSBuf* pTSBuf, int32_t id) { + if (pTSBuf->numOfAlloc <= pTSBuf->numOfGroups) { uint32_t newSize = (uint32_t)(pTSBuf->numOfAlloc * 1.5); assert((int32_t)newSize > pTSBuf->numOfAlloc); - STSVnodeBlockInfoEx* tmp = (STSVnodeBlockInfoEx*)realloc(pTSBuf->pData, sizeof(STSVnodeBlockInfoEx) * newSize); + STSGroupBlockInfoEx* tmp = (STSGroupBlockInfoEx*)realloc(pTSBuf->pData, sizeof(STSGroupBlockInfoEx) * newSize); if (tmp == NULL) { return NULL; } pTSBuf->pData = tmp; pTSBuf->numOfAlloc = newSize; - memset(&pTSBuf->pData[pTSBuf->numOfVnodes], 0, sizeof(STSVnodeBlockInfoEx) * (newSize - pTSBuf->numOfVnodes)); + memset(&pTSBuf->pData[pTSBuf->numOfGroups], 0, sizeof(STSGroupBlockInfoEx) * (newSize - pTSBuf->numOfGroups)); } - if (pTSBuf->numOfVnodes > 0) { - STSVnodeBlockInfoEx* pPrevBlockInfoEx = tsBufGetLastVnodeInfo(pTSBuf); + if (pTSBuf->numOfGroups > 0) { + STSGroupBlockInfoEx* pPrevBlockInfoEx = tsBufGetLastGroupInfo(pTSBuf); // update prev vnode length info in file - TSBufUpdateVnodeInfo(pTSBuf, pTSBuf->numOfVnodes - 1, &pPrevBlockInfoEx->info); + TSBufUpdateGroupInfo(pTSBuf, pTSBuf->numOfGroups - 1, &pPrevBlockInfoEx->info); } // set initial value for vnode block - STSVnodeBlockInfo* pBlockInfo = &pTSBuf->pData[pTSBuf->numOfVnodes].info; - pBlockInfo->vnode = vnodeId; + STSGroupBlockInfo* pBlockInfo = 
&pTSBuf->pData[pTSBuf->numOfGroups].info; + pBlockInfo->id = id; pBlockInfo->offset = pTSBuf->fileSize; assert(pBlockInfo->offset >= getDataStartOffset()); // update vnode info in file - TSBufUpdateVnodeInfo(pTSBuf, pTSBuf->numOfVnodes, pBlockInfo); + TSBufUpdateGroupInfo(pTSBuf, pTSBuf->numOfGroups, pBlockInfo); // add one vnode info - pTSBuf->numOfVnodes += 1; + pTSBuf->numOfGroups += 1; // update the header info STSBufFileHeader header = { - .magic = TS_COMP_FILE_MAGIC, .numOfVnode = pTSBuf->numOfVnodes, .tsOrder = pTSBuf->tsOrder}; + .magic = TS_COMP_FILE_MAGIC, .numOfGroup = pTSBuf->numOfGroups, .tsOrder = pTSBuf->tsOrder}; STSBufUpdateHeader(pTSBuf, &header); - return tsBufGetLastVnodeInfo(pTSBuf); + return tsBufGetLastGroupInfo(pTSBuf); } static void shrinkBuffer(STSList* ptsData) { @@ -218,6 +219,15 @@ static void shrinkBuffer(STSList* ptsData) { } } +static int32_t getTagAreaLength(tVariant* pa) { + int32_t t = sizeof(pa->nLen) * 2 + sizeof(pa->nType); + if (pa->nType != TSDB_DATA_TYPE_NULL) { + t += pa->nLen; + } + + return t; +} + static void writeDataToDisk(STSBuf* pTSBuf) { if (pTSBuf->tsData.len == 0) { return; @@ -243,28 +253,36 @@ static void writeDataToDisk(STSBuf* pTSBuf) { */ int32_t metaLen = 0; metaLen += (int32_t)fwrite(&pBlock->tag.nType, 1, sizeof(pBlock->tag.nType), pTSBuf->f); - metaLen += (int32_t)fwrite(&pBlock->tag.nLen, 1, sizeof(pBlock->tag.nLen), pTSBuf->f); + int32_t trueLen = pBlock->tag.nLen; if (pBlock->tag.nType == TSDB_DATA_TYPE_BINARY || pBlock->tag.nType == TSDB_DATA_TYPE_NCHAR) { + metaLen += (int32_t)fwrite(&pBlock->tag.nLen, 1, sizeof(pBlock->tag.nLen), pTSBuf->f); metaLen += (int32_t)fwrite(pBlock->tag.pz, 1, (size_t)pBlock->tag.nLen, pTSBuf->f); } else if (pBlock->tag.nType != TSDB_DATA_TYPE_NULL) { - metaLen += (int32_t)fwrite(&pBlock->tag.i64Key, 1, sizeof(int64_t), pTSBuf->f); + metaLen += (int32_t)fwrite(&pBlock->tag.nLen, 1, sizeof(pBlock->tag.nLen), pTSBuf->f); + metaLen += (int32_t)fwrite(&pBlock->tag.i64Key, 1, (size_t) pBlock->tag.nLen, pTSBuf->f); + } else { + trueLen = 0; + metaLen += (int32_t)fwrite(&trueLen, 1, sizeof(pBlock->tag.nLen), pTSBuf->f); } fwrite(&pBlock->numOfElem, sizeof(pBlock->numOfElem), 1, pTSBuf->f); fwrite(&pBlock->compLen, sizeof(pBlock->compLen), 1, pTSBuf->f); fwrite(pBlock->payload, (size_t)pBlock->compLen, 1, pTSBuf->f); fwrite(&pBlock->compLen, sizeof(pBlock->compLen), 1, pTSBuf->f); - + + metaLen += (int32_t) fwrite(&trueLen, 1, sizeof(pBlock->tag.nLen), pTSBuf->f); + assert(metaLen == getTagAreaLength(&pBlock->tag)); + int32_t blockSize = metaLen + sizeof(pBlock->numOfElem) + sizeof(pBlock->compLen) * 2 + pBlock->compLen; pTSBuf->fileSize += blockSize; pTSBuf->tsData.len = 0; - STSVnodeBlockInfoEx* pVnodeBlockInfoEx = tsBufGetLastVnodeInfo(pTSBuf); + STSGroupBlockInfoEx* pGroupBlockInfoEx = tsBufGetLastGroupInfo(pTSBuf); - pVnodeBlockInfoEx->info.compLen += blockSize; - pVnodeBlockInfoEx->info.numOfBlocks += 1; + pGroupBlockInfoEx->info.compLen += blockSize; + pGroupBlockInfoEx->info.numOfBlocks += 1; shrinkBuffer(&pTSBuf->tsData); } @@ -284,23 +302,28 @@ static void expandBuffer(STSList* ptsData, int32_t inputSize) { STSBlock* readDataFromDisk(STSBuf* pTSBuf, int32_t order, bool decomp) { STSBlock* pBlock = &pTSBuf->block; - + // clear the memory buffer - void* tmp = pBlock->payload; - memset(pBlock, 0, sizeof(STSBlock)); - pBlock->payload = tmp; - + pBlock->compLen = 0; + pBlock->padding = 0; + pBlock->numOfElem = 0; + + int32_t offset = -1; + if (order == TSDB_ORDER_DESC) { /* * set the right position 
for the reversed traverse, the reversed traverse is started from * the end of each comp data block */ - int32_t ret = fseek(pTSBuf->f, -(int32_t)(sizeof(pBlock->padding)), SEEK_CUR); - size_t sz = fread(&pBlock->padding, sizeof(pBlock->padding), 1, pTSBuf->f); + int32_t prev = -(int32_t) (sizeof(pBlock->padding) + sizeof(pBlock->tag.nLen)); + int32_t ret = fseek(pTSBuf->f, prev, SEEK_CUR); + size_t sz = fread(&pBlock->padding, 1, sizeof(pBlock->padding), pTSBuf->f); + sz = fread(&pBlock->tag.nLen, 1, sizeof(pBlock->tag.nLen), pTSBuf->f); UNUSED(sz); - + pBlock->compLen = pBlock->padding; - int32_t offset = pBlock->compLen + sizeof(pBlock->compLen) * 2 + sizeof(pBlock->numOfElem) + sizeof(pBlock->tag); + + offset = pBlock->compLen + sizeof(pBlock->compLen) * 2 + sizeof(pBlock->numOfElem) + getTagAreaLength(&pBlock->tag); ret = fseek(pTSBuf->f, -offset, SEEK_CUR); UNUSED(ret); } @@ -319,7 +342,7 @@ STSBlock* readDataFromDisk(STSBuf* pTSBuf, int32_t order, bool decomp) { sz = fread(pBlock->tag.pz, (size_t)pBlock->tag.nLen, 1, pTSBuf->f); } else if (pBlock->tag.nType != TSDB_DATA_TYPE_NULL) { - sz = fread(&pBlock->tag.i64Key, sizeof(int64_t), 1, pTSBuf->f); + sz = fread(&pBlock->tag.i64Key, (size_t) pBlock->tag.nLen, 1, pTSBuf->f); } sz = fread(&pBlock->numOfElem, sizeof(pBlock->numOfElem), 1, pTSBuf->f); @@ -327,8 +350,7 @@ STSBlock* readDataFromDisk(STSBuf* pTSBuf, int32_t order, bool decomp) { sz = fread(&pBlock->compLen, sizeof(pBlock->compLen), 1, pTSBuf->f); UNUSED(sz); sz = fread(pBlock->payload, (size_t)pBlock->compLen, 1, pTSBuf->f); - UNUSED(sz); - + if (decomp) { pTSBuf->tsData.len = tsDecompressTimestamp(pBlock->payload, pBlock->compLen, pBlock->numOfElem, pTSBuf->tsData.rawBuf, @@ -337,11 +359,20 @@ STSBlock* readDataFromDisk(STSBuf* pTSBuf, int32_t order, bool decomp) { // read the comp length at the length of comp block sz = fread(&pBlock->padding, sizeof(pBlock->padding), 1, pTSBuf->f); + assert(pBlock->padding == pBlock->compLen); + + int32_t n = 0; + sz = fread(&n, sizeof(pBlock->tag.nLen), 1, pTSBuf->f); + if (pBlock->tag.nType == TSDB_DATA_TYPE_NULL) { + assert(n == 0); + } else { + assert(n == pBlock->tag.nLen); + } + UNUSED(sz); // for backwards traverse, set the start position at the end of previous block if (order == TSDB_ORDER_DESC) { - int32_t offset = pBlock->compLen + sizeof(pBlock->compLen) * 2 + sizeof(pBlock->numOfElem) + sizeof(pBlock->tag); int32_t r = fseek(pTSBuf->f, -offset, SEEK_CUR); UNUSED(r); } @@ -382,20 +413,20 @@ static int32_t setCheckTSOrder(STSBuf* pTSBuf, const char* pData, int32_t len) { return TSDB_CODE_SUCCESS; } -void tsBufAppend(STSBuf* pTSBuf, int32_t vnodeId, tVariant* tag, const char* pData, int32_t len) { - STSVnodeBlockInfoEx* pBlockInfo = NULL; +void tsBufAppend(STSBuf* pTSBuf, int32_t id, tVariant* tag, const char* pData, int32_t len) { + STSGroupBlockInfoEx* pBlockInfo = NULL; STSList* ptsData = &pTSBuf->tsData; - if (pTSBuf->numOfVnodes == 0 || tsBufGetLastVnodeInfo(pTSBuf)->info.vnode != vnodeId) { + if (pTSBuf->numOfGroups == 0 || tsBufGetLastGroupInfo(pTSBuf)->info.id != id) { writeDataToDisk(pTSBuf); shrinkBuffer(ptsData); - pBlockInfo = addOneVnodeInfo(pTSBuf, vnodeId); + pBlockInfo = addOneGroupInfo(pTSBuf, id); } else { - pBlockInfo = tsBufGetLastVnodeInfo(pTSBuf); + pBlockInfo = tsBufGetLastGroupInfo(pTSBuf); } - assert(pBlockInfo->info.vnode == vnodeId); + assert(pBlockInfo->info.id == id); if ((tVariantCompare(&pTSBuf->block.tag, tag) != 0) && ptsData->len > 0) { // new arrived data with different tags value, save current 
value into disk first @@ -403,7 +434,7 @@ void tsBufAppend(STSBuf* pTSBuf, int32_t vnodeId, tVariant* tag, const char* pDa } else { expandBuffer(ptsData, len); } - + tVariantAssign(&pTSBuf->block.tag, tag); memcpy(ptsData->rawBuf + ptsData->len, pData, (size_t)len); @@ -433,23 +464,23 @@ void tsBufFlush(STSBuf* pTSBuf) { writeDataToDisk(pTSBuf); shrinkBuffer(&pTSBuf->tsData); - STSVnodeBlockInfoEx* pBlockInfoEx = tsBufGetLastVnodeInfo(pTSBuf); + STSGroupBlockInfoEx* pBlockInfoEx = tsBufGetLastGroupInfo(pTSBuf); // update prev vnode length info in file - TSBufUpdateVnodeInfo(pTSBuf, pTSBuf->numOfVnodes - 1, &pBlockInfoEx->info); + TSBufUpdateGroupInfo(pTSBuf, pTSBuf->numOfGroups - 1, &pBlockInfoEx->info); // save the ts order into header STSBufFileHeader header = { - .magic = TS_COMP_FILE_MAGIC, .numOfVnode = pTSBuf->numOfVnodes, .tsOrder = pTSBuf->tsOrder}; + .magic = TS_COMP_FILE_MAGIC, .numOfGroup = pTSBuf->numOfGroups, .tsOrder = pTSBuf->tsOrder}; STSBufUpdateHeader(pTSBuf, &header); fsync(fileno(pTSBuf->f)); } -static int32_t tsBufFindVnodeIndexFromId(STSVnodeBlockInfoEx* pVnodeInfoEx, int32_t numOfVnodes, int32_t vnodeId) { +static int32_t tsBufFindGroupById(STSGroupBlockInfoEx* pGroupInfoEx, int32_t numOfGroups, int32_t id) { int32_t j = -1; - for (int32_t i = 0; i < numOfVnodes; ++i) { - if (pVnodeInfoEx[i].info.vnode == vnodeId) { + for (int32_t i = 0; i < numOfGroups; ++i) { + if (pGroupInfoEx[i].info.id == id) { j = i; break; } @@ -459,7 +490,7 @@ static int32_t tsBufFindVnodeIndexFromId(STSVnodeBlockInfoEx* pVnodeInfoEx, int3 } // todo opt performance by cache blocks info -static int32_t tsBufFindBlock(STSBuf* pTSBuf, STSVnodeBlockInfo* pBlockInfo, int32_t blockIndex) { +static int32_t tsBufFindBlock(STSBuf* pTSBuf, STSGroupBlockInfo* pBlockInfo, int32_t blockIndex) { if (fseek(pTSBuf->f, pBlockInfo->offset, SEEK_SET) != 0) { return -1; } @@ -478,7 +509,7 @@ static int32_t tsBufFindBlock(STSBuf* pTSBuf, STSVnodeBlockInfo* pBlockInfo, int if (pTSBuf->cur.order == TSDB_ORDER_DESC) { STSBlock* pBlock = &pTSBuf->block; int32_t compBlockSize = - pBlock->compLen + sizeof(pBlock->compLen) * 2 + sizeof(pBlock->numOfElem) + sizeof(pBlock->tag); + pBlock->compLen + sizeof(pBlock->compLen) * 2 + sizeof(pBlock->numOfElem) + getTagAreaLength(&pBlock->tag); int32_t ret = fseek(pTSBuf->f, -compBlockSize, SEEK_CUR); UNUSED(ret); } @@ -486,7 +517,7 @@ static int32_t tsBufFindBlock(STSBuf* pTSBuf, STSVnodeBlockInfo* pBlockInfo, int return 0; } -static int32_t tsBufFindBlockByTag(STSBuf* pTSBuf, STSVnodeBlockInfo* pBlockInfo, tVariant* tag) { +static int32_t tsBufFindBlockByTag(STSBuf* pTSBuf, STSGroupBlockInfo* pBlockInfo, tVariant* tag) { bool decomp = false; int64_t offset = 0; @@ -506,21 +537,21 @@ static int32_t tsBufFindBlockByTag(STSBuf* pTSBuf, STSVnodeBlockInfo* pBlockInfo } if (tVariantCompare(&pTSBuf->block.tag, tag) == 0) { - return i; + return (pTSBuf->cur.order == TSDB_ORDER_ASC)? 
i: (pBlockInfo->numOfBlocks - (i + 1)); } } return -1; } -static void tsBufGetBlock(STSBuf* pTSBuf, int32_t vnodeIndex, int32_t blockIndex) { - STSVnodeBlockInfo* pBlockInfo = &pTSBuf->pData[vnodeIndex].info; +static void tsBufGetBlock(STSBuf* pTSBuf, int32_t groupIndex, int32_t blockIndex) { + STSGroupBlockInfo* pBlockInfo = &pTSBuf->pData[groupIndex].info; if (pBlockInfo->numOfBlocks <= blockIndex) { assert(false); } STSCursor* pCur = &pTSBuf->cur; - if (pCur->vgroupIndex == vnodeIndex && ((pCur->blockIndex <= blockIndex && pCur->order == TSDB_ORDER_ASC) || + if (pCur->vgroupIndex == groupIndex && ((pCur->blockIndex <= blockIndex && pCur->order == TSDB_ORDER_ASC) || (pCur->blockIndex >= blockIndex && pCur->order == TSDB_ORDER_DESC))) { int32_t i = 0; bool decomp = false; @@ -555,14 +586,27 @@ static void tsBufGetBlock(STSBuf* pTSBuf, int32_t vnodeIndex, int32_t blockIndex assert((pTSBuf->tsData.len / TSDB_KEYSIZE == pBlock->numOfElem) && (pTSBuf->tsData.allocSize >= pTSBuf->tsData.len)); - pCur->vgroupIndex = vnodeIndex; + pCur->vgroupIndex = groupIndex; pCur->blockIndex = blockIndex; pCur->tsIndex = (pCur->order == TSDB_ORDER_ASC) ? 0 : pBlock->numOfElem - 1; } -STSVnodeBlockInfo* tsBufGetVnodeBlockInfo(STSBuf* pTSBuf, int32_t vnodeId) { - int32_t j = tsBufFindVnodeIndexFromId(pTSBuf->pData, pTSBuf->numOfVnodes, vnodeId); +static int32_t doUpdateGroupInfo(STSBuf* pTSBuf, int64_t offset, STSGroupBlockInfo* pVInfo) { + if (offset < 0 || offset >= getDataStartOffset()) { + return -1; + } + + if (fseek(pTSBuf->f, (int32_t)offset, SEEK_SET) != 0) { + return -1; + } + + fwrite(pVInfo, sizeof(STSGroupBlockInfo), 1, pTSBuf->f); + return 0; +} + +STSGroupBlockInfo* tsBufGetGroupBlockInfo(STSBuf* pTSBuf, int32_t id) { + int32_t j = tsBufFindGroupById(pTSBuf->pData, pTSBuf->numOfGroups, id); if (j == -1) { return NULL; } @@ -571,7 +615,7 @@ STSVnodeBlockInfo* tsBufGetVnodeBlockInfo(STSBuf* pTSBuf, int32_t vnodeId) { } int32_t STSBufUpdateHeader(STSBuf* pTSBuf, STSBufFileHeader* pHeader) { - if ((pTSBuf->f == NULL) || pHeader == NULL || pHeader->numOfVnode == 0 || pHeader->magic != TS_COMP_FILE_MAGIC) { + if ((pTSBuf->f == NULL) || pHeader == NULL || pHeader->numOfGroup == 0 || pHeader->magic != TS_COMP_FILE_MAGIC) { return -1; } @@ -587,7 +631,7 @@ int32_t STSBufUpdateHeader(STSBuf* pTSBuf, STSBufFileHeader* pHeader) { } bool tsBufNextPos(STSBuf* pTSBuf) { - if (pTSBuf == NULL || pTSBuf->numOfVnodes == 0) { + if (pTSBuf == NULL || pTSBuf->numOfGroups == 0) { return false; } @@ -606,16 +650,16 @@ bool tsBufNextPos(STSBuf* pTSBuf) { } } else { // get the last timestamp record in the last block of the last vnode - assert(pTSBuf->numOfVnodes > 0); + assert(pTSBuf->numOfGroups > 0); - int32_t vnodeIndex = pTSBuf->numOfVnodes - 1; - pCur->vgroupIndex = vnodeIndex; + int32_t groupIndex = pTSBuf->numOfGroups - 1; + pCur->vgroupIndex = groupIndex; - int32_t vnodeId = pTSBuf->pData[pCur->vgroupIndex].info.vnode; - STSVnodeBlockInfo* pBlockInfo = tsBufGetVnodeBlockInfo(pTSBuf, vnodeId); + int32_t id = pTSBuf->pData[pCur->vgroupIndex].info.id; + STSGroupBlockInfo* pBlockInfo = tsBufGetGroupBlockInfo(pTSBuf, id); int32_t blockIndex = pBlockInfo->numOfBlocks - 1; - tsBufGetBlock(pTSBuf, vnodeIndex, blockIndex); + tsBufGetBlock(pTSBuf, groupIndex, blockIndex); pCur->tsIndex = pTSBuf->block.numOfElem - 1; if (pTSBuf->block.numOfElem == 0) { @@ -634,12 +678,12 @@ bool tsBufNextPos(STSBuf* pTSBuf) { if ((pCur->order == TSDB_ORDER_ASC && pCur->tsIndex >= pTSBuf->block.numOfElem - 1) || (pCur->order == 
TSDB_ORDER_DESC && pCur->tsIndex <= 0)) { - int32_t vnodeId = pTSBuf->pData[pCur->vgroupIndex].info.vnode; + int32_t id = pTSBuf->pData[pCur->vgroupIndex].info.id; - STSVnodeBlockInfo* pBlockInfo = tsBufGetVnodeBlockInfo(pTSBuf, vnodeId); + STSGroupBlockInfo* pBlockInfo = tsBufGetGroupBlockInfo(pTSBuf, id); if (pBlockInfo == NULL || (pCur->blockIndex >= pBlockInfo->numOfBlocks - 1 && pCur->order == TSDB_ORDER_ASC) || (pCur->blockIndex <= 0 && pCur->order == TSDB_ORDER_DESC)) { - if ((pCur->vgroupIndex >= pTSBuf->numOfVnodes - 1 && pCur->order == TSDB_ORDER_ASC) || + if ((pCur->vgroupIndex >= pTSBuf->numOfGroups - 1 && pCur->order == TSDB_ORDER_ASC) || (pCur->vgroupIndex <= 0 && pCur->order == TSDB_ORDER_DESC)) { pCur->vgroupIndex = -1; return false; @@ -649,7 +693,7 @@ bool tsBufNextPos(STSBuf* pTSBuf) { return false; } - int32_t blockIndex = pCur->order == TSDB_ORDER_ASC ? 0 : pBlockInfo->numOfBlocks - 1; + int32_t blockIndex = (pCur->order == TSDB_ORDER_ASC) ? 0 : (pBlockInfo->numOfBlocks - 1); tsBufGetBlock(pTSBuf, pCur->vgroupIndex + step, blockIndex); break; @@ -675,8 +719,7 @@ void tsBufResetPos(STSBuf* pTSBuf) { } STSElem tsBufGetElem(STSBuf* pTSBuf) { - STSElem elem1 = {.vnode = -1}; - + STSElem elem1 = {.id = -1}; if (pTSBuf == NULL) { return elem1; } @@ -688,9 +731,9 @@ STSElem tsBufGetElem(STSBuf* pTSBuf) { STSBlock* pBlock = &pTSBuf->block; - elem1.vnode = pTSBuf->pData[pCur->vgroupIndex].info.vnode; + elem1.id = pTSBuf->pData[pCur->vgroupIndex].info.id; elem1.ts = *(TSKEY*)(pTSBuf->tsData.rawBuf + pCur->tsIndex * TSDB_KEYSIZE); - tVariantAssign(&elem1.tag, &pBlock->tag); + elem1.tag = &pBlock->tag; return elem1; } @@ -699,35 +742,34 @@ STSElem tsBufGetElem(STSBuf* pTSBuf) { * current only support ts comp data from two vnode merge * @param pDestBuf * @param pSrcBuf - * @param vnodeId + * @param id * @return */ -int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf, int32_t vnodeId) { - if (pDestBuf == NULL || pSrcBuf == NULL || pSrcBuf->numOfVnodes <= 0) { +int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf) { + if (pDestBuf == NULL || pSrcBuf == NULL || pSrcBuf->numOfGroups <= 0) { return 0; } - if (pDestBuf->numOfVnodes + pSrcBuf->numOfVnodes > TS_COMP_FILE_VNODE_MAX) { + if (pDestBuf->numOfGroups + pSrcBuf->numOfGroups > TS_COMP_FILE_GROUP_MAX) { return -1; } // src can only have one vnode index - if (pSrcBuf->numOfVnodes > 1) { - return -1; - } - + assert(pSrcBuf->numOfGroups == 1); + // there are data in buffer, flush to disk first tsBufFlush(pDestBuf); // compared with the last vnode id - if (vnodeId != tsBufGetLastVnodeInfo(pDestBuf)->info.vnode) { - int32_t oldSize = pDestBuf->numOfVnodes; - int32_t newSize = oldSize + pSrcBuf->numOfVnodes; + int32_t id = tsBufGetLastGroupInfo((STSBuf*) pSrcBuf)->info.id; + if (id != tsBufGetLastGroupInfo(pDestBuf)->info.id) { + int32_t oldSize = pDestBuf->numOfGroups; + int32_t newSize = oldSize + pSrcBuf->numOfGroups; if (pDestBuf->numOfAlloc < newSize) { pDestBuf->numOfAlloc = newSize; - STSVnodeBlockInfoEx* tmp = realloc(pDestBuf->pData, sizeof(STSVnodeBlockInfoEx) * newSize); + STSGroupBlockInfoEx* tmp = realloc(pDestBuf->pData, sizeof(STSGroupBlockInfoEx) * newSize); if (tmp == NULL) { return -1; } @@ -736,23 +778,23 @@ int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf, int32_t vnodeId) { } // directly copy the vnode index information - memcpy(&pDestBuf->pData[oldSize], pSrcBuf->pData, (size_t)pSrcBuf->numOfVnodes * sizeof(STSVnodeBlockInfoEx)); + memcpy(&pDestBuf->pData[oldSize], pSrcBuf->pData, 
(size_t)pSrcBuf->numOfGroups * sizeof(STSGroupBlockInfoEx)); // set the new offset value - for (int32_t i = 0; i < pSrcBuf->numOfVnodes; ++i) { - STSVnodeBlockInfoEx* pBlockInfoEx = &pDestBuf->pData[i + oldSize]; + for (int32_t i = 0; i < pSrcBuf->numOfGroups; ++i) { + STSGroupBlockInfoEx* pBlockInfoEx = &pDestBuf->pData[i + oldSize]; pBlockInfoEx->info.offset = (pSrcBuf->pData[i].info.offset - getDataStartOffset()) + pDestBuf->fileSize; - pBlockInfoEx->info.vnode = vnodeId; + pBlockInfoEx->info.id = id; } - pDestBuf->numOfVnodes = newSize; + pDestBuf->numOfGroups = newSize; } else { - STSVnodeBlockInfoEx* pBlockInfoEx = tsBufGetLastVnodeInfo(pDestBuf); + STSGroupBlockInfoEx* pBlockInfoEx = tsBufGetLastGroupInfo(pDestBuf); pBlockInfoEx->len += pSrcBuf->pData[0].len; pBlockInfoEx->info.numOfBlocks += pSrcBuf->pData[0].info.numOfBlocks; pBlockInfoEx->info.compLen += pSrcBuf->pData[0].info.compLen; - pBlockInfoEx->info.vnode = vnodeId; + pBlockInfoEx->info.id = id; } int32_t r = fseek(pDestBuf->f, 0, SEEK_END); @@ -761,7 +803,7 @@ int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf, int32_t vnodeId) { int64_t offset = getDataStartOffset(); int32_t size = (int32_t)pSrcBuf->fileSize - (int32_t)offset; - ssize_t rc = taosFSendFile(pDestBuf->f, pSrcBuf->f, &offset, size); + int64_t rc = taosFSendFile(pDestBuf->f, pSrcBuf->f, &offset, size); if (rc == -1) { // tscError("failed to merge tsBuf from:%s to %s, reason:%s\n", pSrcBuf->path, pDestBuf->path, strerror(errno)); @@ -785,23 +827,23 @@ int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf, int32_t vnodeId) { assert(pDestBuf->fileSize == oldSize + size); -// tscDebug("tsBuf merge success, %p, path:%s, fd:%d, file size:%d, numOfVnode:%d, autoDelete:%d", pDestBuf, -// pDestBuf->path, fileno(pDestBuf->f), pDestBuf->fileSize, pDestBuf->numOfVnodes, pDestBuf->autoDelete); +// tscDebug("tsBuf merge success, %p, path:%s, fd:%d, file size:%d, numOfGroups:%d, autoDelete:%d", pDestBuf, +// pDestBuf->path, fileno(pDestBuf->f), pDestBuf->fileSize, pDestBuf->numOfGroups, pDestBuf->autoDelete); return 0; } -STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_t len, int32_t order) { +STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_t len, int32_t order, int32_t id) { STSBuf* pTSBuf = tsBufCreate(true, order); - STSVnodeBlockInfo* pBlockInfo = &(addOneVnodeInfo(pTSBuf, 0)->info); + STSGroupBlockInfo* pBlockInfo = &(addOneGroupInfo(pTSBuf, 0)->info); pBlockInfo->numOfBlocks = numOfBlocks; pBlockInfo->compLen = len; pBlockInfo->offset = getDataStartOffset(); - pBlockInfo->vnode = 0; + pBlockInfo->id = id; // update prev vnode length info in file - TSBufUpdateVnodeInfo(pTSBuf, pTSBuf->numOfVnodes - 1, pBlockInfo); + TSBufUpdateGroupInfo(pTSBuf, pTSBuf->numOfGroups - 1, pBlockInfo); int32_t ret = fseek(pTSBuf->f, pBlockInfo->offset, SEEK_SET); UNUSED(ret); @@ -813,7 +855,7 @@ STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_ assert(order == TSDB_ORDER_ASC || order == TSDB_ORDER_DESC); STSBufFileHeader header = { - .magic = TS_COMP_FILE_MAGIC, .numOfVnode = pTSBuf->numOfVnodes, .tsOrder = pTSBuf->tsOrder}; + .magic = TS_COMP_FILE_MAGIC, .numOfGroup = pTSBuf->numOfGroups, .tsOrder = pTSBuf->tsOrder}; STSBufUpdateHeader(pTSBuf, &header); fsync(fileno(pTSBuf->f)); @@ -821,14 +863,14 @@ STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_ return pTSBuf; } -STSElem tsBufGetElemStartPos(STSBuf* pTSBuf, int32_t vnodeId, tVariant* tag) { - 
STSElem elem = {.vnode = -1}; +STSElem tsBufGetElemStartPos(STSBuf* pTSBuf, int32_t id, tVariant* tag) { + STSElem elem = {.id = -1}; if (pTSBuf == NULL) { return elem; } - int32_t j = tsBufFindVnodeIndexFromId(pTSBuf->pData, pTSBuf->numOfVnodes, vnodeId); + int32_t j = tsBufFindGroupById(pTSBuf->pData, pTSBuf->numOfGroups, id); if (j == -1) { return elem; } @@ -837,7 +879,7 @@ STSElem tsBufGetElemStartPos(STSBuf* pTSBuf, int32_t vnodeId, tVariant* tag) { // tsBufDisplay(pTSBuf); STSCursor* pCur = &pTSBuf->cur; - STSVnodeBlockInfo* pBlockInfo = &pTSBuf->pData[j].info; + STSGroupBlockInfo* pBlockInfo = &pTSBuf->pData[j].info; int32_t blockIndex = tsBufFindBlockByTag(pTSBuf, pBlockInfo, tag); if (blockIndex < 0) { @@ -893,7 +935,7 @@ STSBuf* tsBufClone(STSBuf* pTSBuf) { void tsBufDisplay(STSBuf* pTSBuf) { printf("-------start of ts comp file-------\n"); - printf("number of vnode:%d\n", pTSBuf->numOfVnodes); + printf("number of vnode:%d\n", pTSBuf->numOfGroups); int32_t old = pTSBuf->cur.order; pTSBuf->cur.order = TSDB_ORDER_ASC; @@ -902,8 +944,8 @@ void tsBufDisplay(STSBuf* pTSBuf) { while (tsBufNextPos(pTSBuf)) { STSElem elem = tsBufGetElem(pTSBuf); - if (elem.tag.nType == TSDB_DATA_TYPE_BIGINT) { - printf("%d-%" PRId64 "-%" PRId64 "\n", elem.vnode, elem.tag.i64Key, elem.ts); + if (elem.tag->nType == TSDB_DATA_TYPE_BIGINT) { + printf("%d-%" PRId64 "-%" PRId64 "\n", elem.id, elem.tag->i64Key, elem.ts); } } @@ -912,33 +954,20 @@ void tsBufDisplay(STSBuf* pTSBuf) { } static int32_t getDataStartOffset() { - return sizeof(STSBufFileHeader) + TS_COMP_FILE_VNODE_MAX * sizeof(STSVnodeBlockInfo); -} - -static int32_t doUpdateVnodeInfo(STSBuf* pTSBuf, int64_t offset, STSVnodeBlockInfo* pVInfo) { - if (offset < 0 || offset >= getDataStartOffset()) { - return -1; - } - - if (fseek(pTSBuf->f, (int32_t)offset, SEEK_SET) != 0) { - return -1; - } - - fwrite(pVInfo, sizeof(STSVnodeBlockInfo), 1, pTSBuf->f); - return 0; + return sizeof(STSBufFileHeader) + TS_COMP_FILE_GROUP_MAX * sizeof(STSGroupBlockInfo); } // update prev vnode length info in file -static void TSBufUpdateVnodeInfo(STSBuf* pTSBuf, int32_t index, STSVnodeBlockInfo* pBlockInfo) { - int32_t offset = sizeof(STSBufFileHeader) + index * sizeof(STSVnodeBlockInfo); - doUpdateVnodeInfo(pTSBuf, offset, pBlockInfo); +static void TSBufUpdateGroupInfo(STSBuf* pTSBuf, int32_t index, STSGroupBlockInfo* pBlockInfo) { + int32_t offset = sizeof(STSBufFileHeader) + index * sizeof(STSGroupBlockInfo); + doUpdateGroupInfo(pTSBuf, offset, pBlockInfo); } static STSBuf* allocResForTSBuf(STSBuf* pTSBuf) { - const int32_t INITIAL_VNODEINFO_SIZE = 4; + const int32_t INITIAL_GROUPINFO_SIZE = 4; - pTSBuf->numOfAlloc = INITIAL_VNODEINFO_SIZE; - pTSBuf->pData = calloc(pTSBuf->numOfAlloc, sizeof(STSVnodeBlockInfoEx)); + pTSBuf->numOfAlloc = INITIAL_GROUPINFO_SIZE; + pTSBuf->pData = calloc(pTSBuf->numOfAlloc, sizeof(STSGroupBlockInfoEx)); if (pTSBuf->pData == NULL) { tsBufDestroy(pTSBuf); return NULL; @@ -969,3 +998,72 @@ static STSBuf* allocResForTSBuf(STSBuf* pTSBuf) { pTSBuf->fileSize += getDataStartOffset(); return pTSBuf; } + +int32_t tsBufGetNumOfGroup(STSBuf* pTSBuf) { + if (pTSBuf == NULL) { + return 0; + } + + return pTSBuf->numOfGroups; +} + +void tsBufGetGroupIdList(STSBuf* pTSBuf, int32_t* num, int32_t** id) { + int32_t size = tsBufGetNumOfGroup(pTSBuf); + if (num != NULL) { + *num = size; + } + + *id = NULL; + if (size == 0) { + return; + } + + (*id) = malloc(tsBufGetNumOfGroup(pTSBuf) * sizeof(int32_t)); + + for(int32_t i = 0; i < size; ++i) { + (*id)[i] = 
pTSBuf->pData[i].info.id; + } +} + +int32_t dumpFileBlockByGroupId(STSBuf* pTSBuf, int32_t groupIndex, void* buf, int32_t* len, int32_t* numOfBlocks) { + assert(groupIndex >= 0 && groupIndex < pTSBuf->numOfGroups); + STSGroupBlockInfo *pBlockInfo = &pTSBuf->pData[groupIndex].info; + + *len = 0; + *numOfBlocks = 0; + + if (fseek(pTSBuf->f, pBlockInfo->offset, SEEK_SET) != 0) { + int code = TAOS_SYSTEM_ERROR(ferror(pTSBuf->f)); +// qError("%p: fseek failed: %s", pSql, tstrerror(code)); + return code; + } + + size_t s = fread(buf, 1, pBlockInfo->compLen, pTSBuf->f); + if (s != pBlockInfo->compLen) { + int code = TAOS_SYSTEM_ERROR(ferror(pTSBuf->f)); +// tscError("%p: fread didn't return expected data: %s", pSql, tstrerror(code)); + return code; + } + + *len = pBlockInfo->compLen; + *numOfBlocks = pBlockInfo->numOfBlocks; + + return TSDB_CODE_SUCCESS; +} + +STSElem tsBufFindElemStartPosByTag(STSBuf* pTSBuf, tVariant* pTag) { + STSElem el = {.id = -1}; + + for (int32_t i = 0; i < pTSBuf->numOfGroups; ++i) { + el = tsBufGetElemStartPos(pTSBuf, pTSBuf->pData[i].info.id, pTag); + if (el.id == pTSBuf->pData[i].info.id) { + return el; + } + } + + return el; +} + +bool tsBufIsValidElem(STSElem* pElem) { + return pElem->id >= 0; +} diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c index 2bd92c74a456c16fe946288968d25d797907a390..c5317226c7c9a8113395a5ae9502ff1835043acb 100644 --- a/src/query/src/qUtil.c +++ b/src/query/src/qUtil.c @@ -14,8 +14,9 @@ */ #include "os.h" -#include "hash.h" #include "taosmsg.h" +#include "hash.h" + #include "qExecutor.h" #include "qUtil.h" @@ -23,102 +24,68 @@ int32_t getOutputInterResultBufSize(SQuery* pQuery) { int32_t size = 0; for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - size += pQuery->pSelectExpr[i].interBytes; + size += pQuery->pExpr1[i].interBytes; } - assert(size > 0); + assert(size >= 0); return size; } -int32_t initWindowResInfo(SWindowResInfo *pWindowResInfo, SQueryRuntimeEnv *pRuntimeEnv, int32_t size, - int32_t threshold, int16_t type) { +int32_t initWindowResInfo(SWindowResInfo *pWindowResInfo, int32_t size, int32_t threshold, int16_t type) { pWindowResInfo->capacity = size; pWindowResInfo->threshold = threshold; pWindowResInfo->type = type; - _hash_fn_t fn = taosGetDefaultHashFunction(type); - pWindowResInfo->hashList = taosHashInit(threshold, fn, true, false); - if (pWindowResInfo->hashList == NULL) { - return TSDB_CODE_QRY_OUT_OF_MEMORY; - } - pWindowResInfo->curIndex = -1; pWindowResInfo->size = 0; pWindowResInfo->prevSKey = TSKEY_INITIAL_VAL; - SQueryCostInfo* pSummary = &pRuntimeEnv->summary; - - // use the pointer arraylist - pWindowResInfo->pResult = calloc(threshold, sizeof(SWindowResult)); + pWindowResInfo->pResult = calloc(pWindowResInfo->capacity, POINTER_BYTES); if (pWindowResInfo->pResult == NULL) { return TSDB_CODE_QRY_OUT_OF_MEMORY; } - pWindowResInfo->interval = pRuntimeEnv->pQuery->interval.interval; - - pSummary->internalSupSize += sizeof(SWindowResult) * threshold; - pSummary->internalSupSize += (pRuntimeEnv->pQuery->numOfOutput * sizeof(SResultInfo) + pRuntimeEnv->interBufSize) * pWindowResInfo->capacity; - pSummary->numOfTimeWindows = threshold; - - for (int32_t i = 0; i < pWindowResInfo->capacity; ++i) { - int32_t code = createQueryResultInfo(pRuntimeEnv->pQuery, &pWindowResInfo->pResult[i], pRuntimeEnv->stableQuery, pRuntimeEnv->interBufSize); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - } - return TSDB_CODE_SUCCESS; } -void destroyTimeWindowRes(SWindowResult *pWindowRes) { - if (pWindowRes == NULL) { - 
return; - } - - free(pWindowRes->resultInfo); -} - void cleanupTimeWindowInfo(SWindowResInfo *pWindowResInfo) { if (pWindowResInfo == NULL) { return; } if (pWindowResInfo->capacity == 0) { - assert(pWindowResInfo->hashList == NULL && pWindowResInfo->pResult == NULL); + assert(pWindowResInfo->pResult == NULL); return; } - if (pWindowResInfo->pResult != NULL) { - for (int32_t i = 0; i < pWindowResInfo->capacity; ++i) { - destroyTimeWindowRes(&pWindowResInfo->pResult[i]); - } - } - - taosHashCleanup(pWindowResInfo->hashList); - taosTFree(pWindowResInfo->pResult); + tfree(pWindowResInfo->pResult); } void resetTimeWindowInfo(SQueryRuntimeEnv *pRuntimeEnv, SWindowResInfo *pWindowResInfo) { if (pWindowResInfo == NULL || pWindowResInfo->capacity == 0) { return; } - + +// assert(pWindowResInfo->size == 1); + for (int32_t i = 0; i < pWindowResInfo->size; ++i) { - SWindowResult *pWindowRes = &pWindowResInfo->pResult[i]; - clearTimeWindowResBuf(pRuntimeEnv, pWindowRes); + SResultRow *pWindowRes = pWindowResInfo->pResult[i]; + clearResultRow(pRuntimeEnv, pWindowRes); + + int32_t groupIndex = 0; + int64_t uid = 0; + + SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, &groupIndex, sizeof(groupIndex), uid); + taosHashRemove(pRuntimeEnv->pResultRowHashTable, (const char *)pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(sizeof(groupIndex))); } pWindowResInfo->curIndex = -1; - taosHashCleanup(pWindowResInfo->hashList); pWindowResInfo->size = 0; - _hash_fn_t fn = taosGetDefaultHashFunction(pWindowResInfo->type); - pWindowResInfo->hashList = taosHashInit(pWindowResInfo->capacity, fn, true, false); - pWindowResInfo->startTime = TSKEY_INITIAL_VAL; pWindowResInfo->prevSKey = TSKEY_INITIAL_VAL; } -void clearFirstNTimeWindow(SQueryRuntimeEnv *pRuntimeEnv, int32_t num) { +void clearFirstNWindowRes(SQueryRuntimeEnv *pRuntimeEnv, int32_t num) { SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; if (pWindowResInfo == NULL || pWindowResInfo->capacity == 0 || pWindowResInfo->size == 0 || num == 0) { return; @@ -127,25 +94,31 @@ void clearFirstNTimeWindow(SQueryRuntimeEnv *pRuntimeEnv, int32_t num) { int32_t numOfClosed = numOfClosedTimeWindow(pWindowResInfo); assert(num >= 0 && num <= numOfClosed); - int16_t type = pWindowResInfo->type; + int16_t type = pWindowResInfo->type; + STableId* id = TSDB_TABLEID(pRuntimeEnv->pQuery->current->pTable); // uid is always set to be 0. 
+ int64_t uid = id->uid; + if (pRuntimeEnv->groupbyNormalCol) { + uid = 0; + } - char *key = NULL; - int16_t bytes = -1; + char *key = NULL; + int16_t bytes = -1; for (int32_t i = 0; i < num; ++i) { - SWindowResult *pResult = &pWindowResInfo->pResult[i]; + SResultRow *pResult = pWindowResInfo->pResult[i]; if (pResult->closed) { // remove the window slot from hash table // todo refactor if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { - key = varDataVal(pResult->key); + key = varDataVal(pResult->key); bytes = varDataLen(pResult->key); } else { key = (char*) &pResult->win.skey; bytes = tDataTypeDesc[pWindowResInfo->type].nSize; } - taosHashRemove(pWindowResInfo->hashList, (const char *)key, bytes); + SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, key, bytes, uid); + taosHashRemove(pRuntimeEnv->pResultRowHashTable, (const char *)pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes)); } else { break; } @@ -155,19 +128,19 @@ void clearFirstNTimeWindow(SQueryRuntimeEnv *pRuntimeEnv, int32_t num) { // clear all the closed windows from the window list for (int32_t k = 0; k < remain; ++k) { - copyTimeWindowResBuf(pRuntimeEnv, &pWindowResInfo->pResult[k], &pWindowResInfo->pResult[num + k]); + copyResultRow(pRuntimeEnv, pWindowResInfo->pResult[k], pWindowResInfo->pResult[num + k]); } // move the unclosed window in the front of the window list for (int32_t k = remain; k < pWindowResInfo->size; ++k) { - SWindowResult *pWindowRes = &pWindowResInfo->pResult[k]; - clearTimeWindowResBuf(pRuntimeEnv, pWindowRes); + SResultRow *pWindowRes = pWindowResInfo->pResult[k]; + clearResultRow(pRuntimeEnv, pWindowRes); } pWindowResInfo->size = remain; for (int32_t k = 0; k < pWindowResInfo->size; ++k) { - SWindowResult *pResult = &pWindowResInfo->pResult[k]; + SResultRow *pResult = pWindowResInfo->pResult[k]; if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { key = varDataVal(pResult->key); @@ -177,12 +150,15 @@ void clearFirstNTimeWindow(SQueryRuntimeEnv *pRuntimeEnv, int32_t num) { bytes = tDataTypeDesc[pWindowResInfo->type].nSize; } - int32_t *p = (int32_t *)taosHashGet(pWindowResInfo->hashList, (const char *)key, bytes); + SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, key, bytes, uid); + int32_t *p = (int32_t *)taosHashGet(pRuntimeEnv->pResultRowHashTable, (const char *)pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes)); assert(p != NULL); int32_t v = (*p - num); assert(v >= 0 && v <= pWindowResInfo->size); - taosHashPut(pWindowResInfo->hashList, (char *)key, bytes, (char *)&v, sizeof(int32_t)); + + SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, key, bytes, uid); + taosHashPut(pRuntimeEnv->pResultRowHashTable, pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes), (char *)&v, sizeof(int32_t)); } pWindowResInfo->curIndex = -1; @@ -195,12 +171,12 @@ void clearClosedTimeWindow(SQueryRuntimeEnv *pRuntimeEnv) { } int32_t numOfClosed = numOfClosedTimeWindow(pWindowResInfo); - clearFirstNTimeWindow(pRuntimeEnv, numOfClosed); + clearFirstNWindowRes(pRuntimeEnv, numOfClosed); } int32_t numOfClosedTimeWindow(SWindowResInfo *pWindowResInfo) { int32_t i = 0; - while (i < pWindowResInfo->size && pWindowResInfo->pResult[i].closed) { + while (i < pWindowResInfo->size && pWindowResInfo->pResult[i]->closed) { ++i; } @@ -211,11 +187,11 @@ void closeAllTimeWindow(SWindowResInfo *pWindowResInfo) { assert(pWindowResInfo->size >= 0 && pWindowResInfo->capacity >= pWindowResInfo->size); for (int32_t i = 0; i < pWindowResInfo->size; ++i) { - if (pWindowResInfo->pResult[i].closed) { + if (pWindowResInfo->pResult[i]->closed) { 
continue; } - pWindowResInfo->pResult[i].closed = true; + pWindowResInfo->pResult[i]->closed = true; } } @@ -231,19 +207,19 @@ void removeRedundantWindow(SWindowResInfo *pWindowResInfo, TSKEY lastKey, int32_ } // get the result order - int32_t resultOrder = (pWindowResInfo->pResult[0].win.skey < pWindowResInfo->pResult[1].win.skey)? 1:-1; + int32_t resultOrder = (pWindowResInfo->pResult[0]->win.skey < pWindowResInfo->pResult[1]->win.skey)? 1:-1; if (order != resultOrder) { return; } int32_t i = 0; if (order == QUERY_ASC_FORWARD_STEP) { - TSKEY ekey = pWindowResInfo->pResult[i].win.ekey; + TSKEY ekey = pWindowResInfo->pResult[i]->win.ekey; while (i < pWindowResInfo->size && (ekey < lastKey)) { ++i; } } else if (order == QUERY_DESC_FORWARD_STEP) { - while (i < pWindowResInfo->size && (pWindowResInfo->pResult[i].win.skey > lastKey)) { + while (i < pWindowResInfo->size && (pWindowResInfo->pResult[i]->win.skey > lastKey)) { ++i; } } @@ -254,32 +230,36 @@ void removeRedundantWindow(SWindowResInfo *pWindowResInfo, TSKEY lastKey, int32_ } bool isWindowResClosed(SWindowResInfo *pWindowResInfo, int32_t slot) { - return (getWindowResult(pWindowResInfo, slot)->closed == true); + return (getResultRow(pWindowResInfo, slot)->closed == true); } void closeTimeWindow(SWindowResInfo *pWindowResInfo, int32_t slot) { - getWindowResult(pWindowResInfo, slot)->closed = true; + getResultRow(pWindowResInfo, slot)->closed = true; } -void clearTimeWindowResBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pWindowRes) { +void clearResultRow(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pWindowRes) { if (pWindowRes == NULL) { return; } - tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pWindowRes->pos.pageId); + // the result does not put into the SDiskbasedResultBuf, ignore it. + if (pWindowRes->pageId >= 0) { + tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pWindowRes->pageId); - for (int32_t i = 0; i < pRuntimeEnv->pQuery->numOfOutput; ++i) { - SResultInfo *pResultInfo = &pWindowRes->resultInfo[i]; - - char * s = getPosInResultPage(pRuntimeEnv, i, pWindowRes, page); - size_t size = pRuntimeEnv->pQuery->pSelectExpr[i].bytes; - memset(s, 0, size); - - RESET_RESULT_INFO(pResultInfo); + for (int32_t i = 0; i < pRuntimeEnv->pQuery->numOfOutput; ++i) { + SResultRowCellInfo *pResultInfo = &pWindowRes->pCellInfo[i]; + + char * s = getPosInResultPage(pRuntimeEnv, i, pWindowRes, page); + size_t size = pRuntimeEnv->pQuery->pExpr1[i].bytes; + memset(s, 0, size); + + RESET_RESULT_INFO(pResultInfo); + } } - + pWindowRes->numOfRows = 0; - pWindowRes->pos = (SPosInfo){-1, -1}; + pWindowRes->pageId = -1; + pWindowRes->rowId = -1; pWindowRes->closed = false; pWindowRes->win = TSWINDOW_INITIALIZER; } @@ -289,7 +269,7 @@ void clearTimeWindowResBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pWindow * since the attribute of "Pos" is bound to each window result when the window result is created in the * disk-based result buffer. 
*/ -void copyTimeWindowResBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *dst, const SWindowResult *src) { +void copyResultRow(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *dst, const SResultRow *src) { dst->numOfRows = src->numOfRows; dst->win = src->win; dst->closed = src->closed; @@ -297,25 +277,105 @@ void copyTimeWindowResBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *dst, con int32_t nOutputCols = pRuntimeEnv->pQuery->numOfOutput; for (int32_t i = 0; i < nOutputCols; ++i) { - SResultInfo *pDst = &dst->resultInfo[i]; - SResultInfo *pSrc = &src->resultInfo[i]; + SResultRowCellInfo *pDst = getResultCell(pRuntimeEnv, dst, i); + SResultRowCellInfo *pSrc = getResultCell(pRuntimeEnv, src, i); - char *buf = pDst->interResultBuf; - memcpy(pDst, pSrc, sizeof(SResultInfo)); - pDst->interResultBuf = buf; // restore the allocated buffer +// char *buf = pDst->interResultBuf; + memcpy(pDst, pSrc, sizeof(SResultRowCellInfo) + pRuntimeEnv->pCtx[i].interBufBytes); +// pDst->interResultBuf = buf; // restore the allocated buffer // copy the result info struct - memcpy(pDst->interResultBuf, pSrc->interResultBuf, pDst->bufLen); +// memcpy(pDst->interResultBuf, pSrc->interResultBuf, pRuntimeEnv->pCtx[i].interBufBytes); // copy the output buffer data from src to dst, the position info keep unchanged - tFilePage *dstpage = getResBufPage(pRuntimeEnv->pResultBuf, dst->pos.pageId); + tFilePage *dstpage = getResBufPage(pRuntimeEnv->pResultBuf, dst->pageId); char * dstBuf = getPosInResultPage(pRuntimeEnv, i, dst, dstpage); - tFilePage *srcpage = getResBufPage(pRuntimeEnv->pResultBuf, src->pos.pageId); - char * srcBuf = getPosInResultPage(pRuntimeEnv, i, (SWindowResult *)src, srcpage); - size_t s = pRuntimeEnv->pQuery->pSelectExpr[i].bytes; + tFilePage *srcpage = getResBufPage(pRuntimeEnv->pResultBuf, src->pageId); + char * srcBuf = getPosInResultPage(pRuntimeEnv, i, (SResultRow *)src, srcpage); + size_t s = pRuntimeEnv->pQuery->pExpr1[i].bytes; memcpy(dstBuf, srcBuf, s); } } +SResultRowCellInfo* getResultCell(SQueryRuntimeEnv* pRuntimeEnv, const SResultRow* pRow, int32_t index) { + assert(index >= 0 && index < pRuntimeEnv->pQuery->numOfOutput); + return (SResultRowCellInfo*)((char*) pRow->pCellInfo + pRuntimeEnv->rowCellInfoOffset[index]); +} + +size_t getWindowResultSize(SQueryRuntimeEnv* pRuntimeEnv) { + return (pRuntimeEnv->pQuery->numOfOutput * sizeof(SResultRowCellInfo)) + pRuntimeEnv->interBufSize + sizeof(SResultRow); +} + +SResultRowPool* initResultRowPool(size_t size) { + SResultRowPool* p = calloc(1, sizeof(SResultRowPool)); + if (p == NULL) { + return NULL; + } + + p->numOfElemPerBlock = 128; + + p->elemSize = (int32_t) size; + p->blockSize = p->numOfElemPerBlock * p->elemSize; + p->position.pos = 0; + + p->pData = taosArrayInit(8, POINTER_BYTES); + return p; +} + +SResultRow* getNewResultRow(SResultRowPool* p) { + if (p == NULL) { + return NULL; + } + + void* ptr = NULL; + if (p->position.pos == 0) { + ptr = calloc(1, p->blockSize); + taosArrayPush(p->pData, &ptr); + + } else { + size_t last = taosArrayGetSize(p->pData); + + void** pBlock = taosArrayGet(p->pData, last - 1); + ptr = ((char*) (*pBlock)) + p->elemSize * p->position.pos; + } + + p->position.pos = (p->position.pos + 1)%p->numOfElemPerBlock; + initResultRow(ptr); + + return ptr; +} + +int64_t getResultRowPoolMemSize(SResultRowPool* p) { + if (p == NULL) { + return 0; + } + + return taosArrayGetSize(p->pData) * p->blockSize; +} + +int32_t getNumOfAllocatedResultRows(SResultRowPool* p) { + return (int32_t) taosArrayGetSize(p->pData) * 
p->numOfElemPerBlock; +} + +int32_t getNumOfUsedResultRows(SResultRowPool* p) { + return getNumOfAllocatedResultRows(p) - p->numOfElemPerBlock + p->position.pos; +} + +void* destroyResultRowPool(SResultRowPool* p) { + if (p == NULL) { + return NULL; + } + + size_t size = taosArrayGetSize(p->pData); + for(int32_t i = 0; i < size; ++i) { + void** ptr = taosArrayGet(p->pData, i); + tfree(*ptr); + } + + taosArrayDestroy(p->pData); + + tfree(p); + return NULL; +} diff --git a/src/query/src/sql.c b/src/query/src/sql.c index a18efdeb744ba039c685d5fc1067c95e3ca15d86..da2c56ee9e4ec3b4bb1ea9c323bdd4da39821048 100644 --- a/src/query/src/sql.c +++ b/src/query/src/sql.c @@ -97,27 +97,26 @@ #endif /************* Begin control #defines *****************************************/ #define YYCODETYPE unsigned short int -#define YYNOCODE 272 +#define YYNOCODE 274 #define YYACTIONTYPE unsigned short int #define ParseTOKENTYPE SStrToken typedef union { int yyinit; ParseTOKENTYPE yy0; - SSubclauseInfo* yy25; - tSQLExpr* yy66; - SCreateAcctSQL yy73; - int yy82; - SQuerySQL* yy150; - SCreateDBInfo yy158; - TAOS_FIELD yy181; - SLimitVal yy188; - tSQLExprList* yy224; - int64_t yy271; - tVariant yy312; - SIntervalVal yy314; - SCreateTableSQL* yy374; - tFieldList* yy449; - tVariantList* yy494; + int yy46; + tSQLExpr* yy64; + tVariant yy134; + SCreateAcctSQL yy149; + SArray* yy165; + int64_t yy207; + SLimitVal yy216; + TAOS_FIELD yy223; + SSubclauseInfo* yy231; + SCreateDBInfo yy268; + tSQLExprList* yy290; + SQuerySQL* yy414; + SCreateTableSQL* yy470; + SIntervalVal yy532; } YYMINORTYPE; #ifndef YYSTACKDEPTH #define YYSTACKDEPTH 100 @@ -127,17 +126,17 @@ typedef union { #define ParseARG_FETCH SSqlInfo* pInfo = yypParser->pInfo #define ParseARG_STORE yypParser->pInfo = pInfo #define YYFALLBACK 1 -#define YYNSTATE 252 -#define YYNRULE 230 -#define YYNTOKEN 206 -#define YY_MAX_SHIFT 251 -#define YY_MIN_SHIFTREDUCE 416 -#define YY_MAX_SHIFTREDUCE 645 -#define YY_ERROR_ACTION 646 -#define YY_ACCEPT_ACTION 647 -#define YY_NO_ACTION 648 -#define YY_MIN_REDUCE 649 -#define YY_MAX_REDUCE 878 +#define YYNSTATE 253 +#define YYNRULE 233 +#define YYNTOKEN 207 +#define YY_MAX_SHIFT 252 +#define YY_MIN_SHIFTREDUCE 420 +#define YY_MAX_SHIFTREDUCE 652 +#define YY_ERROR_ACTION 653 +#define YY_ACCEPT_ACTION 654 +#define YY_NO_ACTION 655 +#define YY_MIN_REDUCE 656 +#define YY_MAX_REDUCE 888 /************* End control #defines *******************************************/ /* Define the yytestcase() macro to be a no-op if is not already defined @@ -203,223 +202,224 @@ typedef union { ** yy_default[] Default action for each state. 
** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (566) +#define YY_ACTTAB_COUNT (571) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 751, 459, 11, 749, 750, 647, 251, 459, 752, 460, - /* 10 */ 754, 755, 753, 35, 36, 460, 37, 38, 159, 249, - /* 20 */ 170, 29, 141, 459, 206, 41, 39, 43, 40, 140, - /* 30 */ 145, 460, 865, 34, 33, 862, 141, 32, 31, 30, - /* 40 */ 35, 36, 781, 37, 38, 165, 866, 170, 29, 141, - /* 50 */ 62, 206, 41, 39, 43, 40, 191, 525, 164, 866, - /* 60 */ 34, 33, 27, 21, 32, 31, 30, 417, 418, 419, - /* 70 */ 420, 421, 422, 423, 424, 425, 426, 427, 428, 250, - /* 80 */ 35, 36, 181, 37, 38, 227, 226, 170, 29, 781, - /* 90 */ 176, 206, 41, 39, 43, 40, 174, 162, 767, 792, - /* 100 */ 34, 33, 56, 160, 32, 31, 30, 21, 36, 8, - /* 110 */ 37, 38, 63, 118, 170, 29, 770, 108, 206, 41, - /* 120 */ 39, 43, 40, 32, 31, 30, 599, 34, 33, 78, - /* 130 */ 875, 32, 31, 30, 238, 37, 38, 108, 238, 170, - /* 140 */ 29, 184, 766, 206, 41, 39, 43, 40, 188, 187, - /* 150 */ 789, 177, 34, 33, 224, 223, 32, 31, 30, 16, - /* 160 */ 218, 244, 243, 217, 216, 215, 242, 214, 241, 240, - /* 170 */ 239, 213, 747, 818, 735, 736, 737, 738, 739, 740, - /* 180 */ 741, 742, 743, 744, 745, 746, 169, 612, 103, 12, - /* 190 */ 603, 17, 606, 819, 609, 201, 169, 612, 26, 108, - /* 200 */ 603, 108, 606, 861, 609, 153, 169, 612, 173, 567, - /* 210 */ 603, 154, 606, 105, 609, 90, 89, 148, 166, 167, - /* 220 */ 34, 33, 205, 102, 32, 31, 30, 770, 166, 167, - /* 230 */ 26, 21, 557, 41, 39, 43, 40, 549, 166, 167, - /* 240 */ 194, 34, 33, 17, 193, 32, 31, 30, 860, 16, - /* 250 */ 26, 244, 243, 203, 21, 60, 242, 61, 241, 240, - /* 260 */ 239, 248, 247, 96, 175, 229, 767, 76, 80, 245, - /* 270 */ 190, 554, 21, 85, 88, 79, 18, 156, 121, 122, - /* 280 */ 605, 82, 608, 42, 70, 66, 69, 225, 770, 767, - /* 290 */ 135, 133, 601, 42, 611, 768, 93, 92, 91, 690, - /* 300 */ 168, 207, 131, 42, 611, 230, 545, 767, 546, 610, - /* 310 */ 699, 157, 691, 131, 611, 131, 604, 541, 607, 610, - /* 320 */ 538, 571, 539, 47, 540, 46, 580, 581, 602, 610, - /* 330 */ 572, 631, 613, 50, 14, 13, 13, 531, 543, 3, - /* 340 */ 544, 46, 48, 530, 75, 74, 811, 22, 178, 179, - /* 350 */ 51, 211, 10, 9, 829, 22, 87, 86, 101, 99, - /* 360 */ 158, 143, 144, 146, 147, 151, 152, 150, 139, 149, - /* 370 */ 769, 142, 828, 171, 825, 824, 172, 791, 761, 796, - /* 380 */ 228, 783, 798, 104, 810, 119, 120, 701, 117, 212, - /* 390 */ 615, 137, 24, 221, 698, 26, 222, 192, 874, 72, - /* 400 */ 873, 871, 123, 719, 25, 100, 23, 138, 566, 688, - /* 410 */ 81, 686, 83, 84, 684, 195, 780, 683, 161, 542, - /* 420 */ 180, 199, 132, 681, 680, 679, 52, 49, 678, 677, - /* 430 */ 109, 134, 44, 675, 204, 673, 671, 669, 667, 202, - /* 440 */ 200, 198, 196, 28, 136, 220, 57, 58, 812, 77, - /* 450 */ 231, 232, 233, 234, 235, 236, 237, 246, 209, 645, - /* 460 */ 53, 182, 183, 644, 110, 64, 67, 155, 186, 185, - /* 470 */ 682, 643, 94, 636, 676, 189, 126, 125, 720, 124, - /* 480 */ 127, 128, 130, 129, 95, 668, 1, 551, 193, 765, - /* 490 */ 2, 55, 113, 111, 114, 112, 115, 116, 59, 568, - /* 500 */ 163, 106, 197, 5, 573, 107, 6, 65, 614, 19, - /* 510 */ 4, 20, 15, 208, 616, 7, 210, 500, 496, 494, - /* 520 */ 493, 492, 489, 463, 219, 68, 45, 71, 73, 22, - /* 530 */ 527, 526, 524, 54, 484, 482, 474, 480, 476, 478, - /* 540 */ 472, 470, 499, 498, 497, 495, 491, 490, 46, 461, - /* 550 */ 432, 430, 649, 648, 648, 648, 648, 648, 648, 648, - /* 560 */ 648, 648, 648, 648, 97, 98, + /* 0 */ 108, 463, 141, 11, 654, 252, 802, 463, 140, 
464, + /* 10 */ 162, 165, 876, 35, 36, 464, 37, 38, 159, 250, + /* 20 */ 170, 29, 141, 463, 206, 41, 39, 43, 40, 173, + /* 30 */ 780, 464, 875, 34, 33, 145, 141, 32, 31, 30, + /* 40 */ 35, 36, 791, 37, 38, 164, 876, 170, 29, 780, + /* 50 */ 21, 206, 41, 39, 43, 40, 191, 829, 799, 201, + /* 60 */ 34, 33, 21, 21, 32, 31, 30, 421, 422, 423, + /* 70 */ 424, 425, 426, 427, 428, 429, 430, 431, 432, 251, + /* 80 */ 35, 36, 181, 37, 38, 532, 776, 170, 29, 238, + /* 90 */ 246, 206, 41, 39, 43, 40, 174, 175, 777, 777, + /* 100 */ 34, 33, 872, 56, 32, 31, 30, 176, 871, 36, + /* 110 */ 780, 37, 38, 227, 226, 170, 29, 791, 17, 206, + /* 120 */ 41, 39, 43, 40, 108, 26, 870, 606, 34, 33, + /* 130 */ 78, 160, 32, 31, 30, 238, 157, 16, 218, 245, + /* 140 */ 244, 217, 216, 215, 243, 214, 242, 241, 240, 213, + /* 150 */ 239, 755, 103, 743, 744, 745, 746, 747, 748, 749, + /* 160 */ 750, 751, 752, 753, 754, 756, 37, 38, 229, 177, + /* 170 */ 170, 29, 224, 223, 206, 41, 39, 43, 40, 203, + /* 180 */ 62, 60, 8, 34, 33, 63, 118, 32, 31, 30, + /* 190 */ 169, 619, 27, 12, 610, 184, 613, 158, 616, 778, + /* 200 */ 169, 619, 188, 187, 610, 194, 613, 108, 616, 153, + /* 210 */ 169, 619, 561, 108, 610, 154, 613, 18, 616, 90, + /* 220 */ 89, 148, 166, 167, 34, 33, 205, 143, 32, 31, + /* 230 */ 30, 697, 166, 167, 131, 144, 564, 41, 39, 43, + /* 240 */ 40, 706, 166, 167, 131, 34, 33, 146, 17, 32, + /* 250 */ 31, 30, 32, 31, 30, 26, 16, 207, 245, 244, + /* 260 */ 21, 587, 588, 243, 828, 242, 241, 240, 698, 239, + /* 270 */ 61, 131, 76, 80, 147, 190, 102, 151, 85, 88, + /* 280 */ 79, 760, 156, 26, 758, 759, 82, 21, 42, 761, + /* 290 */ 556, 763, 764, 762, 225, 765, 777, 193, 42, 618, + /* 300 */ 249, 248, 96, 574, 121, 122, 608, 105, 42, 618, + /* 310 */ 70, 66, 69, 578, 617, 168, 579, 46, 152, 618, + /* 320 */ 14, 230, 548, 777, 617, 545, 638, 546, 150, 547, + /* 330 */ 13, 135, 133, 612, 617, 615, 139, 93, 92, 91, + /* 340 */ 620, 611, 609, 614, 13, 47, 538, 622, 50, 552, + /* 350 */ 46, 553, 537, 178, 179, 3, 22, 211, 75, 74, + /* 360 */ 149, 22, 10, 9, 48, 51, 142, 550, 885, 551, + /* 370 */ 87, 86, 101, 99, 779, 839, 838, 171, 835, 834, + /* 380 */ 172, 801, 771, 228, 806, 793, 808, 104, 821, 119, + /* 390 */ 820, 117, 120, 708, 212, 137, 24, 221, 705, 222, + /* 400 */ 26, 192, 100, 884, 72, 883, 881, 123, 726, 25, + /* 410 */ 573, 23, 138, 695, 49, 81, 693, 83, 84, 691, + /* 420 */ 790, 690, 195, 161, 199, 549, 57, 52, 180, 132, + /* 430 */ 688, 687, 686, 685, 684, 134, 682, 109, 680, 678, + /* 440 */ 44, 676, 674, 136, 204, 202, 58, 822, 200, 198, + /* 450 */ 196, 220, 77, 28, 231, 232, 233, 235, 652, 234, + /* 460 */ 236, 237, 247, 209, 183, 53, 651, 182, 185, 186, + /* 470 */ 64, 67, 155, 650, 643, 189, 193, 689, 558, 94, + /* 480 */ 683, 675, 126, 125, 727, 129, 124, 127, 128, 95, + /* 490 */ 130, 1, 114, 110, 111, 775, 2, 55, 59, 116, + /* 500 */ 112, 113, 115, 575, 106, 163, 197, 5, 580, 107, + /* 510 */ 6, 65, 621, 19, 4, 20, 15, 208, 623, 7, + /* 520 */ 210, 504, 500, 498, 497, 496, 493, 467, 219, 68, + /* 530 */ 45, 71, 73, 22, 534, 533, 531, 488, 54, 486, + /* 540 */ 478, 484, 480, 482, 476, 474, 505, 503, 502, 501, + /* 550 */ 499, 495, 494, 46, 465, 436, 434, 656, 655, 655, + /* 560 */ 655, 655, 655, 655, 655, 655, 655, 655, 655, 97, + /* 570 */ 98, }; static const YYCODETYPE yy_lookahead[] = { - /* 0 */ 226, 1, 260, 229, 230, 207, 208, 1, 234, 9, - /* 10 */ 236, 237, 238, 13, 14, 9, 16, 17, 209, 210, - /* 20 */ 20, 21, 260, 1, 24, 25, 26, 27, 28, 260, - /* 30 */ 260, 9, 270, 33, 34, 260, 260, 37, 38, 39, - 
/* 40 */ 13, 14, 244, 16, 17, 269, 270, 20, 21, 260, - /* 50 */ 247, 24, 25, 26, 27, 28, 258, 5, 269, 270, - /* 60 */ 33, 34, 259, 210, 37, 38, 39, 45, 46, 47, + /* 0 */ 211, 1, 262, 262, 208, 209, 211, 1, 262, 9, + /* 10 */ 228, 271, 272, 13, 14, 9, 16, 17, 210, 211, + /* 20 */ 20, 21, 262, 1, 24, 25, 26, 27, 28, 228, + /* 30 */ 248, 9, 272, 33, 34, 262, 262, 37, 38, 39, + /* 40 */ 13, 14, 246, 16, 17, 271, 272, 20, 21, 248, + /* 50 */ 211, 24, 25, 26, 27, 28, 260, 268, 263, 270, + /* 60 */ 33, 34, 211, 211, 37, 38, 39, 45, 46, 47, /* 70 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - /* 80 */ 13, 14, 60, 16, 17, 33, 34, 20, 21, 244, - /* 90 */ 66, 24, 25, 26, 27, 28, 243, 227, 245, 210, - /* 100 */ 33, 34, 102, 258, 37, 38, 39, 210, 14, 98, - /* 110 */ 16, 17, 101, 102, 20, 21, 246, 210, 24, 25, - /* 120 */ 26, 27, 28, 37, 38, 39, 99, 33, 34, 73, - /* 130 */ 246, 37, 38, 39, 78, 16, 17, 210, 78, 20, - /* 140 */ 21, 126, 245, 24, 25, 26, 27, 28, 133, 134, - /* 150 */ 261, 127, 33, 34, 130, 131, 37, 38, 39, 85, - /* 160 */ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, - /* 170 */ 96, 97, 226, 266, 228, 229, 230, 231, 232, 233, - /* 180 */ 234, 235, 236, 237, 238, 239, 1, 2, 210, 44, - /* 190 */ 5, 98, 7, 266, 9, 268, 1, 2, 105, 210, - /* 200 */ 5, 210, 7, 260, 9, 60, 1, 2, 227, 99, - /* 210 */ 5, 66, 7, 103, 9, 70, 71, 72, 33, 34, - /* 220 */ 33, 34, 37, 98, 37, 38, 39, 246, 33, 34, - /* 230 */ 105, 210, 37, 25, 26, 27, 28, 99, 33, 34, - /* 240 */ 262, 33, 34, 98, 106, 37, 38, 39, 260, 85, - /* 250 */ 105, 87, 88, 264, 210, 266, 92, 266, 94, 95, - /* 260 */ 96, 63, 64, 65, 243, 210, 245, 61, 62, 227, - /* 270 */ 125, 103, 210, 67, 68, 69, 108, 132, 61, 62, - /* 280 */ 5, 75, 7, 98, 67, 68, 69, 243, 246, 245, - /* 290 */ 61, 62, 1, 98, 109, 240, 67, 68, 69, 214, - /* 300 */ 59, 15, 217, 98, 109, 243, 5, 245, 7, 124, - /* 310 */ 214, 260, 214, 217, 109, 217, 5, 2, 7, 124, - /* 320 */ 5, 99, 7, 103, 9, 103, 115, 116, 37, 124, - /* 330 */ 99, 99, 99, 103, 103, 103, 103, 99, 5, 98, - /* 340 */ 7, 103, 122, 99, 128, 129, 267, 103, 33, 34, - /* 350 */ 120, 99, 128, 129, 241, 103, 73, 74, 61, 62, - /* 360 */ 260, 260, 260, 260, 260, 260, 260, 260, 260, 260, - /* 370 */ 246, 260, 241, 241, 241, 241, 241, 210, 242, 210, - /* 380 */ 241, 244, 210, 210, 267, 210, 210, 210, 248, 210, - /* 390 */ 104, 210, 210, 210, 210, 105, 210, 244, 210, 210, - /* 400 */ 210, 210, 210, 210, 210, 59, 210, 210, 109, 210, - /* 410 */ 210, 210, 210, 210, 210, 263, 257, 210, 263, 104, - /* 420 */ 210, 263, 210, 210, 210, 210, 119, 121, 210, 210, - /* 430 */ 256, 210, 118, 210, 113, 210, 210, 210, 210, 117, - /* 440 */ 112, 111, 110, 123, 210, 76, 211, 211, 211, 84, - /* 450 */ 83, 49, 80, 82, 53, 81, 79, 76, 211, 5, - /* 460 */ 211, 135, 5, 5, 255, 215, 215, 211, 5, 135, - /* 470 */ 211, 5, 212, 86, 211, 126, 219, 223, 225, 224, - /* 480 */ 222, 220, 218, 221, 212, 211, 216, 99, 106, 244, - /* 490 */ 213, 107, 252, 254, 251, 253, 250, 249, 103, 99, - /* 500 */ 1, 98, 98, 114, 99, 98, 114, 73, 99, 103, - /* 510 */ 98, 103, 98, 100, 104, 98, 100, 9, 5, 5, - /* 520 */ 5, 5, 5, 77, 15, 73, 16, 129, 129, 103, - /* 530 */ 5, 5, 99, 98, 5, 5, 5, 5, 5, 5, - /* 540 */ 5, 5, 5, 5, 5, 5, 5, 5, 103, 77, - /* 550 */ 59, 58, 0, 271, 271, 271, 271, 271, 271, 271, - /* 560 */ 271, 271, 271, 271, 21, 21, 271, 271, 271, 271, - /* 570 */ 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, - /* 580 */ 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, - /* 590 */ 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, - /* 600 */ 271, 271, 271, 271, 271, 271, 
271, 271, 271, 271, - /* 610 */ 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, - /* 620 */ 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, - /* 630 */ 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, - /* 640 */ 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, - /* 650 */ 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, - /* 660 */ 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, - /* 670 */ 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, - /* 680 */ 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, - /* 690 */ 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, - /* 700 */ 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, - /* 710 */ 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, - /* 720 */ 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, - /* 730 */ 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, - /* 740 */ 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, - /* 750 */ 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, - /* 760 */ 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, - /* 770 */ 271, 271, + /* 80 */ 13, 14, 60, 16, 17, 5, 247, 20, 21, 78, + /* 90 */ 228, 24, 25, 26, 27, 28, 245, 245, 247, 247, + /* 100 */ 33, 34, 262, 103, 37, 38, 39, 66, 262, 14, + /* 110 */ 248, 16, 17, 33, 34, 20, 21, 246, 99, 24, + /* 120 */ 25, 26, 27, 28, 211, 106, 262, 100, 33, 34, + /* 130 */ 73, 260, 37, 38, 39, 78, 262, 85, 86, 87, + /* 140 */ 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, + /* 150 */ 98, 227, 211, 229, 230, 231, 232, 233, 234, 235, + /* 160 */ 236, 237, 238, 239, 240, 241, 16, 17, 211, 128, + /* 170 */ 20, 21, 131, 132, 24, 25, 26, 27, 28, 266, + /* 180 */ 249, 268, 99, 33, 34, 102, 103, 37, 38, 39, + /* 190 */ 1, 2, 261, 44, 5, 127, 7, 262, 9, 242, + /* 200 */ 1, 2, 134, 135, 5, 264, 7, 211, 9, 60, + /* 210 */ 1, 2, 104, 211, 5, 66, 7, 109, 9, 70, + /* 220 */ 71, 72, 33, 34, 33, 34, 37, 262, 37, 38, + /* 230 */ 39, 215, 33, 34, 218, 262, 37, 25, 26, 27, + /* 240 */ 28, 215, 33, 34, 218, 33, 34, 262, 99, 37, + /* 250 */ 38, 39, 37, 38, 39, 106, 85, 15, 87, 88, + /* 260 */ 211, 116, 117, 92, 268, 94, 95, 96, 215, 98, + /* 270 */ 268, 218, 61, 62, 262, 126, 99, 262, 67, 68, + /* 280 */ 69, 227, 133, 106, 230, 231, 75, 211, 99, 235, + /* 290 */ 100, 237, 238, 239, 245, 241, 247, 107, 99, 110, + /* 300 */ 63, 64, 65, 100, 61, 62, 1, 104, 99, 110, + /* 310 */ 67, 68, 69, 100, 125, 59, 100, 104, 262, 110, + /* 320 */ 104, 245, 2, 247, 125, 5, 100, 7, 262, 9, + /* 330 */ 104, 61, 62, 5, 125, 7, 262, 67, 68, 69, + /* 340 */ 100, 5, 37, 7, 104, 104, 100, 105, 104, 5, + /* 350 */ 104, 7, 100, 33, 34, 99, 104, 100, 129, 130, + /* 360 */ 262, 104, 129, 130, 123, 121, 262, 5, 248, 7, + /* 370 */ 73, 74, 61, 62, 248, 243, 243, 243, 243, 243, + /* 380 */ 243, 211, 244, 243, 211, 246, 211, 211, 269, 211, + /* 390 */ 269, 250, 211, 211, 211, 211, 211, 211, 211, 211, + /* 400 */ 106, 246, 59, 211, 211, 211, 211, 211, 211, 211, + /* 410 */ 110, 211, 211, 211, 122, 211, 211, 211, 211, 211, + /* 420 */ 259, 211, 265, 265, 265, 105, 212, 120, 211, 211, + /* 430 */ 211, 211, 211, 211, 211, 211, 211, 258, 211, 211, + /* 440 */ 119, 211, 211, 211, 114, 118, 212, 212, 113, 112, + /* 450 */ 111, 76, 84, 124, 83, 49, 80, 53, 5, 82, + /* 460 */ 81, 79, 76, 212, 5, 212, 5, 136, 136, 5, + /* 470 */ 216, 216, 212, 5, 86, 127, 107, 212, 100, 213, + /* 480 */ 212, 212, 220, 224, 226, 222, 225, 223, 221, 213, + /* 490 */ 219, 217, 253, 257, 256, 246, 214, 108, 104, 251, + /* 500 */ 255, 254, 252, 100, 99, 1, 99, 115, 100, 99, + /* 510 */ 115, 73, 100, 104, 99, 104, 99, 101, 105, 99, + /* 520 */ 101, 9, 5, 
5, 5, 5, 5, 77, 15, 73, + /* 530 */ 16, 130, 130, 104, 5, 5, 100, 5, 99, 5, + /* 540 */ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + /* 550 */ 5, 5, 5, 104, 77, 59, 58, 0, 273, 273, + /* 560 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 21, + /* 570 */ 21, 273, 273, 273, 273, 273, 273, 273, 273, 273, + /* 580 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, + /* 590 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, + /* 600 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, + /* 610 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, + /* 620 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, + /* 630 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, + /* 640 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, + /* 650 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, + /* 660 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, + /* 670 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, + /* 680 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, + /* 690 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, + /* 700 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, + /* 710 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, + /* 720 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, + /* 730 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, + /* 740 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, + /* 750 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, + /* 760 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, + /* 770 */ 273, 273, 273, 273, 273, 273, 273, 273, }; -#define YY_SHIFT_COUNT (251) +#define YY_SHIFT_COUNT (252) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (552) +#define YY_SHIFT_MAX (557) static const unsigned short int yy_shift_ofst[] = { - /* 0 */ 145, 74, 164, 185, 205, 6, 6, 6, 6, 6, - /* 10 */ 6, 0, 22, 205, 315, 315, 315, 93, 6, 6, - /* 20 */ 6, 6, 6, 56, 60, 60, 566, 195, 205, 205, - /* 30 */ 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, - /* 40 */ 205, 205, 205, 205, 205, 315, 315, 52, 52, 52, - /* 50 */ 52, 52, 52, 11, 52, 125, 6, 6, 6, 6, - /* 60 */ 211, 211, 168, 6, 6, 6, 6, 6, 6, 6, + /* 0 */ 149, 52, 171, 189, 209, 6, 6, 6, 6, 6, + /* 10 */ 6, 0, 22, 209, 320, 320, 320, 19, 6, 6, + /* 20 */ 6, 6, 6, 57, 11, 11, 571, 199, 209, 209, + /* 30 */ 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, + /* 40 */ 209, 209, 209, 209, 209, 320, 320, 80, 80, 80, + /* 50 */ 80, 80, 80, 83, 80, 177, 6, 6, 6, 6, + /* 60 */ 145, 145, 108, 6, 6, 6, 6, 6, 6, 6, /* 70 */ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, /* 80 */ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, /* 90 */ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - /* 100 */ 6, 6, 290, 346, 346, 299, 299, 299, 346, 307, - /* 110 */ 306, 314, 321, 322, 328, 330, 332, 320, 290, 346, - /* 120 */ 346, 369, 369, 346, 365, 367, 402, 372, 371, 401, - /* 130 */ 374, 377, 346, 381, 346, 381, 346, 566, 566, 27, - /* 140 */ 67, 67, 67, 94, 119, 208, 208, 208, 206, 187, - /* 150 */ 187, 187, 187, 217, 229, 24, 15, 86, 86, 198, - /* 160 */ 138, 110, 222, 231, 232, 233, 275, 311, 291, 241, - /* 170 */ 286, 220, 230, 238, 244, 252, 216, 224, 301, 333, - /* 180 */ 283, 297, 454, 326, 457, 458, 334, 463, 466, 387, - /* 190 */ 349, 382, 388, 384, 395, 400, 403, 499, 404, 405, - /* 200 */ 407, 406, 389, 408, 392, 409, 412, 410, 414, 413, - /* 210 */ 417, 416, 434, 508, 513, 514, 515, 516, 517, 446, - /* 220 */ 509, 452, 510, 398, 399, 426, 525, 526, 433, 435, - /* 230 */ 426, 529, 530, 531, 532, 533, 534, 535, 536, 537, - /* 240 */ 538, 539, 540, 541, 542, 445, 472, 543, 544, 491, - /* 250 */ 493, 552, + /* 100 */ 6, 6, 294, 343, 343, 300, 300, 300, 
343, 307, + /* 110 */ 292, 321, 330, 327, 335, 337, 339, 329, 294, 343, + /* 120 */ 343, 375, 375, 343, 368, 371, 406, 376, 377, 404, + /* 130 */ 379, 382, 343, 386, 343, 386, 343, 571, 571, 27, + /* 140 */ 67, 67, 67, 95, 150, 212, 212, 212, 211, 191, + /* 150 */ 191, 191, 191, 243, 270, 41, 68, 215, 215, 237, + /* 160 */ 190, 203, 213, 216, 226, 240, 328, 336, 305, 256, + /* 170 */ 242, 241, 244, 246, 252, 257, 229, 233, 344, 362, + /* 180 */ 297, 311, 453, 331, 459, 461, 332, 464, 468, 388, + /* 190 */ 348, 369, 378, 389, 394, 403, 405, 504, 407, 408, + /* 200 */ 410, 409, 392, 411, 395, 412, 415, 413, 417, 416, + /* 210 */ 420, 419, 438, 512, 517, 518, 519, 520, 521, 450, + /* 220 */ 513, 456, 514, 401, 402, 429, 529, 530, 436, 439, + /* 230 */ 429, 532, 534, 535, 536, 537, 538, 539, 540, 541, + /* 240 */ 542, 543, 544, 545, 546, 547, 449, 477, 548, 549, + /* 250 */ 496, 498, 557, }; #define YY_REDUCE_COUNT (138) -#define YY_REDUCE_MIN (-258) -#define YY_REDUCE_MAX (277) +#define YY_REDUCE_MIN (-260) +#define YY_REDUCE_MAX (282) static const short yy_reduce_ofst[] = { - /* 0 */ -202, -54, -226, -224, -211, -73, -11, -147, 21, 44, - /* 10 */ 62, -111, -191, -238, -130, -19, 42, -155, -22, -93, - /* 20 */ -9, 55, -103, 85, 96, 98, -197, -258, -231, -230, - /* 30 */ -225, -57, -12, 51, 100, 101, 102, 103, 104, 105, - /* 40 */ 106, 107, 108, 109, 111, -116, 124, 113, 131, 132, - /* 50 */ 133, 134, 135, 136, 139, 137, 167, 169, 172, 173, - /* 60 */ 79, 117, 140, 175, 176, 177, 179, 181, 182, 183, - /* 70 */ 184, 186, 188, 189, 190, 191, 192, 193, 194, 196, - /* 80 */ 197, 199, 200, 201, 202, 203, 204, 207, 210, 212, - /* 90 */ 213, 214, 215, 218, 219, 221, 223, 225, 226, 227, - /* 100 */ 228, 234, 153, 235, 236, 152, 155, 158, 237, 159, - /* 110 */ 174, 209, 239, 242, 240, 243, 246, 248, 245, 247, - /* 120 */ 249, 250, 251, 256, 253, 255, 254, 257, 258, 261, - /* 130 */ 262, 264, 259, 260, 263, 272, 274, 270, 277, + /* 0 */ -204, -76, 54, -260, -226, -211, -87, -149, -148, 49, + /* 10 */ 76, -205, -192, -240, -218, -199, -138, -129, -59, -4, + /* 20 */ 2, -43, -161, 16, 26, 53, -69, -259, -254, -227, + /* 30 */ -160, -154, -136, -126, -65, -35, -27, -15, 12, 15, + /* 40 */ 56, 66, 74, 98, 104, 120, 126, 132, 133, 134, + /* 50 */ 135, 136, 137, 138, 140, 139, 170, 173, 175, 176, + /* 60 */ 119, 121, 141, 178, 181, 182, 183, 184, 185, 186, + /* 70 */ 187, 188, 192, 193, 194, 195, 196, 197, 198, 200, + /* 80 */ 201, 202, 204, 205, 206, 207, 208, 210, 217, 218, + /* 90 */ 219, 220, 221, 222, 223, 224, 225, 227, 228, 230, + /* 100 */ 231, 232, 155, 214, 234, 157, 158, 159, 235, 161, + /* 110 */ 179, 236, 238, 245, 247, 239, 250, 248, 249, 251, + /* 120 */ 253, 254, 255, 260, 258, 261, 259, 262, 264, 267, + /* 130 */ 263, 271, 265, 266, 268, 276, 269, 274, 282, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 646, 700, 689, 868, 868, 646, 646, 646, 646, 646, - /* 10 */ 646, 793, 664, 868, 646, 646, 646, 646, 646, 646, - /* 20 */ 646, 646, 646, 702, 702, 702, 788, 646, 646, 646, - /* 30 */ 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, - /* 40 */ 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, - /* 50 */ 646, 646, 646, 646, 646, 646, 646, 795, 797, 646, - /* 60 */ 815, 815, 786, 646, 646, 646, 646, 646, 646, 646, - /* 70 */ 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, - /* 80 */ 646, 687, 646, 685, 646, 646, 646, 646, 646, 646, - /* 90 */ 646, 646, 646, 646, 646, 646, 674, 646, 646, 646, - /* 100 */ 646, 646, 646, 666, 666, 646, 646, 646, 666, 822, - /* 110 */ 826, 820, 808, 816, 
807, 803, 802, 830, 646, 666, - /* 120 */ 666, 697, 697, 666, 718, 716, 714, 706, 712, 708, - /* 130 */ 710, 704, 666, 695, 666, 695, 666, 734, 748, 646, - /* 140 */ 831, 867, 821, 857, 856, 863, 855, 854, 646, 850, - /* 150 */ 851, 853, 852, 646, 646, 646, 646, 859, 858, 646, - /* 160 */ 646, 646, 646, 646, 646, 646, 646, 646, 646, 833, - /* 170 */ 646, 827, 823, 646, 646, 646, 646, 646, 646, 646, - /* 180 */ 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, - /* 190 */ 646, 785, 646, 646, 794, 646, 646, 646, 646, 646, - /* 200 */ 646, 817, 646, 809, 646, 646, 646, 646, 646, 646, - /* 210 */ 646, 762, 646, 646, 646, 646, 646, 646, 646, 646, - /* 220 */ 646, 646, 646, 646, 646, 872, 646, 646, 646, 756, - /* 230 */ 870, 646, 646, 646, 646, 646, 646, 646, 646, 646, - /* 240 */ 646, 646, 646, 646, 646, 721, 646, 672, 670, 646, - /* 250 */ 662, 646, + /* 0 */ 653, 707, 696, 878, 878, 653, 653, 653, 653, 653, + /* 10 */ 653, 803, 671, 878, 653, 653, 653, 653, 653, 653, + /* 20 */ 653, 653, 653, 709, 709, 709, 798, 653, 653, 653, + /* 30 */ 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, + /* 40 */ 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, + /* 50 */ 653, 653, 653, 653, 653, 653, 653, 805, 807, 653, + /* 60 */ 825, 825, 796, 653, 653, 653, 653, 653, 653, 653, + /* 70 */ 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, + /* 80 */ 653, 694, 653, 692, 653, 653, 653, 653, 653, 653, + /* 90 */ 653, 653, 653, 653, 653, 653, 681, 653, 653, 653, + /* 100 */ 653, 653, 653, 673, 673, 653, 653, 653, 673, 832, + /* 110 */ 836, 830, 818, 826, 817, 813, 812, 840, 653, 673, + /* 120 */ 673, 704, 704, 673, 725, 723, 721, 713, 719, 715, + /* 130 */ 717, 711, 673, 702, 673, 702, 673, 742, 757, 653, + /* 140 */ 841, 877, 831, 867, 866, 873, 865, 864, 653, 860, + /* 150 */ 861, 863, 862, 653, 653, 653, 653, 869, 868, 653, + /* 160 */ 653, 653, 653, 653, 653, 653, 653, 653, 653, 843, + /* 170 */ 653, 837, 833, 653, 653, 653, 653, 653, 653, 653, + /* 180 */ 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, + /* 190 */ 653, 795, 653, 653, 804, 653, 653, 653, 653, 653, + /* 200 */ 653, 827, 653, 819, 653, 653, 653, 653, 653, 653, + /* 210 */ 653, 772, 653, 653, 653, 653, 653, 653, 653, 653, + /* 220 */ 653, 653, 653, 653, 653, 882, 653, 653, 653, 766, + /* 230 */ 880, 653, 653, 653, 653, 653, 653, 653, 653, 653, + /* 240 */ 653, 653, 653, 653, 653, 653, 728, 653, 679, 677, + /* 250 */ 653, 669, 653, }; /********** End of lemon-generated parsing tables *****************************/ @@ -537,6 +537,7 @@ static const YYCODETYPE yyFallback[] = { 0, /* FSYNC => nothing */ 0, /* COMP => nothing */ 0, /* PRECISION => nothing */ + 0, /* UPDATE => nothing */ 0, /* LP => nothing */ 0, /* RP => nothing */ 0, /* TAGS => nothing */ @@ -829,179 +830,181 @@ static const char *const yyTokenName[] = { /* 95 */ "FSYNC", /* 96 */ "COMP", /* 97 */ "PRECISION", - /* 98 */ "LP", - /* 99 */ "RP", - /* 100 */ "TAGS", - /* 101 */ "USING", - /* 102 */ "AS", - /* 103 */ "COMMA", - /* 104 */ "NULL", - /* 105 */ "SELECT", - /* 106 */ "UNION", - /* 107 */ "ALL", - /* 108 */ "FROM", - /* 109 */ "VARIABLE", - /* 110 */ "INTERVAL", - /* 111 */ "FILL", - /* 112 */ "SLIDING", - /* 113 */ "ORDER", - /* 114 */ "BY", - /* 115 */ "ASC", - /* 116 */ "DESC", - /* 117 */ "GROUP", - /* 118 */ "HAVING", - /* 119 */ "LIMIT", - /* 120 */ "OFFSET", - /* 121 */ "SLIMIT", - /* 122 */ "SOFFSET", - /* 123 */ "WHERE", - /* 124 */ "NOW", - /* 125 */ "RESET", - /* 126 */ "QUERY", - /* 127 */ "ADD", - /* 128 */ "COLUMN", - /* 129 */ "TAG", - /* 130 */ "CHANGE", 
- /* 131 */ "SET", - /* 132 */ "KILL", - /* 133 */ "CONNECTION", - /* 134 */ "STREAM", - /* 135 */ "COLON", - /* 136 */ "ABORT", - /* 137 */ "AFTER", - /* 138 */ "ATTACH", - /* 139 */ "BEFORE", - /* 140 */ "BEGIN", - /* 141 */ "CASCADE", - /* 142 */ "CLUSTER", - /* 143 */ "CONFLICT", - /* 144 */ "COPY", - /* 145 */ "DEFERRED", - /* 146 */ "DELIMITERS", - /* 147 */ "DETACH", - /* 148 */ "EACH", - /* 149 */ "END", - /* 150 */ "EXPLAIN", - /* 151 */ "FAIL", - /* 152 */ "FOR", - /* 153 */ "IGNORE", - /* 154 */ "IMMEDIATE", - /* 155 */ "INITIALLY", - /* 156 */ "INSTEAD", - /* 157 */ "MATCH", - /* 158 */ "KEY", - /* 159 */ "OF", - /* 160 */ "RAISE", - /* 161 */ "REPLACE", - /* 162 */ "RESTRICT", - /* 163 */ "ROW", - /* 164 */ "STATEMENT", - /* 165 */ "TRIGGER", - /* 166 */ "VIEW", - /* 167 */ "COUNT", - /* 168 */ "SUM", - /* 169 */ "AVG", - /* 170 */ "MIN", - /* 171 */ "MAX", - /* 172 */ "FIRST", - /* 173 */ "LAST", - /* 174 */ "TOP", - /* 175 */ "BOTTOM", - /* 176 */ "STDDEV", - /* 177 */ "PERCENTILE", - /* 178 */ "APERCENTILE", - /* 179 */ "LEASTSQUARES", - /* 180 */ "HISTOGRAM", - /* 181 */ "DIFF", - /* 182 */ "SPREAD", - /* 183 */ "TWA", - /* 184 */ "INTERP", - /* 185 */ "LAST_ROW", - /* 186 */ "RATE", - /* 187 */ "IRATE", - /* 188 */ "SUM_RATE", - /* 189 */ "SUM_IRATE", - /* 190 */ "AVG_RATE", - /* 191 */ "AVG_IRATE", - /* 192 */ "TBID", - /* 193 */ "SEMI", - /* 194 */ "NONE", - /* 195 */ "PREV", - /* 196 */ "LINEAR", - /* 197 */ "IMPORT", - /* 198 */ "METRIC", - /* 199 */ "TBNAME", - /* 200 */ "JOIN", - /* 201 */ "METRICS", - /* 202 */ "STABLE", - /* 203 */ "INSERT", - /* 204 */ "INTO", - /* 205 */ "VALUES", - /* 206 */ "error", - /* 207 */ "program", - /* 208 */ "cmd", - /* 209 */ "dbPrefix", - /* 210 */ "ids", - /* 211 */ "cpxName", - /* 212 */ "ifexists", - /* 213 */ "alter_db_optr", - /* 214 */ "acct_optr", - /* 215 */ "ifnotexists", - /* 216 */ "db_optr", - /* 217 */ "pps", - /* 218 */ "tseries", - /* 219 */ "dbs", - /* 220 */ "streams", - /* 221 */ "storage", - /* 222 */ "qtime", - /* 223 */ "users", - /* 224 */ "conns", - /* 225 */ "state", - /* 226 */ "keep", - /* 227 */ "tagitemlist", - /* 228 */ "cache", - /* 229 */ "replica", - /* 230 */ "quorum", - /* 231 */ "days", - /* 232 */ "minrows", - /* 233 */ "maxrows", - /* 234 */ "blocks", - /* 235 */ "ctime", - /* 236 */ "wal", - /* 237 */ "fsync", - /* 238 */ "comp", - /* 239 */ "prec", - /* 240 */ "typename", - /* 241 */ "signed", - /* 242 */ "create_table_args", - /* 243 */ "columnlist", - /* 244 */ "select", - /* 245 */ "column", - /* 246 */ "tagitem", - /* 247 */ "selcollist", - /* 248 */ "from", - /* 249 */ "where_opt", - /* 250 */ "interval_opt", - /* 251 */ "fill_opt", - /* 252 */ "sliding_opt", - /* 253 */ "groupby_opt", - /* 254 */ "orderby_opt", - /* 255 */ "having_opt", - /* 256 */ "slimit_opt", - /* 257 */ "limit_opt", - /* 258 */ "union", - /* 259 */ "sclp", - /* 260 */ "expr", - /* 261 */ "as", - /* 262 */ "tablelist", - /* 263 */ "tmvar", - /* 264 */ "sortlist", - /* 265 */ "sortitem", - /* 266 */ "item", - /* 267 */ "sortorder", - /* 268 */ "grouplist", - /* 269 */ "exprlist", - /* 270 */ "expritem", + /* 98 */ "UPDATE", + /* 99 */ "LP", + /* 100 */ "RP", + /* 101 */ "TAGS", + /* 102 */ "USING", + /* 103 */ "AS", + /* 104 */ "COMMA", + /* 105 */ "NULL", + /* 106 */ "SELECT", + /* 107 */ "UNION", + /* 108 */ "ALL", + /* 109 */ "FROM", + /* 110 */ "VARIABLE", + /* 111 */ "INTERVAL", + /* 112 */ "FILL", + /* 113 */ "SLIDING", + /* 114 */ "ORDER", + /* 115 */ "BY", + /* 116 */ "ASC", + /* 117 */ "DESC", + /* 118 */ 
"GROUP", + /* 119 */ "HAVING", + /* 120 */ "LIMIT", + /* 121 */ "OFFSET", + /* 122 */ "SLIMIT", + /* 123 */ "SOFFSET", + /* 124 */ "WHERE", + /* 125 */ "NOW", + /* 126 */ "RESET", + /* 127 */ "QUERY", + /* 128 */ "ADD", + /* 129 */ "COLUMN", + /* 130 */ "TAG", + /* 131 */ "CHANGE", + /* 132 */ "SET", + /* 133 */ "KILL", + /* 134 */ "CONNECTION", + /* 135 */ "STREAM", + /* 136 */ "COLON", + /* 137 */ "ABORT", + /* 138 */ "AFTER", + /* 139 */ "ATTACH", + /* 140 */ "BEFORE", + /* 141 */ "BEGIN", + /* 142 */ "CASCADE", + /* 143 */ "CLUSTER", + /* 144 */ "CONFLICT", + /* 145 */ "COPY", + /* 146 */ "DEFERRED", + /* 147 */ "DELIMITERS", + /* 148 */ "DETACH", + /* 149 */ "EACH", + /* 150 */ "END", + /* 151 */ "EXPLAIN", + /* 152 */ "FAIL", + /* 153 */ "FOR", + /* 154 */ "IGNORE", + /* 155 */ "IMMEDIATE", + /* 156 */ "INITIALLY", + /* 157 */ "INSTEAD", + /* 158 */ "MATCH", + /* 159 */ "KEY", + /* 160 */ "OF", + /* 161 */ "RAISE", + /* 162 */ "REPLACE", + /* 163 */ "RESTRICT", + /* 164 */ "ROW", + /* 165 */ "STATEMENT", + /* 166 */ "TRIGGER", + /* 167 */ "VIEW", + /* 168 */ "COUNT", + /* 169 */ "SUM", + /* 170 */ "AVG", + /* 171 */ "MIN", + /* 172 */ "MAX", + /* 173 */ "FIRST", + /* 174 */ "LAST", + /* 175 */ "TOP", + /* 176 */ "BOTTOM", + /* 177 */ "STDDEV", + /* 178 */ "PERCENTILE", + /* 179 */ "APERCENTILE", + /* 180 */ "LEASTSQUARES", + /* 181 */ "HISTOGRAM", + /* 182 */ "DIFF", + /* 183 */ "SPREAD", + /* 184 */ "TWA", + /* 185 */ "INTERP", + /* 186 */ "LAST_ROW", + /* 187 */ "RATE", + /* 188 */ "IRATE", + /* 189 */ "SUM_RATE", + /* 190 */ "SUM_IRATE", + /* 191 */ "AVG_RATE", + /* 192 */ "AVG_IRATE", + /* 193 */ "TBID", + /* 194 */ "SEMI", + /* 195 */ "NONE", + /* 196 */ "PREV", + /* 197 */ "LINEAR", + /* 198 */ "IMPORT", + /* 199 */ "METRIC", + /* 200 */ "TBNAME", + /* 201 */ "JOIN", + /* 202 */ "METRICS", + /* 203 */ "STABLE", + /* 204 */ "INSERT", + /* 205 */ "INTO", + /* 206 */ "VALUES", + /* 207 */ "error", + /* 208 */ "program", + /* 209 */ "cmd", + /* 210 */ "dbPrefix", + /* 211 */ "ids", + /* 212 */ "cpxName", + /* 213 */ "ifexists", + /* 214 */ "alter_db_optr", + /* 215 */ "acct_optr", + /* 216 */ "ifnotexists", + /* 217 */ "db_optr", + /* 218 */ "pps", + /* 219 */ "tseries", + /* 220 */ "dbs", + /* 221 */ "streams", + /* 222 */ "storage", + /* 223 */ "qtime", + /* 224 */ "users", + /* 225 */ "conns", + /* 226 */ "state", + /* 227 */ "keep", + /* 228 */ "tagitemlist", + /* 229 */ "cache", + /* 230 */ "replica", + /* 231 */ "quorum", + /* 232 */ "days", + /* 233 */ "minrows", + /* 234 */ "maxrows", + /* 235 */ "blocks", + /* 236 */ "ctime", + /* 237 */ "wal", + /* 238 */ "fsync", + /* 239 */ "comp", + /* 240 */ "prec", + /* 241 */ "update", + /* 242 */ "typename", + /* 243 */ "signed", + /* 244 */ "create_table_args", + /* 245 */ "columnlist", + /* 246 */ "select", + /* 247 */ "column", + /* 248 */ "tagitem", + /* 249 */ "selcollist", + /* 250 */ "from", + /* 251 */ "where_opt", + /* 252 */ "interval_opt", + /* 253 */ "fill_opt", + /* 254 */ "sliding_opt", + /* 255 */ "groupby_opt", + /* 256 */ "orderby_opt", + /* 257 */ "having_opt", + /* 258 */ "slimit_opt", + /* 259 */ "limit_opt", + /* 260 */ "union", + /* 261 */ "sclp", + /* 262 */ "expr", + /* 263 */ "as", + /* 264 */ "tablelist", + /* 265 */ "tmvar", + /* 266 */ "sortlist", + /* 267 */ "sortitem", + /* 268 */ "item", + /* 269 */ "sortorder", + /* 270 */ "grouplist", + /* 271 */ "exprlist", + /* 272 */ "expritem", }; #endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */ @@ -1094,151 +1097,154 @@ static const char *const yyRuleName[] 
= { /* 82 */ "fsync ::= FSYNC INTEGER", /* 83 */ "comp ::= COMP INTEGER", /* 84 */ "prec ::= PRECISION STRING", - /* 85 */ "db_optr ::=", - /* 86 */ "db_optr ::= db_optr cache", - /* 87 */ "db_optr ::= db_optr replica", - /* 88 */ "db_optr ::= db_optr quorum", - /* 89 */ "db_optr ::= db_optr days", - /* 90 */ "db_optr ::= db_optr minrows", - /* 91 */ "db_optr ::= db_optr maxrows", - /* 92 */ "db_optr ::= db_optr blocks", - /* 93 */ "db_optr ::= db_optr ctime", - /* 94 */ "db_optr ::= db_optr wal", - /* 95 */ "db_optr ::= db_optr fsync", - /* 96 */ "db_optr ::= db_optr comp", - /* 97 */ "db_optr ::= db_optr prec", - /* 98 */ "db_optr ::= db_optr keep", - /* 99 */ "alter_db_optr ::=", - /* 100 */ "alter_db_optr ::= alter_db_optr replica", - /* 101 */ "alter_db_optr ::= alter_db_optr quorum", - /* 102 */ "alter_db_optr ::= alter_db_optr keep", - /* 103 */ "alter_db_optr ::= alter_db_optr blocks", - /* 104 */ "alter_db_optr ::= alter_db_optr comp", - /* 105 */ "alter_db_optr ::= alter_db_optr wal", - /* 106 */ "alter_db_optr ::= alter_db_optr fsync", - /* 107 */ "typename ::= ids", - /* 108 */ "typename ::= ids LP signed RP", - /* 109 */ "signed ::= INTEGER", - /* 110 */ "signed ::= PLUS INTEGER", - /* 111 */ "signed ::= MINUS INTEGER", - /* 112 */ "cmd ::= CREATE TABLE ifnotexists ids cpxName create_table_args", - /* 113 */ "create_table_args ::= LP columnlist RP", - /* 114 */ "create_table_args ::= LP columnlist RP TAGS LP columnlist RP", - /* 115 */ "create_table_args ::= USING ids cpxName TAGS LP tagitemlist RP", - /* 116 */ "create_table_args ::= AS select", - /* 117 */ "columnlist ::= columnlist COMMA column", - /* 118 */ "columnlist ::= column", - /* 119 */ "column ::= ids typename", - /* 120 */ "tagitemlist ::= tagitemlist COMMA tagitem", - /* 121 */ "tagitemlist ::= tagitem", - /* 122 */ "tagitem ::= INTEGER", - /* 123 */ "tagitem ::= FLOAT", - /* 124 */ "tagitem ::= STRING", - /* 125 */ "tagitem ::= BOOL", - /* 126 */ "tagitem ::= NULL", - /* 127 */ "tagitem ::= MINUS INTEGER", - /* 128 */ "tagitem ::= MINUS FLOAT", - /* 129 */ "tagitem ::= PLUS INTEGER", - /* 130 */ "tagitem ::= PLUS FLOAT", - /* 131 */ "select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt", - /* 132 */ "union ::= select", - /* 133 */ "union ::= LP union RP", - /* 134 */ "union ::= union UNION ALL select", - /* 135 */ "union ::= union UNION ALL LP select RP", - /* 136 */ "cmd ::= union", - /* 137 */ "select ::= SELECT selcollist", - /* 138 */ "sclp ::= selcollist COMMA", - /* 139 */ "sclp ::=", - /* 140 */ "selcollist ::= sclp expr as", - /* 141 */ "selcollist ::= sclp STAR", - /* 142 */ "as ::= AS ids", - /* 143 */ "as ::= ids", - /* 144 */ "as ::=", - /* 145 */ "from ::= FROM tablelist", - /* 146 */ "tablelist ::= ids cpxName", - /* 147 */ "tablelist ::= ids cpxName ids", - /* 148 */ "tablelist ::= tablelist COMMA ids cpxName", - /* 149 */ "tablelist ::= tablelist COMMA ids cpxName ids", - /* 150 */ "tmvar ::= VARIABLE", - /* 151 */ "interval_opt ::= INTERVAL LP tmvar RP", - /* 152 */ "interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP", - /* 153 */ "interval_opt ::=", - /* 154 */ "fill_opt ::=", - /* 155 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP", - /* 156 */ "fill_opt ::= FILL LP ID RP", - /* 157 */ "sliding_opt ::= SLIDING LP tmvar RP", - /* 158 */ "sliding_opt ::=", - /* 159 */ "orderby_opt ::=", - /* 160 */ "orderby_opt ::= ORDER BY sortlist", - /* 161 */ "sortlist ::= sortlist COMMA item sortorder", - /* 162 */ "sortlist 
::= item sortorder", - /* 163 */ "item ::= ids cpxName", - /* 164 */ "sortorder ::= ASC", - /* 165 */ "sortorder ::= DESC", - /* 166 */ "sortorder ::=", - /* 167 */ "groupby_opt ::=", - /* 168 */ "groupby_opt ::= GROUP BY grouplist", - /* 169 */ "grouplist ::= grouplist COMMA item", - /* 170 */ "grouplist ::= item", - /* 171 */ "having_opt ::=", - /* 172 */ "having_opt ::= HAVING expr", - /* 173 */ "limit_opt ::=", - /* 174 */ "limit_opt ::= LIMIT signed", - /* 175 */ "limit_opt ::= LIMIT signed OFFSET signed", - /* 176 */ "limit_opt ::= LIMIT signed COMMA signed", - /* 177 */ "slimit_opt ::=", - /* 178 */ "slimit_opt ::= SLIMIT signed", - /* 179 */ "slimit_opt ::= SLIMIT signed SOFFSET signed", - /* 180 */ "slimit_opt ::= SLIMIT signed COMMA signed", - /* 181 */ "where_opt ::=", - /* 182 */ "where_opt ::= WHERE expr", - /* 183 */ "expr ::= LP expr RP", - /* 184 */ "expr ::= ID", - /* 185 */ "expr ::= ID DOT ID", - /* 186 */ "expr ::= ID DOT STAR", - /* 187 */ "expr ::= INTEGER", - /* 188 */ "expr ::= MINUS INTEGER", - /* 189 */ "expr ::= PLUS INTEGER", - /* 190 */ "expr ::= FLOAT", - /* 191 */ "expr ::= MINUS FLOAT", - /* 192 */ "expr ::= PLUS FLOAT", - /* 193 */ "expr ::= STRING", - /* 194 */ "expr ::= NOW", - /* 195 */ "expr ::= VARIABLE", - /* 196 */ "expr ::= BOOL", - /* 197 */ "expr ::= ID LP exprlist RP", - /* 198 */ "expr ::= ID LP STAR RP", - /* 199 */ "expr ::= expr IS NULL", - /* 200 */ "expr ::= expr IS NOT NULL", - /* 201 */ "expr ::= expr LT expr", - /* 202 */ "expr ::= expr GT expr", - /* 203 */ "expr ::= expr LE expr", - /* 204 */ "expr ::= expr GE expr", - /* 205 */ "expr ::= expr NE expr", - /* 206 */ "expr ::= expr EQ expr", - /* 207 */ "expr ::= expr AND expr", - /* 208 */ "expr ::= expr OR expr", - /* 209 */ "expr ::= expr PLUS expr", - /* 210 */ "expr ::= expr MINUS expr", - /* 211 */ "expr ::= expr STAR expr", - /* 212 */ "expr ::= expr SLASH expr", - /* 213 */ "expr ::= expr REM expr", - /* 214 */ "expr ::= expr LIKE expr", - /* 215 */ "expr ::= expr IN LP exprlist RP", - /* 216 */ "exprlist ::= exprlist COMMA expritem", - /* 217 */ "exprlist ::= expritem", - /* 218 */ "expritem ::= expr", - /* 219 */ "expritem ::=", - /* 220 */ "cmd ::= RESET QUERY CACHE", - /* 221 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", - /* 222 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", - /* 223 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", - /* 224 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", - /* 225 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", - /* 226 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", - /* 227 */ "cmd ::= KILL CONNECTION INTEGER", - /* 228 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", - /* 229 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", + /* 85 */ "update ::= UPDATE INTEGER", + /* 86 */ "db_optr ::=", + /* 87 */ "db_optr ::= db_optr cache", + /* 88 */ "db_optr ::= db_optr replica", + /* 89 */ "db_optr ::= db_optr quorum", + /* 90 */ "db_optr ::= db_optr days", + /* 91 */ "db_optr ::= db_optr minrows", + /* 92 */ "db_optr ::= db_optr maxrows", + /* 93 */ "db_optr ::= db_optr blocks", + /* 94 */ "db_optr ::= db_optr ctime", + /* 95 */ "db_optr ::= db_optr wal", + /* 96 */ "db_optr ::= db_optr fsync", + /* 97 */ "db_optr ::= db_optr comp", + /* 98 */ "db_optr ::= db_optr prec", + /* 99 */ "db_optr ::= db_optr keep", + /* 100 */ "db_optr ::= db_optr update", + /* 101 */ "alter_db_optr ::=", + /* 102 */ "alter_db_optr ::= alter_db_optr replica", + /* 103 */ "alter_db_optr ::= alter_db_optr quorum", + /* 104 
*/ "alter_db_optr ::= alter_db_optr keep", + /* 105 */ "alter_db_optr ::= alter_db_optr blocks", + /* 106 */ "alter_db_optr ::= alter_db_optr comp", + /* 107 */ "alter_db_optr ::= alter_db_optr wal", + /* 108 */ "alter_db_optr ::= alter_db_optr fsync", + /* 109 */ "alter_db_optr ::= alter_db_optr update", + /* 110 */ "typename ::= ids", + /* 111 */ "typename ::= ids LP signed RP", + /* 112 */ "signed ::= INTEGER", + /* 113 */ "signed ::= PLUS INTEGER", + /* 114 */ "signed ::= MINUS INTEGER", + /* 115 */ "cmd ::= CREATE TABLE ifnotexists ids cpxName create_table_args", + /* 116 */ "create_table_args ::= LP columnlist RP", + /* 117 */ "create_table_args ::= LP columnlist RP TAGS LP columnlist RP", + /* 118 */ "create_table_args ::= USING ids cpxName TAGS LP tagitemlist RP", + /* 119 */ "create_table_args ::= AS select", + /* 120 */ "columnlist ::= columnlist COMMA column", + /* 121 */ "columnlist ::= column", + /* 122 */ "column ::= ids typename", + /* 123 */ "tagitemlist ::= tagitemlist COMMA tagitem", + /* 124 */ "tagitemlist ::= tagitem", + /* 125 */ "tagitem ::= INTEGER", + /* 126 */ "tagitem ::= FLOAT", + /* 127 */ "tagitem ::= STRING", + /* 128 */ "tagitem ::= BOOL", + /* 129 */ "tagitem ::= NULL", + /* 130 */ "tagitem ::= MINUS INTEGER", + /* 131 */ "tagitem ::= MINUS FLOAT", + /* 132 */ "tagitem ::= PLUS INTEGER", + /* 133 */ "tagitem ::= PLUS FLOAT", + /* 134 */ "select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt", + /* 135 */ "union ::= select", + /* 136 */ "union ::= LP union RP", + /* 137 */ "union ::= union UNION ALL select", + /* 138 */ "union ::= union UNION ALL LP select RP", + /* 139 */ "cmd ::= union", + /* 140 */ "select ::= SELECT selcollist", + /* 141 */ "sclp ::= selcollist COMMA", + /* 142 */ "sclp ::=", + /* 143 */ "selcollist ::= sclp expr as", + /* 144 */ "selcollist ::= sclp STAR", + /* 145 */ "as ::= AS ids", + /* 146 */ "as ::= ids", + /* 147 */ "as ::=", + /* 148 */ "from ::= FROM tablelist", + /* 149 */ "tablelist ::= ids cpxName", + /* 150 */ "tablelist ::= ids cpxName ids", + /* 151 */ "tablelist ::= tablelist COMMA ids cpxName", + /* 152 */ "tablelist ::= tablelist COMMA ids cpxName ids", + /* 153 */ "tmvar ::= VARIABLE", + /* 154 */ "interval_opt ::= INTERVAL LP tmvar RP", + /* 155 */ "interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP", + /* 156 */ "interval_opt ::=", + /* 157 */ "fill_opt ::=", + /* 158 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP", + /* 159 */ "fill_opt ::= FILL LP ID RP", + /* 160 */ "sliding_opt ::= SLIDING LP tmvar RP", + /* 161 */ "sliding_opt ::=", + /* 162 */ "orderby_opt ::=", + /* 163 */ "orderby_opt ::= ORDER BY sortlist", + /* 164 */ "sortlist ::= sortlist COMMA item sortorder", + /* 165 */ "sortlist ::= item sortorder", + /* 166 */ "item ::= ids cpxName", + /* 167 */ "sortorder ::= ASC", + /* 168 */ "sortorder ::= DESC", + /* 169 */ "sortorder ::=", + /* 170 */ "groupby_opt ::=", + /* 171 */ "groupby_opt ::= GROUP BY grouplist", + /* 172 */ "grouplist ::= grouplist COMMA item", + /* 173 */ "grouplist ::= item", + /* 174 */ "having_opt ::=", + /* 175 */ "having_opt ::= HAVING expr", + /* 176 */ "limit_opt ::=", + /* 177 */ "limit_opt ::= LIMIT signed", + /* 178 */ "limit_opt ::= LIMIT signed OFFSET signed", + /* 179 */ "limit_opt ::= LIMIT signed COMMA signed", + /* 180 */ "slimit_opt ::=", + /* 181 */ "slimit_opt ::= SLIMIT signed", + /* 182 */ "slimit_opt ::= SLIMIT signed SOFFSET signed", + /* 183 */ "slimit_opt ::= SLIMIT signed COMMA 
signed", + /* 184 */ "where_opt ::=", + /* 185 */ "where_opt ::= WHERE expr", + /* 186 */ "expr ::= LP expr RP", + /* 187 */ "expr ::= ID", + /* 188 */ "expr ::= ID DOT ID", + /* 189 */ "expr ::= ID DOT STAR", + /* 190 */ "expr ::= INTEGER", + /* 191 */ "expr ::= MINUS INTEGER", + /* 192 */ "expr ::= PLUS INTEGER", + /* 193 */ "expr ::= FLOAT", + /* 194 */ "expr ::= MINUS FLOAT", + /* 195 */ "expr ::= PLUS FLOAT", + /* 196 */ "expr ::= STRING", + /* 197 */ "expr ::= NOW", + /* 198 */ "expr ::= VARIABLE", + /* 199 */ "expr ::= BOOL", + /* 200 */ "expr ::= ID LP exprlist RP", + /* 201 */ "expr ::= ID LP STAR RP", + /* 202 */ "expr ::= expr IS NULL", + /* 203 */ "expr ::= expr IS NOT NULL", + /* 204 */ "expr ::= expr LT expr", + /* 205 */ "expr ::= expr GT expr", + /* 206 */ "expr ::= expr LE expr", + /* 207 */ "expr ::= expr GE expr", + /* 208 */ "expr ::= expr NE expr", + /* 209 */ "expr ::= expr EQ expr", + /* 210 */ "expr ::= expr AND expr", + /* 211 */ "expr ::= expr OR expr", + /* 212 */ "expr ::= expr PLUS expr", + /* 213 */ "expr ::= expr MINUS expr", + /* 214 */ "expr ::= expr STAR expr", + /* 215 */ "expr ::= expr SLASH expr", + /* 216 */ "expr ::= expr REM expr", + /* 217 */ "expr ::= expr LIKE expr", + /* 218 */ "expr ::= expr IN LP exprlist RP", + /* 219 */ "exprlist ::= exprlist COMMA expritem", + /* 220 */ "exprlist ::= expritem", + /* 221 */ "expritem ::= expr", + /* 222 */ "expritem ::=", + /* 223 */ "cmd ::= RESET QUERY CACHE", + /* 224 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", + /* 225 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", + /* 226 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", + /* 227 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", + /* 228 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", + /* 229 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", + /* 230 */ "cmd ::= KILL CONNECTION INTEGER", + /* 231 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", + /* 232 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", }; #endif /* NDEBUG */ @@ -1359,50 +1365,46 @@ static void yy_destructor( ** inside the C code. 
*/ /********* Begin destructor definitions ***************************************/ - case 226: /* keep */ - case 227: /* tagitemlist */ - case 251: /* fill_opt */ - case 253: /* groupby_opt */ - case 254: /* orderby_opt */ - case 264: /* sortlist */ - case 268: /* grouplist */ + case 227: /* keep */ + case 228: /* tagitemlist */ + case 245: /* columnlist */ + case 253: /* fill_opt */ + case 255: /* groupby_opt */ + case 256: /* orderby_opt */ + case 266: /* sortlist */ + case 270: /* grouplist */ { -tVariantListDestroy((yypminor->yy494)); +taosArrayDestroy((yypminor->yy165)); } break; - case 243: /* columnlist */ + case 246: /* select */ { -tFieldListDestroy((yypminor->yy449)); +doDestroyQuerySql((yypminor->yy414)); } break; - case 244: /* select */ + case 249: /* selcollist */ + case 261: /* sclp */ + case 271: /* exprlist */ { -doDestroyQuerySql((yypminor->yy150)); +tSQLExprListDestroy((yypminor->yy290)); } break; - case 247: /* selcollist */ - case 259: /* sclp */ - case 269: /* exprlist */ + case 251: /* where_opt */ + case 257: /* having_opt */ + case 262: /* expr */ + case 272: /* expritem */ { -tSQLExprListDestroy((yypminor->yy224)); +tSQLExprDestroy((yypminor->yy64)); } break; - case 249: /* where_opt */ - case 255: /* having_opt */ - case 260: /* expr */ - case 270: /* expritem */ + case 260: /* union */ { -tSQLExprDestroy((yypminor->yy66)); +destroyAllSelectClause((yypminor->yy231)); } break; - case 258: /* union */ + case 267: /* sortitem */ { -destroyAllSelectClause((yypminor->yy25)); -} - break; - case 265: /* sortitem */ -{ -tVariantDestroy(&(yypminor->yy312)); +tVariantDestroy(&(yypminor->yy134)); } break; /********* End destructor definitions *****************************************/ @@ -1696,236 +1698,239 @@ static const struct { YYCODETYPE lhs; /* Symbol on the left-hand side of the rule */ signed char nrhs; /* Negative of the number of RHS symbols in the rule */ } yyRuleInfo[] = { - { 207, -1 }, /* (0) program ::= cmd */ - { 208, -2 }, /* (1) cmd ::= SHOW DATABASES */ - { 208, -2 }, /* (2) cmd ::= SHOW MNODES */ - { 208, -2 }, /* (3) cmd ::= SHOW DNODES */ - { 208, -2 }, /* (4) cmd ::= SHOW ACCOUNTS */ - { 208, -2 }, /* (5) cmd ::= SHOW USERS */ - { 208, -2 }, /* (6) cmd ::= SHOW MODULES */ - { 208, -2 }, /* (7) cmd ::= SHOW QUERIES */ - { 208, -2 }, /* (8) cmd ::= SHOW CONNECTIONS */ - { 208, -2 }, /* (9) cmd ::= SHOW STREAMS */ - { 208, -2 }, /* (10) cmd ::= SHOW VARIABLES */ - { 208, -2 }, /* (11) cmd ::= SHOW SCORES */ - { 208, -2 }, /* (12) cmd ::= SHOW GRANTS */ - { 208, -2 }, /* (13) cmd ::= SHOW VNODES */ - { 208, -3 }, /* (14) cmd ::= SHOW VNODES IPTOKEN */ - { 209, 0 }, /* (15) dbPrefix ::= */ - { 209, -2 }, /* (16) dbPrefix ::= ids DOT */ - { 211, 0 }, /* (17) cpxName ::= */ - { 211, -2 }, /* (18) cpxName ::= DOT ids */ - { 208, -5 }, /* (19) cmd ::= SHOW CREATE TABLE ids cpxName */ - { 208, -4 }, /* (20) cmd ::= SHOW CREATE DATABASE ids */ - { 208, -3 }, /* (21) cmd ::= SHOW dbPrefix TABLES */ - { 208, -5 }, /* (22) cmd ::= SHOW dbPrefix TABLES LIKE ids */ - { 208, -3 }, /* (23) cmd ::= SHOW dbPrefix STABLES */ - { 208, -5 }, /* (24) cmd ::= SHOW dbPrefix STABLES LIKE ids */ - { 208, -3 }, /* (25) cmd ::= SHOW dbPrefix VGROUPS */ - { 208, -4 }, /* (26) cmd ::= SHOW dbPrefix VGROUPS ids */ - { 208, -5 }, /* (27) cmd ::= DROP TABLE ifexists ids cpxName */ - { 208, -4 }, /* (28) cmd ::= DROP DATABASE ifexists ids */ - { 208, -3 }, /* (29) cmd ::= DROP DNODE ids */ - { 208, -3 }, /* (30) cmd ::= DROP USER ids */ - { 208, -3 }, /* (31) cmd ::= DROP ACCOUNT ids 
*/ - { 208, -2 }, /* (32) cmd ::= USE ids */ - { 208, -3 }, /* (33) cmd ::= DESCRIBE ids cpxName */ - { 208, -5 }, /* (34) cmd ::= ALTER USER ids PASS ids */ - { 208, -5 }, /* (35) cmd ::= ALTER USER ids PRIVILEGE ids */ - { 208, -4 }, /* (36) cmd ::= ALTER DNODE ids ids */ - { 208, -5 }, /* (37) cmd ::= ALTER DNODE ids ids ids */ - { 208, -3 }, /* (38) cmd ::= ALTER LOCAL ids */ - { 208, -4 }, /* (39) cmd ::= ALTER LOCAL ids ids */ - { 208, -4 }, /* (40) cmd ::= ALTER DATABASE ids alter_db_optr */ - { 208, -4 }, /* (41) cmd ::= ALTER ACCOUNT ids acct_optr */ - { 208, -6 }, /* (42) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ - { 210, -1 }, /* (43) ids ::= ID */ - { 210, -1 }, /* (44) ids ::= STRING */ - { 212, -2 }, /* (45) ifexists ::= IF EXISTS */ - { 212, 0 }, /* (46) ifexists ::= */ - { 215, -3 }, /* (47) ifnotexists ::= IF NOT EXISTS */ - { 215, 0 }, /* (48) ifnotexists ::= */ - { 208, -3 }, /* (49) cmd ::= CREATE DNODE ids */ - { 208, -6 }, /* (50) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ - { 208, -5 }, /* (51) cmd ::= CREATE DATABASE ifnotexists ids db_optr */ - { 208, -5 }, /* (52) cmd ::= CREATE USER ids PASS ids */ - { 217, 0 }, /* (53) pps ::= */ - { 217, -2 }, /* (54) pps ::= PPS INTEGER */ - { 218, 0 }, /* (55) tseries ::= */ - { 218, -2 }, /* (56) tseries ::= TSERIES INTEGER */ - { 219, 0 }, /* (57) dbs ::= */ - { 219, -2 }, /* (58) dbs ::= DBS INTEGER */ - { 220, 0 }, /* (59) streams ::= */ - { 220, -2 }, /* (60) streams ::= STREAMS INTEGER */ - { 221, 0 }, /* (61) storage ::= */ - { 221, -2 }, /* (62) storage ::= STORAGE INTEGER */ - { 222, 0 }, /* (63) qtime ::= */ - { 222, -2 }, /* (64) qtime ::= QTIME INTEGER */ - { 223, 0 }, /* (65) users ::= */ - { 223, -2 }, /* (66) users ::= USERS INTEGER */ - { 224, 0 }, /* (67) conns ::= */ - { 224, -2 }, /* (68) conns ::= CONNS INTEGER */ - { 225, 0 }, /* (69) state ::= */ - { 225, -2 }, /* (70) state ::= STATE ids */ - { 214, -9 }, /* (71) acct_optr ::= pps tseries storage streams qtime dbs users conns state */ - { 226, -2 }, /* (72) keep ::= KEEP tagitemlist */ - { 228, -2 }, /* (73) cache ::= CACHE INTEGER */ - { 229, -2 }, /* (74) replica ::= REPLICA INTEGER */ - { 230, -2 }, /* (75) quorum ::= QUORUM INTEGER */ - { 231, -2 }, /* (76) days ::= DAYS INTEGER */ - { 232, -2 }, /* (77) minrows ::= MINROWS INTEGER */ - { 233, -2 }, /* (78) maxrows ::= MAXROWS INTEGER */ - { 234, -2 }, /* (79) blocks ::= BLOCKS INTEGER */ - { 235, -2 }, /* (80) ctime ::= CTIME INTEGER */ - { 236, -2 }, /* (81) wal ::= WAL INTEGER */ - { 237, -2 }, /* (82) fsync ::= FSYNC INTEGER */ - { 238, -2 }, /* (83) comp ::= COMP INTEGER */ - { 239, -2 }, /* (84) prec ::= PRECISION STRING */ - { 216, 0 }, /* (85) db_optr ::= */ - { 216, -2 }, /* (86) db_optr ::= db_optr cache */ - { 216, -2 }, /* (87) db_optr ::= db_optr replica */ - { 216, -2 }, /* (88) db_optr ::= db_optr quorum */ - { 216, -2 }, /* (89) db_optr ::= db_optr days */ - { 216, -2 }, /* (90) db_optr ::= db_optr minrows */ - { 216, -2 }, /* (91) db_optr ::= db_optr maxrows */ - { 216, -2 }, /* (92) db_optr ::= db_optr blocks */ - { 216, -2 }, /* (93) db_optr ::= db_optr ctime */ - { 216, -2 }, /* (94) db_optr ::= db_optr wal */ - { 216, -2 }, /* (95) db_optr ::= db_optr fsync */ - { 216, -2 }, /* (96) db_optr ::= db_optr comp */ - { 216, -2 }, /* (97) db_optr ::= db_optr prec */ - { 216, -2 }, /* (98) db_optr ::= db_optr keep */ - { 213, 0 }, /* (99) alter_db_optr ::= */ - { 213, -2 }, /* (100) alter_db_optr ::= alter_db_optr replica */ - { 213, -2 }, /* (101) alter_db_optr ::= 
alter_db_optr quorum */ - { 213, -2 }, /* (102) alter_db_optr ::= alter_db_optr keep */ - { 213, -2 }, /* (103) alter_db_optr ::= alter_db_optr blocks */ - { 213, -2 }, /* (104) alter_db_optr ::= alter_db_optr comp */ - { 213, -2 }, /* (105) alter_db_optr ::= alter_db_optr wal */ - { 213, -2 }, /* (106) alter_db_optr ::= alter_db_optr fsync */ - { 240, -1 }, /* (107) typename ::= ids */ - { 240, -4 }, /* (108) typename ::= ids LP signed RP */ - { 241, -1 }, /* (109) signed ::= INTEGER */ - { 241, -2 }, /* (110) signed ::= PLUS INTEGER */ - { 241, -2 }, /* (111) signed ::= MINUS INTEGER */ - { 208, -6 }, /* (112) cmd ::= CREATE TABLE ifnotexists ids cpxName create_table_args */ - { 242, -3 }, /* (113) create_table_args ::= LP columnlist RP */ - { 242, -7 }, /* (114) create_table_args ::= LP columnlist RP TAGS LP columnlist RP */ - { 242, -7 }, /* (115) create_table_args ::= USING ids cpxName TAGS LP tagitemlist RP */ - { 242, -2 }, /* (116) create_table_args ::= AS select */ - { 243, -3 }, /* (117) columnlist ::= columnlist COMMA column */ - { 243, -1 }, /* (118) columnlist ::= column */ - { 245, -2 }, /* (119) column ::= ids typename */ - { 227, -3 }, /* (120) tagitemlist ::= tagitemlist COMMA tagitem */ - { 227, -1 }, /* (121) tagitemlist ::= tagitem */ - { 246, -1 }, /* (122) tagitem ::= INTEGER */ - { 246, -1 }, /* (123) tagitem ::= FLOAT */ - { 246, -1 }, /* (124) tagitem ::= STRING */ - { 246, -1 }, /* (125) tagitem ::= BOOL */ - { 246, -1 }, /* (126) tagitem ::= NULL */ - { 246, -2 }, /* (127) tagitem ::= MINUS INTEGER */ - { 246, -2 }, /* (128) tagitem ::= MINUS FLOAT */ - { 246, -2 }, /* (129) tagitem ::= PLUS INTEGER */ - { 246, -2 }, /* (130) tagitem ::= PLUS FLOAT */ - { 244, -12 }, /* (131) select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ - { 258, -1 }, /* (132) union ::= select */ - { 258, -3 }, /* (133) union ::= LP union RP */ - { 258, -4 }, /* (134) union ::= union UNION ALL select */ - { 258, -6 }, /* (135) union ::= union UNION ALL LP select RP */ - { 208, -1 }, /* (136) cmd ::= union */ - { 244, -2 }, /* (137) select ::= SELECT selcollist */ - { 259, -2 }, /* (138) sclp ::= selcollist COMMA */ - { 259, 0 }, /* (139) sclp ::= */ - { 247, -3 }, /* (140) selcollist ::= sclp expr as */ - { 247, -2 }, /* (141) selcollist ::= sclp STAR */ - { 261, -2 }, /* (142) as ::= AS ids */ - { 261, -1 }, /* (143) as ::= ids */ - { 261, 0 }, /* (144) as ::= */ - { 248, -2 }, /* (145) from ::= FROM tablelist */ - { 262, -2 }, /* (146) tablelist ::= ids cpxName */ - { 262, -3 }, /* (147) tablelist ::= ids cpxName ids */ - { 262, -4 }, /* (148) tablelist ::= tablelist COMMA ids cpxName */ - { 262, -5 }, /* (149) tablelist ::= tablelist COMMA ids cpxName ids */ - { 263, -1 }, /* (150) tmvar ::= VARIABLE */ - { 250, -4 }, /* (151) interval_opt ::= INTERVAL LP tmvar RP */ - { 250, -6 }, /* (152) interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ - { 250, 0 }, /* (153) interval_opt ::= */ - { 251, 0 }, /* (154) fill_opt ::= */ - { 251, -6 }, /* (155) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ - { 251, -4 }, /* (156) fill_opt ::= FILL LP ID RP */ - { 252, -4 }, /* (157) sliding_opt ::= SLIDING LP tmvar RP */ - { 252, 0 }, /* (158) sliding_opt ::= */ - { 254, 0 }, /* (159) orderby_opt ::= */ - { 254, -3 }, /* (160) orderby_opt ::= ORDER BY sortlist */ - { 264, -4 }, /* (161) sortlist ::= sortlist COMMA item sortorder */ - { 264, -2 }, /* (162) sortlist ::= item sortorder */ - { 266, -2 }, /* (163) item 
::= ids cpxName */ - { 267, -1 }, /* (164) sortorder ::= ASC */ - { 267, -1 }, /* (165) sortorder ::= DESC */ - { 267, 0 }, /* (166) sortorder ::= */ - { 253, 0 }, /* (167) groupby_opt ::= */ - { 253, -3 }, /* (168) groupby_opt ::= GROUP BY grouplist */ - { 268, -3 }, /* (169) grouplist ::= grouplist COMMA item */ - { 268, -1 }, /* (170) grouplist ::= item */ - { 255, 0 }, /* (171) having_opt ::= */ - { 255, -2 }, /* (172) having_opt ::= HAVING expr */ - { 257, 0 }, /* (173) limit_opt ::= */ - { 257, -2 }, /* (174) limit_opt ::= LIMIT signed */ - { 257, -4 }, /* (175) limit_opt ::= LIMIT signed OFFSET signed */ - { 257, -4 }, /* (176) limit_opt ::= LIMIT signed COMMA signed */ - { 256, 0 }, /* (177) slimit_opt ::= */ - { 256, -2 }, /* (178) slimit_opt ::= SLIMIT signed */ - { 256, -4 }, /* (179) slimit_opt ::= SLIMIT signed SOFFSET signed */ - { 256, -4 }, /* (180) slimit_opt ::= SLIMIT signed COMMA signed */ - { 249, 0 }, /* (181) where_opt ::= */ - { 249, -2 }, /* (182) where_opt ::= WHERE expr */ - { 260, -3 }, /* (183) expr ::= LP expr RP */ - { 260, -1 }, /* (184) expr ::= ID */ - { 260, -3 }, /* (185) expr ::= ID DOT ID */ - { 260, -3 }, /* (186) expr ::= ID DOT STAR */ - { 260, -1 }, /* (187) expr ::= INTEGER */ - { 260, -2 }, /* (188) expr ::= MINUS INTEGER */ - { 260, -2 }, /* (189) expr ::= PLUS INTEGER */ - { 260, -1 }, /* (190) expr ::= FLOAT */ - { 260, -2 }, /* (191) expr ::= MINUS FLOAT */ - { 260, -2 }, /* (192) expr ::= PLUS FLOAT */ - { 260, -1 }, /* (193) expr ::= STRING */ - { 260, -1 }, /* (194) expr ::= NOW */ - { 260, -1 }, /* (195) expr ::= VARIABLE */ - { 260, -1 }, /* (196) expr ::= BOOL */ - { 260, -4 }, /* (197) expr ::= ID LP exprlist RP */ - { 260, -4 }, /* (198) expr ::= ID LP STAR RP */ - { 260, -3 }, /* (199) expr ::= expr IS NULL */ - { 260, -4 }, /* (200) expr ::= expr IS NOT NULL */ - { 260, -3 }, /* (201) expr ::= expr LT expr */ - { 260, -3 }, /* (202) expr ::= expr GT expr */ - { 260, -3 }, /* (203) expr ::= expr LE expr */ - { 260, -3 }, /* (204) expr ::= expr GE expr */ - { 260, -3 }, /* (205) expr ::= expr NE expr */ - { 260, -3 }, /* (206) expr ::= expr EQ expr */ - { 260, -3 }, /* (207) expr ::= expr AND expr */ - { 260, -3 }, /* (208) expr ::= expr OR expr */ - { 260, -3 }, /* (209) expr ::= expr PLUS expr */ - { 260, -3 }, /* (210) expr ::= expr MINUS expr */ - { 260, -3 }, /* (211) expr ::= expr STAR expr */ - { 260, -3 }, /* (212) expr ::= expr SLASH expr */ - { 260, -3 }, /* (213) expr ::= expr REM expr */ - { 260, -3 }, /* (214) expr ::= expr LIKE expr */ - { 260, -5 }, /* (215) expr ::= expr IN LP exprlist RP */ - { 269, -3 }, /* (216) exprlist ::= exprlist COMMA expritem */ - { 269, -1 }, /* (217) exprlist ::= expritem */ - { 270, -1 }, /* (218) expritem ::= expr */ - { 270, 0 }, /* (219) expritem ::= */ - { 208, -3 }, /* (220) cmd ::= RESET QUERY CACHE */ - { 208, -7 }, /* (221) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ - { 208, -7 }, /* (222) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ - { 208, -7 }, /* (223) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ - { 208, -7 }, /* (224) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ - { 208, -8 }, /* (225) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ - { 208, -9 }, /* (226) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ - { 208, -3 }, /* (227) cmd ::= KILL CONNECTION INTEGER */ - { 208, -5 }, /* (228) cmd ::= KILL STREAM INTEGER COLON INTEGER */ - { 208, -5 }, /* (229) cmd ::= KILL QUERY INTEGER COLON INTEGER */ + { 208, -1 }, /* (0) program ::= 
cmd */ + { 209, -2 }, /* (1) cmd ::= SHOW DATABASES */ + { 209, -2 }, /* (2) cmd ::= SHOW MNODES */ + { 209, -2 }, /* (3) cmd ::= SHOW DNODES */ + { 209, -2 }, /* (4) cmd ::= SHOW ACCOUNTS */ + { 209, -2 }, /* (5) cmd ::= SHOW USERS */ + { 209, -2 }, /* (6) cmd ::= SHOW MODULES */ + { 209, -2 }, /* (7) cmd ::= SHOW QUERIES */ + { 209, -2 }, /* (8) cmd ::= SHOW CONNECTIONS */ + { 209, -2 }, /* (9) cmd ::= SHOW STREAMS */ + { 209, -2 }, /* (10) cmd ::= SHOW VARIABLES */ + { 209, -2 }, /* (11) cmd ::= SHOW SCORES */ + { 209, -2 }, /* (12) cmd ::= SHOW GRANTS */ + { 209, -2 }, /* (13) cmd ::= SHOW VNODES */ + { 209, -3 }, /* (14) cmd ::= SHOW VNODES IPTOKEN */ + { 210, 0 }, /* (15) dbPrefix ::= */ + { 210, -2 }, /* (16) dbPrefix ::= ids DOT */ + { 212, 0 }, /* (17) cpxName ::= */ + { 212, -2 }, /* (18) cpxName ::= DOT ids */ + { 209, -5 }, /* (19) cmd ::= SHOW CREATE TABLE ids cpxName */ + { 209, -4 }, /* (20) cmd ::= SHOW CREATE DATABASE ids */ + { 209, -3 }, /* (21) cmd ::= SHOW dbPrefix TABLES */ + { 209, -5 }, /* (22) cmd ::= SHOW dbPrefix TABLES LIKE ids */ + { 209, -3 }, /* (23) cmd ::= SHOW dbPrefix STABLES */ + { 209, -5 }, /* (24) cmd ::= SHOW dbPrefix STABLES LIKE ids */ + { 209, -3 }, /* (25) cmd ::= SHOW dbPrefix VGROUPS */ + { 209, -4 }, /* (26) cmd ::= SHOW dbPrefix VGROUPS ids */ + { 209, -5 }, /* (27) cmd ::= DROP TABLE ifexists ids cpxName */ + { 209, -4 }, /* (28) cmd ::= DROP DATABASE ifexists ids */ + { 209, -3 }, /* (29) cmd ::= DROP DNODE ids */ + { 209, -3 }, /* (30) cmd ::= DROP USER ids */ + { 209, -3 }, /* (31) cmd ::= DROP ACCOUNT ids */ + { 209, -2 }, /* (32) cmd ::= USE ids */ + { 209, -3 }, /* (33) cmd ::= DESCRIBE ids cpxName */ + { 209, -5 }, /* (34) cmd ::= ALTER USER ids PASS ids */ + { 209, -5 }, /* (35) cmd ::= ALTER USER ids PRIVILEGE ids */ + { 209, -4 }, /* (36) cmd ::= ALTER DNODE ids ids */ + { 209, -5 }, /* (37) cmd ::= ALTER DNODE ids ids ids */ + { 209, -3 }, /* (38) cmd ::= ALTER LOCAL ids */ + { 209, -4 }, /* (39) cmd ::= ALTER LOCAL ids ids */ + { 209, -4 }, /* (40) cmd ::= ALTER DATABASE ids alter_db_optr */ + { 209, -4 }, /* (41) cmd ::= ALTER ACCOUNT ids acct_optr */ + { 209, -6 }, /* (42) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ + { 211, -1 }, /* (43) ids ::= ID */ + { 211, -1 }, /* (44) ids ::= STRING */ + { 213, -2 }, /* (45) ifexists ::= IF EXISTS */ + { 213, 0 }, /* (46) ifexists ::= */ + { 216, -3 }, /* (47) ifnotexists ::= IF NOT EXISTS */ + { 216, 0 }, /* (48) ifnotexists ::= */ + { 209, -3 }, /* (49) cmd ::= CREATE DNODE ids */ + { 209, -6 }, /* (50) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ + { 209, -5 }, /* (51) cmd ::= CREATE DATABASE ifnotexists ids db_optr */ + { 209, -5 }, /* (52) cmd ::= CREATE USER ids PASS ids */ + { 218, 0 }, /* (53) pps ::= */ + { 218, -2 }, /* (54) pps ::= PPS INTEGER */ + { 219, 0 }, /* (55) tseries ::= */ + { 219, -2 }, /* (56) tseries ::= TSERIES INTEGER */ + { 220, 0 }, /* (57) dbs ::= */ + { 220, -2 }, /* (58) dbs ::= DBS INTEGER */ + { 221, 0 }, /* (59) streams ::= */ + { 221, -2 }, /* (60) streams ::= STREAMS INTEGER */ + { 222, 0 }, /* (61) storage ::= */ + { 222, -2 }, /* (62) storage ::= STORAGE INTEGER */ + { 223, 0 }, /* (63) qtime ::= */ + { 223, -2 }, /* (64) qtime ::= QTIME INTEGER */ + { 224, 0 }, /* (65) users ::= */ + { 224, -2 }, /* (66) users ::= USERS INTEGER */ + { 225, 0 }, /* (67) conns ::= */ + { 225, -2 }, /* (68) conns ::= CONNS INTEGER */ + { 226, 0 }, /* (69) state ::= */ + { 226, -2 }, /* (70) state ::= STATE ids */ + { 215, -9 }, /* (71) acct_optr ::= pps tseries 
storage streams qtime dbs users conns state */ + { 227, -2 }, /* (72) keep ::= KEEP tagitemlist */ + { 229, -2 }, /* (73) cache ::= CACHE INTEGER */ + { 230, -2 }, /* (74) replica ::= REPLICA INTEGER */ + { 231, -2 }, /* (75) quorum ::= QUORUM INTEGER */ + { 232, -2 }, /* (76) days ::= DAYS INTEGER */ + { 233, -2 }, /* (77) minrows ::= MINROWS INTEGER */ + { 234, -2 }, /* (78) maxrows ::= MAXROWS INTEGER */ + { 235, -2 }, /* (79) blocks ::= BLOCKS INTEGER */ + { 236, -2 }, /* (80) ctime ::= CTIME INTEGER */ + { 237, -2 }, /* (81) wal ::= WAL INTEGER */ + { 238, -2 }, /* (82) fsync ::= FSYNC INTEGER */ + { 239, -2 }, /* (83) comp ::= COMP INTEGER */ + { 240, -2 }, /* (84) prec ::= PRECISION STRING */ + { 241, -2 }, /* (85) update ::= UPDATE INTEGER */ + { 217, 0 }, /* (86) db_optr ::= */ + { 217, -2 }, /* (87) db_optr ::= db_optr cache */ + { 217, -2 }, /* (88) db_optr ::= db_optr replica */ + { 217, -2 }, /* (89) db_optr ::= db_optr quorum */ + { 217, -2 }, /* (90) db_optr ::= db_optr days */ + { 217, -2 }, /* (91) db_optr ::= db_optr minrows */ + { 217, -2 }, /* (92) db_optr ::= db_optr maxrows */ + { 217, -2 }, /* (93) db_optr ::= db_optr blocks */ + { 217, -2 }, /* (94) db_optr ::= db_optr ctime */ + { 217, -2 }, /* (95) db_optr ::= db_optr wal */ + { 217, -2 }, /* (96) db_optr ::= db_optr fsync */ + { 217, -2 }, /* (97) db_optr ::= db_optr comp */ + { 217, -2 }, /* (98) db_optr ::= db_optr prec */ + { 217, -2 }, /* (99) db_optr ::= db_optr keep */ + { 217, -2 }, /* (100) db_optr ::= db_optr update */ + { 214, 0 }, /* (101) alter_db_optr ::= */ + { 214, -2 }, /* (102) alter_db_optr ::= alter_db_optr replica */ + { 214, -2 }, /* (103) alter_db_optr ::= alter_db_optr quorum */ + { 214, -2 }, /* (104) alter_db_optr ::= alter_db_optr keep */ + { 214, -2 }, /* (105) alter_db_optr ::= alter_db_optr blocks */ + { 214, -2 }, /* (106) alter_db_optr ::= alter_db_optr comp */ + { 214, -2 }, /* (107) alter_db_optr ::= alter_db_optr wal */ + { 214, -2 }, /* (108) alter_db_optr ::= alter_db_optr fsync */ + { 214, -2 }, /* (109) alter_db_optr ::= alter_db_optr update */ + { 242, -1 }, /* (110) typename ::= ids */ + { 242, -4 }, /* (111) typename ::= ids LP signed RP */ + { 243, -1 }, /* (112) signed ::= INTEGER */ + { 243, -2 }, /* (113) signed ::= PLUS INTEGER */ + { 243, -2 }, /* (114) signed ::= MINUS INTEGER */ + { 209, -6 }, /* (115) cmd ::= CREATE TABLE ifnotexists ids cpxName create_table_args */ + { 244, -3 }, /* (116) create_table_args ::= LP columnlist RP */ + { 244, -7 }, /* (117) create_table_args ::= LP columnlist RP TAGS LP columnlist RP */ + { 244, -7 }, /* (118) create_table_args ::= USING ids cpxName TAGS LP tagitemlist RP */ + { 244, -2 }, /* (119) create_table_args ::= AS select */ + { 245, -3 }, /* (120) columnlist ::= columnlist COMMA column */ + { 245, -1 }, /* (121) columnlist ::= column */ + { 247, -2 }, /* (122) column ::= ids typename */ + { 228, -3 }, /* (123) tagitemlist ::= tagitemlist COMMA tagitem */ + { 228, -1 }, /* (124) tagitemlist ::= tagitem */ + { 248, -1 }, /* (125) tagitem ::= INTEGER */ + { 248, -1 }, /* (126) tagitem ::= FLOAT */ + { 248, -1 }, /* (127) tagitem ::= STRING */ + { 248, -1 }, /* (128) tagitem ::= BOOL */ + { 248, -1 }, /* (129) tagitem ::= NULL */ + { 248, -2 }, /* (130) tagitem ::= MINUS INTEGER */ + { 248, -2 }, /* (131) tagitem ::= MINUS FLOAT */ + { 248, -2 }, /* (132) tagitem ::= PLUS INTEGER */ + { 248, -2 }, /* (133) tagitem ::= PLUS FLOAT */ + { 246, -12 }, /* (134) select ::= SELECT selcollist from where_opt interval_opt fill_opt 
sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ + { 260, -1 }, /* (135) union ::= select */ + { 260, -3 }, /* (136) union ::= LP union RP */ + { 260, -4 }, /* (137) union ::= union UNION ALL select */ + { 260, -6 }, /* (138) union ::= union UNION ALL LP select RP */ + { 209, -1 }, /* (139) cmd ::= union */ + { 246, -2 }, /* (140) select ::= SELECT selcollist */ + { 261, -2 }, /* (141) sclp ::= selcollist COMMA */ + { 261, 0 }, /* (142) sclp ::= */ + { 249, -3 }, /* (143) selcollist ::= sclp expr as */ + { 249, -2 }, /* (144) selcollist ::= sclp STAR */ + { 263, -2 }, /* (145) as ::= AS ids */ + { 263, -1 }, /* (146) as ::= ids */ + { 263, 0 }, /* (147) as ::= */ + { 250, -2 }, /* (148) from ::= FROM tablelist */ + { 264, -2 }, /* (149) tablelist ::= ids cpxName */ + { 264, -3 }, /* (150) tablelist ::= ids cpxName ids */ + { 264, -4 }, /* (151) tablelist ::= tablelist COMMA ids cpxName */ + { 264, -5 }, /* (152) tablelist ::= tablelist COMMA ids cpxName ids */ + { 265, -1 }, /* (153) tmvar ::= VARIABLE */ + { 252, -4 }, /* (154) interval_opt ::= INTERVAL LP tmvar RP */ + { 252, -6 }, /* (155) interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ + { 252, 0 }, /* (156) interval_opt ::= */ + { 253, 0 }, /* (157) fill_opt ::= */ + { 253, -6 }, /* (158) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + { 253, -4 }, /* (159) fill_opt ::= FILL LP ID RP */ + { 254, -4 }, /* (160) sliding_opt ::= SLIDING LP tmvar RP */ + { 254, 0 }, /* (161) sliding_opt ::= */ + { 256, 0 }, /* (162) orderby_opt ::= */ + { 256, -3 }, /* (163) orderby_opt ::= ORDER BY sortlist */ + { 266, -4 }, /* (164) sortlist ::= sortlist COMMA item sortorder */ + { 266, -2 }, /* (165) sortlist ::= item sortorder */ + { 268, -2 }, /* (166) item ::= ids cpxName */ + { 269, -1 }, /* (167) sortorder ::= ASC */ + { 269, -1 }, /* (168) sortorder ::= DESC */ + { 269, 0 }, /* (169) sortorder ::= */ + { 255, 0 }, /* (170) groupby_opt ::= */ + { 255, -3 }, /* (171) groupby_opt ::= GROUP BY grouplist */ + { 270, -3 }, /* (172) grouplist ::= grouplist COMMA item */ + { 270, -1 }, /* (173) grouplist ::= item */ + { 257, 0 }, /* (174) having_opt ::= */ + { 257, -2 }, /* (175) having_opt ::= HAVING expr */ + { 259, 0 }, /* (176) limit_opt ::= */ + { 259, -2 }, /* (177) limit_opt ::= LIMIT signed */ + { 259, -4 }, /* (178) limit_opt ::= LIMIT signed OFFSET signed */ + { 259, -4 }, /* (179) limit_opt ::= LIMIT signed COMMA signed */ + { 258, 0 }, /* (180) slimit_opt ::= */ + { 258, -2 }, /* (181) slimit_opt ::= SLIMIT signed */ + { 258, -4 }, /* (182) slimit_opt ::= SLIMIT signed SOFFSET signed */ + { 258, -4 }, /* (183) slimit_opt ::= SLIMIT signed COMMA signed */ + { 251, 0 }, /* (184) where_opt ::= */ + { 251, -2 }, /* (185) where_opt ::= WHERE expr */ + { 262, -3 }, /* (186) expr ::= LP expr RP */ + { 262, -1 }, /* (187) expr ::= ID */ + { 262, -3 }, /* (188) expr ::= ID DOT ID */ + { 262, -3 }, /* (189) expr ::= ID DOT STAR */ + { 262, -1 }, /* (190) expr ::= INTEGER */ + { 262, -2 }, /* (191) expr ::= MINUS INTEGER */ + { 262, -2 }, /* (192) expr ::= PLUS INTEGER */ + { 262, -1 }, /* (193) expr ::= FLOAT */ + { 262, -2 }, /* (194) expr ::= MINUS FLOAT */ + { 262, -2 }, /* (195) expr ::= PLUS FLOAT */ + { 262, -1 }, /* (196) expr ::= STRING */ + { 262, -1 }, /* (197) expr ::= NOW */ + { 262, -1 }, /* (198) expr ::= VARIABLE */ + { 262, -1 }, /* (199) expr ::= BOOL */ + { 262, -4 }, /* (200) expr ::= ID LP exprlist RP */ + { 262, -4 }, /* (201) expr ::= ID LP STAR RP */ + { 262, -3 }, /* (202) expr ::= expr IS NULL */ + 
{ 262, -4 }, /* (203) expr ::= expr IS NOT NULL */ + { 262, -3 }, /* (204) expr ::= expr LT expr */ + { 262, -3 }, /* (205) expr ::= expr GT expr */ + { 262, -3 }, /* (206) expr ::= expr LE expr */ + { 262, -3 }, /* (207) expr ::= expr GE expr */ + { 262, -3 }, /* (208) expr ::= expr NE expr */ + { 262, -3 }, /* (209) expr ::= expr EQ expr */ + { 262, -3 }, /* (210) expr ::= expr AND expr */ + { 262, -3 }, /* (211) expr ::= expr OR expr */ + { 262, -3 }, /* (212) expr ::= expr PLUS expr */ + { 262, -3 }, /* (213) expr ::= expr MINUS expr */ + { 262, -3 }, /* (214) expr ::= expr STAR expr */ + { 262, -3 }, /* (215) expr ::= expr SLASH expr */ + { 262, -3 }, /* (216) expr ::= expr REM expr */ + { 262, -3 }, /* (217) expr ::= expr LIKE expr */ + { 262, -5 }, /* (218) expr ::= expr IN LP exprlist RP */ + { 271, -3 }, /* (219) exprlist ::= exprlist COMMA expritem */ + { 271, -1 }, /* (220) exprlist ::= expritem */ + { 272, -1 }, /* (221) expritem ::= expr */ + { 272, 0 }, /* (222) expritem ::= */ + { 209, -3 }, /* (223) cmd ::= RESET QUERY CACHE */ + { 209, -7 }, /* (224) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + { 209, -7 }, /* (225) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + { 209, -7 }, /* (226) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + { 209, -7 }, /* (227) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + { 209, -8 }, /* (228) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + { 209, -9 }, /* (229) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + { 209, -3 }, /* (230) cmd ::= KILL CONNECTION INTEGER */ + { 209, -5 }, /* (231) cmd ::= KILL STREAM INTEGER COLON INTEGER */ + { 209, -5 }, /* (232) cmd ::= KILL QUERY INTEGER COLON INTEGER */ }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -2156,13 +2161,13 @@ static void yy_reduce( { setDCLSQLElems(pInfo, TSDB_SQL_CFG_LOCAL, 2, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } break; case 40: /* cmd ::= ALTER DATABASE ids alter_db_optr */ -{ SStrToken t = {0}; setCreateDBSQL(pInfo, TSDB_SQL_ALTER_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy158, &t);} +{ SStrToken t = {0}; setCreateDBSQL(pInfo, TSDB_SQL_ALTER_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy268, &t);} break; case 41: /* cmd ::= ALTER ACCOUNT ids acct_optr */ -{ setCreateAcctSQL(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-1].minor.yy0, NULL, &yymsp[0].minor.yy73);} +{ setCreateAcctSQL(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-1].minor.yy0, NULL, &yymsp[0].minor.yy149);} break; case 42: /* cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ -{ setCreateAcctSQL(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy73);} +{ setCreateAcctSQL(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy149);} break; case 43: /* ids ::= ID */ case 44: /* ids ::= STRING */ yytestcase(yyruleno==44); @@ -2183,10 +2188,10 @@ static void yy_reduce( { setDCLSQLElems(pInfo, TSDB_SQL_CREATE_DNODE, 1, &yymsp[0].minor.yy0);} break; case 50: /* cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ -{ setCreateAcctSQL(pInfo, TSDB_SQL_CREATE_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy73);} +{ setCreateAcctSQL(pInfo, TSDB_SQL_CREATE_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy149);} break; case 51: /* cmd ::= CREATE DATABASE ifnotexists ids db_optr */ -{ setCreateDBSQL(pInfo, TSDB_SQL_CREATE_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy158, &yymsp[-2].minor.yy0);} +{ setCreateDBSQL(pInfo, TSDB_SQL_CREATE_DB, &yymsp[-1].minor.yy0, 
&yymsp[0].minor.yy268, &yymsp[-2].minor.yy0);} break; case 52: /* cmd ::= CREATE USER ids PASS ids */ { setCreateUserSQL(pInfo, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);} @@ -2215,20 +2220,20 @@ static void yy_reduce( break; case 71: /* acct_optr ::= pps tseries storage streams qtime dbs users conns state */ { - yylhsminor.yy73.maxUsers = (yymsp[-2].minor.yy0.n>0)?atoi(yymsp[-2].minor.yy0.z):-1; - yylhsminor.yy73.maxDbs = (yymsp[-3].minor.yy0.n>0)?atoi(yymsp[-3].minor.yy0.z):-1; - yylhsminor.yy73.maxTimeSeries = (yymsp[-7].minor.yy0.n>0)?atoi(yymsp[-7].minor.yy0.z):-1; - yylhsminor.yy73.maxStreams = (yymsp[-5].minor.yy0.n>0)?atoi(yymsp[-5].minor.yy0.z):-1; - yylhsminor.yy73.maxPointsPerSecond = (yymsp[-8].minor.yy0.n>0)?atoi(yymsp[-8].minor.yy0.z):-1; - yylhsminor.yy73.maxStorage = (yymsp[-6].minor.yy0.n>0)?strtoll(yymsp[-6].minor.yy0.z, NULL, 10):-1; - yylhsminor.yy73.maxQueryTime = (yymsp[-4].minor.yy0.n>0)?strtoll(yymsp[-4].minor.yy0.z, NULL, 10):-1; - yylhsminor.yy73.maxConnections = (yymsp[-1].minor.yy0.n>0)?atoi(yymsp[-1].minor.yy0.z):-1; - yylhsminor.yy73.stat = yymsp[0].minor.yy0; -} - yymsp[-8].minor.yy73 = yylhsminor.yy73; + yylhsminor.yy149.maxUsers = (yymsp[-2].minor.yy0.n>0)?atoi(yymsp[-2].minor.yy0.z):-1; + yylhsminor.yy149.maxDbs = (yymsp[-3].minor.yy0.n>0)?atoi(yymsp[-3].minor.yy0.z):-1; + yylhsminor.yy149.maxTimeSeries = (yymsp[-7].minor.yy0.n>0)?atoi(yymsp[-7].minor.yy0.z):-1; + yylhsminor.yy149.maxStreams = (yymsp[-5].minor.yy0.n>0)?atoi(yymsp[-5].minor.yy0.z):-1; + yylhsminor.yy149.maxPointsPerSecond = (yymsp[-8].minor.yy0.n>0)?atoi(yymsp[-8].minor.yy0.z):-1; + yylhsminor.yy149.maxStorage = (yymsp[-6].minor.yy0.n>0)?strtoll(yymsp[-6].minor.yy0.z, NULL, 10):-1; + yylhsminor.yy149.maxQueryTime = (yymsp[-4].minor.yy0.n>0)?strtoll(yymsp[-4].minor.yy0.z, NULL, 10):-1; + yylhsminor.yy149.maxConnections = (yymsp[-1].minor.yy0.n>0)?atoi(yymsp[-1].minor.yy0.z):-1; + yylhsminor.yy149.stat = yymsp[0].minor.yy0; +} + yymsp[-8].minor.yy149 = yylhsminor.yy149; break; case 72: /* keep ::= KEEP tagitemlist */ -{ yymsp[-1].minor.yy494 = yymsp[0].minor.yy494; } +{ yymsp[-1].minor.yy165 = yymsp[0].minor.yy165; } break; case 73: /* cache ::= CACHE INTEGER */ case 74: /* replica ::= REPLICA INTEGER */ yytestcase(yyruleno==74); @@ -2242,574 +2247,586 @@ static void yy_reduce( case 82: /* fsync ::= FSYNC INTEGER */ yytestcase(yyruleno==82); case 83: /* comp ::= COMP INTEGER */ yytestcase(yyruleno==83); case 84: /* prec ::= PRECISION STRING */ yytestcase(yyruleno==84); + case 85: /* update ::= UPDATE INTEGER */ yytestcase(yyruleno==85); { yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } break; - case 85: /* db_optr ::= */ -{setDefaultCreateDbOption(&yymsp[1].minor.yy158);} - break; - case 86: /* db_optr ::= db_optr cache */ -{ yylhsminor.yy158 = yymsp[-1].minor.yy158; yylhsminor.yy158.cacheBlockSize = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy158 = yylhsminor.yy158; - break; - case 87: /* db_optr ::= db_optr replica */ - case 100: /* alter_db_optr ::= alter_db_optr replica */ yytestcase(yyruleno==100); -{ yylhsminor.yy158 = yymsp[-1].minor.yy158; yylhsminor.yy158.replica = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy158 = yylhsminor.yy158; - break; - case 88: /* db_optr ::= db_optr quorum */ - case 101: /* alter_db_optr ::= alter_db_optr quorum */ yytestcase(yyruleno==101); -{ yylhsminor.yy158 = yymsp[-1].minor.yy158; yylhsminor.yy158.quorum = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy158 = yylhsminor.yy158; - break; - case 89: /* db_optr 
::= db_optr days */ -{ yylhsminor.yy158 = yymsp[-1].minor.yy158; yylhsminor.yy158.daysPerFile = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy158 = yylhsminor.yy158; - break; - case 90: /* db_optr ::= db_optr minrows */ -{ yylhsminor.yy158 = yymsp[-1].minor.yy158; yylhsminor.yy158.minRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } - yymsp[-1].minor.yy158 = yylhsminor.yy158; - break; - case 91: /* db_optr ::= db_optr maxrows */ -{ yylhsminor.yy158 = yymsp[-1].minor.yy158; yylhsminor.yy158.maxRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } - yymsp[-1].minor.yy158 = yylhsminor.yy158; - break; - case 92: /* db_optr ::= db_optr blocks */ - case 103: /* alter_db_optr ::= alter_db_optr blocks */ yytestcase(yyruleno==103); -{ yylhsminor.yy158 = yymsp[-1].minor.yy158; yylhsminor.yy158.numOfBlocks = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy158 = yylhsminor.yy158; - break; - case 93: /* db_optr ::= db_optr ctime */ -{ yylhsminor.yy158 = yymsp[-1].minor.yy158; yylhsminor.yy158.commitTime = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy158 = yylhsminor.yy158; - break; - case 94: /* db_optr ::= db_optr wal */ - case 105: /* alter_db_optr ::= alter_db_optr wal */ yytestcase(yyruleno==105); -{ yylhsminor.yy158 = yymsp[-1].minor.yy158; yylhsminor.yy158.walLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy158 = yylhsminor.yy158; - break; - case 95: /* db_optr ::= db_optr fsync */ - case 106: /* alter_db_optr ::= alter_db_optr fsync */ yytestcase(yyruleno==106); -{ yylhsminor.yy158 = yymsp[-1].minor.yy158; yylhsminor.yy158.fsyncPeriod = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy158 = yylhsminor.yy158; - break; - case 96: /* db_optr ::= db_optr comp */ - case 104: /* alter_db_optr ::= alter_db_optr comp */ yytestcase(yyruleno==104); -{ yylhsminor.yy158 = yymsp[-1].minor.yy158; yylhsminor.yy158.compressionLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy158 = yylhsminor.yy158; - break; - case 97: /* db_optr ::= db_optr prec */ -{ yylhsminor.yy158 = yymsp[-1].minor.yy158; yylhsminor.yy158.precision = yymsp[0].minor.yy0; } - yymsp[-1].minor.yy158 = yylhsminor.yy158; - break; - case 98: /* db_optr ::= db_optr keep */ - case 102: /* alter_db_optr ::= alter_db_optr keep */ yytestcase(yyruleno==102); -{ yylhsminor.yy158 = yymsp[-1].minor.yy158; yylhsminor.yy158.keep = yymsp[0].minor.yy494; } - yymsp[-1].minor.yy158 = yylhsminor.yy158; - break; - case 99: /* alter_db_optr ::= */ -{ setDefaultCreateDbOption(&yymsp[1].minor.yy158);} - break; - case 107: /* typename ::= ids */ + case 86: /* db_optr ::= */ +{setDefaultCreateDbOption(&yymsp[1].minor.yy268);} + break; + case 87: /* db_optr ::= db_optr cache */ +{ yylhsminor.yy268 = yymsp[-1].minor.yy268; yylhsminor.yy268.cacheBlockSize = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy268 = yylhsminor.yy268; + break; + case 88: /* db_optr ::= db_optr replica */ + case 102: /* alter_db_optr ::= alter_db_optr replica */ yytestcase(yyruleno==102); +{ yylhsminor.yy268 = yymsp[-1].minor.yy268; yylhsminor.yy268.replica = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy268 = yylhsminor.yy268; + break; + case 89: /* db_optr ::= db_optr quorum */ + case 103: /* alter_db_optr ::= alter_db_optr quorum */ yytestcase(yyruleno==103); +{ yylhsminor.yy268 = yymsp[-1].minor.yy268; yylhsminor.yy268.quorum = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy268 = yylhsminor.yy268; + break; + case 90: /* db_optr ::= db_optr days */ 
+{ yylhsminor.yy268 = yymsp[-1].minor.yy268; yylhsminor.yy268.daysPerFile = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy268 = yylhsminor.yy268; + break; + case 91: /* db_optr ::= db_optr minrows */ +{ yylhsminor.yy268 = yymsp[-1].minor.yy268; yylhsminor.yy268.minRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } + yymsp[-1].minor.yy268 = yylhsminor.yy268; + break; + case 92: /* db_optr ::= db_optr maxrows */ +{ yylhsminor.yy268 = yymsp[-1].minor.yy268; yylhsminor.yy268.maxRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } + yymsp[-1].minor.yy268 = yylhsminor.yy268; + break; + case 93: /* db_optr ::= db_optr blocks */ + case 105: /* alter_db_optr ::= alter_db_optr blocks */ yytestcase(yyruleno==105); +{ yylhsminor.yy268 = yymsp[-1].minor.yy268; yylhsminor.yy268.numOfBlocks = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy268 = yylhsminor.yy268; + break; + case 94: /* db_optr ::= db_optr ctime */ +{ yylhsminor.yy268 = yymsp[-1].minor.yy268; yylhsminor.yy268.commitTime = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy268 = yylhsminor.yy268; + break; + case 95: /* db_optr ::= db_optr wal */ + case 107: /* alter_db_optr ::= alter_db_optr wal */ yytestcase(yyruleno==107); +{ yylhsminor.yy268 = yymsp[-1].minor.yy268; yylhsminor.yy268.walLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy268 = yylhsminor.yy268; + break; + case 96: /* db_optr ::= db_optr fsync */ + case 108: /* alter_db_optr ::= alter_db_optr fsync */ yytestcase(yyruleno==108); +{ yylhsminor.yy268 = yymsp[-1].minor.yy268; yylhsminor.yy268.fsyncPeriod = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy268 = yylhsminor.yy268; + break; + case 97: /* db_optr ::= db_optr comp */ + case 106: /* alter_db_optr ::= alter_db_optr comp */ yytestcase(yyruleno==106); +{ yylhsminor.yy268 = yymsp[-1].minor.yy268; yylhsminor.yy268.compressionLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy268 = yylhsminor.yy268; + break; + case 98: /* db_optr ::= db_optr prec */ +{ yylhsminor.yy268 = yymsp[-1].minor.yy268; yylhsminor.yy268.precision = yymsp[0].minor.yy0; } + yymsp[-1].minor.yy268 = yylhsminor.yy268; + break; + case 99: /* db_optr ::= db_optr keep */ + case 104: /* alter_db_optr ::= alter_db_optr keep */ yytestcase(yyruleno==104); +{ yylhsminor.yy268 = yymsp[-1].minor.yy268; yylhsminor.yy268.keep = yymsp[0].minor.yy165; } + yymsp[-1].minor.yy268 = yylhsminor.yy268; + break; + case 100: /* db_optr ::= db_optr update */ + case 109: /* alter_db_optr ::= alter_db_optr update */ yytestcase(yyruleno==109); +{ yylhsminor.yy268 = yymsp[-1].minor.yy268; yylhsminor.yy268.update = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy268 = yylhsminor.yy268; + break; + case 101: /* alter_db_optr ::= */ +{ setDefaultCreateDbOption(&yymsp[1].minor.yy268);} + break; + case 110: /* typename ::= ids */ { yymsp[0].minor.yy0.type = 0; - tSQLSetColumnType (&yylhsminor.yy181, &yymsp[0].minor.yy0); + tSQLSetColumnType (&yylhsminor.yy223, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy181 = yylhsminor.yy181; + yymsp[0].minor.yy223 = yylhsminor.yy223; break; - case 108: /* typename ::= ids LP signed RP */ + case 111: /* typename ::= ids LP signed RP */ { - if (yymsp[-1].minor.yy271 <= 0) { + if (yymsp[-1].minor.yy207 <= 0) { yymsp[-3].minor.yy0.type = 0; - tSQLSetColumnType(&yylhsminor.yy181, &yymsp[-3].minor.yy0); + tSQLSetColumnType(&yylhsminor.yy223, &yymsp[-3].minor.yy0); } else { - yymsp[-3].minor.yy0.type = -yymsp[-1].minor.yy271; // negative value of name 
length - tSQLSetColumnType(&yylhsminor.yy181, &yymsp[-3].minor.yy0); + yymsp[-3].minor.yy0.type = -yymsp[-1].minor.yy207; // negative value of name length + tSQLSetColumnType(&yylhsminor.yy223, &yymsp[-3].minor.yy0); } } - yymsp[-3].minor.yy181 = yylhsminor.yy181; + yymsp[-3].minor.yy223 = yylhsminor.yy223; break; - case 109: /* signed ::= INTEGER */ -{ yylhsminor.yy271 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[0].minor.yy271 = yylhsminor.yy271; + case 112: /* signed ::= INTEGER */ +{ yylhsminor.yy207 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[0].minor.yy207 = yylhsminor.yy207; break; - case 110: /* signed ::= PLUS INTEGER */ -{ yymsp[-1].minor.yy271 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + case 113: /* signed ::= PLUS INTEGER */ +{ yymsp[-1].minor.yy207 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } break; - case 111: /* signed ::= MINUS INTEGER */ -{ yymsp[-1].minor.yy271 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);} + case 114: /* signed ::= MINUS INTEGER */ +{ yymsp[-1].minor.yy207 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);} break; - case 112: /* cmd ::= CREATE TABLE ifnotexists ids cpxName create_table_args */ + case 115: /* cmd ::= CREATE TABLE ifnotexists ids cpxName create_table_args */ { yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; setCreatedTableName(pInfo, &yymsp[-2].minor.yy0, &yymsp[-3].minor.yy0); } break; - case 113: /* create_table_args ::= LP columnlist RP */ + case 116: /* create_table_args ::= LP columnlist RP */ { - yymsp[-2].minor.yy374 = tSetCreateSQLElems(yymsp[-1].minor.yy449, NULL, NULL, NULL, NULL, TSQL_CREATE_TABLE); - setSQLInfo(pInfo, yymsp[-2].minor.yy374, NULL, TSDB_SQL_CREATE_TABLE); + yymsp[-2].minor.yy470 = tSetCreateSQLElems(yymsp[-1].minor.yy165, NULL, NULL, NULL, NULL, TSQL_CREATE_TABLE); + setSQLInfo(pInfo, yymsp[-2].minor.yy470, NULL, TSDB_SQL_CREATE_TABLE); } break; - case 114: /* create_table_args ::= LP columnlist RP TAGS LP columnlist RP */ + case 117: /* create_table_args ::= LP columnlist RP TAGS LP columnlist RP */ { - yymsp[-6].minor.yy374 = tSetCreateSQLElems(yymsp[-5].minor.yy449, yymsp[-1].minor.yy449, NULL, NULL, NULL, TSQL_CREATE_STABLE); - setSQLInfo(pInfo, yymsp[-6].minor.yy374, NULL, TSDB_SQL_CREATE_TABLE); + yymsp[-6].minor.yy470 = tSetCreateSQLElems(yymsp[-5].minor.yy165, yymsp[-1].minor.yy165, NULL, NULL, NULL, TSQL_CREATE_STABLE); + setSQLInfo(pInfo, yymsp[-6].minor.yy470, NULL, TSDB_SQL_CREATE_TABLE); } break; - case 115: /* create_table_args ::= USING ids cpxName TAGS LP tagitemlist RP */ + case 118: /* create_table_args ::= USING ids cpxName TAGS LP tagitemlist RP */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; - yymsp[-6].minor.yy374 = tSetCreateSQLElems(NULL, NULL, &yymsp[-5].minor.yy0, yymsp[-1].minor.yy494, NULL, TSQL_CREATE_TABLE_FROM_STABLE); - setSQLInfo(pInfo, yymsp[-6].minor.yy374, NULL, TSDB_SQL_CREATE_TABLE); + yymsp[-6].minor.yy470 = tSetCreateSQLElems(NULL, NULL, &yymsp[-5].minor.yy0, yymsp[-1].minor.yy165, NULL, TSQL_CREATE_TABLE_FROM_STABLE); + setSQLInfo(pInfo, yymsp[-6].minor.yy470, NULL, TSDB_SQL_CREATE_TABLE); } break; - case 116: /* create_table_args ::= AS select */ + case 119: /* create_table_args ::= AS select */ { - yymsp[-1].minor.yy374 = tSetCreateSQLElems(NULL, NULL, NULL, NULL, yymsp[0].minor.yy150, TSQL_CREATE_STREAM); - setSQLInfo(pInfo, yymsp[-1].minor.yy374, NULL, TSDB_SQL_CREATE_TABLE); + yymsp[-1].minor.yy470 = tSetCreateSQLElems(NULL, NULL, NULL, NULL, yymsp[0].minor.yy414, TSQL_CREATE_STREAM); + setSQLInfo(pInfo, yymsp[-1].minor.yy470, NULL, TSDB_SQL_CREATE_TABLE); } 
break; - case 117: /* columnlist ::= columnlist COMMA column */ -{yylhsminor.yy449 = tFieldListAppend(yymsp[-2].minor.yy449, &yymsp[0].minor.yy181); } - yymsp[-2].minor.yy449 = yylhsminor.yy449; + case 120: /* columnlist ::= columnlist COMMA column */ +{taosArrayPush(yymsp[-2].minor.yy165, &yymsp[0].minor.yy223); yylhsminor.yy165 = yymsp[-2].minor.yy165; } + yymsp[-2].minor.yy165 = yylhsminor.yy165; break; - case 118: /* columnlist ::= column */ -{yylhsminor.yy449 = tFieldListAppend(NULL, &yymsp[0].minor.yy181);} - yymsp[0].minor.yy449 = yylhsminor.yy449; + case 121: /* columnlist ::= column */ +{yylhsminor.yy165 = taosArrayInit(4, sizeof(TAOS_FIELD)); taosArrayPush(yylhsminor.yy165, &yymsp[0].minor.yy223);} + yymsp[0].minor.yy165 = yylhsminor.yy165; break; - case 119: /* column ::= ids typename */ + case 122: /* column ::= ids typename */ { - tSQLSetColumnInfo(&yylhsminor.yy181, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy181); + tSQLSetColumnInfo(&yylhsminor.yy223, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy223); } - yymsp[-1].minor.yy181 = yylhsminor.yy181; + yymsp[-1].minor.yy223 = yylhsminor.yy223; break; - case 120: /* tagitemlist ::= tagitemlist COMMA tagitem */ -{ yylhsminor.yy494 = tVariantListAppend(yymsp[-2].minor.yy494, &yymsp[0].minor.yy312, -1); } - yymsp[-2].minor.yy494 = yylhsminor.yy494; + case 123: /* tagitemlist ::= tagitemlist COMMA tagitem */ +{ yylhsminor.yy165 = tVariantListAppend(yymsp[-2].minor.yy165, &yymsp[0].minor.yy134, -1); } + yymsp[-2].minor.yy165 = yylhsminor.yy165; break; - case 121: /* tagitemlist ::= tagitem */ -{ yylhsminor.yy494 = tVariantListAppend(NULL, &yymsp[0].minor.yy312, -1); } - yymsp[0].minor.yy494 = yylhsminor.yy494; + case 124: /* tagitemlist ::= tagitem */ +{ yylhsminor.yy165 = tVariantListAppend(NULL, &yymsp[0].minor.yy134, -1); } + yymsp[0].minor.yy165 = yylhsminor.yy165; break; - case 122: /* tagitem ::= INTEGER */ - case 123: /* tagitem ::= FLOAT */ yytestcase(yyruleno==123); - case 124: /* tagitem ::= STRING */ yytestcase(yyruleno==124); - case 125: /* tagitem ::= BOOL */ yytestcase(yyruleno==125); -{toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yylhsminor.yy312, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy312 = yylhsminor.yy312; + case 125: /* tagitem ::= INTEGER */ + case 126: /* tagitem ::= FLOAT */ yytestcase(yyruleno==126); + case 127: /* tagitem ::= STRING */ yytestcase(yyruleno==127); + case 128: /* tagitem ::= BOOL */ yytestcase(yyruleno==128); +{toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yylhsminor.yy134, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy134 = yylhsminor.yy134; break; - case 126: /* tagitem ::= NULL */ -{ yymsp[0].minor.yy0.type = 0; tVariantCreate(&yylhsminor.yy312, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy312 = yylhsminor.yy312; + case 129: /* tagitem ::= NULL */ +{ yymsp[0].minor.yy0.type = 0; tVariantCreate(&yylhsminor.yy134, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy134 = yylhsminor.yy134; break; - case 127: /* tagitem ::= MINUS INTEGER */ - case 128: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==128); - case 129: /* tagitem ::= PLUS INTEGER */ yytestcase(yyruleno==129); - case 130: /* tagitem ::= PLUS FLOAT */ yytestcase(yyruleno==130); + case 130: /* tagitem ::= MINUS INTEGER */ + case 131: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==131); + case 132: /* tagitem ::= PLUS INTEGER */ yytestcase(yyruleno==132); + case 133: /* tagitem ::= PLUS FLOAT */ yytestcase(yyruleno==133); { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = yymsp[0].minor.yy0.type; 
toTSDBType(yymsp[-1].minor.yy0.type); - tVariantCreate(&yylhsminor.yy312, &yymsp[-1].minor.yy0); + tVariantCreate(&yylhsminor.yy134, &yymsp[-1].minor.yy0); } - yymsp[-1].minor.yy312 = yylhsminor.yy312; + yymsp[-1].minor.yy134 = yylhsminor.yy134; break; - case 131: /* select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ + case 134: /* select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ { - yylhsminor.yy150 = tSetQuerySQLElems(&yymsp[-11].minor.yy0, yymsp[-10].minor.yy224, yymsp[-9].minor.yy494, yymsp[-8].minor.yy66, yymsp[-4].minor.yy494, yymsp[-3].minor.yy494, &yymsp[-7].minor.yy314, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy494, &yymsp[0].minor.yy188, &yymsp[-1].minor.yy188); + yylhsminor.yy414 = tSetQuerySQLElems(&yymsp[-11].minor.yy0, yymsp[-10].minor.yy290, yymsp[-9].minor.yy165, yymsp[-8].minor.yy64, yymsp[-4].minor.yy165, yymsp[-3].minor.yy165, &yymsp[-7].minor.yy532, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy165, &yymsp[0].minor.yy216, &yymsp[-1].minor.yy216); } - yymsp[-11].minor.yy150 = yylhsminor.yy150; + yymsp[-11].minor.yy414 = yylhsminor.yy414; break; - case 132: /* union ::= select */ -{ yylhsminor.yy25 = setSubclause(NULL, yymsp[0].minor.yy150); } - yymsp[0].minor.yy25 = yylhsminor.yy25; + case 135: /* union ::= select */ +{ yylhsminor.yy231 = setSubclause(NULL, yymsp[0].minor.yy414); } + yymsp[0].minor.yy231 = yylhsminor.yy231; break; - case 133: /* union ::= LP union RP */ -{ yymsp[-2].minor.yy25 = yymsp[-1].minor.yy25; } + case 136: /* union ::= LP union RP */ +{ yymsp[-2].minor.yy231 = yymsp[-1].minor.yy231; } break; - case 134: /* union ::= union UNION ALL select */ -{ yylhsminor.yy25 = appendSelectClause(yymsp[-3].minor.yy25, yymsp[0].minor.yy150); } - yymsp[-3].minor.yy25 = yylhsminor.yy25; + case 137: /* union ::= union UNION ALL select */ +{ yylhsminor.yy231 = appendSelectClause(yymsp[-3].minor.yy231, yymsp[0].minor.yy414); } + yymsp[-3].minor.yy231 = yylhsminor.yy231; break; - case 135: /* union ::= union UNION ALL LP select RP */ -{ yylhsminor.yy25 = appendSelectClause(yymsp[-5].minor.yy25, yymsp[-1].minor.yy150); } - yymsp[-5].minor.yy25 = yylhsminor.yy25; + case 138: /* union ::= union UNION ALL LP select RP */ +{ yylhsminor.yy231 = appendSelectClause(yymsp[-5].minor.yy231, yymsp[-1].minor.yy414); } + yymsp[-5].minor.yy231 = yylhsminor.yy231; break; - case 136: /* cmd ::= union */ -{ setSQLInfo(pInfo, yymsp[0].minor.yy25, NULL, TSDB_SQL_SELECT); } + case 139: /* cmd ::= union */ +{ setSQLInfo(pInfo, yymsp[0].minor.yy231, NULL, TSDB_SQL_SELECT); } break; - case 137: /* select ::= SELECT selcollist */ + case 140: /* select ::= SELECT selcollist */ { - yylhsminor.yy150 = tSetQuerySQLElems(&yymsp[-1].minor.yy0, yymsp[0].minor.yy224, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + yylhsminor.yy414 = tSetQuerySQLElems(&yymsp[-1].minor.yy0, yymsp[0].minor.yy290, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); } - yymsp[-1].minor.yy150 = yylhsminor.yy150; + yymsp[-1].minor.yy414 = yylhsminor.yy414; break; - case 138: /* sclp ::= selcollist COMMA */ -{yylhsminor.yy224 = yymsp[-1].minor.yy224;} - yymsp[-1].minor.yy224 = yylhsminor.yy224; + case 141: /* sclp ::= selcollist COMMA */ +{yylhsminor.yy290 = yymsp[-1].minor.yy290;} + yymsp[-1].minor.yy290 = yylhsminor.yy290; break; - case 139: /* sclp ::= */ -{yymsp[1].minor.yy224 = 0;} + case 142: /* sclp ::= */ +{yymsp[1].minor.yy290 = 0;} break; - 
case 140: /* selcollist ::= sclp expr as */ + case 143: /* selcollist ::= sclp expr as */ { - yylhsminor.yy224 = tSQLExprListAppend(yymsp[-2].minor.yy224, yymsp[-1].minor.yy66, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0); + yylhsminor.yy290 = tSQLExprListAppend(yymsp[-2].minor.yy290, yymsp[-1].minor.yy64, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0); } - yymsp[-2].minor.yy224 = yylhsminor.yy224; + yymsp[-2].minor.yy290 = yylhsminor.yy290; break; - case 141: /* selcollist ::= sclp STAR */ + case 144: /* selcollist ::= sclp STAR */ { tSQLExpr *pNode = tSQLExprIdValueCreate(NULL, TK_ALL); - yylhsminor.yy224 = tSQLExprListAppend(yymsp[-1].minor.yy224, pNode, 0); + yylhsminor.yy290 = tSQLExprListAppend(yymsp[-1].minor.yy290, pNode, 0); } - yymsp[-1].minor.yy224 = yylhsminor.yy224; + yymsp[-1].minor.yy290 = yylhsminor.yy290; break; - case 142: /* as ::= AS ids */ + case 145: /* as ::= AS ids */ { yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } break; - case 143: /* as ::= ids */ + case 146: /* as ::= ids */ { yylhsminor.yy0 = yymsp[0].minor.yy0; } yymsp[0].minor.yy0 = yylhsminor.yy0; break; - case 144: /* as ::= */ + case 147: /* as ::= */ { yymsp[1].minor.yy0.n = 0; } break; - case 145: /* from ::= FROM tablelist */ -{yymsp[-1].minor.yy494 = yymsp[0].minor.yy494;} + case 148: /* from ::= FROM tablelist */ +{yymsp[-1].minor.yy165 = yymsp[0].minor.yy165;} break; - case 146: /* tablelist ::= ids cpxName */ + case 149: /* tablelist ::= ids cpxName */ { toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - yylhsminor.yy494 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); - yylhsminor.yy494 = tVariantListAppendToken(yylhsminor.yy494, &yymsp[-1].minor.yy0, -1); // table alias name + yylhsminor.yy165 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); + yylhsminor.yy165 = tVariantListAppendToken(yylhsminor.yy165, &yymsp[-1].minor.yy0, -1); // table alias name } - yymsp[-1].minor.yy494 = yylhsminor.yy494; + yymsp[-1].minor.yy165 = yylhsminor.yy165; break; - case 147: /* tablelist ::= ids cpxName ids */ + case 150: /* tablelist ::= ids cpxName ids */ { toTSDBType(yymsp[-2].minor.yy0.type); toTSDBType(yymsp[0].minor.yy0.type); yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; - yylhsminor.yy494 = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1); - yylhsminor.yy494 = tVariantListAppendToken(yylhsminor.yy494, &yymsp[0].minor.yy0, -1); + yylhsminor.yy165 = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1); + yylhsminor.yy165 = tVariantListAppendToken(yylhsminor.yy165, &yymsp[0].minor.yy0, -1); } - yymsp[-2].minor.yy494 = yylhsminor.yy494; + yymsp[-2].minor.yy165 = yylhsminor.yy165; break; - case 148: /* tablelist ::= tablelist COMMA ids cpxName */ + case 151: /* tablelist ::= tablelist COMMA ids cpxName */ { toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - yylhsminor.yy494 = tVariantListAppendToken(yymsp[-3].minor.yy494, &yymsp[-1].minor.yy0, -1); - yylhsminor.yy494 = tVariantListAppendToken(yylhsminor.yy494, &yymsp[-1].minor.yy0, -1); + yylhsminor.yy165 = tVariantListAppendToken(yymsp[-3].minor.yy165, &yymsp[-1].minor.yy0, -1); + yylhsminor.yy165 = tVariantListAppendToken(yylhsminor.yy165, &yymsp[-1].minor.yy0, -1); } - yymsp[-3].minor.yy494 = yylhsminor.yy494; + yymsp[-3].minor.yy165 = yylhsminor.yy165; break; - case 149: /* tablelist ::= tablelist COMMA ids cpxName ids */ + case 152: /* tablelist ::= tablelist COMMA ids cpxName ids */ { toTSDBType(yymsp[-2].minor.yy0.type); toTSDBType(yymsp[0].minor.yy0.type); 
yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; - yylhsminor.yy494 = tVariantListAppendToken(yymsp[-4].minor.yy494, &yymsp[-2].minor.yy0, -1); - yylhsminor.yy494 = tVariantListAppendToken(yylhsminor.yy494, &yymsp[0].minor.yy0, -1); + yylhsminor.yy165 = tVariantListAppendToken(yymsp[-4].minor.yy165, &yymsp[-2].minor.yy0, -1); + yylhsminor.yy165 = tVariantListAppendToken(yylhsminor.yy165, &yymsp[0].minor.yy0, -1); } - yymsp[-4].minor.yy494 = yylhsminor.yy494; + yymsp[-4].minor.yy165 = yylhsminor.yy165; break; - case 150: /* tmvar ::= VARIABLE */ + case 153: /* tmvar ::= VARIABLE */ {yylhsminor.yy0 = yymsp[0].minor.yy0;} yymsp[0].minor.yy0 = yylhsminor.yy0; break; - case 151: /* interval_opt ::= INTERVAL LP tmvar RP */ -{yymsp[-3].minor.yy314.interval = yymsp[-1].minor.yy0; yymsp[-3].minor.yy314.offset.n = 0; yymsp[-3].minor.yy314.offset.z = NULL; yymsp[-3].minor.yy314.offset.type = 0;} + case 154: /* interval_opt ::= INTERVAL LP tmvar RP */ +{yymsp[-3].minor.yy532.interval = yymsp[-1].minor.yy0; yymsp[-3].minor.yy532.offset.n = 0; yymsp[-3].minor.yy532.offset.z = NULL; yymsp[-3].minor.yy532.offset.type = 0;} break; - case 152: /* interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ -{yymsp[-5].minor.yy314.interval = yymsp[-3].minor.yy0; yymsp[-5].minor.yy314.offset = yymsp[-1].minor.yy0;} + case 155: /* interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ +{yymsp[-5].minor.yy532.interval = yymsp[-3].minor.yy0; yymsp[-5].minor.yy532.offset = yymsp[-1].minor.yy0;} break; - case 153: /* interval_opt ::= */ -{memset(&yymsp[1].minor.yy314, 0, sizeof(yymsp[1].minor.yy314));} + case 156: /* interval_opt ::= */ +{memset(&yymsp[1].minor.yy532, 0, sizeof(yymsp[1].minor.yy532));} break; - case 154: /* fill_opt ::= */ -{yymsp[1].minor.yy494 = 0; } + case 157: /* fill_opt ::= */ +{yymsp[1].minor.yy165 = 0; } break; - case 155: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + case 158: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */ { tVariant A = {0}; toTSDBType(yymsp[-3].minor.yy0.type); tVariantCreate(&A, &yymsp[-3].minor.yy0); - tVariantListInsert(yymsp[-1].minor.yy494, &A, -1, 0); - yymsp[-5].minor.yy494 = yymsp[-1].minor.yy494; + tVariantListInsert(yymsp[-1].minor.yy165, &A, -1, 0); + yymsp[-5].minor.yy165 = yymsp[-1].minor.yy165; } break; - case 156: /* fill_opt ::= FILL LP ID RP */ + case 159: /* fill_opt ::= FILL LP ID RP */ { toTSDBType(yymsp[-1].minor.yy0.type); - yymsp[-3].minor.yy494 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); + yymsp[-3].minor.yy165 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); } break; - case 157: /* sliding_opt ::= SLIDING LP tmvar RP */ + case 160: /* sliding_opt ::= SLIDING LP tmvar RP */ {yymsp[-3].minor.yy0 = yymsp[-1].minor.yy0; } break; - case 158: /* sliding_opt ::= */ + case 161: /* sliding_opt ::= */ {yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.z = NULL; yymsp[1].minor.yy0.type = 0; } break; - case 159: /* orderby_opt ::= */ - case 167: /* groupby_opt ::= */ yytestcase(yyruleno==167); -{yymsp[1].minor.yy494 = 0;} + case 162: /* orderby_opt ::= */ + case 170: /* groupby_opt ::= */ yytestcase(yyruleno==170); +{yymsp[1].minor.yy165 = 0;} break; - case 160: /* orderby_opt ::= ORDER BY sortlist */ - case 168: /* groupby_opt ::= GROUP BY grouplist */ yytestcase(yyruleno==168); -{yymsp[-2].minor.yy494 = yymsp[0].minor.yy494;} + case 163: /* orderby_opt ::= ORDER BY sortlist */ + case 171: /* groupby_opt ::= GROUP BY grouplist */ yytestcase(yyruleno==171); +{yymsp[-2].minor.yy165 = yymsp[0].minor.yy165;} break; - case 161: /* sortlist 
::= sortlist COMMA item sortorder */ + case 164: /* sortlist ::= sortlist COMMA item sortorder */ { - yylhsminor.yy494 = tVariantListAppend(yymsp[-3].minor.yy494, &yymsp[-1].minor.yy312, yymsp[0].minor.yy82); + yylhsminor.yy165 = tVariantListAppend(yymsp[-3].minor.yy165, &yymsp[-1].minor.yy134, yymsp[0].minor.yy46); } - yymsp[-3].minor.yy494 = yylhsminor.yy494; + yymsp[-3].minor.yy165 = yylhsminor.yy165; break; - case 162: /* sortlist ::= item sortorder */ + case 165: /* sortlist ::= item sortorder */ { - yylhsminor.yy494 = tVariantListAppend(NULL, &yymsp[-1].minor.yy312, yymsp[0].minor.yy82); + yylhsminor.yy165 = tVariantListAppend(NULL, &yymsp[-1].minor.yy134, yymsp[0].minor.yy46); } - yymsp[-1].minor.yy494 = yylhsminor.yy494; + yymsp[-1].minor.yy165 = yylhsminor.yy165; break; - case 163: /* item ::= ids cpxName */ + case 166: /* item ::= ids cpxName */ { toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - tVariantCreate(&yylhsminor.yy312, &yymsp[-1].minor.yy0); + tVariantCreate(&yylhsminor.yy134, &yymsp[-1].minor.yy0); } - yymsp[-1].minor.yy312 = yylhsminor.yy312; + yymsp[-1].minor.yy134 = yylhsminor.yy134; break; - case 164: /* sortorder ::= ASC */ -{yymsp[0].minor.yy82 = TSDB_ORDER_ASC; } + case 167: /* sortorder ::= ASC */ +{yymsp[0].minor.yy46 = TSDB_ORDER_ASC; } break; - case 165: /* sortorder ::= DESC */ -{yymsp[0].minor.yy82 = TSDB_ORDER_DESC;} + case 168: /* sortorder ::= DESC */ +{yymsp[0].minor.yy46 = TSDB_ORDER_DESC;} break; - case 166: /* sortorder ::= */ -{yymsp[1].minor.yy82 = TSDB_ORDER_ASC;} + case 169: /* sortorder ::= */ +{yymsp[1].minor.yy46 = TSDB_ORDER_ASC;} break; - case 169: /* grouplist ::= grouplist COMMA item */ + case 172: /* grouplist ::= grouplist COMMA item */ { - yylhsminor.yy494 = tVariantListAppend(yymsp[-2].minor.yy494, &yymsp[0].minor.yy312, -1); + yylhsminor.yy165 = tVariantListAppend(yymsp[-2].minor.yy165, &yymsp[0].minor.yy134, -1); } - yymsp[-2].minor.yy494 = yylhsminor.yy494; + yymsp[-2].minor.yy165 = yylhsminor.yy165; break; - case 170: /* grouplist ::= item */ + case 173: /* grouplist ::= item */ { - yylhsminor.yy494 = tVariantListAppend(NULL, &yymsp[0].minor.yy312, -1); + yylhsminor.yy165 = tVariantListAppend(NULL, &yymsp[0].minor.yy134, -1); } - yymsp[0].minor.yy494 = yylhsminor.yy494; + yymsp[0].minor.yy165 = yylhsminor.yy165; + break; + case 174: /* having_opt ::= */ + case 184: /* where_opt ::= */ yytestcase(yyruleno==184); + case 222: /* expritem ::= */ yytestcase(yyruleno==222); +{yymsp[1].minor.yy64 = 0;} + break; + case 175: /* having_opt ::= HAVING expr */ + case 185: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==185); +{yymsp[-1].minor.yy64 = yymsp[0].minor.yy64;} + break; + case 176: /* limit_opt ::= */ + case 180: /* slimit_opt ::= */ yytestcase(yyruleno==180); +{yymsp[1].minor.yy216.limit = -1; yymsp[1].minor.yy216.offset = 0;} break; - case 171: /* having_opt ::= */ - case 181: /* where_opt ::= */ yytestcase(yyruleno==181); - case 219: /* expritem ::= */ yytestcase(yyruleno==219); -{yymsp[1].minor.yy66 = 0;} + case 177: /* limit_opt ::= LIMIT signed */ +{yymsp[-1].minor.yy216.limit = yymsp[0].minor.yy207; yymsp[-1].minor.yy216.offset = 0;} break; - case 172: /* having_opt ::= HAVING expr */ - case 182: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==182); -{yymsp[-1].minor.yy66 = yymsp[0].minor.yy66;} + case 178: /* limit_opt ::= LIMIT signed OFFSET signed */ +{yymsp[-3].minor.yy216.limit = yymsp[-2].minor.yy207; yymsp[-3].minor.yy216.offset = yymsp[0].minor.yy207;} break; - case 173: 
/* limit_opt ::= */ - case 177: /* slimit_opt ::= */ yytestcase(yyruleno==177); -{yymsp[1].minor.yy188.limit = -1; yymsp[1].minor.yy188.offset = 0;} + case 179: /* limit_opt ::= LIMIT signed COMMA signed */ +{yymsp[-3].minor.yy216.limit = yymsp[0].minor.yy207; yymsp[-3].minor.yy216.offset = yymsp[-2].minor.yy207;} break; - case 174: /* limit_opt ::= LIMIT signed */ - case 178: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==178); -{yymsp[-1].minor.yy188.limit = yymsp[0].minor.yy271; yymsp[-1].minor.yy188.offset = 0;} + case 181: /* slimit_opt ::= SLIMIT signed */ +{yymsp[-1].minor.yy216.limit = yymsp[0].minor.yy207; yymsp[-1].minor.yy216.offset = 0;} break; - case 175: /* limit_opt ::= LIMIT signed OFFSET signed */ - case 179: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ yytestcase(yyruleno==179); -{yymsp[-3].minor.yy188.limit = yymsp[-2].minor.yy271; yymsp[-3].minor.yy188.offset = yymsp[0].minor.yy271;} + case 182: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ +{yymsp[-3].minor.yy216.limit = yymsp[-2].minor.yy207; yymsp[-3].minor.yy216.offset = yymsp[0].minor.yy207;} break; - case 176: /* limit_opt ::= LIMIT signed COMMA signed */ - case 180: /* slimit_opt ::= SLIMIT signed COMMA signed */ yytestcase(yyruleno==180); -{yymsp[-3].minor.yy188.limit = yymsp[0].minor.yy271; yymsp[-3].minor.yy188.offset = yymsp[-2].minor.yy271;} + case 183: /* slimit_opt ::= SLIMIT signed COMMA signed */ +{yymsp[-3].minor.yy216.limit = yymsp[0].minor.yy207; yymsp[-3].minor.yy216.offset = yymsp[-2].minor.yy207;} break; - case 183: /* expr ::= LP expr RP */ -{yymsp[-2].minor.yy66 = yymsp[-1].minor.yy66; } + case 186: /* expr ::= LP expr RP */ +{yymsp[-2].minor.yy64 = yymsp[-1].minor.yy64; } break; - case 184: /* expr ::= ID */ -{yylhsminor.yy66 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_ID);} - yymsp[0].minor.yy66 = yylhsminor.yy66; + case 187: /* expr ::= ID */ +{yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_ID);} + yymsp[0].minor.yy64 = yylhsminor.yy64; break; - case 185: /* expr ::= ID DOT ID */ -{yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy66 = tSQLExprIdValueCreate(&yymsp[-2].minor.yy0, TK_ID);} - yymsp[-2].minor.yy66 = yylhsminor.yy66; + case 188: /* expr ::= ID DOT ID */ +{yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[-2].minor.yy0, TK_ID);} + yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 186: /* expr ::= ID DOT STAR */ -{yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy66 = tSQLExprIdValueCreate(&yymsp[-2].minor.yy0, TK_ALL);} - yymsp[-2].minor.yy66 = yylhsminor.yy66; + case 189: /* expr ::= ID DOT STAR */ +{yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[-2].minor.yy0, TK_ALL);} + yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 187: /* expr ::= INTEGER */ -{yylhsminor.yy66 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_INTEGER);} - yymsp[0].minor.yy66 = yylhsminor.yy66; + case 190: /* expr ::= INTEGER */ +{yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_INTEGER);} + yymsp[0].minor.yy64 = yylhsminor.yy64; break; - case 188: /* expr ::= MINUS INTEGER */ - case 189: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==189); -{yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy66 = tSQLExprIdValueCreate(&yymsp[-1].minor.yy0, TK_INTEGER);} - yymsp[-1].minor.yy66 = yylhsminor.yy66; + case 191: /* expr ::= MINUS INTEGER */ + case 192: /* expr ::= PLUS INTEGER */ 
yytestcase(yyruleno==192); +{yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[-1].minor.yy0, TK_INTEGER);} + yymsp[-1].minor.yy64 = yylhsminor.yy64; break; - case 190: /* expr ::= FLOAT */ -{yylhsminor.yy66 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_FLOAT);} - yymsp[0].minor.yy66 = yylhsminor.yy66; + case 193: /* expr ::= FLOAT */ +{yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_FLOAT);} + yymsp[0].minor.yy64 = yylhsminor.yy64; break; - case 191: /* expr ::= MINUS FLOAT */ - case 192: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==192); -{yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yylhsminor.yy66 = tSQLExprIdValueCreate(&yymsp[-1].minor.yy0, TK_FLOAT);} - yymsp[-1].minor.yy66 = yylhsminor.yy66; + case 194: /* expr ::= MINUS FLOAT */ + case 195: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==195); +{yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[-1].minor.yy0, TK_FLOAT);} + yymsp[-1].minor.yy64 = yylhsminor.yy64; break; - case 193: /* expr ::= STRING */ -{yylhsminor.yy66 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_STRING);} - yymsp[0].minor.yy66 = yylhsminor.yy66; + case 196: /* expr ::= STRING */ +{yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_STRING);} + yymsp[0].minor.yy64 = yylhsminor.yy64; break; - case 194: /* expr ::= NOW */ -{yylhsminor.yy66 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_NOW); } - yymsp[0].minor.yy66 = yylhsminor.yy66; + case 197: /* expr ::= NOW */ +{yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_NOW); } + yymsp[0].minor.yy64 = yylhsminor.yy64; break; - case 195: /* expr ::= VARIABLE */ -{yylhsminor.yy66 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_VARIABLE);} - yymsp[0].minor.yy66 = yylhsminor.yy66; + case 198: /* expr ::= VARIABLE */ +{yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_VARIABLE);} + yymsp[0].minor.yy64 = yylhsminor.yy64; break; - case 196: /* expr ::= BOOL */ -{yylhsminor.yy66 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_BOOL);} - yymsp[0].minor.yy66 = yylhsminor.yy66; + case 199: /* expr ::= BOOL */ +{yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_BOOL);} + yymsp[0].minor.yy64 = yylhsminor.yy64; break; - case 197: /* expr ::= ID LP exprlist RP */ -{ yylhsminor.yy66 = tSQLExprCreateFunction(yymsp[-1].minor.yy224, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } - yymsp[-3].minor.yy66 = yylhsminor.yy66; + case 200: /* expr ::= ID LP exprlist RP */ +{ yylhsminor.yy64 = tSQLExprCreateFunction(yymsp[-1].minor.yy290, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } + yymsp[-3].minor.yy64 = yylhsminor.yy64; break; - case 198: /* expr ::= ID LP STAR RP */ -{ yylhsminor.yy66 = tSQLExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } - yymsp[-3].minor.yy66 = yylhsminor.yy66; + case 201: /* expr ::= ID LP STAR RP */ +{ yylhsminor.yy64 = tSQLExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } + yymsp[-3].minor.yy64 = yylhsminor.yy64; break; - case 199: /* expr ::= expr IS NULL */ -{yylhsminor.yy66 = tSQLExprCreate(yymsp[-2].minor.yy66, NULL, TK_ISNULL);} - yymsp[-2].minor.yy66 = yylhsminor.yy66; + case 202: /* expr ::= expr IS NULL */ +{yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, NULL, TK_ISNULL);} + 
yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 200: /* expr ::= expr IS NOT NULL */ -{yylhsminor.yy66 = tSQLExprCreate(yymsp[-3].minor.yy66, NULL, TK_NOTNULL);} - yymsp[-3].minor.yy66 = yylhsminor.yy66; + case 203: /* expr ::= expr IS NOT NULL */ +{yylhsminor.yy64 = tSQLExprCreate(yymsp[-3].minor.yy64, NULL, TK_NOTNULL);} + yymsp[-3].minor.yy64 = yylhsminor.yy64; break; - case 201: /* expr ::= expr LT expr */ -{yylhsminor.yy66 = tSQLExprCreate(yymsp[-2].minor.yy66, yymsp[0].minor.yy66, TK_LT);} - yymsp[-2].minor.yy66 = yylhsminor.yy66; + case 204: /* expr ::= expr LT expr */ +{yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_LT);} + yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 202: /* expr ::= expr GT expr */ -{yylhsminor.yy66 = tSQLExprCreate(yymsp[-2].minor.yy66, yymsp[0].minor.yy66, TK_GT);} - yymsp[-2].minor.yy66 = yylhsminor.yy66; + case 205: /* expr ::= expr GT expr */ +{yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_GT);} + yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 203: /* expr ::= expr LE expr */ -{yylhsminor.yy66 = tSQLExprCreate(yymsp[-2].minor.yy66, yymsp[0].minor.yy66, TK_LE);} - yymsp[-2].minor.yy66 = yylhsminor.yy66; + case 206: /* expr ::= expr LE expr */ +{yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_LE);} + yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 204: /* expr ::= expr GE expr */ -{yylhsminor.yy66 = tSQLExprCreate(yymsp[-2].minor.yy66, yymsp[0].minor.yy66, TK_GE);} - yymsp[-2].minor.yy66 = yylhsminor.yy66; + case 207: /* expr ::= expr GE expr */ +{yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_GE);} + yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 205: /* expr ::= expr NE expr */ -{yylhsminor.yy66 = tSQLExprCreate(yymsp[-2].minor.yy66, yymsp[0].minor.yy66, TK_NE);} - yymsp[-2].minor.yy66 = yylhsminor.yy66; + case 208: /* expr ::= expr NE expr */ +{yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_NE);} + yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 206: /* expr ::= expr EQ expr */ -{yylhsminor.yy66 = tSQLExprCreate(yymsp[-2].minor.yy66, yymsp[0].minor.yy66, TK_EQ);} - yymsp[-2].minor.yy66 = yylhsminor.yy66; + case 209: /* expr ::= expr EQ expr */ +{yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_EQ);} + yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 207: /* expr ::= expr AND expr */ -{yylhsminor.yy66 = tSQLExprCreate(yymsp[-2].minor.yy66, yymsp[0].minor.yy66, TK_AND);} - yymsp[-2].minor.yy66 = yylhsminor.yy66; + case 210: /* expr ::= expr AND expr */ +{yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_AND);} + yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 208: /* expr ::= expr OR expr */ -{yylhsminor.yy66 = tSQLExprCreate(yymsp[-2].minor.yy66, yymsp[0].minor.yy66, TK_OR); } - yymsp[-2].minor.yy66 = yylhsminor.yy66; + case 211: /* expr ::= expr OR expr */ +{yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_OR); } + yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 209: /* expr ::= expr PLUS expr */ -{yylhsminor.yy66 = tSQLExprCreate(yymsp[-2].minor.yy66, yymsp[0].minor.yy66, TK_PLUS); } - yymsp[-2].minor.yy66 = yylhsminor.yy66; + case 212: /* expr ::= expr PLUS expr */ +{yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_PLUS); } + yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 210: /* expr ::= expr MINUS expr */ 
-{yylhsminor.yy66 = tSQLExprCreate(yymsp[-2].minor.yy66, yymsp[0].minor.yy66, TK_MINUS); } - yymsp[-2].minor.yy66 = yylhsminor.yy66; + case 213: /* expr ::= expr MINUS expr */ +{yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_MINUS); } + yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 211: /* expr ::= expr STAR expr */ -{yylhsminor.yy66 = tSQLExprCreate(yymsp[-2].minor.yy66, yymsp[0].minor.yy66, TK_STAR); } - yymsp[-2].minor.yy66 = yylhsminor.yy66; + case 214: /* expr ::= expr STAR expr */ +{yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_STAR); } + yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 212: /* expr ::= expr SLASH expr */ -{yylhsminor.yy66 = tSQLExprCreate(yymsp[-2].minor.yy66, yymsp[0].minor.yy66, TK_DIVIDE);} - yymsp[-2].minor.yy66 = yylhsminor.yy66; + case 215: /* expr ::= expr SLASH expr */ +{yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_DIVIDE);} + yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 213: /* expr ::= expr REM expr */ -{yylhsminor.yy66 = tSQLExprCreate(yymsp[-2].minor.yy66, yymsp[0].minor.yy66, TK_REM); } - yymsp[-2].minor.yy66 = yylhsminor.yy66; + case 216: /* expr ::= expr REM expr */ +{yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_REM); } + yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 214: /* expr ::= expr LIKE expr */ -{yylhsminor.yy66 = tSQLExprCreate(yymsp[-2].minor.yy66, yymsp[0].minor.yy66, TK_LIKE); } - yymsp[-2].minor.yy66 = yylhsminor.yy66; + case 217: /* expr ::= expr LIKE expr */ +{yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_LIKE); } + yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 215: /* expr ::= expr IN LP exprlist RP */ -{yylhsminor.yy66 = tSQLExprCreate(yymsp[-4].minor.yy66, (tSQLExpr*)yymsp[-1].minor.yy224, TK_IN); } - yymsp[-4].minor.yy66 = yylhsminor.yy66; + case 218: /* expr ::= expr IN LP exprlist RP */ +{yylhsminor.yy64 = tSQLExprCreate(yymsp[-4].minor.yy64, (tSQLExpr*)yymsp[-1].minor.yy290, TK_IN); } + yymsp[-4].minor.yy64 = yylhsminor.yy64; break; - case 216: /* exprlist ::= exprlist COMMA expritem */ -{yylhsminor.yy224 = tSQLExprListAppend(yymsp[-2].minor.yy224,yymsp[0].minor.yy66,0);} - yymsp[-2].minor.yy224 = yylhsminor.yy224; + case 219: /* exprlist ::= exprlist COMMA expritem */ +{yylhsminor.yy290 = tSQLExprListAppend(yymsp[-2].minor.yy290,yymsp[0].minor.yy64,0);} + yymsp[-2].minor.yy290 = yylhsminor.yy290; break; - case 217: /* exprlist ::= expritem */ -{yylhsminor.yy224 = tSQLExprListAppend(0,yymsp[0].minor.yy66,0);} - yymsp[0].minor.yy224 = yylhsminor.yy224; + case 220: /* exprlist ::= expritem */ +{yylhsminor.yy290 = tSQLExprListAppend(0,yymsp[0].minor.yy64,0);} + yymsp[0].minor.yy290 = yylhsminor.yy290; break; - case 218: /* expritem ::= expr */ -{yylhsminor.yy66 = yymsp[0].minor.yy66;} - yymsp[0].minor.yy66 = yylhsminor.yy66; + case 221: /* expritem ::= expr */ +{yylhsminor.yy64 = yymsp[0].minor.yy64;} + yymsp[0].minor.yy64 = yylhsminor.yy64; break; - case 220: /* cmd ::= RESET QUERY CACHE */ + case 223: /* cmd ::= RESET QUERY CACHE */ { setDCLSQLElems(pInfo, TSDB_SQL_RESET_CACHE, 0);} break; - case 221: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + case 224: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy449, NULL, TSDB_ALTER_TABLE_ADD_COLUMN); + SAlterTableSQL* 
pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy165, NULL, TSDB_ALTER_TABLE_ADD_COLUMN); setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 222: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + case 225: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; toTSDBType(yymsp[0].minor.yy0.type); - tVariantList* K = tVariantListAppendToken(NULL, &yymsp[0].minor.yy0, -1); + SArray* K = tVariantListAppendToken(NULL, &yymsp[0].minor.yy0, -1); SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN); setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 223: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + case 226: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy449, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN); + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy165, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN); setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 224: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + case 227: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; toTSDBType(yymsp[0].minor.yy0.type); - tVariantList* A = tVariantListAppendToken(NULL, &yymsp[0].minor.yy0, -1); + SArray* A = tVariantListAppendToken(NULL, &yymsp[0].minor.yy0, -1); SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN); setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 225: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + case 228: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; toTSDBType(yymsp[-1].minor.yy0.type); - tVariantList* A = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); + SArray* A = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); toTSDBType(yymsp[0].minor.yy0.type); A = tVariantListAppendToken(A, &yymsp[0].minor.yy0, -1); @@ -2818,25 +2835,25 @@ static void yy_reduce( setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 226: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + case 229: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ { yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n; toTSDBType(yymsp[-2].minor.yy0.type); - tVariantList* A = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1); - A = tVariantListAppend(A, &yymsp[0].minor.yy312, -1); + SArray* A = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1); + A = tVariantListAppend(A, &yymsp[0].minor.yy134, -1); SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-6].minor.yy0, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL); setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 227: /* cmd ::= KILL CONNECTION INTEGER */ + case 230: /* cmd ::= KILL CONNECTION INTEGER */ {setKillSQL(pInfo, TSDB_SQL_KILL_CONNECTION, &yymsp[0].minor.yy0);} break; - case 228: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ + case 231: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSQL(pInfo, TSDB_SQL_KILL_STREAM, &yymsp[-2].minor.yy0);} break; - case 229: /* cmd ::= KILL QUERY INTEGER COLON 
INTEGER */ + case 232: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSQL(pInfo, TSDB_SQL_KILL_QUERY, &yymsp[-2].minor.yy0);} break; default: diff --git a/src/query/tests/tsBufTest.cpp b/src/query/tests/tsBufTest.cpp index b78c5314f243874e348748a6434d224592489528..bada3194bd90e30de465dceaef07742cf6e8ac07 100644 --- a/src/query/tests/tsBufTest.cpp +++ b/src/query/tests/tsBufTest.cpp @@ -42,7 +42,7 @@ void simpleTest() { EXPECT_EQ(pTSBuf->tsData.len, sizeof(int64_t) * num); EXPECT_EQ(tVariantCompare(&pTSBuf->block.tag, &t), 0); - EXPECT_EQ(pTSBuf->numOfVnodes, 1); + EXPECT_EQ(pTSBuf->numOfGroups, 1); tsBufFlush(pTSBuf); EXPECT_EQ(pTSBuf->tsData.len, 0); @@ -69,7 +69,7 @@ void largeTSTest() { // the data has been flush to disk, no data in cache EXPECT_EQ(pTSBuf->tsData.len, 0); EXPECT_EQ(tVariantCompare(&pTSBuf->block.tag, &t), 0); - EXPECT_EQ(pTSBuf->numOfVnodes, 1); + EXPECT_EQ(pTSBuf->numOfGroups, 1); EXPECT_EQ(pTSBuf->tsOrder, TSDB_ORDER_ASC); tsBufFlush(pTSBuf); @@ -105,7 +105,7 @@ void multiTagsTest() { EXPECT_EQ(pTSBuf->tsData.len, num * sizeof(int64_t)); EXPECT_EQ(pTSBuf->block.tag.i64Key, numOfTags - 1); - EXPECT_EQ(pTSBuf->numOfVnodes, 1); + EXPECT_EQ(pTSBuf->numOfGroups, 1); tsBufFlush(pTSBuf); EXPECT_EQ(pTSBuf->tsData.len, 0); @@ -139,7 +139,7 @@ void multiVnodeTagsTest() { start += step * num; } - EXPECT_EQ(pTSBuf->numOfVnodes, j + 1); + EXPECT_EQ(pTSBuf->numOfGroups, j + 1); } EXPECT_EQ(pTSBuf->tsOrder, TSDB_ORDER_ASC); @@ -184,7 +184,7 @@ void loadDataTest() { start += step * num; } - EXPECT_EQ(pTSBuf->numOfVnodes, j + 1); + EXPECT_EQ(pTSBuf->numOfGroups, j + 1); } EXPECT_EQ(pTSBuf->tsOrder, TSDB_ORDER_ASC); @@ -203,7 +203,7 @@ void loadDataTest() { // create from exists file STSBuf* pNewBuf = tsBufCreateFromFile(pTSBuf->path, false); EXPECT_EQ(pNewBuf->tsOrder, pTSBuf->tsOrder); - EXPECT_EQ(pNewBuf->numOfVnodes, numOfVnode); + EXPECT_EQ(pNewBuf->numOfGroups, numOfVnode); EXPECT_EQ(pNewBuf->fileSize, pTSBuf->fileSize); EXPECT_EQ(pNewBuf->pData[0].info.offset, pTSBuf->pData[0].info.offset); @@ -269,7 +269,7 @@ void TSTraverse() { start += step * num; } - EXPECT_EQ(pTSBuf->numOfVnodes, j + 1); + EXPECT_EQ(pTSBuf->numOfGroups, j + 1); } tsBufResetPos(pTSBuf); @@ -304,7 +304,7 @@ void TSTraverse() { int32_t totalOutput = 10; while (1) { STSElem elem = tsBufGetElem(pTSBuf); - printf("%d-%" PRIu64 "-%" PRIu64 "\n", elem.vnode, elem.tag.i64Key, elem.ts); + printf("%d-%" PRIu64 "-%" PRIu64 "\n", elem.id, elem.tag->i64Key, elem.ts); if (!tsBufNextPos(pTSBuf)) { break; @@ -352,7 +352,7 @@ void TSTraverse() { totalOutput = 10; while (1) { STSElem elem = tsBufGetElem(pTSBuf); - printf("%d-%" PRIu64 "-%" PRIu64 "\n", elem.vnode, elem.tag.i64Key, elem.ts); + printf("%d-%" PRIu64 "-%" PRIu64 "\n", elem.id, elem.tag->i64Key, elem.ts); if (!tsBufNextPos(pTSBuf)) { break; @@ -416,8 +416,8 @@ void mergeDiffVnodeBufferTest() { int64_t* list = createTsList(num, start, step); t.i64Key = i; - tsBufAppend(pTSBuf1, 0, &t, (const char*)list, num * sizeof(int64_t)); - tsBufAppend(pTSBuf2, 0, &t, (const char*)list, num * sizeof(int64_t)); + tsBufAppend(pTSBuf1, 1, &t, (const char*)list, num * sizeof(int64_t)); + tsBufAppend(pTSBuf2, 9, &t, (const char*)list, num * sizeof(int64_t)); free(list); @@ -426,8 +426,8 @@ void mergeDiffVnodeBufferTest() { tsBufFlush(pTSBuf2); - tsBufMerge(pTSBuf1, pTSBuf2, 9); - EXPECT_EQ(pTSBuf1->numOfVnodes, 2); + tsBufMerge(pTSBuf1, pTSBuf2); + EXPECT_EQ(pTSBuf1->numOfGroups, 2); 
EXPECT_EQ(pTSBuf1->numOfTotal, numOfTags * 2 * num); tsBufDisplay(pTSBuf1); @@ -459,8 +459,6 @@ void mergeIdenticalVnodeBufferTest() { start += step * num; } - - for (int32_t i = numOfTags; i < numOfTags * 2; ++i) { int64_t* list = createTsList(num, start, step); @@ -473,16 +471,23 @@ void mergeIdenticalVnodeBufferTest() { tsBufFlush(pTSBuf2); - tsBufMerge(pTSBuf1, pTSBuf2, 12); - EXPECT_EQ(pTSBuf1->numOfVnodes, 1); + tsBufMerge(pTSBuf1, pTSBuf2); + EXPECT_EQ(pTSBuf1->numOfGroups, 2); EXPECT_EQ(pTSBuf1->numOfTotal, numOfTags * 2 * num); tsBufResetPos(pTSBuf1); + + int32_t count = 0; while (tsBufNextPos(pTSBuf1)) { STSElem elem = tsBufGetElem(pTSBuf1); - EXPECT_EQ(elem.vnode, 12); - printf("%d-%" PRIu64 "-%" PRIu64 "\n", elem.vnode, elem.tag.i64Key, elem.ts); + if (count++ < numOfTags * num) { + EXPECT_EQ(elem.id, 12); + } else { + EXPECT_EQ(elem.id, 77); + } + + printf("%d-%" PRIu64 "-%" PRIu64 "\n", elem.id, elem.tag->i64Key, elem.ts); } tsBufDestroy(pTSBuf1); diff --git a/src/rpc/src/rpcCache.c b/src/rpc/src/rpcCache.c index 46b0d4e3bb9428f98c1022556f2f57c6ec0e9a14..09d8f3bff1faad5596d85e931adea7f83670a48a 100644 --- a/src/rpc/src/rpcCache.c +++ b/src/rpc/src/rpcCache.c @@ -101,9 +101,9 @@ void rpcCloseConnCache(void *handle) { if (pCache->connHashMemPool) taosMemPoolCleanUp(pCache->connHashMemPool); - taosTFree(pCache->connHashList); - taosTFree(pCache->count); - taosTFree(pCache->lockedBy); + tfree(pCache->connHashList); + tfree(pCache->count); + tfree(pCache->lockedBy); pthread_mutex_unlock(&pCache->mutex); diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index 6e9088d9fb82a80bb28857d563ad51f19594e6b7..acceaf9d7a63f3378afbc8d8f485f14f77b50af6 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -20,6 +20,7 @@ #include "ttimer.h" #include "tutil.h" #include "lz4.h" +#include "tref.h" #include "taoserror.h" #include "tsocket.h" #include "tglobal.h" @@ -72,7 +73,6 @@ typedef struct { SRpcInfo *pRpc; // associated SRpcInfo SRpcEpSet epSet; // ip list provided by app void *ahandle; // handle provided by app - void *signature; // for validation struct SRpcConn *pConn; // pConn allocated char msgType; // message type uint8_t *pCont; // content provided by app @@ -82,6 +82,7 @@ typedef struct { int8_t oldInUse; // server EP inUse passed by app int8_t redirect; // flag to indicate redirect int8_t connType; // connection type + int64_t rid; // refId returned by taosAddRef SRpcMsg *pRsp; // for synchronous API tsem_t *pSem; // for synchronous API SRpcEpSet *pSet; // for synchronous API @@ -132,6 +133,10 @@ int tsRpcMaxRetry; int tsRpcHeadSize; int tsRpcOverhead; +static int tsRpcRefId = -1; +static int32_t tsRpcNum = 0; +//static pthread_once_t tsRpcInit = PTHREAD_ONCE_INIT; + // server:0 client:1 tcp:2 udp:0 #define RPC_CONN_UDPS 0 #define RPC_CONN_UDPC 1 @@ -211,14 +216,32 @@ static void rpcUnlockConn(SRpcConn *pConn); static void rpcAddRef(SRpcInfo *pRpc); static void rpcDecRef(SRpcInfo *pRpc); -void *rpcOpen(const SRpcInit *pInit) { - SRpcInfo *pRpc; +static void rpcFree(void *p) { + tTrace("free mem: %p", p); + free(p); +} +int32_t rpcInit(void) { tsProgressTimer = tsRpcTimer/2; tsRpcMaxRetry = tsRpcMaxTime * 1000/tsProgressTimer; tsRpcHeadSize = RPC_MSG_OVERHEAD; tsRpcOverhead = sizeof(SRpcReqContext); + tsRpcRefId = taosOpenRef(200, rpcFree); + + return 0; +} + +void rpcCleanup(void) { + taosCloseRef(tsRpcRefId); + tsRpcRefId = -1; +} + +void *rpcOpen(const SRpcInit *pInit) { + SRpcInfo *pRpc; + + //pthread_once(&tsRpcInit, rpcInit); + pRpc = (SRpcInfo *)calloc(1, 
sizeof(SRpcInfo)); if (pRpc == NULL) return NULL; @@ -237,6 +260,8 @@ void *rpcOpen(const SRpcInit *pInit) { pRpc->afp = pInit->afp; pRpc->refCount = 1; + atomic_add_fetch_32(&tsRpcNum, 1); + size_t size = sizeof(SRpcConn) * pRpc->sessions; pRpc->connList = (SRpcConn *)calloc(1, size); if (pRpc->connList == NULL) { @@ -323,7 +348,7 @@ void *rpcMallocCont(int contLen) { tError("failed to malloc msg, size:%d", size); return NULL; } else { - tTrace("malloc mem: %p", start); + tTrace("malloc mem:%p size:%d", start, size); } return start + sizeof(SRpcReqContext) + sizeof(SRpcHead); @@ -356,14 +381,13 @@ void *rpcReallocCont(void *ptr, int contLen) { return start + sizeof(SRpcReqContext) + sizeof(SRpcHead); } -void rpcSendRequest(void *shandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg) { +void rpcSendRequest(void *shandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg, int64_t *pRid) { SRpcInfo *pRpc = (SRpcInfo *)shandle; SRpcReqContext *pContext; int contLen = rpcCompressRpcMsg(pMsg->pCont, pMsg->contLen); pContext = (SRpcReqContext *) ((char*)pMsg->pCont-sizeof(SRpcHead)-sizeof(SRpcReqContext)); pContext->ahandle = pMsg->ahandle; - pContext->signature = pContext; pContext->pRpc = (SRpcInfo *)shandle; pContext->epSet = *pEpSet; pContext->contLen = contLen; @@ -383,12 +407,10 @@ void rpcSendRequest(void *shandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg) { || type == TSDB_MSG_TYPE_CM_SHOW ) pContext->connType = RPC_CONN_TCPC; - // set the handle to pContext, so app can cancel the request - if (pMsg->handle) *((void **)pMsg->handle) = pContext; + pContext->rid = taosAddRef(tsRpcRefId, pContext); + if (pRid) *pRid = pContext->rid; rpcSendReqToServer(pRpc, pContext); - - return; } void rpcSendResponse(const SRpcMsg *pRsp) { @@ -504,7 +526,7 @@ void rpcSendRecv(void *shandle, SRpcEpSet *pEpSet, SRpcMsg *pMsg, SRpcMsg *pRsp) pContext->pRsp = pRsp; pContext->pSet = pEpSet; - rpcSendRequest(shandle, pEpSet, pMsg); + rpcSendRequest(shandle, pEpSet, pMsg, NULL); tsem_wait(&sem); tsem_destroy(&sem); @@ -533,17 +555,14 @@ int rpcReportProgress(void *handle, char *pCont, int contLen) { return code; } -void rpcCancelRequest(void *handle) { - SRpcReqContext *pContext = handle; +void rpcCancelRequest(int64_t rid) { - // signature is used to check if pContext is freed. 
- // pContext may have been released just before app calls the rpcCancelRequest - if (pContext == NULL || pContext->signature != pContext) return; + SRpcReqContext *pContext = taosAcquireRef(tsRpcRefId, rid); + if (pContext == NULL) return; - if (pContext->pConn) { - tDebug("%s, app tries to cancel request", pContext->pConn->info); - rpcCloseConn(pContext->pConn); - } + rpcCloseConn(pContext->pConn); + + taosReleaseRef(tsRpcRefId, rid); } static void rpcFreeMsg(void *msg) { @@ -612,7 +631,7 @@ static void rpcReleaseConn(SRpcConn *pConn) { // if there is an outgoing message, free it if (pConn->outType && pConn->pReqMsg) { if (pConn->pContext) pConn->pContext->pConn = NULL; - rpcFreeMsg(pConn->pReqMsg); + taosRemoveRef(tsRpcRefId, pConn->pContext->rid); } } @@ -636,6 +655,7 @@ static void rpcReleaseConn(SRpcConn *pConn) { static void rpcCloseConn(void *thandle) { SRpcConn *pConn = (SRpcConn *)thandle; + if (pConn == NULL) return; rpcLockConn(pConn); @@ -1007,6 +1027,7 @@ static void rpcProcessBrokenLink(SRpcConn *pConn) { if (pConn->outType) { SRpcReqContext *pContext = pConn->pContext; pContext->code = TSDB_CODE_RPC_NETWORK_UNAVAIL; + pContext->pConn = NULL; pConn->pReqMsg = NULL; taosTmrStart(rpcProcessConnError, 0, pContext, pRpc->tmrCtrl); } @@ -1057,6 +1078,13 @@ static void *rpcProcessMsgFromPeer(SRecvInfo *pRecv) { tDebug("%s %p %p, %s is sent with error code:0x%x", pRpc->label, pConn, (void *)pHead->ahandle, taosMsg[pHead->msgType+1], code); } } else { // msg is passed to app only parsing is ok + + if (pHead->msgType == TSDB_MSG_TYPE_NETWORK_TEST) { + rpcSendQuickRsp(pConn, TSDB_CODE_SUCCESS); + rpcFreeMsg(pRecv->msg); + return pConn; + } + rpcProcessIncomingMsg(pConn, pHead, pContext); } } @@ -1068,7 +1096,6 @@ static void *rpcProcessMsgFromPeer(SRecvInfo *pRecv) { static void rpcNotifyClient(SRpcReqContext *pContext, SRpcMsg *pMsg) { SRpcInfo *pRpc = pContext->pRpc; - pContext->signature = NULL; pContext->pConn = NULL; if (pContext->pRsp) { // for synchronous API @@ -1085,7 +1112,7 @@ static void rpcNotifyClient(SRpcReqContext *pContext, SRpcMsg *pMsg) { } // free the request message - rpcFreeCont(pContext->pCont); + taosRemoveRef(tsRpcRefId, pContext->rid); } static void rpcProcessIncomingMsg(SRpcConn *pConn, SRpcHead *pHead, SRpcReqContext *pContext) { @@ -1110,6 +1137,7 @@ static void rpcProcessIncomingMsg(SRpcConn *pConn, SRpcHead *pHead, SRpcReqConte // it's a response rpcMsg.handle = pContext; rpcMsg.ahandle = pContext->ahandle; + pContext->pConn = NULL; // for UDP, port may be changed by server, the port in epSet shall be used for cache if (pHead->code != TSDB_CODE_RPC_TOO_SLOW) { @@ -1345,6 +1373,7 @@ static void rpcProcessRetryTimer(void *param, void *tmrId) { tDebug("%s, failed to send msg:%s to %s:%hu", pConn->info, taosMsg[pConn->outType], pConn->peerFqdn, pConn->peerPort); if (pConn->pContext) { pConn->pContext->code = TSDB_CODE_RPC_NETWORK_UNAVAIL; + pConn->pContext->pConn = NULL; pConn->pReqMsg = NULL; taosTmrStart(rpcProcessConnError, 0, pConn->pContext, pRpc->tmrCtrl); rpcReleaseConn(pConn); @@ -1453,7 +1482,7 @@ static SRpcHead *rpcDecompressRpcMsg(SRpcHead *pHead) { pNewHead->msgLen = rpcMsgLenFromCont(origLen); rpcFreeMsg(pHead); // free the compressed message buffer pHead = pNewHead; - tTrace("decomp malloc mem: %p", temp); + tTrace("decomp malloc mem:%p", temp); } else { tError("failed to allocate memory to decompress msg, contLen:%d", contLen); } @@ -1589,10 +1618,12 @@ static void rpcDecRef(SRpcInfo *pRpc) taosTmrCleanUp(pRpc->tmrCtrl); 
taosIdPoolCleanUp(pRpc->idPool); - taosTFree(pRpc->connList); + tfree(pRpc->connList); pthread_mutex_destroy(&pRpc->mutex); tDebug("%s rpc resources are released", pRpc->label); - taosTFree(pRpc); + tfree(pRpc); + + atomic_sub_fetch_32(&tsRpcNum, 1); } } diff --git a/src/rpc/src/rpcTcp.c b/src/rpc/src/rpcTcp.c index bc8d360d39509ce9a2fdbe6a9dd883c5c5c99190..7b8cf3cda2f98137ac68bacec257d5b7d14a2a4c 100644 --- a/src/rpc/src/rpcTcp.c +++ b/src/rpc/src/rpcTcp.c @@ -171,40 +171,17 @@ void *taosInitTcpServer(uint32_t ip, uint16_t port, char *label, int numOfThread } static void taosStopTcpThread(SThreadObj* pThreadObj) { - pThreadObj->stop = true; - eventfd_t fd = -1; - - // save thread into local variable since pThreadObj is freed when thread exits + // save thread into local variable and signal thread to stop pthread_t thread = pThreadObj->thread; - - if (taosComparePthread(pThreadObj->thread, pthread_self())) { - pthread_detach(pthread_self()); + if (!taosCheckPthreadValid(thread)) { return; } - - if (taosCheckPthreadValid(pThreadObj->thread)) { - // signal the thread to stop, try graceful method first, - // and use pthread_cancel when failed - struct epoll_event event = { .events = EPOLLIN }; - fd = eventfd(1, 0); - if (fd == -1) { - // failed to create eventfd, call pthread_cancel instead, which may result in data corruption: - tError("%s, failed to create eventfd(%s)", pThreadObj->label, strerror(errno)); - pThreadObj->stop = true; - pthread_cancel(pThreadObj->thread); - } else if (epoll_ctl(pThreadObj->pollFd, EPOLL_CTL_ADD, fd, &event) < 0) { - // failed to call epoll_ctl, call pthread_cancel instead, which may result in data corruption: - tError("%s, failed to call epoll_ctl(%s)", pThreadObj->label, strerror(errno)); - pthread_cancel(pThreadObj->thread); - } - } - - // at this step, pThreadObj has already been released - if (taosCheckPthreadValid(thread)) { - pthread_join(thread, NULL); + pThreadObj->stop = true; + if (taosComparePthread(thread, pthread_self())) { + pthread_detach(pthread_self()); + return; } - - if (fd != -1) taosCloseSocket(fd); + pthread_join(thread, NULL); } void taosStopTcpServer(void *handle) { @@ -236,8 +213,8 @@ void taosCleanUpTcpServer(void *handle) { tDebug("%s TCP server is cleaned up", pServerObj->label); - taosTFree(pServerObj->pThreadObj); - taosTFree(pServerObj); + tfree(pServerObj->pThreadObj); + tfree(pServerObj); } static void *taosAcceptTcpConnection(void *arg) { @@ -373,8 +350,8 @@ void *taosOpenTcpClientConnection(void *shandle, void *thandle, uint32_t ip, uin tDebug("%s %p TCP connection to 0x%x:%hu is created, localPort:%hu FD:%p numOfFds:%d", pThreadObj->label, thandle, ip, port, localPort, pFdObj, pThreadObj->numOfFds); } else { - taosCloseSocket(fd); tError("%s failed to malloc client FdObj(%s)", pThreadObj->label, strerror(errno)); + taosCloseSocket(fd); } return pFdObj; @@ -437,12 +414,13 @@ static int taosReadTcpData(SFdObj *pFdObj, SRecvInfo *pInfo) { } msgLen = (int32_t)htonl((uint32_t)rpcHead.msgLen); - buffer = malloc(msgLen + tsRpcOverhead); + int32_t size = msgLen + tsRpcOverhead; + buffer = malloc(size); if (NULL == buffer) { tError("%s %p TCP malloc(size:%d) fail", pThreadObj->label, pFdObj->thandle, msgLen); return -1; } else { - tTrace("TCP malloc mem: %p", buffer); + tTrace("TCP malloc mem:%p size:%d", buffer, size); } msg = buffer + tsRpcOverhead; @@ -534,7 +512,7 @@ static void *taosProcessTcpData(void *param) { pthread_mutex_destroy(&(pThreadObj->mutex)); tDebug("%s TCP thread exits ...", pThreadObj->label); - 
taosTFree(pThreadObj); + tfree(pThreadObj); return NULL; } @@ -555,7 +533,7 @@ static SFdObj *taosMallocFdObj(SThreadObj *pThreadObj, SOCKET fd) { event.events = EPOLLIN | EPOLLRDHUP; event.data.ptr = pFdObj; if (epoll_ctl(pThreadObj->pollFd, EPOLL_CTL_ADD, fd, &event) < 0) { - taosTFree(pFdObj); + tfree(pFdObj); terrno = TAOS_SYSTEM_ERROR(errno); return NULL; } @@ -608,5 +586,5 @@ static void taosFreeFdObj(SFdObj *pFdObj) { tDebug("%s %p TCP connection is closed, FD:%p numOfFds:%d", pThreadObj->label, pFdObj->thandle, pFdObj, pThreadObj->numOfFds); - taosTFree(pFdObj); + tfree(pFdObj); } diff --git a/src/rpc/src/rpcUdp.c b/src/rpc/src/rpcUdp.c index 6f653046615f162c516b5eebf08995d30c6214d7..22301fcecc83fb1f4c9a29132b0311e05b2382e6 100644 --- a/src/rpc/src/rpcUdp.c +++ b/src/rpc/src/rpcUdp.c @@ -147,7 +147,7 @@ void taosStopUdpConnection(void *handle) { if (taosCheckPthreadValid(pConn->thread)) { pthread_join(pConn->thread, NULL); } - taosTFree(pConn->buffer); + tfree(pConn->buffer); // tTrace("%s UDP thread is closed, index:%d", pConn->label, i); } @@ -166,7 +166,7 @@ void taosCleanUpUdpConnection(void *handle) { } tDebug("%s UDP is cleaned up", pSet->label); - taosTFree(pSet); + tfree(pSet); } void *taosOpenUdpConnection(void *shandle, void *thandle, uint32_t ip, uint16_t port) { @@ -209,12 +209,13 @@ static void *taosRecvUdpData(void *param) { continue; } - char *tmsg = malloc(dataLen + tsRpcOverhead); + int32_t size = dataLen + tsRpcOverhead; + char *tmsg = malloc(size); if (NULL == tmsg) { tError("%s failed to allocate memory, size:%" PRId64, pConn->label, (int64_t)dataLen); continue; } else { - tTrace("UDP malloc mem: %p", tmsg); + tTrace("UDP malloc mem:%p size:%d", tmsg, size); } tmsg += tsRpcOverhead; // overhead for SRpcReqContext diff --git a/src/rpc/test/rclient.c b/src/rpc/test/rclient.c index 7a963e9ce47f7c89edc0204d6502d548dbdb40eb..5721525adee3fc847a1ba2476ccb0995fb50a65c 100644 --- a/src/rpc/test/rclient.c +++ b/src/rpc/test/rclient.c @@ -57,7 +57,7 @@ static void *sendRequest(void *param) { rpcMsg.ahandle = pInfo; rpcMsg.msgType = 1; tDebug("thread:%d, send request, contLen:%d num:%d", pInfo->index, pInfo->msgSize, pInfo->num); - rpcSendRequest(pInfo->pRpc, &pInfo->epSet, &rpcMsg); + rpcSendRequest(pInfo->pRpc, &pInfo->epSet, &rpcMsg, NULL); if ( pInfo->num % 20000 == 0 ) tInfo("thread:%d, %d requests have been sent", pInfo->index, pInfo->num); tsem_wait(&pInfo->rspSem); diff --git a/src/sync/inc/syncInt.h b/src/sync/inc/syncInt.h index f6818106462cb9c9d694d59c7cb9012cd1a54c8b..7d846ebc80e6ccebdddfd6f4a642c8166de5b68c 100644 --- a/src/sync/inc/syncInt.h +++ b/src/sync/inc/syncInt.h @@ -27,13 +27,20 @@ extern "C" { #define sDebug(...) { if (sDebugFlag & DEBUG_DEBUG) { taosPrintLog("SYN ", sDebugFlag, __VA_ARGS__); }} #define sTrace(...) 
{ if (sDebugFlag & DEBUG_TRACE) { taosPrintLog("SYN ", sDebugFlag, __VA_ARGS__); }} -#define TAOS_SMSG_SYNC_DATA 1 -#define TAOS_SMSG_FORWARD 2 -#define TAOS_SMSG_FORWARD_RSP 3 -#define TAOS_SMSG_SYNC_REQ 4 -#define TAOS_SMSG_SYNC_RSP 5 -#define TAOS_SMSG_SYNC_MUST 6 -#define TAOS_SMSG_STATUS 7 +typedef enum { + TAOS_SMSG_SYNC_DATA = 1, + TAOS_SMSG_FORWARD = 2, + TAOS_SMSG_FORWARD_RSP = 3, + TAOS_SMSG_SYNC_REQ = 4, + TAOS_SMSG_SYNC_RSP = 5, + TAOS_SMSG_SYNC_MUST = 6, + TAOS_SMSG_STATUS = 7 +} ESyncMsgType; + +#define SYNC_MAX_SIZE (TSDB_MAX_WAL_SIZE + sizeof(SWalHead) + sizeof(SSyncHead) + 16) +#define SYNC_RECV_BUFFER_SIZE (5*1024*1024) +#define SYNC_FWD_TIMER 300 +#define SYNC_ROLE_TIMER 10000 #define nodeRole pNode->peerInfo[pNode->selfIndex]->role #define nodeVersion pNode->peerInfo[pNode->selfIndex]->version @@ -65,6 +72,9 @@ typedef struct { typedef struct { int8_t role; int8_t ack; + int8_t type; + int8_t reserved[3]; + uint16_t tranId; uint64_t version; SPeerStatus peersStatus[]; } SPeersStatus; @@ -89,11 +99,11 @@ typedef struct { #pragma pack(pop) typedef struct { - char *buffer; - int bufferSize; - char *offset; - int forwards; - int code; + char * buffer; + int32_t bufferSize; + char * offset; + int32_t forwards; + int32_t code; } SRecvBuffer; typedef struct { @@ -103,36 +113,36 @@ typedef struct { int8_t nacks; int8_t confirmed; int32_t code; - uint64_t time; + int64_t time; } SFwdInfo; typedef struct { - int first; - int last; - int fwds; // number of forwards - SFwdInfo fwdInfo[]; + int32_t first; + int32_t last; + int32_t fwds; // number of forwards + SFwdInfo fwdInfo[]; } SSyncFwds; typedef struct SsyncPeer { int32_t nodeId; uint32_t ip; uint16_t port; + int8_t role; + int8_t sstatus; // sync status char fqdn[TSDB_FQDN_LEN]; // peer ip string char id[TSDB_EP_LEN + 32]; // peer vgId + end point - int8_t role; - int8_t sstatus; // sync status uint64_t version; - uint64_t sversion; // track the peer version in retrieve process - int syncFd; - int peerFd; // forward FD - int numOfRetrieves; // number of retrieves tried - int fileChanged; // a flag to indicate file is changed during retrieving process + uint64_t sversion; // track the peer version in retrieve process + int32_t syncFd; + int32_t peerFd; // forward FD + int32_t numOfRetrieves; // number of retrieves tried + int32_t fileChanged; // a flag to indicate file is changed during retrieving process void * timer; void * pConn; - int notifyFd; - int watchNum; - int * watchFd; - int8_t refCount; // reference count + int32_t notifyFd; + int32_t watchNum; + int32_t *watchFd; + int32_t refCount; // reference count struct SSyncNode *pSyncNode; } SSyncPeer; @@ -140,15 +150,16 @@ typedef struct SSyncNode { char path[TSDB_FILENAME_LEN]; int8_t replica; int8_t quorum; + int8_t selfIndex; uint32_t vgId; + int64_t rid; void *ahandle; - int8_t selfIndex; SSyncPeer *peerInfo[TAOS_SYNC_MAX_REPLICA+1]; // extra one for arbitrator SSyncPeer *pMaster; - int8_t refCount; SRecvBuffer *pRecv; SSyncFwds *pSyncFwds; // saved forward info if quorum >1 void *pFwdTimer; + void *pRoleTimer; FGetFileInfo getFileInfo; FGetWalInfo getWalInfo; FWriteToCache writeToCache; @@ -160,16 +171,16 @@ typedef struct SSyncNode { } SSyncNode; // sync module global -extern int tsSyncNum; -extern char tsNodeFqdn[TSDB_FQDN_LEN]; +extern int32_t tsSyncNum; +extern char tsNodeFqdn[TSDB_FQDN_LEN]; void *syncRetrieveData(void *param); void *syncRestoreData(void *param); -int syncSaveIntoBuffer(SSyncPeer *pPeer, SWalHead *pHead); -void syncRestartConnection(SSyncPeer 
*pPeer); -void syncBroadcastStatus(SSyncNode *pNode); -void syncAddPeerRef(SSyncPeer *pPeer); -int syncDecPeerRef(SSyncPeer *pPeer); +int32_t syncSaveIntoBuffer(SSyncPeer *pPeer, SWalHead *pHead); +void syncRestartConnection(SSyncPeer *pPeer); +void syncBroadcastStatus(SSyncNode *pNode); +void syncAddPeerRef(SSyncPeer *pPeer); +int32_t syncDecPeerRef(SSyncPeer *pPeer); #ifdef __cplusplus } diff --git a/src/sync/inc/taosTcpPool.h b/src/sync/inc/taosTcpPool.h index 261d190ad3b7cbe4fbf46a061a1432c217262d24..41043b0cd4c886616d5cecd2739eae684052c395 100644 --- a/src/sync/inc/taosTcpPool.h +++ b/src/sync/inc/taosTcpPool.h @@ -20,23 +20,23 @@ extern "C" { #endif -typedef void* ttpool_h; -typedef void* tthread_h; +typedef void *ttpool_h; +typedef void *tthread_h; typedef struct { - int numOfThreads; + int32_t numOfThreads; uint32_t serverIp; int16_t port; - int bufferSize; - void (*processBrokenLink)(void *ahandle); - int (*processIncomingMsg)(void *ahandle, void *buffer); - void (*processIncomingConn)(int fd, uint32_t ip); + int32_t bufferSize; + void (*processBrokenLink)(void *ahandle); + int32_t (*processIncomingMsg)(void *ahandle, void *buffer); + void (*processIncomingConn)(int32_t fd, uint32_t ip); } SPoolInfo; -ttpool_h taosOpenTcpThreadPool(SPoolInfo *pInfo); -void taosCloseTcpThreadPool(ttpool_h); -void *taosAllocateTcpConn(void *, void *ahandle, int connFd); -void taosFreeTcpConn(void *); +ttpool_h taosOpenTcpThreadPool(SPoolInfo *pInfo); +void taosCloseTcpThreadPool(ttpool_h); +void * taosAllocateTcpConn(void *, void *ahandle, int32_t connFd); +void taosFreeTcpConn(void *); #ifdef __cplusplus } diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c index ef635e6efc1ca5f071c64dbe00920c3987837494..843de9461fd956f0602a0d0dce9aca7fb219d42f 100644 --- a/src/sync/src/syncMain.c +++ b/src/sync/src/syncMain.c @@ -13,13 +13,13 @@ * along with this program. If not, see . 
*/ -//#include -//#include +#define _DEFAULT_SOURCE #include "os.h" #include "hash.h" #include "tlog.h" #include "tutil.h" #include "ttimer.h" +#include "tref.h" #include "tsocket.h" #include "tglobal.h" #include "taoserror.h" @@ -30,37 +30,39 @@ #include "syncInt.h" // global configurable -int tsMaxSyncNum = 2; -int tsSyncTcpThreads = 2; -int tsMaxWatchFiles = 500; -int tsMaxFwdInfo = 200; -int tsSyncTimer = 1; +int32_t tsMaxSyncNum = 2; +int32_t tsSyncTcpThreads = 2; +int32_t tsMaxWatchFiles = 500; +int32_t tsMaxFwdInfo = 200; +int32_t tsSyncTimer = 1; // module global, not configurable -int tsSyncNum; // number of sync in process in whole system -char tsNodeFqdn[TSDB_FQDN_LEN]; +int32_t tsSyncNum; // number of sync in process in whole system +char tsNodeFqdn[TSDB_FQDN_LEN]; static ttpool_h tsTcpPool; -static void * syncTmrCtrl = NULL; -static void * vgIdHash; +static void * tsSyncTmrCtrl = NULL; +static void * tsVgIdHash; +static int32_t tsSyncRefId = -1; // local functions -static void syncProcessSyncRequest(char *pMsg, SSyncPeer *pPeer); -static void syncRecoverFromMaster(SSyncPeer *pPeer); -static void syncCheckPeerConnection(void *param, void *tmrId); -static void syncSendPeersStatusMsgToPeer(SSyncPeer *pPeer, char ack); -static void syncProcessBrokenLink(void *param); -static int syncProcessPeerMsg(void *param, void *buffer); -static void syncProcessIncommingConnection(int connFd, uint32_t sourceIp); -static void syncRemovePeer(SSyncPeer *pPeer); -static void syncAddArbitrator(SSyncNode *pNode); -static void syncAddNodeRef(SSyncNode *pNode); -static void syncDecNodeRef(SSyncNode *pNode); -static void syncRemoveConfirmedFwdInfo(SSyncNode *pNode); -static void syncMonitorFwdInfos(void *param, void *tmrId); -static void syncProcessFwdAck(SSyncNode *pNode, SFwdInfo *pFwdInfo, int32_t code); -static void syncSaveFwdInfo(SSyncNode *pNode, uint64_t version, void *mhandle); -static void syncRestartPeer(SSyncPeer *pPeer); +static void syncProcessSyncRequest(char *pMsg, SSyncPeer *pPeer); +static void syncRecoverFromMaster(SSyncPeer *pPeer); +static void syncCheckPeerConnection(void *param, void *tmrId); +static void syncSendPeersStatusMsgToPeer(SSyncPeer *pPeer, char ack, int8_t type, uint16_t tranId); +static void syncProcessBrokenLink(void *param); +static int32_t syncProcessPeerMsg(void *param, void *buffer); +static void syncProcessIncommingConnection(int32_t connFd, uint32_t sourceIp); +static void syncRemovePeer(SSyncPeer *pPeer); +static void syncAddArbitrator(SSyncNode *pNode); +static void syncFreeNode(void *); +static void syncRemoveConfirmedFwdInfo(SSyncNode *pNode); +static void syncMonitorFwdInfos(void *param, void *tmrId); +static void syncMonitorNodeRole(void *param, void *tmrId); +static void syncProcessFwdAck(SSyncNode *pNode, SFwdInfo *pFwdInfo, int32_t code); +static void syncSaveFwdInfo(SSyncNode *pNode, uint64_t version, void *mhandle); +static void syncRestartPeer(SSyncPeer *pPeer); +static int32_t syncForwardToPeerImpl(SSyncNode *pNode, void *data, void *mhandle, int32_t qtyp); static SSyncPeer *syncAddPeer(SSyncNode *pNode, const SNodeInfo *pInfo); char* syncRole[] = { @@ -71,13 +73,39 @@ char* syncRole[] = { "master" }; +typedef enum { + SYNC_STATUS_BROADCAST, + SYNC_STATUS_BROADCAST_RSP, + SYNC_STATUS_SETUP_CONN, + SYNC_STATUS_SETUP_CONN_RSP, + SYNC_STATUS_EXCHANGE_DATA, + SYNC_STATUS_EXCHANGE_DATA_RSP, + SYNC_STATUS_CHECK_ROLE, + SYNC_STATUS_CHECK_ROLE_RSP +} ESyncStatusType; + +char *statusType[] = { + "broadcast", + "broadcast-rsp", + "setup-conn", + 
"setup-conn-rsp", + "exchange-data", + "exchange-data-rsp", + "check-role", + "check-role-rsp" +}; + +uint16_t syncGenTranId() { + return taosRand() & 0XFFFF; +} + int32_t syncInit() { SPoolInfo info; info.numOfThreads = tsSyncTcpThreads; info.serverIp = 0; info.port = tsSyncPort; - info.bufferSize = 640000; + info.bufferSize = SYNC_MAX_SIZE; info.processBrokenLink = syncProcessBrokenLink; info.processIncomingMsg = syncProcessPeerMsg; info.processIncomingConn = syncProcessIncommingConnection; @@ -88,21 +116,27 @@ int32_t syncInit() { return -1; } - syncTmrCtrl = taosTmrInit(1000, 50, 10000, "SYNC"); - if (syncTmrCtrl == NULL) { + tsSyncTmrCtrl = taosTmrInit(1000, 50, 10000, "SYNC"); + if (tsSyncTmrCtrl == NULL) { sError("failed to init tmrCtrl"); taosCloseTcpThreadPool(tsTcpPool); tsTcpPool = NULL; return -1; } - vgIdHash = taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, true); - if (vgIdHash == NULL) { - sError("failed to init vgIdHash"); - taosTmrCleanUp(syncTmrCtrl); + tsVgIdHash = taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, true); + if (tsVgIdHash == NULL) { + sError("failed to init tsVgIdHash"); + taosTmrCleanUp(tsSyncTmrCtrl); taosCloseTcpThreadPool(tsTcpPool); tsTcpPool = NULL; - syncTmrCtrl = NULL; + tsSyncTmrCtrl = NULL; + return -1; + } + + tsSyncRefId = taosOpenRef(200, syncFreeNode); + if (tsSyncRefId < 0) { + syncCleanUp(); return -1; } @@ -118,27 +152,30 @@ void syncCleanUp() { tsTcpPool = NULL; } - if (syncTmrCtrl) { - taosTmrCleanUp(syncTmrCtrl); - syncTmrCtrl = NULL; + if (tsSyncTmrCtrl) { + taosTmrCleanUp(tsSyncTmrCtrl); + tsSyncTmrCtrl = NULL; } - if (vgIdHash) { - taosHashCleanup(vgIdHash); - vgIdHash = NULL; + if (tsVgIdHash) { + taosHashCleanup(tsVgIdHash); + tsVgIdHash = NULL; } + taosCloseRef(tsSyncRefId); + tsSyncRefId = -1; + sInfo("sync module is cleaned up"); } -void *syncStart(const SSyncInfo *pInfo) { +int64_t syncStart(const SSyncInfo *pInfo) { const SSyncCfg *pCfg = &pInfo->syncCfg; - SSyncNode *pNode = (SSyncNode *)calloc(sizeof(SSyncNode), 1); + SSyncNode *pNode = calloc(sizeof(SSyncNode), 1); if (pNode == NULL) { sError("no memory to allocate syncNode"); terrno = TAOS_SYSTEM_ERROR(errno); - return NULL; + return -1; } tstrncpy(pNode->path, pInfo->path, sizeof(pNode->path)); @@ -159,21 +196,32 @@ void *syncStart(const SSyncInfo *pInfo) { pNode->quorum = pCfg->quorum; if (pNode->quorum > pNode->replica) pNode->quorum = pNode->replica; - for (int i = 0; i < pCfg->replica; ++i) { + pNode->rid = taosAddRef(tsSyncRefId, pNode); + if (pNode->rid < 0) { + syncFreeNode(pNode); + return -1; + } + + for (int32_t i = 0; i < pCfg->replica; ++i) { const SNodeInfo *pNodeInfo = pCfg->nodeInfo + i; pNode->peerInfo[i] = syncAddPeer(pNode, pNodeInfo); + if (pNode->peerInfo[i] == NULL) { + sError("vgId:%d, node:%d fqdn:%s port:%u is not configured, stop taosd", pNode->vgId, pNodeInfo->nodeId, + pNodeInfo->nodeFqdn, pNodeInfo->nodePort); + syncStop(pNode->rid); + exit(1); + } + if ((strcmp(pNodeInfo->nodeFqdn, tsNodeFqdn) == 0) && (pNodeInfo->nodePort == tsSyncPort)) { pNode->selfIndex = i; } } - syncAddNodeRef(pNode); - if (pNode->selfIndex < 0) { sInfo("vgId:%d, this node is not configured", pNode->vgId); terrno = TSDB_CODE_SYN_INVALID_CONFIG; - syncStop(pNode); - return NULL; + syncStop(pNode->rid); + return -1; } nodeVersion = pInfo->version; // set the initial version @@ -185,40 +233,49 @@ void *syncStart(const SSyncInfo *pInfo) { if (pNode->pSyncFwds == NULL) { sError("vgId:%d, no memory to 
allocate syncFwds", pNode->vgId); terrno = TAOS_SYSTEM_ERROR(errno); - syncStop(pNode); - return NULL; + syncStop(pNode->rid); + return -1; } - pNode->pFwdTimer = taosTmrStart(syncMonitorFwdInfos, 300, pNode, syncTmrCtrl); + pNode->pFwdTimer = taosTmrStart(syncMonitorFwdInfos, SYNC_FWD_TIMER, (void *)pNode->rid, tsSyncTmrCtrl); if (pNode->pFwdTimer == NULL) { - sError("vgId:%d, failed to allocate timer", pNode->vgId); - syncStop(pNode); - return NULL; + sError("vgId:%d, failed to allocate fwd timer", pNode->vgId); + syncStop(pNode->rid); + return -1; + } + + pNode->pRoleTimer = taosTmrStart(syncMonitorNodeRole, SYNC_ROLE_TIMER, (void *)pNode->rid, tsSyncTmrCtrl); + if (pNode->pRoleTimer == NULL) { + sError("vgId:%d, failed to allocate role timer", pNode->vgId); + syncStop(pNode->rid); + return -1; } syncAddArbitrator(pNode); - taosHashPut(vgIdHash, (const char *)&pNode->vgId, sizeof(int32_t), (char *)(&pNode), sizeof(SSyncNode *)); + taosHashPut(tsVgIdHash, (const char *)&pNode->vgId, sizeof(int32_t), (char *)(&pNode), sizeof(SSyncNode *)); if (pNode->notifyRole) { (*pNode->notifyRole)(pNode->ahandle, nodeRole); } - return pNode; + return pNode->rid; } -void syncStop(void *param) { - SSyncNode *pNode = param; +void syncStop(int64_t rid) { SSyncPeer *pPeer; + SSyncNode *pNode = taosAcquireRef(tsSyncRefId, rid); if (pNode == NULL) return; + sInfo("vgId:%d, cleanup sync", pNode->vgId); pthread_mutex_lock(&(pNode->mutex)); - if (vgIdHash) taosHashRemove(vgIdHash, (const char *)&pNode->vgId, sizeof(int32_t)); + if (tsVgIdHash) taosHashRemove(tsVgIdHash, (const char *)&pNode->vgId, sizeof(int32_t)); if (pNode->pFwdTimer) taosTmrStop(pNode->pFwdTimer); + if (pNode->pRoleTimer) taosTmrStop(pNode->pRoleTimer); - for (int i = 0; i < pNode->replica; ++i) { + for (int32_t i = 0; i < pNode->replica; ++i) { pPeer = pNode->peerInfo[i]; if (pPeer) syncRemovePeer(pPeer); } @@ -228,14 +285,16 @@ void syncStop(void *param) { pthread_mutex_unlock(&(pNode->mutex)); - syncDecNodeRef(pNode); + taosReleaseRef(tsSyncRefId, rid); + taosRemoveRef(tsSyncRefId, rid); } -int32_t syncReconfig(void *param, const SSyncCfg *pNewCfg) { - SSyncNode *pNode = param; - int i, j; +int32_t syncReconfig(int64_t rid, const SSyncCfg *pNewCfg) { + int32_t i, j; + SSyncNode *pNode = taosAcquireRef(tsSyncRefId, rid); if (pNode == NULL) return TSDB_CODE_SYN_INVALID_CONFIG; + sInfo("vgId:%d, reconfig, role:%s replica:%d old:%d", pNode->vgId, syncRole[nodeRole], pNewCfg->replica, pNode->replica); @@ -294,109 +353,61 @@ int32_t syncReconfig(void *param, const SSyncCfg *pNewCfg) { pthread_mutex_unlock(&(pNode->mutex)); - sInfo("vgId:%d, %d replicas are configured, quorum:%d role:%s", pNode->vgId, pNode->replica, pNode->quorum, - syncRole[nodeRole]); + sInfo("vgId:%d, %d replicas are configured, quorum:%d", pNode->vgId, pNode->replica, pNode->quorum); syncBroadcastStatus(pNode); + taosReleaseRef(tsSyncRefId, rid); return 0; } -int32_t syncForwardToPeer(void *param, void *data, void *mhandle, int qtype) { - SSyncNode *pNode = param; - SSyncPeer *pPeer; - SSyncHead *pSyncHead; - SWalHead * pWalHead = data; - int fwdLen; - int code = 0; - - if (pNode == NULL) return 0; - - if (nodeRole == TAOS_SYNC_ROLE_SLAVE && pWalHead->version != nodeVersion + 1) { - sError("vgId:%d, received ver:%" PRIu64 ", inconsistent with last ver:%" PRIu64 ", restart connection", pNode->vgId, - pWalHead->version, nodeVersion); - for (int i = 0; i < pNode->replica; ++i) { - pPeer = pNode->peerInfo[i]; - syncRestartConnection(pPeer); - } - return 
TSDB_CODE_SYN_INVALID_VERSION; - } - - // always update version - nodeVersion = pWalHead->version; - sDebug("vgId:%d, replica:%d nodeRole:%s qtype:%d ver:%" PRIu64, pNode->vgId, pNode->replica, syncRole[nodeRole], - qtype, pWalHead->version); - - if (pNode->replica == 1 || nodeRole != TAOS_SYNC_ROLE_MASTER) return 0; - - // only pkt from RPC or CQ can be forwarded - if (qtype != TAOS_QTYPE_RPC && qtype != TAOS_QTYPE_CQ) return 0; - - // a hacker way to improve the performance - pSyncHead = (SSyncHead *)(((char *)pWalHead) - sizeof(SSyncHead)); - pSyncHead->type = TAOS_SMSG_FORWARD; - pSyncHead->pversion = 0; - pSyncHead->len = sizeof(SWalHead) + pWalHead->len; - fwdLen = pSyncHead->len + sizeof(SSyncHead); // include the WAL and SYNC head - - pthread_mutex_lock(&(pNode->mutex)); - - for (int i = 0; i < pNode->replica; ++i) { - pPeer = pNode->peerInfo[i]; - if (pPeer == NULL || pPeer->peerFd < 0) continue; - if (pPeer->role != TAOS_SYNC_ROLE_SLAVE && pPeer->sstatus != TAOS_SYNC_STATUS_CACHE) continue; - - if (pNode->quorum > 1 && code == 0) { - syncSaveFwdInfo(pNode, pWalHead->version, mhandle); - code = 1; - } +int32_t syncForwardToPeer(int64_t rid, void *data, void *mhandle, int32_t qtype) { + SSyncNode *pNode = taosAcquireRef(tsSyncRefId, rid); + if (pNode == NULL) return 0; - int retLen = write(pPeer->peerFd, pSyncHead, fwdLen); - if (retLen == fwdLen) { - sDebug("%s, forward is sent, ver:%" PRIu64 " contLen:%d", pPeer->id, pWalHead->version, pWalHead->len); - } else { - sError("%s, failed to forward, ver:%" PRIu64 " retLen:%d", pPeer->id, pWalHead->version, retLen); - syncRestartConnection(pPeer); - } - } + int32_t code = syncForwardToPeerImpl(pNode, data, mhandle, qtype); - pthread_mutex_unlock(&(pNode->mutex)); + taosReleaseRef(tsSyncRefId, rid); return code; } -void syncConfirmForward(void *param, uint64_t version, int32_t code) { - SSyncNode *pNode = param; +void syncConfirmForward(int64_t rid, uint64_t version, int32_t code) { + SSyncNode *pNode = taosAcquireRef(tsSyncRefId, rid); if (pNode == NULL) return; - if (pNode->quorum <= 1) return; SSyncPeer *pPeer = pNode->pMaster; - if (pPeer == NULL) return; - - char msg[sizeof(SSyncHead) + sizeof(SFwdRsp)] = {0}; + if (pPeer && pNode->quorum > 1) { + char msg[sizeof(SSyncHead) + sizeof(SFwdRsp)] = {0}; - SSyncHead *pHead = (SSyncHead *)msg; - pHead->type = TAOS_SMSG_FORWARD_RSP; - pHead->len = sizeof(SFwdRsp); + SSyncHead *pHead = (SSyncHead *)msg; + pHead->type = TAOS_SMSG_FORWARD_RSP; + pHead->len = sizeof(SFwdRsp); - SFwdRsp *pFwdRsp = (SFwdRsp *)(msg + sizeof(SSyncHead)); - pFwdRsp->version = version; - pFwdRsp->code = code; + SFwdRsp *pFwdRsp = (SFwdRsp *)(msg + sizeof(SSyncHead)); + pFwdRsp->version = version; + pFwdRsp->code = code; - int msgLen = sizeof(SSyncHead) + sizeof(SFwdRsp); - int retLen = write(pPeer->peerFd, msg, msgLen); + int32_t msgLen = sizeof(SSyncHead) + sizeof(SFwdRsp); + int32_t retLen = taosWriteMsg(pPeer->peerFd, msg, msgLen); - if (retLen == msgLen) { - sDebug("%s, forward-rsp is sent, ver:%" PRIu64, pPeer->id, version); - } else { - sDebug("%s, failed to send forward ack, restart", pPeer->id); - syncRestartConnection(pPeer); + if (retLen == msgLen) { + sTrace("%s, forward-rsp is sent, code:%x hver:%" PRIu64, pPeer->id, code, version); + } else { + sDebug("%s, failed to send forward ack, restart", pPeer->id); + syncRestartConnection(pPeer); + } } + + taosReleaseRef(tsSyncRefId, rid); } -void syncRecover(void *param) { - SSyncNode *pNode = param; +#if 0 +void syncRecover(int64_t rid) { SSyncPeer *pPeer; + 
SSyncNode *pNode = taosAcquireRef(tsSyncRefId, rid); + if (pNode == NULL) return; + // to do: add a few lines to check if recover is OK // if take this node to unsync state, the whole system may not work @@ -406,25 +417,31 @@ void syncRecover(void *param) { pthread_mutex_lock(&(pNode->mutex)); - for (int i = 0; i < pNode->replica; ++i) { - pPeer = (SSyncPeer *)pNode->peerInfo[i]; + for (int32_t i = 0; i < pNode->replica; ++i) { + pPeer = pNode->peerInfo[i]; if (pPeer->peerFd >= 0) { syncRestartConnection(pPeer); } } pthread_mutex_unlock(&(pNode->mutex)); + + taosReleaseRef(tsSyncRefId, rid); } +#endif -int syncGetNodesRole(void *param, SNodesRole *pNodesRole) { - SSyncNode *pNode = param; +int32_t syncGetNodesRole(int64_t rid, SNodesRole *pNodesRole) { + SSyncNode *pNode = taosAcquireRef(tsSyncRefId, rid); + if (pNode == NULL) return -1; pNodesRole->selfIndex = pNode->selfIndex; - for (int i = 0; i < pNode->replica; ++i) { + for (int32_t i = 0; i < pNode->replica; ++i) { pNodesRole->nodeId[i] = pNode->peerInfo[i]->nodeId; pNodesRole->role[i] = pNode->peerInfo[i]->role; } + taosReleaseRef(tsSyncRefId, rid); + return 0; } @@ -440,7 +457,7 @@ static void syncAddArbitrator(SSyncNode *pNode) { SNodeInfo nodeInfo; nodeInfo.nodeId = 0; - int ret = taosGetFqdnPortFromEp(tsArbitrator, nodeInfo.nodeFqdn, &nodeInfo.nodePort); + int32_t ret = taosGetFqdnPortFromEp(tsArbitrator, nodeInfo.nodeFqdn, &nodeInfo.nodePort); if (-1 == ret) { nodeInfo.nodePort = tsArbitratorPort; } @@ -457,26 +474,24 @@ static void syncAddArbitrator(SSyncNode *pNode) { pNode->peerInfo[TAOS_SYNC_MAX_REPLICA] = syncAddPeer(pNode, &nodeInfo); } -static void syncAddNodeRef(SSyncNode *pNode) { atomic_add_fetch_8(&pNode->refCount, 1); } +static void syncFreeNode(void *param) { + SSyncNode *pNode = param; -static void syncDecNodeRef(SSyncNode *pNode) { - if (atomic_sub_fetch_8(&pNode->refCount, 1) == 0) { - pthread_mutex_destroy(&pNode->mutex); - taosTFree(pNode->pRecv); - taosTFree(pNode->pSyncFwds); - taosTFree(pNode); - } + pthread_mutex_destroy(&pNode->mutex); + tfree(pNode->pRecv); + tfree(pNode->pSyncFwds); + tfree(pNode); } -void syncAddPeerRef(SSyncPeer *pPeer) { atomic_add_fetch_8(&pPeer->refCount, 1); } +void syncAddPeerRef(SSyncPeer *pPeer) { atomic_add_fetch_32(&pPeer->refCount, 1); } -int syncDecPeerRef(SSyncPeer *pPeer) { - if (atomic_sub_fetch_8(&pPeer->refCount, 1) == 0) { - syncDecNodeRef(pPeer->pSyncNode); +int32_t syncDecPeerRef(SSyncPeer *pPeer) { + if (atomic_sub_fetch_32(&pPeer->refCount, 1) == 0) { + taosReleaseRef(tsSyncRefId, pPeer->pSyncNode->rid); sDebug("%s, resource is freed", pPeer->id); - taosTFree(pPeer->watchFd); - taosTFree(pPeer); + tfree(pPeer->watchFd); + tfree(pPeer); return 0; } @@ -502,7 +517,11 @@ static void syncRemovePeer(SSyncPeer *pPeer) { static SSyncPeer *syncAddPeer(SSyncNode *pNode, const SNodeInfo *pInfo) { uint32_t ip = taosGetIpFromFqdn(pInfo->nodeFqdn); - if (ip == -1) return NULL; + if (ip == 0xFFFFFFFF) { + sError("failed to add peer, can not resolve fqdn:%s since %s", pInfo->nodeFqdn, strerror(errno)); + terrno = TSDB_CODE_RPC_FQDN_ERROR; + return NULL; + } SSyncPeer *pPeer = calloc(1, sizeof(SSyncPeer)); if (pPeer == NULL) return NULL; @@ -512,7 +531,7 @@ static SSyncPeer *syncAddPeer(SSyncNode *pNode, const SNodeInfo *pInfo) { pPeer->ip = ip; pPeer->port = pInfo->nodePort; pPeer->fqdn[sizeof(pPeer->fqdn) - 1] = 0; - snprintf(pPeer->id, sizeof(pPeer->id), "vgId:%d peer:%s:%u", pNode->vgId, pPeer->fqdn, pPeer->port); + snprintf(pPeer->id, sizeof(pPeer->id), "vgId:%d, peer:%s:%u",
pNode->vgId, pPeer->fqdn, pPeer->port); pPeer->peerFd = -1; pPeer->syncFd = -1; @@ -521,30 +540,30 @@ static SSyncPeer *syncAddPeer(SSyncNode *pNode, const SNodeInfo *pInfo) { pPeer->refCount = 1; sInfo("%s, it is configured", pPeer->id); - int ret = strcmp(pPeer->fqdn, tsNodeFqdn); + int32_t ret = strcmp(pPeer->fqdn, tsNodeFqdn); if (pPeer->nodeId == 0 || (ret > 0) || (ret == 0 && pPeer->port > tsSyncPort)) { int32_t checkMs = 100 + (pNode->vgId * 10) % 100; - if (pNode->vgId > 1) checkMs = tsStatusInterval * 2000 + checkMs; - sDebug("%s, start to check peer connection after %d ms", pPeer->id, checkMs); - taosTmrReset(syncCheckPeerConnection, checkMs, pPeer, syncTmrCtrl, &pPeer->timer); + if (pNode->vgId > 1) checkMs = tsStatusInterval * 1000 + checkMs; + sDebug("%s, check peer connection after %d ms", pPeer->id, checkMs); + taosTmrReset(syncCheckPeerConnection, checkMs, pPeer, tsSyncTmrCtrl, &pPeer->timer); } - syncAddNodeRef(pNode); + taosAcquireRef(tsSyncRefId, pNode->rid); return pPeer; } void syncBroadcastStatus(SSyncNode *pNode) { SSyncPeer *pPeer; - for (int i = 0; i < pNode->replica; ++i) { + for (int32_t i = 0; i < pNode->replica; ++i) { if (i == pNode->selfIndex) continue; pPeer = pNode->peerInfo[i]; - syncSendPeersStatusMsgToPeer(pPeer, 1); + syncSendPeersStatusMsgToPeer(pPeer, 1, SYNC_STATUS_BROADCAST, syncGenTranId()); } } static void syncResetFlowCtrl(SSyncNode *pNode) { - for (int i = 0; i < pNode->replica; ++i) { + for (int32_t i = 0; i < pNode->replica; ++i) { pNode->peerInfo[i]->numOfRetrieves = 0; } @@ -555,13 +574,11 @@ static void syncResetFlowCtrl(SSyncNode *pNode) { static void syncChooseMaster(SSyncNode *pNode) { SSyncPeer *pPeer; - int onlineNum = 0; - int index = -1; - int replica = pNode->replica; + int32_t onlineNum = 0; + int32_t index = -1; + int32_t replica = pNode->replica; - sDebug("vgId:%d, choose master", pNode->vgId); - - for (int i = 0; i < pNode->replica; ++i) { + for (int32_t i = 0; i < pNode->replica; ++i) { if (pNode->peerInfo[i]->role != TAOS_SYNC_ROLE_OFFLINE) { onlineNum++; } @@ -570,7 +587,7 @@ static void syncChooseMaster(SSyncNode *pNode) { if (onlineNum == pNode->replica) { // if all peers are online, peer with highest version shall be master index = 0; - for (int i = 1; i < pNode->replica; ++i) { + for (int32_t i = 1; i < pNode->replica; ++i) { if (pNode->peerInfo[i]->version > pNode->peerInfo[index]->version) { index = i; } @@ -586,7 +603,7 @@ static void syncChooseMaster(SSyncNode *pNode) { if (index < 0 && onlineNum > replica / 2.0) { // over half of nodes are online - for (int i = 0; i < pNode->replica; ++i) { + for (int32_t i = 0; i < pNode->replica; ++i) { // slave with highest version shall be master pPeer = pNode->peerInfo[i]; if (pPeer->role == TAOS_SYNC_ROLE_SLAVE || pPeer->role == TAOS_SYNC_ROLE_MASTER) { @@ -601,6 +618,18 @@ static void syncChooseMaster(SSyncNode *pNode) { if (index == pNode->selfIndex) { sInfo("vgId:%d, start to work as master", pNode->vgId); nodeRole = TAOS_SYNC_ROLE_MASTER; + +#if 0 + for (int32_t i = 0; i < pNode->replica; ++i) { + if (i == index) continue; + pPeer = pNode->peerInfo[i]; + if (pPeer->version == nodeVersion) { + pPeer->role = TAOS_SYNC_ROLE_SLAVE; + pPeer->sstatus = TAOS_SYNC_STATUS_CACHE; + sInfo("%s, it shall work as slave", pPeer->id); + } + } +#endif syncResetFlowCtrl(pNode); (*pNode->notifyRole)(pNode->ahandle, nodeRole); } else { @@ -613,12 +642,12 @@ static void syncChooseMaster(SSyncNode *pNode) { } static SSyncPeer *syncCheckMaster(SSyncNode *pNode) { - int onlineNum = 0; - int index 
= -1; - int replica = pNode->replica; + int32_t onlineNum = 0; + int32_t masterIndex = -1; + int32_t replica = pNode->replica; - for (int i = 0; i < pNode->replica; ++i) { - if (pNode->peerInfo[i]->role != TAOS_SYNC_ROLE_OFFLINE) { + for (int32_t index = 0; index < pNode->replica; ++index) { + if (pNode->peerInfo[index]->role != TAOS_SYNC_ROLE_OFFLINE) { onlineNum++; } } @@ -633,18 +662,17 @@ static SSyncPeer *syncCheckMaster(SSyncNode *pNode) { if (onlineNum <= replica * 0.5) { if (nodeRole != TAOS_SYNC_ROLE_UNSYNCED) { nodeRole = TAOS_SYNC_ROLE_UNSYNCED; - // pNode->peerInfo[pNode->selfIndex]->role = nodeRole; (*pNode->notifyRole)(pNode->ahandle, nodeRole); - sInfo("vgId:%d, change to unsynced state, online:%d replica:%d", pNode->vgId, onlineNum, replica); + sInfo("vgId:%d, self change to unsynced state, online:%d replica:%d", pNode->vgId, onlineNum, replica); } } else { - for (int i = 0; i < pNode->replica; ++i) { - SSyncPeer *pTemp = pNode->peerInfo[i]; + for (int32_t index = 0; index < pNode->replica; ++index) { + SSyncPeer *pTemp = pNode->peerInfo[index]; if (pTemp->role != TAOS_SYNC_ROLE_MASTER) continue; - if (index < 0) { - index = i; + if (masterIndex < 0) { + masterIndex = index; } else { // multiple masters, it shall not happen - if (i == pNode->selfIndex) { + if (masterIndex == pNode->selfIndex) { sError("%s, peer is master, work as slave instead", pTemp->id); nodeRole = TAOS_SYNC_ROLE_SLAVE; (*pNode->notifyRole)(pNode->ahandle, nodeRole); @@ -653,77 +681,80 @@ static SSyncPeer *syncCheckMaster(SSyncNode *pNode) { } } - SSyncPeer *pMaster = (index >= 0) ? pNode->peerInfo[index] : NULL; + SSyncPeer *pMaster = (masterIndex >= 0) ? pNode->peerInfo[masterIndex] : NULL; return pMaster; } -static int syncValidateMaster(SSyncPeer *pPeer) { +static int32_t syncValidateMaster(SSyncPeer *pPeer) { SSyncNode *pNode = pPeer->pSyncNode; - int code = 0; + int32_t code = 0; if (nodeRole == TAOS_SYNC_ROLE_MASTER && nodeVersion < pPeer->version) { - sDebug("%s, slave has higher version, restart all connections!!!", pPeer->id); + sDebug("%s, peer has higher sver:%" PRIu64 ", restart all peer connections", pPeer->id, pPeer->version); nodeRole = TAOS_SYNC_ROLE_UNSYNCED; (*pNode->notifyRole)(pNode->ahandle, nodeRole); code = -1; - for (int i = 0; i < pNode->replica; ++i) { - if (i == pNode->selfIndex) continue; - syncRestartPeer(pNode->peerInfo[i]); + for (int32_t index = 0; index < pNode->replica; ++index) { + if (index == pNode->selfIndex) continue; + syncRestartPeer(pNode->peerInfo[index]); } } return code; } -static void syncCheckRole(SSyncPeer *pPeer, SPeerStatus peersStatus[], int8_t newRole) { +static void syncCheckRole(SSyncPeer *pPeer, SPeerStatus* peersStatus, int8_t newPeerRole) { SSyncNode *pNode = pPeer->pSyncNode; - int8_t peerOldRole = pPeer->role; - int8_t selfOldRole = nodeRole; - int8_t i, syncRequired = 0; - - // pNode->peerInfo[pNode->selfIndex]->version = nodeVersion; - pPeer->role = newRole; + int8_t oldPeerRole = pPeer->role; + int8_t oldSelfRole = nodeRole; + int8_t syncRequired = 0; - sDebug("%s, own role:%s, new peer role:%s", pPeer->id, syncRole[nodeRole], syncRole[pPeer->role]); + pPeer->role = newPeerRole; + sDebug("%s, peer role:%s change to %s", pPeer->id, syncRole[oldPeerRole], syncRole[newPeerRole]); SSyncPeer *pMaster = syncCheckMaster(pNode); if (pMaster) { // master is there pNode->pMaster = pMaster; - sDebug("%s, it is the master, ver:%" PRIu64, pMaster->id, pMaster->version); + sDebug("%s, it is the master, sver:%" PRIu64, pMaster->id, pMaster->version); if 
(syncValidateMaster(pPeer) < 0) return; if (nodeRole == TAOS_SYNC_ROLE_UNSYNCED) { if (nodeVersion < pMaster->version) { + sDebug("%s, is master, sync required, self sver:%" PRIu64, pMaster->id, nodeVersion); syncRequired = 1; } else { - sInfo("%s is master, work as slave, ver:%" PRIu64, pMaster->id, pMaster->version); + sInfo("%s, is master, work as slave, self sver:%" PRIu64, pMaster->id, nodeVersion); nodeRole = TAOS_SYNC_ROLE_SLAVE; (*pNode->notifyRole)(pNode->ahandle, nodeRole); } } else if (nodeRole == TAOS_SYNC_ROLE_SLAVE && pMaster == pPeer) { - // nodeVersion = pMaster->version; + sDebug("%s, is master, continue work as slave, self sver:%" PRIu64, pMaster->id, nodeVersion); } } else { // master not there, if all peer's state and version are consistent, choose the master - int consistent = 0; - if (peersStatus) { - for (i = 0; i < pNode->replica; ++i) { - SSyncPeer *pTemp = pNode->peerInfo[i]; - if (pTemp->role != peersStatus[i].role) break; - if ((pTemp->role != TAOS_SYNC_ROLE_OFFLINE) && (pTemp->version != peersStatus[i].version)) break; + int32_t consistent = 0; + int32_t index = 0; + if (peersStatus != NULL) { + for (index = 0; index < pNode->replica; ++index) { + SSyncPeer *pTemp = pNode->peerInfo[index]; + if (pTemp->role != peersStatus[index].role) break; + if ((pTemp->role != TAOS_SYNC_ROLE_OFFLINE) && (pTemp->version != peersStatus[index].version)) break; } - if (i >= pNode->replica) consistent = 1; + if (index >= pNode->replica) consistent = 1; } else { if (pNode->replica == 2) consistent = 1; } if (consistent) { + sDebug("vgId:%d, choose master", pNode->vgId); syncChooseMaster(pNode); + } else { + sDebug("vgId:%d, version inconsistent, cannot choose master", pNode->vgId); } } @@ -731,7 +762,8 @@ static void syncCheckRole(SSyncPeer *pPeer, SPeerStatus peersStatus[], int8_t ne syncRecoverFromMaster(pMaster); } - if (peerOldRole != newRole || nodeRole != selfOldRole) { + if (oldPeerRole != newPeerRole || nodeRole != oldSelfRole) { + sDebug("vgId:%d, roles changed, broadcast status", pNode->vgId); syncBroadcastStatus(pNode); } @@ -741,15 +773,16 @@ static void syncCheckRole(SSyncPeer *pPeer, SPeerStatus peersStatus[], int8_t ne } static void syncRestartPeer(SSyncPeer *pPeer) { - sDebug("%s, restart connection", pPeer->id); + sDebug("%s, restart peer connection", pPeer->id); syncClosePeerConn(pPeer); pPeer->sstatus = TAOS_SYNC_STATUS_INIT; - int ret = strcmp(pPeer->fqdn, tsNodeFqdn); + int32_t ret = strcmp(pPeer->fqdn, tsNodeFqdn); if (ret > 0 || (ret == 0 && pPeer->port > tsSyncPort)) { - taosTmrReset(syncCheckPeerConnection, tsSyncTimer * 1000, pPeer, syncTmrCtrl, &pPeer->timer); + sDebug("%s, check peer connection in 1000 ms", pPeer->id); + taosTmrReset(syncCheckPeerConnection, tsSyncTimer * 1000, pPeer, tsSyncTmrCtrl, &pPeer->timer); } } @@ -783,11 +816,11 @@ static void syncProcessSyncRequest(char *msg, SSyncPeer *pPeer) { pthread_t thread; pthread_attr_init(&thattr); pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_DETACHED); - int ret = pthread_create(&thread, &thattr, syncRetrieveData, pPeer); + int32_t ret = pthread_create(&thread, &thattr, syncRetrieveData, pPeer); pthread_attr_destroy(&thattr); if (ret != 0) { - sError("%s, failed to create sync thread(%s)", pPeer->id, strerror(errno)); + sError("%s, failed to create sync thread since %s", pPeer->id, strerror(errno)); syncDecPeerRef(pPeer); } else { pPeer->sstatus = TAOS_SYNC_STATUS_START; @@ -828,7 +861,7 @@ static void syncRecoverFromMaster(SSyncPeer *pPeer) { // Ensure the sync of mnode not interrupted if 
(pNode->vgId != 1 && tsSyncNum >= tsMaxSyncNum) { sInfo("%s, %d syncs are in process, try later", pPeer->id, tsSyncNum); - taosTmrReset(syncTryRecoverFromMaster, 500 + (pNode->vgId * 10) % 200, pPeer, syncTmrCtrl, &pPeer->timer); + taosTmrReset(syncTryRecoverFromMaster, 500 + (pNode->vgId * 10) % 200, pPeer, tsSyncTmrCtrl, &pPeer->timer); return; } @@ -841,9 +874,9 @@ static void syncRecoverFromMaster(SSyncPeer *pPeer) { firstPkt.syncHead.len = sizeof(firstPkt) - sizeof(SSyncHead); tstrncpy(firstPkt.fqdn, tsNodeFqdn, sizeof(firstPkt.fqdn)); firstPkt.port = tsSyncPort; - taosTmrReset(syncNotStarted, tsSyncTimer * 1000, pPeer, syncTmrCtrl, &pPeer->timer); + taosTmrReset(syncNotStarted, tsSyncTimer * 1000, pPeer, tsSyncTmrCtrl, &pPeer->timer); - if (write(pPeer->peerFd, &firstPkt, sizeof(firstPkt)) != sizeof(firstPkt)) { + if (taosWriteMsg(pPeer->peerFd, &firstPkt, sizeof(firstPkt)) != sizeof(firstPkt)) { sError("%s, failed to send sync-req to peer", pPeer->id); } else { nodeSStatus = TAOS_SYNC_STATUS_START; @@ -857,12 +890,12 @@ static void syncProcessFwdResponse(char *cont, SSyncPeer *pPeer) { SSyncFwds *pSyncFwds = pNode->pSyncFwds; SFwdInfo * pFwdInfo; - sDebug("%s, forward-rsp is received, ver:%" PRIu64, pPeer->id, pFwdRsp->version); + sTrace("%s, forward-rsp is received, code:%x hver:%" PRIu64, pPeer->id, pFwdRsp->code, pFwdRsp->version); SFwdInfo *pFirst = pSyncFwds->fwdInfo + pSyncFwds->first; if (pFirst->version <= pFwdRsp->version && pSyncFwds->fwds > 0) { // find the forwardInfo from first - for (int i = 0; i < pSyncFwds->fwds; ++i) { + for (int32_t i = 0; i < pSyncFwds->fwds; ++i) { pFwdInfo = pSyncFwds->fwdInfo + (i + pSyncFwds->first) % tsMaxFwdInfo; if (pFwdRsp->version == pFwdInfo->version) break; } @@ -876,16 +909,16 @@ static void syncProcessForwardFromPeer(char *cont, SSyncPeer *pPeer) { SSyncNode *pNode = pPeer->pSyncNode; SWalHead * pHead = (SWalHead *)cont; - sDebug("%s, forward is received, ver:%" PRIu64, pPeer->id, pHead->version); + sTrace("%s, forward is received, hver:%" PRIu64 ", len:%d", pPeer->id, pHead->version, pHead->len); if (nodeRole == TAOS_SYNC_ROLE_SLAVE) { // nodeVersion = pHead->version; - (*pNode->writeToCache)(pNode->ahandle, pHead, TAOS_QTYPE_FWD); + (*pNode->writeToCache)(pNode->ahandle, pHead, TAOS_QTYPE_FWD, NULL); } else { if (nodeSStatus != TAOS_SYNC_STATUS_INIT) { syncSaveIntoBuffer(pPeer, pHead); } else { - sError("%s, forward discarded, ver:%" PRIu64, pPeer->id, pHead->version); + sError("%s, forward discarded, hver:%" PRIu64, pPeer->id, pHead->version); } } } @@ -894,21 +927,21 @@ static void syncProcessPeersStatusMsg(char *cont, SSyncPeer *pPeer) { SSyncNode * pNode = pPeer->pSyncNode; SPeersStatus *pPeersStatus = (SPeersStatus *)cont; - sDebug("%s, status msg is received, self:%s ver:%" PRIu64 " peer:%s ver:%" PRIu64 ", ack:%d", pPeer->id, - syncRole[nodeRole], nodeVersion, syncRole[pPeersStatus->role], pPeersStatus->version, pPeersStatus->ack); + sDebug("%s, status msg is received, self:%s sver:%" PRIu64 " peer:%s sver:%" PRIu64 ", ack:%d tranId:%u type:%s", pPeer->id, + syncRole[nodeRole], nodeVersion, syncRole[pPeersStatus->role], pPeersStatus->version, pPeersStatus->ack, pPeersStatus->tranId, statusType[pPeersStatus->type]); pPeer->version = pPeersStatus->version; syncCheckRole(pPeer, pPeersStatus->peersStatus, pPeersStatus->role); if (pPeersStatus->ack) { - syncSendPeersStatusMsgToPeer(pPeer, 0); + syncSendPeersStatusMsgToPeer(pPeer, 0, pPeersStatus->type + 1, pPeersStatus->tranId); } } -static int syncReadPeerMsg(SSyncPeer *pPeer, 
SSyncHead *pHead, char *cont) { +static int32_t syncReadPeerMsg(SSyncPeer *pPeer, SSyncHead *pHead, char *cont) { if (pPeer->peerFd < 0) return -1; - int hlen = taosReadMsg(pPeer->peerFd, pHead, sizeof(SSyncHead)); + int32_t hlen = taosReadMsg(pPeer->peerFd, pHead, sizeof(SSyncHead)); if (hlen != sizeof(SSyncHead)) { sDebug("%s, failed to read msg, hlen:%d", pPeer->id, hlen); return -1; @@ -916,11 +949,12 @@ static int syncReadPeerMsg(SSyncPeer *pPeer, SSyncHead *pHead, char *cont) { // head.len = htonl(head.len); if (pHead->len < 0) { - sError("%s, invalid pkt length, len:%d", pPeer->id, pHead->len); + sError("%s, invalid pkt length, hlen:%d", pPeer->id, pHead->len); return -1; } - int bytes = taosReadMsg(pPeer->peerFd, cont, pHead->len); + assert(pHead->len <= TSDB_MAX_WAL_SIZE); + int32_t bytes = taosReadMsg(pPeer->peerFd, cont, pHead->len); if (bytes != pHead->len) { sError("%s, failed to read, bytes:%d len:%d", pPeer->id, bytes, pHead->len); return -1; @@ -929,7 +963,7 @@ static int syncReadPeerMsg(SSyncPeer *pPeer, SSyncHead *pHead, char *cont) { return 0; } -static int syncProcessPeerMsg(void *param, void *buffer) { +static int32_t syncProcessPeerMsg(void *param, void *buffer) { SSyncPeer *pPeer = param; SSyncHead head; char * cont = buffer; @@ -937,7 +971,7 @@ static int syncProcessPeerMsg(void *param, void *buffer) { SSyncNode *pNode = pPeer->pSyncNode; pthread_mutex_lock(&(pNode->mutex)); - int code = syncReadPeerMsg(pPeer, &head, cont); + int32_t code = syncReadPeerMsg(pPeer, &head, cont); if (code == 0) { if (head.type == TAOS_SMSG_FORWARD) { @@ -958,7 +992,7 @@ static int syncProcessPeerMsg(void *param, void *buffer) { #define statusMsgLen sizeof(SSyncHead) + sizeof(SPeersStatus) + sizeof(SPeerStatus) * TAOS_SYNC_MAX_REPLICA -static void syncSendPeersStatusMsgToPeer(SSyncPeer *pPeer, char ack) { +static void syncSendPeersStatusMsgToPeer(SSyncPeer *pPeer, char ack, int8_t type, uint16_t tranId) { SSyncNode *pNode = pPeer->pSyncNode; char msg[statusMsgLen] = {0}; @@ -973,22 +1007,22 @@ static void syncSendPeersStatusMsgToPeer(SSyncPeer *pPeer, char ack) { pPeersStatus->version = nodeVersion; pPeersStatus->role = nodeRole; pPeersStatus->ack = ack; + pPeersStatus->type = type; + pPeersStatus->tranId = tranId; - for (int i = 0; i < pNode->replica; ++i) { + for (int32_t i = 0; i < pNode->replica; ++i) { pPeersStatus->peersStatus[i].role = pNode->peerInfo[i]->role; pPeersStatus->peersStatus[i].version = pNode->peerInfo[i]->version; } - int retLen = write(pPeer->peerFd, msg, statusMsgLen); + int32_t retLen = taosWriteMsg(pPeer->peerFd, msg, statusMsgLen); if (retLen == statusMsgLen) { - sDebug("%s, status msg is sent, self:%s ver:%" PRIu64 ", ack:%d", pPeer->id, syncRole[pPeersStatus->role], - pPeersStatus->version, pPeersStatus->ack); + sDebug("%s, status msg is sent, self:%s sver:%" PRIu64 ", ack:%d tranId:%u type:%s", pPeer->id, syncRole[pPeersStatus->role], + pPeersStatus->version, pPeersStatus->ack, pPeersStatus->tranId, statusType[pPeersStatus->type]); } else { sDebug("%s, failed to send status msg, restart", pPeer->id); syncRestartConnection(pPeer); } - - return; } static void syncSetupPeerConnection(SSyncPeer *pPeer) { @@ -997,14 +1031,14 @@ static void syncSetupPeerConnection(SSyncPeer *pPeer) { taosTmrStopA(&pPeer->timer); if (pPeer->peerFd >= 0) { sDebug("%s, send role version to peer", pPeer->id); - syncSendPeersStatusMsgToPeer(pPeer, 1); + syncSendPeersStatusMsgToPeer(pPeer, 1, SYNC_STATUS_SETUP_CONN, syncGenTranId()); return; } - int connFd = 
taosOpenTcpClientSocket(pPeer->ip, pPeer->port, 0); + int32_t connFd = taosOpenTcpClientSocket(pPeer->ip, pPeer->port, 0); if (connFd < 0) { - sDebug("%s, failed to open tcp socket(%s)", pPeer->id, strerror(errno)); - taosTmrReset(syncCheckPeerConnection, tsSyncTimer * 1000, pPeer, syncTmrCtrl, &pPeer->timer); + sDebug("%s, failed to open tcp socket since %s", pPeer->id, strerror(errno)); + taosTmrReset(syncCheckPeerConnection, tsSyncTimer * 1000, pPeer, tsSyncTmrCtrl, &pPeer->timer); return; } @@ -1016,16 +1050,16 @@ static void syncSetupPeerConnection(SSyncPeer *pPeer) { firstPkt.port = tsSyncPort; firstPkt.sourceId = pNode->vgId; // tell arbitrator its vgId - if (write(connFd, &firstPkt, sizeof(firstPkt)) == sizeof(firstPkt)) { + if (taosWriteMsg(connFd, &firstPkt, sizeof(firstPkt)) == sizeof(firstPkt)) { sDebug("%s, connection to peer server is setup", pPeer->id); pPeer->peerFd = connFd; pPeer->role = TAOS_SYNC_ROLE_UNSYNCED; pPeer->pConn = taosAllocateTcpConn(tsTcpPool, pPeer, connFd); syncAddPeerRef(pPeer); } else { - sDebug("try later"); - close(connFd); - taosTmrReset(syncCheckPeerConnection, tsSyncTimer * 1000, pPeer, syncTmrCtrl, &pPeer->timer); + sDebug("%s, failed to setup peer connection to server since %s, try later", pPeer->id, strerror(errno)); + taosClose(connFd); + taosTmrReset(syncCheckPeerConnection, tsSyncTimer * 1000, pPeer, tsSyncTmrCtrl, &pPeer->timer); } } @@ -1050,7 +1084,7 @@ static void syncCreateRestoreDataThread(SSyncPeer *pPeer) { pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_DETACHED); syncAddPeerRef(pPeer); - int ret = pthread_create(&(thread), &thattr, (void *)syncRestoreData, pPeer); + int32_t ret = pthread_create(&(thread), &thattr, (void *)syncRestoreData, pPeer); pthread_attr_destroy(&thattr); if (ret < 0) { @@ -1062,22 +1096,22 @@ static void syncCreateRestoreDataThread(SSyncPeer *pPeer) { } } -static void syncProcessIncommingConnection(int connFd, uint32_t sourceIp) { - char ipstr[24]; - int i; +static void syncProcessIncommingConnection(int32_t connFd, uint32_t sourceIp) { + char ipstr[24]; + int32_t i; tinet_ntoa(ipstr, sourceIp); sDebug("peer TCP connection from ip:%s", ipstr); SFirstPkt firstPkt; if (taosReadMsg(connFd, &firstPkt, sizeof(firstPkt)) != sizeof(firstPkt)) { - sError("failed to read peer first pkt from ip:%s(%s)", ipstr, strerror(errno)); + sError("failed to read peer first pkt from ip:%s since %s", ipstr, strerror(errno)); taosCloseSocket(connFd); return; } int32_t vgId = firstPkt.syncHead.vgId; - SSyncNode **ppNode = (SSyncNode **)taosHashGet(vgIdHash, (const char *)&vgId, sizeof(int32_t)); + SSyncNode **ppNode = (SSyncNode **)taosHashGet(tsVgIdHash, (const char *)&vgId, sizeof(int32_t)); if (ppNode == NULL || *ppNode == NULL) { sError("vgId:%d, vgId could not be found", vgId); taosCloseSocket(connFd); @@ -1110,7 +1144,7 @@ static void syncProcessIncommingConnection(int connFd, uint32_t sourceIp) { pPeer->pConn = taosAllocateTcpConn(tsTcpPool, pPeer, connFd); syncAddPeerRef(pPeer); sDebug("%s, ready to exchange data", pPeer->id); - syncSendPeersStatusMsgToPeer(pPeer, 1); + syncSendPeersStatusMsgToPeer(pPeer, 1, SYNC_STATUS_EXCHANGE_DATA, syncGenTranId()); } } @@ -1122,10 +1156,10 @@ static void syncProcessBrokenLink(void *param) { SSyncPeer *pPeer = param; SSyncNode *pNode = pPeer->pSyncNode; - syncAddNodeRef(pNode); + if (taosAcquireRef(tsSyncRefId, pNode->rid) == NULL) return; pthread_mutex_lock(&(pNode->mutex)); - sDebug("%s, TCP link is broken(%s)", pPeer->id, strerror(errno)); + sDebug("%s, TCP link is broken since %s", 
pPeer->id, strerror(errno)); pPeer->peerFd = -1; if (syncDecPeerRef(pPeer) != 0) { @@ -1133,7 +1167,7 @@ static void syncProcessBrokenLink(void *param) { } pthread_mutex_unlock(&(pNode->mutex)); - syncDecNodeRef(pNode); + taosReleaseRef(tsSyncRefId, pNode->rid); } static void syncSaveFwdInfo(SSyncNode *pNode, uint64_t version, void *mhandle) { @@ -1150,35 +1184,34 @@ static void syncSaveFwdInfo(SSyncNode *pNode, uint64_t version, void *mhandle) { } SFwdInfo *pFwdInfo = pSyncFwds->fwdInfo + pSyncFwds->last; + memset(pFwdInfo, 0, sizeof(SFwdInfo)); pFwdInfo->version = version; pFwdInfo->mhandle = mhandle; - pFwdInfo->acks = 0; - pFwdInfo->confirmed = 0; pFwdInfo->time = time; pSyncFwds->fwds++; - sDebug("vgId:%d, fwd info is saved, ver:%" PRIu64 " fwds:%d ", pNode->vgId, version, pSyncFwds->fwds); + sTrace("vgId:%d, fwd info is saved, hver:%" PRIu64 " fwds:%d ", pNode->vgId, version, pSyncFwds->fwds); } static void syncRemoveConfirmedFwdInfo(SSyncNode *pNode) { SSyncFwds *pSyncFwds = pNode->pSyncFwds; - int fwds = pSyncFwds->fwds; - for (int i = 0; i < fwds; ++i) { + int32_t fwds = pSyncFwds->fwds; + for (int32_t i = 0; i < fwds; ++i) { SFwdInfo *pFwdInfo = pSyncFwds->fwdInfo + pSyncFwds->first; if (pFwdInfo->confirmed == 0) break; pSyncFwds->first = (pSyncFwds->first + 1) % tsMaxFwdInfo; pSyncFwds->fwds--; if (pSyncFwds->fwds == 0) pSyncFwds->first = pSyncFwds->last; - // sDebug("vgId:%d, fwd info is removed, ver:%d, fwds:%d", + // sDebug("vgId:%d, fwd info is removed, hver:%d, fwds:%d", // pNode->vgId, pFwdInfo->version, pSyncFwds->fwds); memset(pFwdInfo, 0, sizeof(SFwdInfo)); } } static void syncProcessFwdAck(SSyncNode *pNode, SFwdInfo *pFwdInfo, int32_t code) { - int confirm = 0; + int32_t confirm = 0; if (pFwdInfo->code == 0) pFwdInfo->code = code; if (code == 0) { @@ -1194,30 +1227,123 @@ static void syncProcessFwdAck(SSyncNode *pNode, SFwdInfo *pFwdInfo, int32_t code } if (confirm && pFwdInfo->confirmed == 0) { - sDebug("vgId:%d, forward is confirmed, ver:%" PRIu64 " code:%x", pNode->vgId, pFwdInfo->version, pFwdInfo->code); + sTrace("vgId:%d, forward is confirmed, hver:%" PRIu64 " code:%x", pNode->vgId, pFwdInfo->version, pFwdInfo->code); (*pNode->confirmForward)(pNode->ahandle, pFwdInfo->mhandle, pFwdInfo->code); pFwdInfo->confirmed = 1; } } +static void syncMonitorNodeRole(void *param, void *tmrId) { + int64_t rid = (int64_t)param; + SSyncNode *pNode = taosAcquireRef(tsSyncRefId, rid); + if (pNode == NULL) return; + + for (int32_t index = 0; index < pNode->replica; index++) { + if (index == pNode->selfIndex) continue; + + SSyncPeer *pPeer = pNode->peerInfo[index]; + if (pPeer->role > TAOS_SYNC_ROLE_UNSYNCED && nodeRole > TAOS_SYNC_ROLE_UNSYNCED) continue; + if (pPeer->sstatus > TAOS_SYNC_STATUS_INIT || nodeSStatus > TAOS_SYNC_STATUS_INIT) continue; + + syncSendPeersStatusMsgToPeer(pPeer, 1, SYNC_STATUS_CHECK_ROLE, syncGenTranId()); + } + + pNode->pRoleTimer = taosTmrStart(syncMonitorNodeRole, SYNC_ROLE_TIMER, (void *)pNode->rid, tsSyncTmrCtrl); + taosReleaseRef(tsSyncRefId, rid); +} + static void syncMonitorFwdInfos(void *param, void *tmrId) { - SSyncNode *pNode = param; + int64_t rid = (int64_t)param; + SSyncNode *pNode = taosAcquireRef(tsSyncRefId, rid); + if (pNode == NULL) return; + SSyncFwds *pSyncFwds = pNode->pSyncFwds; - if (pSyncFwds == NULL) return; - uint64_t time = taosGetTimestampMs(); + if (pSyncFwds) { + int64_t time = taosGetTimestampMs(); - if (pSyncFwds->fwds > 0) { - pthread_mutex_lock(&(pNode->mutex)); - for (int i = 0; i < pSyncFwds->fwds; ++i) { - SFwdInfo 
*pFwdInfo = pSyncFwds->fwdInfo + (pSyncFwds->first + i) % tsMaxFwdInfo; - if (time - pFwdInfo->time < 2000) break; - syncProcessFwdAck(pNode, pFwdInfo, TSDB_CODE_RPC_NETWORK_UNAVAIL); + if (pSyncFwds->fwds > 0) { + pthread_mutex_lock(&(pNode->mutex)); + for (int32_t i = 0; i < pSyncFwds->fwds; ++i) { + SFwdInfo *pFwdInfo = pSyncFwds->fwdInfo + (pSyncFwds->first + i) % tsMaxFwdInfo; + if (ABS(time - pFwdInfo->time) < 2000) break; + + sDebug("vgId:%d, forward info expired, hver:%" PRIu64 " curtime:%" PRIu64 " savetime:%" PRIu64, pNode->vgId, + pFwdInfo->version, time, pFwdInfo->time); + syncProcessFwdAck(pNode, pFwdInfo, TSDB_CODE_RPC_NETWORK_UNAVAIL); + } + + syncRemoveConfirmedFwdInfo(pNode); + pthread_mutex_unlock(&(pNode->mutex)); } - syncRemoveConfirmedFwdInfo(pNode); - pthread_mutex_unlock(&(pNode->mutex)); + pNode->pFwdTimer = taosTmrStart(syncMonitorFwdInfos, SYNC_FWD_TIMER, (void *)pNode->rid, tsSyncTmrCtrl); + } + + taosReleaseRef(tsSyncRefId, rid); +} + +static int32_t syncForwardToPeerImpl(SSyncNode *pNode, void *data, void *mhandle, int32_t qtype) { + SSyncPeer *pPeer; + SSyncHead *pSyncHead; + SWalHead * pWalHead = data; + int32_t fwdLen; + int32_t code = 0; + + + if (pWalHead->version > nodeVersion + 1) { + sError("vgId:%d, hver:%" PRIu64 ", inconsistent with sver:%" PRIu64, pNode->vgId, pWalHead->version, nodeVersion); + if (nodeRole == TAOS_SYNC_ROLE_SLAVE) { + sInfo("vgId:%d, restart connection", pNode->vgId); + for (int32_t i = 0; i < pNode->replica; ++i) { + pPeer = pNode->peerInfo[i]; + syncRestartConnection(pPeer); + } + } + + return TSDB_CODE_SYN_INVALID_VERSION; + } + + // always update version + nodeVersion = pWalHead->version; + sTrace("vgId:%d, forward to peer, replica:%d role:%s qtype:%s hver:%" PRIu64, pNode->vgId, pNode->replica, + syncRole[nodeRole], qtypeStr[qtype], pWalHead->version); + + if (pNode->replica == 1 || nodeRole != TAOS_SYNC_ROLE_MASTER) return 0; + + // only pkt from RPC or CQ can be forwarded + if (qtype != TAOS_QTYPE_RPC && qtype != TAOS_QTYPE_CQ) return 0; + + // a hacker way to improve the performance + pSyncHead = (SSyncHead *)(((char *)pWalHead) - sizeof(SSyncHead)); + pSyncHead->type = TAOS_SMSG_FORWARD; + pSyncHead->pversion = 0; + pSyncHead->len = sizeof(SWalHead) + pWalHead->len; + fwdLen = pSyncHead->len + sizeof(SSyncHead); // include the WAL and SYNC head + + pthread_mutex_lock(&(pNode->mutex)); + + for (int32_t i = 0; i < pNode->replica; ++i) { + pPeer = pNode->peerInfo[i]; + if (pPeer == NULL || pPeer->peerFd < 0) continue; + if (pPeer->role != TAOS_SYNC_ROLE_SLAVE && pPeer->sstatus != TAOS_SYNC_STATUS_CACHE) continue; + + if (pNode->quorum > 1 && code == 0) { + syncSaveFwdInfo(pNode, pWalHead->version, mhandle); + code = 1; + } + + int32_t retLen = write(pPeer->peerFd, pSyncHead, fwdLen); + if (retLen == fwdLen) { + sTrace("%s, forward is sent, hver:%" PRIu64 " contLen:%d", pPeer->id, pWalHead->version, pWalHead->len); + } else { + sError("%s, failed to forward, hver:%" PRIu64 " retLen:%d", pPeer->id, pWalHead->version, retLen); + syncRestartConnection(pPeer); + } } - pNode->pFwdTimer = taosTmrStart(syncMonitorFwdInfos, 300, pNode, syncTmrCtrl); + pthread_mutex_unlock(&(pNode->mutex)); + + return code; } + diff --git a/src/sync/src/syncRestore.c b/src/sync/src/syncRestore.c index ebb6c3a0a9edff5acfc5f2ce7da8b58f03d8ab4a..cc2315fb15c6845a763a95928f5e66b13bbbca40 100644 --- a/src/sync/src/syncRestore.c +++ b/src/sync/src/syncRestore.c @@ -13,7 +13,9 @@ * along with this program. If not, see . 
*/ +#define _DEFAULT_SOURCE #include "os.h" +#include "taoserror.h" #include "tlog.h" #include "tutil.h" #include "ttimer.h" @@ -48,20 +50,21 @@ static void syncRemoveExtraFile(SSyncPeer *pPeer, int32_t sindex, int32_t eindex } } -static int syncRestoreFile(SSyncPeer *pPeer, uint64_t *fversion) { +static int32_t syncRestoreFile(SSyncPeer *pPeer, uint64_t *fversion) { SSyncNode *pNode = pPeer->pSyncNode; SFileInfo minfo; memset(&minfo, 0, sizeof(minfo)); /* = {0}; */ // master file info SFileInfo sinfo; memset(&sinfo, 0, sizeof(sinfo)); /* = {0}; */ // slave file info SFileAck fileAck; - int code = -1; + int32_t code = -1; char name[TSDB_FILENAME_LEN * 2] = {0}; uint32_t pindex = 0; // index in last restore + bool fileChanged = false; *fversion = 0; sinfo.index = 0; while (1) { // read file info - int ret = taosReadMsg(pPeer->syncFd, &(minfo), sizeof(minfo)); + int32_t ret = taosReadMsg(pPeer->syncFd, &(minfo), sizeof(minfo)); if (ret < 0) break; // if no more file from master, break; @@ -103,7 +106,7 @@ static int syncRestoreFile(SSyncPeer *pPeer, uint64_t *fversion) { minfo.name[sizeof(minfo.name) - 1] = 0; snprintf(name, sizeof(name), "%s/%s", pNode->path, minfo.name); - int dfd = open(name, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU | S_IRWXG | S_IRWXO); + int32_t dfd = open(name, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU | S_IRWXG | S_IRWXO); if (dfd < 0) { sError("%s, failed to open file:%s", pPeer->id, name); break; @@ -114,30 +117,32 @@ static int syncRestoreFile(SSyncPeer *pPeer, uint64_t *fversion) { close(dfd); if (ret < 0) break; + fileChanged = true; sDebug("%s, %s is received, size:%" PRId64, pPeer->id, minfo.name, minfo.size); } - if (code == 0 && (minfo.fversion != sinfo.fversion)) { + if (code == 0 && fileChanged) { // data file is changed, code shall be set to 1 *fversion = minfo.fversion; code = 1; } if (code < 0) { - sError("%s, failed to restore %s(%s)", pPeer->id, name, strerror(errno)); + sError("%s, failed to restore %s since %s", pPeer->id, name, strerror(errno)); } return code; } -static int syncRestoreWal(SSyncPeer *pPeer) { +static int32_t syncRestoreWal(SSyncPeer *pPeer) { SSyncNode *pNode = pPeer->pSyncNode; - int ret, code = -1; + int32_t ret, code = -1; - void *buffer = calloc(1024000, 1); // size for one record + void *buffer = calloc(SYNC_MAX_SIZE, 1); // size for one record if (buffer == NULL) return -1; SWalHead *pHead = (SWalHead *)buffer; + uint64_t lastVer = 0; while (1) { ret = taosReadMsg(pPeer->syncFd, pHead, sizeof(SWalHead)); @@ -151,12 +156,19 @@ static int syncRestoreWal(SSyncPeer *pPeer) { ret = taosReadMsg(pPeer->syncFd, pHead->cont, pHead->len); if (ret < 0) break; - sDebug("%s, restore a record, ver:%" PRIu64, pPeer->id, pHead->version); - (*pNode->writeToCache)(pNode->ahandle, pHead, TAOS_QTYPE_WAL); + sTrace("%s, restore a record, qtype:wal len:%d hver:%" PRIu64, pPeer->id, pHead->len, pHead->version); + + if (lastVer == pHead->version) { + sError("%s, failed to restore record, same hver:%" PRIu64 ", wal sync failed", pPeer->id, lastVer); + break; + } + lastVer = pHead->version; + + (*pNode->writeToCache)(pNode->ahandle, pHead, TAOS_QTYPE_WAL, NULL); } if (code < 0) { - sError("%s, failed to restore wal(%s)", pPeer->id, strerror(errno)); + sError("%s, failed to restore wal from syncFd:%d since %s", pPeer->id, pPeer->syncFd, strerror(errno)); } free(buffer); @@ -167,16 +179,16 @@ static char *syncProcessOneBufferedFwd(SSyncPeer *pPeer, char *offset) { SSyncNode *pNode = pPeer->pSyncNode; SWalHead * pHead = (SWalHead *)offset; -
(*pNode->writeToCache)(pNode->ahandle, pHead, TAOS_QTYPE_FWD); + (*pNode->writeToCache)(pNode->ahandle, pHead, TAOS_QTYPE_FWD, NULL); offset += pHead->len + sizeof(SWalHead); return offset; } -static int syncProcessBufferedFwd(SSyncPeer *pPeer) { +static int32_t syncProcessBufferedFwd(SSyncPeer *pPeer) { SSyncNode * pNode = pPeer->pSyncNode; SRecvBuffer *pRecv = pNode->pRecv; - int forwards = 0; + int32_t forwards = 0; sDebug("%s, number of buffered forwards:%d", pPeer->id, pRecv->forwards); @@ -201,18 +213,18 @@ static int syncProcessBufferedFwd(SSyncPeer *pPeer) { return pRecv->code; } -int syncSaveIntoBuffer(SSyncPeer *pPeer, SWalHead *pHead) { +int32_t syncSaveIntoBuffer(SSyncPeer *pPeer, SWalHead *pHead) { SSyncNode * pNode = pPeer->pSyncNode; SRecvBuffer *pRecv = pNode->pRecv; if (pRecv == NULL) return -1; - int len = pHead->len + sizeof(SWalHead); + int32_t len = pHead->len + sizeof(SWalHead); if (pRecv->bufferSize - (pRecv->offset - pRecv->buffer) >= len) { memcpy(pRecv->offset, pHead, len); pRecv->offset += len; pRecv->forwards++; - sDebug("%s, fwd is saved into queue, ver:%" PRIu64 " fwds:%d", pPeer->id, pHead->version, pRecv->forwards); + sTrace("%s, fwd is saved into queue, hver:%" PRIu64 " fwds:%d", pPeer->id, pHead->version, pRecv->forwards); } else { sError("%s, buffer size:%d is too small", pPeer->id, pRecv->bufferSize); pRecv->code = -1; // set error code @@ -223,19 +235,19 @@ int syncSaveIntoBuffer(SSyncPeer *pPeer, SWalHead *pHead) { static void syncCloseRecvBuffer(SSyncNode *pNode) { if (pNode->pRecv) { - taosTFree(pNode->pRecv->buffer); + tfree(pNode->pRecv->buffer); } - taosTFree(pNode->pRecv); + tfree(pNode->pRecv); } -static int syncOpenRecvBuffer(SSyncNode *pNode) { +static int32_t syncOpenRecvBuffer(SSyncNode *pNode) { syncCloseRecvBuffer(pNode); SRecvBuffer *pRecv = calloc(sizeof(SRecvBuffer), 1); if (pRecv == NULL) return -1; - pRecv->bufferSize = 5000000; + pRecv->bufferSize = SYNC_RECV_BUFFER_SIZE; pRecv->buffer = malloc(pRecv->bufferSize); if (pRecv->buffer == NULL) { free(pRecv); @@ -250,13 +262,13 @@ static int syncOpenRecvBuffer(SSyncNode *pNode) { return 0; } -static int syncRestoreDataStepByStep(SSyncPeer *pPeer) { +static int32_t syncRestoreDataStepByStep(SSyncPeer *pPeer) { SSyncNode *pNode = pPeer->pSyncNode; nodeSStatus = TAOS_SYNC_STATUS_FILE; uint64_t fversion = 0; sDebug("%s, start to restore file", pPeer->id); - int code = syncRestoreFile(pPeer, &fversion); + int32_t code = syncRestoreFile(pPeer, &fversion); if (code < 0) { sError("%s, failed to restore file", pPeer->id); return -1; @@ -289,7 +301,7 @@ static int syncRestoreDataStepByStep(SSyncPeer *pPeer) { } void *syncRestoreData(void *param) { - SSyncPeer *pPeer = (SSyncPeer *)param; + SSyncPeer *pPeer = param; SSyncNode *pNode = pPeer->pSyncNode; taosBlockSIGPIPE(); @@ -298,7 +310,8 @@ void *syncRestoreData(void *param) { (*pNode->notifyRole)(pNode->ahandle, TAOS_SYNC_ROLE_SYNCING); if (syncOpenRecvBuffer(pNode) < 0) { - sError("%s, failed to allocate recv buffer", pPeer->id); + sError("%s, failed to allocate recv buffer, restart connection", pPeer->id); + syncRestartConnection(pPeer); } else { if (syncRestoreDataStepByStep(pPeer) == 0) { sInfo("%s, it is synced successfully", pPeer->id); diff --git a/src/sync/src/syncRetrieve.c b/src/sync/src/syncRetrieve.c index 60625d75eccdbe6bbb29f97b31ecd8e9855480a7..82f40854e8fc85632274f4566450963c03e40680 100644 --- a/src/sync/src/syncRetrieve.c +++ b/src/sync/src/syncRetrieve.c @@ -13,10 +13,8 @@ * along with this program. If not, see . 
*/ -#include -#include +#define _DEFAULT_SOURCE #include -#include #include "os.h" #include "tlog.h" #include "tutil.h" @@ -27,38 +25,38 @@ #include "tsync.h" #include "syncInt.h" -static int syncAddIntoWatchList(SSyncPeer *pPeer, char *name) { +static int32_t syncAddIntoWatchList(SSyncPeer *pPeer, char *name) { sDebug("%s, start to monitor:%s", pPeer->id, name); if (pPeer->notifyFd <= 0) { pPeer->watchNum = 0; pPeer->notifyFd = inotify_init1(IN_NONBLOCK); if (pPeer->notifyFd < 0) { - sError("%s, failed to init inotify(%s)", pPeer->id, strerror(errno)); + sError("%s, failed to init inotify since %s", pPeer->id, strerror(errno)); return -1; } - if (pPeer->watchFd == NULL) pPeer->watchFd = malloc(sizeof(int) * tsMaxWatchFiles); + if (pPeer->watchFd == NULL) pPeer->watchFd = malloc(sizeof(int32_t) * tsMaxWatchFiles); if (pPeer->watchFd == NULL) { sError("%s, failed to allocate watchFd", pPeer->id); return -1; } - memset(pPeer->watchFd, -1, sizeof(int) * tsMaxWatchFiles); + memset(pPeer->watchFd, -1, sizeof(int32_t) * tsMaxWatchFiles); } - int *wd = pPeer->watchFd + pPeer->watchNum; + int32_t *wd = pPeer->watchFd + pPeer->watchNum; if (*wd >= 0) { if (inotify_rm_watch(pPeer->notifyFd, *wd) < 0) { - sError("%s, failed to remove wd:%d(%s)", pPeer->id, *wd, strerror(errno)); + sError("%s, failed to remove wd:%d since %s", pPeer->id, *wd, strerror(errno)); return -1; } } *wd = inotify_add_watch(pPeer->notifyFd, name, IN_MODIFY | IN_DELETE); if (*wd == -1) { - sError("%s, failed to add %s(%s)", pPeer->id, name, strerror(errno)); + sError("%s, failed to add %s since %s", pPeer->id, name, strerror(errno)); return -1; } else { sDebug("%s, monitor %s, wd:%d watchNum:%d", pPeer->id, name, *wd, pPeer->watchNum); @@ -69,17 +67,17 @@ static int syncAddIntoWatchList(SSyncPeer *pPeer, char *name) { return 0; } -static int syncAreFilesModified(SSyncPeer *pPeer) { +static int32_t syncAreFilesModified(SSyncPeer *pPeer) { if (pPeer->notifyFd <= 0) return 0; - char buf[2048]; - int len = read(pPeer->notifyFd, buf, sizeof(buf)); + char buf[2048]; + int32_t len = read(pPeer->notifyFd, buf, sizeof(buf)); if (len < 0 && errno != EAGAIN) { - sError("%s, failed to read notify FD(%s)", pPeer->id, strerror(errno)); + sError("%s, failed to read notify FD since %s", pPeer->id, strerror(errno)); return -1; } - int code = 0; + int32_t code = 0; if (len > 0) { const struct inotify_event *event; char *ptr; @@ -97,11 +95,11 @@ static int syncAreFilesModified(SSyncPeer *pPeer) { return code; } -static int syncRetrieveFile(SSyncPeer *pPeer) { +static int32_t syncRetrieveFile(SSyncPeer *pPeer) { SSyncNode *pNode = pPeer->pSyncNode; SFileInfo fileInfo; SFileAck fileAck; - int code = -1; + int32_t code = -1; char name[TSDB_FILENAME_LEN * 2] = {0}; memset(&fileInfo, 0, sizeof(fileInfo)); @@ -146,10 +144,10 @@ static int syncRetrieveFile(SSyncPeer *pPeer) { } // send the file to peer - int sfd = open(name, O_RDONLY); + int32_t sfd = open(name, O_RDONLY); if (sfd < 0) break; - ret = taosTSendFile(pPeer->syncFd, sfd, NULL, fileInfo.size); + ret = taosSendFile(pPeer->syncFd, sfd, NULL, fileInfo.size); close(sfd); if (ret < 0) break; @@ -161,7 +159,7 @@ static int syncRetrieveFile(SSyncPeer *pPeer) { } if (code < 0) { - sError("%s, failed to retrieve file(%s)", pPeer->id, strerror(errno)); + sError("%s, failed to retrieve file since %s", pPeer->id, strerror(errno)); } return code; @@ -169,8 +167,8 @@ static int syncRetrieveFile(SSyncPeer *pPeer) { /* if only a partial record is read out, set the IN_MODIFY flag in event, so upper layer 
will reload the file to get a complete record */ -static int syncReadOneWalRecord(int sfd, SWalHead *pHead, uint32_t *pEvent) { - int ret; +static int32_t syncReadOneWalRecord(int32_t sfd, SWalHead *pHead, uint32_t *pEvent) { + int32_t ret; ret = read(sfd, pHead, sizeof(SWalHead)); if (ret < 0) return -1; @@ -182,6 +180,8 @@ static int syncReadOneWalRecord(int sfd, SWalHead *pHead, uint32_t *pEvent) { return 0; } + assert(pHead->len <= TSDB_MAX_WAL_SIZE); + ret = read(sfd, pHead->cont, pHead->len); if (ret < 0) return -1; @@ -194,27 +194,27 @@ static int syncReadOneWalRecord(int sfd, SWalHead *pHead, uint32_t *pEvent) { return sizeof(SWalHead) + pHead->len; } -static int syncMonitorLastWal(SSyncPeer *pPeer, char *name) { +static int32_t syncMonitorLastWal(SSyncPeer *pPeer, char *name) { pPeer->watchNum = 0; taosClose(pPeer->notifyFd); pPeer->notifyFd = inotify_init1(IN_NONBLOCK); if (pPeer->notifyFd < 0) { - sError("%s, failed to init inotify(%s)", pPeer->id, strerror(errno)); + sError("%s, failed to init inotify since %s", pPeer->id, strerror(errno)); return -1; } - if (pPeer->watchFd == NULL) pPeer->watchFd = malloc(sizeof(int) * tsMaxWatchFiles); + if (pPeer->watchFd == NULL) pPeer->watchFd = malloc(sizeof(int32_t) * tsMaxWatchFiles); if (pPeer->watchFd == NULL) { sError("%s, failed to allocate watchFd", pPeer->id); return -1; } - memset(pPeer->watchFd, -1, sizeof(int) * tsMaxWatchFiles); - int *wd = pPeer->watchFd; + memset(pPeer->watchFd, -1, sizeof(int32_t) * tsMaxWatchFiles); + int32_t *wd = pPeer->watchFd; *wd = inotify_add_watch(pPeer->notifyFd, name, IN_MODIFY | IN_CLOSE_WRITE); if (*wd == -1) { - sError("%s, failed to watch last wal(%s)", pPeer->id, strerror(errno)); + sError("%s, failed to watch last wal since %s", pPeer->id, strerror(errno)); return -1; } @@ -222,10 +222,10 @@ static int syncMonitorLastWal(SSyncPeer *pPeer, char *name) { } static int32_t syncCheckLastWalChanges(SSyncPeer *pPeer, uint32_t *pEvent) { - char buf[2048]; - int len = read(pPeer->notifyFd, buf, sizeof(buf)); + char buf[2048]; + int32_t len = read(pPeer->notifyFd, buf, sizeof(buf)); if (len < 0 && errno != EAGAIN) { - sError("%s, failed to read notify FD(%s)", pPeer->id, strerror(errno)); + sError("%s, failed to read notify FD since %s", pPeer->id, strerror(errno)); return -1; } @@ -243,11 +243,11 @@ static int32_t syncCheckLastWalChanges(SSyncPeer *pPeer, uint32_t *pEvent) { return 0; } -static int syncRetrieveLastWal(SSyncPeer *pPeer, char *name, uint64_t fversion, int64_t offset, uint32_t *pEvent) { - SWalHead *pHead = malloc(640000); - int code = -1; +static int32_t syncRetrieveLastWal(SSyncPeer *pPeer, char *name, uint64_t fversion, int64_t offset, uint32_t *pEvent) { + SWalHead *pHead = malloc(SYNC_MAX_SIZE); + int32_t code = -1; int32_t bytes = 0; - int sfd; + int32_t sfd; sfd = open(name, O_RDONLY); if (sfd < 0) { @@ -256,18 +256,18 @@ static int syncRetrieveLastWal(SSyncPeer *pPeer, char *name, uint64_t fversion, } (void)lseek(sfd, offset, SEEK_SET); - sDebug("%s, retrieve last wal, offset:%" PRId64 " fversion:%" PRIu64, pPeer->id, offset, fversion); + sDebug("%s, retrieve last wal, offset:%" PRId64 " fver:%" PRIu64, pPeer->id, offset, fversion); while (1) { - int wsize = syncReadOneWalRecord(sfd, pHead, pEvent); + int32_t wsize = syncReadOneWalRecord(sfd, pHead, pEvent); if (wsize < 0) break; if (wsize == 0) { code = 0; break; } - sDebug("%s, last wal is forwarded, ver:%" PRIu64, pPeer->id, pHead->version); - int ret = taosWriteMsg(pPeer->syncFd, pHead, wsize); + sTrace("%s, last wal is 
forwarded, hver:%" PRIu64, pPeer->id, pHead->version); + int32_t ret = taosWriteMsg(pPeer->syncFd, pHead, wsize); if (ret != wsize) break; pPeer->sversion = pHead->version; @@ -287,9 +287,9 @@ static int syncRetrieveLastWal(SSyncPeer *pPeer, char *name, uint64_t fversion, return -1; } -static int syncProcessLastWal(SSyncPeer *pPeer, char *wname, uint32_t index) { +static int32_t syncProcessLastWal(SSyncPeer *pPeer, char *wname, int64_t index) { SSyncNode *pNode = pPeer->pSyncNode; - int code = -1; + int32_t code = -1; char fname[TSDB_FILENAME_LEN * 2]; // full path to wal file if (syncAreFilesModified(pPeer) != 0) return -1; @@ -325,7 +325,7 @@ static int syncProcessLastWal(SSyncPeer *pPeer, char *wname, uint32_t index) { // if all data up to fversion is read out, it is over if (pPeer->sversion >= fversion && fversion > 0) { code = 0; - sDebug("%s, data up to fversion:%" PRId64 " has been read out, bytes:%d", pPeer->id, fversion, bytes); + sDebug("%s, data up to fver:%" PRIu64 " has been read out, bytes:%d", pPeer->id, fversion, bytes); break; } @@ -370,14 +370,14 @@ static int syncProcessLastWal(SSyncPeer *pPeer, char *wname, uint32_t index) { return code; } -static int syncRetrieveWal(SSyncPeer *pPeer) { +static int32_t syncRetrieveWal(SSyncPeer *pPeer) { SSyncNode * pNode = pPeer->pSyncNode; char fname[TSDB_FILENAME_LEN * 3]; char wname[TSDB_FILENAME_LEN * 2]; int32_t size; struct stat fstat; - int code = -1; - uint32_t index = 0; + int32_t code = -1; + int64_t index = 0; while (1) { // retrieve wal info @@ -403,10 +403,10 @@ static int syncRetrieveWal(SSyncPeer *pPeer) { size = fstat.st_size; sDebug("%s, retrieve wal:%s size:%d", pPeer->id, fname, size); - int sfd = open(fname, O_RDONLY); + int32_t sfd = open(fname, O_RDONLY); if (sfd < 0) break; - code = taosTSendFile(pPeer->syncFd, sfd, NULL, size); + code = taosSendFile(pPeer->syncFd, sfd, NULL, size); close(sfd); if (code < 0) break; @@ -416,19 +416,19 @@ static int syncRetrieveWal(SSyncPeer *pPeer) { } if (code == 0) { - sDebug("%s, wal retrieve is finished", pPeer->id); + sInfo("%s, wal retrieve is finished", pPeer->id); pPeer->sstatus = TAOS_SYNC_STATUS_CACHE; SWalHead walHead; memset(&walHead, 0, sizeof(walHead)); code = taosWriteMsg(pPeer->syncFd, &walHead, sizeof(walHead)); } else { - sError("%s, failed to send wal(%s)", pPeer->id, strerror(errno)); + sError("%s, failed to send wal since %s", pPeer->id, strerror(errno)); } return code; } -static int syncRetrieveDataStepByStep(SSyncPeer *pPeer) { +static int32_t syncRetrieveDataStepByStep(SSyncPeer *pPeer) { SSyncNode *pNode = pPeer->pSyncNode; SFirstPkt firstPkt; @@ -445,7 +445,7 @@ static int syncRetrieveDataStepByStep(SSyncPeer *pPeer) { pPeer->sversion = 0; pPeer->sstatus = TAOS_SYNC_STATUS_FILE; - sDebug("%s, start to retrieve file", pPeer->id); + sInfo("%s, start to retrieve file", pPeer->id); if (syncRetrieveFile(pPeer) < 0) { sError("%s, failed to retrieve file", pPeer->id); return -1; @@ -454,7 +454,7 @@ static int syncRetrieveDataStepByStep(SSyncPeer *pPeer) { // if no files are synced, there must be wal to sync, sversion must be larger than one if (pPeer->sversion == 0) pPeer->sversion = 1; - sDebug("%s, start to retrieve wal", pPeer->id); + sInfo("%s, start to retrieve wal", pPeer->id); if (syncRetrieveWal(pPeer) < 0) { sError("%s, failed to retrieve wal", pPeer->id); return -1; @@ -476,7 +476,7 @@ void *syncRetrieveData(void *param) { sInfo("%s, sync tcp is setup", pPeer->id); if (syncRetrieveDataStepByStep(pPeer) == 0) { - sDebug("%s, sync retrieve process is 
successful", pPeer->id); + sInfo("%s, sync retrieve process is successful", pPeer->id); } else { sError("%s, failed to retrieve data, restart connection", pPeer->id); syncRestartConnection(pPeer); diff --git a/src/sync/src/taosTcpPool.c b/src/sync/src/taosTcpPool.c index 6a210a136ffe67b2e1394d26bac4cb5083452c8c..d1d9815f4af2e787f251dd15e41c2d8eccfbebe9 100644 --- a/src/sync/src/taosTcpPool.c +++ b/src/sync/src/taosTcpPool.c @@ -13,18 +13,22 @@ * along with this program. If not, see . */ +#define _DEFAULT_SOURCE #include "os.h" #include "tulog.h" #include "tutil.h" #include "tsocket.h" #include "taoserror.h" #include "taosTcpPool.h" +#include "twal.h" +#include "tsync.h" +#include "syncInt.h" typedef struct SThreadObj { pthread_t thread; bool stop; - int pollFd; - int numOfFds; + int32_t pollFd; + int32_t numOfFds; struct SPoolObj *pPool; } SThreadObj; @@ -32,15 +36,15 @@ typedef struct SPoolObj { SPoolInfo info; SThreadObj **pThread; pthread_t thread; - int nextId; - int acceptFd; // FD for accept new connection + int32_t nextId; + int32_t acceptFd; // FD for accept new connection } SPoolObj; typedef struct { SThreadObj *pThread; - void *ahandle; - int fd; - int closedByApp; + void * ahandle; + int32_t fd; + int32_t closedByApp; } SConnObj; static void *taosAcceptPeerTcpConnection(void *argv); @@ -53,66 +57,66 @@ void *taosOpenTcpThreadPool(SPoolInfo *pInfo) { SPoolObj *pPool = calloc(sizeof(SPoolObj), 1); if (pPool == NULL) { - uError("TCP server, no enough memory"); + sError("failed to alloc pool for TCP server since no enough memory"); return NULL; } pPool->info = *pInfo; - pPool->pThread = (SThreadObj **)calloc(sizeof(SThreadObj *), pInfo->numOfThreads); + pPool->pThread = calloc(sizeof(SThreadObj *), pInfo->numOfThreads); if (pPool->pThread == NULL) { - uError("TCP server, no enough memory"); - free(pPool); + sError("failed to alloc pool thread for TCP server since no enough memory"); + tfree(pPool); return NULL; } pPool->acceptFd = taosOpenTcpServerSocket(pInfo->serverIp, pInfo->port); if (pPool->acceptFd < 0) { - free(pPool->pThread); - free(pPool); - uError("failed to create TCP server socket, port:%d (%s)", pInfo->port, strerror(errno)); + tfree(pPool->pThread); + tfree(pPool); + sError("failed to create TCP server socket, port:%d (%s)", pInfo->port, strerror(errno)); return NULL; } pthread_attr_init(&thattr); pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); if (pthread_create(&(pPool->thread), &thattr, (void *)taosAcceptPeerTcpConnection, pPool) != 0) { - uError("TCP server, failed to create accept thread, reason:%s", strerror(errno)); + sError("failed to create accept thread for TCP server since %s", strerror(errno)); close(pPool->acceptFd); - free(pPool->pThread); - free(pPool); + tfree(pPool->pThread); + tfree(pPool); return NULL; } pthread_attr_destroy(&thattr); - uDebug("%p TCP pool is created", pPool); + sDebug("%p TCP pool is created", pPool); return pPool; } void taosCloseTcpThreadPool(void *param) { - SPoolObj * pPool = (SPoolObj *)param; + SPoolObj * pPool = param; SThreadObj *pThread; shutdown(pPool->acceptFd, SHUT_RD); pthread_join(pPool->thread, NULL); - for (int i = 0; i < pPool->info.numOfThreads; ++i) { + for (int32_t i = 0; i < pPool->info.numOfThreads; ++i) { pThread = pPool->pThread[i]; if (pThread) taosStopPoolThread(pThread); } - uDebug("%p TCP pool is closed", pPool); + sDebug("%p TCP pool is closed", pPool); - taosTFree(pPool->pThread); - free(pPool); + tfree(pPool->pThread); + tfree(pPool); } -void *taosAllocateTcpConn(void *param, void 
*pPeer, int connFd) { +void *taosAllocateTcpConn(void *param, void *pPeer, int32_t connFd) { struct epoll_event event; - SPoolObj *pPool = (SPoolObj *)param; + SPoolObj *pPool = param; - SConnObj *pConn = (SConnObj *)calloc(sizeof(SConnObj), 1); + SConnObj *pConn = calloc(sizeof(SConnObj), 1); if (pConn == NULL) { terrno = TAOS_SYSTEM_ERROR(errno); return NULL; @@ -120,7 +124,7 @@ void *taosAllocateTcpConn(void *param, void *pPeer, int connFd) { SThreadObj *pThread = taosGetTcpThread(pPool); if (pThread == NULL) { - free(pConn); + tfree(pConn); return NULL; } @@ -133,23 +137,23 @@ void *taosAllocateTcpConn(void *param, void *pPeer, int connFd) { event.data.ptr = pConn; if (epoll_ctl(pThread->pollFd, EPOLL_CTL_ADD, connFd, &event) < 0) { - uError("failed to add fd:%d(%s)", connFd, strerror(errno)); + sError("failed to add fd:%d since %s", connFd, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); - free(pConn); + tfree(pConn); pConn = NULL; } else { pThread->numOfFds++; - uDebug("%p fd:%d is added to epoll thread, num:%d", pThread, connFd, pThread->numOfFds); + sDebug("%p fd:%d is added to epoll thread, num:%d", pThread, connFd, pThread->numOfFds); } return pConn; } void taosFreeTcpConn(void *param) { - SConnObj * pConn = (SConnObj *)param; + SConnObj * pConn = param; SThreadObj *pThread = pConn->pThread; - uDebug("%p TCP connection will be closed, fd:%d", pThread, pConn->fd); + sDebug("%p TCP connection will be closed, fd:%d", pThread, pConn->fd); pConn->closedByApp = 1; shutdown(pConn->fd, SHUT_WR); } @@ -164,9 +168,9 @@ static void taosProcessBrokenLink(SConnObj *pConn) { pThread->numOfFds--; epoll_ctl(pThread->pollFd, EPOLL_CTL_DEL, pConn->fd, NULL); - uDebug("%p fd:%d is removed from epoll thread, num:%d", pThread, pConn->fd, pThread->numOfFds); + sDebug("%p fd:%d is removed from epoll thread, num:%d", pThread, pConn->fd, pThread->numOfFds); taosClose(pConn->fd); - free(pConn); + tfree(pConn); } #define maxEvents 10 @@ -183,18 +187,18 @@ static void *taosProcessTcpData(void *param) { while (1) { if (pThread->stop) break; - int fdNum = epoll_wait(pThread->pollFd, events, maxEvents, TAOS_EPOLL_WAIT_TIME); + int32_t fdNum = epoll_wait(pThread->pollFd, events, maxEvents, TAOS_EPOLL_WAIT_TIME); if (pThread->stop) { - uDebug("%p TCP epoll thread is exiting...", pThread); + sDebug("%p TCP epoll thread is exiting...", pThread); break; } if (fdNum < 0) { - uError("epoll_wait failed (%s)", strerror(errno)); + sError("epoll_wait failed since %s", strerror(errno)); continue; } - for (int i = 0; i < fdNum; ++i) { + for (int32_t i = 0; i < fdNum; ++i) { pConn = events[i].data.ptr; assert(pConn); @@ -219,17 +223,16 @@ static void *taosProcessTcpData(void *param) { continue; } } - } if (pThread->stop) break; } - uDebug("%p TCP epoll thread exits", pThread); + sDebug("%p TCP epoll thread exits", pThread); close(pThread->pollFd); - free(pThread); - free(buffer); + tfree(pThread); + tfree(buffer); return NULL; } @@ -242,18 +245,18 @@ static void *taosAcceptPeerTcpConnection(void *argv) { while (1) { struct sockaddr_in clientAddr; socklen_t addrlen = sizeof(clientAddr); - int connFd = accept(pPool->acceptFd, (struct sockaddr *)&clientAddr, &addrlen); + int32_t connFd = accept(pPool->acceptFd, (struct sockaddr *)&clientAddr, &addrlen); if (connFd < 0) { if (errno == EINVAL) { - uDebug("%p TCP server accept is exiting...", pPool); + sDebug("%p TCP server accept is exiting...", pPool); break; } else { - uError("TCP accept failure, reason:%s", strerror(errno)); + sError("TCP accept failure since %s", 
strerror(errno)); continue; } } - // uDebug("TCP connection from: 0x%x:%d", clientAddr.sin_addr.s_addr, clientAddr.sin_port); + // sDebug("TCP connection from: 0x%x:%d", clientAddr.sin_addr.s_addr, clientAddr.sin_port); taosKeepTcpAlive(connFd); (*pInfo->processIncomingConn)(connFd, clientAddr.sin_addr.s_addr); } @@ -273,23 +276,23 @@ static SThreadObj *taosGetTcpThread(SPoolObj *pPool) { pThread->pPool = pPool; pThread->pollFd = epoll_create(10); // size does not matter if (pThread->pollFd < 0) { - free(pThread); + tfree(pThread); return NULL; } pthread_attr_t thattr; pthread_attr_init(&thattr); pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); - int ret = pthread_create(&(pThread->thread), &thattr, (void *)taosProcessTcpData, pThread); + int32_t ret = pthread_create(&(pThread->thread), &thattr, (void *)taosProcessTcpData, pThread); pthread_attr_destroy(&thattr); if (ret != 0) { close(pThread->pollFd); - free(pThread); + tfree(pThread); return NULL; } - uDebug("%p TCP epoll thread is created", pThread); + sDebug("%p TCP epoll thread is created", pThread); pPool->pThread[pPool->nextId] = pThread; pPool->nextId++; pPool->nextId = pPool->nextId % pPool->info.numOfThreads; @@ -298,31 +301,14 @@ static SThreadObj *taosGetTcpThread(SPoolObj *pPool) { } static void taosStopPoolThread(SThreadObj *pThread) { + pthread_t thread = pThread->thread; + if (!taosCheckPthreadValid(thread)) { + return; + } pThread->stop = true; - - if (pThread->thread == pthread_self()) { + if (taosComparePthread(thread, pthread_self())) { pthread_detach(pthread_self()); return; } - - // save thread ID into a local variable, since pThread is freed when the thread exits - pthread_t thread = pThread->thread; - - // signal the thread to stop, try graceful method first, - // and use pthread_cancel when failed - struct epoll_event event = {.events = EPOLLIN}; - eventfd_t fd = eventfd(1, 0); - if (fd == -1) { - // failed to create eventfd, call pthread_cancel instead, which may result in data corruption - uError("failed to create eventfd(%s)", strerror(errno)); - pthread_cancel(pThread->thread); - pThread->stop = true; - } else if (epoll_ctl(pThread->pollFd, EPOLL_CTL_ADD, fd, &event) < 0) { - // failed to call epoll_ctl, call pthread_cancel instead, which may result in data corruption - uError("failed to call epoll_ctl(%s)", strerror(errno)); - pthread_cancel(pThread->thread); - } - pthread_join(thread, NULL); - if (fd >= 0) taosClose(fd); } diff --git a/src/sync/src/tarbitrator.c b/src/sync/src/tarbitrator.c index 360ea93f6c3fcd41e1df03702a0c963029deaacc..4016042de2135f732dfeb2bbc3b0fdc65b1b63f6 100644 --- a/src/sync/src/tarbitrator.c +++ b/src/sync/src/tarbitrator.c @@ -28,22 +28,22 @@ #include "syncInt.h" static void arbSignalHandler(int32_t signum, siginfo_t *sigInfo, void *context); -static void arbProcessIncommingConnection(int connFd, uint32_t sourceIp); +static void arbProcessIncommingConnection(int32_t connFd, uint32_t sourceIp); static void arbProcessBrokenLink(void *param); -static int arbProcessPeerMsg(void *param, void *buffer); +static int32_t arbProcessPeerMsg(void *param, void *buffer); static tsem_t tsArbSem; static ttpool_h tsArbTcpPool; typedef struct { - char id[TSDB_EP_LEN + 24]; - int nodeFd; - void *pConn; + char id[TSDB_EP_LEN + 24]; + int32_t nodeFd; + void * pConn; } SNodeConn; -int main(int argc, char *argv[]) { +int32_t main(int32_t argc, char *argv[]) { char arbLogPath[TSDB_FILENAME_LEN + 16] = {0}; - for (int i = 1; i < argc; ++i) { + for (int32_t i = 1; i < argc; ++i) { if 
(strcmp(argv[i], "-p") == 0 && i < argc - 1) { tsArbitratorPort = atoi(argv[++i]); } else if (strcmp(argv[i], "-d") == 0 && i < argc - 1) { @@ -86,7 +86,7 @@ int main(int argc, char *argv[]) { info.numOfThreads = 1; info.serverIp = 0; info.port = tsArbitratorPort; - info.bufferSize = 640000; + info.bufferSize = SYNC_MAX_SIZE; info.processBrokenLink = arbProcessBrokenLink; info.processIncomingMsg = arbProcessPeerMsg; info.processIncomingConn = arbProcessIncommingConnection; @@ -108,30 +108,30 @@ int main(int argc, char *argv[]) { return 0; } -static void arbProcessIncommingConnection(int connFd, uint32_t sourceIp) { +static void arbProcessIncommingConnection(int32_t connFd, uint32_t sourceIp) { char ipstr[24]; tinet_ntoa(ipstr, sourceIp); sDebug("peer TCP connection from ip:%s", ipstr); SFirstPkt firstPkt; if (taosReadMsg(connFd, &firstPkt, sizeof(firstPkt)) != sizeof(firstPkt)) { - sError("failed to read peer first pkt from ip:%s(%s)", ipstr, strerror(errno)); + sError("failed to read peer first pkt from ip:%s since %s", ipstr, strerror(errno)); taosCloseSocket(connFd); return; } - SNodeConn *pNode = (SNodeConn *)calloc(sizeof(SNodeConn), 1); + SNodeConn *pNode = calloc(sizeof(SNodeConn), 1); if (pNode == NULL) { - sError("failed to allocate memory(%s)", strerror(errno)); + sError("failed to allocate memory since %s", strerror(errno)); taosCloseSocket(connFd); return; } firstPkt.fqdn[sizeof(firstPkt.fqdn) - 1] = 0; - snprintf(pNode->id, sizeof(pNode->id), "vgId:%d peer:%s:%d", firstPkt.sourceId, firstPkt.fqdn, firstPkt.port); + snprintf(pNode->id, sizeof(pNode->id), "vgId:%d, peer:%s:%d", firstPkt.sourceId, firstPkt.fqdn, firstPkt.port); if (firstPkt.syncHead.vgId) { sDebug("%s, vgId in head is not zero, close the connection", pNode->id); - taosTFree(pNode); + tfree(pNode); taosCloseSocket(connFd); return; } @@ -146,17 +146,17 @@ static void arbProcessIncommingConnection(int connFd, uint32_t sourceIp) { static void arbProcessBrokenLink(void *param) { SNodeConn *pNode = param; - sDebug("%s, TCP link is broken(%s), close connection", pNode->id, strerror(errno)); - taosTFree(pNode); + sDebug("%s, TCP link is broken since %s, close connection", pNode->id, strerror(errno)); + tfree(pNode); } -static int arbProcessPeerMsg(void *param, void *buffer) { +static int32_t arbProcessPeerMsg(void *param, void *buffer) { SNodeConn *pNode = param; SSyncHead head; - int bytes = 0; + int32_t bytes = 0; char * cont = (char *)buffer; - int hlen = taosReadMsg(pNode->nodeFd, &head, sizeof(head)); + int32_t hlen = taosReadMsg(pNode->nodeFd, &head, sizeof(head)); if (hlen != sizeof(head)) { sDebug("%s, failed to read msg, hlen:%d", pNode->id, hlen); return -1; diff --git a/src/sync/test/syncClient.c b/src/sync/test/syncClient.c index 23264dc8a0d969e238f35951b0a02e10261ab0c3..23ea54ee0c19b6ad2f93d7577d8d711874b10968 100644 --- a/src/sync/test/syncClient.c +++ b/src/sync/test/syncClient.c @@ -57,7 +57,7 @@ void *sendRequest(void *param) { rpcMsg.ahandle = pInfo; rpcMsg.msgType = 1; uDebug("thread:%d, send request, contLen:%d num:%d", pInfo->index, pInfo->msgSize, pInfo->num); - rpcSendRequest(pInfo->pRpc, &pInfo->epSet, &rpcMsg); + rpcSendRequest(pInfo->pRpc, &pInfo->epSet, &rpcMsg, NULL); if (pInfo->num % 20000 == 0) { uInfo("thread:%d, %d requests have been sent", pInfo->index, pInfo->num); } diff --git a/src/sync/test/syncServer.c b/src/sync/test/syncServer.c index 380b971fa89bd1726e138c973a974bc995500693..9dd3feb4614105c13ba6a2d1069931345167b472 100644 --- a/src/sync/test/syncServer.c +++ 
b/src/sync/test/syncServer.c @@ -30,7 +30,7 @@ int dataFd = -1; void * qhandle = NULL; int walNum = 0; uint64_t tversion = 0; -void * syncHandle; +int64_t syncHandle; int role; int nodeId; char path[256]; @@ -254,7 +254,7 @@ uint32_t getFileInfo(void *ahandle, char *name, uint32_t *index, uint32_t eindex return magic; } -int getWalInfo(void *ahandle, char *name, uint32_t *index) { +int getWalInfo(void *ahandle, char *name, int64_t *index) { struct stat fstat; char aname[280]; diff --git a/src/tsdb/inc/tsdbMain.h b/src/tsdb/inc/tsdbMain.h index 256b8189f8fc69345b27fdf702fb705d22ac3c10..5c978abd1da5cc5caa17450661d980ca648763c1 100644 --- a/src/tsdb/inc/tsdbMain.h +++ b/src/tsdb/inc/tsdbMain.h @@ -208,6 +208,18 @@ typedef struct { } SFileGroupIter; // ------------------ tsdbMain.c +typedef struct { + int32_t totalLen; + int32_t len; + SDataRow row; +} SSubmitBlkIter; + +typedef struct { + int32_t totalLen; + int32_t len; + void * pMsg; +} SSubmitMsgIter; + typedef struct { int8_t state; @@ -220,10 +232,10 @@ typedef struct { SMemTable* mem; SMemTable* imem; STsdbFileH* tsdbFileH; - int commit; - pthread_t commitThread; + sem_t readyToCommit; pthread_mutex_t mutex; bool repoLocked; + int32_t code; // Commit code } STsdbRepo; // ------------------ tsdbRWHelper.c @@ -320,6 +332,15 @@ typedef struct { void* compBuffer; // Buffer for temperary compress/decompress purpose } SRWHelper; +typedef struct { + int rowsInserted; + int rowsUpdated; + int rowsDeleteSucceed; + int rowsDeleteFailed; + int nOperations; + TSKEY keyFirst; + TSKEY keyLast; +} SMergeInfo; // ------------------ tsdbScan.c typedef struct { SFileGroup fGroup; @@ -422,7 +443,6 @@ void tsdbCloseBufPool(STsdbRepo* pRepo); SListNode* tsdbAllocBufBlockFromPool(STsdbRepo* pRepo); // ------------------ tsdbMemTable.c -int tsdbInsertRowToMem(STsdbRepo* pRepo, SDataRow row, STable* pTable); int tsdbRefMemTable(STsdbRepo* pRepo, SMemTable* pMemTable); int tsdbUnRefMemTable(STsdbRepo* pRepo, SMemTable* pMemTable); int tsdbTakeMemSnapshot(STsdbRepo* pRepo, SMemTable** pMem, SMemTable** pIMem); @@ -430,7 +450,8 @@ void tsdbUnTakeMemSnapShot(STsdbRepo* pRepo, SMemTable* pMem, SMemTable* pIMem) void* tsdbAllocBytes(STsdbRepo* pRepo, int bytes); int tsdbAsyncCommit(STsdbRepo* pRepo); int tsdbLoadDataFromCache(STable* pTable, SSkipListIterator* pIter, TSKEY maxKey, int maxRowsToRead, SDataCols* pCols, - TSKEY* filterKeys, int nFilterKeys); + TKEY* filterKeys, int nFilterKeys, bool keepDup, SMergeInfo* pMergeInfo); +void* tsdbCommitData(STsdbRepo* pRepo); static FORCE_INLINE SDataRow tsdbNextIterRow(SSkipListIterator* pIter) { if (pIter == NULL) return NULL; @@ -438,16 +459,23 @@ static FORCE_INLINE SDataRow tsdbNextIterRow(SSkipListIterator* pIter) { SSkipListNode* node = tSkipListIterGet(pIter); if (node == NULL) return NULL; - return *(SDataRow *)SL_GET_NODE_DATA(node); + return (SDataRow)SL_GET_NODE_DATA(node); } static FORCE_INLINE TSKEY tsdbNextIterKey(SSkipListIterator* pIter) { SDataRow row = tsdbNextIterRow(pIter); - if (row == NULL) return -1; + if (row == NULL) return TSDB_DATA_TIMESTAMP_NULL; return dataRowKey(row); } +static FORCE_INLINE TKEY tsdbNextIterTKey(SSkipListIterator* pIter) { + SDataRow row = tsdbNextIterRow(pIter); + if (row == NULL) return TKEY_NULL; + + return dataRowTKey(row); +} + static FORCE_INLINE STsdbBufBlock* tsdbGetCurrBufBlock(STsdbRepo* pRepo) { ASSERT(pRepo != NULL); if (pRepo->mem == NULL) return NULL; @@ -572,6 +600,9 @@ int tsdbScanSCompBlock(STsdbScanHandle* pScanHandle, int idx); int 
tsdbCloseScanFile(STsdbScanHandle* pScanHandle); void tsdbFreeScanHandle(STsdbScanHandle* pScanHandle); +// ------------------ tsdbCommitQueue.c +int tsdbScheduleCommit(STsdbRepo *pRepo); + #ifdef __cplusplus } #endif diff --git a/src/tsdb/src/tsdbBuffer.c b/src/tsdb/src/tsdbBuffer.c index 2e097c6ff73543f2dd858f8424b1e942ebdd8c3e..7cea27658c80d689972e3cb0f5dda3269a34b720 100644 --- a/src/tsdb/src/tsdbBuffer.c +++ b/src/tsdb/src/tsdbBuffer.c @@ -110,7 +110,7 @@ void tsdbCloseBufPool(STsdbRepo *pRepo) { } } - tsdbDebug("vgId:%d buffer pool is closed", REPO_ID(pRepo)); + tsdbDebug("vgId:%d, buffer pool is closed", REPO_ID(pRepo)); } SListNode *tsdbAllocBufBlockFromPool(STsdbRepo *pRepo) { @@ -134,7 +134,7 @@ SListNode *tsdbAllocBufBlockFromPool(STsdbRepo *pRepo) { pBufBlock->offset = 0; pBufBlock->remain = pBufPool->bufBlockSize; - tsdbDebug("vgId:%d buffer block is allocated, blockId:%" PRId64, REPO_ID(pRepo), pBufBlock->blockId); + tsdbDebug("vgId:%d, buffer block is allocated, blockId:%" PRId64, REPO_ID(pRepo), pBufBlock->blockId); return pNode; } @@ -157,4 +157,4 @@ _err: return NULL; } -static void tsdbFreeBufBlock(STsdbBufBlock *pBufBlock) { taosTFree(pBufBlock); } \ No newline at end of file +static void tsdbFreeBufBlock(STsdbBufBlock *pBufBlock) { tfree(pBufBlock); } \ No newline at end of file diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c new file mode 100644 index 0000000000000000000000000000000000000000..637b02cd32ae8ad8e4077684609dcac23922d8a0 --- /dev/null +++ b/src/tsdb/src/tsdbCommit.c @@ -0,0 +1,340 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ +#include "tsdbMain.h" + +static int tsdbCommitTSData(STsdbRepo *pRepo); +static int tsdbCommitMeta(STsdbRepo *pRepo); +static void tsdbEndCommit(STsdbRepo *pRepo, int eno); +static int tsdbHasDataToCommit(SCommitIter *iters, int nIters, TSKEY minKey, TSKEY maxKey); +static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHelper *pHelper, SDataCols *pDataCols); +static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo); +static void tsdbDestroyCommitIters(SCommitIter *iters, int maxTables); + +void *tsdbCommitData(STsdbRepo *pRepo) { + SMemTable * pMem = pRepo->imem; + + tsdbInfo("vgId:%d start to commit! 
keyFirst %" PRId64 " keyLast %" PRId64 " numOfRows %" PRId64 " meta rows: %d", + REPO_ID(pRepo), pMem->keyFirst, pMem->keyLast, pMem->numOfRows, listNEles(pMem->actList)); + + pRepo->code = TSDB_CODE_SUCCESS; + + // Commit to update meta file + if (tsdbCommitMeta(pRepo) < 0) { + tsdbError("vgId:%d error occurs while committing META data since %s", REPO_ID(pRepo), tstrerror(terrno)); + goto _err; + } + + // Create the iterator to read from cache + if (tsdbCommitTSData(pRepo) < 0) { + tsdbError("vgId:%d error occurs while committing TS data since %s", REPO_ID(pRepo), tstrerror(terrno)); + goto _err; + } + + tsdbFitRetention(pRepo); + + tsdbInfo("vgId:%d commit over, succeed", REPO_ID(pRepo)); + tsdbEndCommit(pRepo, TSDB_CODE_SUCCESS); + + return NULL; + +_err: + ASSERT(terrno != TSDB_CODE_SUCCESS); + pRepo->code = terrno; + tsdbInfo("vgId:%d commit over, failed", REPO_ID(pRepo)); + tsdbEndCommit(pRepo, terrno); + + return NULL; +} + +static int tsdbCommitTSData(STsdbRepo *pRepo) { + SMemTable * pMem = pRepo->imem; + SDataCols * pDataCols = NULL; + STsdbMeta * pMeta = pRepo->tsdbMeta; + SCommitIter *iters = NULL; + SRWHelper whelper = {0}; + STsdbCfg * pCfg = &(pRepo->config); + + if (pMem->numOfRows <= 0) return 0; + + iters = tsdbCreateCommitIters(pRepo); + if (iters == NULL) { + tsdbError("vgId:%d failed to create commit iterator since %s", REPO_ID(pRepo), tstrerror(terrno)); + goto _err; + } + + if (tsdbInitWriteHelper(&whelper, pRepo) < 0) { + tsdbError("vgId:%d failed to init write helper since %s", REPO_ID(pRepo), tstrerror(terrno)); + goto _err; + } + + if ((pDataCols = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pCfg->maxRowsPerFileBlock)) == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + tsdbError("vgId:%d failed to init data cols with maxRowBytes %d maxCols %d maxRowsPerFileBlock %d since %s", + REPO_ID(pRepo), pMeta->maxCols, pMeta->maxRowBytes, pCfg->maxRowsPerFileBlock, tstrerror(terrno)); + goto _err; + } + + int sfid = (int)(TSDB_KEY_FILEID(pMem->keyFirst, pCfg->daysPerFile, pCfg->precision)); + int efid = (int)(TSDB_KEY_FILEID(pMem->keyLast, pCfg->daysPerFile, pCfg->precision)); + + // Loop to commit to each file + for (int fid = sfid; fid <= efid; fid++) { + if (tsdbCommitToFile(pRepo, fid, iters, &whelper, pDataCols) < 0) { + tsdbError("vgId:%d failed to commit to file %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno)); + goto _err; + } + } + + tdFreeDataCols(pDataCols); + tsdbDestroyCommitIters(iters, pMem->maxTables); + tsdbDestroyHelper(&whelper); + + return 0; + +_err: + tdFreeDataCols(pDataCols); + tsdbDestroyCommitIters(iters, pMem->maxTables); + tsdbDestroyHelper(&whelper); + + return -1; +} + +static int tsdbCommitMeta(STsdbRepo *pRepo) { + SMemTable *pMem = pRepo->imem; + STsdbMeta *pMeta = pRepo->tsdbMeta; + SActObj * pAct = NULL; + SActCont * pCont = NULL; + + if (listNEles(pMem->actList) <= 0) return 0; + + if (tdKVStoreStartCommit(pMeta->pStore) < 0) { + tsdbError("vgId:%d failed to commit data while start commit meta since %s", REPO_ID(pRepo), tstrerror(terrno)); + goto _err; + } + + SListNode *pNode = NULL; + + while ((pNode = tdListPopHead(pMem->actList)) != NULL) { + pAct = (SActObj *)pNode->data; + if (pAct->act == TSDB_UPDATE_META) { + pCont = (SActCont *)POINTER_SHIFT(pAct, sizeof(SActObj)); + if (tdUpdateKVStoreRecord(pMeta->pStore, pAct->uid, (void *)(pCont->cont), pCont->len) < 0) { + tsdbError("vgId:%d failed to update meta with uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid, + tstrerror(terrno)); + 
tdKVStoreEndCommit(pMeta->pStore); + goto _err; + } + } else if (pAct->act == TSDB_DROP_META) { + if (tdDropKVStoreRecord(pMeta->pStore, pAct->uid) < 0) { + tsdbError("vgId:%d failed to drop meta with uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid, + tstrerror(terrno)); + tdKVStoreEndCommit(pMeta->pStore); + goto _err; + } + } else { + ASSERT(false); + } + } + + if (tdKVStoreEndCommit(pMeta->pStore) < 0) { + tsdbError("vgId:%d failed to commit data while end commit meta since %s", REPO_ID(pRepo), tstrerror(terrno)); + goto _err; + } + + return 0; + +_err: + return -1; +} + +static void tsdbEndCommit(STsdbRepo *pRepo, int eno) { + if (pRepo->appH.notifyStatus) pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_OVER, eno); + sem_post(&(pRepo->readyToCommit)); +} + +static int tsdbHasDataToCommit(SCommitIter *iters, int nIters, TSKEY minKey, TSKEY maxKey) { + for (int i = 0; i < nIters; i++) { + TSKEY nextKey = tsdbNextIterKey((iters + i)->pIter); + if (nextKey != TSDB_DATA_TIMESTAMP_NULL && (nextKey >= minKey && nextKey <= maxKey)) return 1; + } + return 0; +} + +static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHelper *pHelper, SDataCols *pDataCols) { + char * dataDir = NULL; + STsdbCfg * pCfg = &pRepo->config; + STsdbFileH *pFileH = pRepo->tsdbFileH; + SFileGroup *pGroup = NULL; + SMemTable * pMem = pRepo->imem; + bool newLast = false; + + TSKEY minKey = 0, maxKey = 0; + tsdbGetFidKeyRange(pCfg->daysPerFile, pCfg->precision, fid, &minKey, &maxKey); + + // Check if there are data to commit to this file + int hasDataToCommit = tsdbHasDataToCommit(iters, pMem->maxTables, minKey, maxKey); + if (!hasDataToCommit) { + tsdbDebug("vgId:%d no data to commit to file %d", REPO_ID(pRepo), fid); + return 0; + } + + // Create and open files for commit + dataDir = tsdbGetDataDirName(pRepo->rootDir); + if (dataDir == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + return -1; + } + + if ((pGroup = tsdbCreateFGroupIfNeed(pRepo, dataDir, fid)) == NULL) { + tsdbError("vgId:%d failed to create file group %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno)); + goto _err; + } + + // Open files for write/read + if (tsdbSetAndOpenHelperFile(pHelper, pGroup) < 0) { + tsdbError("vgId:%d failed to set helper file since %s", REPO_ID(pRepo), tstrerror(terrno)); + goto _err; + } + + newLast = TSDB_NLAST_FILE_OPENED(pHelper); + + if (tsdbLoadCompIdx(pHelper, NULL) < 0) { + tsdbError("vgId:%d failed to load SCompIdx part since %s", REPO_ID(pRepo), tstrerror(terrno)); + goto _err; + } + + // Loop to commit data in each table + for (int tid = 1; tid < pMem->maxTables; tid++) { + SCommitIter *pIter = iters + tid; + if (pIter->pTable == NULL) continue; + + taosRLockLatch(&(pIter->pTable->latch)); + + if (tsdbSetHelperTable(pHelper, pIter->pTable, pRepo) < 0) goto _err; + + if (pIter->pIter != NULL) { + if (tdInitDataCols(pDataCols, tsdbGetTableSchemaImpl(pIter->pTable, false, false, -1)) < 0) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + goto _err; + } + + if (tsdbCommitTableData(pHelper, pIter, pDataCols, maxKey) < 0) { + taosRUnLockLatch(&(pIter->pTable->latch)); + tsdbError("vgId:%d failed to write data of table %s tid %d uid %" PRIu64 " since %s", REPO_ID(pRepo), + TABLE_CHAR_NAME(pIter->pTable), TABLE_TID(pIter->pTable), TABLE_UID(pIter->pTable), + tstrerror(terrno)); + goto _err; + } + } + + taosRUnLockLatch(&(pIter->pTable->latch)); + + // Move the last block to the new .l file if neccessary + if (tsdbMoveLastBlockIfNeccessary(pHelper) < 0) { + tsdbError("vgId:%d, failed to 
move last block, since %s", REPO_ID(pRepo), tstrerror(terrno)); + goto _err; + } + + // Write the SCompBlock part + if (tsdbWriteCompInfo(pHelper) < 0) { + tsdbError("vgId:%d, failed to write compInfo part since %s", REPO_ID(pRepo), tstrerror(terrno)); + goto _err; + } + } + + if (tsdbWriteCompIdx(pHelper) < 0) { + tsdbError("vgId:%d failed to write compIdx part to file %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno)); + goto _err; + } + + tfree(dataDir); + tsdbCloseHelperFile(pHelper, 0, pGroup); + + pthread_rwlock_wrlock(&(pFileH->fhlock)); + + (void)rename(helperNewHeadF(pHelper)->fname, helperHeadF(pHelper)->fname); + pGroup->files[TSDB_FILE_TYPE_HEAD].info = helperNewHeadF(pHelper)->info; + + if (newLast) { + (void)rename(helperNewLastF(pHelper)->fname, helperLastF(pHelper)->fname); + pGroup->files[TSDB_FILE_TYPE_LAST].info = helperNewLastF(pHelper)->info; + } else { + pGroup->files[TSDB_FILE_TYPE_LAST].info = helperLastF(pHelper)->info; + } + + pGroup->files[TSDB_FILE_TYPE_DATA].info = helperDataF(pHelper)->info; + + pthread_rwlock_unlock(&(pFileH->fhlock)); + + return 0; + +_err: + tfree(dataDir); + tsdbCloseHelperFile(pHelper, 1, pGroup); + return -1; +} + +static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo) { + SMemTable *pMem = pRepo->imem; + STsdbMeta *pMeta = pRepo->tsdbMeta; + + SCommitIter *iters = (SCommitIter *)calloc(pMem->maxTables, sizeof(SCommitIter)); + if (iters == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + return NULL; + } + + if (tsdbRLockRepoMeta(pRepo) < 0) goto _err; + + // reference all tables + for (int i = 0; i < pMem->maxTables; i++) { + if (pMeta->tables[i] != NULL) { + tsdbRefTable(pMeta->tables[i]); + iters[i].pTable = pMeta->tables[i]; + } + } + + if (tsdbUnlockRepoMeta(pRepo) < 0) goto _err; + + for (int i = 0; i < pMem->maxTables; i++) { + if ((iters[i].pTable != NULL) && (pMem->tData[i] != NULL) && (TABLE_UID(iters[i].pTable) == pMem->tData[i]->uid)) { + if ((iters[i].pIter = tSkipListCreateIter(pMem->tData[i]->pData)) == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + goto _err; + } + + tSkipListIterNext(iters[i].pIter); + } + } + + return iters; + +_err: + tsdbDestroyCommitIters(iters, pMem->maxTables); + return NULL; +} + +static void tsdbDestroyCommitIters(SCommitIter *iters, int maxTables) { + if (iters == NULL) return; + + for (int i = 1; i < maxTables; i++) { + if (iters[i].pTable != NULL) { + tsdbUnRefTable(iters[i].pTable); + tSkipListDestroyIter(iters[i].pIter); + } + } + + free(iters); +} diff --git a/src/tsdb/src/tsdbCommitQueue.c b/src/tsdb/src/tsdbCommitQueue.c new file mode 100644 index 0000000000000000000000000000000000000000..c86b8f32b7ff6bfb30e7b734c7b38504200e44e6 --- /dev/null +++ b/src/tsdb/src/tsdbCommitQueue.c @@ -0,0 +1,160 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
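Note on the commit flow introduced above: the patch initializes pRepo->readyToCommit to 1, posts it in tsdbEndCommit once a commit finishes, and has tsdbCloseRepo wait on the same semaphore after scheduling the final commit, so at most one commit is in flight per repository. The sketch below shows that one-slot handoff in isolation; the Repo type is hypothetical, and the exact point where the slot is claimed is not visible in these hunks, so claiming it before scheduling is an assumption.

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical repo object: the semaphore starts at 1, meaning "ready to commit". */
typedef struct {
  sem_t readyToCommit;
  int   pendingData;
} Repo;

/* Background worker: performs the commit, then posts so the next commit (or close) may proceed. */
static void *commitWorker(void *arg) {
  Repo *pRepo = arg;
  sleep(1);                           /* stand-in for writing data files */
  pRepo->pendingData = 0;
  sem_post(&pRepo->readyToCommit);    /* plays the role of tsdbEndCommit above */
  return NULL;
}

int main(void) {
  Repo repo = {.pendingData = 1};
  sem_init(&repo.readyToCommit, 0, 1);  /* one commit slot */

  sem_wait(&repo.readyToCommit);        /* assumed: claim the slot before scheduling a commit */
  pthread_t tid;
  pthread_create(&tid, NULL, commitWorker, &repo);

  /* "Close" path: block until the in-flight commit has posted the slot back. */
  sem_wait(&repo.readyToCommit);
  printf("commit done, pendingData=%d\n", repo.pendingData);

  pthread_join(tid, NULL);
  sem_destroy(&repo.readyToCommit);
  return 0;
}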
+ */ + +#include "os.h" +#include "tlist.h" +#include "tref.h" +#include "tsdbMain.h" + +typedef struct { + bool stop; + pthread_mutex_t lock; + pthread_cond_t queueNotEmpty; + int nthreads; + int refCount; + SList * queue; + pthread_t * threads; +} SCommitQueue; + +typedef struct { + STsdbRepo *pRepo; +} SCommitReq; + +static void *tsdbLoopCommit(void *arg); + +SCommitQueue tsCommitQueue = {0}; + +int tsdbInitCommitQueue(int nthreads) { + SCommitQueue *pQueue = &tsCommitQueue; + + if (nthreads < 1) nthreads = 1; + + pQueue->stop = false; + pQueue->nthreads = nthreads; + + pQueue->queue = tdListNew(0); + if (pQueue->queue == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + return -1; + } + + pQueue->threads = (pthread_t *)calloc(nthreads, sizeof(pthread_t)); + if (pQueue->threads == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + tdListFree(pQueue->queue); + return -1; + } + + pthread_mutex_init(&(pQueue->lock), NULL); + pthread_cond_init(&(pQueue->queueNotEmpty), NULL); + + for (int i = 0; i < nthreads; i++) { + pthread_create(pQueue->threads + i, NULL, tsdbLoopCommit, NULL); + } + + return 0; +} + +void tsdbDestroyCommitQueue() { + SCommitQueue *pQueue = &tsCommitQueue; + + pthread_mutex_lock(&(pQueue->lock)); + + if (pQueue->stop) { + pthread_mutex_unlock(&(pQueue->lock)); + return; + } + + pQueue->stop = true; + pthread_cond_broadcast(&(pQueue->queueNotEmpty)); + + pthread_mutex_unlock(&(pQueue->lock)); + + for (size_t i = 0; i < pQueue->nthreads; i++) { + pthread_join(pQueue->threads[i], NULL); + } + + free(pQueue->threads); + tdListFree(pQueue->queue); + pthread_cond_destroy(&(pQueue->queueNotEmpty)); + pthread_mutex_destroy(&(pQueue->lock)); +} + +int tsdbScheduleCommit(STsdbRepo *pRepo) { + SCommitQueue *pQueue = &tsCommitQueue; + + SListNode *pNode = (SListNode *)calloc(1, sizeof(SListNode) + sizeof(SCommitReq)); + if (pNode == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + return -1; + } + + ((SCommitReq *)pNode->data)->pRepo = pRepo; + + pthread_mutex_lock(&(pQueue->lock)); + + // ASSERT(pQueue->stop); + + tdListAppendNode(pQueue->queue, pNode); + pthread_cond_signal(&(pQueue->queueNotEmpty)); + + pthread_mutex_unlock(&(pQueue->lock)); + return 0; +} + +static void *tsdbLoopCommit(void *arg) { + SCommitQueue *pQueue = &tsCommitQueue; + SListNode * pNode = NULL; + STsdbRepo * pRepo = NULL; + + while (true) { + pthread_mutex_lock(&(pQueue->lock)); + + while (true) { + pNode = tdListPopHead(pQueue->queue); + if (pNode == NULL) { + if (pQueue->stop && pQueue->refCount <= 0) { + pthread_mutex_unlock(&(pQueue->lock)); + goto _exit; + } else { + pthread_cond_wait(&(pQueue->queueNotEmpty), &(pQueue->lock)); + } + } else { + break; + } + } + + pthread_mutex_unlock(&(pQueue->lock)); + + pRepo = ((SCommitReq *)pNode->data)->pRepo; + + tsdbCommitData(pRepo); + listNodeFree(pNode); + } + +_exit: + return NULL; +} + +void tsdbIncCommitRef(int vgId) { + int refCount = atomic_add_fetch_32(&tsCommitQueue.refCount, 1); + tsdbDebug("vgId:%d, inc commit queue ref to %d", vgId, refCount); +} + +void tsdbDecCommitRef(int vgId) { + int refCount = atomic_sub_fetch_32(&tsCommitQueue.refCount, 1); + pthread_cond_broadcast(&(tsCommitQueue.queueNotEmpty)); + tsdbDebug("vgId:%d, dec commit queue ref to %d", vgId, refCount); +} \ No newline at end of file diff --git a/src/tsdb/src/tsdbFile.c b/src/tsdb/src/tsdbFile.c index 626ad77da2eab4be9e94516c4e5c7c0e5a45837e..03c50d42f7324c1cbe7889138bced9d9d986c424 100644 --- a/src/tsdb/src/tsdbFile.c +++ b/src/tsdb/src/tsdbFile.c @@ -13,10 +13,8 @@ * along with 
this program. If not, see . */ #define _DEFAULT_SOURCE -#include - #define TAOS_RANDOM_FILE_FAIL_TEST - +#include #include "os.h" #include "talgo.h" #include "tchecksum.h" @@ -67,7 +65,7 @@ _err: void tsdbFreeFileH(STsdbFileH *pFileH) { if (pFileH) { pthread_rwlock_destroy(&pFileH->fhlock); - taosTFree(pFileH->pFGroup); + tfree(pFileH->pFGroup); free(pFileH); } } @@ -79,7 +77,7 @@ int tsdbOpenFileH(STsdbRepo *pRepo) { DIR * dir = NULL; int fid = 0; int vid = 0; - regex_t regex1, regex2; + regex_t regex1 = {0}, regex2 = {0}; int code = 0; char fname[TSDB_FILENAME_LEN] = "\0"; @@ -95,9 +93,27 @@ int tsdbOpenFileH(STsdbRepo *pRepo) { dir = opendir(tDataDir); if (dir == NULL) { - tsdbError("vgId:%d failed to open directory %s since %s", REPO_ID(pRepo), tDataDir, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - goto _err; + if (errno == ENOENT) { + tsdbError("vgId:%d directory %s not exist", REPO_ID(pRepo), tDataDir); + terrno = TAOS_SYSTEM_ERROR(errno); + + if (taosMkDir(tDataDir, 0755) < 0) { + tsdbError("vgId:%d failed to create directory %s since %s", REPO_ID(pRepo), tDataDir, strerror(errno)); + terrno = TAOS_SYSTEM_ERROR(errno); + goto _err; + } + + dir = opendir(tDataDir); + if (dir == NULL) { + tsdbError("vgId:%d failed to open directory %s since %s", REPO_ID(pRepo), tDataDir, strerror(errno)); + terrno = TAOS_SYSTEM_ERROR(errno); + goto _err; + } + } else { + tsdbError("vgId:%d failed to open directory %s since %s", REPO_ID(pRepo), tDataDir, strerror(errno)); + terrno = TAOS_SYSTEM_ERROR(errno); + goto _err; + } } code = regcomp(®ex1, "^v[0-9]+f[0-9]+\\.(head|data|last|stat)$", REG_EXTENDED); @@ -183,7 +199,7 @@ int tsdbOpenFileH(STsdbRepo *pRepo) { regfree(®ex1); regfree(®ex2); - taosTFree(tDataDir); + tfree(tDataDir); closedir(dir); return 0; @@ -193,7 +209,7 @@ _err: regfree(®ex1); regfree(®ex2); - taosTFree(tDataDir); + tfree(tDataDir); if (dir != NULL) closedir(dir); tsdbCloseFileH(pRepo); return -1; @@ -240,7 +256,8 @@ SFileGroup *tsdbCreateFGroupIfNeed(STsdbRepo *pRepo, char *dataDir, int fid) { pFileH->pFGroup[pFileH->nFGroups++] = fGroup; qsort((void *)(pFileH->pFGroup), pFileH->nFGroups, sizeof(SFileGroup), compFGroup); pthread_rwlock_unlock(&pFileH->fhlock); - return tsdbSearchFGroup(pFileH, fid, TD_EQ); + pGroup = tsdbSearchFGroup(pFileH, fid, TD_EQ); + ASSERT(pGroup != NULL); } return pGroup; @@ -410,7 +427,7 @@ int tsdbUpdateFileHeader(SFile *pFile) { terrno = TAOS_SYSTEM_ERROR(errno); return -1; } - if (taosTWrite(pFile->fd, (void *)buf, TSDB_FILE_HEAD_SIZE) < TSDB_FILE_HEAD_SIZE) { + if (taosWrite(pFile->fd, (void *)buf, TSDB_FILE_HEAD_SIZE) < TSDB_FILE_HEAD_SIZE) { tsdbError("failed to write %d bytes to file %s since %s", TSDB_FILE_HEAD_SIZE, pFile->fname, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); return -1; @@ -475,7 +492,7 @@ int tsdbLoadFileHeader(SFile *pFile, uint32_t *version) { return -1; } - if (taosTRead(pFile->fd, buf, TSDB_FILE_HEAD_SIZE) < TSDB_FILE_HEAD_SIZE) { + if (taosRead(pFile->fd, buf, TSDB_FILE_HEAD_SIZE) < TSDB_FILE_HEAD_SIZE) { tsdbError("failed to read file %s header part with %d bytes, reason:%s", pFile->fname, TSDB_FILE_HEAD_SIZE, strerror(errno)); terrno = TSDB_CODE_TDB_FILE_CORRUPTED; @@ -500,7 +517,7 @@ void tsdbGetFileInfoImpl(char *fname, uint32_t *magic, int64_t *size) { SFile file; SFile * pFile = &file; - strncpy(pFile->fname, fname, TSDB_FILENAME_LEN); + strncpy(pFile->fname, fname, TSDB_FILENAME_LEN - 1); pFile->fd = -1; if (tsdbOpenFile(pFile, O_RDONLY) < 0) goto _err; diff --git a/src/tsdb/src/tsdbMain.c 
b/src/tsdb/src/tsdbMain.c index a1e6376304a54ea810f3662bd30e3f97fd53765c..3990c0c516d1768135934b098efbcd7746ab424c 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -32,18 +32,6 @@ #define TSDB_DEFAULT_COMPRESSION TWO_STAGE_COMP #define IS_VALID_COMPRESSION(compression) (((compression) >= NO_COMPRESSION) && ((compression) <= TWO_STAGE_COMP)) -typedef struct { - int32_t totalLen; - int32_t len; - SDataRow row; -} SSubmitBlkIter; - -typedef struct { - int32_t totalLen; - int32_t len; - void * pMsg; -} SSubmitMsgIter; - static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg); static int32_t tsdbSetRepoEnv(char *rootDir, STsdbCfg *pCfg); static int32_t tsdbUnsetRepoEnv(char *rootDir); @@ -52,20 +40,13 @@ static int tsdbLoadConfig(char *rootDir, STsdbCfg *pCfg); static char * tsdbGetCfgFname(char *rootDir); static STsdbRepo * tsdbNewRepo(char *rootDir, STsdbAppH *pAppH, STsdbCfg *pCfg); static void tsdbFreeRepo(STsdbRepo *pRepo); -static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter); -static int32_t tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, TSKEY now, int32_t *affectedrows); -static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock); -static SDataRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter); static int tsdbRestoreInfo(STsdbRepo *pRepo); -static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter); static void tsdbAlterCompression(STsdbRepo *pRepo, int8_t compression); static int tsdbAlterKeep(STsdbRepo *pRepo, int32_t keep); static int tsdbAlterCacheTotalBlocks(STsdbRepo *pRepo, int totalBlocks); static int keyFGroupCompFunc(const void *key, const void *fgroup); static int tsdbEncodeCfg(void **buf, STsdbCfg *pCfg); static void * tsdbDecodeCfg(void *buf, STsdbCfg *pCfg); -static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable); -static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg); static void tsdbStartStream(STsdbRepo *pRepo); static void tsdbStopStream(STsdbRepo *pRepo); @@ -153,17 +134,20 @@ _err: } // Note: all working thread and query thread must stopped when calling this function -void tsdbCloseRepo(TSDB_REPO_T *repo, int toCommit) { - if (repo == NULL) return; +int tsdbCloseRepo(TSDB_REPO_T *repo, int toCommit) { + if (repo == NULL) return 0; STsdbRepo *pRepo = (STsdbRepo *)repo; int vgId = REPO_ID(pRepo); + terrno = TSDB_CODE_SUCCESS; + tsdbStopStream(pRepo); if (toCommit) { tsdbAsyncCommit(pRepo); - if (pRepo->commit) pthread_join(pRepo->commitThread, NULL); + sem_wait(&(pRepo->readyToCommit)); + terrno = pRepo->code; } tsdbUnRefMemTable(pRepo, pRepo->mem); tsdbUnRefMemTable(pRepo, pRepo->imem); @@ -175,40 +159,12 @@ void tsdbCloseRepo(TSDB_REPO_T *repo, int toCommit) { tsdbCloseMeta(pRepo); tsdbFreeRepo(pRepo); tsdbDebug("vgId:%d repository is closed", vgId); -} -int32_t tsdbInsertData(TSDB_REPO_T *repo, SSubmitMsg *pMsg, SShellSubmitRspMsg *pRsp) { - STsdbRepo * pRepo = (STsdbRepo *)repo; - SSubmitMsgIter msgIter = {0}; - - if (tsdbScanAndConvertSubmitMsg(pRepo, pMsg) < 0) { - if (terrno != TSDB_CODE_TDB_TABLE_RECONFIGURE) { - tsdbError("vgId:%d failed to insert data since %s", REPO_ID(pRepo), tstrerror(terrno)); - } + if (terrno != TSDB_CODE_SUCCESS) { return -1; + } else { + return 0; } - - if (tsdbInitSubmitMsgIter(pMsg, &msgIter) < 0) { - tsdbError("vgId:%d failed to insert data since %s", REPO_ID(pRepo), tstrerror(terrno)); - return -1; - } - - SSubmitBlk *pBlock = NULL; - int32_t affectedrows = 0; - - TSKEY now = 
taosGetTimestamp(pRepo->config.precision); - while (true) { - tsdbGetSubmitMsgNext(&msgIter, &pBlock); - if (pBlock == NULL) break; - if (tsdbInsertDataToTable(pRepo, pBlock, now, &affectedrows) < 0) { - return -1; - } - } - - if (pRsp != NULL) pRsp->affectedRows = htonl(affectedrows); - - if (tsdbCheckCommit(pRepo) < 0) return -1; - return 0; } uint32_t tsdbGetFileInfo(TSDB_REPO_T *repo, char *name, uint32_t *index, uint32_t eindex, int64_t *size) { @@ -228,7 +184,7 @@ uint32_t tsdbGetFileInfo(TSDB_REPO_T *repo, char *name, uint32_t *index, uint32_ int prefixLen = (int)strlen(prefix); if (name[0] == 0) { // get the file from index or after, but not larger than eindex - taosTFree(sdup); + tfree(sdup); int fid = (*index) / TSDB_FILE_TYPE_MAX; if (pFileH->nFGroups == 0 || fid > pFileH->pFGroup[pFileH->nFGroups - 1].fileId) { @@ -260,8 +216,8 @@ uint32_t tsdbGetFileInfo(TSDB_REPO_T *repo, char *name, uint32_t *index, uint32_ fname = malloc(prefixLen + strlen(name) + 2); sprintf(fname, "%s/%s", prefix, name); if (access(fname, F_OK) != 0) { - taosFree(fname); - taosFree(sdup); + tfree(fname); + tfree(sdup); return 0; } if (*index == TSDB_META_FILE_INDEX) { // get meta file @@ -269,20 +225,20 @@ uint32_t tsdbGetFileInfo(TSDB_REPO_T *repo, char *name, uint32_t *index, uint32_ } else { tsdbGetFileInfoImpl(fname, &magic, size); } - taosFree(fname); - taosFree(sdup); + tfree(fname); + tfree(sdup); return magic; } if (stat(fname, &fState) < 0) { - taosTFree(fname); + tfree(fname); return 0; } *size = fState.st_size; // magic = *size; - taosTFree(fname); + tfree(fname); return magic; } @@ -294,6 +250,7 @@ STsdbCfg *tsdbGetCfg(const TSDB_REPO_T *repo) { int32_t tsdbConfigRepo(TSDB_REPO_T *repo, STsdbCfg *pCfg) { // TODO: think about multithread cases STsdbRepo *pRepo = (STsdbRepo *)repo; + STsdbCfg config = pRepo->config; STsdbCfg * pRCfg = &pRepo->config; if (tsdbCheckAndSetDefaultCfg(pCfg) < 0) return -1; @@ -308,22 +265,25 @@ int32_t tsdbConfigRepo(TSDB_REPO_T *repo, STsdbCfg *pCfg) { bool configChanged = false; if (pRCfg->compression != pCfg->compression) { tsdbAlterCompression(pRepo, pCfg->compression); + config.compression = pCfg->compression; configChanged = true; } if (pRCfg->keep != pCfg->keep) { if (tsdbAlterKeep(pRepo, pCfg->keep) < 0) { tsdbError("vgId:%d failed to configure repo when alter keep since %s", REPO_ID(pRepo), tstrerror(terrno)); + config.keep = pCfg->keep; return -1; } configChanged = true; } if (pRCfg->totalBlocks != pCfg->totalBlocks) { tsdbAlterCacheTotalBlocks(pRepo, pCfg->totalBlocks); + config.totalBlocks = pCfg->totalBlocks; configChanged = true; } if (configChanged) { - if (tsdbSaveConfig(pRepo->rootDir, &pRepo->config) < 0) { + if (tsdbSaveConfig(pRepo->rootDir, &config) < 0) { tsdbError("vgId:%d failed to configure repository while save config since %s", REPO_ID(pRepo), tstrerror(terrno)); return -1; } @@ -512,6 +472,9 @@ static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg) { } } + // update check + if (pCfg->update != 0) pCfg->update = 1; + return 0; _err: @@ -579,7 +542,7 @@ static int32_t tsdbSaveConfig(char *rootDir, STsdbCfg *pCfg) { taosCalcChecksumAppend(0, (uint8_t *)buf, TSDB_FILE_HEAD_SIZE); - if (taosTWrite(fd, (void *)buf, TSDB_FILE_HEAD_SIZE) < TSDB_FILE_HEAD_SIZE) { + if (taosWrite(fd, (void *)buf, TSDB_FILE_HEAD_SIZE) < TSDB_FILE_HEAD_SIZE) { tsdbError("vgId:%d failed to write %d bytes to file %s since %s", pCfg->tsdbId, TSDB_FILE_HEAD_SIZE, fname, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); @@ -597,7 +560,7 @@ static int32_t 
tsdbSaveConfig(char *rootDir, STsdbCfg *pCfg) { return 0; _err: - taosTFree(fname); + tfree(fname); if (fd >= 0) close(fd); return -1; } @@ -620,7 +583,7 @@ static int tsdbLoadConfig(char *rootDir, STsdbCfg *pCfg) { goto _err; } - if (taosTRead(fd, (void *)buf, TSDB_FILE_HEAD_SIZE) < TSDB_FILE_HEAD_SIZE) { + if (taosRead(fd, (void *)buf, TSDB_FILE_HEAD_SIZE) < TSDB_FILE_HEAD_SIZE) { tsdbError("failed to read %d bytes from file %s since %s", TSDB_FILE_HEAD_SIZE, fname, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); goto _err; @@ -634,13 +597,13 @@ static int tsdbLoadConfig(char *rootDir, STsdbCfg *pCfg) { tsdbDecodeCfg(buf, pCfg); - taosTFree(fname); + tfree(fname); close(fd); return 0; _err: - taosTFree(fname); + tfree(fname); if (fd >= 0) close(fd); return -1; } @@ -665,6 +628,7 @@ static STsdbRepo *tsdbNewRepo(char *rootDir, STsdbAppH *pAppH, STsdbCfg *pCfg) { } pRepo->state = TSDB_STATE_OK; + pRepo->code = TSDB_CODE_SUCCESS; int code = pthread_mutex_init(&pRepo->mutex, NULL); if (code != 0) { @@ -672,6 +636,12 @@ static STsdbRepo *tsdbNewRepo(char *rootDir, STsdbAppH *pAppH, STsdbCfg *pCfg) { goto _err; } + code = sem_init(&(pRepo->readyToCommit), 0, 1); + if (code != 0) { + terrno = TAOS_SYSTEM_ERROR(code); + goto _err; + } + pRepo->repoLocked = false; pRepo->rootDir = strdup(rootDir); @@ -715,99 +685,13 @@ static void tsdbFreeRepo(STsdbRepo *pRepo) { tsdbFreeMeta(pRepo->tsdbMeta); // tsdbFreeMemTable(pRepo->mem); // tsdbFreeMemTable(pRepo->imem); - taosTFree(pRepo->rootDir); + tfree(pRepo->rootDir); + sem_destroy(&(pRepo->readyToCommit)); pthread_mutex_destroy(&pRepo->mutex); free(pRepo); } } -static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter) { - if (pMsg == NULL) { - terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP; - return -1; - } - - pIter->totalLen = pMsg->length; - pIter->len = 0; - pIter->pMsg = pMsg; - if (pMsg->length <= TSDB_SUBMIT_MSG_HEAD_SIZE) { - terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP; - return -1; - } - - return 0; -} - -static int32_t tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, TSKEY now, int32_t *affectedrows) { - STsdbMeta *pMeta = pRepo->tsdbMeta; - int64_t points = 0; - - ASSERT(pBlock->tid < pMeta->maxTables); - STable *pTable = pMeta->tables[pBlock->tid]; - ASSERT(pTable != NULL && TABLE_UID(pTable) == pBlock->uid); - - SSubmitBlkIter blkIter = {0}; - SDataRow row = NULL; - - TSKEY minKey = now - tsMsPerDay[pRepo->config.precision] * pRepo->config.keep; - TSKEY maxKey = now + tsMsPerDay[pRepo->config.precision] * pRepo->config.daysPerFile; - - tsdbInitSubmitBlkIter(pBlock, &blkIter); - while ((row = tsdbGetSubmitBlkNext(&blkIter)) != NULL) { - if (dataRowKey(row) < minKey || dataRowKey(row) > maxKey) { - tsdbError("vgId:%d table %s tid %d uid %" PRIu64 " timestamp is out of range! 
now %" PRId64 " minKey %" PRId64 - " maxKey %" PRId64, - REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), now, minKey, maxKey); - terrno = TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE; - return -1; - } - - if (tsdbInsertRowToMem(pRepo, row, pTable) < 0) return -1; - - (*affectedrows)++; - points++; - } - - STSchema *pSchema = tsdbGetTableSchemaByVersion(pTable, pBlock->sversion); - pRepo->stat.pointsWritten += points * schemaNCols(pSchema); - pRepo->stat.totalStorage += points * schemaVLen(pSchema); - - return 0; -} - -static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock) { - if (pIter->len == 0) { - pIter->len += TSDB_SUBMIT_MSG_HEAD_SIZE; - } else { - SSubmitBlk *pSubmitBlk = (SSubmitBlk *)POINTER_SHIFT(pIter->pMsg, pIter->len); - pIter->len += (sizeof(SSubmitBlk) + pSubmitBlk->dataLen + pSubmitBlk->schemaLen); - } - - if (pIter->len > pIter->totalLen) { - terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP; - *pPBlock = NULL; - return -1; - } - - *pPBlock = (pIter->len == pIter->totalLen) ? NULL : (SSubmitBlk *)POINTER_SHIFT(pIter->pMsg, pIter->len); - - return 0; -} - -static SDataRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter) { - SDataRow row = pIter->row; - if (row == NULL) return NULL; - - pIter->len += dataRowLen(row); - if (pIter->len >= pIter->totalLen) { - pIter->row = NULL; - } else { - pIter->row = (char *)row + dataRowLen(row); - } - - return row; -} - static int tsdbRestoreInfo(STsdbRepo *pRepo) { STsdbMeta * pMeta = pRepo->tsdbMeta; STsdbFileH *pFileH = pRepo->tsdbFileH; @@ -841,14 +725,6 @@ _err: return -1; } -static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter) { - if (pBlock->dataLen <= 0) return -1; - pIter->totalLen = pBlock->dataLen; - pIter->len = 0; - pIter->row = (SDataRow)(pBlock->data+pBlock->schemaLen); - return 0; -} - static void tsdbAlterCompression(STsdbRepo *pRepo, int8_t compression) { int8_t ocompression = pRepo->config.compression; pRepo->config.compression = compression; @@ -923,6 +799,7 @@ static int tsdbEncodeCfg(void **buf, STsdbCfg *pCfg) { tlen += taosEncodeVariantI32(buf, pCfg->maxRowsPerFileBlock); tlen += taosEncodeFixedI8(buf, pCfg->precision); tlen += taosEncodeFixedI8(buf, pCfg->compression); + tlen += taosEncodeFixedI8(buf, pCfg->update); return tlen; } @@ -939,138 +816,11 @@ static void *tsdbDecodeCfg(void *buf, STsdbCfg *pCfg) { buf = taosDecodeVariantI32(buf, &(pCfg->maxRowsPerFileBlock)); buf = taosDecodeFixedI8(buf, &(pCfg->precision)); buf = taosDecodeFixedI8(buf, &(pCfg->compression)); + buf = taosDecodeFixedI8(buf, &(pCfg->update)); return buf; } -static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable) { - ASSERT(pTable != NULL); - - STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1); - int sversion = schemaVersion(pSchema); - - if (pBlock->sversion == sversion) { - return 0; - } else { - if (TABLE_TYPE(pTable) == TSDB_STREAM_TABLE) { // stream table is not allowed to change schema - terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION; - return -1; - } - } - - if (pBlock->sversion > sversion) { // may need to update table schema - if (pBlock->schemaLen > 0) { - tsdbDebug( - "vgId:%d table %s tid %d uid %" PRIu64 " schema version %d is out of data, client version %d, update...", - REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), sversion, pBlock->sversion); - ASSERT(pBlock->schemaLen % sizeof(STColumn) == 0); - int numOfCols = pBlock->schemaLen / sizeof(STColumn); - STColumn *pTCol = (STColumn 
*)pBlock->data; - - STSchemaBuilder schemaBuilder = {0}; - if (tdInitTSchemaBuilder(&schemaBuilder, pBlock->sversion) < 0) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - tsdbError("vgId:%d failed to update schema of table %s since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), - tstrerror(terrno)); - return -1; - } - - for (int i = 0; i < numOfCols; i++) { - if (tdAddColToSchema(&schemaBuilder, pTCol[i].type, htons(pTCol[i].colId), htons(pTCol[i].bytes)) < 0) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - tsdbError("vgId:%d failed to update schema of table %s since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), - tstrerror(terrno)); - tdDestroyTSchemaBuilder(&schemaBuilder); - return -1; - } - } - - STSchema *pNSchema = tdGetSchemaFromBuilder(&schemaBuilder); - if (pNSchema == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - tdDestroyTSchemaBuilder(&schemaBuilder); - return -1; - } - - tdDestroyTSchemaBuilder(&schemaBuilder); - tsdbUpdateTableSchema(pRepo, pTable, pNSchema, true); - } else { - tsdbDebug( - "vgId:%d table %s tid %d uid %" PRIu64 " schema version %d is out of data, client version %d, reconfigure...", - REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), sversion, pBlock->sversion); - terrno = TSDB_CODE_TDB_TABLE_RECONFIGURE; - return -1; - } - } else { - ASSERT(pBlock->sversion >= 0); - if (tsdbGetTableSchemaImpl(pTable, false, false, pBlock->sversion) == NULL) { - tsdbError("vgId:%d invalid submit schema version %d to table %s tid %d from client", REPO_ID(pRepo), - pBlock->sversion, TABLE_CHAR_NAME(pTable), TABLE_TID(pTable)); - } - terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION; - return -1; - } - - return 0; -} - -static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg) { - ASSERT(pMsg != NULL); - STsdbMeta * pMeta = pRepo->tsdbMeta; - SSubmitMsgIter msgIter = {0}; - SSubmitBlk * pBlock = NULL; - - terrno = TSDB_CODE_SUCCESS; - pMsg->length = htonl(pMsg->length); - pMsg->numOfBlocks = htonl(pMsg->numOfBlocks); - - if (tsdbInitSubmitMsgIter(pMsg, &msgIter) < 0) return -1; - while (true) { - if (tsdbGetSubmitMsgNext(&msgIter, &pBlock) < 0) return -1; - if (pBlock == NULL) break; - - pBlock->uid = htobe64(pBlock->uid); - pBlock->tid = htonl(pBlock->tid); - pBlock->sversion = htonl(pBlock->sversion); - pBlock->dataLen = htonl(pBlock->dataLen); - pBlock->schemaLen = htonl(pBlock->schemaLen); - pBlock->numOfRows = htons(pBlock->numOfRows); - - if (pBlock->tid <= 0 || pBlock->tid >= pMeta->maxTables) { - tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pRepo), pBlock->uid, - pBlock->tid); - terrno = TSDB_CODE_TDB_INVALID_TABLE_ID; - return -1; - } - - STable *pTable = pMeta->tables[pBlock->tid]; - if (pTable == NULL || TABLE_UID(pTable) != pBlock->uid) { - tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pRepo), pBlock->uid, - pBlock->tid); - terrno = TSDB_CODE_TDB_INVALID_TABLE_ID; - return -1; - } - - if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) { - tsdbError("vgId:%d invalid action trying to insert a super table %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable)); - terrno = TSDB_CODE_TDB_INVALID_ACTION; - return -1; - } - - // Check schema version and update schema if needed - if (tsdbCheckTableSchema(pRepo, pBlock, pTable) < 0) { - if (terrno == TSDB_CODE_TDB_TABLE_RECONFIGURE) { - continue; - } else { - return -1; - } - } - } - - if (terrno != TSDB_CODE_SUCCESS) return -1; - return 0; -} - static int tsdbAlterCacheTotalBlocks(STsdbRepo *pRepo, int totalBlocks) { // 
TODO // STsdbCache *pCache = pRepo->tsdbCache; diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c index 4cf8ddd4bd8df396352ad66b8499552018d5d322..71944c87c6d68d530e07a13a05bf5ac89ee3754d 100644 --- a/src/tsdb/src/tsdbMemTable.c +++ b/src/tsdb/src/tsdbMemTable.c @@ -18,116 +18,56 @@ #define TSDB_DATA_SKIPLIST_LEVEL 5 -static void tsdbFreeBytes(STsdbRepo *pRepo, void *ptr, int bytes); static SMemTable * tsdbNewMemTable(STsdbRepo *pRepo); static void tsdbFreeMemTable(SMemTable *pMemTable); static STableData *tsdbNewTableData(STsdbCfg *pCfg, STable *pTable); static void tsdbFreeTableData(STableData *pTableData); static char * tsdbGetTsTupleKey(const void *data); -static void * tsdbCommitData(void *arg); -static int tsdbCommitMeta(STsdbRepo *pRepo); -static void tsdbEndCommit(STsdbRepo *pRepo); -static int tsdbHasDataToCommit(SCommitIter *iters, int nIters, TSKEY minKey, TSKEY maxKey); -static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHelper *pHelper, SDataCols *pDataCols); -static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo); -static void tsdbDestroyCommitIters(SCommitIter *iters, int maxTables); static int tsdbAdjustMemMaxTables(SMemTable *pMemTable, int maxTables); - -// ---------------- INTERNAL FUNCTIONS ---------------- -int tsdbInsertRowToMem(STsdbRepo *pRepo, SDataRow row, STable *pTable) { - STsdbCfg * pCfg = &pRepo->config; - STsdbMeta * pMeta = pRepo->tsdbMeta; - int32_t level = 0; - int32_t headSize = 0; - TSKEY key = dataRowKey(row); - SMemTable * pMemTable = pRepo->mem; - STableData *pTableData = NULL; - SSkipList * pSList = NULL; - - if (pMemTable != NULL && TABLE_TID(pTable) < pMemTable->maxTables && pMemTable->tData[TABLE_TID(pTable)] != NULL && - pMemTable->tData[TABLE_TID(pTable)]->uid == TABLE_UID(pTable)) { - pTableData = pMemTable->tData[TABLE_TID(pTable)]; - pSList = pTableData->pData; - } - - tSkipListNewNodeInfo(pSList, &level, &headSize); - - SSkipListNode *pNode = (SSkipListNode *)malloc(headSize + sizeof(SDataRow *)); - if (pNode == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - return -1; - } - - void *pRow = tsdbAllocBytes(pRepo, dataRowLen(row)); - if (pRow == NULL) { - tsdbError("vgId:%d failed to insert row with key %" PRId64 " to table %s while allocate %d bytes since %s", - REPO_ID(pRepo), key, TABLE_CHAR_NAME(pTable), dataRowLen(row), tstrerror(terrno)); - free(pNode); - return -1; - } - - pNode->level = level; - dataRowCpy(pRow, row); - *(SDataRow *)SL_GET_NODE_DATA(pNode) = pRow; - - // Operations above may change pRepo->mem, retake those values - ASSERT(pRepo->mem != NULL); - pMemTable = pRepo->mem; - - if (TABLE_TID(pTable) >= pMemTable->maxTables) { - if (tsdbAdjustMemMaxTables(pMemTable, pMeta->maxTables) < 0) { - tsdbFreeBytes(pRepo, pRow, dataRowLen(row)); - free(pNode); - return -1; +static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema **ppSchema, SDataRow row); +static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter); +static SDataRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter); +static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg); +static int tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, int32_t *affectedrows); +static int tsdbCopyRowToMem(STsdbRepo *pRepo, SDataRow row, STable *pTable, void **ppRow); +static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter); +static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock); +static int tsdbCheckTableSchema(STsdbRepo *pRepo, 
SSubmitBlk *pBlock, STable *pTable); +static int tsdbInsertDataToTableImpl(STsdbRepo *pRepo, STable *pTable, void **rows, int rowCounter); +static void tsdbFreeRows(STsdbRepo *pRepo, void **rows, int rowCounter); + +static FORCE_INLINE int tsdbCheckRowRange(STsdbRepo *pRepo, STable *pTable, SDataRow row, TSKEY minKey, TSKEY maxKey, + TSKEY now); + +int32_t tsdbInsertData(TSDB_REPO_T *repo, SSubmitMsg *pMsg, SShellSubmitRspMsg *pRsp) { + STsdbRepo * pRepo = (STsdbRepo *)repo; + SSubmitMsgIter msgIter = {0}; + SSubmitBlk * pBlock = NULL; + int32_t affectedrows = 0; + + if (tsdbScanAndConvertSubmitMsg(pRepo, pMsg) < 0) { + if (terrno != TSDB_CODE_TDB_TABLE_RECONFIGURE) { + tsdbError("vgId:%d failed to insert data since %s", REPO_ID(pRepo), tstrerror(terrno)); } + return -1; } - pTableData = pMemTable->tData[TABLE_TID(pTable)]; - - if (pTableData == NULL || pTableData->uid != TABLE_UID(pTable)) { - if (pTableData != NULL) { - taosWLockLatch(&(pMemTable->latch)); - pMemTable->tData[TABLE_TID(pTable)] = NULL; - tsdbFreeTableData(pTableData); - taosWUnLockLatch(&(pMemTable->latch)); - } - pTableData = tsdbNewTableData(pCfg, pTable); - if (pTableData == NULL) { - tsdbError("vgId:%d failed to insert row with key %" PRId64 - " to table %s while create new table data object since %s", - REPO_ID(pRepo), key, TABLE_CHAR_NAME(pTable), tstrerror(terrno)); - tsdbFreeBytes(pRepo, (void *)pRow, dataRowLen(row)); - free(pNode); + tsdbInitSubmitMsgIter(pMsg, &msgIter); + while (true) { + tsdbGetSubmitMsgNext(&msgIter, &pBlock); + if (pBlock == NULL) break; + if (tsdbInsertDataToTable(pRepo, pBlock, &affectedrows) < 0) { return -1; } - - pRepo->mem->tData[TABLE_TID(pTable)] = pTableData; } - ASSERT((pTableData != NULL) && pTableData->uid == TABLE_UID(pTable)); - - if (tSkipListPut(pTableData->pData, pNode) == NULL) { - tsdbFreeBytes(pRepo, (void *)pRow, dataRowLen(row)); - free(pNode); - } else { - if (TABLE_LASTKEY(pTable) < key) TABLE_LASTKEY(pTable) = key; - if (pMemTable->keyFirst > key) pMemTable->keyFirst = key; - if (pMemTable->keyLast < key) pMemTable->keyLast = key; - pMemTable->numOfRows++; - - if (pTableData->keyFirst > key) pTableData->keyFirst = key; - if (pTableData->keyLast < key) pTableData->keyLast = key; - pTableData->numOfRows++; - - ASSERT(pTableData->numOfRows == tSkipListGetSize(pTableData->pData)); - } - - tsdbTrace("vgId:%d a row is inserted to table %s tid %d uid %" PRIu64 " key %" PRIu64, REPO_ID(pRepo), - TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), key); + if (pRsp != NULL) pRsp->affectedRows = htonl(affectedrows); + if (tsdbCheckCommit(pRepo) < 0) return -1; return 0; } +// ---------------- INTERNAL FUNCTIONS ---------------- int tsdbRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) { if (pMemTable == NULL) return 0; int ref = T_REF_INC(pMemTable); @@ -151,7 +91,7 @@ int tsdbUnRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) { } int code = pthread_cond_signal(&pBufPool->poolNotEmpty); if (code != 0) { - tsdbUnlockRepo(pRepo); + if (tsdbUnlockRepo(pRepo) < 0) return -1; tsdbError("vgId:%d failed to signal pool not empty since %s", REPO_ID(pRepo), strerror(code)); terrno = TAOS_SYSTEM_ERROR(code); return -1; @@ -188,6 +128,8 @@ int tsdbTakeMemSnapshot(STsdbRepo *pRepo, SMemTable **pMem, SMemTable **pIMem) { } void tsdbUnTakeMemSnapShot(STsdbRepo *pRepo, SMemTable *pMem, SMemTable *pIMem) { + tsdbDebug("vgId:%d untake memory snapshot, pMem %p pIMem %p", REPO_ID(pRepo), pMem, pIMem); + if (pMem != NULL) { taosRUnLockLatch(&(pMem->latch)); tsdbUnRefMemTable(pRepo, 
pMem); @@ -227,6 +169,10 @@ void *tsdbAllocBytes(STsdbRepo *pRepo, int bytes) { ASSERT(pRepo->mem->extraBuffList != NULL); SListNode *pNode = (SListNode *)malloc(sizeof(SListNode) + bytes); if (pNode == NULL) { + if (listNEles(pRepo->mem->extraBuffList) == 0) { + tdListFree(pRepo->mem->extraBuffList); + pRepo->mem->extraBuffList = NULL; + } terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; return NULL; } @@ -257,123 +203,165 @@ void *tsdbAllocBytes(STsdbRepo *pRepo, int bytes) { } int tsdbAsyncCommit(STsdbRepo *pRepo) { + if (pRepo->mem == NULL) return 0; + SMemTable *pIMem = pRepo->imem; - int code = 0; - if (pIMem != NULL) { - ASSERT(pRepo->commit); - tsdbDebug("vgId:%d waiting for the commit thread", REPO_ID(pRepo)); - code = pthread_join(pRepo->commitThread, NULL); - tsdbDebug("vgId:%d commit thread is finished", REPO_ID(pRepo)); - if (code != 0) { - tsdbError("vgId:%d failed to thread join since %s", REPO_ID(pRepo), strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; - } - pRepo->commit = 0; - } + sem_wait(&(pRepo->readyToCommit)); - ASSERT(pRepo->commit == 0); - if (pRepo->mem != NULL) { - if (pRepo->appH.notifyStatus) pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_START); - if (tsdbLockRepo(pRepo) < 0) return -1; - pRepo->imem = pRepo->mem; - pRepo->mem = NULL; - pRepo->commit = 1; - code = pthread_create(&pRepo->commitThread, NULL, tsdbCommitData, (void *)pRepo); - if (code != 0) { - tsdbError("vgId:%d failed to create commit thread since %s", REPO_ID(pRepo), strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(code); - tsdbUnlockRepo(pRepo); - return -1; - } - if (tsdbUnlockRepo(pRepo) < 0) return -1; + if (pRepo->code != TSDB_CODE_SUCCESS) { + tsdbWarn("vgId:%d try to commit when TSDB not in good state: %s", REPO_ID(pRepo), tstrerror(terrno)); } - if (pIMem && tsdbUnRefMemTable(pRepo, pIMem) < 0) return -1; + if (pRepo->appH.notifyStatus) pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_START, TSDB_CODE_SUCCESS); + if (tsdbLockRepo(pRepo) < 0) return -1; + pRepo->imem = pRepo->mem; + pRepo->mem = NULL; + tsdbScheduleCommit(pRepo); + if (tsdbUnlockRepo(pRepo) < 0) return -1; + + if (tsdbUnRefMemTable(pRepo, pIMem) < 0) return -1; return 0; } +int tsdbSyncCommit(TSDB_REPO_T *repo) { + STsdbRepo *pRepo = (STsdbRepo *)repo; + + tsdbAsyncCommit(pRepo); + sem_wait(&(pRepo->readyToCommit)); + sem_post(&(pRepo->readyToCommit)); + + if (pRepo->code != TSDB_CODE_SUCCESS) { + terrno = pRepo->code; + return -1; + } else { + terrno = TSDB_CODE_SUCCESS; + return 0; + } +} + +/** + * This is an important function to load data or try to load data from memory skiplist iterator. + * + * This function load memory data until: + * 1. iterator ends + * 2. data key exceeds maxKey + * 3. rowsIncreased = rowsInserted - rowsDeleteSucceed >= maxRowsToRead + * 4. operations in pCols not exceeds its max capacity if pCols is given + * + * The function tries to procceed AS MUSH AS POSSIBLE. 
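+ *
+ * For example (illustrative values only): given memory rows with keys {5, 10, 15}, a single filter key 10,
+ * keepDup = true and a large maxRowsToRead, the rows at 5 and 15 count as rowsInserted, the row at 10
+ * counts as rowsUpdated, nOperations ends up at 3, and pMergeInfo reports keyFirst = 5 and keyLast = 15.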
+ */ int tsdbLoadDataFromCache(STable *pTable, SSkipListIterator *pIter, TSKEY maxKey, int maxRowsToRead, SDataCols *pCols, - TSKEY *filterKeys, int nFilterKeys) { - ASSERT(maxRowsToRead > 0 && nFilterKeys >= 0); + TKEY *filterKeys, int nFilterKeys, bool keepDup, SMergeInfo *pMergeInfo) { + ASSERT(maxRowsToRead > 0 && nFilterKeys >= 0 && pMergeInfo != NULL); if (pIter == NULL) return 0; STSchema *pSchema = NULL; - int numOfRows = 0; - TSKEY keyNext = 0; + TSKEY rowKey = 0; + TSKEY fKey = 0; + bool isRowDel = false; int filterIter = 0; + SDataRow row = NULL; + + memset(pMergeInfo, 0, sizeof(*pMergeInfo)); + pMergeInfo->keyFirst = INT64_MAX; + pMergeInfo->keyLast = INT64_MIN; + if (pCols) tdResetDataCols(pCols); + + row = tsdbNextIterRow(pIter); + if (row == NULL || dataRowKey(row) > maxKey) { + rowKey = INT64_MAX; + isRowDel = false; + } else { + rowKey = dataRowKey(row); + isRowDel = dataRowDeleted(row); + } + + if (filterIter >= nFilterKeys) { + fKey = INT64_MAX; + } else { + fKey = tdGetKey(filterKeys[filterIter]); + } - if (nFilterKeys != 0) { // for filter purpose - ASSERT(filterKeys != NULL); - keyNext = tsdbNextIterKey(pIter); - if (keyNext < 0 || keyNext > maxKey) return numOfRows; - void *ptr = taosbsearch((void *)(&keyNext), (void *)filterKeys, nFilterKeys, sizeof(TSKEY), compTSKEY, TD_GE); - filterIter = (ptr == NULL) ? nFilterKeys : (int)((POINTER_DISTANCE(ptr, filterKeys) / sizeof(TSKEY))); - } - - do { - SDataRow row = tsdbNextIterRow(pIter); - if (row == NULL) break; - - keyNext = dataRowKey(row); - if (keyNext > maxKey) break; - - bool keyFiltered = false; - if (nFilterKeys != 0) { - while (true) { - if (filterIter >= nFilterKeys) break; - if (keyNext == filterKeys[filterIter]) { - keyFiltered = true; - filterIter++; - break; - } else if (keyNext < filterKeys[filterIter]) { - break; + while (true) { + if (fKey == INT64_MAX && rowKey == INT64_MAX) break; + + if (fKey < rowKey) { + pMergeInfo->keyFirst = MIN(pMergeInfo->keyFirst, fKey); + pMergeInfo->keyLast = MAX(pMergeInfo->keyLast, fKey); + + filterIter++; + if (filterIter >= nFilterKeys) { + fKey = INT64_MAX; + } else { + fKey = tdGetKey(filterKeys[filterIter]); + } + } else if (fKey > rowKey) { + if (isRowDel) { + pMergeInfo->rowsDeleteFailed++; + } else { + if (pMergeInfo->rowsInserted - pMergeInfo->rowsDeleteSucceed >= maxRowsToRead) break; + if (pCols && pMergeInfo->nOperations >= pCols->maxPoints) break; + pMergeInfo->rowsInserted++; + pMergeInfo->nOperations++; + pMergeInfo->keyFirst = MIN(pMergeInfo->keyFirst, rowKey); + pMergeInfo->keyLast = MAX(pMergeInfo->keyLast, rowKey); + tsdbAppendTableRowToCols(pTable, pCols, &pSchema, row); + } + + tSkipListIterNext(pIter); + row = tsdbNextIterRow(pIter); + if (row == NULL || dataRowKey(row) > maxKey) { + rowKey = INT64_MAX; + isRowDel = false; + } else { + rowKey = dataRowKey(row); + isRowDel = dataRowDeleted(row); + } + } else { + if (isRowDel) { + ASSERT(!keepDup); + if (pCols && pMergeInfo->nOperations >= pCols->maxPoints) break; + pMergeInfo->rowsDeleteSucceed++; + pMergeInfo->nOperations++; + tsdbAppendTableRowToCols(pTable, pCols, &pSchema, row); + } else { + if (keepDup) { + if (pCols && pMergeInfo->nOperations >= pCols->maxPoints) break; + pMergeInfo->rowsUpdated++; + pMergeInfo->nOperations++; + pMergeInfo->keyFirst = MIN(pMergeInfo->keyFirst, rowKey); + pMergeInfo->keyLast = MAX(pMergeInfo->keyLast, rowKey); + tsdbAppendTableRowToCols(pTable, pCols, &pSchema, row); } else { - filterIter++; + pMergeInfo->keyFirst = MIN(pMergeInfo->keyFirst, fKey); + 
pMergeInfo->keyLast = MAX(pMergeInfo->keyLast, fKey); } } - } - if (!keyFiltered) { - if (numOfRows >= maxRowsToRead) break; - if (pCols) { - if (pSchema == NULL || schemaVersion(pSchema) != dataRowVersion(row)) { - pSchema = tsdbGetTableSchemaImpl(pTable, false, false, dataRowVersion(row)); - if (pSchema == NULL) { - ASSERT(0); - } - } + tSkipListIterNext(pIter); + row = tsdbNextIterRow(pIter); + if (row == NULL || dataRowKey(row) > maxKey) { + rowKey = INT64_MAX; + isRowDel = false; + } else { + rowKey = dataRowKey(row); + isRowDel = dataRowDeleted(row); + } - tdAppendDataRowToDataCol(row, pSchema, pCols); + filterIter++; + if (filterIter >= nFilterKeys) { + fKey = INT64_MAX; + } else { + fKey = tdGetKey(filterKeys[filterIter]); } - numOfRows++; } - } while (tSkipListIterNext(pIter)); + } - return numOfRows; + return 0; } // ---------------- LOCAL FUNCTIONS ---------------- -static void tsdbFreeBytes(STsdbRepo *pRepo, void *ptr, int bytes) { - ASSERT(pRepo->mem != NULL); - if (pRepo->mem->extraBuffList == NULL) { - STsdbBufBlock *pBufBlock = tsdbGetCurrBufBlock(pRepo); - ASSERT(pBufBlock != NULL); - pBufBlock->offset -= bytes; - pBufBlock->remain += bytes; - ASSERT(ptr == POINTER_SHIFT(pBufBlock->data, pBufBlock->offset)); - tsdbTrace("vgId:%d free %d bytes to TSDB buffer pool, nBlocks %d offset %d remain %d", REPO_ID(pRepo), bytes, - listNEles(pRepo->mem->bufBlockList), pBufBlock->offset, pBufBlock->remain); - } else { - SListNode *pNode = (SListNode *)POINTER_SHIFT(ptr, -(int)(sizeof(SListNode))); - ASSERT(listTail(pRepo->mem->extraBuffList) == pNode); - tdListPopNode(pRepo->mem->extraBuffList, pNode); - free(pNode); - tsdbTrace("vgId:%d free %d bytes to SYSTEM buffer pool", REPO_ID(pRepo), bytes); - } -} - static SMemTable* tsdbNewMemTable(STsdbRepo *pRepo) { STsdbMeta *pMeta = pRepo->tsdbMeta; @@ -423,7 +411,7 @@ static void tsdbFreeMemTable(SMemTable* pMemTable) { tdListFree(pMemTable->extraBuffList); tdListFree(pMemTable->bufBlockList); tdListFree(pMemTable->actList); - taosTFree(pMemTable->tData); + tfree(pMemTable->tData); free(pMemTable); } } @@ -440,8 +428,9 @@ static STableData *tsdbNewTableData(STsdbCfg *pCfg, STable *pTable) { pTableData->keyLast = 0; pTableData->numOfRows = 0; - pTableData->pData = tSkipListCreate(TSDB_DATA_SKIPLIST_LEVEL, TSDB_DATA_TYPE_TIMESTAMP, - TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP], 0, 0, 1, tsdbGetTsTupleKey); + pTableData->pData = + tSkipListCreate(TSDB_DATA_SKIPLIST_LEVEL, TSDB_DATA_TYPE_TIMESTAMP, TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP], + tkeyComparFn, pCfg->update ? SL_UPDATE_DUP_KEY : SL_DISCARD_DUP_KEY, tsdbGetTsTupleKey); if (pTableData->pData == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; goto _err; @@ -461,323 +450,443 @@ static void tsdbFreeTableData(STableData *pTableData) { } } -static char *tsdbGetTsTupleKey(const void *data) { return dataRowTuple(*(SDataRow *)data); } - -static void *tsdbCommitData(void *arg) { - STsdbRepo * pRepo = (STsdbRepo *)arg; - SMemTable * pMem = pRepo->imem; - STsdbCfg * pCfg = &pRepo->config; - SDataCols * pDataCols = NULL; - STsdbMeta * pMeta = pRepo->tsdbMeta; - SCommitIter *iters = NULL; - SRWHelper whelper = {0}; - ASSERT(pRepo->commit == 1); - ASSERT(pMem != NULL); - - tsdbInfo("vgId:%d start to commit! 
keyFirst %" PRId64 " keyLast %" PRId64 " numOfRows %" PRId64, REPO_ID(pRepo), - pMem->keyFirst, pMem->keyLast, pMem->numOfRows); - - // Create the iterator to read from cache - if (pMem->numOfRows > 0) { - iters = tsdbCreateCommitIters(pRepo); - if (iters == NULL) { - tsdbError("vgId:%d failed to create commit iterator since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _exit; - } +static char *tsdbGetTsTupleKey(const void *data) { return dataRowTuple((SDataRow)data); } - if (tsdbInitWriteHelper(&whelper, pRepo) < 0) { - tsdbError("vgId:%d failed to init write helper since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _exit; - } - - if ((pDataCols = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pCfg->maxRowsPerFileBlock)) == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - tsdbError("vgId:%d failed to init data cols with maxRowBytes %d maxCols %d maxRowsPerFileBlock %d since %s", - REPO_ID(pRepo), pMeta->maxCols, pMeta->maxRowBytes, pCfg->maxRowsPerFileBlock, tstrerror(terrno)); - goto _exit; - } +void tsdbGetFidKeyRange(int daysPerFile, int8_t precision, int fileId, TSKEY *minKey, TSKEY *maxKey) { + *minKey = fileId * daysPerFile * tsMsPerDay[precision]; + *maxKey = *minKey + daysPerFile * tsMsPerDay[precision] - 1; +} - int sfid = (int)(TSDB_KEY_FILEID(pMem->keyFirst, pCfg->daysPerFile, pCfg->precision)); - int efid = (int)(TSDB_KEY_FILEID(pMem->keyLast, pCfg->daysPerFile, pCfg->precision)); +static int tsdbAdjustMemMaxTables(SMemTable *pMemTable, int maxTables) { + ASSERT(pMemTable->maxTables < maxTables); - // Loop to commit to each file - for (int fid = sfid; fid <= efid; fid++) { - if (tsdbCommitToFile(pRepo, fid, iters, &whelper, pDataCols) < 0) { - tsdbError("vgId:%d failed to commit to file %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno)); - goto _exit; - } - } + STableData **pTableData = (STableData **)calloc(maxTables, sizeof(STableData *)); + if (pTableData == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + return -1; } + memcpy((void *)pTableData, (void *)pMemTable->tData, sizeof(STableData *) * pMemTable->maxTables); - // Commit to update meta file - if (tsdbCommitMeta(pRepo) < 0) { - tsdbError("vgId:%d failed to commit data while committing meta data since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _exit; - } + STableData **tData = pMemTable->tData; - tsdbFitRetention(pRepo); + taosWLockLatch(&(pMemTable->latch)); + pMemTable->maxTables = maxTables; + pMemTable->tData = pTableData; + taosWUnLockLatch(&(pMemTable->latch)); -_exit: - tdFreeDataCols(pDataCols); - tsdbDestroyCommitIters(iters, pMem->maxTables); - tsdbDestroyHelper(&whelper); - tsdbEndCommit(pRepo); - tsdbInfo("vgId:%d commit over", pRepo->config.tsdbId); + tfree(tData); - return NULL; + return 0; } -static int tsdbCommitMeta(STsdbRepo *pRepo) { - SMemTable *pMem = pRepo->imem; - STsdbMeta *pMeta = pRepo->tsdbMeta; - SActObj * pAct = NULL; - SActCont * pCont = NULL; - - if (listNEles(pMem->actList) > 0) { - if (tdKVStoreStartCommit(pMeta->pStore) < 0) { - tsdbError("vgId:%d failed to commit data while start commit meta since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _err; - } - - SListNode *pNode = NULL; - - while ((pNode = tdListPopHead(pMem->actList)) != NULL) { - pAct = (SActObj *)pNode->data; - if (pAct->act == TSDB_UPDATE_META) { - pCont = (SActCont *)POINTER_SHIFT(pAct, sizeof(SActObj)); - if (tdUpdateKVStoreRecord(pMeta->pStore, pAct->uid, (void *)(pCont->cont), pCont->len) < 0) { - tsdbError("vgId:%d failed to update meta with uid %" PRIu64 " since %s", REPO_ID(pRepo), 
pAct->uid, - tstrerror(terrno)); - tdKVStoreEndCommit(pMeta->pStore); - goto _err; - } - } else if (pAct->act == TSDB_DROP_META) { - if (tdDropKVStoreRecord(pMeta->pStore, pAct->uid) < 0) { - tsdbError("vgId:%d failed to drop meta with uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid, - tstrerror(terrno)); - tdKVStoreEndCommit(pMeta->pStore); - goto _err; - } - } else { +static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema **ppSchema, SDataRow row) { + if (pCols) { + if (*ppSchema == NULL || schemaVersion(*ppSchema) != dataRowVersion(row)) { + *ppSchema = tsdbGetTableSchemaImpl(pTable, false, false, dataRowVersion(row)); + if (*ppSchema == NULL) { ASSERT(false); + return -1; } } - if (tdKVStoreEndCommit(pMeta->pStore) < 0) { - tsdbError("vgId:%d failed to commit data while end commit meta since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _err; - } + tdAppendDataRowToDataCol(row, *ppSchema, pCols); } return 0; +} -_err: - return -1; +static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter) { + if (pBlock->dataLen <= 0) return -1; + pIter->totalLen = pBlock->dataLen; + pIter->len = 0; + pIter->row = (SDataRow)(pBlock->data+pBlock->schemaLen); + return 0; } -static void tsdbEndCommit(STsdbRepo *pRepo) { - ASSERT(pRepo->commit == 1); - if (pRepo->appH.notifyStatus) pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_OVER); +static SDataRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter) { + SDataRow row = pIter->row; + if (row == NULL) return NULL; + + pIter->len += dataRowLen(row); + if (pIter->len >= pIter->totalLen) { + pIter->row = NULL; + } else { + pIter->row = (char *)row + dataRowLen(row); + } + + return row; } -static int tsdbHasDataToCommit(SCommitIter *iters, int nIters, TSKEY minKey, TSKEY maxKey) { - for (int i = 0; i < nIters; i++) { - TSKEY nextKey = tsdbNextIterKey((iters + i)->pIter); - if (nextKey > 0 && (nextKey >= minKey && nextKey <= maxKey)) return 1; +static FORCE_INLINE int tsdbCheckRowRange(STsdbRepo *pRepo, STable *pTable, SDataRow row, TSKEY minKey, TSKEY maxKey, + TSKEY now) { + if (dataRowKey(row) < minKey || dataRowKey(row) > maxKey) { + tsdbError("vgId:%d table %s tid %d uid %" PRIu64 " timestamp is out of range! 
now %" PRId64 " minKey %" PRId64 + " maxKey %" PRId64 " row key %" PRId64, + REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), now, minKey, maxKey, + dataRowKey(row)); + terrno = TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE; + return -1; } + return 0; } -void tsdbGetFidKeyRange(int daysPerFile, int8_t precision, int fileId, TSKEY *minKey, TSKEY *maxKey) { - *minKey = fileId * daysPerFile * tsMsPerDay[precision]; - *maxKey = *minKey + daysPerFile * tsMsPerDay[precision] - 1; -} +static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg) { + ASSERT(pMsg != NULL); + STsdbMeta * pMeta = pRepo->tsdbMeta; + SSubmitMsgIter msgIter = {0}; + SSubmitBlk * pBlock = NULL; + SSubmitBlkIter blkIter = {0}; + SDataRow row = NULL; + TSKEY now = taosGetTimestamp(pRepo->config.precision); + TSKEY minKey = now - tsMsPerDay[pRepo->config.precision] * pRepo->config.keep; + TSKEY maxKey = now + tsMsPerDay[pRepo->config.precision] * pRepo->config.daysPerFile; + + terrno = TSDB_CODE_SUCCESS; + pMsg->length = htonl(pMsg->length); + pMsg->numOfBlocks = htonl(pMsg->numOfBlocks); + + if (tsdbInitSubmitMsgIter(pMsg, &msgIter) < 0) return -1; + while (true) { + if (tsdbGetSubmitMsgNext(&msgIter, &pBlock) < 0) return -1; + if (pBlock == NULL) break; + + pBlock->uid = htobe64(pBlock->uid); + pBlock->tid = htonl(pBlock->tid); + pBlock->sversion = htonl(pBlock->sversion); + pBlock->dataLen = htonl(pBlock->dataLen); + pBlock->schemaLen = htonl(pBlock->schemaLen); + pBlock->numOfRows = htons(pBlock->numOfRows); + + if (pBlock->tid <= 0 || pBlock->tid >= pMeta->maxTables) { + tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pRepo), pBlock->uid, + pBlock->tid); + terrno = TSDB_CODE_TDB_INVALID_TABLE_ID; + return -1; + } -static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHelper *pHelper, SDataCols *pDataCols) { - char * dataDir = NULL; - STsdbCfg * pCfg = &pRepo->config; - STsdbFileH *pFileH = pRepo->tsdbFileH; - SFileGroup *pGroup = NULL; - SMemTable * pMem = pRepo->imem; - bool newLast = false; - - TSKEY minKey = 0, maxKey = 0; - tsdbGetFidKeyRange(pCfg->daysPerFile, pCfg->precision, fid, &minKey, &maxKey); - - // Check if there are data to commit to this file - int hasDataToCommit = tsdbHasDataToCommit(iters, pMem->maxTables, minKey, maxKey); - if (!hasDataToCommit) { - tsdbDebug("vgId:%d no data to commit to file %d", REPO_ID(pRepo), fid); - return 0; + STable *pTable = pMeta->tables[pBlock->tid]; + if (pTable == NULL || TABLE_UID(pTable) != pBlock->uid) { + tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pRepo), pBlock->uid, + pBlock->tid); + terrno = TSDB_CODE_TDB_INVALID_TABLE_ID; + return -1; + } + + if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) { + tsdbError("vgId:%d invalid action trying to insert a super table %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable)); + terrno = TSDB_CODE_TDB_INVALID_ACTION; + return -1; + } + + // Check schema version and update schema if needed + if (tsdbCheckTableSchema(pRepo, pBlock, pTable) < 0) { + if (terrno == TSDB_CODE_TDB_TABLE_RECONFIGURE) { + continue; + } else { + return -1; + } + } + + tsdbInitSubmitBlkIter(pBlock, &blkIter); + while ((row = tsdbGetSubmitBlkNext(&blkIter)) != NULL) { + if (tsdbCheckRowRange(pRepo, pTable, row, minKey, maxKey, now) < 0) { + return -1; + } + } } - // Create and open files for commit - dataDir = tsdbGetDataDirName(pRepo->rootDir); - if (dataDir == NULL) { + if (terrno != TSDB_CODE_SUCCESS) return -1; + return 0; 
+} + +static int tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, int32_t *affectedrows) { + STsdbMeta * pMeta = pRepo->tsdbMeta; + int64_t points = 0; + STable * pTable = NULL; + SSubmitBlkIter blkIter = {0}; + SDataRow row = NULL; + void ** rows = NULL; + int rowCounter = 0; + + ASSERT(pBlock->tid < pMeta->maxTables); + pTable = pMeta->tables[pBlock->tid]; + ASSERT(pTable != NULL && TABLE_UID(pTable) == pBlock->uid); + + rows = (void **)calloc(pBlock->numOfRows, sizeof(void *)); + if (rows == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; return -1; } - if ((pGroup = tsdbCreateFGroupIfNeed(pRepo, dataDir, fid)) == NULL) { - tsdbError("vgId:%d failed to create file group %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno)); - goto _err; - } + tsdbInitSubmitBlkIter(pBlock, &blkIter); + while ((row = tsdbGetSubmitBlkNext(&blkIter)) != NULL) { + if (tsdbCopyRowToMem(pRepo, row, pTable, &(rows[rowCounter])) < 0) { + tsdbFreeRows(pRepo, rows, rowCounter); + goto _err; + } - // Open files for write/read - if (tsdbSetAndOpenHelperFile(pHelper, pGroup) < 0) { - tsdbError("vgId:%d failed to set helper file since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _err; - } + (*affectedrows)++; + points++; - newLast = TSDB_NLAST_FILE_OPENED(pHelper); + if (rows[rowCounter] != NULL) { + rowCounter++; + } + } - if (tsdbLoadCompIdx(pHelper, NULL) < 0) { - tsdbError("vgId:%d failed to load SCompIdx part since %s", REPO_ID(pRepo), tstrerror(terrno)); + if (tsdbInsertDataToTableImpl(pRepo, pTable, rows, rowCounter) < 0) { goto _err; } - // Loop to commit data in each table - for (int tid = 1; tid < pMem->maxTables; tid++) { - SCommitIter *pIter = iters + tid; - if (pIter->pTable == NULL) continue; - - taosRLockLatch(&(pIter->pTable->latch)); - - if (tsdbSetHelperTable(pHelper, pIter->pTable, pRepo) < 0) goto _err; + STSchema *pSchema = tsdbGetTableSchemaByVersion(pTable, pBlock->sversion); + pRepo->stat.pointsWritten += points * schemaNCols(pSchema); + pRepo->stat.totalStorage += points * schemaVLen(pSchema); - if (pIter->pIter != NULL) { - if (tdInitDataCols(pDataCols, tsdbGetTableSchemaImpl(pIter->pTable, false, false, -1)) < 0) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - goto _err; - } + free(rows); + return 0; - if (tsdbCommitTableData(pHelper, pIter, pDataCols, maxKey) < 0) { - taosRUnLockLatch(&(pIter->pTable->latch)); - tsdbError("vgId:%d failed to write data of table %s tid %d uid %" PRIu64 " since %s", REPO_ID(pRepo), - TABLE_CHAR_NAME(pIter->pTable), TABLE_TID(pIter->pTable), TABLE_UID(pIter->pTable), - tstrerror(terrno)); - goto _err; - } - } +_err: + free(rows); + return -1; +} - taosRUnLockLatch(&(pIter->pTable->latch)); +static int tsdbCopyRowToMem(STsdbRepo *pRepo, SDataRow row, STable *pTable, void **ppRow) { + STsdbCfg * pCfg = &pRepo->config; + TKEY tkey = dataRowTKey(row); + TSKEY key = dataRowKey(row); + bool isRowDelete = TKEY_IS_DELETED(tkey); - // Move the last block to the new .l file if neccessary - if (tsdbMoveLastBlockIfNeccessary(pHelper) < 0) { - tsdbError("vgId:%d, failed to move last block, since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _err; + if (isRowDelete) { + if (!pCfg->update) { + tsdbWarn("vgId:%d vnode is not allowed to update but try to delete a data row", REPO_ID(pRepo)); + terrno = TSDB_CODE_TDB_INVALID_ACTION; + return -1; } - // Write the SCompBlock part - if (tsdbWriteCompInfo(pHelper) < 0) { - tsdbError("vgId:%d, failed to write compInfo part since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _err; + if (key > TABLE_LASTKEY(pTable)) { + 
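+      // A delete for a key newer than the table's last key cannot match any stored row, so it is
+      // skipped as a no-op here: no buffer space is allocated and *ppRow stays NULL.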
tsdbTrace("vgId:%d skip to delete row key %" PRId64 " which is larger than table lastKey %" PRId64, + REPO_ID(pRepo), key, TABLE_LASTKEY(pTable)); + return 0; } } - if (tsdbWriteCompIdx(pHelper) < 0) { - tsdbError("vgId:%d failed to write compIdx part to file %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno)); - goto _err; + void *pRow = tsdbAllocBytes(pRepo, dataRowLen(row)); + if (pRow == NULL) { + tsdbError("vgId:%d failed to insert row with key %" PRId64 " to table %s while allocate %d bytes since %s", + REPO_ID(pRepo), key, TABLE_CHAR_NAME(pTable), dataRowLen(row), tstrerror(terrno)); + return -1; } - taosTFree(dataDir); - tsdbCloseHelperFile(pHelper, 0, pGroup); + dataRowCpy(pRow, row); + ppRow[0] = pRow; - pthread_rwlock_wrlock(&(pFileH->fhlock)); + tsdbTrace("vgId:%d a row is %s table %s tid %d uid %" PRIu64 " key %" PRIu64, REPO_ID(pRepo), + isRowDelete ? "deleted from" : "updated in", TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), + key); - (void)rename(helperNewHeadF(pHelper)->fname, helperHeadF(pHelper)->fname); - pGroup->files[TSDB_FILE_TYPE_HEAD].info = helperNewHeadF(pHelper)->info; + return 0; +} - if (newLast) { - (void)rename(helperNewLastF(pHelper)->fname, helperLastF(pHelper)->fname); - pGroup->files[TSDB_FILE_TYPE_LAST].info = helperNewLastF(pHelper)->info; - } else { - pGroup->files[TSDB_FILE_TYPE_LAST].info = helperLastF(pHelper)->info; +static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter) { + if (pMsg == NULL) { + terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP; + return -1; } - pGroup->files[TSDB_FILE_TYPE_DATA].info = helperDataF(pHelper)->info; - - pthread_rwlock_unlock(&(pFileH->fhlock)); + pIter->totalLen = pMsg->length; + pIter->len = 0; + pIter->pMsg = pMsg; + if (pMsg->length <= TSDB_SUBMIT_MSG_HEAD_SIZE) { + terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP; + return -1; + } return 0; - -_err: - taosTFree(dataDir); - tsdbCloseHelperFile(pHelper, 1, NULL); - return -1; } -static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo) { - SMemTable *pMem = pRepo->imem; - STsdbMeta *pMeta = pRepo->tsdbMeta; +static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock) { + if (pIter->len == 0) { + pIter->len += TSDB_SUBMIT_MSG_HEAD_SIZE; + } else { + SSubmitBlk *pSubmitBlk = (SSubmitBlk *)POINTER_SHIFT(pIter->pMsg, pIter->len); + pIter->len += (sizeof(SSubmitBlk) + pSubmitBlk->dataLen + pSubmitBlk->schemaLen); + } - SCommitIter *iters = (SCommitIter *)calloc(pMem->maxTables, sizeof(SCommitIter)); - if (iters == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - return NULL; + if (pIter->len > pIter->totalLen) { + terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP; + *pPBlock = NULL; + return -1; } - if (tsdbRLockRepoMeta(pRepo) < 0) goto _err; + *pPBlock = (pIter->len == pIter->totalLen) ? 
NULL : (SSubmitBlk *)POINTER_SHIFT(pIter->pMsg, pIter->len); + + return 0; +} + +static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable) { + ASSERT(pTable != NULL); - // reference all tables - for (int i = 0; i < pMem->maxTables; i++) { - if (pMeta->tables[i] != NULL) { - tsdbRefTable(pMeta->tables[i]); - iters[i].pTable = pMeta->tables[i]; + STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1); + int sversion = schemaVersion(pSchema); + + if (pBlock->sversion == sversion) { + return 0; + } else { + if (TABLE_TYPE(pTable) == TSDB_STREAM_TABLE) { // stream table is not allowed to change schema + terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION; + return -1; } } - if (tsdbUnlockRepoMeta(pRepo) < 0) goto _err; + if (pBlock->sversion > sversion) { // may need to update table schema + if (pBlock->schemaLen > 0) { + tsdbDebug( + "vgId:%d table %s tid %d uid %" PRIu64 " schema version %d is out of data, client version %d, update...", + REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), sversion, pBlock->sversion); + ASSERT(pBlock->schemaLen % sizeof(STColumn) == 0); + int numOfCols = pBlock->schemaLen / sizeof(STColumn); + STColumn *pTCol = (STColumn *)pBlock->data; - for (int i = 0; i < pMem->maxTables; i++) { - if ((iters[i].pTable != NULL) && (pMem->tData[i] != NULL) && (TABLE_UID(iters[i].pTable) == pMem->tData[i]->uid)) { - if ((iters[i].pIter = tSkipListCreateIter(pMem->tData[i]->pData)) == NULL) { + STSchemaBuilder schemaBuilder = {0}; + if (tdInitTSchemaBuilder(&schemaBuilder, pBlock->sversion) < 0) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - goto _err; + tsdbError("vgId:%d failed to update schema of table %s since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), + tstrerror(terrno)); + return -1; } - tSkipListIterNext(iters[i].pIter); + for (int i = 0; i < numOfCols; i++) { + if (tdAddColToSchema(&schemaBuilder, pTCol[i].type, htons(pTCol[i].colId), htons(pTCol[i].bytes)) < 0) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + tsdbError("vgId:%d failed to update schema of table %s since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), + tstrerror(terrno)); + tdDestroyTSchemaBuilder(&schemaBuilder); + return -1; + } + } + + STSchema *pNSchema = tdGetSchemaFromBuilder(&schemaBuilder); + if (pNSchema == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + tdDestroyTSchemaBuilder(&schemaBuilder); + return -1; + } + + tdDestroyTSchemaBuilder(&schemaBuilder); + tsdbUpdateTableSchema(pRepo, pTable, pNSchema, true); + } else { + tsdbDebug( + "vgId:%d table %s tid %d uid %" PRIu64 " schema version %d is out of data, client version %d, reconfigure...", + REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), sversion, pBlock->sversion); + terrno = TSDB_CODE_TDB_TABLE_RECONFIGURE; + return -1; + } + } else { + ASSERT(pBlock->sversion >= 0); + if (tsdbGetTableSchemaImpl(pTable, false, false, pBlock->sversion) == NULL) { + tsdbError("vgId:%d invalid submit schema version %d to table %s tid %d from client", REPO_ID(pRepo), + pBlock->sversion, TABLE_CHAR_NAME(pTable), TABLE_TID(pTable)); } + terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION; + return -1; } - return iters; - -_err: - tsdbDestroyCommitIters(iters, pMem->maxTables); - return NULL; + return 0; } -static void tsdbDestroyCommitIters(SCommitIter *iters, int maxTables) { - if (iters == NULL) return; +static int tsdbInsertDataToTableImpl(STsdbRepo *pRepo, STable *pTable, void **rows, int rowCounter) { + if (rowCounter < 1) return 0; - for (int i = 1; i < maxTables; i++) { - 
if (iters[i].pTable != NULL) { - tsdbUnRefTable(iters[i].pTable); - tSkipListDestroyIter(iters[i].pIter); + SMemTable * pMemTable = NULL; + STableData *pTableData = NULL; + STsdbMeta * pMeta = pRepo->tsdbMeta; + STsdbCfg * pCfg = &(pRepo->config); + + ASSERT(pRepo->mem != NULL); + pMemTable = pRepo->mem; + + if (TABLE_TID(pTable) >= pMemTable->maxTables) { + if (tsdbAdjustMemMaxTables(pMemTable, pMeta->maxTables) < 0) { + tsdbFreeRows(pRepo, rows, rowCounter); + return -1; } } + pTableData = pMemTable->tData[TABLE_TID(pTable)]; - free(iters); -} + if (pTableData == NULL || pTableData->uid != TABLE_UID(pTable)) { + if (pTableData != NULL) { + taosWLockLatch(&(pMemTable->latch)); + pMemTable->tData[TABLE_TID(pTable)] = NULL; + tsdbFreeTableData(pTableData); + taosWUnLockLatch(&(pMemTable->latch)); + } -static int tsdbAdjustMemMaxTables(SMemTable *pMemTable, int maxTables) { - ASSERT(pMemTable->maxTables < maxTables); + pTableData = tsdbNewTableData(pCfg, pTable); + if (pTableData == NULL) { + tsdbError("vgId:%d failed to insert data to table %s uid %" PRId64 " tid %d since %s", REPO_ID(pRepo), + TABLE_CHAR_NAME(pTable), TABLE_UID(pTable), TABLE_TID(pTable), tstrerror(terrno)); + tsdbFreeRows(pRepo, rows, rowCounter); + return -1; + } - STableData **pTableData = (STableData **)calloc(maxTables, sizeof(STableData *)); - if (pTableData == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - return -1; + pRepo->mem->tData[TABLE_TID(pTable)] = pTableData; } - memcpy((void *)pTableData, (void *)pMemTable->tData, sizeof(STableData *) * pMemTable->maxTables); - STableData **tData = pMemTable->tData; + ASSERT((pTableData != NULL) && pTableData->uid == TABLE_UID(pTable)); - taosWLockLatch(&(pMemTable->latch)); - pMemTable->maxTables = maxTables; - pMemTable->tData = pTableData; - taosWUnLockLatch(&(pMemTable->latch)); + int64_t osize = SL_SIZE(pTableData->pData); + tSkipListPutBatch(pTableData->pData, rows, rowCounter); + int64_t dsize = SL_SIZE(pTableData->pData) - osize; + + if (pMemTable->keyFirst > dataRowKey(rows[0])) pMemTable->keyFirst = dataRowKey(rows[0]); + if (pMemTable->keyLast < dataRowKey(rows[rowCounter - 1])) pMemTable->keyLast = dataRowKey(rows[rowCounter - 1]); + pMemTable->numOfRows += dsize; - taosTFree(tData); + if (pTableData->keyFirst > dataRowKey(rows[0])) pTableData->keyFirst = dataRowKey(rows[0]); + if (pTableData->keyLast < dataRowKey(rows[rowCounter - 1])) pTableData->keyLast = dataRowKey(rows[rowCounter - 1]); + pTableData->numOfRows += dsize; + + // TODO: impl delete row thing + if (TABLE_LASTKEY(pTable) < dataRowKey(rows[rowCounter-1])) TABLE_LASTKEY(pTable) = dataRowKey(rows[rowCounter-1]); return 0; +} + +static void tsdbFreeRows(STsdbRepo *pRepo, void **rows, int rowCounter) { + ASSERT(pRepo->mem != NULL); + STsdbBufPool *pBufPool = pRepo->pPool; + + for (int i = rowCounter - 1; i >= 0; --i) { + SDataRow row = (SDataRow)rows[i]; + int bytes = (int)dataRowLen(row); + + if (pRepo->mem->extraBuffList == NULL) { + STsdbBufBlock *pBufBlock = tsdbGetCurrBufBlock(pRepo); + ASSERT(pBufBlock != NULL && pBufBlock->offset >= bytes); + + pBufBlock->offset -= bytes; + pBufBlock->remain += bytes; + ASSERT(row == POINTER_SHIFT(pBufBlock->data, pBufBlock->offset)); + tsdbTrace("vgId:%d free %d bytes to TSDB buffer pool, nBlocks %d offset %d remain %d", REPO_ID(pRepo), bytes, + listNEles(pRepo->mem->bufBlockList), pBufBlock->offset, pBufBlock->remain); + + if (pBufBlock->offset == 0) { // return the block to buffer pool + tsdbLockRepo(pRepo); + SListNode *pNode = 
tdListPopTail(pRepo->mem->bufBlockList); + tdListPrependNode(pBufPool->bufBlockList, pNode); + tsdbUnlockRepo(pRepo); + } + } else { + ASSERT(listNEles(pRepo->mem->extraBuffList) > 0); + SListNode *pNode = tdListPopTail(pRepo->mem->extraBuffList); + ASSERT(row == pNode->data); + free(pNode); + tsdbTrace("vgId:%d free %d bytes to SYSTEM buffer pool", REPO_ID(pRepo), bytes); + + if (listNEles(pRepo->mem->extraBuffList) == 0) { + tdListFree(pRepo->mem->extraBuffList); + pRepo->mem->extraBuffList = NULL; + } + } + } } \ No newline at end of file diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index f3bd91f038cf209827b1c252160019a6b0aac27f..25c815b74e3bd5593bef9157cbd42ba869298cab 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -86,7 +86,8 @@ int tsdbCreateTable(TSDB_REPO_T *repo, STableCfg *pCfg) { if (pTable != NULL) { tsdbError("vgId:%d table %s already exists, tid %d uid %" PRId64, REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable)); - return TSDB_CODE_TDB_TABLE_ALREADY_EXIST; + terrno = TSDB_CODE_TDB_TABLE_ALREADY_EXIST; + goto _err; } if (pCfg->type == TSDB_CHILD_TABLE) { @@ -191,7 +192,7 @@ int tsdbDropTable(TSDB_REPO_T *repo, STableId tableId) { return 0; _err: - taosTFree(tbname); + tfree(tbname); return -1; } @@ -461,7 +462,7 @@ void tsdbFreeMeta(STsdbMeta *pMeta) { if (pMeta) { taosHashCleanup(pMeta->uidMap); tdListFree(pMeta->superList); - taosTFree(pMeta->tables); + tfree(pMeta->tables); pthread_rwlock_destroy(&pMeta->rwLock); free(pMeta); } @@ -485,11 +486,11 @@ int tsdbOpenMeta(STsdbRepo *pRepo) { } tsdbDebug("vgId:%d open TSDB meta succeed", REPO_ID(pRepo)); - taosTFree(fname); + tfree(fname); return 0; _err: - taosTFree(fname); + tfree(fname); return -1; } @@ -562,12 +563,12 @@ int tsdbUnlockRepoMeta(STsdbRepo *pRepo) { void tsdbRefTable(STable *pTable) { int32_t ref = T_REF_INC(pTable); UNUSED(ref); - // tsdbDebug("ref table %"PRIu64", tid:%d, refCount:%d", TABLE_UID(pTable), TABLE_TID(pTable), ref); + tsdbDebug("ref table %s uid %" PRIu64 " tid:%d, refCount:%d", TABLE_CHAR_NAME(pTable), TABLE_UID(pTable), TABLE_TID(pTable), ref); } void tsdbUnRefTable(STable *pTable) { int32_t ref = T_REF_DEC(pTable); - tsdbDebug("unref table uid:%"PRIu64", tid:%d, refCount:%d", TABLE_UID(pTable), TABLE_TID(pTable), ref); + tsdbDebug("unref table %s uid:%"PRIu64" tid:%d, refCount:%d", TABLE_CHAR_NAME(pTable), TABLE_UID(pTable), TABLE_TID(pTable), ref); if (ref == 0) { // tsdbDebug("destory table name:%s uid:%"PRIu64", tid:%d", TABLE_CHAR_NAME(pTable), TABLE_UID(pTable), TABLE_TID(pTable)); @@ -643,7 +644,7 @@ static void tsdbOrgMeta(void *pHandle) { } static char *getTagIndexKey(const void *pData) { - STable *pTable = *(STable **)pData; + STable *pTable = (STable *)pData; STSchema *pSchema = tsdbGetTableTagSchema(pTable); STColumn *pCol = schemaColAt(pSchema, DEFAULT_TAG_INDEX_COLUMN); @@ -700,7 +701,7 @@ static STable *tsdbCreateTableFromCfg(STableCfg *pCfg, bool isSuper) { } pTable->tagVal = NULL; STColumn *pCol = schemaColAt(pTable->tagSchema, DEFAULT_TAG_INDEX_COLUMN); - pTable->pIndex = tSkipListCreate(TSDB_SUPER_TABLE_SL_LEVEL, colType(pCol), (uint8_t)(colBytes(pCol)), 1, 0, 1, getTagIndexKey); + pTable->pIndex = tSkipListCreate(TSDB_SUPER_TABLE_SL_LEVEL, colType(pCol), (uint8_t)(colBytes(pCol)), NULL, SL_ALLOW_DUP_KEY, getTagIndexKey); if (pTable->pIndex == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; goto _err; @@ -745,7 +746,7 @@ static STable *tsdbCreateTableFromCfg(STableCfg *pCfg, bool isSuper) { T_REF_INC(pTable); - 
tsdbTrace("table %s tid %d uid %" PRIu64 " is created", TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), + tsdbDebug("table %s tid %d uid %" PRIu64 " is created", TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable)); return pTable; @@ -760,7 +761,7 @@ static void tsdbFreeTable(STable *pTable) { if (pTable->name != NULL) tsdbTrace("table %s tid %d uid %" PRIu64 " is freed", TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable)); - taosTFree(TABLE_NAME(pTable)); + tfree(TABLE_NAME(pTable)); if (TABLE_TYPE(pTable) != TSDB_CHILD_TABLE) { for (int i = 0; i < TSDB_MAX_TABLE_SCHEMAS; i++) { tdFreeSchema(pTable->schema[i]); @@ -774,7 +775,7 @@ static void tsdbFreeTable(STable *pTable) { kvRowFree(pTable->tagVal); tSkipListDestroy(pTable->pIndex); - taosTFree(pTable->sql); + tfree(pTable->sql); free(pTable); } } @@ -889,7 +890,7 @@ static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rmFro } if (lock) tsdbUnlockRepoMeta(pRepo); - tsdbDebug("vgId:%d table %s is removed from meta", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable)); + tsdbDebug("vgId:%d table %s uid %" PRIu64 " is removed from meta", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_UID(pTable)); tsdbUnRefTable(pTable); } @@ -900,23 +901,8 @@ static int tsdbAddTableIntoIndex(STsdbMeta *pMeta, STable *pTable, bool refSuper pTable->pSuper = pSTable; - int32_t level = 0; - int32_t headSize = 0; - - tSkipListNewNodeInfo(pSTable->pIndex, &level, &headSize); - - // NOTE: do not allocate the space for key, since in each skip list node, only keep the pointer to pTable, not the - // actual key value, and the key value will be retrieved during query through the pTable and getTagIndexKey function - SSkipListNode *pNode = calloc(1, headSize + sizeof(STable *)); - if (pNode == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - return -1; - } - pNode->level = level; - - memcpy(SL_GET_NODE_DATA(pNode), &pTable, sizeof(STable *)); + tSkipListPut(pSTable->pIndex, (void *)pTable); - tSkipListPut(pSTable->pIndex, pNode); if (refSuper) T_REF_INC(pSTable); return 0; } @@ -940,7 +926,7 @@ static int tsdbRemoveTableFromIndex(STsdbMeta *pMeta, STable *pTable) { SSkipListNode *pNode = taosArrayGetP(res, i); // STableIndexElem* pElem = (STableIndexElem*) SL_GET_NODE_DATA(pNode); - if (*(STable **)SL_GET_NODE_DATA(pNode) == pTable) { // this is the exact what we need + if ((STable *)SL_GET_NODE_DATA(pNode) == pTable) { // this is the exact what we need tSkipListRemoveNode(pSTable->pIndex, pNode); } } @@ -1080,9 +1066,9 @@ void tsdbClearTableCfg(STableCfg *config) { if (config->schema) tdFreeSchema(config->schema); if (config->tagSchema) tdFreeSchema(config->tagSchema); if (config->tagValues) kvRowFree(config->tagValues); - taosTFree(config->name); - taosTFree(config->sname); - taosTFree(config->sql); + tfree(config->name); + tfree(config->sname); + tfree(config->sql); free(config); } } @@ -1170,8 +1156,8 @@ static void *tsdbDecodeTable(void *buf, STable **pRTable) { if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) { buf = tdDecodeSchema(buf, &(pTable->tagSchema)); STColumn *pCol = schemaColAt(pTable->tagSchema, DEFAULT_TAG_INDEX_COLUMN); - pTable->pIndex = - tSkipListCreate(TSDB_SUPER_TABLE_SL_LEVEL, colType(pCol), (uint8_t)(colBytes(pCol)), 1, 0, 1, getTagIndexKey); + pTable->pIndex = tSkipListCreate(TSDB_SUPER_TABLE_SL_LEVEL, colType(pCol), (uint8_t)(colBytes(pCol)), NULL, + SL_ALLOW_DUP_KEY, getTagIndexKey); if (pTable->pIndex == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; tsdbFreeTable(pTable); @@ -1197,7 +1183,7 @@ static int 
tsdbGetTableEncodeSize(int8_t act, STable *pTable) { tlen = sizeof(SListNode) + sizeof(SActObj) + sizeof(SActCont) + tsdbEncodeTable(NULL, pTable) + sizeof(TSCKSUM); } else { if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) { - tlen = (int)((sizeof(SListNode) + sizeof(SActObj)) * (tSkipListGetSize(pTable->pIndex) + 1)); + tlen = (int)((sizeof(SListNode) + sizeof(SActObj)) * (SL_SIZE(pTable->pIndex) + 1)); } else { tlen = sizeof(SListNode) + sizeof(SActObj); } @@ -1244,7 +1230,7 @@ static int tsdbRemoveTableFromStore(STsdbRepo *pRepo, STable *pTable) { } while (tSkipListIterNext(pIter)) { - STable *tTable = *(STable **)SL_GET_NODE_DATA(tSkipListIterGet(pIter)); + STable *tTable = (STable *)SL_GET_NODE_DATA(tSkipListIterGet(pIter)); ASSERT(TABLE_TYPE(tTable) == TSDB_CHILD_TABLE); pBuf = tsdbInsertTableAct(pRepo, TSDB_DROP_META, pBuf, tTable); } @@ -1269,7 +1255,7 @@ static int tsdbRmTableFromMeta(STsdbRepo *pRepo, STable *pTable) { tsdbWLockRepoMeta(pRepo); while (tSkipListIterNext(pIter)) { - STable *tTable = *(STable **)SL_GET_NODE_DATA(tSkipListIterGet(pIter)); + STable *tTable = (STable *)SL_GET_NODE_DATA(tSkipListIterGet(pIter)); tsdbRemoveTableFromMeta(pRepo, tTable, false, false); } @@ -1304,7 +1290,7 @@ static int tsdbAdjustMetaTables(STsdbRepo *pRepo, int tid) { STable **tTables = pMeta->tables; pMeta->tables = tables; - taosTFree(tTables); + tfree(tTables); tsdbDebug("vgId:%d tsdb meta maxTables is adjusted as %d", REPO_ID(pRepo), maxTables); return 0; diff --git a/src/tsdb/src/tsdbRWHelper.c b/src/tsdb/src/tsdbRWHelper.c index 357093bd9e125567ef3d3629c00877f54778b500..5b65b2185a873b9ea241b0b0e4147a4e9d522e48 100644 --- a/src/tsdb/src/tsdbRWHelper.c +++ b/src/tsdb/src/tsdbRWHelper.c @@ -14,9 +14,7 @@ */ #define _DEFAULT_SOURCE - #define TAOS_RANDOM_FILE_FAIL_TEST - #include "os.h" #include "talgo.h" #include "tchecksum.h" @@ -27,6 +25,7 @@ #define TSDB_GET_COMPCOL_LEN(nCols) (sizeof(SCompData) + sizeof(SCompCol) * (nCols) + sizeof(TSCKSUM)) #define TSDB_KEY_COL_OFFSET 0 #define TSDB_GET_COMPBLOCK_IDX(h, b) (POINTER_DISTANCE(b, (h)->pCompInfo->blocks)/sizeof(SCompBlock)) +#define TSDB_IS_LAST_BLOCK(pb) ((pb)->last) static bool tsdbShouldCreateNewLast(SRWHelper *pHelper); static int tsdbWriteBlockToFile(SRWHelper *pHelper, SFile *pFile, SDataCols *pDataCols, SCompBlock *pCompBlock, @@ -34,7 +33,7 @@ static int tsdbWriteBlockToFile(SRWHelper *pHelper, SFile *pFile, SDataCols *pD static int compareKeyBlock(const void *arg1, const void *arg2); static int tsdbAdjustInfoSizeIfNeeded(SRWHelper *pHelper, size_t esize); static int tsdbInsertSuperBlock(SRWHelper *pHelper, SCompBlock *pCompBlock, int blkIdx); -static int tsdbAddSubBlock(SRWHelper *pHelper, SCompBlock *pCompBlock, int blkIdx, int rowsAdded); +static int tsdbAddSubBlock(SRWHelper *pHelper, SCompBlock *pCompBlock, int blkIdx, SMergeInfo *pMergeInfo); static int tsdbUpdateSuperBlock(SRWHelper *pHelper, SCompBlock *pCompBlock, int blkIdx); static void tsdbResetHelperFileImpl(SRWHelper *pHelper); static int tsdbInitHelperFile(SRWHelper *pHelper); @@ -61,8 +60,10 @@ static int tsdbLoadColData(SRWHelper *pHelper, SFile *pFile, SCompBlock *pComp static int tsdbWriteBlockToProperFile(SRWHelper *pHelper, SDataCols *pDataCols, SCompBlock *pCompBlock); static int tsdbProcessMergeCommit(SRWHelper *pHelper, SCommitIter *pCommitIter, SDataCols *pDataCols, TSKEY maxKey, int *blkIdx); -static int tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIter *pCommitIter, SDataCols *pTarget, - TSKEY maxKey, int maxRows); +static void 
tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIter *pCommitIter, SDataCols *pTarget, + TSKEY maxKey, int maxRows, int8_t update); +static bool tsdbCheckAddSubBlockCond(SRWHelper *pHelper, SCompBlock *pCompBlock, SMergeInfo *pMergeInfo, int maxOps); +static int tsdbDeleteSuperBlock(SRWHelper *pHelper, int blkIdx); // ---------------------- INTERNAL FUNCTIONS ---------------------- int tsdbInitReadHelper(SRWHelper *pHelper, STsdbRepo *pRepo) { @@ -279,7 +280,7 @@ int tsdbCommitTableData(SRWHelper *pHelper, SCommitIter *pCommitIter, SDataCols while (true) { ASSERT(blkIdx <= (int)pIdx->numOfBlocks); TSKEY keyFirst = tsdbNextIterKey(pCommitIter->pIter); - if (keyFirst < 0 || keyFirst > maxKey) break; // iter over + if (keyFirst == TSDB_DATA_TIMESTAMP_NULL || keyFirst > maxKey) break; // iter over if (pIdx->len <= 0 || keyFirst > pIdx->maxKey) { if (tsdbProcessAppendCommit(pHelper, pCommitIter, pDataCols, maxKey) < 0) return -1; @@ -335,7 +336,7 @@ int tsdbMoveLastBlockIfNeccessary(SRWHelper *pHelper) { return -1; } - if (taosTSendFile(helperNewLastF(pHelper)->fd, helperLastF(pHelper)->fd, NULL, pCompBlock->len) < pCompBlock->len) { + if (taosSendFile(helperNewLastF(pHelper)->fd, helperLastF(pHelper)->fd, NULL, pCompBlock->len) < pCompBlock->len) { tsdbError("vgId:%d failed to sendfile from file %s to file %s since %s", REPO_ID(pHelper->pRepo), helperLastF(pHelper)->fname, helperNewLastF(pHelper)->fname, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); @@ -380,7 +381,7 @@ int tsdbWriteCompInfo(SRWHelper *pHelper) { pIdx->tid = pHelper->tableInfo.tid; ASSERT(pIdx->offset >= TSDB_FILE_HEAD_SIZE); - if (taosTWrite(pFile->fd, (void *)(pHelper->pCompInfo), pIdx->len) < (int)pIdx->len) { + if (taosWrite(pFile->fd, (void *)(pHelper->pCompInfo), pIdx->len) < (int)pIdx->len) { tsdbError("vgId:%d failed to write %d bytes to file %s since %s", REPO_ID(pHelper->pRepo), pIdx->len, pFile->fname, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); @@ -432,7 +433,7 @@ int tsdbWriteCompIdx(SRWHelper *pHelper) { ASSERT(offset == pFile->info.size); - if (taosTWrite(pFile->fd, (void *)pHelper->pWIdx, pFile->info.len) < (int)pFile->info.len) { + if (taosWrite(pFile->fd, (void *)pHelper->pWIdx, pFile->info.len) < (int)pFile->info.len) { tsdbError("vgId:%d failed to write %d bytes to file %s since %s", REPO_ID(pHelper->pRepo), pFile->info.len, pFile->fname, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); @@ -454,7 +455,7 @@ int tsdbLoadCompIdxImpl(SFile *pFile, uint32_t offset, uint32_t len, void *buffe return -1; } - if (taosTRead(pFile->fd, buffer, len) < len) { + if (taosRead(pFile->fd, buffer, len) < len) { tsdbError("%s: read file %s offset %u len %u failed since %s", prefixMsg, pFile->fname, offset, len, strerror(errno)); terrno = TSDB_CODE_TDB_FILE_CORRUPTED; @@ -551,7 +552,7 @@ int tsdbLoadCompInfoImpl(SFile *pFile, SCompIdx *pIdx, SCompInfo **ppCompInfo) { return -1; } - if (taosTRead(pFile->fd, (void *)(*ppCompInfo), pIdx->len) < (int)pIdx->len) { + if (taosRead(pFile->fd, (void *)(*ppCompInfo), pIdx->len) < (int)pIdx->len) { tsdbError("%s: read file %s offset %u len %u failed since %s", prefixMsg, pFile->fname, pIdx->offset, pIdx->len, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); @@ -608,7 +609,7 @@ int tsdbLoadCompData(SRWHelper *pHelper, SCompBlock *pCompBlock, void *target) { return -1; } - if (taosTRead(pFile->fd, (void *)pHelper->pCompData, tsize) < tsize) { + if (taosRead(pFile->fd, (void *)pHelper->pCompData, tsize) < tsize) { tsdbError("vgId:%d failed to read %" 
PRIzu " bytes from file %s since %s", REPO_ID(pHelper->pRepo), tsize, pFile->fname, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); @@ -823,7 +824,7 @@ static int tsdbWriteBlockToFile(SRWHelper *pHelper, SFile *pFile, SDataCols *pDa sizeof(TSCKSUM)); // Write the whole block to file - if (taosTWrite(pFile->fd, (void *)pCompData, lsize) < lsize) { + if (taosWrite(pFile->fd, (void *)pCompData, lsize) < lsize) { tsdbError("vgId:%d failed to write %d bytes to file %s since %s", REPO_ID(helperRepo(pHelper)), lsize, pFile->fname, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); @@ -925,7 +926,7 @@ _err: return -1; } -static int tsdbAddSubBlock(SRWHelper *pHelper, SCompBlock *pCompBlock, int blkIdx, int rowsAdded) { +static int tsdbAddSubBlock(SRWHelper *pHelper, SCompBlock *pCompBlock, int blkIdx, SMergeInfo *pMergeInfo) { ASSERT(pCompBlock->numOfSubBlocks == 0); SCompIdx *pIdx = &(pHelper->curCompIdx); @@ -958,9 +959,9 @@ static int tsdbAddSubBlock(SRWHelper *pHelper, SCompBlock *pCompBlock, int blkId pSCompBlock->numOfSubBlocks++; ASSERT(pSCompBlock->numOfSubBlocks <= TSDB_MAX_SUBBLOCKS); pSCompBlock->len += sizeof(SCompBlock); - pSCompBlock->numOfRows += rowsAdded; - pSCompBlock->keyFirst = MIN(pSCompBlock->keyFirst, pCompBlock->keyFirst); - pSCompBlock->keyLast = MAX(pSCompBlock->keyLast, pCompBlock->keyLast); + pSCompBlock->numOfRows = pSCompBlock->numOfRows + pMergeInfo->rowsInserted - pMergeInfo->rowsDeleteSucceed; + pSCompBlock->keyFirst = pMergeInfo->keyFirst; + pSCompBlock->keyLast = pMergeInfo->keyLast; pIdx->len += sizeof(SCompBlock); } else { // Need to create two sub-blocks void *ptr = NULL; @@ -989,11 +990,11 @@ static int tsdbAddSubBlock(SRWHelper *pHelper, SCompBlock *pCompBlock, int blkId ((SCompBlock *)ptr)[1] = *pCompBlock; pSCompBlock->numOfSubBlocks = 2; - pSCompBlock->numOfRows += rowsAdded; + pSCompBlock->numOfRows = pSCompBlock->numOfRows + pMergeInfo->rowsInserted - pMergeInfo->rowsDeleteSucceed; pSCompBlock->offset = ((char *)ptr) - ((char *)pHelper->pCompInfo); pSCompBlock->len = sizeof(SCompBlock) * 2; - pSCompBlock->keyFirst = MIN(((SCompBlock *)ptr)[0].keyFirst, ((SCompBlock *)ptr)[1].keyFirst); - pSCompBlock->keyLast = MAX(((SCompBlock *)ptr)[0].keyLast, ((SCompBlock *)ptr)[1].keyLast); + pSCompBlock->keyFirst = pMergeInfo->keyFirst; + pSCompBlock->keyLast = pMergeInfo->keyLast; pIdx->len += (sizeof(SCompBlock) * 2); } @@ -1047,6 +1048,45 @@ static int tsdbUpdateSuperBlock(SRWHelper *pHelper, SCompBlock *pCompBlock, int return 0; } +static int tsdbDeleteSuperBlock(SRWHelper *pHelper, int blkIdx) { + SCompIdx *pCompIdx = &(pHelper->curCompIdx); + + ASSERT(pCompIdx->numOfBlocks > 0 && blkIdx < pCompIdx->numOfBlocks); + + SCompBlock *pCompBlock= blockAtIdx(pHelper, blkIdx); + SCompBlock compBlock = *pCompBlock; + ASSERT(pCompBlock->numOfSubBlocks > 0 && pCompBlock->numOfSubBlocks <= TSDB_MAX_SUBBLOCKS); + + if (pCompIdx->numOfBlocks == 1) { + memset(pCompIdx, 0, sizeof(*pCompIdx)); + } else { + int tsize = 0; + + if (compBlock.numOfSubBlocks > 1) { + tsize = (int)(pCompIdx->len - (compBlock.offset + sizeof(SCompBlock) * compBlock.numOfSubBlocks)); + + ASSERT(tsize > 0); + memmove(POINTER_SHIFT(pHelper->pCompInfo, compBlock.offset), + POINTER_SHIFT(pHelper->pCompInfo, compBlock.offset + sizeof(SCompBlock) * compBlock.numOfSubBlocks), + tsize); + + pCompIdx->len = pCompIdx->len - sizeof(SCompBlock) * compBlock.numOfSubBlocks; + } + + tsize = (int)(pCompIdx->len - POINTER_DISTANCE(blockAtIdx(pHelper, blkIdx + 1), pHelper->pCompInfo)); + ASSERT(tsize > 0); + 
memmove((void *)blockAtIdx(pHelper, blkIdx), (void *)blockAtIdx(pHelper, blkIdx + 1), tsize); + + pCompIdx->len -= sizeof(SCompBlock); + + pCompIdx->numOfBlocks--; + pCompIdx->hasLast = (uint32_t)(blockAtIdx(pHelper, pCompIdx->numOfBlocks - 1)->last); + pCompIdx->maxKey = blockAtIdx(pHelper, pCompIdx->numOfBlocks - 1)->keyLast; + } + + return 0; +} + static void tsdbResetHelperFileImpl(SRWHelper *pHelper) { pHelper->idxH.numOfIdx = 0; pHelper->idxH.curIdx = 0; @@ -1210,7 +1250,7 @@ static int tsdbLoadColData(SRWHelper *pHelper, SFile *pFile, SCompBlock *pCompBl return -1; } - if (taosTRead(pFile->fd, pHelper->pBuffer, pCompCol->len) < pCompCol->len) { + if (taosRead(pFile->fd, pHelper->pBuffer, pCompCol->len) < pCompCol->len) { tsdbError("vgId:%d failed to read %d bytes from file %s since %s", REPO_ID(pHelper->pRepo), pCompCol->len, pFile->fname, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); @@ -1325,7 +1365,7 @@ static int tsdbLoadBlockDataImpl(SRWHelper *pHelper, SCompBlock *pCompBlock, SDa terrno = TAOS_SYSTEM_ERROR(errno); goto _err; } - if (taosTRead(fd, (void *)pCompData, pCompBlock->len) < pCompBlock->len) { + if (taosRead(fd, (void *)pCompData, pCompBlock->len) < pCompBlock->len) { tsdbError("vgId:%d failed to read %d bytes from file %s since %s", REPO_ID(pHelper->pRepo), pCompBlock->len, pFile->fname, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); @@ -1439,51 +1479,62 @@ static void *tsdbDecodeSCompIdx(void *buf, SCompIdx *pIdx) { } static int tsdbProcessAppendCommit(SRWHelper *pHelper, SCommitIter *pCommitIter, SDataCols *pDataCols, TSKEY maxKey) { - STsdbCfg * pCfg = &(pHelper->pRepo->config); - STable * pTable = pCommitIter->pTable; - SCompIdx * pIdx = &(pHelper->curCompIdx); - TSKEY keyFirst = tsdbNextIterKey(pCommitIter->pIter); - int defaultRowsInBlock = pCfg->maxRowsPerFileBlock * 4 / 5; - SCompBlock compBlock = {0}; + STsdbCfg * pCfg = &(pHelper->pRepo->config); + STable * pTable = pCommitIter->pTable; + SCompIdx * pIdx = &(pHelper->curCompIdx); + TSKEY keyFirst = tsdbNextIterKey(pCommitIter->pIter); + int defaultRowsInBlock = pCfg->maxRowsPerFileBlock * 4 / 5; + SCompBlock compBlock = {0}; + SMergeInfo mergeInfo = {0}; + SMergeInfo *pMergeInfo = &mergeInfo; ASSERT(pIdx->len <= 0 || keyFirst > pIdx->maxKey); if (pIdx->hasLast) { // append to with last block ASSERT(pIdx->len > 0); SCompBlock *pCompBlock = blockAtIdx(pHelper, pIdx->numOfBlocks - 1); ASSERT(pCompBlock->last && pCompBlock->numOfRows < pCfg->minRowsPerFileBlock); - tdResetDataCols(pDataCols); - int rowsRead = tsdbLoadDataFromCache(pTable, pCommitIter->pIter, maxKey, defaultRowsInBlock - pCompBlock->numOfRows, - pDataCols, NULL, 0); - ASSERT(rowsRead > 0 && rowsRead == pDataCols->numOfRows); - if (rowsRead + pCompBlock->numOfRows < pCfg->minRowsPerFileBlock && - pCompBlock->numOfSubBlocks < TSDB_MAX_SUBBLOCKS && !TSDB_NLAST_FILE_OPENED(pHelper)) { - if (tsdbWriteBlockToFile(pHelper, helperLastF(pHelper), pDataCols, &compBlock, true, false) < 0) return -1; - if (tsdbAddSubBlock(pHelper, &compBlock, pIdx->numOfBlocks - 1, rowsRead) < 0) return -1; - } else { - if (tsdbLoadBlockData(pHelper, pCompBlock, NULL) < 0) return -1; - ASSERT(pHelper->pDataCols[0]->numOfRows == pCompBlock->numOfRows); + tsdbLoadDataFromCache(pTable, pCommitIter->pIter, maxKey, defaultRowsInBlock - pCompBlock->numOfRows, pDataCols, + NULL, 0, pCfg->update, pMergeInfo); - if (tdMergeDataCols(pHelper->pDataCols[0], pDataCols, pDataCols->numOfRows) < 0) return -1; - ASSERT(pHelper->pDataCols[0]->numOfRows == pCompBlock->numOfRows + 
pDataCols->numOfRows); + ASSERT(pMergeInfo->rowsInserted == pMergeInfo->nOperations && pMergeInfo->nOperations == pDataCols->numOfRows); - if (tsdbWriteBlockToProperFile(pHelper, pHelper->pDataCols[0], &compBlock) < 0) return -1; - if (tsdbUpdateSuperBlock(pHelper, &compBlock, pIdx->numOfBlocks - 1) < 0) return -1; - } + if (pDataCols->numOfRows > 0) { + ASSERT((pMergeInfo->keyFirst == dataColsKeyFirst(pDataCols)) && (pMergeInfo->keyLast == dataColsKeyLast(pDataCols))); - if (pHelper->hasOldLastBlock) pHelper->hasOldLastBlock = false; + if (pDataCols->numOfRows + pCompBlock->numOfRows < pCfg->minRowsPerFileBlock && + pCompBlock->numOfSubBlocks < TSDB_MAX_SUBBLOCKS && !TSDB_NLAST_FILE_OPENED(pHelper)) { + if (tsdbWriteBlockToFile(pHelper, helperLastF(pHelper), pDataCols, &compBlock, true, false) < 0) return -1; + pMergeInfo->keyFirst = MIN(pMergeInfo->keyFirst, pCompBlock->keyFirst); + pMergeInfo->keyLast = MAX(pMergeInfo->keyLast, pCompBlock->keyLast); + if (tsdbAddSubBlock(pHelper, &compBlock, pIdx->numOfBlocks - 1, pMergeInfo) < 0) return -1; + } else { + if (tsdbLoadBlockData(pHelper, pCompBlock, NULL) < 0) return -1; + ASSERT(pHelper->pDataCols[0]->numOfRows == pCompBlock->numOfRows); + + if (tdMergeDataCols(pHelper->pDataCols[0], pDataCols, pDataCols->numOfRows) < 0) return -1; + ASSERT(pHelper->pDataCols[0]->numOfRows == pCompBlock->numOfRows + pDataCols->numOfRows); + + if (tsdbWriteBlockToProperFile(pHelper, pHelper->pDataCols[0], &compBlock) < 0) return -1; + if (tsdbUpdateSuperBlock(pHelper, &compBlock, pIdx->numOfBlocks - 1) < 0) return -1; + } + + if (pHelper->hasOldLastBlock) pHelper->hasOldLastBlock = false; + } } else { ASSERT(!pHelper->hasOldLastBlock); - tdResetDataCols(pDataCols); - int rowsRead = tsdbLoadDataFromCache(pTable, pCommitIter->pIter, maxKey, defaultRowsInBlock, pDataCols, NULL, 0); - ASSERT(rowsRead > 0 && rowsRead == pDataCols->numOfRows); + tsdbLoadDataFromCache(pTable, pCommitIter->pIter, maxKey, defaultRowsInBlock, pDataCols, NULL, 0, pCfg->update, pMergeInfo); + ASSERT(pMergeInfo->rowsInserted == pMergeInfo->nOperations && pMergeInfo->nOperations == pDataCols->numOfRows); - if (tsdbWriteBlockToProperFile(pHelper, pDataCols, &compBlock) < 0) return -1; - if (tsdbInsertSuperBlock(pHelper, &compBlock, pIdx->numOfBlocks) < 0) return -1; + if (pDataCols->numOfRows > 0) { + ASSERT((pMergeInfo->keyFirst == dataColsKeyFirst(pDataCols)) && (pMergeInfo->keyLast == dataColsKeyLast(pDataCols))); + if (tsdbWriteBlockToProperFile(pHelper, pDataCols, &compBlock) < 0) return -1; + if (tsdbInsertSuperBlock(pHelper, &compBlock, pIdx->numOfBlocks) < 0) return -1; + } } #ifndef NDEBUG TSKEY keyNext = tsdbNextIterKey(pCommitIter->pIter); - ASSERT(keyNext < 0 || keyNext > pIdx->maxKey); + ASSERT(keyNext == TSDB_DATA_TIMESTAMP_NULL || keyNext > pIdx->maxKey); #endif return 0; @@ -1491,13 +1542,16 @@ static int tsdbProcessAppendCommit(SRWHelper *pHelper, SCommitIter *pCommitIter, static int tsdbProcessMergeCommit(SRWHelper *pHelper, SCommitIter *pCommitIter, SDataCols *pDataCols, TSKEY maxKey, int *blkIdx) { - STsdbCfg * pCfg = &(pHelper->pRepo->config); - STable * pTable = pCommitIter->pTable; - SCompIdx * pIdx = &(pHelper->curCompIdx); - SCompBlock compBlock = {0}; - TSKEY keyFirst = tsdbNextIterKey(pCommitIter->pIter); - int defaultRowsInBlock = pCfg->maxRowsPerFileBlock * 4 / 5; - SDataCols *pDataCols0 = pHelper->pDataCols[0]; + STsdbCfg * pCfg = &(pHelper->pRepo->config); + STable * pTable = pCommitIter->pTable; + SCompIdx * pIdx = &(pHelper->curCompIdx); + SCompBlock 
compBlock = {0}; + TSKEY keyFirst = tsdbNextIterKey(pCommitIter->pIter); + int defaultRowsInBlock = pCfg->maxRowsPerFileBlock * 4 / 5; + SDataCols * pDataCols0 = pHelper->pDataCols[0]; + SMergeInfo mergeInfo = {0}; + SMergeInfo *pMergeInfo = &mergeInfo; + SCompBlock oBlock = {0}; SSkipListIterator slIter = {0}; @@ -1507,123 +1561,82 @@ static int tsdbProcessMergeCommit(SRWHelper *pHelper, SCommitIter *pCommitIter, pIdx->numOfBlocks - *blkIdx, sizeof(SCompBlock), compareKeyBlock, TD_GE); ASSERT(pCompBlock != NULL); int tblkIdx = (int32_t)(TSDB_GET_COMPBLOCK_IDX(pHelper, pCompBlock)); + oBlock = *pCompBlock; + + ASSERT((!TSDB_IS_LAST_BLOCK(&oBlock)) || (tblkIdx == pIdx->numOfBlocks - 1)); - if (pCompBlock->last) { - ASSERT(pCompBlock->numOfRows < pCfg->minRowsPerFileBlock && tblkIdx == pIdx->numOfBlocks - 1); + if ((!TSDB_IS_LAST_BLOCK(&oBlock)) && keyFirst < pCompBlock->keyFirst) { + while (true) { + tsdbLoadDataFromCache(pTable, pCommitIter->pIter, oBlock.keyFirst-1, defaultRowsInBlock, pDataCols, NULL, 0, + pCfg->update, pMergeInfo); + ASSERT(pMergeInfo->rowsInserted == pMergeInfo->nOperations && pMergeInfo->nOperations == pDataCols->numOfRows); + if (pDataCols->numOfRows == 0) break; + + if (tsdbWriteBlockToFile(pHelper, helperDataF(pHelper), pDataCols, &compBlock, false, true) < 0) return -1; + if (tsdbInsertSuperBlock(pHelper, &compBlock, tblkIdx) < 0) return -1; + tblkIdx++; + } + ASSERT(tblkIdx == 0 || (tsdbNextIterKey(pCommitIter->pIter) == TSDB_DATA_TIMESTAMP_NULL || + tsdbNextIterKey(pCommitIter->pIter) > blockAtIdx(pHelper, tblkIdx - 1)->keyLast)); + } else { int16_t colId = 0; + if (tsdbLoadBlockDataCols(pHelper, &oBlock, NULL, &colId, 1) < 0) return -1; + + TSKEY keyLimit = (tblkIdx == pIdx->numOfBlocks - 1) ? maxKey : (blockAtIdx(pHelper, tblkIdx + 1)->keyFirst - 1); + slIter = *(pCommitIter->pIter); - if (tsdbLoadBlockDataCols(pHelper, pCompBlock, NULL, &colId, 1) < 0) return -1; - ASSERT(pDataCols0->numOfRows == pCompBlock->numOfRows); + tsdbLoadDataFromCache(pTable, &slIter, keyLimit, INT_MAX, NULL, pDataCols0->cols[0].pData, pDataCols0->numOfRows, + pCfg->update, pMergeInfo); - int rows1 = defaultRowsInBlock - pCompBlock->numOfRows; - int rows2 = - tsdbLoadDataFromCache(pTable, &slIter, maxKey, rows1, NULL, pDataCols0->cols[0].pData, pDataCols0->numOfRows); - if (rows2 == 0) { // all data filtered out + if (pMergeInfo->nOperations == 0) { + // Do nothing + ASSERT(pMergeInfo->rowsDeleteFailed >= 0); *(pCommitIter->pIter) = slIter; + tblkIdx++; + } else if (oBlock.numOfRows + pMergeInfo->rowsInserted - pMergeInfo->rowsDeleteSucceed == 0) { + // Delete the block and do some stuff + // ASSERT(pMergeInfo->keyFirst == INT64_MAX && pMergeInfo->keyFirst == INT64_MIN); + if (tsdbDeleteSuperBlock(pHelper, tblkIdx) < 0) return -1; + *pCommitIter->pIter = slIter; + if (oBlock.last && pHelper->hasOldLastBlock) pHelper->hasOldLastBlock = false; + } else if (tsdbCheckAddSubBlockCond(pHelper, &oBlock, pMergeInfo, pDataCols->maxPoints)) { + // Append as a sub-block of the searched block + tsdbLoadDataFromCache(pTable, pCommitIter->pIter, keyLimit, INT_MAX, pDataCols, pDataCols0->cols[0].pData, + pDataCols0->numOfRows, pCfg->update, pMergeInfo); + ASSERT(memcmp(pCommitIter->pIter, &slIter, sizeof(slIter)) == 0); + if (tsdbWriteBlockToFile(pHelper, oBlock.last ? 
helperLastF(pHelper) : helperDataF(pHelper), pDataCols, + &compBlock, oBlock.last, false) < 0) { + return -1; + } + if (tsdbAddSubBlock(pHelper, &compBlock, tblkIdx, pMergeInfo) < 0) { + return -1; + } + tblkIdx++; } else { - if (pCompBlock->numOfRows + rows2 < pCfg->minRowsPerFileBlock && - pCompBlock->numOfSubBlocks < TSDB_MAX_SUBBLOCKS && !TSDB_NLAST_FILE_OPENED(pHelper)) { - tdResetDataCols(pDataCols); - int rowsRead = tsdbLoadDataFromCache(pTable, pCommitIter->pIter, maxKey, rows1, pDataCols, - pDataCols0->cols[0].pData, pDataCols0->numOfRows); - ASSERT(rowsRead == rows2 && rowsRead == pDataCols->numOfRows); - if (tsdbWriteBlockToFile(pHelper, helperLastF(pHelper), pDataCols, &compBlock, true, false) < 0) return -1; - if (tsdbAddSubBlock(pHelper, &compBlock, tblkIdx, rowsRead) < 0) return -1; - tblkIdx++; - } else { - if (tsdbLoadBlockData(pHelper, pCompBlock, NULL) < 0) return -1; - int round = 0; - int dIter = 0; - while (true) { - tdResetDataCols(pDataCols); - int rowsRead = - tsdbLoadAndMergeFromCache(pDataCols0, &dIter, pCommitIter, pDataCols, maxKey, defaultRowsInBlock); - if (rowsRead == 0) break; + // load the block data, merge with the memory data + if (tsdbLoadBlockData(pHelper, &oBlock, NULL) < 0) return -1; + int round = 0; + int dIter = 0; + while (true) { + tsdbLoadAndMergeFromCache(pDataCols0, &dIter, pCommitIter, pDataCols, keyLimit, defaultRowsInBlock, + pCfg->update); + if (pDataCols->numOfRows == 0) break; + if (tblkIdx == pIdx->numOfBlocks - 1) { if (tsdbWriteBlockToProperFile(pHelper, pDataCols, &compBlock) < 0) return -1; - if (round == 0) { - if (tsdbUpdateSuperBlock(pHelper, &compBlock, tblkIdx) < 0) return -1; - } else { - if (tsdbInsertSuperBlock(pHelper, &compBlock, tblkIdx) < 0) return -1; - } - - tblkIdx++; - round++; + } else { + if (tsdbWriteBlockToFile(pHelper, helperDataF(pHelper), pDataCols, &compBlock, false, true) < 0) return -1; } - } - if (pHelper->hasOldLastBlock) pHelper->hasOldLastBlock = false; - } - } else { - TSKEY keyLimit = (tblkIdx == pIdx->numOfBlocks - 1) ? 
maxKey : (pCompBlock[1].keyFirst - 1); - TSKEY blkKeyFirst = pCompBlock->keyFirst; - TSKEY blkKeyLast = pCompBlock->keyLast; - if (keyFirst < blkKeyFirst) { - while (true) { - tdResetDataCols(pDataCols); - int rowsRead = - tsdbLoadDataFromCache(pTable, pCommitIter->pIter, blkKeyFirst - 1, defaultRowsInBlock, pDataCols, NULL, 0); - if (rowsRead == 0) break; - - ASSERT(rowsRead == pDataCols->numOfRows); - if (tsdbWriteBlockToFile(pHelper, helperDataF(pHelper), pDataCols, &compBlock, false, true) < 0) return -1; - if (tsdbInsertSuperBlock(pHelper, &compBlock, tblkIdx) < 0) return -1; - tblkIdx++; - } - ASSERT(tblkIdx == 0 || (tsdbNextIterKey(pCommitIter->pIter) < 0 || - tsdbNextIterKey(pCommitIter->pIter) > blockAtIdx(pHelper, tblkIdx - 1)->keyLast)); - } else { - ASSERT(keyFirst <= blkKeyLast); - int16_t colId = 0; - if (tsdbLoadBlockDataCols(pHelper, pCompBlock, NULL, &colId, 1) < 0) return -1; - - slIter = *(pCommitIter->pIter); - int rows1 = (pCfg->maxRowsPerFileBlock - pCompBlock->numOfRows); - int rows2 = tsdbLoadDataFromCache(pTable, &slIter, blkKeyLast, INT_MAX, NULL, pDataCols0->cols[0].pData, - pDataCols0->numOfRows); - - if (rows2 == 0) { // all filtered out - *(pCommitIter->pIter) = slIter; - ASSERT(tblkIdx == 0 || (tsdbNextIterKey(pCommitIter->pIter) < 0 || - tsdbNextIterKey(pCommitIter->pIter) > blockAtIdx(pHelper, tblkIdx - 1)->keyLast)); - } else { - int rows3 = tsdbLoadDataFromCache(pTable, &slIter, keyLimit, INT_MAX, NULL, NULL, 0) + rows2; - - if (pCompBlock->numOfSubBlocks < TSDB_MAX_SUBBLOCKS && rows1 >= rows2) { - int rows = (rows1 >= rows3) ? rows3 : rows2; - tdResetDataCols(pDataCols); - int rowsRead = tsdbLoadDataFromCache(pTable, pCommitIter->pIter, keyLimit, rows, pDataCols, - pDataCols0->cols[0].pData, pDataCols0->numOfRows); - ASSERT(rowsRead == rows && rowsRead == pDataCols->numOfRows); - if (tsdbWriteBlockToFile(pHelper, helperDataF(pHelper), pDataCols, &compBlock, false, false) < 0) - return -1; - if (tsdbAddSubBlock(pHelper, &compBlock, tblkIdx, rowsRead) < 0) return -1; - tblkIdx++; - ASSERT(tblkIdx == 0 || (tsdbNextIterKey(pCommitIter->pIter) < 0 || - tsdbNextIterKey(pCommitIter->pIter) > blockAtIdx(pHelper, tblkIdx - 1)->keyLast)); + if (round == 0) { + if (oBlock.last && pHelper->hasOldLastBlock) pHelper->hasOldLastBlock = false; + if (tsdbUpdateSuperBlock(pHelper, &compBlock, tblkIdx) < 0) return -1; } else { - if (tsdbLoadBlockData(pHelper, pCompBlock, NULL) < 0) return -1; - int round = 0; - int dIter = 0; - while (true) { - int rowsRead = - tsdbLoadAndMergeFromCache(pDataCols0, &dIter, pCommitIter, pDataCols, keyLimit, defaultRowsInBlock); - if (rowsRead == 0) break; - - if (tsdbWriteBlockToFile(pHelper, helperDataF(pHelper), pDataCols, &compBlock, false, true) < 0) - return -1; - if (round == 0) { - if (tsdbUpdateSuperBlock(pHelper, &compBlock, tblkIdx) < 0) return -1; - } else { - if (tsdbInsertSuperBlock(pHelper, &compBlock, tblkIdx) < 0) return -1; - } - - round++; - tblkIdx++; - } - ASSERT(tblkIdx == 0 || (tsdbNextIterKey(pCommitIter->pIter) < 0 || - tsdbNextIterKey(pCommitIter->pIter) > blockAtIdx(pHelper, tblkIdx - 1)->keyLast)); + if (tsdbInsertSuperBlock(pHelper, &compBlock, tblkIdx) < 0) return -1; } + + round++; + tblkIdx++; } } } @@ -1632,9 +1645,8 @@ static int tsdbProcessMergeCommit(SRWHelper *pHelper, SCommitIter *pCommitIter, return 0; } -static int tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIter *pCommitIter, SDataCols *pTarget, - TSKEY maxKey, int maxRows) { - int numOfRows = 0; +static void 
tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIter *pCommitIter, SDataCols *pTarget, + TSKEY maxKey, int maxRows, int8_t update) { TSKEY key1 = INT64_MAX; TSKEY key2 = INT64_MAX; STSchema *pSchema = NULL; @@ -1644,35 +1656,62 @@ static int tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIte while (true) { key1 = (*iter >= pDataCols->numOfRows) ? INT64_MAX : dataColsKeyAt(pDataCols, *iter); + bool isRowDel = false; SDataRow row = tsdbNextIterRow(pCommitIter->pIter); - key2 = (row == NULL || dataRowKey(row) > maxKey) ? INT64_MAX : dataRowKey(row); + if (row == NULL || dataRowKey(row) > maxKey) { + key2 = INT64_MAX; + } else { + key2 = dataRowKey(row); + isRowDel = dataRowDeleted(row); + } if (key1 == INT64_MAX && key2 == INT64_MAX) break; - if (key1 <= key2) { + if (key1 < key2) { for (int i = 0; i < pDataCols->numOfCols; i++) { dataColAppendVal(pTarget->cols + i, tdGetColDataOfRow(pDataCols->cols + i, *iter), pTarget->numOfRows, pTarget->maxPoints); } + pTarget->numOfRows++; (*iter)++; - if (key1 == key2) tSkipListIterNext(pCommitIter->pIter); - } else { - if (pSchema == NULL || schemaVersion(pSchema) != dataRowVersion(row)) { - pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, dataRowVersion(row)); - ASSERT(pSchema != NULL); + } else if (key1 > key2) { + if (!isRowDel) { + if (pSchema == NULL || schemaVersion(pSchema) != dataRowVersion(row)) { + pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, dataRowVersion(row)); + ASSERT(pSchema != NULL); + } + + tdAppendDataRowToDataCol(row, pSchema, pTarget); } - tdAppendDataRowToDataCol(row, pSchema, pTarget); + tSkipListIterNext(pCommitIter->pIter); + } else { + if (update) { + if (!isRowDel) { + if (pSchema == NULL || schemaVersion(pSchema) != dataRowVersion(row)) { + pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, dataRowVersion(row)); + ASSERT(pSchema != NULL); + } + + tdAppendDataRowToDataCol(row, pSchema, pTarget); + } + } else { + ASSERT(!isRowDel); + + for (int i = 0; i < pDataCols->numOfCols; i++) { + dataColAppendVal(pTarget->cols + i, tdGetColDataOfRow(pDataCols->cols + i, *iter), pTarget->numOfRows, + pTarget->maxPoints); + } + + pTarget->numOfRows++; + } + (*iter)++; tSkipListIterNext(pCommitIter->pIter); } - numOfRows++; - if (numOfRows >= maxRows) break; - ASSERT(numOfRows == pTarget->numOfRows && numOfRows <= pTarget->maxPoints); + if (pTarget->numOfRows >= maxRows) break; } - - return numOfRows; } static int tsdbWriteBlockToProperFile(SRWHelper *pHelper, SDataCols *pDataCols, SCompBlock *pCompBlock) { @@ -1695,3 +1734,20 @@ static int tsdbWriteBlockToProperFile(SRWHelper *pHelper, SDataCols *pDataCols, return 0; } + +static bool tsdbCheckAddSubBlockCond(SRWHelper *pHelper, SCompBlock *pCompBlock, SMergeInfo *pMergeInfo, int maxOps) { + STsdbCfg *pCfg = &(pHelper->pRepo->config); + int mergeRows = pCompBlock->numOfRows + pMergeInfo->rowsInserted - pMergeInfo->rowsDeleteSucceed; + + ASSERT(mergeRows > 0); + + if (pCompBlock->numOfSubBlocks < TSDB_MAX_SUBBLOCKS && pMergeInfo->nOperations <= maxOps) { + if (pCompBlock->last) { + if (!TSDB_NLAST_FILE_OPENED(pHelper) && mergeRows < pCfg->minRowsPerFileBlock) return true; + } else { + if (mergeRows < pCfg->maxRowsPerFileBlock) return true; + } + } + + return false; +} \ No newline at end of file diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index ac6c2e0c5a9d5590e7dd57863dd18d8726912de5..d5cc566b5541a41e863d4c136746019b4b172ce0 100644 --- a/src/tsdb/src/tsdbRead.c +++ 
b/src/tsdb/src/tsdbRead.c @@ -20,7 +20,6 @@ #include "exception.h" #include "../../query/inc/qAst.h" // todo move to common module -#include "../../query/inc/qExecutor.h" // todo move to common module #include "tlosertree.h" #include "tsdb.h" #include "tsdbMain.h" @@ -72,8 +71,8 @@ typedef struct STableCheckInfo { STable* pTableObj; SCompInfo* pCompInfo; int32_t compSize; - int32_t numOfBlocks; // number of qualified data blocks not the original blocks - int32_t chosen; // indicate which iterator should move forward + int32_t numOfBlocks:29; // number of qualified data blocks not the original blocks + int8_t chosen:2; // indicate which iterator should move forward bool initBuf; // whether to initialize the in-memory skip list iterator or not SSkipListIterator* iter; // mem buffer skip list iterator SSkipListIterator* iiter; // imem buffer skip list iterator @@ -120,8 +119,7 @@ typedef struct STsdbQueryHandle { SDataCols *pDataCols; // in order to hold current file data block int32_t allocSize; // allocated data block size - SMemTable *mem; // mem-table - SMemTable *imem; // imem-table, acquired from snapshot + SMemRef *pMemRef; SArray *defaultLoadColumn;// default load column SDataBlockLoadInfo dataBlockLoadInfo; /* record current block load information */ SLoadCompBlockInfo compBlockLoadInfo; /* record current compblock information in SQuery */ @@ -184,27 +182,31 @@ static SArray* getDefaultLoadColumns(STsdbQueryHandle* pQueryHandle, bool loadTS return pLocalIdList; } -static void tsdbMayTakeMemSnapshot(TsdbQueryHandleT pHandle) { - STsdbQueryHandle* pSecQueryHandle = (STsdbQueryHandle*) pHandle; - SQInfo *pQInfo = (SQInfo *)(pSecQueryHandle->qinfo); +static void tsdbMayTakeMemSnapshot(STsdbQueryHandle* pQueryHandle) { + assert(pQueryHandle != NULL && pQueryHandle->pMemRef != NULL); - if (pQInfo->memRef.ref++ == 0) { - tsdbTakeMemSnapshot(pSecQueryHandle->pTsdb, &pSecQueryHandle->mem, &pSecQueryHandle->imem); - pQInfo->memRef.mem = pSecQueryHandle->mem; - pQInfo->memRef.imem = pSecQueryHandle->imem; - } else { - pSecQueryHandle->mem = (SMemTable *)(pQInfo->memRef.mem); - pSecQueryHandle->imem = (SMemTable *)(pQInfo->memRef.imem); + SMemRef* pMemRef = pQueryHandle->pMemRef; + if (pQueryHandle->pMemRef->ref++ == 0) { + tsdbTakeMemSnapshot(pQueryHandle->pTsdb, (SMemTable**)&(pMemRef->mem), (SMemTable**)&(pMemRef->imem)); } } -static void tsdbMayUnTakeMemSnapshot(TsdbQueryHandleT pHandle) { - STsdbQueryHandle* pSecQueryHandle = (STsdbQueryHandle*) pHandle; - SQInfo *pQInfo = (SQInfo *)(pSecQueryHandle->qinfo); - if (--pQInfo->memRef.ref == 0) { - tsdbUnTakeMemSnapShot(pSecQueryHandle->pTsdb, pSecQueryHandle->mem, pSecQueryHandle->imem); +static void tsdbMayUnTakeMemSnapshot(STsdbQueryHandle* pQueryHandle) { + assert(pQueryHandle != NULL); + SMemRef* pMemRef = pQueryHandle->pMemRef; + if (pMemRef == NULL) { // it has been freed + return; + } + + if (--pMemRef->ref == 0) { + tsdbUnTakeMemSnapShot(pQueryHandle->pTsdb, pMemRef->mem, pMemRef->imem); + pMemRef->mem = NULL; + pMemRef->imem = NULL; } + + pQueryHandle->pMemRef = NULL; } + static SArray* createCheckInfoFromTableGroup(STsdbQueryHandle* pQueryHandle, STableGroupInfo* pGroupList, STsdbMeta* pMeta) { size_t sizeOfGroup = taosArrayGetSize(pGroupList->pGroupList); assert(sizeOfGroup >= 1 && pMeta != NULL); @@ -270,7 +272,7 @@ static SArray* createCheckInfoFromCheckInfo(SArray* pTableCheckInfo, TSKEY skey) return pNew; } -static STsdbQueryHandle* tsdbQueryTablesImpl(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, void* qinfo) { +static 
STsdbQueryHandle* tsdbQueryTablesImpl(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, void* qinfo, SMemRef* pMemRef) { STsdbQueryHandle* pQueryHandle = calloc(1, sizeof(STsdbQueryHandle)); if (pQueryHandle == NULL) { goto out_of_memory; @@ -288,13 +290,14 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(TSDB_REPO_T* tsdb, STsdbQueryCond* pQueryHandle->outputCapacity = ((STsdbRepo*)tsdb)->config.maxRowsPerFileBlock; pQueryHandle->allocSize = 0; pQueryHandle->locateStart = false; + pQueryHandle->pMemRef = pMemRef; if (tsdbInitReadHelper(&pQueryHandle->rhelper, (STsdbRepo*) tsdb) != 0) { goto out_of_memory; } tsdbMayTakeMemSnapshot(pQueryHandle); - assert(pCond != NULL && pCond->numOfCols > 0); + assert(pCond != NULL && pCond->numOfCols > 0 && pMemRef != NULL); if (ASCENDING_TRAVERSE(pCond->order)) { assert(pQueryHandle->window.skey <= pQueryHandle->window.ekey); @@ -348,8 +351,8 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(TSDB_REPO_T* tsdb, STsdbQueryCond* return NULL; } -TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STableGroupInfo* groupList, void* qinfo) { - STsdbQueryHandle* pQueryHandle = tsdbQueryTablesImpl(tsdb, pCond, qinfo); +TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STableGroupInfo* groupList, void* qinfo, SMemRef* pRef) { + STsdbQueryHandle* pQueryHandle = tsdbQueryTablesImpl(tsdb, pCond, qinfo, pRef); STsdbMeta* pMeta = tsdbGetMeta(tsdb); assert(pMeta != NULL); @@ -366,7 +369,7 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab return (TsdbQueryHandleT) pQueryHandle; } -TsdbQueryHandleT tsdbQueryLastRow(TSDB_REPO_T *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList, void* qinfo) { +TsdbQueryHandleT tsdbQueryLastRow(TSDB_REPO_T *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList, void* qinfo, SMemRef* pMemRef) { pCond->twindow = changeTableGroupByLastrow(groupList); // no qualified table @@ -374,7 +377,7 @@ TsdbQueryHandleT tsdbQueryLastRow(TSDB_REPO_T *tsdb, STsdbQueryCond *pCond, STab return NULL; } - STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList, qinfo); + STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList, qinfo, pMemRef); assert(pCond->order == TSDB_ORDER_ASC && pCond->twindow.skey <= pCond->twindow.ekey); return pQueryHandle; @@ -396,8 +399,8 @@ SArray* tsdbGetQueriedTableList(TsdbQueryHandleT *pHandle) { return res; } -TsdbQueryHandleT tsdbQueryRowsInExternalWindow(TSDB_REPO_T *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList, void* qinfo) { - STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList, qinfo); +TsdbQueryHandleT tsdbQueryRowsInExternalWindow(TSDB_REPO_T *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList, void* qinfo, SMemRef* pRef) { + STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList, qinfo, pRef); if (pQueryHandle != NULL) { pQueryHandle->type = TSDB_QUERY_TYPE_EXTERNAL; changeQueryHandleForInterpQuery(pQueryHandle); @@ -417,7 +420,7 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh int32_t order = pHandle->order; // no data in buffer, abort - if (pHandle->mem == NULL && pHandle->imem == NULL) { + if (pHandle->pMemRef->mem == NULL && pHandle->pMemRef->imem == NULL) { return false; } @@ -426,16 +429,19 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh STableData* pMem = NULL; STableData* pIMem = NULL; - if 
(pHandle->mem && pCheckInfo->tableId.tid < pHandle->mem->maxTables) { - pMem = pHandle->mem->tData[pCheckInfo->tableId.tid]; + SMemTable* pMemT = pHandle->pMemRef->mem; + SMemTable* pIMemT = pHandle->pMemRef->imem; + + if (pMemT && pCheckInfo->tableId.tid < pMemT->maxTables) { + pMem = pMemT->tData[pCheckInfo->tableId.tid]; if (pMem != NULL && pMem->uid == pCheckInfo->tableId.uid) { // check uid pCheckInfo->iter = tSkipListCreateIterFromVal(pMem->pData, (const char*)&pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order); } } - if (pHandle->imem && pCheckInfo->tableId.tid < pHandle->imem->maxTables) { - pIMem = pHandle->imem->tData[pCheckInfo->tableId.tid]; + if (pIMemT && pCheckInfo->tableId.tid < pIMemT->maxTables) { + pIMem = pIMemT->tData[pCheckInfo->tableId.tid]; if (pIMem != NULL && pIMem->uid == pCheckInfo->tableId.uid) { // check uid pCheckInfo->iiter = tSkipListCreateIterFromVal(pIMem->pData, (const char*)&pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order); @@ -457,7 +463,7 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh SSkipListNode* node = tSkipListIterGet(pCheckInfo->iter); assert(node != NULL); - SDataRow row = *(SDataRow *)SL_GET_NODE_DATA(node); + SDataRow row = (SDataRow)SL_GET_NODE_DATA(node); TSKEY key = dataRowKey(row); // first timestamp in buffer tsdbDebug("%p uid:%" PRId64 ", tid:%d check data in mem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64 "-%" PRId64 ", lastKey:%" PRId64 ", numOfRows:%"PRId64", %p", @@ -479,7 +485,7 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh SSkipListNode* node = tSkipListIterGet(pCheckInfo->iiter); assert(node != NULL); - SDataRow row = *(SDataRow *)SL_GET_NODE_DATA(node); + SDataRow row = (SDataRow)SL_GET_NODE_DATA(node); TSKEY key = dataRowKey(row); // first timestamp in buffer tsdbDebug("%p uid:%" PRId64 ", tid:%d check data in imem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64 "-%" PRId64 ", lastKey:%" PRId64 ", numOfRows:%"PRId64", %p", @@ -504,19 +510,19 @@ static void destroyTableMemIterator(STableCheckInfo* pCheckInfo) { tSkipListDestroyIter(pCheckInfo->iiter); } -static SDataRow getSDataRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order) { +static SDataRow getSDataRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order, int32_t update) { SDataRow rmem = NULL, rimem = NULL; if (pCheckInfo->iter) { SSkipListNode* node = tSkipListIterGet(pCheckInfo->iter); if (node != NULL) { - rmem = *(SDataRow *)SL_GET_NODE_DATA(node); + rmem = (SDataRow)SL_GET_NODE_DATA(node); } } if (pCheckInfo->iiter) { SSkipListNode* node = tSkipListIterGet(pCheckInfo->iiter); if (node != NULL) { - rimem = *(SDataRow *)SL_GET_NODE_DATA(node); + rimem = (SDataRow)SL_GET_NODE_DATA(node); } } @@ -538,9 +544,15 @@ static SDataRow getSDataRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order TSKEY r2 = dataRowKey(rimem); if (r1 == r2) { // data ts are duplicated, ignore the data in mem - tSkipListIterNext(pCheckInfo->iter); - pCheckInfo->chosen = 1; - return rimem; + if (!update) { + tSkipListIterNext(pCheckInfo->iter); + pCheckInfo->chosen = 1; + return rimem; + } else { + tSkipListIterNext(pCheckInfo->iiter); + pCheckInfo->chosen = 0; + return rmem; + } } else { if (ASCENDING_TRAVERSE(order)) { if (r1 < r2) { @@ -594,6 +606,7 @@ static bool moveToNextRowInMem(STableCheckInfo* pCheckInfo) { } static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) { + STsdbCfg *pCfg = &pHandle->pTsdb->config; size_t size = taosArrayGetSize(pHandle->pTableCheckInfo); 
assert(pHandle->activeIndex < size && pHandle->activeIndex >= 0 && size >= 1); pHandle->cur.fid = -1; @@ -607,7 +620,7 @@ static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) { initTableMemIterator(pHandle, pCheckInfo); } - SDataRow row = getSDataRowInTableMem(pCheckInfo, pHandle->order); + SDataRow row = getSDataRowInTableMem(pCheckInfo, pHandle->order, pCfg->update); if (row == NULL) { return false; } @@ -827,11 +840,12 @@ static void copyAllRemainRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, STabl static int32_t handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo){ SQueryFilePos* cur = &pQueryHandle->cur; + STsdbCfg* pCfg = &pQueryHandle->pTsdb->config; SDataBlockInfo binfo = GET_FILE_DATA_BLOCK_INFO(pCheckInfo, pBlock); int32_t code = TSDB_CODE_SUCCESS; /*bool hasData = */ initTableMemIterator(pQueryHandle, pCheckInfo); - SDataRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order); + SDataRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order, pCfg->update); assert(cur->pos >= 0 && cur->pos <= binfo.rows); @@ -1263,7 +1277,6 @@ static void copyAllRemainRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, STabl int32_t end = endPos; if (!ASCENDING_TRAVERSE(pQueryHandle->order)) { - assert(start >= end); SWAP(start, end, int32_t); } @@ -1317,6 +1330,7 @@ int32_t getEndPosInDataBlock(STsdbQueryHandle* pQueryHandle, SDataBlockInfo* pBl static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SCompBlock* pBlock) { SQueryFilePos* cur = &pQueryHandle->cur; SDataBlockInfo blockInfo = GET_FILE_DATA_BLOCK_INFO(pCheckInfo, pBlock); + STsdbCfg* pCfg = &pQueryHandle->pTsdb->config; initTableMemIterator(pQueryHandle, pCheckInfo); @@ -1354,7 +1368,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* } else if (pCheckInfo->iter != NULL || pCheckInfo->iiter != NULL) { SSkipListNode* node = NULL; do { - SDataRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order); + SDataRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order, pCfg->update); if (row == NULL) { break; } @@ -1384,7 +1398,22 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* moveToNextRowInMem(pCheckInfo); } else if (key == tsArray[pos]) { // data in buffer has the same timestamp of data in file block, ignore it - moveToNextRowInMem(pCheckInfo); + if (pCfg->update) { + copyOneRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, row, numOfCols, pTable); + numOfRows += 1; + if (cur->win.skey == TSKEY_INITIAL_VAL) { + cur->win.skey = key; + } + + cur->win.ekey = key; + cur->lastKey = key + step; + cur->mixBlock = true; + + moveToNextRowInMem(pCheckInfo); + pos += step; + } else { + moveToNextRowInMem(pCheckInfo); + } } else if ((key > tsArray[pos] && ASCENDING_TRAVERSE(pQueryHandle->order)) || (key < tsArray[pos] && !ASCENDING_TRAVERSE(pQueryHandle->order))) { if (cur->win.skey == TSKEY_INITIAL_VAL) { @@ -1395,7 +1424,11 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* assert(end != -1); if (tsArray[end] == key) { // the value of key in cache equals to the end timestamp value, ignore it - moveToNextRowInMem(pCheckInfo); + if (!pCfg->update) { + moveToNextRowInMem(pCheckInfo); + } else { + end -= step; + } } int32_t qstart = 0, qend = 0; @@ -1415,8 +1448,8 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* * copy them all to result buffer, since it may be overlapped with 
file data block. */ if (node == NULL || - ((dataRowKey(*(SDataRow *)SL_GET_NODE_DATA(node)) > pQueryHandle->window.ekey) && ASCENDING_TRAVERSE(pQueryHandle->order)) || - ((dataRowKey(*(SDataRow *)SL_GET_NODE_DATA(node)) < pQueryHandle->window.ekey) && !ASCENDING_TRAVERSE(pQueryHandle->order))) { + ((dataRowKey((SDataRow)SL_GET_NODE_DATA(node)) > pQueryHandle->window.ekey) && ASCENDING_TRAVERSE(pQueryHandle->order)) || + ((dataRowKey((SDataRow)SL_GET_NODE_DATA(node)) < pQueryHandle->window.ekey) && !ASCENDING_TRAVERSE(pQueryHandle->order))) { // no data in cache or data in cache is greater than the ekey of time window, load data from file block if (cur->win.skey == TSKEY_INITIAL_VAL) { cur->win.skey = tsArray[pos]; @@ -1513,15 +1546,15 @@ int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order) { } static void cleanBlockOrderSupporter(SBlockOrderSupporter* pSupporter, int32_t numOfTables) { - taosTFree(pSupporter->numOfBlocksPerTable); - taosTFree(pSupporter->blockIndexArray); + tfree(pSupporter->numOfBlocksPerTable); + tfree(pSupporter->blockIndexArray); for (int32_t i = 0; i < numOfTables; ++i) { STableBlockInfo* pBlockInfo = pSupporter->pDataBlockInfo[i]; - taosTFree(pBlockInfo); + tfree(pBlockInfo); } - taosTFree(pSupporter->pDataBlockInfo); + tfree(pSupporter->pDataBlockInfo); } static int32_t dataBlockOrderCompar(const void* pLeft, const void* pRight, void* param) { @@ -1818,6 +1851,8 @@ static bool doHasDataInBuffer(STsdbQueryHandle* pQueryHandle) { pQueryHandle->activeIndex += 1; } + // no data in memtable or imemtable, decrease the memory reference. + tsdbMayUnTakeMemSnapshot(pQueryHandle); return false; } @@ -1863,13 +1898,14 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int STsdbQueryHandle* pQueryHandle) { int numOfRows = 0; int32_t numOfCols = (int32_t)taosArrayGetSize(pQueryHandle->pColumns); + STsdbCfg* pCfg = &pQueryHandle->pTsdb->config; win->skey = TSKEY_INITIAL_VAL; int64_t st = taosGetTimestampUs(); STable* pTable = pCheckInfo->pTableObj; do { - SDataRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order); + SDataRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order, pCfg->update); if (row == NULL) { break; } @@ -1920,9 +1956,9 @@ static int32_t getAllTableList(STable* pSuperTable, SArray* list) { while (tSkipListIterNext(iter)) { SSkipListNode* pNode = tSkipListIterGet(iter); - STable** pTable = (STable**) SL_GET_NODE_DATA((SSkipListNode*) pNode); + STable* pTable = (STable*) SL_GET_NODE_DATA((SSkipListNode*) pNode); - STableKeyInfo info = {.pTable = *pTable, .lastKey = TSKEY_INITIAL_VAL}; + STableKeyInfo info = {.pTable = pTable, .lastKey = TSKEY_INITIAL_VAL}; taosArrayPush(list, &info); } @@ -1938,122 +1974,136 @@ static void destroyHelper(void* param) { tQueryInfo* pInfo = (tQueryInfo*)param; if (pInfo->optr != TSDB_RELATION_IN) { - taosTFree(pInfo->q); + tfree(pInfo->q); } free(param); } -// handle data in cache situation -bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) { - STsdbQueryHandle* pQueryHandle = (STsdbQueryHandle*) pHandle; +static bool getNeighborRows(STsdbQueryHandle* pQueryHandle) { + assert (pQueryHandle->type == TSDB_QUERY_TYPE_EXTERNAL); - int64_t stime = taosGetTimestampUs(); - int64_t elapsedTime = stime; + SDataBlockInfo blockInfo = {{0}, 0}; - size_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo); - assert(numOfTables > 0); + pQueryHandle->type = TSDB_QUERY_TYPE_ALL; + pQueryHandle->order = TSDB_ORDER_DESC; - SDataBlockInfo blockInfo = {{0}, 0}; - if 
(pQueryHandle->type == TSDB_QUERY_TYPE_EXTERNAL) { - pQueryHandle->type = TSDB_QUERY_TYPE_ALL; - pQueryHandle->order = TSDB_ORDER_DESC; + if (!tsdbNextDataBlock((void*) pQueryHandle)) { + return false; + } - if (!tsdbNextDataBlock(pHandle)) { - return false; + tsdbRetrieveDataBlockInfo((void*) pQueryHandle, &blockInfo); + /*SArray *pDataBlock = */tsdbRetrieveDataBlock((void*) pQueryHandle, pQueryHandle->defaultLoadColumn); + if (terrno != TSDB_CODE_SUCCESS) { + return false; + } + + if (pQueryHandle->cur.win.ekey == pQueryHandle->window.skey) { + // data already retrieve, discard other data rows and return + int32_t numOfCols = (int32_t)(QH_GET_NUM_OF_COLS(pQueryHandle)); + for (int32_t i = 0; i < numOfCols; ++i) { + SColumnInfoData* pCol = taosArrayGet(pQueryHandle->pColumns, i); + memcpy((char*)pCol->pData, (char*)pCol->pData + pCol->info.bytes * (pQueryHandle->cur.rows - 1), pCol->info.bytes); } - tsdbRetrieveDataBlockInfo(pHandle, &blockInfo); - /*SArray *pDataBlock = */tsdbRetrieveDataBlock(pHandle, pQueryHandle->defaultLoadColumn); - if (terrno != TSDB_CODE_SUCCESS) { + pQueryHandle->cur.win = (STimeWindow){pQueryHandle->window.skey, pQueryHandle->window.skey}; + pQueryHandle->window = pQueryHandle->cur.win; + pQueryHandle->cur.rows = 1; + pQueryHandle->type = TSDB_QUERY_TYPE_ALL; + return true; + } else { + STimeWindow win = (STimeWindow) {pQueryHandle->window.skey, INT64_MAX}; + STsdbQueryCond cond = { + .order = TSDB_ORDER_ASC, + .numOfCols = (int32_t)(QH_GET_NUM_OF_COLS(pQueryHandle)) + }; + cond.twindow = win; + + cond.colList = calloc(cond.numOfCols, sizeof(SColumnInfo)); + if (cond.colList == NULL) { + terrno = TSDB_CODE_QRY_OUT_OF_MEMORY; return false; } - if (pQueryHandle->cur.win.ekey == pQueryHandle->window.skey) { - // data already retrieve, discard other data rows and return - int32_t numOfCols = (int32_t)(QH_GET_NUM_OF_COLS(pQueryHandle)); - for (int32_t i = 0; i < numOfCols; ++i) { - SColumnInfoData* pCol = taosArrayGet(pQueryHandle->pColumns, i); - memcpy((char*)pCol->pData, (char*)pCol->pData + pCol->info.bytes * (pQueryHandle->cur.rows - 1), pCol->info.bytes); - } + for(int32_t i = 0; i < cond.numOfCols; ++i) { + SColumnInfoData* pColInfoData = taosArrayGet(pQueryHandle->pColumns, i); + memcpy(&cond.colList[i], &pColInfoData->info, sizeof(SColumnInfo)); + } - pQueryHandle->cur.win = (STimeWindow){pQueryHandle->window.skey, pQueryHandle->window.skey}; - pQueryHandle->window = pQueryHandle->cur.win; - pQueryHandle->cur.rows = 1; - pQueryHandle->type = TSDB_QUERY_TYPE_ALL; - return true; - } else { - STimeWindow win = (STimeWindow) {pQueryHandle->window.skey, INT64_MAX}; - STsdbQueryCond cond = { - .order = TSDB_ORDER_ASC, - .numOfCols = (int32_t)(QH_GET_NUM_OF_COLS(pQueryHandle)) - }; - cond.twindow = win; - - cond.colList = calloc(cond.numOfCols, sizeof(SColumnInfo)); - if (cond.colList == NULL) { - terrno = TSDB_CODE_QRY_OUT_OF_MEMORY; - return false; - } + STsdbQueryHandle* pSecQueryHandle = tsdbQueryTablesImpl(pQueryHandle->pTsdb, &cond, pQueryHandle->qinfo, pQueryHandle->pMemRef); - for(int32_t i = 0; i < cond.numOfCols; ++i) { - SColumnInfoData* pColInfoData = taosArrayGet(pQueryHandle->pColumns, i); - memcpy(&cond.colList[i], &pColInfoData->info, sizeof(SColumnInfo)); - } + tfree(cond.colList); - STsdbQueryHandle* pSecQueryHandle = tsdbQueryTablesImpl(pQueryHandle->pTsdb, &cond, pQueryHandle->qinfo); + pSecQueryHandle->pTableCheckInfo = createCheckInfoFromCheckInfo(pQueryHandle->pTableCheckInfo, pSecQueryHandle->window.skey); + if 
(pSecQueryHandle->pTableCheckInfo == NULL) { + tsdbCleanupQueryHandle(pSecQueryHandle); + return false; + } - taosTFree(cond.colList); + if (!tsdbNextDataBlock((void*) pSecQueryHandle)) { + tsdbCleanupQueryHandle(pSecQueryHandle); + return false; + } - pSecQueryHandle->pTableCheckInfo = createCheckInfoFromCheckInfo(pQueryHandle->pTableCheckInfo, pSecQueryHandle->window.skey); - if (pSecQueryHandle->pTableCheckInfo == NULL) { - tsdbCleanupQueryHandle(pSecQueryHandle); - return false; - } + tsdbRetrieveDataBlockInfo((void*) pSecQueryHandle, &blockInfo); + tsdbRetrieveDataBlock((void*) pSecQueryHandle, pSecQueryHandle->defaultLoadColumn); - if (!tsdbNextDataBlock((void*) pSecQueryHandle)) { - tsdbCleanupQueryHandle(pSecQueryHandle); - return false; - } + int32_t numOfCols = (int32_t)(QH_GET_NUM_OF_COLS(pSecQueryHandle)); + size_t si = taosArrayGetSize(pSecQueryHandle->pTableCheckInfo); - tsdbRetrieveDataBlockInfo((void*) pSecQueryHandle, &blockInfo); - tsdbRetrieveDataBlock((void*) pSecQueryHandle, pSecQueryHandle->defaultLoadColumn); + for (int32_t i = 0; i < numOfCols; ++i) { + SColumnInfoData* pCol = taosArrayGet(pQueryHandle->pColumns, i); + memcpy((char*)pCol->pData, (char*)pCol->pData + pCol->info.bytes * (pQueryHandle->cur.rows - 1), pCol->info.bytes); - int32_t numOfCols = (int32_t)(QH_GET_NUM_OF_COLS(pSecQueryHandle)); - size_t si = taosArrayGetSize(pSecQueryHandle->pTableCheckInfo); + SColumnInfoData* pCol1 = taosArrayGet(pSecQueryHandle->pColumns, i); + assert(pCol->info.colId == pCol1->info.colId); - for (int32_t i = 0; i < numOfCols; ++i) { - SColumnInfoData* pCol = taosArrayGet(pQueryHandle->pColumns, i); - memcpy((char*)pCol->pData, (char*)pCol->pData + pCol->info.bytes * (pQueryHandle->cur.rows - 1), pCol->info.bytes); + memcpy((char*)pCol->pData + pCol->info.bytes, pCol1->pData, pCol1->info.bytes); + } - SColumnInfoData* pCol1 = taosArrayGet(pSecQueryHandle->pColumns, i); - assert(pCol->info.colId == pCol1->info.colId); + SColumnInfoData* pTSCol = taosArrayGet(pQueryHandle->pColumns, 0); - memcpy((char*)pCol->pData + pCol->info.bytes, pCol1->pData, pCol1->info.bytes); - } + // it is ascending order + pQueryHandle->order = TSDB_ORDER_DESC; + pQueryHandle->window = pQueryHandle->cur.win; + pQueryHandle->cur.win = (STimeWindow){((TSKEY*)pTSCol->pData)[0], ((TSKEY*)pTSCol->pData)[1]}; + pQueryHandle->cur.rows = 2; + pQueryHandle->cur.mixBlock = true; - SColumnInfoData* pTSCol = taosArrayGet(pQueryHandle->pColumns, 0); + int32_t step = -1;// one step for ascending order traverse + for (int32_t j = 0; j < si; ++j) { + STableCheckInfo* pCheckInfo = (STableCheckInfo*) taosArrayGet(pQueryHandle->pTableCheckInfo, j); + pCheckInfo->lastKey = pQueryHandle->cur.win.ekey + step; + } - // it is ascending order - pQueryHandle->order = TSDB_ORDER_DESC; - pQueryHandle->window = pQueryHandle->cur.win; - pQueryHandle->cur.win = (STimeWindow){((TSKEY*)pTSCol->pData)[0], ((TSKEY*)pTSCol->pData)[1]}; - pQueryHandle->cur.rows = 2; - pQueryHandle->cur.mixBlock = true; + tsdbCleanupQueryHandle(pSecQueryHandle); + } - int32_t step = -1;// one step for ascending order traverse - for (int32_t j = 0; j < si; ++j) { - STableCheckInfo* pCheckInfo = (STableCheckInfo*) taosArrayGet(pQueryHandle->pTableCheckInfo, j); - pCheckInfo->lastKey = pQueryHandle->cur.win.ekey + step; - } + //disable it after retrieve data + pQueryHandle->type = TSDB_QUERY_TYPE_EXTERNAL; + pQueryHandle->checkFiles = false; + return true; +} - tsdbCleanupQueryHandle(pSecQueryHandle); - } +// handle data in cache situation +bool 
tsdbNextDataBlock(TsdbQueryHandleT* pHandle) { + STsdbQueryHandle* pQueryHandle = (STsdbQueryHandle*) pHandle; - //disable it after retrieve data - pQueryHandle->type = TSDB_QUERY_TYPE_EXTERNAL; - pQueryHandle->checkFiles = false; - return true; + int64_t stime = taosGetTimestampUs(); + int64_t elapsedTime = stime; + + size_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo); + assert(numOfTables > 0); + + if (pQueryHandle->type == TSDB_QUERY_TYPE_EXTERNAL) { + SMemRef* pMemRef = pQueryHandle->pMemRef; + tsdbMayTakeMemSnapshot(pQueryHandle); + bool ret = getNeighborRows(pQueryHandle); + tsdbMayUnTakeMemSnapshot(pQueryHandle); + + // restore the pMemRef + pQueryHandle->pMemRef = pMemRef; + return ret; } if (pQueryHandle->checkFiles) { @@ -2122,7 +2172,16 @@ STimeWindow changeTableGroupByLastrow(STableGroupInfo *groupList) { } } - // clear current group + // clear current group, unref unused table + for (int32_t i = 0; i < numOfTables; ++i) { + STableKeyInfo* pKeyInfo = (STableKeyInfo*)taosArrayGet(pGroup, i); + + // keyInfo.pTable may be NULL here. + if (pKeyInfo->pTable != keyInfo.pTable) { + tsdbUnRefTable(pKeyInfo->pTable); + } + } + taosArrayClear(pGroup); // more than one table in each group, only one table left for each group @@ -2302,8 +2361,13 @@ void filterPrepare(void* expr, void* param) { if (pInfo->optr == TSDB_RELATION_IN) { pInfo->q = (char*) pCond->arr; - } else { - pInfo->q = calloc(1, pSchema->bytes + TSDB_NCHAR_SIZE); // to make sure tonchar does not cause invalid write, since the '\0' needs at least sizeof(wchar_t) space. + } else if (pCond != NULL) { + uint32_t size = pCond->nLen * TSDB_NCHAR_SIZE; + if (size < (uint32_t)pSchema->bytes) { + size = pSchema->bytes; + } + // to make sure tonchar does not cause invalid write, since the '\0' needs at least sizeof(wchar_t) space. 
+ pInfo->q = calloc(1, size + TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE); tVariantDump(pCond, pInfo->q, pSchema->type, true); } } @@ -2452,7 +2516,7 @@ SArray* createTableGroup(SArray* pTableList, STSchema* pTagSchema, SColIndex* pC static bool indexedNodeFilterFp(const void* pNode, void* param) { tQueryInfo* pInfo = (tQueryInfo*) param; - STable* pTable = *(STable**)(SL_GET_NODE_DATA((SSkipListNode*)pNode)); + STable* pTable = (STable*)(SL_GET_NODE_DATA((SSkipListNode*)pNode)); char* val = NULL; @@ -2705,7 +2769,7 @@ void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) { STableCheckInfo* pTableCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, i); destroyTableMemIterator(pTableCheckInfo); - taosTFree(pTableCheckInfo->pCompInfo); + tfree(pTableCheckInfo->pCompInfo); } taosArrayDestroy(pQueryHandle->pTableCheckInfo); } @@ -2714,14 +2778,14 @@ void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) { size_t cols = taosArrayGetSize(pQueryHandle->pColumns); for (int32_t i = 0; i < cols; ++i) { SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i); - taosTFree(pColInfo->pData); + tfree(pColInfo->pData); } taosArrayDestroy(pQueryHandle->pColumns); } taosArrayDestroy(pQueryHandle->defaultLoadColumn); - taosTFree(pQueryHandle->pDataBlockInfo); - taosTFree(pQueryHandle->statis); + tfree(pQueryHandle->pDataBlockInfo); + tfree(pQueryHandle->statis); // todo check error tsdbMayUnTakeMemSnapshot(pQueryHandle); @@ -2735,7 +2799,7 @@ void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) { tsdbDebug("%p :io-cost summary: statis-info:%"PRId64" us, datablock:%" PRId64" us, check data:%"PRId64" us, %p", pQueryHandle, pCost->statisInfoLoadTime, pCost->blockLoadTime, pCost->checkForNextTime, pQueryHandle->qinfo); - taosTFree(pQueryHandle); + tfree(pQueryHandle); } void tsdbDestroyTableGroup(STableGroupInfo *pGroupList) { diff --git a/src/tsdb/tests/tsdbTests.cpp b/src/tsdb/tests/tsdbTests.cpp index 605586515b7b1626751c01a550925c6e9ac4e183..ef5ed6f04459a4213e761f94ad00363ede9ecd26 100644 --- a/src/tsdb/tests/tsdbTests.cpp +++ b/src/tsdb/tests/tsdbTests.cpp @@ -80,7 +80,7 @@ static int insertData(SInsertInfo *pInfo) { pMsg->numOfBlocks = htonl(pMsg->numOfBlocks); if (tsdbInsertData(pInfo->pRepo, pMsg, NULL) < 0) { - taosTFree(pMsg); + tfree(pMsg); return -1; } } @@ -88,7 +88,7 @@ static int insertData(SInsertInfo *pInfo) { double etime = getCurTime(); printf("Spent %f seconds to write %d records\n", etime - stime, pInfo->totalRows); - taosTFree(pMsg); + tfree(pMsg); return 0; } diff --git a/src/util/CMakeLists.txt b/src/util/CMakeLists.txt index 89c8e3dc39211eca1b6c877f7789ad4313917ea2..78b9c90979fa74bdd0ce5ecfd53042db30e4e8fa 100644 --- a/src/util/CMakeLists.txt +++ b/src/util/CMakeLists.txt @@ -1,13 +1,14 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) +INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/rpc/inc) AUX_SOURCE_DIRECTORY(src SRC) ADD_LIBRARY(tutil ${SRC}) TARGET_LINK_LIBRARIES(tutil pthread osdetail lz4 z) IF (TD_LINUX) TARGET_LINK_LIBRARIES(tutil m rt) - ADD_SUBDIRECTORY(tests) + # ADD_SUBDIRECTORY(tests) FIND_PATH(ICONV_INCLUDE_EXIST iconv.h /usr/include/ /usr/local/include/) IF (ICONV_INCLUDE_EXIST) diff --git a/src/util/inc/hash.h b/src/util/inc/hash.h index 2a58bec8830faab4cf9909bd417a44e2e8881570..42bc136584d618e62826c307b698746ca387ff41 100644 --- a/src/util/inc/hash.h +++ b/src/util/inc/hash.h @@ -31,14 +31,16 @@ extern "C" { typedef void (*_hash_free_fn_t)(void *param); typedef struct SHashNode { - char *key; -// struct SHashNode *prev; +// char *key; struct 
SHashNode *next; - uint32_t hashVal; // the hash value of key, if hashVal == HASH_VALUE_IN_TRASH, this node is moved to trash + uint32_t hashVal; // the hash value of key uint32_t keyLen; // length of the key - char *data; +// char *data; } SHashNode; +#define GET_HASH_NODE_KEY(_n) ((char*)(_n) + sizeof(SHashNode)) +#define GET_HASH_NODE_DATA(_n) ((char*)(_n) + sizeof(SHashNode) + (_n)->keyLen) + typedef enum SHashLockTypeE { HASH_NO_LOCK = 0, HASH_ENTRY_LOCK = 1, @@ -175,6 +177,8 @@ void* taosHashDestroyIter(SHashMutableIterator* iter); */ int32_t taosHashGetMaxOverflowLinkLength(const SHashObj *pHashObj); +size_t taosHashGetMemSize(const SHashObj *pHashObj); + #ifdef __cplusplus } #endif diff --git a/src/util/inc/tarray.h b/src/util/inc/tarray.h index 71838af15064ec3d84662dff031d40d2156924eb..bf922fe9c44b4f923d44f4a38c1c4c1a7fb20af3 100644 --- a/src/util/inc/tarray.h +++ b/src/util/inc/tarray.h @@ -70,6 +70,13 @@ void* taosArrayGet(const SArray* pArray, size_t index); */ void* taosArrayGetP(const SArray* pArray, size_t index); +/** + * get the last element in the array list + * @param pArray + * @return + */ +void* taosArrayGetLast(const SArray* pArray); + /** * return the size of array * @param pArray @@ -117,6 +124,13 @@ void taosArrayClear(SArray* pArray); */ void taosArrayDestroy(SArray* pArray); +/** + * + * @param pArray + * @param fp + */ +void taosArrayDestroyEx(SArray* pArray, void (*fp)(void*)); + /** * sort the array * @param pArray diff --git a/src/util/inc/tconfig.h b/src/util/inc/tconfig.h index 0520cf29a87c9d4727ef6db48d8f5712ac845b89..33819f6a20ee64ada194d520ef09c6133d4dad96 100644 --- a/src/util/inc/tconfig.h +++ b/src/util/inc/tconfig.h @@ -53,7 +53,7 @@ enum { TAOS_CFG_UTYPE_NONE, TAOS_CFG_UTYPE_PERCENT, TAOS_CFG_UTYPE_GB, - TAOS_CFG_UTYPE_Mb, + TAOS_CFG_UTYPE_MB, TAOS_CFG_UTYPE_BYTE, TAOS_CFG_UTYPE_SECOND, TAOS_CFG_UTYPE_MS diff --git a/src/util/inc/tfile.h b/src/util/inc/tfile.h new file mode 100644 index 0000000000000000000000000000000000000000..10b7c1df35caaa675c9334106785b91af3fdeb05 --- /dev/null +++ b/src/util/inc/tfile.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_TFILE_H +#define TDENGINE_TFILE_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +// init taos file module +int32_t tfinit(); + +// clean up taos file module +void tfcleanup(); + +// the same syntax as UNIX standard open/close/read/write +// but FD is int64_t and will never be reused +int64_t tfopen(const char *pathname, int32_t flags); +int64_t tfclose(int64_t tfd); +int64_t tfwrite(int64_t tfd, void *buf, int64_t count); +int64_t tfread(int64_t tfd, void *buf, int64_t count); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TREF_H diff --git a/src/util/inc/tnettest.h b/src/util/inc/tnettest.h index 3fe1dfa9204fbbf85f193078b17e0bb4f9643848..426df5cbb28b9c0fcada049c7242730359d2a3fc 100644 --- a/src/util/inc/tnettest.h +++ b/src/util/inc/tnettest.h @@ -20,7 +20,27 @@ extern "C" { #endif -void taosNetTest(const char* host, uint16_t port, uint16_t endPort, int pktLen, const char* netTestRole); +typedef struct CmdArguments { + char* host; + char* password; + char* user; + char* auth; + char* database; + char* timezone; + bool is_raw_time; + bool is_use_passwd; + char file[TSDB_FILENAME_LEN]; + char dir[TSDB_FILENAME_LEN]; + int threadNum; + char* commands; + int abort; + int port; + int endPort; + int pktLen; + char* netTestRole; +} CmdArguments; + +void taosNetTest(CmdArguments* args); #ifdef __cplusplus } diff --git a/src/util/inc/tqueue.h b/src/util/inc/tqueue.h index 8493a64315966aa32b58359652d0f429e8e0916a..c3051464e556860178be36f3473f5e4686f6082e 100644 --- a/src/util/inc/tqueue.h +++ b/src/util/inc/tqueue.h @@ -20,6 +20,23 @@ extern "C" { #endif +/* + +This set of API for queue is designed specially for vnode/mnode. The main purpose is to +consume all the items instead of one item from a queue by one single read. Also, it can +combine multiple queues into a queue set, a consumer thread can consume a queue set via +a single API instead of looping every queue by itself. + +Notes: +1: taosOpenQueue/taosCloseQueue, taosOpenQset/taosCloseQset is NOT multi-thread safe +2: after taosCloseQueue/taosCloseQset is called, read/write operation APIs are not safe. +3: read/write operation APIs are multi-thread safe + +To remove the limitation and make this set of queue APIs multi-thread safe, REF(tref.c) +shall be used to set up the protection. + +*/ + typedef void* taos_queue; typedef void* taos_qset; typedef void* taos_qall; diff --git a/src/util/inc/tref.h b/src/util/inc/tref.h new file mode 100644 index 0000000000000000000000000000000000000000..cd5092f30a290c51de49e38b0226bbed637dc0e6 --- /dev/null +++ b/src/util/inc/tref.h @@ -0,0 +1,75 @@ + +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_TREF_H +#define TDENGINE_TREF_H + +#ifdef __cplusplus +extern "C" { +#endif + +// open a reference set, max is the mod used by hash, fp is the pointer to free resource function +// return rsetId which will be used by other APIs. 
On error, -1 is returned, and terrno is set appropriately +int taosOpenRef(int max, void (*fp)(void *)); + +// close the reference set, refId is the return value by taosOpenRef +// return 0 if success. On error, -1 is returned, and terrno is set appropriately +int taosCloseRef(int refId); + +// add ref, p is the pointer to resource or pointer ID +// return Reference ID(rid) allocated. On error, -1 is returned, and terrno is set appropriately +int64_t taosAddRef(int refId, void *p); + +// remove ref, rid is the reference ID returned by taosAddRef +// return 0 if success. On error, -1 is returned, and terrno is set appropriately +int taosRemoveRef(int rsetId, int64_t rid); + +// acquire ref, rid is the reference ID returned by taosAddRef +// return the resource p. On error, NULL is returned, and terrno is set appropriately +void *taosAcquireRef(int rsetId, int64_t rid); + +// release ref, rid is the reference ID returned by taosAddRef +// return 0 if success. On error, -1 is returned, and terrno is set appropriately +int taosReleaseRef(int rsetId, int64_t rid); + +// return the first reference if rid is 0, otherwise return the next after current reference. +// if return value is NULL, it means list is over(if terrno is set, it means error happens) +void *taosIterateRef(int rsetId, int64_t rid); + +// return the number of references in system +int taosListRef(); + +/* sample code to iterate the refs + +void demoIterateRefs(int rsetId) { + + void *p = taosIterateRef(refId, 0); + while (p) { + // process P + + // get the rid from p + + p = taosIterateRef(rsetId, rid); + } +} + +*/ + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TREF_H diff --git a/src/util/inc/tskiplist.h b/src/util/inc/tskiplist.h index a14a8565617bc08eefc989168a93f523f2e6caff..2c4d1a86ef7446f49a2c1a97e25db558f933bd57 100644 --- a/src/util/inc/tskiplist.h +++ b/src/util/inc/tskiplist.h @@ -27,33 +27,25 @@ extern "C" { #define MAX_SKIP_LIST_LEVEL 15 #define SKIP_LIST_RECORD_PERFORMANCE 0 +// For key property setting +#define SL_ALLOW_DUP_KEY (uint8_t)0x0 // Allow duplicate key exists (for tag index usage) +#define SL_DISCARD_DUP_KEY (uint8_t)0x1 // Discard duplicate key (for data update=0 case) +#define SL_UPDATE_DUP_KEY (uint8_t)0x2 // Update duplicate key by remove/insert (for data update=1 case) +// For thread safety setting +#define SL_THREAD_SAFE (uint8_t)0x4 + typedef char *SSkipListKey; typedef char *(*__sl_key_fn_t)(const void *); -/** - * the skip list node is located in a consecutive memory area, - * the format of skip list node is as follows: - * +------------+-----------------------+------------------------+-----+------+ - * | node level | forward pointer array | backward pointer array | key | data | - * +------------+-----------------------+------------------------+-----+------+ - */ typedef struct SSkipListNode { - uint8_t level; + uint8_t level; + void * pData; + struct SSkipListNode *forwards[]; } SSkipListNode; -#define SL_NODE_HEADER_SIZE(_l) (sizeof(SSkipListNode) + ((_l) << 1u) * POINTER_BYTES) - -#define SL_GET_FORWARD_POINTER(n, _l) ((SSkipListNode **)((char *)(n) + sizeof(SSkipListNode)))[(_l)] -#define SL_GET_BACKWARD_POINTER(n, _l) \ - ((SSkipListNode **)((char *)(n) + sizeof(SSkipListNode) + ((n)->level) * POINTER_BYTES))[(_l)] - -#define SL_GET_NODE_DATA(n) ((char *)(n) + SL_NODE_HEADER_SIZE((n)->level)) -#define SL_GET_NODE_KEY(s, n) ((s)->keyFn(SL_GET_NODE_DATA(n))) - -#define SL_GET_SL_MIN_KEY(s) (SL_GET_NODE_KEY((s), SL_GET_FORWARD_POINTER((s)->pHead, 0))) -#define SL_GET_SL_MAX_KEY(s) 
(SL_GET_NODE_KEY((s), SL_GET_BACKWARD_POINTER((s)->pTail, 0))) - -#define SL_GET_NODE_LEVEL(n) *(uint8_t *)((n)) +#define SL_GET_NODE_DATA(n) (n)->pData +#define SL_NODE_GET_FORWARD_POINTER(n, l) (n)->forwards[(l)] +#define SL_NODE_GET_BACKWARD_POINTER(n, l) (n)->forwards[(n)->level + (l)] /* * @version 0.3 @@ -103,34 +95,23 @@ typedef struct tSkipListState { uint64_t nTotalElapsedTimeForInsert; } tSkipListState; -typedef struct SSkipListKeyInfo { - uint8_t dupKey : 2; // if allow duplicated key in the skip list - uint8_t type : 4; // key type - uint8_t freeNode:2; // free node when destroy the skiplist - uint8_t len; // maximum key length, used in case of string key -} SSkipListKeyInfo; - typedef struct SSkipList { __compar_fn_t comparFn; __sl_key_fn_t keyFn; - uint32_t size; + pthread_rwlock_t *lock; + uint16_t len; uint8_t maxLevel; + uint8_t flags; + uint8_t type; // static info above uint8_t level; - SSkipListKeyInfo keyInfo; - pthread_rwlock_t *lock; - SSkipListNode * pHead; // point to the first element - SSkipListNode * pTail; // point to the last element + uint32_t size; + SSkipListNode * pHead; // point to the first element + SSkipListNode * pTail; // point to the last element #if SKIP_LIST_RECORD_PERFORMANCE tSkipListState state; // skiplist state #endif } SSkipList; -/* - * iterate the skiplist - * this will cause the multi-thread problem, when the skiplist is destroyed, the iterate may - * continue iterating the skiplist, so add the reference count for skiplist - * TODO add the ref for skip list when one iterator is created - */ typedef struct SSkipListIterator { SSkipList * pSkipList; SSkipListNode *cur; @@ -139,114 +120,27 @@ typedef struct SSkipListIterator { SSkipListNode *next; // next points to the true qualified node in skip list } SSkipListIterator; -/** - * - * @param nMaxLevel maximum skip list level - * @param keyType type of key - * @param dupKey allow the duplicated key in the skip list - * @return - */ -SSkipList *tSkipListCreate(uint8_t nMaxLevel, uint8_t keyType, uint8_t keyLen, uint8_t dupKey, uint8_t threadsafe, - uint8_t freeNode, __sl_key_fn_t fn); - -/** - * - * @param pSkipList - * @return NULL will always be returned - */ -void *tSkipListDestroy(SSkipList *pSkipList); - -/** - * - * @param pSkipList - * @param level - * @param headSize - */ -void tSkipListNewNodeInfo(SSkipList *pSkipList, int32_t *level, int32_t *headSize); - -/** - * put the skip list node into the skip list. - * If failed, NULL will be returned, otherwise, the pNode will be returned. 
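The reworked SSkipListNode above keeps user data behind a single pData pointer and folds both link arrays into one flexible forwards[] member: slots 0..level-1 hold the forward links and slots level..2*level-1 hold the backward links, which is exactly what SL_NODE_GET_FORWARD_POINTER and SL_NODE_GET_BACKWARD_POINTER index. The node allocator itself is not shown in this hunk, but those macros imply an allocation like the following sketch (names are illustrative):

#include <stdint.h>
#include <stdlib.h>

typedef struct SDemoSkipNode {
  uint8_t               level;
  void                 *pData;
  struct SDemoSkipNode *forwards[];    /* [0..level-1] forward, [level..2*level-1] backward */
} SDemoSkipNode;

#define DEMO_FORWARD(n, l)  ((n)->forwards[(l)])
#define DEMO_BACKWARD(n, l) ((n)->forwards[(n)->level + (l)])

/* one allocation holds the header plus both pointer arrays */
static SDemoSkipNode *demoNewNode(uint8_t level, void *pData) {
  SDemoSkipNode *pNode = calloc(1, sizeof(SDemoSkipNode) + 2 * level * sizeof(SDemoSkipNode *));
  if (pNode == NULL) return NULL;
  pNode->level = level;
  pNode->pData = pData;
  return pNode;
}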
- * - * @param pSkipList - * @param pNode - * @return - */ -SSkipListNode *tSkipListPut(SSkipList *pSkipList, SSkipListNode *pNode); - -/** - * get *all* nodes which key are equivalent to pKey - * - * @param pSkipList - * @param pKey - * @return - */ -SArray *tSkipListGet(SSkipList *pSkipList, SSkipListKey pKey); - -/** - * get the size of skip list - * @param pSkipList - * @return - */ -size_t tSkipListGetSize(const SSkipList *pSkipList); - -/** - * display skip list of the given level, for debug purpose only - * @param pSkipList - * @param nlevel - */ -void tSkipListPrint(SSkipList *pSkipList, int16_t nlevel); - -/** - * create skiplist iterator - * @param pSkipList - * @return - */ +#define SL_IS_THREAD_SAFE(s) (((s)->flags) & SL_THREAD_SAFE) +#define SL_DUP_MODE(s) (((s)->flags) & ((((uint8_t)1) << 2) - 1)) +#define SL_GET_NODE_KEY(s, n) ((s)->keyFn((n)->pData)) +#define SL_GET_MIN_KEY(s) SL_GET_NODE_KEY(s, SL_NODE_GET_FORWARD_POINTER((s)->pHead, 0)) +#define SL_GET_MAX_KEY(s) SL_GET_NODE_KEY((s), SL_NODE_GET_BACKWARD_POINTER((s)->pTail, 0)) +#define SL_SIZE(s) (s)->size + +SSkipList *tSkipListCreate(uint8_t maxLevel, uint8_t keyType, uint16_t keyLen, __compar_fn_t comparFn, uint8_t flags, + __sl_key_fn_t fn); +void tSkipListDestroy(SSkipList *pSkipList); +SSkipListNode * tSkipListPut(SSkipList *pSkipList, void *pData); +void tSkipListPutBatch(SSkipList *pSkipList, void **ppData, int ndata); +SArray * tSkipListGet(SSkipList *pSkipList, SSkipListKey pKey); +void tSkipListPrint(SSkipList *pSkipList, int16_t nlevel); SSkipListIterator *tSkipListCreateIter(SSkipList *pSkipList); - -/** - * create skip list iterator from the given node and specified the order - * @param pSkipList - * @param pNode start position, instead of the first node in skip list - * @param order traverse order of the iterator - * @return - */ -SSkipListIterator *tSkipListCreateIterFromVal(SSkipList* pSkipList, const char* val, int32_t type, int32_t order); - -/** - * forward the skip list iterator - * @param iter - * @return - */ -bool tSkipListIterNext(SSkipListIterator *iter); - -/** - * get the element of skip list node - * @param iter - * @return - */ -SSkipListNode *tSkipListIterGet(SSkipListIterator *iter); - -/** - * destroy the skip list node - * @param iter - * @return - */ -void *tSkipListDestroyIter(SSkipListIterator *iter); - -/* - * remove nodes of the pKey value. 
- * If more than one node has the same value, all will be removed - * - * @Return - * the count of removed nodes - */ -uint32_t tSkipListRemove(SSkipList *pSkipList, SSkipListKey key); - -/* - * remove the specified node in parameters - */ -void tSkipListRemoveNode(SSkipList *pSkipList, SSkipListNode *pNode); +SSkipListIterator *tSkipListCreateIterFromVal(SSkipList *pSkipList, const char *val, int32_t type, int32_t order); +bool tSkipListIterNext(SSkipListIterator *iter); +SSkipListNode * tSkipListIterGet(SSkipListIterator *iter); +void * tSkipListDestroyIter(SSkipListIterator *iter); +uint32_t tSkipListRemove(SSkipList *pSkipList, SSkipListKey key); +void tSkipListRemoveNode(SSkipList *pSkipList, SSkipListNode *pNode); #ifdef __cplusplus } diff --git a/src/util/inc/tsocket.h b/src/util/inc/tsocket.h index f14e8dbb356e131bbd580f8c5310bea944e71033..391cc44accadd785fb46e02aa79cde7617834c70 100644 --- a/src/util/inc/tsocket.h +++ b/src/util/inc/tsocket.h @@ -20,21 +20,21 @@ extern "C" { #endif -int taosReadn(SOCKET sock, char *buffer, int len); -int taosWriteMsg(SOCKET fd, void *ptr, int nbytes); -int taosReadMsg(SOCKET fd, void *ptr, int nbytes); -int taosNonblockwrite(SOCKET fd, char *ptr, int nbytes); -int taosCopyFds(SOCKET sfd, SOCKET dfd, int64_t len); -int taosSetNonblocking(SOCKET sock, int on); +int32_t taosReadn(SOCKET sock, char *buffer, int32_t len); +int32_t taosWriteMsg(SOCKET fd, void *ptr, int32_t nbytes); +int32_t taosReadMsg(SOCKET fd, void *ptr, int32_t nbytes); +int32_t taosNonblockwrite(SOCKET fd, char *ptr, int32_t nbytes); +int32_t taosCopyFds(SOCKET sfd, SOCKET dfd, int64_t len); +int32_t taosSetNonblocking(SOCKET sock, int32_t on); -SOCKET taosOpenUdpSocket(uint32_t localIp, uint16_t localPort); -SOCKET taosOpenTcpClientSocket(uint32_t ip, uint16_t port, uint32_t localIp); -SOCKET taosOpenTcpServerSocket(uint32_t ip, uint16_t port); -int taosKeepTcpAlive(SOCKET sockFd); +SOCKET taosOpenUdpSocket(uint32_t localIp, uint16_t localPort); +SOCKET taosOpenTcpClientSocket(uint32_t ip, uint16_t port, uint32_t localIp); +SOCKET taosOpenTcpServerSocket(uint32_t ip, uint16_t port); +int32_t taosKeepTcpAlive(SOCKET sockFd); -int taosGetFqdn(char *); +int32_t taosGetFqdn(char *); uint32_t taosGetIpFromFqdn(const char *); -void tinet_ntoa(char *ipstr, unsigned int ip); +void tinet_ntoa(char *ipstr, uint32_t ip); uint32_t ip2uint(const char *const ip_addr); #ifdef __cplusplus diff --git a/src/util/src/hash.c b/src/util/src/hash.c index 625d4af1ac981c5d1f5f079f5b15533a4d63ef24..03a73424971ba7e4b77d508b12f71c729889a51a 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -22,14 +22,13 @@ #define DO_FREE_HASH_NODE(_n) \ do { \ - taosTFree((_n)->data); \ - taosTFree(_n); \ + tfree(_n); \ } while (0) #define FREE_HASH_NODE(_h, _n) \ do { \ if ((_h)->freeFp) { \ - (_h)->freeFp((_n)->data); \ + (_h)->freeFp(GET_HASH_NODE_DATA(_n)); \ } \ \ DO_FREE_HASH_NODE(_n); \ @@ -77,7 +76,7 @@ static FORCE_INLINE int32_t taosHashCapacity(int32_t length) { static FORCE_INLINE SHashNode *doSearchInEntryList(SHashEntry *pe, const void *key, size_t keyLen, uint32_t hashVal) { SHashNode *pNode = pe->next; while (pNode) { - if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) { + if ((pNode->keyLen == keyLen) && (memcmp(GET_HASH_NODE_KEY(pNode), key, keyLen) == 0)) { assert(pNode->hashVal == hashVal); break; } @@ -115,11 +114,15 @@ static SHashNode *doCreateHashNode(const void *key, size_t keyLen, const void *p * @param dsize size of actual data * @return hash node */ -static 
FORCE_INLINE SHashNode *doUpdateHashNode(SHashNode *pNode, SHashNode *pNewNode) { +static FORCE_INLINE SHashNode *doUpdateHashNode(SHashEntry* pe, SHashNode* prev, SHashNode *pNode, SHashNode *pNewNode) { assert(pNode->keyLen == pNewNode->keyLen); - SWAP(pNode->key, pNewNode->key, void *); - SWAP(pNode->data, pNewNode->data, void *); + if (prev != NULL) { + prev->next = pNewNode; + } else { + pe->next = pNewNode; + } + pNewNode->next = pNode->next; return pNewNode; } @@ -208,12 +211,14 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *da assert(pNode == NULL); } + SHashNode* prev = NULL; while (pNode) { - if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) { + if ((pNode->keyLen == keyLen) && (memcmp(GET_HASH_NODE_KEY(pNode), key, keyLen) == 0)) { assert(pNode->hashVal == hashVal); break; } + prev = pNode; pNode = pNode->next; } @@ -239,7 +244,10 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *da } else { // not support the update operation, return error if (pHashObj->enableUpdate) { - doUpdateHashNode(pNode, pNewNode); + doUpdateHashNode(pe, prev, pNode, pNewNode); + DO_FREE_HASH_NODE(pNode); + } else { + DO_FREE_HASH_NODE(pNewNode); } if (pHashObj->type == HASH_ENTRY_LOCK) { @@ -249,7 +257,6 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *da // enable resize __rd_unlock(&pHashObj->lock, pHashObj->type); - DO_FREE_HASH_NODE(pNewNode); return pHashObj->enableUpdate ? 0 : -1; } } @@ -293,13 +300,13 @@ void* taosHashGetCB(SHashObj *pHashObj, const void *key, size_t keyLen, void (*f SHashNode *pNode = doSearchInEntryList(pe, key, keyLen, hashVal); if (pNode != NULL) { if (fp != NULL) { - fp(pNode->data); + fp(GET_HASH_NODE_DATA(pNode)); } if (d != NULL) { - memcpy(d, pNode->data, dsize); + memcpy(d, GET_HASH_NODE_DATA(pNode), dsize); } else { - data = pNode->data; + data = GET_HASH_NODE_DATA(pNode); } } @@ -357,13 +364,13 @@ int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLe SHashNode *pRes = NULL; // remove it - if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) { + if ((pNode->keyLen == keyLen) && (memcmp(GET_HASH_NODE_KEY(pNode), key, keyLen) == 0)) { pe->num -= 1; pRes = pNode; pe->next = pNode->next; } else { while (pNode->next != NULL) { - if (((pNode->next)->keyLen == keyLen) && (memcmp((pNode->next)->key, key, keyLen) == 0)) { + if (((pNode->next)->keyLen == keyLen) && (memcmp(GET_HASH_NODE_KEY((pNode->next)), key, keyLen) == 0)) { assert((pNode->next)->hashVal == hashVal); break; } @@ -392,7 +399,7 @@ int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLe __rd_unlock(&pHashObj->lock, pHashObj->type); if (data != NULL && pRes != NULL) { - memcpy(data, pRes->data, dsize); + memcpy(data, GET_HASH_NODE_DATA(pRes), dsize); } if (pRes != NULL) { @@ -426,7 +433,7 @@ int32_t taosHashCondTraverse(SHashObj *pHashObj, bool (*fp)(void *, void *), voi // todo remove the first node SHashNode *pNode = NULL; while((pNode = pEntry->next) != NULL) { - if (fp && (!fp(param, pNode->data))) { + if (fp && (!fp(param, GET_HASH_NODE_DATA(pNode)))) { pEntry->num -= 1; atomic_sub_fetch_64(&pHashObj->size, 1); @@ -451,7 +458,7 @@ int32_t taosHashCondTraverse(SHashObj *pHashObj, bool (*fp)(void *, void *), voi while ((pNext = pNode->next) != NULL) { // not qualified, remove it - if (fp && (!fp(param, pNext->data))) { + if (fp && (!fp(param, GET_HASH_NODE_DATA(pNext)))) { pNode->next = pNext->next; pEntry->num -= 1; 
atomic_sub_fetch_64(&pHashObj->size, 1); @@ -515,7 +522,7 @@ void taosHashCleanup(SHashObj *pHashObj) { size_t memBlock = taosArrayGetSize(pHashObj->pMemBlock); for (int32_t i = 0; i < memBlock; ++i) { void *p = taosArrayGetP(pHashObj->pMemBlock, i); - taosTFree(p); + tfree(p); } taosArrayDestroy(pHashObj->pMemBlock); @@ -605,7 +612,7 @@ bool taosHashIterNext(SHashMutableIterator *pIter) { } } -void *taosHashIterGet(SHashMutableIterator *iter) { return (iter == NULL) ? NULL : iter->pCur->data; } +void *taosHashIterGet(SHashMutableIterator *iter) { return (iter == NULL) ? NULL : GET_HASH_NODE_DATA(iter->pCur); } void *taosHashDestroyIter(SHashMutableIterator *iter) { if (iter == NULL) { @@ -743,21 +750,19 @@ void taosHashTableResize(SHashObj *pHashObj) { } SHashNode *doCreateHashNode(const void *key, size_t keyLen, const void *pData, size_t dsize, uint32_t hashVal) { - SHashNode *pNewNode = calloc(1, sizeof(SHashNode)); + SHashNode *pNewNode = calloc(1, sizeof(SHashNode) + keyLen + dsize); if (pNewNode == NULL) { uError("failed to allocate memory, reason:%s", strerror(errno)); return NULL; } - pNewNode->data = malloc(dsize + keyLen); - memcpy(pNewNode->data, pData, dsize); - - pNewNode->key = pNewNode->data + dsize; - memcpy(pNewNode->key, key, keyLen); - pNewNode->keyLen = (uint32_t)keyLen; pNewNode->hashVal = hashVal; + + memcpy(GET_HASH_NODE_DATA(pNewNode), pData, dsize); + memcpy(GET_HASH_NODE_KEY(pNewNode), key, keyLen); + return pNewNode; } @@ -798,3 +803,11 @@ SHashNode *getNextHashNode(SHashMutableIterator *pIter) { return NULL; } + +size_t taosHashGetMemSize(const SHashObj *pHashObj) { + if (pHashObj == NULL) { + return 0; + } + + return (pHashObj->capacity * (sizeof(SHashEntry) + POINTER_BYTES)) + sizeof(SHashNode) * taosHashGetSize(pHashObj) + sizeof(SHashObj); +} diff --git a/src/util/src/talgo.c b/src/util/src/talgo.c index 4b96e62e91ff440684328d7e7ea8c8c6cd783caa..278683539e3247b4b6dcd43687ac281368a7d31d 100644 --- a/src/util/src/talgo.c +++ b/src/util/src/talgo.c @@ -153,7 +153,7 @@ static void tqsortImpl(void *src, int32_t start, int32_t end, size_t size, const void taosqsort(void *src, size_t numOfElem, size_t size, const void* param, __ext_compar_fn_t comparFn) { char *buf = calloc(1, size); // prepare the swap buffer tqsortImpl(src, 0, (int32_t)numOfElem - 1, (int32_t)size, param, comparFn, buf); - taosTFree(buf); + tfree(buf); } void * taosbsearch(const void *key, const void *base, size_t nmemb, size_t size, __compar_fn_t compar, int flags) { diff --git a/src/util/src/tarray.c b/src/util/src/tarray.c index 65147b38def067bdca5de7c403835f8505a9b98e..bec2fac7dfd03ada681b9011df7e465cd0a49f0c 100644 --- a/src/util/src/tarray.c +++ b/src/util/src/tarray.c @@ -99,6 +99,10 @@ void* taosArrayGetP(const SArray* pArray, size_t index) { return *(void**)d; } +void* taosArrayGetLast(const SArray* pArray) { + return TARRAY_GET_ELEM(pArray, pArray->size - 1); +} + size_t taosArrayGetSize(const SArray* pArray) { return pArray->size; } void* taosArrayInsert(SArray* pArray, size_t index, void* pData) { @@ -189,6 +193,23 @@ void taosArrayDestroy(SArray* pArray) { free(pArray); } +void taosArrayDestroyEx(SArray* pArray, void (*fp)(void*)) { + if (pArray == NULL) { + return; + } + + if (fp == NULL) { + taosArrayDestroy(pArray); + return; + } + + for(int32_t i = 0; i < pArray->size; ++i) { + fp(TARRAY_GET_ELEM(pArray, i)); + } + + taosArrayDestroy(pArray); +} + void taosArraySort(SArray* pArray, int (*compar)(const void*, const void*)) { assert(pArray != NULL); assert(compar != NULL); diff 
--git a/src/util/src/tcache.c b/src/util/src/tcache.c index 6e20c1708dfc81728c6b961b9259d50e953b4b9d..2571f11ba41b653e1fcc639cf1c22ef1f50b3448 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -228,7 +228,7 @@ void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const v pCacheObj->freeFp(p->data); } - taosTFree(p); + tfree(p); } else { taosAddToTrashcan(pCacheObj, p); uDebug("cache:%s, key:%p, %p exist in cache, updated old:%p", pCacheObj->name, key, pNode1->data, p->data); @@ -335,7 +335,7 @@ void *taosCacheTransfer(SCacheObj *pCacheObj, void **data) { } void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { - if (pCacheObj == NULL || taosHashGetSize(pCacheObj->pHashTable) + pCacheObj->numOfElemsInTrash == 0) { + if (pCacheObj == NULL) { return; } @@ -343,7 +343,12 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { uError("cache:%s, NULL data to release", pCacheObj->name); return; } - + + + // The operation of removal from hash table and addition to trashcan is not an atomic operation, + // therefore the check for the empty of both the hash table and the trashcan has a race condition. + // It happens when there is only one object in the cache, and two threads which has referenced this object + // start to free the it simultaneously [TD-1569]. size_t offset = offsetof(SCacheDataNode, data); SCacheDataNode *pNode = (SCacheDataNode *)((char *)(*data) - offset); @@ -609,7 +614,7 @@ void doCleanupDataCache(SCacheObj *pCacheObj) { __cache_lock_destroy(pCacheObj); - taosTFree(pCacheObj->name); + tfree(pCacheObj->name); memset(pCacheObj, 0, sizeof(SCacheObj)); free(pCacheObj); } @@ -653,7 +658,11 @@ void* taosCacheTimedRefresh(void *handle) { int64_t count = 0; while(1) { +#if defined LINUX + usleep(500*1000); +#else taosMsleep(500); +#endif // check if current cache object will be deleted every 500ms. if (pCacheObj->deleting) { @@ -672,6 +681,7 @@ void* taosCacheTimedRefresh(void *handle) { continue; } + uDebug("%s refresh thread timed scan", pCacheObj->name); pCacheObj->statistics.refreshCount++; // refresh data in hash table diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index ba711ced8ff074e639a046c5dd9908865a3e9468..ff67b1f3ecf88c6da40d7b47cdac426a51636a48 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -1,7 +1,6 @@ #include "taosdef.h" #include "tcompare.h" #include "tarray.h" -#include "tutil.h" int32_t compareInt32Val(const void *pLeft, const void *pRight) { int32_t left = GET_INT32_VAL(pLeft), right = GET_INT32_VAL(pRight); @@ -367,7 +366,14 @@ int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) { case TSDB_DATA_TYPE_TINYINT: case TSDB_DATA_TYPE_BOOL: DEFAULT_COMP(GET_INT8_VAL(f1), GET_INT8_VAL(f2)); case TSDB_DATA_TYPE_NCHAR: { - int32_t ret = wcsncmp((wchar_t*) f1, (wchar_t*) f2, size/TSDB_NCHAR_SIZE); + tstr* t1 = (tstr*) f1; + tstr* t2 = (tstr*) f2; + + if (t1->len != t2->len) { + return t1->len > t2->len? 
1:-1; + } + + int32_t ret = wcsncmp((wchar_t*) t1->data, (wchar_t*) t2->data, t2->len/TSDB_NCHAR_SIZE); if (ret == 0) { return ret; } diff --git a/src/util/src/tconfig.c b/src/util/src/tconfig.c index 0ec55841a060a49f4aa9e29981fa426e42d29d5c..e89dea5a244acb5823c49d4b7c9aefb4c254db4c 100644 --- a/src/util/src/tconfig.c +++ b/src/util/src/tconfig.c @@ -288,7 +288,7 @@ void taosReadGlobalLogCfg() { option = value = NULL; olen = vlen = 0; - taosGetline(&line, &len, fp); + tgetline(&line, &len, fp); line[len - 1] = 0; paGetToken(line, &option, &olen); @@ -302,7 +302,7 @@ void taosReadGlobalLogCfg() { taosReadLogOption(option, value); } - taosTFree(line); + tfree(line); fclose(fp); } @@ -334,7 +334,7 @@ bool taosReadGlobalCfg() { option = value = NULL; olen = vlen = 0; - taosGetline(&line, &len, fp); + tgetline(&line, &len, fp); line[len - 1] = 0; paGetToken(line, &option, &olen); @@ -354,8 +354,12 @@ bool taosReadGlobalCfg() { fclose(fp); - taosTFree(line); - + tfree(line); + + if (debugFlag & DEBUG_TRACE || debugFlag & DEBUG_DEBUG || debugFlag & DEBUG_DUMP) { + taosSetAllDebugFlag(); + } + return true; } diff --git a/src/util/src/tfile.c b/src/util/src/tfile.c new file mode 100644 index 0000000000000000000000000000000000000000..27ba30fe8179ecd5e213d4cf78862fd4982b3ba4 --- /dev/null +++ b/src/util/src/tfile.c @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
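The implementation that follows backs the tfile.h API declared earlier in this patch: every open file is registered in a reference set, the returned 64-bit descriptor is the reference ID, and IDs are never reused, so a stale descriptor can at worst fail to resolve instead of touching a recycled fd. A minimal usage sketch, assuming the file already exists; the path is illustrative and error handling is trimmed:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include "tfile.h"   /* tfinit/tfopen/tfread/tfclose/tfcleanup as declared above */

int main() {
  if (tfinit() < 0) return 1;              /* opens the underlying ref set once per process */

  int64_t tfd = tfopen("/tmp/demo.dat", O_RDONLY);
  if (tfd < 0) { tfcleanup(); return 1; }

  char    buf[128];
  int64_t n = tfread(tfd, buf, sizeof(buf));
  if (n >= 0) printf("read %lld bytes\n", (long long)n);

  tfclose(tfd);                            /* drops the reference; the fd closes once unused */
  tfcleanup();
  return 0;
}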
+ */ + +#include "os.h" +#include "taoserror.h" +#include "tulog.h" +#include "tutil.h" +#include "tref.h" + +static int32_t tsFileRsetId = -1; + +static void taosCloseFile(void *p) { + close((int32_t)(uintptr_t)p); +} + +int32_t tfinit() { + tsFileRsetId = taosOpenRef(2000, taosCloseFile); + return tsFileRsetId; +} + +void tfcleanup() { + if (tsFileRsetId >= 0) taosCloseRef(tsFileRsetId); + tsFileRsetId = -1; +} + +int64_t tfopen(const char *pathname, int32_t flags) { + int32_t fd = open(pathname, flags); + + if (fd < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; + } + + void *p = (void *)(int64_t)fd; + int64_t rid = taosAddRef(tsFileRsetId, p); + if (rid < 0) close(fd); + + return rid; +} + +int64_t tfclose(int64_t tfd) { + return taosRemoveRef(tsFileRsetId, tfd); +} + +int64_t tfwrite(int64_t tfd, void *buf, int64_t count) { + void *p = taosAcquireRef(tsFileRsetId, tfd); + if (p == NULL) return -1; + + int32_t fd = (int32_t)(uintptr_t)p; + + int64_t ret = taosWrite(fd, buf, count); + if (ret < 0) terrno = TAOS_SYSTEM_ERROR(errno); + + taosReleaseRef(tsFileRsetId, tfd); + return ret; +} + +int64_t tfread(int64_t tfd, void *buf, int64_t count) { + void *p = taosAcquireRef(tsFileRsetId, tfd); + if (p == NULL) return -1; + + int32_t fd = (int32_t)(uintptr_t)p; + + int64_t ret = taosRead(fd, buf, count); + if (ret < 0) terrno = TAOS_SYSTEM_ERROR(errno); + + taosReleaseRef(tsFileRsetId, tfd); + return ret; +} diff --git a/src/util/src/tkvstore.c b/src/util/src/tkvstore.c index 6ba1d87d92ecf216bfde346f9d3ff1563515d34d..31641ac9a74d3fc914dcc05443da9384ba20c339 100644 --- a/src/util/src/tkvstore.c +++ b/src/util/src/tkvstore.c @@ -14,9 +14,7 @@ */ #define _DEFAULT_SOURCE - #define TAOS_RANDOM_FILE_FAIL_TEST - #include "os.h" #include "hash.h" #include "taoserror.h" @@ -188,7 +186,7 @@ int tdKVStoreStartCommit(SKVStore *pStore) { goto _err; } - if (taosTSendFile(pStore->sfd, pStore->fd, NULL, TD_KVSTORE_HEADER_SIZE) < TD_KVSTORE_HEADER_SIZE) { + if (taosSendFile(pStore->sfd, pStore->fd, NULL, TD_KVSTORE_HEADER_SIZE) < TD_KVSTORE_HEADER_SIZE) { uError("failed to send file %d bytes since %s", TD_KVSTORE_HEADER_SIZE, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); goto _err; @@ -238,6 +236,7 @@ int tdUpdateKVStoreRecord(SKVStore *pStore, uint64_t uid, void *cont, int contLe rInfo.offset = lseek(pStore->fd, 0, SEEK_CUR); if (rInfo.offset < 0) { uError("failed to lseek file %s since %s", pStore->fname, strerror(errno)); + terrno = TAOS_SYSTEM_ERROR(errno); return -1; } @@ -248,14 +247,15 @@ int tdUpdateKVStoreRecord(SKVStore *pStore, uint64_t uid, void *cont, int contLe ASSERT(tlen == POINTER_DISTANCE(pBuf, buf)); ASSERT(tlen == sizeof(SKVRecord)); - if (taosTWrite(pStore->fd, buf, tlen) < tlen) { + if (taosWrite(pStore->fd, buf, tlen) < tlen) { uError("failed to write %d bytes to file %s since %s", tlen, pStore->fname, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); return -1; } - if (taosTWrite(pStore->fd, cont, contLen) < contLen) { + if (taosWrite(pStore->fd, cont, contLen) < contLen) { uError("failed to write %d bytes to file %s since %s", contLen, pStore->fname, strerror(errno)); + terrno = TAOS_SYSTEM_ERROR(errno); return -1; } @@ -292,7 +292,7 @@ int tdDropKVStoreRecord(SKVStore *pStore, uint64_t uid) { void *pBuf = buf; tdEncodeKVRecord(&pBuf, &rInfo); - if (taosTWrite(pStore->fd, buf, POINTER_DISTANCE(pBuf, buf)) < POINTER_DISTANCE(pBuf, buf)) { + if (taosWrite(pStore->fd, buf, POINTER_DISTANCE(pBuf, buf)) < POINTER_DISTANCE(pBuf, buf)) { uError("failed to write %" PRId64 
" bytes to file %s since %s", (int64_t)(POINTER_DISTANCE(pBuf, buf)), pStore->fname, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); return -1; @@ -339,7 +339,7 @@ void tsdbGetStoreInfo(char *fname, uint32_t *magic, int64_t *size) { int fd = open(fname, O_RDONLY); if (fd < 0) goto _err; - if (taosTRead(fd, buf, TD_KVSTORE_HEADER_SIZE) < TD_KVSTORE_HEADER_SIZE) goto _err; + if (taosRead(fd, buf, TD_KVSTORE_HEADER_SIZE) < TD_KVSTORE_HEADER_SIZE) goto _err; if (!taosCheckChecksumWhole((uint8_t *)buf, TD_KVSTORE_HEADER_SIZE)) goto _err; void *pBuf = (void *)buf; @@ -368,7 +368,7 @@ static int tdLoadKVStoreHeader(int fd, char *fname, SStoreInfo *pInfo, uint32_t return -1; } - if (taosTRead(fd, buf, TD_KVSTORE_HEADER_SIZE) < TD_KVSTORE_HEADER_SIZE) { + if (taosRead(fd, buf, TD_KVSTORE_HEADER_SIZE) < TD_KVSTORE_HEADER_SIZE) { uError("failed to read %d bytes from file %s since %s", TD_KVSTORE_HEADER_SIZE, fname, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); return -1; @@ -402,7 +402,7 @@ static int tdUpdateKVStoreHeader(int fd, char *fname, SStoreInfo *pInfo) { ASSERT(POINTER_DISTANCE(pBuf, buf) + sizeof(TSCKSUM) <= TD_KVSTORE_HEADER_SIZE); taosCalcChecksumAppend(0, (uint8_t *)buf, TD_KVSTORE_HEADER_SIZE); - if (taosTWrite(fd, buf, TD_KVSTORE_HEADER_SIZE) < TD_KVSTORE_HEADER_SIZE) { + if (taosWrite(fd, buf, TD_KVSTORE_HEADER_SIZE) < TD_KVSTORE_HEADER_SIZE) { uError("failed to write %d bytes to file %s since %s", TD_KVSTORE_HEADER_SIZE, fname, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); return -1; @@ -477,9 +477,9 @@ _err: static void tdFreeKVStore(SKVStore *pStore) { if (pStore) { - taosTFree(pStore->fname); - taosTFree(pStore->fsnap); - taosTFree(pStore->fnew); + tfree(pStore->fname); + tfree(pStore->fsnap); + tfree(pStore->fnew); taosHashCleanup(pStore->map); free(pStore); } @@ -535,7 +535,7 @@ static int tdRestoreKVStore(SKVStore *pStore) { ASSERT(pStore->info.size == TD_KVSTORE_HEADER_SIZE); while (true) { - ssize_t tsize = taosTRead(pStore->fd, tbuf, sizeof(SKVRecord)); + int64_t tsize = taosRead(pStore->fd, tbuf, sizeof(SKVRecord)); if (tsize == 0) break; if (tsize < sizeof(SKVRecord)) { uError("failed to read %" PRIzu " bytes from file %s at offset %" PRId64 "since %s", sizeof(SKVRecord), pStore->fname, @@ -598,7 +598,7 @@ static int tdRestoreKVStore(SKVStore *pStore) { goto _err; } - if (taosTRead(pStore->fd, buf, (size_t)pRecord->size) < pRecord->size) { + if (taosRead(pStore->fd, buf, (size_t)pRecord->size) < pRecord->size) { uError("failed to read %" PRId64 " bytes from file %s since %s, offset %" PRId64, pRecord->size, pStore->fname, strerror(errno), pRecord->offset); terrno = TAOS_SYSTEM_ERROR(errno); @@ -618,11 +618,11 @@ static int tdRestoreKVStore(SKVStore *pStore) { if (pStore->aFunc) (*pStore->aFunc)(pStore->appH); taosHashDestroyIter(pIter); - taosTFree(buf); + tfree(buf); return 0; _err: taosHashDestroyIter(pIter); - taosTFree(buf); + tfree(buf); return -1; } diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c index 09b0933fd6e32e9b65c8c7acbb81fbfe7d5c005b..ad3a92230482ff07b1533ceaf22b9ca30f72e10b 100644 --- a/src/util/src/tlog.c +++ b/src/util/src/tlog.c @@ -287,17 +287,17 @@ static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum) { tsLogObj.fileNum = maxFileNum; taosGetLogFileName(fn); + if (strlen(fn) < LOG_FILE_NAME_LEN + 50 - 2) { strcpy(name, fn); strcat(name, ".0"); } + bool log0Exist = stat(name, &logstat0) >= 0; if (strlen(fn) < LOG_FILE_NAME_LEN + 50 - 2) { strcpy(name, fn); strcat(name, ".1"); } - - bool log0Exist = 
stat(name, &logstat0) >= 0; bool log1Exist = stat(name, &logstat1) >= 0; // if none of the log files exist, open 0, if both exists, open the old one @@ -336,11 +336,11 @@ static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum) { lseek(tsLogObj.logHandle->fd, 0, SEEK_END); sprintf(name, "==================================================\n"); - taosTWrite(tsLogObj.logHandle->fd, name, (uint32_t)strlen(name)); + taosWrite(tsLogObj.logHandle->fd, name, (uint32_t)strlen(name)); sprintf(name, " new log file \n"); - taosTWrite(tsLogObj.logHandle->fd, name, (uint32_t)strlen(name)); + taosWrite(tsLogObj.logHandle->fd, name, (uint32_t)strlen(name)); sprintf(name, "==================================================\n"); - taosTWrite(tsLogObj.logHandle->fd, name, (uint32_t)strlen(name)); + taosWrite(tsLogObj.logHandle->fd, name, (uint32_t)strlen(name)); return 0; } @@ -390,7 +390,7 @@ void taosPrintLog(const char *flags, int32_t dflag, const char *format, ...) { if (tsAsyncLog) { taosPushLogBuffer(tsLogObj.logHandle, buffer, len); } else { - taosTWrite(tsLogObj.logHandle->fd, buffer, len); + taosWrite(tsLogObj.logHandle->fd, buffer, len); } if (tsLogObj.maxLines > 0) { @@ -400,7 +400,7 @@ void taosPrintLog(const char *flags, int32_t dflag, const char *format, ...) { } } - if (dflag & DEBUG_SCREEN) taosTWrite(1, buffer, (uint32_t)len); + if (dflag & DEBUG_SCREEN) taosWrite(1, buffer, (uint32_t)len); } void taosDumpData(unsigned char *msg, int32_t len) { @@ -419,7 +419,7 @@ void taosDumpData(unsigned char *msg, int32_t len) { pos += 3; if (c >= 16) { temp[pos++] = '\n'; - taosTWrite(tsLogObj.logHandle->fd, temp, (uint32_t)pos); + taosWrite(tsLogObj.logHandle->fd, temp, (uint32_t)pos); c = 0; pos = 0; } @@ -427,9 +427,7 @@ void taosDumpData(unsigned char *msg, int32_t len) { temp[pos++] = '\n'; - taosTWrite(tsLogObj.logHandle->fd, temp, (uint32_t)pos); - - return; + taosWrite(tsLogObj.logHandle->fd, temp, (uint32_t)pos); } void taosPrintLongString(const char *flags, int32_t dflag, const char *format, ...) { @@ -467,7 +465,7 @@ void taosPrintLongString(const char *flags, int32_t dflag, const char *format, . if (tsAsyncLog) { taosPushLogBuffer(tsLogObj.logHandle, buffer, len); } else { - taosTWrite(tsLogObj.logHandle->fd, buffer, len); + taosWrite(tsLogObj.logHandle->fd, buffer, len); } if (tsLogObj.maxLines > 0) { @@ -477,7 +475,7 @@ void taosPrintLongString(const char *flags, int32_t dflag, const char *format, . 
} } - if (dflag & DEBUG_SCREEN) taosTWrite(1, buffer, (uint32_t)len); + if (dflag & DEBUG_SCREEN) taosWrite(1, buffer, (uint32_t)len); } #if 0 @@ -514,8 +512,8 @@ static SLogBuff *taosLogBuffNew(int32_t bufSize) { return tLogBuff; _err: - taosTFree(LOG_BUF_BUFFER(tLogBuff)); - taosTFree(tLogBuff); + tfree(LOG_BUF_BUFFER(tLogBuff)); + tfree(tLogBuff); return NULL; } @@ -524,7 +522,7 @@ static void taosLogBuffDestroy(SLogBuff *tLogBuff) { tsem_destroy(&(tLogBuff->buffNotEmpty)); pthread_mutex_destroy(&(tLogBuff->buffMutex)); free(tLogBuff->buffer); - taosTFree(tLogBuff); + tfree(tLogBuff); } #endif @@ -606,7 +604,7 @@ static void *taosAsyncOutputLog(void *param) { while (1) { log_size = taosPollLogBuffer(tLogBuff, tempBuffer, TSDB_DEFAULT_LOG_BUF_UNIT); if (log_size) { - taosTWrite(tLogBuff->fd, tempBuffer, log_size); + taosWrite(tLogBuff->fd, tempBuffer, log_size); LOG_BUF_START(tLogBuff) = (LOG_BUF_START(tLogBuff) + log_size) % LOG_BUF_SIZE(tLogBuff); } else { break; diff --git a/src/util/src/tmempool.c b/src/util/src/tmempool.c index a3d10355003c85d993b3a37190081436c0fb19ab..678c965eb1a7315977616778c0e4b39ceb4c7525 100644 --- a/src/util/src/tmempool.c +++ b/src/util/src/tmempool.c @@ -52,9 +52,9 @@ mpool_h taosMemPoolInit(int numOfBlock, int blockSize) { if (pool_p->pool == NULL || pool_p->freeList == NULL) { uError("failed to allocate memory\n"); - taosTFree(pool_p->freeList); - taosTFree(pool_p->pool); - taosTFree(pool_p); + tfree(pool_p->freeList); + tfree(pool_p->pool); + tfree(pool_p); return NULL; } diff --git a/src/util/src/tnettest.c b/src/util/src/tnettest.c index 3793f3d3a944cc5c8d86c0dc0c0fa5bfe3cee764..6fd526598365f831addd1bacb0b7f748d9552fdb 100644 --- a/src/util/src/tnettest.c +++ b/src/util/src/tnettest.c @@ -15,11 +15,16 @@ #include "os.h" #include "taosdef.h" +#include "taosmsg.h" #include "taoserror.h" #include "tulog.h" #include "tconfig.h" #include "tglobal.h" #include "tsocket.h" +#include "trpc.h" +#include "rpcHead.h" +#include "tutil.h" +#include "tnettest.h" #define MAX_PKG_LEN (64*1000) #define BUFFER_SIZE (MAX_PKG_LEN + 1024) @@ -30,9 +35,15 @@ typedef struct { uint16_t pktLen; } info_s; -static char serverFqdn[TSDB_FQDN_LEN]; +extern int tsRpcMaxUdpSize; + +static char g_user[TSDB_USER_LEN+1] = {0}; +static char g_pass[TSDB_PASSWORD_LEN+1] = {0}; +static char g_serverFqdn[TSDB_FQDN_LEN] = {0}; static uint16_t g_startPort = 0; static uint16_t g_endPort = 6042; +static uint32_t g_pktLen = 0; + static void *bindUdpPort(void *sarg) { info_s *pinfo = (info_s *)sarg; @@ -321,19 +332,145 @@ static void checkPort(uint32_t hostIp, uint16_t startPort, uint16_t maxPort, uin return ; } -static void taosNetTestClient(const char* serverFqdn, uint16_t startPort, uint16_t endPort, int pktLen) { - uint32_t serverIp = taosGetIpFromFqdn(serverFqdn); - if (serverIp == 0xFFFFFFFF) { - printf("Failed to resolve FQDN:%s", serverFqdn); - exit(-1); +void* tnetInitRpc(char* secretEncrypt, char spi) { + SRpcInit rpcInit; + void* pRpcConn = NULL; + + taosEncryptPass((uint8_t *)g_pass, strlen(g_pass), secretEncrypt); + + memset(&rpcInit, 0, sizeof(rpcInit)); + rpcInit.localPort = 0; + rpcInit.label = "NET-TEST"; + rpcInit.numOfThreads = 1; // every DB connection has only one thread + rpcInit.cfp = NULL; + rpcInit.sessions = 16; + rpcInit.connType = TAOS_CONN_CLIENT; + rpcInit.user = g_user; + rpcInit.idleTime = 2000; + rpcInit.ckey = "key"; + rpcInit.spi = spi; + rpcInit.secret = secretEncrypt; + + pRpcConn = rpcOpen(&rpcInit); + return pRpcConn; +} + +static int rpcCheckPortImpl(const 
char* serverFqdn, uint16_t port, uint16_t pktLen, char spi) { + SRpcEpSet epSet; + SRpcMsg reqMsg; + SRpcMsg rspMsg; + void* pRpcConn; + + char secretEncrypt[32] = {0}; + + pRpcConn = tnetInitRpc(secretEncrypt, spi); + if (NULL == pRpcConn) { + return -1; } - checkPort(serverIp, startPort, endPort, pktLen); + memset(&epSet, 0, sizeof(SRpcEpSet)); + epSet.inUse = 0; + epSet.numOfEps = 1; + epSet.port[0] = port; + strcpy(epSet.fqdn[0], serverFqdn); + + reqMsg.msgType = TSDB_MSG_TYPE_NETWORK_TEST; + reqMsg.pCont = rpcMallocCont(pktLen); + reqMsg.contLen = pktLen; + reqMsg.code = 0; + reqMsg.handle = NULL; // rpc handle returned to app + reqMsg.ahandle = NULL; // app handle set by client + + rpcSendRecv(pRpcConn, &epSet, &reqMsg, &rspMsg); + + // handle response + if ((rspMsg.code != 0) || (rspMsg.msgType != TSDB_MSG_TYPE_NETWORK_TEST + 1)) { + //printf("code:%d[%s]\n", rspMsg.code, tstrerror(rspMsg.code)); + return -1; + } + + rpcFreeCont(rspMsg.pCont); - return; + rpcClose(pRpcConn); + + return 0; +} + +static void rpcCheckPort(uint32_t hostIp) { + int ret; + char spi; + + for (uint16_t port = g_startPort; port <= g_endPort; port++) { + //printf("test: %s:%d\n", info.host, port); + printf("\n"); + + //================ check tcp port ================ + int32_t pktLen; + if (g_pktLen <= tsRpcMaxUdpSize) { + pktLen = tsRpcMaxUdpSize + 1000; + } else { + pktLen = g_pktLen; + } + + spi = 1; + ret = rpcCheckPortImpl(g_serverFqdn, port, pktLen, spi); + if (ret != 0) { + spi = 0; + ret = rpcCheckPortImpl(g_serverFqdn, port, pktLen, spi); + if (ret != 0) { + printf("TCP port:%d test fail.\t\t", port); + } else { + //printf("tcp port:%d test ok.\t\t", port); + printf("TCP port:\033[32m%d test OK\033[0m\t\t", port); + } + } else { + //printf("tcp port:%d test ok.\t\t", port); + printf("TCP port:\033[32m%d test OK\033[0m\t\t", port); + } + + //================ check udp port ================ + if (g_pktLen >= tsRpcMaxUdpSize) { + pktLen = tsRpcMaxUdpSize - 1000; + } else { + pktLen = g_pktLen; + } + + spi = 0; + ret = rpcCheckPortImpl(g_serverFqdn, port, pktLen, spi); + if (ret != 0) { + spi = 1; + ret = rpcCheckPortImpl(g_serverFqdn, port, pktLen, spi); + if (ret != 0) { + printf("udp port:%d test fail.\t\n", port); + } else { + //printf("udp port:%d test ok.\t\n", port); + printf("UDP port:\033[32m%d test OK\033[0m\t\n", port); + } + } else { + //printf("udp port:%d test ok.\t\n", port); + printf("UDP port:\033[32m%d test OK\033[0m\t\n", port); + } + } + + printf("\n"); + return ; } +static void taosNetTestClient(int flag) { + uint32_t serverIp = taosGetIpFromFqdn(g_serverFqdn); + if (serverIp == 0xFFFFFFFF) { + printf("Failed to resolve FQDN:%s", g_serverFqdn); + exit(-1); + } + if (0 == flag) { + checkPort(serverIp, g_startPort, g_endPort, g_pktLen); + } else { + rpcCheckPort(serverIp); + } + + return; +} static void taosNetTestServer(uint16_t startPort, uint16_t endPort, int pktLen) { @@ -375,49 +512,66 @@ static void taosNetTestServer(uint16_t startPort, uint16_t endPort, int pktLen) } -void taosNetTest(const char* host, uint16_t port, uint16_t endPort, int pktLen, const char* netTestRole) { - if (pktLen > MAX_PKG_LEN) { - printf("test packet len overflow: %d, max len not greater than %d bytes\n", pktLen, MAX_PKG_LEN); - exit(-1); +void taosNetTest(CmdArguments *args) { + if (0 == args->pktLen) { + g_pktLen = 1000; + } else { + g_pktLen = args->pktLen; } - if (port && endPort) { - if (port > endPort) { - printf("endPort[%d] must not lesss port[%d]\n", endPort, port); + if (args->port && 
args->endPort) { + if (args->port > args->endPort) { + printf("endPort[%d] must not lesss port[%d]\n", args->endPort, args->port); exit(-1); } } - if (host && host[0] != 0) { - if (strlen(host) >= TSDB_EP_LEN) { - printf("host invalid: %s\n", host); + if (args->host && args->host[0] != 0) { + if (strlen(args->host) >= TSDB_EP_LEN) { + printf("host invalid: %s\n", args->host); exit(-1); } - taosGetFqdnPortFromEp(host, serverFqdn, &g_startPort); + taosGetFqdnPortFromEp(args->host, g_serverFqdn, &g_startPort); } else { - tstrncpy(serverFqdn, "127.0.0.1", TSDB_IPv4ADDR_LEN); + tstrncpy(g_serverFqdn, "127.0.0.1", TSDB_IPv4ADDR_LEN); g_startPort = tsServerPort; } - if (port) { - g_startPort = port; + if (args->port) { + g_startPort = args->port; } - if (endPort) { - g_endPort = endPort; + if (args->endPort) { + g_endPort = args->endPort; } - if (port > endPort) { + if (g_startPort > g_endPort) { printf("endPort[%d] must not lesss port[%d]\n", g_endPort, g_startPort); exit(-1); } + + + if (args->is_use_passwd) { + if (args->password == NULL) args->password = getpass("Enter password: "); + } else { + args->password = TSDB_DEFAULT_PASS; + } + tstrncpy(g_pass, args->password, TSDB_PASSWORD_LEN); + + if (args->user == NULL) { + args->user = TSDB_DEFAULT_USER; + } + tstrncpy(g_user, args->user, TSDB_USER_LEN); - if (0 == strcmp("client", netTestRole)) { - printf("host: %s\tstart port: %d\tend port: %d\tpacket len: %d\n", serverFqdn, g_startPort, g_endPort, pktLen); - taosNetTestClient(serverFqdn, g_startPort, g_endPort, pktLen); - } else if (0 == strcmp("server", netTestRole)) { - taosNetTestServer(g_startPort, g_endPort, pktLen); + if (0 == strcmp("client", args->netTestRole)) { + printf("host: %s\tstart port: %d\tend port: %d\tpacket len: %d\n", g_serverFqdn, g_startPort, g_endPort, g_pktLen); + taosNetTestClient(0); + } else if (0 == strcmp("clients", args->netTestRole)) { + printf("host: %s\tstart port: %d\tend port: %d\tpacket len: %d\n", g_serverFqdn, g_startPort, g_endPort, g_pktLen); + taosNetTestClient(1); + } else if (0 == strcmp("server", args->netTestRole)) { + taosNetTestServer(g_startPort, g_endPort, g_pktLen); } } diff --git a/src/util/src/tnote.c b/src/util/src/tnote.c index 4f05277a847a07831371278915011ebc4da400c6..9536f6fb70f9fe6d74c981274c849cab0c00ef1f 100644 --- a/src/util/src/tnote.c +++ b/src/util/src/tnote.c @@ -265,7 +265,7 @@ void taosNotePrint(taosNoteInfo * pNote, const char * const format, ...) buffer[len] = 0; if (pNote->taosNoteFd >= 0) { - taosTWrite(pNote->taosNoteFd, buffer, (unsigned int)len); + taosWrite(pNote->taosNoteFd, buffer, (unsigned int)len); if (pNote->taosNoteMaxLines > 0) { pNote->taosNoteLines++; diff --git a/src/util/src/tref.c b/src/util/src/tref.c new file mode 100644 index 0000000000000000000000000000000000000000..4c1a87c96070534e25149357b766a8539e362680 --- /dev/null +++ b/src/util/src/tref.c @@ -0,0 +1,496 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
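tref.c below implements the reference-set API from tref.h earlier in the patch. A hedged usage sketch for protecting a heap-allocated resource with it; the resource type and names are illustrative, and error handling is trimmed:

#include <stdio.h>
#include <stdlib.h>
#include "tref.h"   /* taosOpenRef/taosAddRef/taosAcquireRef/... as declared above */

typedef struct SDemoConn { int sock; } SDemoConn;   /* illustrative resource */

static void demoFreeConn(void *p) { free(p); }      /* runs when the last reference drops */

static void demoUseRefSet(void) {
  int rsetId = taosOpenRef(1000, demoFreeConn);     /* 1000 is the hash modulo */
  if (rsetId < 0) return;

  SDemoConn *pConn = calloc(1, sizeof(SDemoConn));
  int64_t    rid   = taosAddRef(rsetId, pConn);     /* reference count starts at 1 */

  SDemoConn *p = taosAcquireRef(rsetId, rid);       /* count 2, or NULL if already removed */
  if (p != NULL) {
    printf("sock:%d\n", p->sock);
    taosReleaseRef(rsetId, rid);                    /* back to 1 */
  }

  taosRemoveRef(rsetId, rid);                       /* count reaches 0, demoFreeConn runs */
  taosCloseRef(rsetId);
}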
+ */ + +#include "os.h" +#include "taoserror.h" +#include "tulog.h" +#include "tutil.h" + +#define TSDB_REF_OBJECTS 50 +#define TSDB_REF_STATE_EMPTY 0 +#define TSDB_REF_STATE_ACTIVE 1 +#define TSDB_REF_STATE_DELETED 2 + +typedef struct SRefNode { + struct SRefNode *prev; // previous node + struct SRefNode *next; // next node + void *p; // pointer to resource protected, + int64_t rid; // reference ID + int32_t count; // number of references + int removed; // 1: removed +} SRefNode; + +typedef struct { + SRefNode **nodeList; // array of SRefNode linked list + int state; // 0: empty, 1: active; 2: deleted + int rsetId; // refSet ID, global unique + int64_t rid; // increase by one for each new reference + int max; // mod + int32_t count; // total number of SRefNodes in this set + int64_t *lockedBy; + void (*fp)(void *); +} SRefSet; + +static SRefSet tsRefSetList[TSDB_REF_OBJECTS]; +static pthread_once_t tsRefModuleInit = PTHREAD_ONCE_INIT; +static pthread_mutex_t tsRefMutex; +static int tsRefSetNum = 0; +static int tsNextId = 0; + +static void taosInitRefModule(void); +static void taosLockList(int64_t *lockedBy); +static void taosUnlockList(int64_t *lockedBy); +static void taosIncRsetCount(SRefSet *pSet); +static void taosDecRsetCount(SRefSet *pSet); +static int taosDecRefCount(int rsetId, int64_t rid, int remove); + +int taosOpenRef(int max, void (*fp)(void *)) +{ + SRefNode **nodeList; + SRefSet *pSet; + int64_t *lockedBy; + int i, rsetId; + + pthread_once(&tsRefModuleInit, taosInitRefModule); + + nodeList = calloc(sizeof(SRefNode *), (size_t)max); + if (nodeList == NULL) { + terrno = TSDB_CODE_REF_NO_MEMORY; + return -1; + } + + lockedBy = calloc(sizeof(int64_t), (size_t)max); + if (lockedBy == NULL) { + free(nodeList); + terrno = TSDB_CODE_REF_NO_MEMORY; + return -1; + } + + pthread_mutex_lock(&tsRefMutex); + + for (i = 0; i < TSDB_REF_OBJECTS; ++i) { + tsNextId = (tsNextId + 1) % TSDB_REF_OBJECTS; + if (tsNextId == 0) tsNextId = 1; // dont use 0 as rsetId + if (tsRefSetList[tsNextId].state == TSDB_REF_STATE_EMPTY) break; + } + + if (i < TSDB_REF_OBJECTS) { + rsetId = tsNextId; + pSet = tsRefSetList + rsetId; + pSet->max = max; + pSet->nodeList = nodeList; + pSet->lockedBy = lockedBy; + pSet->fp = fp; + pSet->rid = 1; + pSet->rsetId = rsetId; + pSet->state = TSDB_REF_STATE_ACTIVE; + taosIncRsetCount(pSet); + + tsRefSetNum++; + uTrace("rsetId:%d is opened, max:%d, fp:%p refSetNum:%d", rsetId, max, fp, tsRefSetNum); + } else { + rsetId = TSDB_CODE_REF_FULL; + free (nodeList); + free (lockedBy); + uTrace("run out of Ref ID, maximum:%d refSetNum:%d", TSDB_REF_OBJECTS, tsRefSetNum); + } + + pthread_mutex_unlock(&tsRefMutex); + + return rsetId; +} + +int taosCloseRef(int rsetId) +{ + SRefSet *pSet; + int deleted = 0; + + if (rsetId < 0 || rsetId >= TSDB_REF_OBJECTS) { + uTrace("rsetId:%d is invalid, out of range", rsetId); + terrno = TSDB_CODE_REF_INVALID_ID; + return -1; + } + + pSet = tsRefSetList + rsetId; + + pthread_mutex_lock(&tsRefMutex); + + if (pSet->state == TSDB_REF_STATE_ACTIVE) { + pSet->state = TSDB_REF_STATE_DELETED; + deleted = 1; + uTrace("rsetId:%d is closed, count:%d", rsetId, pSet->count); + } else { + uTrace("rsetId:%d is already closed, count:%d", rsetId, pSet->count); + } + + pthread_mutex_unlock(&tsRefMutex); + + if (deleted) taosDecRsetCount(pSet); + + return 0; +} + +int64_t taosAddRef(int rsetId, void *p) +{ + int hash; + SRefNode *pNode; + SRefSet *pSet; + int64_t rid = 0; + + if (rsetId < 0 || rsetId >= TSDB_REF_OBJECTS) { + uTrace("rsetId:%d p:%p failed to add, 
rsetId not valid", rsetId, p); + terrno = TSDB_CODE_REF_INVALID_ID; + return -1; + } + + pSet = tsRefSetList + rsetId; + taosIncRsetCount(pSet); + if (pSet->state != TSDB_REF_STATE_ACTIVE) { + taosDecRsetCount(pSet); + uTrace("rsetId:%d p:%p failed to add, not active", rsetId, p); + terrno = TSDB_CODE_REF_ID_REMOVED; + return -1; + } + + pNode = calloc(sizeof(SRefNode), 1); + if (pNode == NULL) { + terrno = TSDB_CODE_REF_NO_MEMORY; + return -1; + } + + rid = atomic_add_fetch_64(&pSet->rid, 1); + hash = rid % pSet->max; + taosLockList(pSet->lockedBy+hash); + + pNode->p = p; + pNode->rid = rid; + pNode->count = 1; + + pNode->prev = NULL; + pNode->next = pSet->nodeList[hash]; + if (pSet->nodeList[hash]) pSet->nodeList[hash]->prev = pNode; + pSet->nodeList[hash] = pNode; + + uTrace("rsetId:%d p:%p rid:%" PRId64 " is added, count:%d", rsetId, p, rid, pSet->count); + + taosUnlockList(pSet->lockedBy+hash); + + return rid; +} + +int taosRemoveRef(int rsetId, int64_t rid) +{ + return taosDecRefCount(rsetId, rid, 1); +} + +// if rid is 0, return the first p in hash list, otherwise, return the next after current rid +void *taosAcquireRef(int rsetId, int64_t rid) +{ + int hash; + SRefNode *pNode; + SRefSet *pSet; + void *p = NULL; + + if (rsetId < 0 || rsetId >= TSDB_REF_OBJECTS) { + uTrace("rsetId:%d rid:%" PRId64 " failed to acquire, rsetId not valid", rsetId, rid); + terrno = TSDB_CODE_REF_INVALID_ID; + return NULL; + } + + if (rid <= 0) { + uTrace("rsetId:%d rid:%" PRId64 " failed to acquire, rid not valid", rsetId, rid); + terrno = TSDB_CODE_REF_NOT_EXIST; + return NULL; + } + + pSet = tsRefSetList + rsetId; + taosIncRsetCount(pSet); + if (pSet->state != TSDB_REF_STATE_ACTIVE) { + uTrace("rsetId:%d rid:%" PRId64 " failed to acquire, not active", rsetId, rid); + taosDecRsetCount(pSet); + terrno = TSDB_CODE_REF_ID_REMOVED; + return NULL; + } + + hash = rid % pSet->max; + taosLockList(pSet->lockedBy+hash); + + pNode = pSet->nodeList[hash]; + + while (pNode) { + if (pNode->rid == rid) { + break; + } + + pNode = pNode->next; + } + + if (pNode) { + if (pNode->removed == 0) { + pNode->count++; + p = pNode->p; + uTrace("rsetId:%d p:%p rid:%" PRId64 " is acquired", rsetId, pNode->p, rid); + } else { + terrno = TSDB_CODE_REF_NOT_EXIST; + uTrace("rsetId:%d p:%p rid:%" PRId64 " is already removed, failed to acquire", rsetId, pNode->p, rid); + } + } else { + terrno = TSDB_CODE_REF_NOT_EXIST; + uTrace("rsetId:%d rid:%" PRId64 " is not there, failed to acquire", rsetId, rid); + } + + taosUnlockList(pSet->lockedBy+hash); + + taosDecRsetCount(pSet); + + return p; +} + +int taosReleaseRef(int rsetId, int64_t rid) +{ + return taosDecRefCount(rsetId, rid, 0); +} + +// if rid is 0, return the first p in hash list, otherwise, return the next after current rid +void *taosIterateRef(int rsetId, int64_t rid) { + SRefNode *pNode = NULL; + SRefSet *pSet; + + if (rsetId < 0 || rsetId >= TSDB_REF_OBJECTS) { + uTrace("rsetId:%d rid:%" PRId64 " failed to iterate, rsetId not valid", rsetId, rid); + terrno = TSDB_CODE_REF_INVALID_ID; + return NULL; + } + + if (rid <= 0) { + uTrace("rsetId:%d rid:%" PRId64 " failed to iterate, rid not valid", rsetId, rid); + terrno = TSDB_CODE_REF_NOT_EXIST; + return NULL; + } + + pSet = tsRefSetList + rsetId; + taosIncRsetCount(pSet); + if (pSet->state != TSDB_REF_STATE_ACTIVE) { + uTrace("rsetId:%d rid:%" PRId64 " failed to iterate, rset not active", rsetId, rid); + terrno = TSDB_CODE_REF_ID_REMOVED; + taosDecRsetCount(pSet); + return NULL; + } + + int hash = 0; + if (rid > 0) { + hash = rid % 
pSet->max; + taosLockList(pSet->lockedBy+hash); + + pNode = pSet->nodeList[hash]; + while (pNode) { + if (pNode->rid == rid) break; + pNode = pNode->next; + } + + if (pNode == NULL) { + uError("rsetId:%d rid:%" PRId64 " not there, quit", rsetId, rid); + terrno = TSDB_CODE_REF_NOT_EXIST; + taosUnlockList(pSet->lockedBy+hash); + return NULL; + } + + // rid is there + pNode = pNode->next; + if (pNode == NULL) { + taosUnlockList(pSet->lockedBy+hash); + hash++; + } + } + + if (pNode == NULL) { + for (; hash < pSet->max; ++hash) { + taosLockList(pSet->lockedBy+hash); + pNode = pSet->nodeList[hash]; + if (pNode) break; + taosUnlockList(pSet->lockedBy+hash); + } + } + + void *newP = NULL; + if (pNode) { + pNode->count++; // acquire it + newP = pNode->p; + taosUnlockList(pSet->lockedBy+hash); + uTrace("rsetId:%d p:%p rid:%" PRId64 " is returned", rsetId, newP, rid); + } else { + uTrace("rsetId:%d the list is over", rsetId); + } + + if (rid > 0) taosReleaseRef(rsetId, rid); // release the current one + + taosDecRsetCount(pSet); + + return newP; +} + +int taosListRef() { + SRefSet *pSet; + SRefNode *pNode; + int num = 0; + + pthread_mutex_lock(&tsRefMutex); + + for (int i = 0; i < TSDB_REF_OBJECTS; ++i) { + pSet = tsRefSetList + i; + + if (pSet->state == TSDB_REF_STATE_EMPTY) + continue; + + uInfo("rsetId:%d state:%d count::%d", i, pSet->state, pSet->count); + + for (int j=0; j < pSet->max; ++j) { + pNode = pSet->nodeList[j]; + + while (pNode) { + uInfo("rsetId:%d p:%p rid:%" PRId64 "count:%d", i, pNode->p, pNode->rid, pNode->count); + pNode = pNode->next; + num++; + } + } + } + + pthread_mutex_unlock(&tsRefMutex); + + return num; +} + +static int taosDecRefCount(int rsetId, int64_t rid, int remove) { + int hash; + SRefSet *pSet; + SRefNode *pNode; + int released = 0; + int code = 0; + + if (rsetId < 0 || rsetId >= TSDB_REF_OBJECTS) { + uTrace("rsetId:%d rid:%" PRId64 " failed to remove, rsetId not valid", rsetId, rid); + terrno = TSDB_CODE_REF_INVALID_ID; + return -1; + } + + if (rid <= 0) { + uTrace("rsetId:%d rid:%" PRId64 " failed to remove, rid not valid", rsetId, rid); + terrno = TSDB_CODE_REF_NOT_EXIST; + return -1; + } + + pSet = tsRefSetList + rsetId; + if (pSet->state == TSDB_REF_STATE_EMPTY) { + uTrace("rsetId:%d rid:%" PRId64 " failed to remove, cleaned", rsetId, rid); + terrno = TSDB_CODE_REF_ID_REMOVED; + return -1; + } + + hash = rid % pSet->max; + taosLockList(pSet->lockedBy+hash); + + pNode = pSet->nodeList[hash]; + while (pNode) { + if (pNode->rid == rid) + break; + + pNode = pNode->next; + } + + if (pNode) { + pNode->count--; + if (remove) pNode->removed = 1; + + if (pNode->count <= 0) { + if (pNode->prev) { + pNode->prev->next = pNode->next; + } else { + pSet->nodeList[hash] = pNode->next; + } + + if (pNode->next) { + pNode->next->prev = pNode->prev; + } + + (*pSet->fp)(pNode->p); + + uTrace("rsetId:%d p:%p rid:%" PRId64 " is removed, count:%d, free mem: %p", rsetId, pNode->p, rid, pSet->count, pNode); + free(pNode); + released = 1; + } else { + uTrace("rsetId:%d p:%p rid:%" PRId64 " is released, count:%d", rsetId, pNode->p, rid, pNode->count); + } + } else { + uTrace("rsetId:%d rid:%" PRId64 " is not there, failed to release/remove", rsetId, rid); + terrno = TSDB_CODE_REF_NOT_EXIST; + code = -1; + } + + taosUnlockList(pSet->lockedBy+hash); + + if (released) taosDecRsetCount(pSet); + + return code; +} + +static void taosLockList(int64_t *lockedBy) { + int64_t tid = taosGetPthreadId(); + int i = 0; + while (atomic_val_compare_exchange_64(lockedBy, 0, tid) != 0) { + if (++i % 100 == 
0) { + sched_yield(); + } + } +} + +static void taosUnlockList(int64_t *lockedBy) { + int64_t tid = taosGetPthreadId(); + if (atomic_val_compare_exchange_64(lockedBy, tid, 0) != tid) { + assert(false); + } +} + +static void taosInitRefModule(void) { + pthread_mutex_init(&tsRefMutex, NULL); +} + +static void taosIncRsetCount(SRefSet *pSet) { + atomic_add_fetch_32(&pSet->count, 1); + // uTrace("rsetId:%d inc count:%d", pSet->rsetId, count); +} + +static void taosDecRsetCount(SRefSet *pSet) { + int32_t count = atomic_sub_fetch_32(&pSet->count, 1); + // uTrace("rsetId:%d dec count:%d", pSet->rsetId, count); + + if (count > 0) return; + + pthread_mutex_lock(&tsRefMutex); + + if (pSet->state != TSDB_REF_STATE_EMPTY) { + pSet->state = TSDB_REF_STATE_EMPTY; + pSet->max = 0; + pSet->fp = NULL; + + tfree(pSet->nodeList); + tfree(pSet->lockedBy); + + tsRefSetNum--; + uTrace("rsetId:%d is cleaned, refSetNum:%d count:%d", pSet->rsetId, tsRefSetNum, pSet->count); + } + + pthread_mutex_unlock(&tsRefMutex); +} + diff --git a/src/util/src/tskiplist.c b/src/util/src/tskiplist.c index bacdaef6c8da1e42218331aaa34d25aa40ab5dc4..a36f7f0261ddb34cbfa82a5bc2485c6e9c45f03a 100644 --- a/src/util/src/tskiplist.c +++ b/src/util/src/tskiplist.c @@ -13,13 +13,508 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ +#include "tskiplist.h" #include "os.h" +#include "tcompare.h" #include "tulog.h" -#include "tskiplist.h" #include "tutil.h" -#include "tcompare.h" -UNUSED_FUNC static FORCE_INLINE void recordNodeEachLevel(SSkipList *pSkipList, int32_t level) { // record link count in each level +static int initForwardBackwardPtr(SSkipList *pSkipList); +static SSkipListNode * getPriorNode(SSkipList *pSkipList, const char *val, int32_t order, SSkipListNode **pCur); +static void tSkipListRemoveNodeImpl(SSkipList *pSkipList, SSkipListNode *pNode); +static void tSkipListCorrectLevel(SSkipList *pSkipList); +static SSkipListIterator *doCreateSkipListIterator(SSkipList *pSkipList, int32_t order); +static void tSkipListDoInsert(SSkipList *pSkipList, SSkipListNode **direction, SSkipListNode *pNode, bool isForward); +static bool tSkipListGetPosToPut(SSkipList *pSkipList, SSkipListNode **backward, void *pData); +static SSkipListNode *tSkipListNewNode(uint8_t level); +#define tSkipListFreeNode(n) tfree((n)) +static SSkipListNode *tSkipListPutImpl(SSkipList *pSkipList, void *pData, SSkipListNode **direction, bool isForward, + bool hasDup); + +static FORCE_INLINE int tSkipListWLock(SSkipList *pSkipList); +static FORCE_INLINE int tSkipListRLock(SSkipList *pSkipList); +static FORCE_INLINE int tSkipListUnlock(SSkipList *pSkipList); +static FORCE_INLINE int32_t getSkipListRandLevel(SSkipList *pSkipList); + +SSkipList *tSkipListCreate(uint8_t maxLevel, uint8_t keyType, uint16_t keyLen, __compar_fn_t comparFn, uint8_t flags, + __sl_key_fn_t fn) { + SSkipList *pSkipList = (SSkipList *)calloc(1, sizeof(SSkipList)); + if (pSkipList == NULL) return NULL; + + if (maxLevel > MAX_SKIP_LIST_LEVEL) { + maxLevel = MAX_SKIP_LIST_LEVEL; + } + + pSkipList->maxLevel = maxLevel; + pSkipList->type = keyType; + pSkipList->len = keyLen; + pSkipList->flags = flags; + pSkipList->keyFn = fn; + if (comparFn == NULL) { + pSkipList->comparFn = getKeyComparFunc(keyType); + } else { + pSkipList->comparFn = comparFn; + } + + if (initForwardBackwardPtr(pSkipList) < 0) { + tSkipListDestroy(pSkipList); + return NULL; + } + + if (SL_IS_THREAD_SAFE(pSkipList)) { + pSkipList->lock = (pthread_rwlock_t 
*)calloc(1, sizeof(pthread_rwlock_t)); + if (pSkipList->lock == NULL) { + tSkipListDestroy(pSkipList); + return NULL; + } + + if (pthread_rwlock_init(pSkipList->lock, NULL) != 0) { + tSkipListDestroy(pSkipList); + return NULL; + } + } + + srand((uint32_t)time(NULL)); + +#if SKIP_LIST_RECORD_PERFORMANCE + pSkipList->state.nTotalMemSize += sizeof(SSkipList); +#endif + + return pSkipList; +} + +void tSkipListDestroy(SSkipList *pSkipList) { + if (pSkipList == NULL) return; + + tSkipListWLock(pSkipList); + + SSkipListNode *pNode = SL_NODE_GET_FORWARD_POINTER(pSkipList->pHead, 0); + + while (pNode != pSkipList->pTail) { + SSkipListNode *pTemp = pNode; + pNode = SL_NODE_GET_FORWARD_POINTER(pNode, 0); + tSkipListFreeNode(pTemp); + } + + tSkipListUnlock(pSkipList); + if (pSkipList->lock != NULL) { + pthread_rwlock_destroy(pSkipList->lock); + tfree(pSkipList->lock); + } + + tSkipListFreeNode(pSkipList->pHead); + tSkipListFreeNode(pSkipList->pTail); + tfree(pSkipList); +} + +SSkipListNode *tSkipListPut(SSkipList *pSkipList, void *pData) { + if (pSkipList == NULL || pData == NULL) return NULL; + + SSkipListNode *backward[MAX_SKIP_LIST_LEVEL] = {0}; + SSkipListNode *pNode = NULL; + + tSkipListWLock(pSkipList); + + bool hasDup = tSkipListGetPosToPut(pSkipList, backward, pData); + pNode = tSkipListPutImpl(pSkipList, pData, backward, false, hasDup); + + tSkipListUnlock(pSkipList); + + return pNode; +} + +// Put a batch of data into skiplist. The batch of data must be in ascending order +void tSkipListPutBatch(SSkipList *pSkipList, void **ppData, int ndata) { + SSkipListNode *backward[MAX_SKIP_LIST_LEVEL] = {0}; + SSkipListNode *forward[MAX_SKIP_LIST_LEVEL] = {0}; + bool hasDup = false; + char * pKey = NULL; + char * pDataKey = NULL; + int compare = 0; + + tSkipListWLock(pSkipList); + + // backward to put the first data + hasDup = tSkipListGetPosToPut(pSkipList, backward, ppData[0]); + tSkipListPutImpl(pSkipList, ppData[0], backward, false, hasDup); + + for (int level = 0; level < pSkipList->maxLevel; level++) { + forward[level] = SL_NODE_GET_BACKWARD_POINTER(backward[level], level); + } + + // forward to put the rest of data + for (int idata = 1; idata < ndata; idata++) { + pDataKey = pSkipList->keyFn(ppData[idata]); + hasDup = false; + + // Compare max key + pKey = SL_GET_MAX_KEY(pSkipList); + compare = pSkipList->comparFn(pDataKey, pKey); + if (compare > 0) { + for (int i = 0; i < pSkipList->maxLevel; i++) { + forward[i] = SL_NODE_GET_BACKWARD_POINTER(pSkipList->pTail, i); + } + } else { + SSkipListNode *px = pSkipList->pHead; + for (int i = pSkipList->maxLevel - 1; i >= 0; --i) { + if (i < pSkipList->level) { + // set new px + if (forward[i] != pSkipList->pHead) { + if (px == pSkipList->pHead || + pSkipList->comparFn(SL_GET_NODE_KEY(pSkipList, forward[i]), SL_GET_NODE_KEY(pSkipList, px)) > 0) { + px = forward[i]; + } + } + + SSkipListNode *p = SL_NODE_GET_FORWARD_POINTER(px, i); + while (p != pSkipList->pTail) { + pKey = SL_GET_NODE_KEY(pSkipList, p); + + compare = pSkipList->comparFn(pKey, pDataKey); + if (compare >= 0) { + if (compare == 0 && !hasDup) hasDup = true; + break; + } else { + px = p; + p = SL_NODE_GET_FORWARD_POINTER(px, i); + } + } + } + + forward[i] = px; + } + } + + tSkipListPutImpl(pSkipList, ppData[idata], forward, true, hasDup); + } + + tSkipListUnlock(pSkipList); +} + +uint32_t tSkipListRemove(SSkipList *pSkipList, SSkipListKey key) { + uint32_t count = 0; + + tSkipListWLock(pSkipList); + + SSkipListNode *pNode = getPriorNode(pSkipList, key, TSDB_ORDER_ASC, NULL); + while (1) { + 
SSkipListNode *p = SL_NODE_GET_FORWARD_POINTER(pNode, 0); + if (p == pSkipList->pTail) { + break; + } + if (pSkipList->comparFn(key, SL_GET_NODE_KEY(pSkipList, p)) != 0) { + break; + } + + tSkipListRemoveNodeImpl(pSkipList, p); + + ++count; + } + + tSkipListCorrectLevel(pSkipList); + + tSkipListUnlock(pSkipList); + + return count; +} + +SArray *tSkipListGet(SSkipList *pSkipList, SSkipListKey key) { + SArray *sa = taosArrayInit(1, POINTER_BYTES); + + tSkipListRLock(pSkipList); + + SSkipListNode *pNode = getPriorNode(pSkipList, key, TSDB_ORDER_ASC, NULL); + while (1) { + SSkipListNode *p = SL_NODE_GET_FORWARD_POINTER(pNode, 0); + if (p == pSkipList->pTail) { + break; + } + if (pSkipList->comparFn(key, SL_GET_NODE_KEY(pSkipList, p)) != 0) { + break; + } + taosArrayPush(sa, &p); + pNode = p; + } + + tSkipListUnlock(pSkipList); + + return sa; +} + +void tSkipListRemoveNode(SSkipList *pSkipList, SSkipListNode *pNode) { + tSkipListWLock(pSkipList); + tSkipListRemoveNodeImpl(pSkipList, pNode); + tSkipListCorrectLevel(pSkipList); + tSkipListUnlock(pSkipList); +} + +SSkipListIterator *tSkipListCreateIter(SSkipList *pSkipList) { + if (pSkipList == NULL) return NULL; + + return doCreateSkipListIterator(pSkipList, TSDB_ORDER_ASC); +} + +SSkipListIterator *tSkipListCreateIterFromVal(SSkipList *pSkipList, const char *val, int32_t type, int32_t order) { + ASSERT(order == TSDB_ORDER_ASC || order == TSDB_ORDER_DESC); + ASSERT(pSkipList != NULL); + + SSkipListIterator *iter = doCreateSkipListIterator(pSkipList, order); + if (val == NULL) { + return iter; + } + + tSkipListRLock(pSkipList); + + iter->cur = getPriorNode(pSkipList, val, order, &(iter->next)); + + tSkipListUnlock(pSkipList); + + return iter; +} + +bool tSkipListIterNext(SSkipListIterator *iter) { + if (iter->pSkipList == NULL) return false; + + SSkipList *pSkipList = iter->pSkipList; + + tSkipListRLock(pSkipList); + + if (iter->order == TSDB_ORDER_ASC) { + if (iter->cur == pSkipList->pTail) return false; + iter->cur = SL_NODE_GET_FORWARD_POINTER(iter->cur, 0); + + // a new node is inserted into between iter->cur and iter->next, ignore it + if (iter->cur != iter->next && (iter->next != NULL)) { + iter->cur = iter->next; + } + + iter->next = SL_NODE_GET_FORWARD_POINTER(iter->cur, 0); + iter->step++; + } else { + if (iter->cur == pSkipList->pHead) return false; + iter->cur = SL_NODE_GET_BACKWARD_POINTER(iter->cur, 0); + + // a new node is inserted into between iter->cur and iter->next, ignore it + if (iter->cur != iter->next && (iter->next != NULL)) { + iter->cur = iter->next; + } + + iter->next = SL_NODE_GET_BACKWARD_POINTER(iter->cur, 0); + iter->step++; + } + + tSkipListUnlock(pSkipList); + + return (iter->order == TSDB_ORDER_ASC) ? 
(iter->cur != pSkipList->pTail) : (iter->cur != pSkipList->pHead); +} + +SSkipListNode *tSkipListIterGet(SSkipListIterator *iter) { + if (iter == NULL || iter->cur == iter->pSkipList->pTail || iter->cur == iter->pSkipList->pHead) { + return NULL; + } else { + return iter->cur; + } +} + +void *tSkipListDestroyIter(SSkipListIterator *iter) { + if (iter == NULL) { + return NULL; + } + + tfree(iter); + return NULL; +} + +void tSkipListPrint(SSkipList *pSkipList, int16_t nlevel) { + if (pSkipList == NULL || pSkipList->level < nlevel || nlevel <= 0) { + return; + } + + SSkipListNode *p = SL_NODE_GET_FORWARD_POINTER(pSkipList->pHead, nlevel - 1); + + int32_t id = 1; + char * prev = NULL; + + while (p != pSkipList->pTail) { + char *key = SL_GET_NODE_KEY(pSkipList, p); + if (prev != NULL) { + ASSERT(pSkipList->comparFn(prev, key) < 0); + } + + switch (pSkipList->type) { + case TSDB_DATA_TYPE_INT: + fprintf(stdout, "%d: %d\n", id++, *(int32_t *)key); + break; + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_BIGINT: + fprintf(stdout, "%d: %" PRId64 " \n", id++, *(int64_t *)key); + break; + case TSDB_DATA_TYPE_BINARY: + fprintf(stdout, "%d: %s \n", id++, key); + break; + case TSDB_DATA_TYPE_DOUBLE: + fprintf(stdout, "%d: %lf \n", id++, *(double *)key); + break; + default: + fprintf(stdout, "\n"); + } + + prev = SL_GET_NODE_KEY(pSkipList, p); + + p = SL_NODE_GET_FORWARD_POINTER(p, nlevel - 1); + } +} + +static void tSkipListDoInsert(SSkipList *pSkipList, SSkipListNode **direction, SSkipListNode *pNode, bool isForward) { + for (int32_t i = 0; i < pNode->level; ++i) { + SSkipListNode *x = direction[i]; + if (isForward) { + SL_NODE_GET_BACKWARD_POINTER(pNode, i) = x; + + SSkipListNode *next = SL_NODE_GET_FORWARD_POINTER(x, i); + SL_NODE_GET_BACKWARD_POINTER(next, i) = pNode; + + SL_NODE_GET_FORWARD_POINTER(pNode, i) = next; + SL_NODE_GET_FORWARD_POINTER(x, i) = pNode; + } else { + SL_NODE_GET_FORWARD_POINTER(pNode, i) = x; + + SSkipListNode *prev = SL_NODE_GET_BACKWARD_POINTER(x, i); + SL_NODE_GET_FORWARD_POINTER(prev, i) = pNode; + + SL_NODE_GET_BACKWARD_POINTER(pNode, i) = prev; + SL_NODE_GET_BACKWARD_POINTER(x, i) = pNode; + } + } + + if (pSkipList->level < pNode->level) pSkipList->level = pNode->level; + + pSkipList->size += 1; +} + +static SSkipListIterator *doCreateSkipListIterator(SSkipList *pSkipList, int32_t order) { + SSkipListIterator *iter = calloc(1, sizeof(SSkipListIterator)); + + iter->pSkipList = pSkipList; + iter->order = order; + if (order == TSDB_ORDER_ASC) { + iter->cur = pSkipList->pHead; + iter->next = SL_NODE_GET_FORWARD_POINTER(iter->cur, 0); + } else { + iter->cur = pSkipList->pTail; + iter->next = SL_NODE_GET_BACKWARD_POINTER(iter->cur, 0); + } + + return iter; +} + +static FORCE_INLINE int tSkipListWLock(SSkipList *pSkipList) { + if (pSkipList->lock) { + return pthread_rwlock_wrlock(pSkipList->lock); + } + return 0; +} + +static FORCE_INLINE int tSkipListRLock(SSkipList *pSkipList) { + if (pSkipList->lock) { + return pthread_rwlock_rdlock(pSkipList->lock); + } + return 0; +} + +static FORCE_INLINE int tSkipListUnlock(SSkipList *pSkipList) { + if (pSkipList->lock) { + return pthread_rwlock_unlock(pSkipList->lock); + } + return 0; +} + +static bool tSkipListGetPosToPut(SSkipList *pSkipList, SSkipListNode **backward, void *pData) { + int compare = 0; + bool hasDupKey = false; + char * pDataKey = pSkipList->keyFn(pData); + + if (pSkipList->size == 0) { + for (int i = 0; i < pSkipList->maxLevel; i++) { + backward[i] = pSkipList->pTail; + } + } else 
{ + char *pKey = NULL; + + // Compare max key + pKey = SL_GET_MAX_KEY(pSkipList); + compare = pSkipList->comparFn(pDataKey, pKey); + if (compare >= 0) { + for (int i = 0; i < pSkipList->maxLevel; i++) { + backward[i] = pSkipList->pTail; + } + + return (compare == 0); + } + + // Compare min key + pKey = SL_GET_MIN_KEY(pSkipList); + compare = pSkipList->comparFn(pDataKey, pKey); + if (compare < 0) { + for (int i = 0; i < pSkipList->maxLevel; i++) { + backward[i] = SL_NODE_GET_FORWARD_POINTER(pSkipList->pHead, i); + } + + return (compare == 0); + } + + SSkipListNode *px = pSkipList->pTail; + for (int i = pSkipList->maxLevel - 1; i >= 0; --i) { + if (i < pSkipList->level) { + SSkipListNode *p = SL_NODE_GET_BACKWARD_POINTER(px, i); + while (p != pSkipList->pHead) { + pKey = SL_GET_NODE_KEY(pSkipList, p); + + compare = pSkipList->comparFn(pKey, pDataKey); + if (compare <= 0) { + if (compare == 0 && !hasDupKey) hasDupKey = true; + break; + } else { + px = p; + p = SL_NODE_GET_BACKWARD_POINTER(px, i); + } + } + } + + backward[i] = px; + } + } + + return hasDupKey; +} + +static void tSkipListRemoveNodeImpl(SSkipList *pSkipList, SSkipListNode *pNode) { + int32_t level = pNode->level; + uint8_t dupMode = SL_DUP_MODE(pSkipList); + ASSERT(dupMode != SL_DISCARD_DUP_KEY && dupMode != SL_UPDATE_DUP_KEY); + + for (int32_t j = level - 1; j >= 0; --j) { + SSkipListNode *prev = SL_NODE_GET_BACKWARD_POINTER(pNode, j); + SSkipListNode *next = SL_NODE_GET_FORWARD_POINTER(pNode, j); + + SL_NODE_GET_FORWARD_POINTER(prev, j) = next; + SL_NODE_GET_BACKWARD_POINTER(next, j) = prev; + } + + tSkipListFreeNode(pNode); + pSkipList->size--; +} + +// Function must be called after calling tSkipListRemoveNodeImpl() function +static void tSkipListCorrectLevel(SSkipList *pSkipList) { + while (pSkipList->level > 0 && SL_NODE_GET_FORWARD_POINTER(pSkipList->pHead, pSkipList->level - 1) == pSkipList->pTail) { + pSkipList->level -= 1; + } +} + +UNUSED_FUNC static FORCE_INLINE void recordNodeEachLevel(SSkipList *pSkipList, + int32_t level) { // record link count in each level #if SKIP_LIST_RECORD_PERFORMANCE for (int32_t i = 0; i < level; ++i) { pSkipList->state.nLevelNodeCnt[i]++; @@ -47,40 +542,28 @@ static FORCE_INLINE int32_t getSkipListNodeRandomHeight(SSkipList *pSkipList) { } static FORCE_INLINE int32_t getSkipListRandLevel(SSkipList *pSkipList) { - int32_t level = getSkipListNodeRandomHeight(pSkipList); + int32_t level = 0; if (pSkipList->size == 0) { level = 1; - pSkipList->level = 1; } else { + level = getSkipListNodeRandomHeight(pSkipList); if (level > pSkipList->level) { if (pSkipList->level < pSkipList->maxLevel) { - level = (++pSkipList->level); + level = pSkipList->level + 1; } else { level = pSkipList->level; } } } - - assert(level <= pSkipList->maxLevel); + + ASSERT(level <= pSkipList->maxLevel); return level; } -#define DO_MEMSET_PTR_AREA(n) do {\ -int32_t _l = (n)->level;\ -memset(pNode, 0, SL_NODE_HEADER_SIZE(_l));\ -(n)->level = _l;\ -} while(0) - -static void tSkipListDoInsert(SSkipList *pSkipList, SSkipListNode **forward, SSkipListNode *pNode); -static SSkipListNode* tSkipListPushBack(SSkipList *pSkipList, SSkipListNode *pNode); -static SSkipListNode* tSkipListPushFront(SSkipList* pSkipList, SSkipListNode *pNode); -static SSkipListIterator* doCreateSkipListIterator(SSkipList *pSkipList, int32_t order); - - // when order is TSDB_ORDER_ASC, return the last node with key less than val // when order is TSDB_ORDER_DESC, return the first node with key large than val -static SSkipListNode* getPriorNode(SSkipList* 
pSkipList, const char* val, int32_t order, SSkipListNode** pCur) { - __compar_fn_t comparFn = pSkipList->comparFn; +static SSkipListNode *getPriorNode(SSkipList *pSkipList, const char *val, int32_t order, SSkipListNode **pCur) { + __compar_fn_t comparFn = pSkipList->comparFn; SSkipListNode *pNode = NULL; if (pCur != NULL) { *pCur = NULL; @@ -89,12 +572,12 @@ static SSkipListNode* getPriorNode(SSkipList* pSkipList, const char* val, int32_ if (order == TSDB_ORDER_ASC) { pNode = pSkipList->pHead; for (int32_t i = pSkipList->level - 1; i >= 0; --i) { - SSkipListNode *p = SL_GET_FORWARD_POINTER(pNode, i); + SSkipListNode *p = SL_NODE_GET_FORWARD_POINTER(pNode, i); while (p != pSkipList->pTail) { char *key = SL_GET_NODE_KEY(pSkipList, p); if (comparFn(key, val) < 0) { pNode = p; - p = SL_GET_FORWARD_POINTER(p, i); + p = SL_NODE_GET_FORWARD_POINTER(p, i); } else { if (pCur != NULL) { *pCur = p; @@ -106,12 +589,12 @@ static SSkipListNode* getPriorNode(SSkipList* pSkipList, const char* val, int32_ } else { pNode = pSkipList->pTail; for (int32_t i = pSkipList->level - 1; i >= 0; --i) { - SSkipListNode *p = SL_GET_BACKWARD_POINTER(pNode, i); + SSkipListNode *p = SL_NODE_GET_BACKWARD_POINTER(pNode, i); while (p != pSkipList->pHead) { char *key = SL_GET_NODE_KEY(pSkipList, p); if (comparFn(key, val) > 0) { pNode = p; - p = SL_GET_BACKWARD_POINTER(p, i); + p = SL_NODE_GET_BACKWARD_POINTER(p, i); } else { if (pCur != NULL) { *pCur = p; @@ -125,213 +608,62 @@ static SSkipListNode* getPriorNode(SSkipList* pSkipList, const char* val, int32_ return pNode; } - -static bool initForwardBackwardPtr(SSkipList* pSkipList) { +static int initForwardBackwardPtr(SSkipList *pSkipList) { uint32_t maxLevel = pSkipList->maxLevel; - - // head info - pSkipList->pHead = (SSkipListNode *)calloc(1, SL_NODE_HEADER_SIZE(maxLevel) * 2); - if (pSkipList->pHead == NULL) { - return false; - } - - pSkipList->pHead->level = pSkipList->maxLevel; - - // tail info - pSkipList->pTail = (SSkipListNode*) ((char*) pSkipList->pHead + SL_NODE_HEADER_SIZE(maxLevel)); - pSkipList->pTail->level = pSkipList->maxLevel; - - for (uint32_t i = 0; i < maxLevel; ++i) { - SL_GET_FORWARD_POINTER(pSkipList->pHead, i) = pSkipList->pTail; - SL_GET_BACKWARD_POINTER(pSkipList->pTail, i) = pSkipList->pHead; - } - - return true; -} - -SSkipList *tSkipListCreate(uint8_t maxLevel, uint8_t keyType, uint8_t keyLen, uint8_t dupKey, uint8_t lock, - uint8_t freeNode, __sl_key_fn_t fn) { - SSkipList *pSkipList = (SSkipList *)calloc(1, sizeof(SSkipList)); - if (pSkipList == NULL) { - return NULL; - } - if (maxLevel > MAX_SKIP_LIST_LEVEL) { - maxLevel = MAX_SKIP_LIST_LEVEL; - } + // head info + pSkipList->pHead = tSkipListNewNode(maxLevel); + if (pSkipList->pHead == NULL) return -1; - pSkipList->keyInfo.type = keyType; - pSkipList->keyInfo.len = keyLen; - pSkipList->keyInfo.dupKey = dupKey; - pSkipList->keyInfo.freeNode = freeNode; - - pSkipList->keyFn = fn; - pSkipList->comparFn = getKeyComparFunc(keyType); - pSkipList->maxLevel = maxLevel; - pSkipList->level = 1; - - if (!initForwardBackwardPtr(pSkipList)) { - taosTFree(pSkipList); - return NULL; + // tail info + pSkipList->pTail = tSkipListNewNode(maxLevel); + if (pSkipList->pTail == NULL) { + tSkipListFreeNode(pSkipList->pHead); + return -1; } - - if (lock) { - pSkipList->lock = calloc(1, sizeof(pthread_rwlock_t)); - if (pthread_rwlock_init(pSkipList->lock, NULL) != 0) { - taosTFree(pSkipList->pHead); - taosTFree(pSkipList); - - return NULL; - } + for (uint32_t i = 0; i < maxLevel; ++i) { + 
SL_NODE_GET_FORWARD_POINTER(pSkipList->pHead, i) = pSkipList->pTail; + SL_NODE_GET_BACKWARD_POINTER(pSkipList->pTail, i) = pSkipList->pHead; } - srand((uint32_t)time(NULL)); - -#if SKIP_LIST_RECORD_PERFORMANCE - pSkipList->state.nTotalMemSize += sizeof(SSkipList); -#endif - - return pSkipList; + return 0; } -void *tSkipListDestroy(SSkipList *pSkipList) { - if (pSkipList == NULL) { - return NULL; - } - - if (pSkipList->lock) { - pthread_rwlock_wrlock(pSkipList->lock); - } - - if (pSkipList->keyInfo.freeNode) { - SSkipListNode *pNode = SL_GET_FORWARD_POINTER(pSkipList->pHead, 0); - - while (pNode != pSkipList->pTail) { - SSkipListNode *pTemp = pNode; - pNode = SL_GET_FORWARD_POINTER(pNode, 0); - taosTFree(pTemp); - } - } - - if (pSkipList->lock) { - pthread_rwlock_unlock(pSkipList->lock); - pthread_rwlock_destroy(pSkipList->lock); - - taosTFree(pSkipList->lock); - } - - taosTFree(pSkipList->pHead); - taosTFree(pSkipList); - return NULL; -} +static SSkipListNode *tSkipListNewNode(uint8_t level) { + int32_t tsize = sizeof(SSkipListNode) + sizeof(SSkipListNode *) * level * 2; -void tSkipListNewNodeInfo(SSkipList *pSkipList, int32_t *level, int32_t *headSize) { - if (pSkipList == NULL) { - *level = 1; - *headSize = SL_NODE_HEADER_SIZE(*level); - return; - } + SSkipListNode *pNode = (SSkipListNode *)calloc(1, tsize); + if (pNode == NULL) return NULL; - *level = getSkipListRandLevel(pSkipList); - *headSize = SL_NODE_HEADER_SIZE(*level); + pNode->level = level; + return pNode; } -SSkipListNode *tSkipListPut(SSkipList *pSkipList, SSkipListNode *pNode) { - if (pSkipList == NULL || pNode == NULL) { - return NULL; - } - - if (pSkipList->lock) { - pthread_rwlock_wrlock(pSkipList->lock); - } - - // if the new key is greater than the maximum key of skip list, push back this node at the end of skip list - char *newDatakey = SL_GET_NODE_KEY(pSkipList, pNode); - if (pSkipList->size == 0 || pSkipList->comparFn(SL_GET_SL_MAX_KEY(pSkipList), newDatakey) < 0) { - return tSkipListPushBack(pSkipList, pNode); - } - - // if the new key is less than the minimum key of skip list, push front this node at the front of skip list - assert(pSkipList->size > 0); - char* minKey = SL_GET_SL_MIN_KEY(pSkipList); - if (pSkipList->comparFn(newDatakey, minKey) < 0) { - return tSkipListPushFront(pSkipList, pNode); - } - - // find the appropriated position to insert data - SSkipListNode *px = pSkipList->pHead; - SSkipListNode *forward[MAX_SKIP_LIST_LEVEL] = {0}; +static SSkipListNode *tSkipListPutImpl(SSkipList *pSkipList, void *pData, SSkipListNode **direction, bool isForward, + bool hasDup) { + uint8_t dupMode = SL_DUP_MODE(pSkipList); + SSkipListNode *pNode = NULL; - int32_t ret = -1; - for (int32_t i = pSkipList->level - 1; i >= 0; --i) { - SSkipListNode *p = SL_GET_FORWARD_POINTER(px, i); - while (p != pSkipList->pTail) { - char *key = SL_GET_NODE_KEY(pSkipList, p); - - // if the forward element is less than the specified key, forward one step - ret = pSkipList->comparFn(key, newDatakey); - if (ret < 0) { - px = p; - p = SL_GET_FORWARD_POINTER(px, i); + if (hasDup && (dupMode == SL_DISCARD_DUP_KEY || dupMode == SL_UPDATE_DUP_KEY)) { + if (dupMode == SL_UPDATE_DUP_KEY) { + if (isForward) { + pNode = SL_NODE_GET_FORWARD_POINTER(direction[0], 0); } else { - break; + pNode = SL_NODE_GET_BACKWARD_POINTER(direction[0], 0); } + atomic_store_ptr(&(pNode->pData), pData); } - - forward[i] = px; - } - - // if the skip list does not allowed identical key inserted, the new data will be discarded. 
- if (pSkipList->keyInfo.dupKey == 0 && ret == 0) { - if (pSkipList->lock) { - pthread_rwlock_unlock(pSkipList->lock); - } - - return NULL; - } - - tSkipListDoInsert(pSkipList, forward, pNode); - return pNode; -} - - - -SArray* tSkipListGet(SSkipList *pSkipList, SSkipListKey key) { - SArray* sa = taosArrayInit(1, POINTER_BYTES); - - if (pSkipList->lock) { - pthread_rwlock_wrlock(pSkipList->lock); - } + } else { + pNode = tSkipListNewNode(getSkipListRandLevel(pSkipList)); + if (pNode != NULL) { + pNode->pData = pData; - SSkipListNode* pNode = getPriorNode(pSkipList, key, TSDB_ORDER_ASC, NULL); - while (1) { - SSkipListNode *p = SL_GET_FORWARD_POINTER(pNode, 0); - if (p == pSkipList->pTail) { - break; + tSkipListDoInsert(pSkipList, direction, pNode, isForward); } - if (pSkipList->comparFn(key, SL_GET_NODE_KEY(pSkipList, p)) != 0) { - break; - } - taosArrayPush(sa, &p); - pNode = p; - } - - if (pSkipList->lock) { - pthread_rwlock_unlock(pSkipList->lock); } - return sa; -} - - - -size_t tSkipListGetSize(const SSkipList* pSkipList) { - if (pSkipList == NULL) { - return 0; - } - - return pSkipList->size; + return pNode; } // static int32_t tSkipListEndParQuery(SSkipList *pSkipList, SSkipListNode *pStartNode, SSkipListKey *pEndKey, @@ -447,178 +779,13 @@ size_t tSkipListGetSize(const SSkipList* pSkipList) { // } // // // compress the minimum level of skip list -// while (pSkipList->level > 0 && SL_GET_FORWARD_POINTER(pSkipList->pHead, pSkipList->level - 1) == NULL) { +// while (pSkipList->level > 0 && SL_NODE_GET_FORWARD_POINTER(pSkipList->pHead, pSkipList->level - 1) == NULL) { // pSkipList->level -= 1; // } // // return true; //} - -uint32_t tSkipListRemove(SSkipList *pSkipList, SSkipListKey key) { - uint32_t count = 0; - - if (pSkipList->lock) { - pthread_rwlock_wrlock(pSkipList->lock); - } - - SSkipListNode* pNode = getPriorNode(pSkipList, key, TSDB_ORDER_ASC, NULL); - while (1) { - SSkipListNode *p = SL_GET_FORWARD_POINTER(pNode, 0); - if (p == pSkipList->pTail) { - break; - } - if (pSkipList->comparFn(key, SL_GET_NODE_KEY(pSkipList, p)) != 0) { - break; - } - - for (int32_t j = p->level - 1; j >= 0; --j) { - SSkipListNode* prev = SL_GET_BACKWARD_POINTER(p, j); - SSkipListNode* next = SL_GET_FORWARD_POINTER(p, j); - SL_GET_FORWARD_POINTER(prev, j) = next; - SL_GET_BACKWARD_POINTER(next, j) = prev; - } - - if (pSkipList->keyInfo.freeNode) { - taosTFree(p); - } - - ++count; - } - - // compress the minimum level of skip list - while (pSkipList->level > 0) { - if (SL_GET_FORWARD_POINTER(pSkipList->pHead, pSkipList->level - 1) != NULL) { - break; - } - pSkipList->level--; - } - - pSkipList->size -= count; - - if (pSkipList->lock) { - pthread_rwlock_unlock(pSkipList->lock); - } - - return count; -} - -void tSkipListRemoveNode(SSkipList *pSkipList, SSkipListNode *pNode) { - int32_t level = pNode->level; - - if (pSkipList->lock) { - pthread_rwlock_wrlock(pSkipList->lock); - } - - for (int32_t j = level - 1; j >= 0; --j) { - SSkipListNode* prev = SL_GET_BACKWARD_POINTER(pNode, j); - SSkipListNode* next = SL_GET_FORWARD_POINTER(pNode, j); - - SL_GET_FORWARD_POINTER(prev, j) = next; - SL_GET_BACKWARD_POINTER(next, j) = prev; - } - - if (pSkipList->keyInfo.freeNode) { - taosTFree(pNode); - } - - atomic_sub_fetch_32(&pSkipList->size, 1); - - // compress the minimum level of skip list - while (pSkipList->level > 0 && SL_GET_FORWARD_POINTER(pSkipList->pHead, pSkipList->level - 1) == NULL) { - pSkipList->level -= 1; - } - - if (pSkipList->lock) { - pthread_rwlock_unlock(pSkipList->lock); - } -} - 
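For reference, a minimal sketch of how the refactored skip list API above could be driven end to end. The SDemoRec type, demoGetKey, and demoSkipListUsage names are hypothetical, the flags value 0 is an assumption (the real thread-safety and duplicate-key flag constants live in tskiplist.h and are not shown in this hunk), and passing a NULL comparator relies on the getKeyComparFunc(keyType) fallback visible in tSkipListCreate.

#include <stdio.h>
#include "tskiplist.h"

typedef struct {
  int32_t key;
  int32_t value;
} SDemoRec;

// Key extractor handed to tSkipListCreate: the key is the first field of the record.
static char *demoGetKey(const void *pData) { return (char *)&(((SDemoRec *)pData)->key); }

static void demoSkipListUsage(void) {
  // NULL comparator -> getKeyComparFunc(TSDB_DATA_TYPE_INT); flags assumed 0 (see note above).
  SSkipList *sl = tSkipListCreate(MAX_SKIP_LIST_LEVEL, TSDB_DATA_TYPE_INT, sizeof(int32_t), NULL, 0, demoGetKey);

  SDemoRec recs[3] = {{30, 3}, {10, 1}, {20, 2}};
  for (int32_t i = 0; i < 3; ++i) {
    tSkipListPut(sl, &recs[i]);  // the list keeps the caller-owned pointer, it does not copy the record
  }

  SSkipListIterator *iter = tSkipListCreateIter(sl);
  while (tSkipListIterNext(iter)) {
    SSkipListNode *pNode = tSkipListIterGet(iter);
    SDemoRec *pRec = (SDemoRec *)pNode->pData;  // pData is the pointer stored by tSkipListPutImpl
    printf("key:%d value:%d\n", pRec->key, pRec->value);
  }
  tSkipListDestroyIter(iter);
  tSkipListDestroy(sl);
}

The interface change worth noting: tSkipListPut now takes a bare data pointer and builds the node internally (tSkipListNewNode/tSkipListPutImpl), whereas the removed code in this hunk expected the caller to pre-size and pass an SSkipListNode via tSkipListNewNodeInfo, and duplicate keys are now resolved by the list's dup mode (discard, update in place, or keep) rather than the old keyInfo.dupKey flag.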
-SSkipListIterator* tSkipListCreateIter(SSkipList *pSkipList) { - if (pSkipList == NULL) { - return NULL; - } - - return doCreateSkipListIterator(pSkipList, TSDB_ORDER_ASC); -} - -SSkipListIterator *tSkipListCreateIterFromVal(SSkipList* pSkipList, const char* val, int32_t type, int32_t order) { - assert(order == TSDB_ORDER_ASC || order == TSDB_ORDER_DESC); - assert(pSkipList != NULL); - - SSkipListIterator* iter = doCreateSkipListIterator(pSkipList, order); - if (val == NULL) { - return iter; - } - - if (pSkipList->lock) { - pthread_rwlock_rdlock(pSkipList->lock); - } - - iter->cur = getPriorNode(pSkipList, val, order, &iter->next); - - if (pSkipList->lock) { - pthread_rwlock_unlock(pSkipList->lock); - } - - return iter; -} - -bool tSkipListIterNext(SSkipListIterator *iter) { - if (iter->pSkipList == NULL) { - return false; - } - - SSkipList *pSkipList = iter->pSkipList; - - if (pSkipList->lock) { - pthread_rwlock_rdlock(pSkipList->lock); - } - - if (iter->order == TSDB_ORDER_ASC) { // ascending order iterate - iter->cur = SL_GET_FORWARD_POINTER(iter->cur, 0); - - // a new node is inserted into between iter->cur and iter->next, ignore it - if (iter->cur != iter->next && (iter->next != NULL)) { - iter->cur = iter->next; - } - - iter->next = SL_GET_FORWARD_POINTER(iter->cur, 0); - } else { // descending order iterate - iter->cur = SL_GET_BACKWARD_POINTER(iter->cur, 0); - - // a new node is inserted into between iter->cur and iter->next, ignore it - if (iter->cur != iter->next && (iter->next != NULL)) { - iter->cur = iter->next; - } - - iter->next = SL_GET_BACKWARD_POINTER(iter->cur, 0); - } - - if (pSkipList->lock) { - pthread_rwlock_unlock(pSkipList->lock); - } - - iter->step += 1; - return (iter->order == TSDB_ORDER_ASC)? (iter->cur != pSkipList->pTail) : (iter->cur != pSkipList->pHead); -} - -SSkipListNode *tSkipListIterGet(SSkipListIterator *iter) { - if (iter == NULL || iter->cur == iter->pSkipList->pTail || iter->cur == iter->pSkipList->pHead) { - return NULL; - } else { - return iter->cur; - } -} - -void* tSkipListDestroyIter(SSkipListIterator* iter) { - if (iter == NULL) { - return NULL; - } - - taosTFree(iter); - return NULL; -} - +// // bool tSkipListRemove(SSkipList *pSkipList, SSkipListKey *pKey) { // SSkipListNode *forward[MAX_SKIP_LIST_LEVEL] = {0}; // __compar_fn_t filterComparFn = getComparFunc(pSkipList, pKey->nType); @@ -638,111 +805,3 @@ void* tSkipListDestroyIter(SSkipListIterator* iter) { // // return ret; //} - -void tSkipListPrint(SSkipList *pSkipList, int16_t nlevel) { - if (pSkipList == NULL || pSkipList->level < nlevel || nlevel <= 0) { - return; - } - - SSkipListNode *p = SL_GET_FORWARD_POINTER(pSkipList->pHead, nlevel - 1); - - int32_t id = 1; - char* prev = NULL; - - while (p != pSkipList->pTail) { - char *key = SL_GET_NODE_KEY(pSkipList, p); - if (prev != NULL) { - assert(pSkipList->comparFn(prev, key) < 0); - } - - switch (pSkipList->keyInfo.type) { - case TSDB_DATA_TYPE_INT: - fprintf(stdout, "%d: %d\n", id++, *(int32_t *)key); - break; - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_TINYINT: - case TSDB_DATA_TYPE_BIGINT: - fprintf(stdout, "%d: %" PRId64 " \n", id++, *(int64_t *)key); - break; - case TSDB_DATA_TYPE_BINARY: - fprintf(stdout, "%d: %s \n", id++, key); - break; - case TSDB_DATA_TYPE_DOUBLE: - fprintf(stdout, "%d: %lf \n", id++, *(double *)key); - break; - default: - fprintf(stdout, "\n"); - } - - prev = SL_GET_NODE_KEY(pSkipList, p); - - p = SL_GET_FORWARD_POINTER(p, nlevel - 1); - } -} - -void tSkipListDoInsert(SSkipList *pSkipList, 
SSkipListNode **forward, SSkipListNode *pNode) { - DO_MEMSET_PTR_AREA(pNode); - - for (int32_t i = 0; i < pNode->level; ++i) { - SSkipListNode *x = forward[i]; - SL_GET_BACKWARD_POINTER(pNode, i) = x; - - SSkipListNode *next = SL_GET_FORWARD_POINTER(x, i); - SL_GET_BACKWARD_POINTER(next, i) = pNode; - - SL_GET_FORWARD_POINTER(pNode, i) = next; - SL_GET_FORWARD_POINTER(x, i) = pNode; - } - - atomic_add_fetch_32(&pSkipList->size, 1); - if (pSkipList->lock) { - pthread_rwlock_unlock(pSkipList->lock); - } -} - -SSkipListNode* tSkipListPushFront(SSkipList* pSkipList, SSkipListNode *pNode) { - SSkipListNode* forward[MAX_SKIP_LIST_LEVEL] = {0}; - for(int32_t i = 0; i < pSkipList->level; ++i) { - forward[i] = pSkipList->pHead; - } - - tSkipListDoInsert(pSkipList, forward, pNode); - return pNode; -} - -SSkipListNode* tSkipListPushBack(SSkipList *pSkipList, SSkipListNode *pNode) { - // do clear pointer area - DO_MEMSET_PTR_AREA(pNode); - - for(int32_t i = 0; i < pNode->level; ++i) { - SSkipListNode* prev = SL_GET_BACKWARD_POINTER(pSkipList->pTail, i); - SL_GET_FORWARD_POINTER(prev, i) = pNode; - SL_GET_FORWARD_POINTER(pNode, i) = pSkipList->pTail; - - SL_GET_BACKWARD_POINTER(pNode, i) = prev; - SL_GET_BACKWARD_POINTER(pSkipList->pTail, i) = pNode; - } - - atomic_add_fetch_32(&pSkipList->size, 1); - if (pSkipList->lock) { - pthread_rwlock_unlock(pSkipList->lock); - } - - return pNode; -} - -SSkipListIterator* doCreateSkipListIterator(SSkipList *pSkipList, int32_t order) { - SSkipListIterator* iter = calloc(1, sizeof(SSkipListIterator)); - - iter->pSkipList = pSkipList; - iter->order = order; - if(order == TSDB_ORDER_ASC) { - iter->cur = pSkipList->pHead; - iter->next = SL_GET_FORWARD_POINTER(iter->cur, 0); - } else { - iter->cur = pSkipList->pTail; - iter->next = SL_GET_BACKWARD_POINTER(iter->cur, 0); - } - - return iter; -} \ No newline at end of file diff --git a/src/util/src/tsocket.c b/src/util/src/tsocket.c index 4cf73e6dff8aa534e87145a3e1cb04e48d759c65..1be79b7bbd32a20c5f784ad10ca5aeff877eaac5 100644 --- a/src/util/src/tsocket.c +++ b/src/util/src/tsocket.c @@ -18,7 +18,7 @@ #include "tsocket.h" #include "taoserror.h" -int taosGetFqdn(char *fqdn) { +int32_t taosGetFqdn(char *fqdn) { char hostname[1024]; hostname[1023] = '\0'; if (gethostname(hostname, 1023) == -1) { @@ -26,10 +26,10 @@ int taosGetFqdn(char *fqdn) { return -1; } - struct addrinfo hints = {0}; + struct addrinfo hints = {0}; struct addrinfo *result = NULL; hints.ai_flags = AI_CANONNAME; - int ret = getaddrinfo(hostname, NULL, &hints, &result); + int32_t ret = getaddrinfo(hostname, NULL, &hints, &result); if (!result) { uError("failed to get fqdn, code:%d, reason:%s", ret, gai_strerror(ret)); return -1; @@ -49,28 +49,28 @@ uint32_t taosGetIpFromFqdn(const char *fqdn) { int32_t ret = getaddrinfo(fqdn, NULL, &hints, &result); if (result) { - struct sockaddr *sa = result->ai_addr; - struct sockaddr_in *si = (struct sockaddr_in*)sa; - struct in_addr ia = si->sin_addr; - uint32_t ip = ia.s_addr; + struct sockaddr * sa = result->ai_addr; + struct sockaddr_in *si = (struct sockaddr_in *)sa; + struct in_addr ia = si->sin_addr; + uint32_t ip = ia.s_addr; freeaddrinfo(result); return ip; } else { #ifdef EAI_SYSTEM if (ret == EAI_SYSTEM) { - uError("failed to get the ip address, fqdn:%s, code:%d, reason:%s", fqdn, ret, strerror(errno)); + uError("failed to get the ip address, fqdn:%s, since:%s", fqdn, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); } else { - uError("failed to get the ip address, fqdn:%s, code:%d, reason:%s", fqdn, ret, 
gai_strerror(ret)); + uError("failed to get the ip address, fqdn:%s, since:%s", fqdn, gai_strerror(ret)); } #else - uError("failed to get the ip address, fqdn:%s, code:%d, reason:%s", fqdn, ret, gai_strerror(ret)); + uError("failed to get the ip address, fqdn:%s, since:%s", fqdn, gai_strerror(ret)); #endif return 0xFFFFFFFF; } } -// Function converting an IP address string to an unsigned int. +// Function converting an IP address string to an uint32_t. uint32_t ip2uint(const char *const ip_addr) { char ip_addr_cpy[20]; char ip[5]; @@ -81,7 +81,7 @@ uint32_t ip2uint(const char *const ip_addr) { s_start = ip_addr_cpy; s_end = ip_addr_cpy; - int k; + int32_t k; for (k = 0; *s_start != '\0'; s_start = s_end) { for (s_end = s_start; *s_end != '.' && *s_end != '\0'; s_end++) { @@ -95,19 +95,19 @@ uint32_t ip2uint(const char *const ip_addr) { ip[k] = '\0'; - return *((unsigned int *)ip); + return *((uint32_t *)ip); } -int taosWriteMsg(SOCKET fd, void *buf, int nbytes) { - int nleft, nwritten; - char *ptr = (char *)buf; +int32_t taosWriteMsg(SOCKET fd, void *buf, int32_t nbytes) { + int32_t nleft, nwritten; + char * ptr = (char *)buf; nleft = nbytes; while (nleft > 0) { - nwritten = (int)taosWriteSocket(fd, (char *)ptr, (size_t)nleft); + nwritten = (int32_t)taosWriteSocket(fd, (char *)ptr, (size_t)nleft); if (nwritten <= 0) { - if (errno == EINTR) + if (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK) continue; else return -1; @@ -120,20 +120,20 @@ int taosWriteMsg(SOCKET fd, void *buf, int nbytes) { return (nbytes - nleft); } -int taosReadMsg(SOCKET fd, void *buf, int nbytes) { - int nleft, nread; - char *ptr = (char *)buf; +int32_t taosReadMsg(SOCKET fd, void *buf, int32_t nbytes) { + int32_t nleft, nread; + char * ptr = (char *)buf; nleft = nbytes; if (fd < 0) return -1; while (nleft > 0) { - nread = (int)taosReadSocket(fd, ptr, (size_t)nleft); + nread = (int32_t)taosReadSocket(fd, ptr, (size_t)nleft); if (nread == 0) { break; } else if (nread < 0) { - if (errno == EINTR) { + if (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK) { continue; } else { return -1; @@ -147,11 +147,11 @@ int taosReadMsg(SOCKET fd, void *buf, int nbytes) { return (nbytes - nleft); } -int taosNonblockwrite(SOCKET fd, char *ptr, int nbytes) { +int32_t taosNonblockwrite(SOCKET fd, char *ptr, int32_t nbytes) { taosSetNonblocking(fd, 1); - int nleft, nwritten, nready; - fd_set fset; + int32_t nleft, nwritten, nready; + fd_set fset; struct timeval tv; nleft = nbytes; @@ -160,7 +160,7 @@ int taosNonblockwrite(SOCKET fd, char *ptr, int nbytes) { tv.tv_usec = 0; FD_ZERO(&fset); FD_SET(fd, &fset); - if ((nready = select((int)(fd + 1), NULL, &fset, NULL, &tv)) == 0) { + if ((nready = select((int32_t)(fd + 1), NULL, &fset, NULL, &tv)) == 0) { errno = ETIMEDOUT; uError("fd %d timeout, no enough space to write", fd); break; @@ -172,7 +172,7 @@ int taosNonblockwrite(SOCKET fd, char *ptr, int nbytes) { return -1; } - nwritten = (int)taosSend(fd, ptr, (size_t)nleft, MSG_NOSIGNAL); + nwritten = (int32_t)taosSend(fd, ptr, (size_t)nleft, MSG_NOSIGNAL); if (nwritten <= 0) { if (errno == EAGAIN || errno == EINTR) continue; @@ -189,10 +189,10 @@ int taosNonblockwrite(SOCKET fd, char *ptr, int nbytes) { return (nbytes - nleft); } -int taosReadn(SOCKET fd, char *ptr, int nbytes) { - int nread, nready, nleft = nbytes; +int32_t taosReadn(SOCKET fd, char *ptr, int32_t nbytes) { + int32_t nread, nready, nleft = nbytes; - fd_set fset; + fd_set fset; struct timeval tv; while (nleft > 0) { @@ -200,7 +200,7 @@ int taosReadn(SOCKET 
fd, char *ptr, int nbytes) { tv.tv_usec = 0; FD_ZERO(&fset); FD_SET(fd, &fset); - if ((nready = select((int)(fd + 1), NULL, &fset, NULL, &tv)) == 0) { + if ((nready = select((int32_t)(fd + 1), NULL, &fset, NULL, &tv)) == 0) { errno = ETIMEDOUT; uError("fd %d timeout\n", fd); break; @@ -210,7 +210,7 @@ int taosReadn(SOCKET fd, char *ptr, int nbytes) { return -1; } - if ((nread = (int)taosReadSocket(fd, ptr, (size_t)nleft)) < 0) { + if ((nread = (int32_t)taosReadSocket(fd, ptr, (size_t)nleft)) < 0) { if (errno == EINTR) continue; uError("read error, %d (%s)", errno, strerror(errno)); return -1; @@ -229,8 +229,8 @@ int taosReadn(SOCKET fd, char *ptr, int nbytes) { SOCKET taosOpenUdpSocket(uint32_t ip, uint16_t port) { struct sockaddr_in localAddr; - SOCKET sockFd; - int bufSize = 1024000; + SOCKET sockFd; + int32_t bufSize = 1024000; uDebug("open udp socket:0x%x:%hu", ip, port); @@ -239,7 +239,7 @@ SOCKET taosOpenUdpSocket(uint32_t ip, uint16_t port) { localAddr.sin_addr.s_addr = ip; localAddr.sin_port = (uint16_t)htons(port); - if ((sockFd = (int)socket(AF_INET, SOCK_DGRAM, 0)) <= 2) { + if ((sockFd = (int32_t)socket(AF_INET, SOCK_DGRAM, 0)) <= 2) { uError("failed to open udp socket: %d (%s)", errno, strerror(errno)); taosCloseSocketNoCheck(sockFd); return -1; @@ -268,9 +268,9 @@ SOCKET taosOpenUdpSocket(uint32_t ip, uint16_t port) { } SOCKET taosOpenTcpClientSocket(uint32_t destIp, uint16_t destPort, uint32_t clientIp) { - SOCKET sockFd = 0; + SOCKET sockFd = 0; + int32_t ret; struct sockaddr_in serverAddr, clientAddr; - int ret; sockFd = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP); @@ -281,7 +281,7 @@ SOCKET taosOpenTcpClientSocket(uint32_t destIp, uint16_t destPort, uint32_t clie } /* set REUSEADDR option, so the portnumber can be re-used */ - int reuse = 1; + int32_t reuse = 1; if (taosSetSockOpt(sockFd, SOL_SOCKET, SO_REUSEADDR, (void *)&reuse, sizeof(reuse)) < 0) { uError("setsockopt SO_REUSEADDR failed: %d (%s)", errno, strerror(errno)); taosCloseSocket(sockFd); @@ -296,8 +296,8 @@ SOCKET taosOpenTcpClientSocket(uint32_t destIp, uint16_t destPort, uint32_t clie /* bind socket to client address */ if (bind(sockFd, (struct sockaddr *)&clientAddr, sizeof(clientAddr)) < 0) { - uError("bind tcp client socket failed, client(0x%x:0), dest(0x%x:%d), reason:(%s)", - clientIp, destIp, destPort, strerror(errno)); + uError("bind tcp client socket failed, client(0x%x:0), dest(0x%x:%d), reason:(%s)", clientIp, destIp, destPort, + strerror(errno)); taosCloseSocket(sockFd); return -1; } @@ -311,7 +311,7 @@ SOCKET taosOpenTcpClientSocket(uint32_t destIp, uint16_t destPort, uint32_t clie ret = connect(sockFd, (struct sockaddr *)&serverAddr, sizeof(serverAddr)); if (ret != 0) { - //uError("failed to connect socket, ip:0x%x, port:%hu(%s)", destIp, destPort, strerror(errno)); + // uError("failed to connect socket, ip:0x%x, port:%hu(%s)", destIp, destPort, strerror(errno)); taosCloseSocket(sockFd); sockFd = -1; } else { @@ -321,36 +321,36 @@ SOCKET taosOpenTcpClientSocket(uint32_t destIp, uint16_t destPort, uint32_t clie return sockFd; } -int taosKeepTcpAlive(SOCKET sockFd) { - int alive = 1; +int32_t taosKeepTcpAlive(SOCKET sockFd) { + int32_t alive = 1; if (taosSetSockOpt(sockFd, SOL_SOCKET, SO_KEEPALIVE, (void *)&alive, sizeof(alive)) < 0) { uError("fd:%d setsockopt SO_KEEPALIVE failed: %d (%s)", sockFd, errno, strerror(errno)); taosCloseSocket(sockFd); return -1; } - int probes = 3; + int32_t probes = 3; if (taosSetSockOpt(sockFd, SOL_TCP, TCP_KEEPCNT, (void *)&probes, sizeof(probes)) < 0) { 
uError("fd:%d setsockopt SO_KEEPCNT failed: %d (%s)", sockFd, errno, strerror(errno)); taosCloseSocket(sockFd); return -1; } - int alivetime = 10; + int32_t alivetime = 10; if (taosSetSockOpt(sockFd, SOL_TCP, TCP_KEEPIDLE, (void *)&alivetime, sizeof(alivetime)) < 0) { uError("fd:%d setsockopt SO_KEEPIDLE failed: %d (%s)", sockFd, errno, strerror(errno)); taosCloseSocket(sockFd); return -1; } - int interval = 3; + int32_t interval = 3; if (taosSetSockOpt(sockFd, SOL_TCP, TCP_KEEPINTVL, (void *)&interval, sizeof(interval)) < 0) { uError("fd:%d setsockopt SO_KEEPINTVL failed: %d (%s)", sockFd, errno, strerror(errno)); taosCloseSocket(sockFd); return -1; } - int nodelay = 1; + int32_t nodelay = 1; if (taosSetSockOpt(sockFd, IPPROTO_TCP, TCP_NODELAY, (void *)&nodelay, sizeof(nodelay)) < 0) { uError("fd:%d setsockopt TCP_NODELAY failed %d (%s)", sockFd, errno, strerror(errno)); taosCloseSocket(sockFd); @@ -371,8 +371,8 @@ int taosKeepTcpAlive(SOCKET sockFd) { SOCKET taosOpenTcpServerSocket(uint32_t ip, uint16_t port) { struct sockaddr_in serverAdd; - SOCKET sockFd; - int reuse; + SOCKET sockFd; + int32_t reuse; uDebug("open tcp server socket:0x%x:%hu", ip, port); @@ -381,7 +381,7 @@ SOCKET taosOpenTcpServerSocket(uint32_t ip, uint16_t port) { serverAdd.sin_addr.s_addr = ip; serverAdd.sin_port = (uint16_t)htons(port); - if ((sockFd = (int)socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)) <= 2) { + if ((sockFd = (int32_t)socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)) <= 2) { uError("failed to open TCP socket: %d (%s)", errno, strerror(errno)); taosCloseSocketNoCheck(sockFd); return -1; @@ -417,38 +417,38 @@ SOCKET taosOpenTcpServerSocket(uint32_t ip, uint16_t port) { return sockFd; } -void tinet_ntoa(char *ipstr, unsigned int ip) { +void tinet_ntoa(char *ipstr, uint32_t ip) { sprintf(ipstr, "%d.%d.%d.%d", ip & 0xFF, (ip >> 8) & 0xFF, (ip >> 16) & 0xFF, ip >> 24); } #define COPY_SIZE 32768 // sendfile shall be used -int taosCopyFds(SOCKET sfd, SOCKET dfd, int64_t len) { +int32_t taosCopyFds(SOCKET sfd, SOCKET dfd, int64_t len) { int64_t leftLen; - int readLen, writeLen; + int32_t readLen, writeLen; char temp[COPY_SIZE]; leftLen = len; while (leftLen > 0) { if (leftLen < COPY_SIZE) - readLen = (int)leftLen; + readLen = (int32_t)leftLen; else readLen = COPY_SIZE; // 4K - int retLen = taosReadMsg(sfd, temp, (int)readLen); + int32_t retLen = taosReadMsg(sfd, temp, (int32_t)readLen); if (readLen != retLen) { - uError("read error, readLen:%d retLen:%d len:%" PRId64 " leftLen:%" PRId64 ", reason:%s", readLen, retLen, len, leftLen, - strerror(errno)); + uError("read error, readLen:%d retLen:%d len:%" PRId64 " leftLen:%" PRId64 ", reason:%s", readLen, retLen, len, + leftLen, strerror(errno)); return -1; } writeLen = taosWriteMsg(dfd, temp, readLen); if (readLen != writeLen) { - uError("copy error, readLen:%d writeLen:%d len:%" PRId64 " leftLen:%" PRId64 ", reason:%s", readLen, writeLen, len, leftLen, - strerror(errno)); + uError("copy error, readLen:%d writeLen:%d len:%" PRId64 " leftLen:%" PRId64 ", reason:%s", readLen, writeLen, + len, leftLen, strerror(errno)); return -1; } diff --git a/src/util/src/tutil.c b/src/util/src/tutil.c index 6c4af437b27b6623aeebb404e0dad1c70241cf47..451976f563740b4ac933766d5c0c8f4075ad42d8 100644 --- a/src/util/src/tutil.c +++ b/src/util/src/tutil.c @@ -326,6 +326,7 @@ int32_t taosHexStrToByteArray(char hexstr[], char bytes[]) { return 0; } +// TODO move to comm module bool taosGetVersionNumber(char *versionStr, int *versionNubmer) { if (versionStr == NULL || versionNubmer == NULL) { 
return false; @@ -376,7 +377,8 @@ int taosCheckVersion(char *input_client_version, char *input_server_version, int for(int32_t i = 0; i < comparedSegments; ++i) { if (clientVersionNumber[i] != serverVersionNumber[i]) { - uError("the %d-th number of server version:%s not matched with client version:%s", i, server_version, version); + uError("the %d-th number of server version:%s not matched with client version:%s", i, server_version, + client_version); return TSDB_CODE_TSC_INVALID_VERSION; } } diff --git a/src/util/tests/CMakeLists.txt b/src/util/tests/CMakeLists.txt index 8687a8005ddeda7320c60c9ef90dd221f56b971f..0c96ed2a2f3dfb7f03268c9f8fbb1b0afa2397b9 100644 --- a/src/util/tests/CMakeLists.txt +++ b/src/util/tests/CMakeLists.txt @@ -9,7 +9,22 @@ IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR) INCLUDE_DIRECTORIES(${HEADER_GTEST_INCLUDE_DIR}) AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) - + + LIST(REMOVE_ITEM SOURCE_LIST ${CMAKE_CURRENT_SOURCE_DIR}/trefTest.c) ADD_EXECUTABLE(utilTest ${SOURCE_LIST}) TARGET_LINK_LIBRARIES(utilTest tutil common osdetail gtest pthread gcov) + + LIST(APPEND BIN_SRC ${CMAKE_CURRENT_SOURCE_DIR}/trefTest.c) + ADD_EXECUTABLE(trefTest ${BIN_SRC}) + TARGET_LINK_LIBRARIES(trefTest common tutil) + ENDIF() + +#IF (TD_LINUX) +# ADD_EXECUTABLE(trefTest ./trefTest.c) +# TARGET_LINK_LIBRARIES(trefTest tutil common) +#ENDIF () + +INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/util/inc) + + diff --git a/src/util/tests/skiplistTest.cpp b/src/util/tests/skiplistTest.cpp index 77174f69fda1a49975c44717d8993996274ca4c9..2203ae8e4fc75c2aa0267055f79884f05e3a65e9 100644 --- a/src/util/tests/skiplistTest.cpp +++ b/src/util/tests/skiplistTest.cpp @@ -59,7 +59,7 @@ void doubleSkipListTest() { } if (size > 0) { - taosTFree(pNodes); + tfree(pNodes); } } @@ -196,7 +196,7 @@ void stringKeySkiplistTest() { tSkipListRemoveNode(pSkipList, pres[0]); if (num > 0) { - taosTFree(pres); + tfree(pres); } } @@ -247,7 +247,7 @@ void skiplistPerformanceTest() { printf("total:%" PRIu64 " ms, avg:%f\n", e - s, (e - s) / (double)size); printf("max level of skiplist:%d, actually level:%d\n ", pSkipList->maxLevel, pSkipList->level); - assert(tSkipListGetSize(pSkipList) == size); + assert(SL_GET_SIZE(pSkipList) == size); // printf("the level of skiplist is:\n"); // @@ -273,10 +273,10 @@ void skiplistPerformanceTest() { int64_t et = taosGetTimestampMs(); printf("delete %d data from skiplist, elapased time:%" PRIu64 "ms\n", 10000, et - st); - assert(tSkipListGetSize(pSkipList) == size); + assert(SL_GET_SIZE(pSkipList) == size); tSkipListDestroy(pSkipList); - taosTFree(total); + tfree(total); } // todo not support duplicated key yet @@ -357,7 +357,7 @@ TEST(testCase, skiplist_test) { printf("-----%lf\n", pNodes[i]->key.dKey); } printf("the range query result size is: %d\n", size); - taosTFree(pNodes); + tfree(pNodes); SSkipListKey *pKeys = malloc(sizeof(SSkipListKey) * 20); for (int32_t i = 0; i < 8; i += 2) { @@ -371,7 +371,7 @@ TEST(testCase, skiplist_test) { for (int32_t i = 0; i < r; ++i) { // printf("%lf ", pNodes[i]->key.dKey); } - taosTFree(pNodes); + tfree(pNodes); free(pKeys);*/ } diff --git a/src/util/tests/trefTest.c b/src/util/tests/trefTest.c new file mode 100644 index 0000000000000000000000000000000000000000..454860410b4dd0c3fdf65c4b7fd7950a1a7d4446 --- /dev/null +++ b/src/util/tests/trefTest.c @@ -0,0 +1,195 @@ +#include +#include +#include +#include +#include +#include "os.h" +#include "tref.h" +#include "tlog.h" +#include "tglobal.h" +#include "taoserror.h" +#include 
"tulog.h" + +typedef struct { + int refNum; + int steps; + int rsetId; + int64_t rid; + void **p; +} SRefSpace; + +void iterateRefs(int rsetId) { + int count = 0; + + void *p = taosIterateRef(rsetId, NULL); + while (p) { + // process P + count++; + p = taosIterateRef(rsetId, p); + } + + printf(" %d ", count); +} + +void *addRef(void *param) { + SRefSpace *pSpace = (SRefSpace *)param; + int id; + int64_t rid; + + for (int i=0; i < pSpace->steps; ++i) { + printf("a"); + id = random() % pSpace->refNum; + if (pSpace->rid[id] <= 0) { + pSpace->p[id] = malloc(128); + pSpace->rid[id] = taosAddRef(pSpace->rsetId, pSpace->p[id]); + } + usleep(100); + } + + return NULL; +} + +void *removeRef(void *param) { + SRefSpace *pSpace = (SRefSpace *)param; + int id; + int64_t rid; + + for (int i=0; i < pSpace->steps; ++i) { + printf("d"); + id = random() % pSpace->refNum; + if (pSpace->rid[id] > 0) { + code = taosRemoveRef(pSpace->rsetId, pSpace->rid[id]); + if (code == 0) pSpace->rid[id] = 0; + } + + usleep(100); + } + + return NULL; +} + +void *acquireRelease(void *param) { + SRefSpace *pSpace = (SRefSpace *)param; + int id; + int64_t rid; + + for (int i=0; i < pSpace->steps; ++i) { + printf("a"); + + id = random() % pSpace->refNum; + void *p = taosAcquireRef(pSpace->rsetId, pSpace->p[id]); + if (p) { + usleep(id % 5 + 1); + taosReleaseRef(pSpace->rsetId, pSpace->p[id]); + } + } + + return NULL; +} + +void myfree(void *p) { + free(p); +} + +void *openRefSpace(void *param) { + SRefSpace *pSpace = (SRefSpace *)param; + + printf("c"); + pSpace->rsetId = taosOpenRef(50, myfree); + + if (pSpace->rsetId < 0) { + printf("failed to open ref, reson:%s\n", tstrerror(pSpace->rsetId)); + return NULL; + } + + pSpace->p = (void **) calloc(sizeof(void *), pSpace->refNum); + + pthread_attr_t thattr; + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); + + pthread_t thread1, thread2, thread3; + pthread_create(&(thread1), &thattr, addRef, (void *)(pSpace)); + pthread_create(&(thread2), &thattr, removeRef, (void *)(pSpace)); + pthread_create(&(thread3), &thattr, acquireRelease, (void *)(pSpace)); + + pthread_join(thread1, NULL); + pthread_join(thread2, NULL); + pthread_join(thread3, NULL); + + for (int i=0; irefNum; ++i) { + taosRemoveRef(pSpace->rsetId, pSpace->rid[i]); + } + + taosCloseRef(pSpace->rsetId); + + uInfo("rsetId:%d main thread exit", pSpace->rsetId); + free(pSpace->p); + pSpace->p = NULL; + + return NULL; +} + +int main(int argc, char *argv[]) { + int refNum = 100; + int threads = 10; + int steps = 10000; + int loops = 1; + + uDebugFlag = 143; + + for (int i=1; i + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_VNODE_CFG_H +#define TDENGINE_VNODE_CFG_H + +#ifdef __cplusplus +extern "C" { +#endif + +int32_t vnodeReadCfg(SVnodeObj *pVnode); +int32_t vnodeWriteCfg(SCreateVnodeMsg *pVnodeCfg); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/vnode/inc/vnodeInt.h b/src/vnode/inc/vnodeInt.h index 169334c6119a35f53fd0a69dd1f7cb6952fbb727..021831a644e1da3d1de882e14852d1fa5e58bc1d 100644 --- a/src/vnode/inc/vnodeInt.h +++ b/src/vnode/inc/vnodeInt.h @@ -37,17 +37,20 @@ extern int32_t vDebugFlag; typedef struct { int32_t vgId; // global vnode group ID int32_t refCount; // reference count - int32_t delay; + int32_t queuedWMsg; + int32_t queuedRMsg; + int32_t delayMs; int8_t status; int8_t role; int8_t accessState; - int64_t version; // current version - int64_t fversion; // version on saved data file + int8_t isFull; + uint64_t version; // current version + uint64_t fversion; // version on saved data file void *wqueue; void *rqueue; void *wal; void *tsdb; - void *sync; + int64_t sync; void *events; void *cq; // continuous query int32_t cfgVersion; @@ -58,11 +61,9 @@ typedef struct { char *rootDir; tsem_t sem; int8_t dropped; - char db[TSDB_DB_NAME_LEN]; + char db[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN]; } SVnodeObj; -int vnodeWriteToQueue(void *param, void *pHead, int type); -int vnodeWriteCqMsgToQueue(void *param, void *pHead, int type); void vnodeInitWriteFp(void); void vnodeInitReadFp(void); diff --git a/src/vnode/inc/vnodeVersion.h b/src/vnode/inc/vnodeVersion.h new file mode 100644 index 0000000000000000000000000000000000000000..1d086cb21fdab0247038c7f3d32d89b38c19d871 --- /dev/null +++ b/src/vnode/inc/vnodeVersion.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_VNODE_VERSION_H +#define TDENGINE_VNODE_VERSION_H + +#ifdef __cplusplus +extern "C" { +#endif + +int32_t vnodeReadVersion(SVnodeObj *pVnode); +int32_t vnodeSaveVersion(SVnodeObj *pVnode); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/vnode/src/vnodeCfg.c b/src/vnode/src/vnodeCfg.c new file mode 100644 index 0000000000000000000000000000000000000000..2d56157328714a5ae4b8ac05abe3ac4468e361cb --- /dev/null +++ b/src/vnode/src/vnodeCfg.c @@ -0,0 +1,324 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#define _DEFAULT_SOURCE +#include "os.h" +#include "taosmsg.h" +#include "taoserror.h" +#include "cJSON.h" +#include "tglobal.h" +#include "tsdb.h" +#include "dnode.h" +#include "vnodeInt.h" +#include "vnodeCfg.h" + +static void vnodeLoadCfg(SVnodeObj *pVnode, SCreateVnodeMsg* vnodeMsg) { + tstrncpy(pVnode->db, vnodeMsg->db, sizeof(pVnode->db)); + pVnode->cfgVersion = vnodeMsg->cfg.cfgVersion; + pVnode->tsdbCfg.cacheBlockSize = vnodeMsg->cfg.cacheBlockSize; + pVnode->tsdbCfg.totalBlocks = vnodeMsg->cfg.totalBlocks; + pVnode->tsdbCfg.daysPerFile = vnodeMsg->cfg.daysPerFile; + pVnode->tsdbCfg.keep = vnodeMsg->cfg.daysToKeep; + pVnode->tsdbCfg.keep1 = vnodeMsg->cfg.daysToKeep1; + pVnode->tsdbCfg.keep2 = vnodeMsg->cfg.daysToKeep2; + pVnode->tsdbCfg.minRowsPerFileBlock = vnodeMsg->cfg.minRowsPerFileBlock; + pVnode->tsdbCfg.maxRowsPerFileBlock = vnodeMsg->cfg.maxRowsPerFileBlock; + pVnode->tsdbCfg.precision = vnodeMsg->cfg.precision; + pVnode->tsdbCfg.compression = vnodeMsg->cfg.compression; + pVnode->walCfg.walLevel = vnodeMsg->cfg.walLevel; + pVnode->walCfg.fsyncPeriod = vnodeMsg->cfg.fsyncPeriod; + pVnode->walCfg.keep = TAOS_WAL_NOT_KEEP; + pVnode->syncCfg.replica = vnodeMsg->cfg.replications; + pVnode->syncCfg.quorum = vnodeMsg->cfg.quorum; + + for (int i = 0; i < pVnode->syncCfg.replica; ++i) { + SVnodeDesc *node = &vnodeMsg->nodes[i]; + pVnode->syncCfg.nodeInfo[i].nodeId = node->nodeId; + taosGetFqdnPortFromEp(node->nodeEp, pVnode->syncCfg.nodeInfo[i].nodeFqdn, &pVnode->syncCfg.nodeInfo[i].nodePort); + pVnode->syncCfg.nodeInfo[i].nodePort += TSDB_PORT_SYNC; + } + + vInfo("vgId:%d, load vnode cfg successfully, replcia:%d", pVnode->vgId, pVnode->syncCfg.replica); + for (int32_t i = 0; i < pVnode->syncCfg.replica; i++) { + SNodeInfo *node = &pVnode->syncCfg.nodeInfo[i]; + vInfo("vgId:%d, dnode:%d, %s:%u", pVnode->vgId, node->nodeId, node->nodeFqdn, node->nodePort); + } +} + +int32_t vnodeReadCfg(SVnodeObj *pVnode) { + int32_t ret = TSDB_CODE_VND_APP_ERROR; + int32_t len = 0; + int maxLen = 1000; + char * content = calloc(1, maxLen + 1); + cJSON * root = NULL; + FILE * fp = NULL; + bool nodeChanged = false; + SCreateVnodeMsg vnodeMsg; + + char file[TSDB_FILENAME_LEN + 30] = {0}; + sprintf(file, "%s/vnode%d/config.json", tsVnodeDir, pVnode->vgId); + + vnodeMsg.cfg.vgId = pVnode->vgId; + + fp = fopen(file, "r"); + if (!fp) { + vError("vgId:%d, failed to open vnode cfg file:%s to read, error:%s", pVnode->vgId, file, strerror(errno)); + ret = TAOS_SYSTEM_ERROR(errno); + goto PARSE_VCFG_ERROR; + } + + len = fread(content, 1, maxLen, fp); + if (len <= 0) { + vError("vgId:%d, failed to read %s, content is null", pVnode->vgId, file); + goto PARSE_VCFG_ERROR; + } + + content[len] = 0; + root = cJSON_Parse(content); + if (root == NULL) { + vError("vgId:%d, failed to read %s, invalid json format", pVnode->vgId, file); + goto PARSE_VCFG_ERROR; + } + + cJSON *db = cJSON_GetObjectItem(root, "db"); + if (!db || db->type != cJSON_String || db->valuestring == NULL) { + vError("vgId:%d, failed to read %s, db not found", pVnode->vgId, file); + goto PARSE_VCFG_ERROR; + } + tstrncpy(vnodeMsg.db, db->valuestring, sizeof(vnodeMsg.db)); + + cJSON *cfgVersion = cJSON_GetObjectItem(root, "cfgVersion"); + if (!cfgVersion || cfgVersion->type != cJSON_Number) { + vError("vgId:%d, failed to read %s, cfgVersion not found", pVnode->vgId, file); + goto PARSE_VCFG_ERROR; + } + vnodeMsg.cfg.cfgVersion = cfgVersion->valueint; + + cJSON *cacheBlockSize = cJSON_GetObjectItem(root, "cacheBlockSize"); + if (!cacheBlockSize 
|| cacheBlockSize->type != cJSON_Number) { + vError("vgId:%d, failed to read %s, cacheBlockSize not found", pVnode->vgId, file); + goto PARSE_VCFG_ERROR; + } + vnodeMsg.cfg.cacheBlockSize = cacheBlockSize->valueint; + + cJSON *totalBlocks = cJSON_GetObjectItem(root, "totalBlocks"); + if (!totalBlocks || totalBlocks->type != cJSON_Number) { + vError("vgId:%d, failed to read %s, totalBlocks not found", pVnode->vgId, file); + goto PARSE_VCFG_ERROR; + } + vnodeMsg.cfg.totalBlocks = totalBlocks->valueint; + + cJSON *daysPerFile = cJSON_GetObjectItem(root, "daysPerFile"); + if (!daysPerFile || daysPerFile->type != cJSON_Number) { + vError("vgId:%d, failed to read %s, daysPerFile not found", pVnode->vgId, file); + goto PARSE_VCFG_ERROR; + } + vnodeMsg.cfg.daysPerFile = daysPerFile->valueint; + + cJSON *daysToKeep = cJSON_GetObjectItem(root, "daysToKeep"); + if (!daysToKeep || daysToKeep->type != cJSON_Number) { + vError("vgId:%d, failed to read %s, daysToKeep not found", pVnode->vgId, file); + goto PARSE_VCFG_ERROR; + } + vnodeMsg.cfg.daysToKeep = daysToKeep->valueint; + + cJSON *daysToKeep1 = cJSON_GetObjectItem(root, "daysToKeep1"); + if (!daysToKeep1 || daysToKeep1->type != cJSON_Number) { + vError("vgId:%d, failed to read %s, daysToKeep1 not found", pVnode->vgId, file); + goto PARSE_VCFG_ERROR; + } + vnodeMsg.cfg.daysToKeep1 = daysToKeep1->valueint; + + cJSON *daysToKeep2 = cJSON_GetObjectItem(root, "daysToKeep2"); + if (!daysToKeep2 || daysToKeep2->type != cJSON_Number) { + vError("vgId:%d, failed to read %s, daysToKeep2 not found", pVnode->vgId, file); + goto PARSE_VCFG_ERROR; + } + vnodeMsg.cfg.daysToKeep2 = daysToKeep2->valueint; + + cJSON *minRowsPerFileBlock = cJSON_GetObjectItem(root, "minRowsPerFileBlock"); + if (!minRowsPerFileBlock || minRowsPerFileBlock->type != cJSON_Number) { + vError("vgId:%d, failed to read %s, minRowsPerFileBlock not found", pVnode->vgId, file); + goto PARSE_VCFG_ERROR; + } + vnodeMsg.cfg.minRowsPerFileBlock = minRowsPerFileBlock->valueint; + + cJSON *maxRowsPerFileBlock = cJSON_GetObjectItem(root, "maxRowsPerFileBlock"); + if (!maxRowsPerFileBlock || maxRowsPerFileBlock->type != cJSON_Number) { + vError("vgId:%d, failed to read %s, maxRowsPerFileBlock not found", pVnode->vgId, file); + goto PARSE_VCFG_ERROR; + } + vnodeMsg.cfg.maxRowsPerFileBlock = maxRowsPerFileBlock->valueint; + + cJSON *precision = cJSON_GetObjectItem(root, "precision"); + if (!precision || precision->type != cJSON_Number) { + vError("vgId:%d, failed to read %s, precision not found", pVnode->vgId, file); + goto PARSE_VCFG_ERROR; + } + vnodeMsg.cfg.precision = (int8_t)precision->valueint; + + cJSON *compression = cJSON_GetObjectItem(root, "compression"); + if (!compression || compression->type != cJSON_Number) { + vError("vgId:%d, failed to read %s, compression not found", pVnode->vgId, file); + goto PARSE_VCFG_ERROR; + } + vnodeMsg.cfg.compression = (int8_t)compression->valueint; + + cJSON *walLevel = cJSON_GetObjectItem(root, "walLevel"); + if (!walLevel || walLevel->type != cJSON_Number) { + vError("vgId:%d, failed to read %s, walLevel not found", pVnode->vgId, file); + goto PARSE_VCFG_ERROR; + } + vnodeMsg.cfg.walLevel = (int8_t)walLevel->valueint; + + cJSON *fsyncPeriod = cJSON_GetObjectItem(root, "fsync"); + if (!fsyncPeriod || fsyncPeriod->type != cJSON_Number) { + vError("vgId:%d, failed to read %s, fsyncPeriod not found", pVnode->vgId, file); + goto PARSE_VCFG_ERROR; + } + vnodeMsg.cfg.fsyncPeriod = fsyncPeriod->valueint; + + cJSON *wals = cJSON_GetObjectItem(root, "wals"); + if (!wals 
+    vError("vgId:%d, failed to read %s, wals not found", pVnode->vgId, file);
+    goto PARSE_VCFG_ERROR;
+  }
+  vnodeMsg.cfg.wals = (int8_t)wals->valueint;
+
+  cJSON *replica = cJSON_GetObjectItem(root, "replica");
+  if (!replica || replica->type != cJSON_Number) {
+    vError("vgId:%d, failed to read %s, replica not found", pVnode->vgId, file);
+    goto PARSE_VCFG_ERROR;
+  }
+  vnodeMsg.cfg.replications = (int8_t)replica->valueint;
+
+  cJSON *quorum = cJSON_GetObjectItem(root, "quorum");
+  if (!quorum || quorum->type != cJSON_Number) {
+    vError("vgId:%d, failed to read %s, quorum not found", pVnode->vgId, file);
+    goto PARSE_VCFG_ERROR;
+  }
+  vnodeMsg.cfg.quorum = (int8_t)quorum->valueint;
+
+  cJSON *nodeInfos = cJSON_GetObjectItem(root, "nodeInfos");
+  if (!nodeInfos || nodeInfos->type != cJSON_Array) {
+    vError("vgId:%d, failed to read %s, nodeInfos not found", pVnode->vgId, file);
+    goto PARSE_VCFG_ERROR;
+  }
+
+  int size = cJSON_GetArraySize(nodeInfos);
+  if (size != vnodeMsg.cfg.replications) {
+    vError("vgId:%d, failed to read %s, nodeInfos size not matched", pVnode->vgId, file);
+    goto PARSE_VCFG_ERROR;
+  }
+
+  for (int i = 0; i < size; ++i) {
+    cJSON *nodeInfo = cJSON_GetArrayItem(nodeInfos, i);
+    if (nodeInfo == NULL) continue;
+    SVnodeDesc *node = &vnodeMsg.nodes[i];
+
+    cJSON *nodeId = cJSON_GetObjectItem(nodeInfo, "nodeId");
+    if (!nodeId || nodeId->type != cJSON_Number) {
+      vError("vgId:%d, failed to read %s, nodeId not found", pVnode->vgId, file);
+      goto PARSE_VCFG_ERROR;
+    }
+    node->nodeId = nodeId->valueint;
+
+    cJSON *nodeEp = cJSON_GetObjectItem(nodeInfo, "nodeEp");
+    if (!nodeEp || nodeEp->type != cJSON_String || nodeEp->valuestring == NULL) {
+      vError("vgId:%d, failed to read %s, nodeEp not found", pVnode->vgId, file);
+      goto PARSE_VCFG_ERROR;
+    }
+    tstrncpy(node->nodeEp, nodeEp->valuestring, TSDB_EP_LEN);
+
+    if (!nodeChanged) {
+      nodeChanged = dnodeCheckEpChanged(node->nodeId, node->nodeEp);
+    }
+  }
+
+  ret = TSDB_CODE_SUCCESS;
+
+PARSE_VCFG_ERROR:
+  if (content != NULL) free(content);
+  if (root != NULL) cJSON_Delete(root);
+  if (fp != NULL) fclose(fp);
+
+  if (nodeChanged) {
+    vnodeWriteCfg(&vnodeMsg);
+  }
+
+  if (ret == TSDB_CODE_SUCCESS) {
+    vnodeLoadCfg(pVnode, &vnodeMsg);
+  }
+
+  terrno = 0;
+  return ret;
+}
+
+int32_t vnodeWriteCfg(SCreateVnodeMsg *pMsg) {
+  char file[TSDB_FILENAME_LEN + 30] = {0};
+  sprintf(file, "%s/vnode%d/config.json", tsVnodeDir, pMsg->cfg.vgId);
+
+  FILE *fp = fopen(file, "w");
+  if (!fp) {
+    vError("vgId:%d, failed to write %s error:%s", pMsg->cfg.vgId, file, strerror(errno));
+    terrno = TAOS_SYSTEM_ERROR(errno);
+    return terrno;
+  }
+
+  int32_t len = 0;
+  int32_t maxLen = 1000;
+  char *  content = calloc(1, maxLen + 1);
+
+  len += snprintf(content + len, maxLen - len, "{\n");
+  len += snprintf(content + len, maxLen - len, "  \"db\": \"%s\",\n", pMsg->db);
+  len += snprintf(content + len, maxLen - len, "  \"cfgVersion\": %d,\n", pMsg->cfg.cfgVersion);
+  len += snprintf(content + len, maxLen - len, "  \"cacheBlockSize\": %d,\n", pMsg->cfg.cacheBlockSize);
+  len += snprintf(content + len, maxLen - len, "  \"totalBlocks\": %d,\n", pMsg->cfg.totalBlocks);
+  len += snprintf(content + len, maxLen - len, "  \"daysPerFile\": %d,\n", pMsg->cfg.daysPerFile);
+  len += snprintf(content + len, maxLen - len, "  \"daysToKeep\": %d,\n", pMsg->cfg.daysToKeep);
+  len += snprintf(content + len, maxLen - len, "  \"daysToKeep1\": %d,\n", pMsg->cfg.daysToKeep1);
+  len += snprintf(content + len, maxLen - len, "  \"daysToKeep2\": %d,\n", 
pMsg->cfg.daysToKeep2); + len += snprintf(content + len, maxLen - len, " \"minRowsPerFileBlock\": %d,\n", pMsg->cfg.minRowsPerFileBlock); + len += snprintf(content + len, maxLen - len, " \"maxRowsPerFileBlock\": %d,\n", pMsg->cfg.maxRowsPerFileBlock); + len += snprintf(content + len, maxLen - len, " \"precision\": %d,\n", pMsg->cfg.precision); + len += snprintf(content + len, maxLen - len, " \"compression\": %d,\n", pMsg->cfg.compression); + len += snprintf(content + len, maxLen - len, " \"walLevel\": %d,\n", pMsg->cfg.walLevel); + len += snprintf(content + len, maxLen - len, " \"fsync\": %d,\n", pMsg->cfg.fsyncPeriod); + len += snprintf(content + len, maxLen - len, " \"replica\": %d,\n", pMsg->cfg.replications); + len += snprintf(content + len, maxLen - len, " \"wals\": %d,\n", pMsg->cfg.wals); + len += snprintf(content + len, maxLen - len, " \"quorum\": %d,\n", pMsg->cfg.quorum); + len += snprintf(content + len, maxLen - len, " \"nodeInfos\": [{\n"); + for (int32_t i = 0; i < pMsg->cfg.replications; i++) { + SVnodeDesc *node = &pMsg->nodes[i]; + dnodeUpdateEp(node->nodeId, node->nodeEp, NULL, NULL); + len += snprintf(content + len, maxLen - len, " \"nodeId\": %d,\n", node->nodeId); + len += snprintf(content + len, maxLen - len, " \"nodeEp\": \"%s\"\n", node->nodeEp); + if (i < pMsg->cfg.replications - 1) { + len += snprintf(content + len, maxLen - len, " },{\n"); + } else { + len += snprintf(content + len, maxLen - len, " }]\n"); + } + } + len += snprintf(content + len, maxLen - len, "}\n"); + + fwrite(content, 1, len, fp); + fflush(fp); + fclose(fp); + free(content); + terrno = 0; + + vInfo("vgId:%d, successed to write %s", pMsg->cfg.vgId, file); + return TSDB_CODE_SUCCESS; +} diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index e529f27f55abe99e0a842fb839f2916a0f303bfc..3f20efb150a8607a39fb7df11f387a46bc97f011 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -15,45 +15,35 @@ #define _DEFAULT_SOURCE #include "os.h" - -#include "tcache.h" -#include "cJSON.h" -#include "dnode.h" -#include "hash.h" #include "taoserror.h" #include "taosmsg.h" #include "tglobal.h" #include "trpc.h" #include "tsdb.h" -#include "ttimer.h" #include "tutil.h" #include "vnode.h" #include "vnodeInt.h" #include "query.h" #include "dnode.h" +#include "vnodeCfg.h" +#include "vnodeVersion.h" -#define TSDB_VNODE_VERSION_CONTENT_LEN 31 - -static SHashObj*tsDnodeVnodesHash; +static SHashObj*tsVnodesHash; static void vnodeCleanUp(SVnodeObj *pVnode); -static int32_t vnodeSaveCfg(SMDCreateVnodeMsg *pVnodeCfg); -static int32_t vnodeReadCfg(SVnodeObj *pVnode); -static int32_t vnodeSaveVersion(SVnodeObj *pVnode); -static int32_t vnodeReadVersion(SVnodeObj *pVnode); -static int vnodeProcessTsdbStatus(void *arg, int status); +static int vnodeProcessTsdbStatus(void *arg, int status, int eno); static uint32_t vnodeGetFileInfo(void *ahandle, char *name, uint32_t *index, uint32_t eindex, int64_t *size, uint64_t *fversion); -static int vnodeGetWalInfo(void *ahandle, char *name, uint32_t *index); +static int vnodeGetWalInfo(void *ahandle, char *fileName, int64_t *fileId); static void vnodeNotifyRole(void *ahandle, int8_t role); static void vnodeCtrlFlow(void *handle, int32_t mseconds); static int vnodeNotifyFileSynced(void *ahandle, uint64_t fversion); #ifndef _SYNC -tsync_h syncStart(const SSyncInfo *info) { return NULL; } -int32_t syncForwardToPeer(tsync_h shandle, void *pHead, void *mhandle, int qtype) { return 0; } -void syncStop(tsync_h shandle) {} -int32_t syncReconfig(tsync_h shandle, 
const SSyncCfg * cfg) { return 0; } -int syncGetNodesRole(tsync_h shandle, SNodesRole * cfg) { return 0; } -void syncConfirmForward(tsync_h shandle, uint64_t version, int32_t code) {} +int64_t syncStart(const SSyncInfo *info) { return NULL; } +int32_t syncForwardToPeer(int64_t rid, void *pHead, void *mhandle, int qtype) { return 0; } +void syncStop(int64_t rid) {} +int32_t syncReconfig(int64_t rid, const SSyncCfg * cfg) { return 0; } +int syncGetNodesRole(int64_t rid, SNodesRole * cfg) { return 0; } +void syncConfirmForward(int64_t rid, uint64_t version, int32_t code) {} #endif char* vnodeStatus[] = { @@ -71,26 +61,33 @@ int32_t vnodeInitResources() { vnodeInitWriteFp(); vnodeInitReadFp(); - tsDnodeVnodesHash = taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, true); - if (tsDnodeVnodesHash == NULL) { + tsVnodesHash = taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, true); + if (tsVnodesHash == NULL) { vError("failed to init vnode list"); return TSDB_CODE_VND_OUT_OF_MEMORY; } + if (tsdbInitCommitQueue(tsNumOfCommitThreads) < 0) { + vError("failed to init vnode commit queue"); + return terrno; + } + return TSDB_CODE_SUCCESS; } void vnodeCleanupResources() { - if (tsDnodeVnodesHash != NULL) { + tsdbDestroyCommitQueue(); + + if (tsVnodesHash != NULL) { vDebug("vnode list is cleanup"); - taosHashCleanup(tsDnodeVnodesHash); - tsDnodeVnodesHash = NULL; + taosHashCleanup(tsVnodesHash); + tsVnodesHash = NULL; } syncCleanUp(); } -int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) { +int32_t vnodeCreate(SCreateVnodeMsg *pVnodeCfg) { int32_t code; SVnodeObj *pVnode = vnodeAcquire(pVnodeCfg->cfg.vgId); @@ -128,7 +125,7 @@ int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) { } } - code = vnodeSaveCfg(pVnodeCfg); + code = vnodeWriteCfg(pVnodeCfg); if (code != TSDB_CODE_SUCCESS) { vError("vgId:%d, failed to save vnode cfg, reason:%s", pVnodeCfg->cfg.vgId, tstrerror(code)); return code; @@ -138,13 +135,13 @@ int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) { tsdbCfg.tsdbId = pVnodeCfg->cfg.vgId; tsdbCfg.cacheBlockSize = pVnodeCfg->cfg.cacheBlockSize; tsdbCfg.totalBlocks = pVnodeCfg->cfg.totalBlocks; - // tsdbCfg.maxTables = pVnodeCfg->cfg.maxTables; tsdbCfg.daysPerFile = pVnodeCfg->cfg.daysPerFile; tsdbCfg.keep = pVnodeCfg->cfg.daysToKeep; tsdbCfg.minRowsPerFileBlock = pVnodeCfg->cfg.minRowsPerFileBlock; tsdbCfg.maxRowsPerFileBlock = pVnodeCfg->cfg.maxRowsPerFileBlock; tsdbCfg.precision = pVnodeCfg->cfg.precision; tsdbCfg.compression = pVnodeCfg->cfg.compression; + tsdbCfg.update = pVnodeCfg->cfg.update; char tsdbDir[TSDB_FILENAME_LEN] = {0}; sprintf(tsdbDir, "%s/vnode%d/tsdb", tsVnodeDir, pVnodeCfg->cfg.vgId); @@ -176,7 +173,7 @@ int32_t vnodeDrop(int32_t vgId) { return TSDB_CODE_SUCCESS; } -int32_t vnodeAlter(void *param, SMDCreateVnodeMsg *pVnodeCfg) { +int32_t vnodeAlter(void *param, SCreateVnodeMsg *pVnodeCfg) { SVnodeObj *pVnode = param; // vnode in non-ready state and still needs to return success instead of TSDB_CODE_VND_INVALID_STATUS @@ -186,7 +183,7 @@ int32_t vnodeAlter(void *param, SMDCreateVnodeMsg *pVnodeCfg) { return TSDB_CODE_SUCCESS; } - int32_t code = vnodeSaveCfg(pVnodeCfg); + int32_t code = vnodeWriteCfg(pVnodeCfg); if (code != TSDB_CODE_SUCCESS) { pVnode->status = TAOS_VN_STATUS_READY; return code; @@ -237,6 +234,7 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) { pVnode->vgId = vnode; pVnode->status = TAOS_VN_STATUS_INIT; + pVnode->fversion = 0; pVnode->version = 0; pVnode->tsdbCfg.tsdbId = pVnode->vgId; 
pVnode->rootDir = strdup(rootDir); @@ -259,8 +257,8 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) { pVnode->fversion = pVnode->version; - pVnode->wqueue = dnodeAllocateVnodeWqueue(pVnode); - pVnode->rqueue = dnodeAllocateVnodeRqueue(pVnode); + pVnode->wqueue = dnodeAllocVWriteQueue(pVnode); + pVnode->rqueue = dnodeAllocVReadQueue(pVnode); if (pVnode->wqueue == NULL || pVnode->rqueue == NULL) { vnodeCleanUp(pVnode); return terrno; @@ -271,7 +269,7 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) { strcpy(cqCfg.pass, tsInternalPass); strcpy(cqCfg.db, pVnode->db); cqCfg.vgId = vnode; - cqCfg.cqWrite = vnodeWriteCqMsgToQueue; + cqCfg.cqWrite = vnodeWriteToWQueue; pVnode->cq = cqOpen(pVnode, &cqCfg); if (pVnode->cq == NULL) { vnodeCleanUp(pVnode); @@ -291,25 +289,42 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) { if (pVnode->tsdb == NULL) { vnodeCleanUp(pVnode); return terrno; - } else if (terrno != TSDB_CODE_SUCCESS && pVnode->syncCfg.replica <= 1) { + } else if (terrno != TSDB_CODE_SUCCESS) { vError("vgId:%d, failed to open tsdb, replica:%d reason:%s", pVnode->vgId, pVnode->syncCfg.replica, tstrerror(terrno)); - vnodeCleanUp(pVnode); - return terrno; + if (pVnode->syncCfg.replica <= 1) { + vnodeCleanUp(pVnode); + return terrno; + } else { + pVnode->fversion = 0; + pVnode->version = 0; + } } sprintf(temp, "%s/wal", rootDir); + pVnode->walCfg.vgId = pVnode->vgId; pVnode->wal = walOpen(temp, &pVnode->walCfg); if (pVnode->wal == NULL) { vnodeCleanUp(pVnode); return terrno; } - walRestore(pVnode->wal, pVnode, vnodeWriteToQueue); + walRestore(pVnode->wal, pVnode, vnodeProcessWrite); if (pVnode->version == 0) { + pVnode->fversion = 0; pVnode->version = walGetVersion(pVnode->wal); } + code = tsdbSyncCommit(pVnode->tsdb); + if (code != 0) { + vError("vgId:%d, failed to commit after restore from wal since %s", pVnode->vgId, tstrerror(code)); + vnodeCleanUp(pVnode); + return code; + } + + walRemoveAllOldFiles(pVnode->wal); + walRenew(pVnode->wal); + SSyncInfo syncInfo; syncInfo.vgId = pVnode->vgId; syncInfo.version = pVnode->version; @@ -318,8 +333,8 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) { syncInfo.ahandle = pVnode; syncInfo.getWalInfo = vnodeGetWalInfo; syncInfo.getFileInfo = vnodeGetFileInfo; - syncInfo.writeToCache = vnodeWriteToQueue; - syncInfo.confirmForward = dnodeSendRpcVnodeWriteRsp; + syncInfo.writeToCache = vnodeWriteToWQueue; + syncInfo.confirmForward = dnodeSendRpcVWriteRsp; syncInfo.notifyRole = vnodeNotifyRole; syncInfo.notifyFlowCtrl = vnodeCtrlFlow; syncInfo.notifyFileSynced = vnodeNotifyFileSynced; @@ -328,7 +343,7 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) { #ifndef _SYNC pVnode->role = TAOS_SYNC_ROLE_MASTER; #else - if (pVnode->sync == NULL) { + if (pVnode->sync <= 0) { vError("vgId:%d, failed to open sync module, replica:%d reason:%s", pVnode->vgId, pVnode->syncCfg.replica, tstrerror(terrno)); vnodeCleanUp(pVnode); @@ -346,7 +361,8 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) { pVnode->status = TAOS_VN_STATUS_READY; vDebug("vgId:%d, vnode is opened in %s, pVnode:%p", pVnode->vgId, rootDir, pVnode); - taosHashPut(tsDnodeVnodesHash, (const char *)&pVnode->vgId, sizeof(int32_t), (char *)(&pVnode), sizeof(SVnodeObj *)); + tsdbIncCommitRef(pVnode->vgId); + taosHashPut(tsVnodesHash, (const char *)&pVnode->vgId, sizeof(int32_t), (char *)(&pVnode), sizeof(SVnodeObj *)); return TSDB_CODE_SUCCESS; } @@ -362,8 +378,10 @@ int32_t vnodeClose(int32_t vgId) { return 0; } -void vnodeRelease(void *pVnodeRaw) { - SVnodeObj *pVnode = pVnodeRaw; +void 
vnodeRelease(void *vparam) { + if (vparam == NULL) return; + SVnodeObj *pVnode = vparam; + int32_t code = 0; int32_t vgId = pVnode->vgId; int32_t refCount = atomic_sub_fetch_32(&pVnode->refCount, 1); @@ -384,8 +402,12 @@ void vnodeRelease(void *pVnodeRaw) { pVnode->qMgmt = NULL; } + if (pVnode->wal) { + walStop(pVnode->wal); + } + if (pVnode->tsdb) { - tsdbCloseRepo(pVnode->tsdb, 1); + code = tsdbCloseRepo(pVnode->tsdb, 1); pVnode->tsdb = NULL; } @@ -397,21 +419,26 @@ void vnodeRelease(void *pVnodeRaw) { } if (pVnode->wal) { + if (code != 0) { + vError("vgId:%d, failed to commit while close tsdb repo, keep wal", pVnode->vgId); + } else { + walRemoveAllOldFiles(pVnode->wal); + } walClose(pVnode->wal); pVnode->wal = NULL; } if (pVnode->wqueue) { - dnodeFreeVnodeWqueue(pVnode->wqueue); + dnodeFreeVWriteQueue(pVnode->wqueue); pVnode->wqueue = NULL; } if (pVnode->rqueue) { - dnodeFreeVnodeRqueue(pVnode->rqueue); + dnodeFreeVReadQueue(pVnode->rqueue); pVnode->rqueue = NULL; } - taosTFree(pVnode->rootDir); + tfree(pVnode->rootDir); if (pVnode->dropped) { char rootDir[TSDB_FILENAME_LEN] = {0}; @@ -432,8 +459,9 @@ void vnodeRelease(void *pVnodeRaw) { tsem_destroy(&pVnode->sem); free(pVnode); + tsdbDecCommitRef(vgId); - int32_t count = taosHashGetSize(tsDnodeVnodesHash); + int32_t count = taosHashGetSize(tsVnodesHash); vDebug("vgId:%d, vnode is destroyed, vnodes:%d", vgId, count); } @@ -450,7 +478,7 @@ static void vnodeIncRef(void *ptNode) { } void *vnodeAcquire(int32_t vgId) { - SVnodeObj **ppVnode = taosHashGetCB(tsDnodeVnodesHash, &vgId, sizeof(int32_t), vnodeIncRef, NULL, sizeof(void *)); + SVnodeObj **ppVnode = taosHashGetCB(tsVnodesHash, &vgId, sizeof(int32_t), vnodeIncRef, NULL, sizeof(void *)); if (ppVnode == NULL || *ppVnode == NULL) { terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; @@ -461,41 +489,11 @@ void *vnodeAcquire(int32_t vgId) { return *ppVnode; } -void *vnodeAcquireRqueue(int32_t vgId) { - SVnodeObj *pVnode = vnodeAcquire(vgId); - if (pVnode == NULL) return NULL; - - int32_t code = vnodeCheckRead(pVnode); - if (code != TSDB_CODE_SUCCESS) { - terrno = code; - vInfo("vgId:%d, can not provide read service, status is %s", vgId, vnodeStatus[pVnode->status]); - vnodeRelease(pVnode); - return NULL; - } - - return pVnode->rqueue; -} - -void *vnodeAcquireWqueue(int32_t vgId) { - SVnodeObj *pVnode = vnodeAcquire(vgId); - if (pVnode == NULL) return NULL; - - int32_t code = vnodeCheckWrite(pVnode); - if (code != TSDB_CODE_SUCCESS) { - terrno = code; - vInfo("vgId:%d, can not provide write service, status is %s", vgId, vnodeStatus[pVnode->status]); - vnodeRelease(pVnode); - return NULL; - } - - return pVnode->wqueue; -} - void *vnodeGetWal(void *pVnode) { return ((SVnodeObj *)pVnode)->wal; } -static void vnodeBuildVloadMsg(SVnodeObj *pVnode, SDMStatusMsg *pStatus) { +static void vnodeBuildVloadMsg(SVnodeObj *pVnode, SStatusMsg *pStatus) { int64_t totalStorage = 0; int64_t compStorage = 0; int64_t pointsWritten = 0; @@ -519,7 +517,7 @@ static void vnodeBuildVloadMsg(SVnodeObj *pVnode, SDMStatusMsg *pStatus) { } int32_t vnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes) { - SHashMutableIterator *pIter = taosHashCreateIter(tsDnodeVnodesHash); + SHashMutableIterator *pIter = taosHashCreateIter(tsVnodesHash); while (taosHashIterNext(pIter)) { SVnodeObj **pVnode = taosHashIterGet(pIter); if (pVnode == NULL) continue; @@ -539,8 +537,8 @@ int32_t vnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes) { } void vnodeBuildStatusMsg(void *param) { - SDMStatusMsg *pStatus = param; - 
SHashMutableIterator *pIter = taosHashCreateIter(tsDnodeVnodesHash); + SStatusMsg *pStatus = param; + SHashMutableIterator *pIter = taosHashCreateIter(tsVnodesHash); while (taosHashIterNext(pIter)) { SVnodeObj **pVnode = taosHashIterGet(pIter); @@ -553,7 +551,7 @@ void vnodeBuildStatusMsg(void *param) { taosHashDestroyIter(pIter); } -void vnodeSetAccess(SDMVgroupAccess *pAccess, int32_t numOfVnodes) { +void vnodeSetAccess(SVgroupAccess *pAccess, int32_t numOfVnodes) { for (int32_t i = 0; i < numOfVnodes; ++i) { pAccess[i].vgId = htonl(pAccess[i].vgId); SVnodeObj *pVnode = vnodeAcquire(pAccess[i].vgId); @@ -569,7 +567,7 @@ void vnodeSetAccess(SDMVgroupAccess *pAccess, int32_t numOfVnodes) { static void vnodeCleanUp(SVnodeObj *pVnode) { // remove from hash, so new messages wont be consumed - taosHashRemove(tsDnodeVnodesHash, (const char *)&pVnode->vgId, sizeof(int32_t)); + taosHashRemove(tsVnodesHash, (const char *)&pVnode->vgId, sizeof(int32_t)); if (pVnode->status != TAOS_VN_STATUS_INIT) { // it may be in updateing or reset state, then it shall wait @@ -583,9 +581,9 @@ static void vnodeCleanUp(SVnodeObj *pVnode) { } // stop replication module - if (pVnode->sync) { - void *sync = pVnode->sync; - pVnode->sync = NULL; + if (pVnode->sync > 0) { + int64_t sync = pVnode->sync; + pVnode->sync = -1; syncStop(sync); } @@ -597,29 +595,46 @@ static void vnodeCleanUp(SVnodeObj *pVnode) { } // TODO: this is a simple implement -static int vnodeProcessTsdbStatus(void *arg, int status) { +static int vnodeProcessTsdbStatus(void *arg, int status, int eno) { SVnodeObj *pVnode = arg; + if (eno != TSDB_CODE_SUCCESS) { + vError("vgId:%d, failed to commit since %s, fver:%" PRIu64 " vver:%" PRIu64, pVnode->vgId, tstrerror(eno), + pVnode->fversion, pVnode->version); + pVnode->isFull = 1; + return 0; + } + if (status == TSDB_STATUS_COMMIT_START) { - pVnode->fversion = pVnode->version; - return walRenew(pVnode->wal); + pVnode->fversion = pVnode->version; + vDebug("vgId:%d, start commit, fver:%" PRIu64 " vver:%" PRIu64, pVnode->vgId, pVnode->fversion, pVnode->version); + if (pVnode->status == TAOS_VN_STATUS_INIT) { + return 0; + } else { + return walRenew(pVnode->wal); + } } - if (status == TSDB_STATUS_COMMIT_OVER) + if (status == TSDB_STATUS_COMMIT_OVER) { + vDebug("vgId:%d, commit over, fver:%" PRIu64 " vver:%" PRIu64, pVnode->vgId, pVnode->fversion, pVnode->version); + pVnode->isFull = 0; + walRemoveOneOldFile(pVnode->wal); return vnodeSaveVersion(pVnode); + } - return 0; + return 0; } -static uint32_t vnodeGetFileInfo(void *ahandle, char *name, uint32_t *index, uint32_t eindex, int64_t *size, uint64_t *fversion) { +static uint32_t vnodeGetFileInfo(void *ahandle, char *name, uint32_t *index, uint32_t eindex, int64_t *size, + uint64_t *fversion) { SVnodeObj *pVnode = ahandle; *fversion = pVnode->fversion; return tsdbGetFileInfo(pVnode->tsdb, name, index, eindex, size); } -static int vnodeGetWalInfo(void *ahandle, char *name, uint32_t *index) { +static int vnodeGetWalInfo(void *ahandle, char *fileName, int64_t *fileId) { SVnodeObj *pVnode = ahandle; - return walGetWalFile(pVnode->wal, name, index); + return walGetWalFile(pVnode->wal, fileName, fileId); } static void vnodeNotifyRole(void *ahandle, int8_t role) { @@ -628,26 +643,27 @@ static void vnodeNotifyRole(void *ahandle, int8_t role) { pVnode->role = role; dnodeSendStatusMsgToMnode(); - if (pVnode->role == TAOS_SYNC_ROLE_MASTER) + if (pVnode->role == TAOS_SYNC_ROLE_MASTER) { cqStart(pVnode->cq); - else + } else { cqStop(pVnode->cq); + } } static void 
vnodeCtrlFlow(void *ahandle, int32_t mseconds) { SVnodeObj *pVnode = ahandle; - if (pVnode->delay != mseconds) - vInfo("vgId:%d, sync flow control, mseconds:%d", pVnode->vgId, mseconds); - pVnode->delay = mseconds; + if (pVnode->delayMs != mseconds) { + pVnode->delayMs = mseconds; + vDebug("vgId:%d, sync flow control, mseconds:%d", pVnode->vgId, mseconds); + } } -static int vnodeResetTsdb(SVnodeObj *pVnode) -{ +static int vnodeResetTsdb(SVnodeObj *pVnode) { char rootDir[128] = "\0"; sprintf(rootDir, "%s/tsdb", pVnode->rootDir); - if (atomic_val_compare_exchange_8(&pVnode->status, TAOS_VN_STATUS_READY, TAOS_VN_STATUS_RESET) != TAOS_VN_STATUS_READY) { - return -1; + if (pVnode->status != TAOS_VN_STATUS_CLOSING && pVnode->status != TAOS_VN_STATUS_INIT) { + pVnode->status = TAOS_VN_STATUS_RESET; } void *tsdb = pVnode->tsdb; @@ -671,375 +687,19 @@ static int vnodeResetTsdb(SVnodeObj *pVnode) pVnode->tsdb = tsdbOpenRepo(rootDir, &appH); pVnode->status = TAOS_VN_STATUS_READY; - vnodeRelease(pVnode); + vnodeRelease(pVnode); return 0; } static int vnodeNotifyFileSynced(void *ahandle, uint64_t fversion) { SVnodeObj *pVnode = ahandle; - vDebug("vgId:%d, data file is synced, fversion:%" PRId64, pVnode->vgId, fversion); pVnode->fversion = fversion; pVnode->version = fversion; vnodeSaveVersion(pVnode); + vDebug("vgId:%d, data file is synced, fver:%" PRIu64 " vver:%" PRIu64, pVnode->vgId, pVnode->fversion, + pVnode->version); return vnodeResetTsdb(pVnode); } - -static int32_t vnodeSaveCfg(SMDCreateVnodeMsg *pVnodeCfg) { - char cfgFile[TSDB_FILENAME_LEN + 30] = {0}; - sprintf(cfgFile, "%s/vnode%d/config.json", tsVnodeDir, pVnodeCfg->cfg.vgId); - FILE *fp = fopen(cfgFile, "w"); - if (!fp) { - vError("vgId:%d, failed to open vnode cfg file for write, file:%s error:%s", pVnodeCfg->cfg.vgId, cfgFile, - strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - return terrno; - } - - int32_t len = 0; - int32_t maxLen = 1000; - char * content = calloc(1, maxLen + 1); - if (content == NULL) { - fclose(fp); - return TSDB_CODE_VND_OUT_OF_MEMORY; - } - - len += snprintf(content + len, maxLen - len, "{\n"); - len += snprintf(content + len, maxLen - len, " \"db\": \"%s\",\n", pVnodeCfg->db); - len += snprintf(content + len, maxLen - len, " \"cfgVersion\": %d,\n", pVnodeCfg->cfg.cfgVersion); - len += snprintf(content + len, maxLen - len, " \"cacheBlockSize\": %d,\n", pVnodeCfg->cfg.cacheBlockSize); - len += snprintf(content + len, maxLen - len, " \"totalBlocks\": %d,\n", pVnodeCfg->cfg.totalBlocks); - // len += snprintf(content + len, maxLen - len, " \"maxTables\": %d,\n", pVnodeCfg->cfg.maxTables); - len += snprintf(content + len, maxLen - len, " \"daysPerFile\": %d,\n", pVnodeCfg->cfg.daysPerFile); - len += snprintf(content + len, maxLen - len, " \"daysToKeep\": %d,\n", pVnodeCfg->cfg.daysToKeep); - len += snprintf(content + len, maxLen - len, " \"daysToKeep1\": %d,\n", pVnodeCfg->cfg.daysToKeep1); - len += snprintf(content + len, maxLen - len, " \"daysToKeep2\": %d,\n", pVnodeCfg->cfg.daysToKeep2); - len += snprintf(content + len, maxLen - len, " \"minRowsPerFileBlock\": %d,\n", pVnodeCfg->cfg.minRowsPerFileBlock); - len += snprintf(content + len, maxLen - len, " \"maxRowsPerFileBlock\": %d,\n", pVnodeCfg->cfg.maxRowsPerFileBlock); - // len += snprintf(content + len, maxLen - len, " \"commitTime\": %d,\n", pVnodeCfg->cfg.commitTime); - len += snprintf(content + len, maxLen - len, " \"precision\": %d,\n", pVnodeCfg->cfg.precision); - len += snprintf(content + len, maxLen - len, " \"compression\": %d,\n", 
pVnodeCfg->cfg.compression); - len += snprintf(content + len, maxLen - len, " \"walLevel\": %d,\n", pVnodeCfg->cfg.walLevel); - len += snprintf(content + len, maxLen - len, " \"fsync\": %d,\n", pVnodeCfg->cfg.fsyncPeriod); - len += snprintf(content + len, maxLen - len, " \"replica\": %d,\n", pVnodeCfg->cfg.replications); - len += snprintf(content + len, maxLen - len, " \"wals\": %d,\n", pVnodeCfg->cfg.wals); - len += snprintf(content + len, maxLen - len, " \"quorum\": %d,\n", pVnodeCfg->cfg.quorum); - - len += snprintf(content + len, maxLen - len, " \"nodeInfos\": [{\n"); - - vInfo("vgId:%d, save vnode cfg, replica:%d", pVnodeCfg->cfg.vgId, pVnodeCfg->cfg.replications); - for (int32_t i = 0; i < pVnodeCfg->cfg.replications; i++) { - len += snprintf(content + len, maxLen - len, " \"nodeId\": %d,\n", pVnodeCfg->nodes[i].nodeId); - len += snprintf(content + len, maxLen - len, " \"nodeEp\": \"%s\"\n", pVnodeCfg->nodes[i].nodeEp); - vInfo("vgId:%d, save vnode cfg, nodeId:%d nodeEp:%s", pVnodeCfg->cfg.vgId, pVnodeCfg->nodes[i].nodeId, - pVnodeCfg->nodes[i].nodeEp); - - if (i < pVnodeCfg->cfg.replications - 1) { - len += snprintf(content + len, maxLen - len, " },{\n"); - } else { - len += snprintf(content + len, maxLen - len, " }]\n"); - } - } - len += snprintf(content + len, maxLen - len, "}\n"); - - fwrite(content, 1, len, fp); - fflush(fp); - fclose(fp); - free(content); - - vInfo("vgId:%d, save vnode cfg successed", pVnodeCfg->cfg.vgId); - - return TSDB_CODE_SUCCESS; -} - -static int32_t vnodeReadCfg(SVnodeObj *pVnode) { - cJSON *root = NULL; - char *content = NULL; - char cfgFile[TSDB_FILENAME_LEN + 30] = {0}; - int maxLen = 1000; - - terrno = TSDB_CODE_VND_APP_ERROR; - sprintf(cfgFile, "%s/vnode%d/config.json", tsVnodeDir, pVnode->vgId); - FILE *fp = fopen(cfgFile, "r"); - if (!fp) { - vError("vgId:%d, failed to open vnode cfg file:%s to read, error:%s", pVnode->vgId, - cfgFile, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - goto PARSE_OVER; - } - - content = calloc(1, maxLen + 1); - if (content == NULL) goto PARSE_OVER; - int len = fread(content, 1, maxLen, fp); - if (len <= 0) { - vError("vgId:%d, failed to read vnode cfg, content is null", pVnode->vgId); - free(content); - fclose(fp); - return errno; - } - - root = cJSON_Parse(content); - if (root == NULL) { - vError("vgId:%d, failed to read vnode cfg, invalid json format", pVnode->vgId); - goto PARSE_OVER; - } - - cJSON *db = cJSON_GetObjectItem(root, "db"); - if (!db || db->type != cJSON_String || db->valuestring == NULL) { - vError("vgId:%d, failed to read vnode cfg, db not found", pVnode->vgId); - goto PARSE_OVER; - } - strcpy(pVnode->db, db->valuestring); - - cJSON *cfgVersion = cJSON_GetObjectItem(root, "cfgVersion"); - if (!cfgVersion || cfgVersion->type != cJSON_Number) { - vError("vgId:%d, failed to read vnode cfg, cfgVersion not found", pVnode->vgId); - goto PARSE_OVER; - } - pVnode->cfgVersion = cfgVersion->valueint; - - cJSON *cacheBlockSize = cJSON_GetObjectItem(root, "cacheBlockSize"); - if (!cacheBlockSize || cacheBlockSize->type != cJSON_Number) { - vError("vgId:%d, failed to read vnode cfg, cacheBlockSize not found", pVnode->vgId); - goto PARSE_OVER; - } - pVnode->tsdbCfg.cacheBlockSize = cacheBlockSize->valueint; - - cJSON *totalBlocks = cJSON_GetObjectItem(root, "totalBlocks"); - if (!totalBlocks || totalBlocks->type != cJSON_Number) { - vError("vgId:%d, failed to read vnode cfg, totalBlocks not found", pVnode->vgId); - goto PARSE_OVER; - } - pVnode->tsdbCfg.totalBlocks = totalBlocks->valueint; - - // cJSON 
*maxTables = cJSON_GetObjectItem(root, "maxTables"); - // if (!maxTables || maxTables->type != cJSON_Number) { - // vError("vgId:%d, failed to read vnode cfg, maxTables not found", pVnode->vgId); - // goto PARSE_OVER; - // } - // pVnode->tsdbCfg.maxTables = maxTables->valueint; - - cJSON *daysPerFile = cJSON_GetObjectItem(root, "daysPerFile"); - if (!daysPerFile || daysPerFile->type != cJSON_Number) { - vError("vgId:%d, failed to read vnode cfg, daysPerFile not found", pVnode->vgId); - goto PARSE_OVER; - } - pVnode->tsdbCfg.daysPerFile = daysPerFile->valueint; - - cJSON *daysToKeep = cJSON_GetObjectItem(root, "daysToKeep"); - if (!daysToKeep || daysToKeep->type != cJSON_Number) { - vError("vgId:%d, failed to read vnode cfg, daysToKeep not found", pVnode->vgId); - goto PARSE_OVER; - } - pVnode->tsdbCfg.keep = daysToKeep->valueint; - - cJSON *daysToKeep1 = cJSON_GetObjectItem(root, "daysToKeep1"); - if (!daysToKeep1 || daysToKeep1->type != cJSON_Number) { - vError("vgId:%d, failed to read vnode cfg, daysToKeep1 not found", pVnode->vgId); - goto PARSE_OVER; - } - pVnode->tsdbCfg.keep1 = daysToKeep1->valueint; - - cJSON *daysToKeep2 = cJSON_GetObjectItem(root, "daysToKeep2"); - if (!daysToKeep2 || daysToKeep2->type != cJSON_Number) { - vError("vgId:%d, failed to read vnode cfg, daysToKeep2 not found", pVnode->vgId); - goto PARSE_OVER; - } - pVnode->tsdbCfg.keep2 = daysToKeep2->valueint; - - cJSON *minRowsPerFileBlock = cJSON_GetObjectItem(root, "minRowsPerFileBlock"); - if (!minRowsPerFileBlock || minRowsPerFileBlock->type != cJSON_Number) { - vError("vgId:%d, failed to read vnode cfg, minRowsPerFileBlock not found", pVnode->vgId); - goto PARSE_OVER; - } - pVnode->tsdbCfg.minRowsPerFileBlock = minRowsPerFileBlock->valueint; - - cJSON *maxRowsPerFileBlock = cJSON_GetObjectItem(root, "maxRowsPerFileBlock"); - if (!maxRowsPerFileBlock || maxRowsPerFileBlock->type != cJSON_Number) { - vError("vgId:%d, failed to read vnode cfg, maxRowsPerFileBlock not found", pVnode->vgId); - goto PARSE_OVER; - } - pVnode->tsdbCfg.maxRowsPerFileBlock = maxRowsPerFileBlock->valueint; - - // cJSON *commitTime = cJSON_GetObjectItem(root, "commitTime"); - // if (!commitTime || commitTime->type != cJSON_Number) { - // vError("vgId:%d, failed to read vnode cfg, commitTime not found", pVnode->vgId); - // goto PARSE_OVER; - // } - // pVnode->tsdbCfg.commitTime = (int8_t)commitTime->valueint; - - cJSON *precision = cJSON_GetObjectItem(root, "precision"); - if (!precision || precision->type != cJSON_Number) { - vError("vgId:%d, failed to read vnode cfg, precision not found", pVnode->vgId); - goto PARSE_OVER; - } - pVnode->tsdbCfg.precision = (int8_t)precision->valueint; - - cJSON *compression = cJSON_GetObjectItem(root, "compression"); - if (!compression || compression->type != cJSON_Number) { - vError("vgId:%d, failed to read vnode cfg, compression not found", pVnode->vgId); - goto PARSE_OVER; - } - pVnode->tsdbCfg.compression = (int8_t)compression->valueint; - - cJSON *walLevel = cJSON_GetObjectItem(root, "walLevel"); - if (!walLevel || walLevel->type != cJSON_Number) { - vError("vgId:%d, failed to read vnode cfg, walLevel not found", pVnode->vgId); - goto PARSE_OVER; - } - pVnode->walCfg.walLevel = (int8_t) walLevel->valueint; - - cJSON *fsyncPeriod = cJSON_GetObjectItem(root, "fsync"); - if (!walLevel || walLevel->type != cJSON_Number) { - vError("vgId:%d, failed to read vnode cfg, fsyncPeriod not found", pVnode->vgId); - goto PARSE_OVER; - } - pVnode->walCfg.fsyncPeriod = fsyncPeriod->valueint; - - cJSON *wals = 
cJSON_GetObjectItem(root, "wals"); - if (!wals || wals->type != cJSON_Number) { - vError("vgId:%d, failed to read vnode cfg, wals not found", pVnode->vgId); - goto PARSE_OVER; - } - pVnode->walCfg.wals = (int8_t)wals->valueint; - pVnode->walCfg.keep = 0; - - cJSON *replica = cJSON_GetObjectItem(root, "replica"); - if (!replica || replica->type != cJSON_Number) { - vError("vgId:%d, failed to read vnode cfg, replica not found", pVnode->vgId); - goto PARSE_OVER; - } - pVnode->syncCfg.replica = (int8_t)replica->valueint; - - cJSON *quorum = cJSON_GetObjectItem(root, "quorum"); - if (!quorum || quorum->type != cJSON_Number) { - vError("vgId: %d, failed to read vnode cfg, quorum not found", pVnode->vgId); - goto PARSE_OVER; - } - pVnode->syncCfg.quorum = (int8_t)quorum->valueint; - - cJSON *nodeInfos = cJSON_GetObjectItem(root, "nodeInfos"); - if (!nodeInfos || nodeInfos->type != cJSON_Array) { - vError("vgId:%d, failed to read vnode cfg, nodeInfos not found", pVnode->vgId); - goto PARSE_OVER; - } - - int size = cJSON_GetArraySize(nodeInfos); - if (size != pVnode->syncCfg.replica) { - vError("vgId:%d, failed to read vnode cfg, nodeInfos size not matched", pVnode->vgId); - goto PARSE_OVER; - } - - for (int i = 0; i < size; ++i) { - cJSON *nodeInfo = cJSON_GetArrayItem(nodeInfos, i); - if (nodeInfo == NULL) continue; - - cJSON *nodeId = cJSON_GetObjectItem(nodeInfo, "nodeId"); - if (!nodeId || nodeId->type != cJSON_Number) { - vError("vgId:%d, failed to read vnode cfg, nodeId not found", pVnode->vgId); - goto PARSE_OVER; - } - pVnode->syncCfg.nodeInfo[i].nodeId = nodeId->valueint; - - cJSON *nodeEp = cJSON_GetObjectItem(nodeInfo, "nodeEp"); - if (!nodeEp || nodeEp->type != cJSON_String || nodeEp->valuestring == NULL) { - vError("vgId:%d, failed to read vnode cfg, nodeFqdn not found", pVnode->vgId); - goto PARSE_OVER; - } - - taosGetFqdnPortFromEp(nodeEp->valuestring, pVnode->syncCfg.nodeInfo[i].nodeFqdn, &pVnode->syncCfg.nodeInfo[i].nodePort); - pVnode->syncCfg.nodeInfo[i].nodePort += TSDB_PORT_SYNC; - } - - terrno = TSDB_CODE_SUCCESS; - - vInfo("vgId:%d, read vnode cfg successfully, replcia:%d", pVnode->vgId, pVnode->syncCfg.replica); - for (int32_t i = 0; i < pVnode->syncCfg.replica; i++) { - vInfo("vgId:%d, dnode:%d, %s:%d", pVnode->vgId, pVnode->syncCfg.nodeInfo[i].nodeId, - pVnode->syncCfg.nodeInfo[i].nodeFqdn, pVnode->syncCfg.nodeInfo[i].nodePort); - } - -PARSE_OVER: - taosTFree(content); - cJSON_Delete(root); - if (fp) fclose(fp); - return terrno; -} - -static int32_t vnodeSaveVersion(SVnodeObj *pVnode) { - char versionFile[TSDB_FILENAME_LEN + 30] = {0}; - sprintf(versionFile, "%s/vnode%d/version.json", tsVnodeDir, pVnode->vgId); - FILE *fp = fopen(versionFile, "w"); - if (!fp) { - vError("vgId:%d, failed to open vnode version file for write, file:%s error:%s", pVnode->vgId, - versionFile, strerror(errno)); - return TAOS_SYSTEM_ERROR(errno); - } - - int32_t len = 0; - int32_t maxLen = 30; - char content[TSDB_VNODE_VERSION_CONTENT_LEN] = {0}; - - len += snprintf(content + len, maxLen - len, "{\n"); - len += snprintf(content + len, maxLen - len, " \"version\": %" PRId64 "\n", pVnode->fversion); - len += snprintf(content + len, maxLen - len, "}\n"); - - fwrite(content, 1, len, fp); - fflush(fp); - fclose(fp); - - vInfo("vgId:%d, save vnode version:%" PRId64 " succeed", pVnode->vgId, pVnode->fversion); - - return TSDB_CODE_SUCCESS; -} - -static int32_t vnodeReadVersion(SVnodeObj *pVnode) { - char versionFile[TSDB_FILENAME_LEN + 30] = {0}; - char *content = NULL; - cJSON *root = NULL; - int 
maxLen = 100; - - terrno = TSDB_CODE_VND_INVALID_VRESION_FILE; - sprintf(versionFile, "%s/vnode%d/version.json", tsVnodeDir, pVnode->vgId); - FILE *fp = fopen(versionFile, "r"); - if (!fp) { - if (errno != ENOENT) { - vError("vgId:%d, failed to open version file:%s error:%s", pVnode->vgId, versionFile, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - } else { - terrno = TSDB_CODE_SUCCESS; - } - goto PARSE_OVER; - } - - content = calloc(1, maxLen + 1); - int len = fread(content, 1, maxLen, fp); - if (len <= 0) { - vError("vgId:%d, failed to read vnode version, content is null", pVnode->vgId); - goto PARSE_OVER; - } - - root = cJSON_Parse(content); - if (root == NULL) { - vError("vgId:%d, failed to read vnode version, invalid json format", pVnode->vgId); - goto PARSE_OVER; - } - - cJSON *ver = cJSON_GetObjectItem(root, "version"); - if (!ver || ver->type != cJSON_Number) { - vError("vgId:%d, failed to read vnode version, version not found", pVnode->vgId); - goto PARSE_OVER; - } - pVnode->version = ver->valueint; - - terrno = TSDB_CODE_SUCCESS; - vInfo("vgId:%d, read vnode version successfully, version:%" PRId64, pVnode->vgId, pVnode->version); - -PARSE_OVER: - taosTFree(content); - cJSON_Delete(root); - if (fp) fclose(fp); - return terrno; -} diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c index 99aed03e54ccd069e5879104f62eb01ff7bb3d05..e10d62a0c9f283eded9969547fad779468d3cdaa 100644 --- a/src/vnode/src/vnodeRead.c +++ b/src/vnode/src/vnodeRead.c @@ -15,13 +15,10 @@ #define _DEFAULT_SOURCE #define _NON_BLOCKING_RETRIEVE 0 - #include "os.h" - #include "tglobal.h" #include "taoserror.h" #include "taosmsg.h" -#include "tcache.h" #include "query.h" #include "trpc.h" #include "tsdb.h" @@ -29,9 +26,9 @@ #include "vnodeInt.h" #include "tqueue.h" -static int32_t (*vnodeProcessReadMsgFp[TSDB_MSG_TYPE_MAX])(SVnodeObj *pVnode, SReadMsg *pReadMsg); -static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg); -static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg); +static int32_t (*vnodeProcessReadMsgFp[TSDB_MSG_TYPE_MAX])(SVnodeObj *pVnode, SVReadMsg *pRead); +static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead); +static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SVReadMsg *pRead); static int32_t vnodeNotifyCurrentQhandle(void* handle, void* qhandle, int32_t vgId); void vnodeInitReadFp(void) { @@ -44,58 +41,103 @@ void vnodeInitReadFp(void) { // still required, or there will be a deadlock, so we don’t do any check here, but put the check codes before the // request enters the queue // -int32_t vnodeProcessRead(void *param, SReadMsg *pReadMsg) { - SVnodeObj *pVnode = (SVnodeObj *)param; - int msgType = pReadMsg->rpcMsg.msgType; +int32_t vnodeProcessRead(void *vparam, SVReadMsg *pRead) { + SVnodeObj *pVnode = vparam; + int32_t msgType = pRead->msgType; if (vnodeProcessReadMsgFp[msgType] == NULL) { vDebug("vgId:%d, msgType:%s not processed, no handle", pVnode->vgId, taosMsg[msgType]); return TSDB_CODE_VND_MSG_NOT_PROCESSED; } - return (*vnodeProcessReadMsgFp[msgType])(pVnode, pReadMsg); + return (*vnodeProcessReadMsgFp[msgType])(pVnode, pRead); } -int32_t vnodeCheckRead(void *param) { - SVnodeObj *pVnode = param; +static int32_t vnodeCheckRead(void *vparam) { + SVnodeObj *pVnode = vparam; if (pVnode->status != TAOS_VN_STATUS_READY) { - vDebug("vgId:%d, vnode status is %s, recCount:%d pVnode:%p", pVnode->vgId, vnodeStatus[pVnode->status], + vDebug("vgId:%d, vnode status is %s, refCount:%d pVnode:%p", pVnode->vgId, 
vnodeStatus[pVnode->status], pVnode->refCount, pVnode); return TSDB_CODE_APP_NOT_READY; } // tsdb may be in reset state if (pVnode->tsdb == NULL) { - vDebug("vgId:%d, tsdb is null, recCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode); + vDebug("vgId:%d, tsdb is null, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode); return TSDB_CODE_APP_NOT_READY; } if (pVnode->role != TAOS_SYNC_ROLE_SLAVE && pVnode->role != TAOS_SYNC_ROLE_MASTER) { - vDebug("vgId:%d, replica:%d role:%s, recCount:%d pVnode:%p", pVnode->vgId, pVnode->syncCfg.replica, + vDebug("vgId:%d, replica:%d role:%s, refCount:%d pVnode:%p", pVnode->vgId, pVnode->syncCfg.replica, syncRole[pVnode->role], pVnode->refCount, pVnode); return TSDB_CODE_APP_NOT_READY; } return TSDB_CODE_SUCCESS; } -static int32_t vnodePutItemIntoReadQueue(SVnodeObj *pVnode, void **qhandle, void *ahandle) { - int32_t code = vnodeCheckRead(pVnode); - if (code != TSDB_CODE_SUCCESS) return code; - SReadMsg *pRead = (SReadMsg *)taosAllocateQitem(sizeof(SReadMsg)); - pRead->rpcMsg.msgType = TSDB_MSG_TYPE_QUERY; - pRead->pCont = qhandle; - pRead->contLen = 0; - pRead->rpcMsg.ahandle = ahandle; +void vnodeFreeFromRQueue(void *vparam, SVReadMsg *pRead) { + SVnodeObj *pVnode = vparam; - atomic_add_fetch_32(&pVnode->refCount, 1); + atomic_sub_fetch_32(&pVnode->queuedRMsg, 1); + vTrace("vgId:%d, free from vrqueue, refCount:%d queued:%d", pVnode->vgId, pVnode->refCount, pVnode->queuedRMsg); + + taosFreeQitem(pRead); + vnodeRelease(pVnode); +} + +int32_t vnodeWriteToRQueue(void *vparam, void *pCont, int32_t contLen, int8_t qtype, void *rparam) { + SVnodeObj *pVnode = vparam; + + if (qtype == TAOS_QTYPE_RPC || qtype == TAOS_QTYPE_QUERY) { + int32_t code = vnodeCheckRead(pVnode); + if (code != TSDB_CODE_SUCCESS) return code; + } + + int32_t size = sizeof(SVReadMsg) + contLen; + SVReadMsg *pRead = taosAllocateQitem(size); + if (pRead == NULL) { + return TSDB_CODE_VND_OUT_OF_MEMORY; + } + + if (rparam != NULL) { + SRpcMsg *pRpcMsg = rparam; + pRead->rpcHandle = pRpcMsg->handle; + pRead->rpcAhandle = pRpcMsg->ahandle; + pRead->msgType = pRpcMsg->msgType; + pRead->code = pRpcMsg->code; + } - vDebug("QInfo:%p add to vread queue for exec query, msg:%p", *qhandle, pRead); - taosWriteQitem(pVnode->rqueue, TAOS_QTYPE_QUERY, pRead); + if (contLen != 0) { + pRead->contLen = contLen; + memcpy(pRead->pCont, pCont, contLen); + } else { + pRead->qhandle = pCont; + } + + pRead->qtype = qtype; + atomic_add_fetch_32(&pVnode->refCount, 1); + atomic_add_fetch_32(&pVnode->queuedRMsg, 1); + vTrace("vgId:%d, write into vrqueue, refCount:%d queued:%d", pVnode->vgId, pVnode->refCount, pVnode->queuedRMsg); + + taosWriteQitem(pVnode->rqueue, qtype, pRead); return TSDB_CODE_SUCCESS; } +static int32_t vnodePutItemIntoReadQueue(SVnodeObj *pVnode, void **qhandle, void *ahandle) { + SRpcMsg rpcMsg = {0}; + rpcMsg.msgType = TSDB_MSG_TYPE_QUERY; + rpcMsg.ahandle = ahandle; + + int32_t code = vnodeWriteToRQueue(pVnode, qhandle, 0, TAOS_QTYPE_QUERY, &rpcMsg); + if (code == TSDB_CODE_SUCCESS) { + vDebug("QInfo:%p add to vread queue for exec query", *qhandle); + } + + return code; +} + /** * * @param pRet response message object @@ -146,27 +188,27 @@ static void vnodeBuildNoResultQueryRsp(SRspRet *pRet) { pRsp->completed = true; } -static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { - void * pCont = pReadMsg->pCont; - int32_t contLen = pReadMsg->contLen; - SRspRet *pRet = &pReadMsg->rspRet; +static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) 
{ + void * pCont = pRead->pCont; + int32_t contLen = pRead->contLen; + SRspRet *pRet = &pRead->rspRet; SQueryTableMsg *pQueryTableMsg = (SQueryTableMsg *)pCont; memset(pRet, 0, sizeof(SRspRet)); // qHandle needs to be freed correctly - if (pReadMsg->rpcMsg.code == TSDB_CODE_RPC_NETWORK_UNAVAIL) { - SRetrieveTableMsg *killQueryMsg = (SRetrieveTableMsg *)pReadMsg->pCont; + if (pRead->code == TSDB_CODE_RPC_NETWORK_UNAVAIL) { + SRetrieveTableMsg *killQueryMsg = (SRetrieveTableMsg *)pRead->pCont; killQueryMsg->free = htons(killQueryMsg->free); killQueryMsg->qhandle = htobe64(killQueryMsg->qhandle); - vWarn("QInfo:%p connection %p broken, kill query", (void *)killQueryMsg->qhandle, pReadMsg->rpcMsg.handle); - assert(pReadMsg->rpcMsg.contLen > 0 && killQueryMsg->free == 1); + vWarn("QInfo:%p connection %p broken, kill query", (void *)killQueryMsg->qhandle, pRead->rpcHandle); + assert(pRead->contLen > 0 && killQueryMsg->free == 1); void **qhandle = qAcquireQInfo(pVnode->qMgmt, (uint64_t)killQueryMsg->qhandle); if (qhandle == NULL || *qhandle == NULL) { vWarn("QInfo:%p invalid qhandle, no matched query handle, conn:%p", (void *)killQueryMsg->qhandle, - pReadMsg->rpcMsg.handle); + pRead->rpcHandle); } else { assert(*qhandle == (void *)killQueryMsg->qhandle); @@ -198,7 +240,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { if (handle == NULL) { // failed to register qhandle pRsp->code = terrno; terrno = 0; - vError("vgId:%d QInfo:%p register qhandle failed, return to app, code:%s", pVnode->vgId, (void *)pQInfo, + vError("vgId:%d, QInfo:%p register qhandle failed, return to app, code:%s", pVnode->vgId, (void *)pQInfo, tstrerror(pRsp->code)); qDestroyQueryInfo(pQInfo); // destroy it directly return pRsp->code; @@ -208,9 +250,9 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { } if (handle != NULL && - vnodeNotifyCurrentQhandle(pReadMsg->rpcMsg.handle, *handle, pVnode->vgId) != TSDB_CODE_SUCCESS) { + vnodeNotifyCurrentQhandle(pRead->rpcHandle, *handle, pVnode->vgId) != TSDB_CODE_SUCCESS) { vError("vgId:%d, QInfo:%p, query discarded since link is broken, %p", pVnode->vgId, *handle, - pReadMsg->rpcMsg.handle); + pRead->rpcHandle); pRsp->code = TSDB_CODE_RPC_NETWORK_UNAVAIL; qReleaseQInfo(pVnode->qMgmt, (void **)&handle, true); return pRsp->code; @@ -221,7 +263,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { if (handle != NULL) { vDebug("vgId:%d, QInfo:%p, dnode query msg disposed, create qhandle and returns to app", vgId, *handle); - code = vnodePutItemIntoReadQueue(pVnode, handle, pReadMsg->rpcMsg.ahandle); + code = vnodePutItemIntoReadQueue(pVnode, handle, pRead->rpcHandle); if (code != TSDB_CODE_SUCCESS) { pRsp->code = code; qReleaseQInfo(pVnode->qMgmt, (void **)&handle, true); @@ -230,7 +272,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { } } else { assert(pCont != NULL); - void **qhandle = (void **)pCont; + void **qhandle = (void **)pRead->qhandle; vDebug("vgId:%d, QInfo:%p, dnode continues to exec query", pVnode->vgId, *qhandle); @@ -242,14 +284,14 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { // build query rsp, the retrieve request has reached here already if (buildRes) { // update the connection info according to the retrieve connection - pReadMsg->rpcMsg.handle = qGetResultRetrieveMsg(*qhandle); - assert(pReadMsg->rpcMsg.handle != NULL); + pRead->rpcHandle = qGetResultRetrieveMsg(*qhandle); + assert(pRead->rpcHandle != NULL); 
vDebug("vgId:%d, QInfo:%p, start to build retrieval rsp after query paused, %p", pVnode->vgId, *qhandle, - pReadMsg->rpcMsg.handle); + pRead->rpcHandle); // set the real rsp error code - pReadMsg->rpcMsg.code = vnodeDumpQueryResult(&pReadMsg->rspRet, pVnode, qhandle, &freehandle, pReadMsg->rpcMsg.ahandle); + pRead->code = vnodeDumpQueryResult(&pRead->rspRet, pVnode, qhandle, &freehandle, pRead->rpcHandle); // NOTE: set return code to be TSDB_CODE_QRY_HAS_RSP to notify dnode to return msg to client code = TSDB_CODE_QRY_HAS_RSP; @@ -274,16 +316,16 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { return code; } -static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { - void * pCont = pReadMsg->pCont; - SRspRet *pRet = &pReadMsg->rspRet; +static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { + void * pCont = pRead->pCont; + SRspRet *pRet = &pRead->rspRet; SRetrieveTableMsg *pRetrieve = pCont; pRetrieve->free = htons(pRetrieve->free); pRetrieve->qhandle = htobe64(pRetrieve->qhandle); vDebug("vgId:%d, QInfo:%p, retrieve msg is disposed, free:%d, conn:%p", pVnode->vgId, (void *)pRetrieve->qhandle, - pRetrieve->free, pReadMsg->rpcMsg.handle); + pRetrieve->free, pRead->rpcHandle); memset(pRet, 0, sizeof(SRspRet)); @@ -298,7 +340,7 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { } if (code != TSDB_CODE_SUCCESS) { - vDebug("vgId:%d, invalid handle in retrieving result, code:0x%08x, QInfo:%p", pVnode->vgId, code, (void *)pRetrieve->qhandle); + vError("vgId:%d, invalid handle in retrieving result, code:0x%08x, QInfo:%p", pVnode->vgId, code, (void *)pRetrieve->qhandle); vnodeBuildNoResultQueryRsp(pRet); return code; } @@ -314,9 +356,8 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { } // register the qhandle to connect to quit query immediate if connection is broken - if (vnodeNotifyCurrentQhandle(pReadMsg->rpcMsg.handle, *handle, pVnode->vgId) != TSDB_CODE_SUCCESS) { - vError("vgId:%d, QInfo:%p, retrieve discarded since link is broken, %p", pVnode->vgId, *handle, - pReadMsg->rpcMsg.handle); + if (vnodeNotifyCurrentQhandle(pRead->rpcHandle, *handle, pVnode->vgId) != TSDB_CODE_SUCCESS) { + vError("vgId:%d, QInfo:%p, retrieve discarded since link is broken, %p", pVnode->vgId, *handle, pRead->rpcHandle); code = TSDB_CODE_RPC_NETWORK_UNAVAIL; qKillQuery(*handle); qReleaseQInfo(pVnode->qMgmt, (void **)&handle, true); @@ -326,7 +367,7 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { bool freeHandle = true; bool buildRes = false; - code = qRetrieveQueryResultInfo(*handle, &buildRes, pReadMsg->rpcMsg.handle); + code = qRetrieveQueryResultInfo(*handle, &buildRes, pRead->rpcHandle); if (code != TSDB_CODE_SUCCESS) { // TODO handle malloc failure pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp)); @@ -337,7 +378,7 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { assert(buildRes == true); #if _NON_BLOCKING_RETRIEVE if (!buildRes) { - assert(pReadMsg->rpcMsg.handle != NULL); + assert(pRead->rpcHandle != NULL); qReleaseQInfo(pVnode->qMgmt, (void **)&handle, false); return TSDB_CODE_QRY_NOT_READY; @@ -345,7 +386,7 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { #endif // ahandle is the sqlObj pointer - code = vnodeDumpQueryResult(pRet, pVnode, handle, &freeHandle, pReadMsg->rpcMsg.ahandle); + code = vnodeDumpQueryResult(pRet, pVnode, handle, &freeHandle, 
pRead->rpcHandle); } // If qhandle is not added into vread queue, the query should be completed already or paused with error. diff --git a/src/vnode/src/vnodeVersion.c b/src/vnode/src/vnodeVersion.c new file mode 100644 index 0000000000000000000000000000000000000000..8f6360b4f98eac8f394f5078ed2b025cea4192b0 --- /dev/null +++ b/src/vnode/src/vnodeVersion.c @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#define _DEFAULT_SOURCE +#include "os.h" +#include "taoserror.h" +#include "cJSON.h" +#include "tglobal.h" +#include "tsdb.h" +#include "vnodeInt.h" +#include "vnodeVersion.h" + +int32_t vnodeReadVersion(SVnodeObj *pVnode) { + int32_t len = 0; + int32_t maxLen = 100; + char * content = calloc(1, maxLen + 1); + cJSON * root = NULL; + FILE * fp = NULL; + + terrno = TSDB_CODE_VND_INVALID_VRESION_FILE; + char file[TSDB_FILENAME_LEN + 30] = {0}; + sprintf(file, "%s/vnode%d/version.json", tsVnodeDir, pVnode->vgId); + + fp = fopen(file, "r"); + if (!fp) { + if (errno != ENOENT) { + vError("vgId:%d, failed to read %s, error:%s", pVnode->vgId, file, strerror(errno)); + terrno = TAOS_SYSTEM_ERROR(errno); + } else { + terrno = TSDB_CODE_SUCCESS; + } + goto PARSE_VER_ERROR; + } + + len = fread(content, 1, maxLen, fp); + if (len <= 0) { + vError("vgId:%d, failed to read %s, content is null", pVnode->vgId, file); + goto PARSE_VER_ERROR; + } + + root = cJSON_Parse(content); + if (root == NULL) { + vError("vgId:%d, failed to read %s, invalid json format", pVnode->vgId, file); + goto PARSE_VER_ERROR; + } + + cJSON *ver = cJSON_GetObjectItem(root, "version"); + if (!ver || ver->type != cJSON_Number) { + vError("vgId:%d, failed to read %s, version not found", pVnode->vgId, file); + goto PARSE_VER_ERROR; + } + pVnode->version = (uint64_t)ver->valueint; + + terrno = TSDB_CODE_SUCCESS; + vInfo("vgId:%d, read %s successfully, fver:%" PRIu64, pVnode->vgId, file, pVnode->version); + +PARSE_VER_ERROR: + if (content != NULL) free(content); + if (root != NULL) cJSON_Delete(root); + if (fp != NULL) fclose(fp); + + return terrno; +} + +int32_t vnodeSaveVersion(SVnodeObj *pVnode) { + char file[TSDB_FILENAME_LEN + 30] = {0}; + sprintf(file, "%s/vnode%d/version.json", tsVnodeDir, pVnode->vgId); + + FILE *fp = fopen(file, "w"); + if (!fp) { + vError("vgId:%d, failed to write %s, reason:%s", pVnode->vgId, file, strerror(errno)); + return -1; + } + + int32_t len = 0; + int32_t maxLen = 100; + char * content = calloc(1, maxLen + 1); + + len += snprintf(content + len, maxLen - len, "{\n"); + len += snprintf(content + len, maxLen - len, " \"version\": %" PRIu64 "\n", pVnode->fversion); + len += snprintf(content + len, maxLen - len, "}\n"); + + fwrite(content, 1, len, fp); + fflush(fp); + fclose(fp); + free(content); + terrno = 0; + + vInfo("vgId:%d, successed to write %s, fver:%" PRIu64, pVnode->vgId, file, pVnode->fversion); + return TSDB_CODE_SUCCESS; +} \ No newline at end of file diff --git a/src/vnode/src/vnodeWrite.c b/src/vnode/src/vnodeWrite.c index 
855df81a1b9029d7ddd2f5e5ca6d42e8c680f7ac..2d2be602ed217df502101e423613c6631b400b9f 100644 --- a/src/vnode/src/vnodeWrite.c +++ b/src/vnode/src/vnodeWrite.c @@ -19,7 +19,6 @@ #include "taoserror.h" #include "tqueue.h" #include "trpc.h" -#include "tutil.h" #include "tsdb.h" #include "twal.h" #include "tsync.h" @@ -29,13 +28,15 @@ #include "syncInt.h" #include "tcq.h" -static int32_t (*vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_MAX])(SVnodeObj *, void *, SRspRet *); -static int32_t vnodeProcessSubmitMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *); -static int32_t vnodeProcessCreateTableMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *); -static int32_t vnodeProcessDropTableMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *); -static int32_t vnodeProcessAlterTableMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *); -static int32_t vnodeProcessDropStableMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *); -static int32_t vnodeProcessUpdateTagValMsg(SVnodeObj *pVnode, void *pCont, SRspRet *pRet); +#define MAX_QUEUED_MSG_NUM 10000 + +static int32_t (*vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_MAX])(SVnodeObj *, void *pCont, SRspRet *); +static int32_t vnodeProcessSubmitMsg(SVnodeObj *pVnode, void *pCont, SRspRet *); +static int32_t vnodeProcessCreateTableMsg(SVnodeObj *pVnode, void *pCont, SRspRet *); +static int32_t vnodeProcessDropTableMsg(SVnodeObj *pVnode, void *pCont, SRspRet *); +static int32_t vnodeProcessAlterTableMsg(SVnodeObj *pVnode, void *pCont, SRspRet *); +static int32_t vnodeProcessDropStableMsg(SVnodeObj *pVnode, void *pCont, SRspRet *); +static int32_t vnodeProcessUpdateTagValMsg(SVnodeObj *pVnode, void *pCont, SRspRet *); void vnodeInitWriteFp(void) { vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_SUBMIT] = vnodeProcessSubmitMsg; @@ -46,32 +47,37 @@ void vnodeInitWriteFp(void) { vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_UPDATE_TAG_VAL] = vnodeProcessUpdateTagValMsg; } -int32_t vnodeProcessWrite(void *param1, int qtype, void *param2, void *item) { - int32_t code = 0; - SVnodeObj *pVnode = (SVnodeObj *)param1; - SWalHead * pHead = param2; +int32_t vnodeProcessWrite(void *vparam, void *wparam, int32_t qtype, void *rparam) { + int32_t code = 0; + SVnodeObj * pVnode = vparam; + SWalHead * pHead = wparam; + SRspRet * pRspRet = rparam; if (vnodeProcessWriteMsgFp[pHead->msgType] == NULL) { - vDebug("vgId:%d, msgType:%s not processed, no handle", pVnode->vgId, taosMsg[pHead->msgType]); + vError("vgId:%d, msg:%s not processed since no handle, qtype:%s hver:%" PRIu64, pVnode->vgId, + taosMsg[pHead->msgType], qtypeStr[qtype], pHead->version); return TSDB_CODE_VND_MSG_NOT_PROCESSED; } + vTrace("vgId:%d, msg:%s will be processed in vnode, qtype:%s hver:%" PRIu64 " vver:%" PRIu64, pVnode->vgId, + taosMsg[pHead->msgType], qtypeStr[qtype], pHead->version, pVnode->version); + if (pHead->version == 0) { // from client or CQ if (pVnode->status != TAOS_VN_STATUS_READY) { - vDebug("vgId:%d, msgType:%s not processed, vnode status is %d", pVnode->vgId, taosMsg[pHead->msgType], - pVnode->status); + vDebug("vgId:%d, msg:%s not processed since vstatus:%d, qtype:%s hver:%" PRIu64, pVnode->vgId, + taosMsg[pHead->msgType], pVnode->status, qtypeStr[qtype], pHead->version); return TSDB_CODE_APP_NOT_READY; // it may be in deleting or closing state } if (pVnode->role != TAOS_SYNC_ROLE_MASTER) { - vDebug("vgId:%d, msgType:%s not processed, replica:%d role:%s", pVnode->vgId, taosMsg[pHead->msgType], - pVnode->syncCfg.replica, syncRole[pVnode->role]); + vDebug("vgId:%d, msg:%s not processed since replica:%d role:%s, qtype:%s hver:%" PRIu64, pVnode->vgId, + 
taosMsg[pHead->msgType], pVnode->syncCfg.replica, syncRole[pVnode->role], qtypeStr[qtype], pHead->version); return TSDB_CODE_APP_NOT_READY; } // assign version pHead->version = pVnode->version + 1; - if (pVnode->delay) usleep(pVnode->delay * 1000); + if (pVnode->delayMs) taosMsleep(pVnode->delayMs); } else { // from wal or forward // for data from WAL or forward, version may be smaller @@ -80,7 +86,7 @@ int32_t vnodeProcessWrite(void *param1, int qtype, void *param2, void *item) { // forward to peers, even it is WAL/FWD, it shall be called to update version in sync int32_t syncCode = 0; - syncCode = syncForwardToPeer(pVnode->sync, pHead, item, qtype); + syncCode = syncForwardToPeer(pVnode->sync, pHead, pRspRet, qtype); if (syncCode < 0) return syncCode; // write into WAL @@ -90,36 +96,41 @@ int32_t vnodeProcessWrite(void *param1, int qtype, void *param2, void *item) { pVnode->version = pHead->version; // write data locally - code = (*vnodeProcessWriteMsgFp[pHead->msgType])(pVnode, pHead->cont, item); + code = (*vnodeProcessWriteMsgFp[pHead->msgType])(pVnode, pHead->cont, pRspRet); if (code < 0) return code; return syncCode; } -int32_t vnodeCheckWrite(void *param) { - SVnodeObj *pVnode = param; +static int32_t vnodeCheckWrite(void *vparam) { + SVnodeObj *pVnode = vparam; if (!(pVnode->accessState & TSDB_VN_WRITE_ACCCESS)) { - vDebug("vgId:%d, no write auth, recCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode); + vDebug("vgId:%d, no write auth, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode); return TSDB_CODE_VND_NO_WRITE_AUTH; } // tsdb may be in reset state if (pVnode->tsdb == NULL) { - vDebug("vgId:%d, tsdb is null, recCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode); + vDebug("vgId:%d, tsdb is null, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode); return TSDB_CODE_APP_NOT_READY; } if (pVnode->status == TAOS_VN_STATUS_CLOSING) { - vDebug("vgId:%d, vnode status is %s, recCount:%d pVnode:%p", pVnode->vgId, vnodeStatus[pVnode->status], + vDebug("vgId:%d, vnode status is %s, refCount:%d pVnode:%p", pVnode->vgId, vnodeStatus[pVnode->status], pVnode->refCount, pVnode); return TSDB_CODE_APP_NOT_READY; } + if (pVnode->isFull) { + vDebug("vgId:%d, vnode is full, refCount:%d", pVnode->vgId, pVnode->refCount); + return TSDB_CODE_VND_IS_FULL; + } + return TSDB_CODE_SUCCESS; } -void vnodeConfirmForward(void *param, uint64_t version, int32_t code) { - SVnodeObj *pVnode = (SVnodeObj *)param; +void vnodeConfirmForward(void *vparam, uint64_t version, int32_t code) { + SVnodeObj *pVnode = vparam; syncConfirmForward(pVnode->sync, version, code); } @@ -183,8 +194,8 @@ static int32_t vnodeProcessAlterTableMsg(SVnodeObj *pVnode, void *pCont, SRspRet } static int32_t vnodeProcessDropStableMsg(SVnodeObj *pVnode, void *pCont, SRspRet *pRet) { - SMDDropSTableMsg *pTable = pCont; - int32_t code = TSDB_CODE_SUCCESS; + SDropSTableMsg *pTable = pCont; + int32_t code = TSDB_CODE_SUCCESS; vDebug("vgId:%d, stable:%s, start to drop", pVnode->vgId, pTable->tableId); @@ -204,37 +215,54 @@ static int32_t vnodeProcessUpdateTagValMsg(SVnodeObj *pVnode, void *pCont, SRspR return TSDB_CODE_SUCCESS; } +int32_t vnodeWriteToWQueue(void *vparam, void *wparam, int32_t qtype, void *rparam) { + SVnodeObj *pVnode = vparam; + SWalHead * pHead = wparam; -int vnodeWriteCqMsgToQueue(void *param, void *data, int type) { - SVnodeObj *pVnode = param; - SWalHead * pHead = data; + if (qtype == TAOS_QTYPE_RPC) { + int32_t code = vnodeCheckWrite(pVnode); + if (code != TSDB_CODE_SUCCESS) return 
code; + } - int size = sizeof(SWalHead) + pHead->len; - SSyncHead *pSync = (SSyncHead*) taosAllocateQitem(size + sizeof(SSyncHead)); - SWalHead *pWal = (SWalHead *)(pSync + 1); - memcpy(pWal, pHead, size); + if (pHead->len > TSDB_MAX_WAL_SIZE) { + vError("vgId:%d, wal len:%d exceeds limit, hver:%" PRIu64, pVnode->vgId, pHead->len, pHead->version); + return TSDB_CODE_WAL_SIZE_LIMIT; + } - atomic_add_fetch_32(&pVnode->refCount, 1); - vTrace("CQ: vgId:%d, get vnode wqueue, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode); + int32_t size = sizeof(SVWriteMsg) + sizeof(SWalHead) + pHead->len; + SVWriteMsg *pWrite = taosAllocateQitem(size); + if (pWrite == NULL) { + return TSDB_CODE_VND_OUT_OF_MEMORY; + } - taosWriteQitem(pVnode->wqueue, type, pSync); + if (rparam != NULL) { + SRpcMsg *pRpcMsg = rparam; + pWrite->rpcHandle = pRpcMsg->handle; + pWrite->rpcAhandle = pRpcMsg->ahandle; + } - return 0; -} + memcpy(pWrite->pHead, pHead, sizeof(SWalHead) + pHead->len); + + atomic_add_fetch_32(&pVnode->refCount, 1); + int32_t queued = atomic_add_fetch_32(&pVnode->queuedWMsg, 1); + if (queued > MAX_QUEUED_MSG_NUM) { + vDebug("vgId:%d, too many msg:%d in vwqueue, flow control", pVnode->vgId, queued); + taosMsleep(1); + } -int vnodeWriteToQueue(void *param, void *data, int type) { - SVnodeObj *pVnode = param; - SWalHead * pHead = data; + vTrace("vgId:%d, write into vwqueue, refCount:%d queued:%d", pVnode->vgId, pVnode->refCount, pVnode->queuedWMsg); - int size = sizeof(SWalHead) + pHead->len; - SWalHead *pWal = (SWalHead *)taosAllocateQitem(size); - memcpy(pWal, pHead, size); + taosWriteQitem(pVnode->wqueue, qtype, pWrite); + return TSDB_CODE_SUCCESS; +} - atomic_add_fetch_32(&pVnode->refCount, 1); - vTrace("vgId:%d, get vnode wqueue, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode); +void vnodeFreeFromWQueue(void *vparam, SVWriteMsg *pWrite) { + SVnodeObj *pVnode = vparam; - taosWriteQitem(pVnode->wqueue, type, pWal); + atomic_sub_fetch_32(&pVnode->queuedWMsg, 1); + vTrace("vgId:%d, free from vwqueue, refCount:%d queued:%d", pVnode->vgId, pVnode->refCount, pVnode->queuedWMsg); - return 0; + taosFreeQitem(pWrite); + vnodeRelease(pVnode); } diff --git a/src/wal/inc/walInt.h b/src/wal/inc/walInt.h new file mode 100644 index 0000000000000000000000000000000000000000..b0edabfbd8b0f9e0c71d478994c796f4de755c0a --- /dev/null +++ b/src/wal/inc/walInt.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_WAL_INT_H +#define TDENGINE_WAL_INT_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "tlog.h" + +extern int32_t wDebugFlag; + +#define wFatal(...) { if (wDebugFlag & DEBUG_FATAL) { taosPrintLog("WAL FATAL ", 255, __VA_ARGS__); }} +#define wError(...) { if (wDebugFlag & DEBUG_ERROR) { taosPrintLog("WAL ERROR ", 255, __VA_ARGS__); }} +#define wWarn(...) { if (wDebugFlag & DEBUG_WARN) { taosPrintLog("WAL WARN ", 255, __VA_ARGS__); }} +#define wInfo(...) 
{ if (wDebugFlag & DEBUG_INFO) { taosPrintLog("WAL ", 255, __VA_ARGS__); }} +#define wDebug(...) { if (wDebugFlag & DEBUG_DEBUG) { taosPrintLog("WAL ", wDebugFlag, __VA_ARGS__); }} +#define wTrace(...) { if (wDebugFlag & DEBUG_TRACE) { taosPrintLog("WAL ", wDebugFlag, __VA_ARGS__); }} + +#define WAL_PREFIX "wal" +#define WAL_PREFIX_LEN 3 +#define WAL_REFRESH_MS 1000 +#define WAL_MAX_SIZE (TSDB_MAX_WAL_SIZE + sizeof(SWalHead) + 16) +#define WAL_SIGNATURE ((uint32_t)(0xFAFBFDFE)) +#define WAL_PATH_LEN (TSDB_FILENAME_LEN + 12) +#define WAL_FILE_LEN (WAL_PATH_LEN + 32) +#define WAL_FILE_NUM 3 + +typedef struct { + uint64_t version; + int64_t fileId; + int64_t rid; + int32_t vgId; + int32_t fd; + int32_t keep; + int32_t level; + int32_t fsyncPeriod; + int32_t fsyncSeq; + int8_t stop; + int8_t reserved[3]; + char path[WAL_PATH_LEN]; + char name[WAL_FILE_LEN]; + pthread_mutex_t mutex; +} SWal; + +int32_t walGetNextFile(SWal *pWal, int64_t *nextFileId); +int32_t walGetOldFile(SWal *pWal, int64_t curFileId, int32_t minDiff, int64_t *oldFileId); +int32_t walGetNewFile(SWal *pWal, int64_t *newFileId); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/wal/src/walMain.c b/src/wal/src/walMain.c deleted file mode 100644 index 182600204259e703d171d6598f46a2a16cdcb27b..0000000000000000000000000000000000000000 --- a/src/wal/src/walMain.c +++ /dev/null @@ -1,588 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -#define _DEFAULT_SOURCE - -#define TAOS_RANDOM_FILE_FAIL_TEST - -#include "os.h" -#include "tlog.h" -#include "tchecksum.h" -#include "tutil.h" -#include "ttimer.h" -#include "taoserror.h" -#include "twal.h" -#include "tqueue.h" - -#define walPrefix "wal" - -#define wFatal(...) { if (wDebugFlag & DEBUG_FATAL) { taosPrintLog("WAL FATAL ", 255, __VA_ARGS__); }} -#define wError(...) { if (wDebugFlag & DEBUG_ERROR) { taosPrintLog("WAL ERROR ", 255, __VA_ARGS__); }} -#define wWarn(...) { if (wDebugFlag & DEBUG_WARN) { taosPrintLog("WAL WARN ", 255, __VA_ARGS__); }} -#define wInfo(...) { if (wDebugFlag & DEBUG_INFO) { taosPrintLog("WAL ", 255, __VA_ARGS__); }} -#define wDebug(...) { if (wDebugFlag & DEBUG_DEBUG) { taosPrintLog("WAL ", wDebugFlag, __VA_ARGS__); }} -#define wTrace(...) 
{ if (wDebugFlag & DEBUG_TRACE) { taosPrintLog("WAL ", wDebugFlag, __VA_ARGS__); }} - -typedef struct { - uint64_t version; - int fd; - int keep; - int level; - int32_t fsyncPeriod; - void *timer; - void *signature; - int max; // maximum number of wal files - uint32_t id; // increase continuously - int num; // number of wal files - char path[TSDB_FILENAME_LEN]; - char name[TSDB_FILENAME_LEN+16]; - pthread_mutex_t mutex; -} SWal; - -static void *walTmrCtrl = NULL; -static int tsWalNum = 0; -static pthread_once_t walModuleInit = PTHREAD_ONCE_INIT; -static uint32_t walSignature = 0xFAFBFDFE; -static int walHandleExistingFiles(const char *path); -static int walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp); -static int walRemoveWalFiles(const char *path); -static void walProcessFsyncTimer(void *param, void *tmrId); -static void walRelease(SWal *pWal); -static int walGetMaxOldFileId(char *odir); - -static void walModuleInitFunc() { - walTmrCtrl = taosTmrInit(1000, 100, 300000, "WAL"); - if (walTmrCtrl == NULL) - walModuleInit = PTHREAD_ONCE_INIT; - else - wDebug("WAL module is initialized"); -} - -static inline bool walNeedFsyncTimer(SWal *pWal) { - if (pWal->fsyncPeriod > 0 && pWal->level == TAOS_WAL_FSYNC) { - return true; - } - return false; -} - -void *walOpen(const char *path, const SWalCfg *pCfg) { - SWal *pWal = calloc(sizeof(SWal), 1); - if (pWal == NULL) { - terrno = TAOS_SYSTEM_ERROR(errno); - return NULL; - } - - pthread_once(&walModuleInit, walModuleInitFunc); - if (walTmrCtrl == NULL) { - free(pWal); - terrno = TAOS_SYSTEM_ERROR(errno); - return NULL; - } - - atomic_add_fetch_32(&tsWalNum, 1); - pWal->fd = -1; - pWal->max = pCfg->wals; - pWal->id = 0; - pWal->num = 0; - pWal->level = pCfg->walLevel; - pWal->keep = pCfg->keep; - pWal->fsyncPeriod = pCfg->fsyncPeriod; - pWal->signature = pWal; - tstrncpy(pWal->path, path, sizeof(pWal->path)); - pthread_mutex_init(&pWal->mutex, NULL); - - if (walNeedFsyncTimer(pWal)) { - pWal->timer = taosTmrStart(walProcessFsyncTimer, pWal->fsyncPeriod, pWal, walTmrCtrl); - if (pWal->timer == NULL) { - terrno = TAOS_SYSTEM_ERROR(errno); - walRelease(pWal); - return NULL; - } - } - - if (taosMkDir(path, 0755) != 0) { - terrno = TAOS_SYSTEM_ERROR(errno); - wError("wal:%s, failed to create directory(%s)", path, strerror(errno)); - walRelease(pWal); - pWal = NULL; - } - - if (pCfg->keep == 1) return pWal; - - if (walHandleExistingFiles(path) == 0) walRenew(pWal); - - if (pWal && pWal->fd < 0) { - terrno = TAOS_SYSTEM_ERROR(errno); - wError("wal:%s, failed to open(%s)", path, strerror(errno)); - walRelease(pWal); - pWal = NULL; - } - - if (pWal) wDebug("wal:%s, it is open, level:%d fsyncPeriod:%d", path, pWal->level, pWal->fsyncPeriod); - return pWal; -} - -int walAlter(twalh wal, const SWalCfg *pCfg) { - SWal *pWal = wal; - if (pWal == NULL) { - return TSDB_CODE_WAL_APP_ERROR; - } - - if (pWal->level == pCfg->walLevel && pWal->fsyncPeriod == pCfg->fsyncPeriod) { - wDebug("wal:%s, old walLevel:%d fsync:%d, new walLevel:%d fsync:%d not change", pWal->name, pWal->level, - pWal->fsyncPeriod, pCfg->walLevel, pCfg->fsyncPeriod); - return TSDB_CODE_SUCCESS; - } - - wInfo("wal:%s, change old walLevel:%d fsync:%d, new walLevel:%d fsync:%d", pWal->name, pWal->level, pWal->fsyncPeriod, - pCfg->walLevel, pCfg->fsyncPeriod); - - pthread_mutex_lock(&pWal->mutex); - pWal->level = pCfg->walLevel; - pWal->fsyncPeriod = pCfg->fsyncPeriod; - if (walNeedFsyncTimer(pWal)) { - wInfo("wal:%s, reset fsync timer, walLevel:%d fsyncPeriod:%d", pWal->name, pWal->level, 
pWal->fsyncPeriod); - taosTmrReset(walProcessFsyncTimer, pWal->fsyncPeriod, pWal, &pWal->timer, walTmrCtrl); - } else { - wInfo("wal:%s, stop fsync timer, walLevel:%d fsyncPeriod:%d", pWal->name, pWal->level, pWal->fsyncPeriod); - taosTmrStop(pWal->timer); - pWal->timer = NULL; - } - pthread_mutex_unlock(&pWal->mutex); - - return TSDB_CODE_SUCCESS; -} - -void walClose(void *handle) { - if (handle == NULL) return; - - SWal *pWal = handle; - taosClose(pWal->fd); - if (pWal->timer) taosTmrStopA(&pWal->timer); - - if (pWal->keep == 0) { - // remove all files in the directory - for (int i = 0; i < pWal->num; ++i) { - snprintf(pWal->name, sizeof(pWal->name), "%s/%s%d", pWal->path, walPrefix, pWal->id - i); - if (remove(pWal->name) < 0) { - wError("wal:%s, failed to remove", pWal->name); - } else { - wDebug("wal:%s, it is removed", pWal->name); - } - } - } else { - wDebug("wal:%s, it is closed and kept", pWal->name); - } - - walRelease(pWal); -} - -int walRenew(void *handle) { - if (handle == NULL) return 0; - SWal *pWal = handle; - - terrno = 0; - - pthread_mutex_lock(&pWal->mutex); - - if (pWal->fd >= 0) { - close(pWal->fd); - pWal->id++; - wDebug("wal:%s, it is closed", pWal->name); - } - - pWal->num++; - - snprintf(pWal->name, sizeof(pWal->name), "%s/%s%d", pWal->path, walPrefix, pWal->id); - pWal->fd = open(pWal->name, O_WRONLY | O_CREAT, S_IRWXU | S_IRWXG | S_IRWXO); - - if (pWal->fd < 0) { - wError("wal:%s, failed to open(%s)", pWal->name, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - } else { - wDebug("wal:%s, it is created", pWal->name); - - if (pWal->num > pWal->max) { - // remove the oldest wal file - char name[TSDB_FILENAME_LEN * 3]; - snprintf(name, sizeof(name), "%s/%s%d", pWal->path, walPrefix, pWal->id - pWal->max); - if (remove(name) < 0) { - wError("wal:%s, failed to remove(%s)", name, strerror(errno)); - } else { - wDebug("wal:%s, it is removed", name); - } - - pWal->num--; - } - } - - pthread_mutex_unlock(&pWal->mutex); - - return terrno; -} - -int walWrite(void *handle, SWalHead *pHead) { - SWal *pWal = handle; - if (pWal == NULL) return -1; - - terrno = 0; - - // no wal - if (pWal->level == TAOS_WAL_NOLOG) return 0; - if (pHead->version <= pWal->version) return 0; - - pHead->signature = walSignature; - taosCalcChecksumAppend(0, (uint8_t *)pHead, sizeof(SWalHead)); - int contLen = pHead->len + sizeof(SWalHead); - - if (taosTWrite(pWal->fd, pHead, contLen) != contLen) { - wError("wal:%s, failed to write(%s)", pWal->name, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - } else { - pWal->version = pHead->version; - } - - return terrno; -} - -void walFsync(void *handle) { - SWal *pWal = handle; - if (pWal == NULL || pWal->level != TAOS_WAL_FSYNC || pWal->fd < 0) return; - - if (pWal->fsyncPeriod == 0) { - if (fsync(pWal->fd) < 0) { - wError("wal:%s, fsync failed(%s)", pWal->name, strerror(errno)); - } - } -} - -int walRestore(void *handle, void *pVnode, int (*writeFp)(void *, void *, int)) { - SWal *pWal = handle; - struct dirent *ent; - int count = 0; - uint32_t maxId = 0, minId = -1, index =0; - - terrno = 0; - int plen = strlen(walPrefix); - char opath[TSDB_FILENAME_LEN + 5]; - - int slen = snprintf(opath, sizeof(opath), "%s", pWal->path); - if (pWal->keep == 0) strcpy(opath + slen, "/old"); - - DIR *dir = opendir(opath); - if (dir == NULL && errno == ENOENT) return 0; - if (dir == NULL) { - terrno = TAOS_SYSTEM_ERROR(errno); - return terrno; - } - - while ((ent = readdir(dir)) != NULL) { - if (strncmp(ent->d_name, walPrefix, plen) == 0) { - index = 
atol(ent->d_name + plen); - if (index > maxId) maxId = index; - if (index < minId) minId = index; - count++; - } - } - - closedir(dir); - - if (count == 0) { - if (pWal->keep) terrno = walRenew(pWal); - return terrno; - } - - if (count != (maxId - minId + 1)) { - wError("wal:%s, messed up, count:%d max:%d min:%d", opath, count, maxId, minId); - terrno = TSDB_CODE_WAL_APP_ERROR; - } else { - wDebug("wal:%s, %d files will be restored", opath, count); - - for (index = minId; index <= maxId; ++index) { - snprintf(pWal->name, sizeof(pWal->name), "%s/%s%d", opath, walPrefix, index); - terrno = walRestoreWalFile(pWal, pVnode, writeFp); - if (terrno < 0) continue; - } - } - - if (terrno == 0) { - if (pWal->keep == 0) { - terrno = walRemoveWalFiles(opath); - if (terrno == 0) { - if (remove(opath) < 0) { - wError("wal:%s, failed to remove directory(%s)", opath, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - } - } - } else { - // open the existing WAL file in append mode - pWal->num = count; - pWal->id = maxId; - snprintf(pWal->name, sizeof(pWal->name), "%s/%s%d", opath, walPrefix, maxId); - pWal->fd = open(pWal->name, O_WRONLY | O_CREAT | O_APPEND, S_IRWXU | S_IRWXG | S_IRWXO); - if (pWal->fd < 0) { - wError("wal:%s, failed to open file(%s)", pWal->name, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - } - } - } - - return terrno; -} - -int walGetWalFile(void *handle, char *name, uint32_t *index) { - SWal * pWal = handle; - int code = 1; - int32_t first = 0; - - name[0] = 0; - if (pWal == NULL || pWal->num == 0) return 0; - - pthread_mutex_lock(&(pWal->mutex)); - - first = pWal->id + 1 - pWal->num; - if (*index == 0) *index = first; // set to first one - - if (*index < first && *index > pWal->id) { - code = -1; // index out of range - } else { - sprintf(name, "wal/%s%d", walPrefix, *index); - code = (*index == pWal->id) ? 
0 : 1; - } - - pthread_mutex_unlock(&(pWal->mutex)); - - return code; -} - -static void walRelease(SWal *pWal) { - pthread_mutex_destroy(&pWal->mutex); - pWal->signature = NULL; - free(pWal); - - if (atomic_sub_fetch_32(&tsWalNum, 1) == 0) { - if (walTmrCtrl) taosTmrCleanUp(walTmrCtrl); - walTmrCtrl = NULL; - walModuleInit = PTHREAD_ONCE_INIT; - wDebug("WAL module is cleaned up"); - } -} - -static int walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp) { - char *name = pWal->name; - int size = 1024 * 1024; // default 1M buffer size - - terrno = 0; - char *buffer = malloc(size); - if (buffer == NULL) { - terrno = TAOS_SYSTEM_ERROR(errno); - return terrno; - } - - SWalHead *pHead = (SWalHead *)buffer; - - int fd = open(name, O_RDWR); - if (fd < 0) { - wError("wal:%s, failed to open for restore(%s)", name, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - free(buffer); - return terrno; - } - - wDebug("wal:%s, start to restore", name); - - size_t offset = 0; - while (1) { - int ret = taosTRead(fd, pHead, sizeof(SWalHead)); - if (ret == 0) break; - - if (ret < 0) { - wError("wal:%s, failed to read wal head part since %s", name, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - break; - } - - if (ret < sizeof(SWalHead)) { - wError("wal:%s, failed to read head, ret:%d, skip the rest of file", name, ret); - taosFtruncate(fd, offset); - fsync(fd); - break; - } - - if (!taosCheckChecksumWhole((uint8_t *)pHead, sizeof(SWalHead))) { - wWarn("wal:%s, cksum is messed up, skip the rest of file", name); - terrno = TSDB_CODE_WAL_FILE_CORRUPTED; - // ASSERT(false); - break; - } - - if (pHead->len > size - sizeof(SWalHead)) { - size = sizeof(SWalHead) + pHead->len; - buffer = realloc(buffer, size); - if (buffer == NULL) { - terrno = TAOS_SYSTEM_ERROR(errno); - break; - } - - pHead = (SWalHead *)buffer; - } - - ret = taosTRead(fd, pHead->cont, pHead->len); - if (ret < 0) { - wError("wal:%s failed to read wal body part since %s", name, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - break; - } - - if (ret < pHead->len) { - wError("wal:%s, failed to read body, len:%d ret:%d, skip the rest of file", name, pHead->len, ret); - taosFtruncate(fd, offset); - fsync(fd); - break; - } - - offset = offset + sizeof(SWalHead) + pHead->len; - - if (pWal->keep) pWal->version = pHead->version; - (*writeFp)(pVnode, pHead, TAOS_QTYPE_WAL); - } - - close(fd); - free(buffer); - - return terrno; -} - -int walHandleExistingFiles(const char *path) { - char oname[TSDB_FILENAME_LEN * 3]; - char nname[TSDB_FILENAME_LEN * 3]; - char opath[TSDB_FILENAME_LEN]; - - snprintf(opath, sizeof(opath), "%s/old", path); - - struct dirent *ent; - DIR *dir = opendir(path); - int plen = strlen(walPrefix); - terrno = 0; - - int midx = walGetMaxOldFileId(opath); - int count = 0; - while ((ent = readdir(dir)) != NULL) { - if (strncmp(ent->d_name, walPrefix, plen) == 0) { - midx++; - snprintf(oname, sizeof(oname), "%s/%s", path, ent->d_name); - snprintf(nname, sizeof(nname), "%s/old/wal%d", path, midx); - if (taosMkDir(opath, 0755) != 0) { - wError("wal:%s, failed to create directory:%s(%s)", oname, opath, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - break; - } - - if (rename(oname, nname) < 0) { - wError("wal:%s, failed to move to new:%s", oname, nname); - terrno = TAOS_SYSTEM_ERROR(errno); - break; - } - - count++; - } - - wDebug("wal:%s, %d files are moved for restoration", path, count); - } - - closedir(dir); - return terrno; -} - -static int walRemoveWalFiles(const char *path) { - int plen = 
strlen(walPrefix); - char name[TSDB_FILENAME_LEN * 3]; - - terrno = 0; - - struct dirent *ent; - DIR *dir = opendir(path); - if (dir == NULL && errno == ENOENT) return 0; - if (dir == NULL) { - terrno = TAOS_SYSTEM_ERROR(errno); - return terrno; - } - - while ((ent = readdir(dir)) != NULL) { - if (strncmp(ent->d_name, walPrefix, plen) == 0) { - snprintf(name, sizeof(name), "%s/%s", path, ent->d_name); - if (remove(name) < 0) { - wError("wal:%s, failed to remove(%s)", name, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - } - } - } - - closedir(dir); - - return terrno; -} - -static void walProcessFsyncTimer(void *param, void *tmrId) { - SWal *pWal = param; - - if (pWal->signature != pWal) return; - if (pWal->fd < 0) return; - - if (fsync(pWal->fd) < 0) { - wError("wal:%s, fsync failed(%s)", pWal->name, strerror(errno)); - } - - if (walNeedFsyncTimer(pWal)) { - pWal->timer = taosTmrStart(walProcessFsyncTimer, pWal->fsyncPeriod, pWal, walTmrCtrl); - } else { - wInfo("wal:%s, stop fsync timer for walLevel:%d fsyncPeriod:%d", pWal->name, pWal->level, pWal->fsyncPeriod); - taosTmrStop(pWal->timer); - pWal->timer = NULL; - } -} - -int64_t walGetVersion(twalh param) { - SWal *pWal = param; - if (pWal == 0) return 0; - - return pWal->version; -} - -static int walGetMaxOldFileId(char *odir) { - int midx = 0; - DIR * dir = NULL; - struct dirent *dp = NULL; - int plen = strlen(walPrefix); - - if (access(odir, F_OK) != 0) return midx; - - dir = opendir(odir); - if (dir == NULL) { - wError("failed to open directory %s since %s", odir, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; - } - - while ((dp = readdir(dir)) != NULL) { - if (strncmp(dp->d_name, walPrefix, plen) == 0) { - int idx = atol(dp->d_name + plen); - if (midx < idx) midx = idx; - } - } - - closedir(dir); - return midx; -} \ No newline at end of file diff --git a/src/wal/src/walMgmt.c b/src/wal/src/walMgmt.c new file mode 100644 index 0000000000000000000000000000000000000000..36c190be3e0f72f96b9075b7fb90db39ee3c5412 --- /dev/null +++ b/src/wal/src/walMgmt.c @@ -0,0 +1,217 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
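+ *
+ * walMgmt.c keeps the module-wide WAL state: walInit opens a reference set
+ * (taosOpenRef) that tracks every SWal created by walOpen, and starts a
+ * background thread that wakes up every WAL_REFRESH_MS and fsyncs each WAL
+ * opened with TAOS_WAL_FSYNC and a non-zero fsyncPeriod once its period has
+ * elapsed.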
+ */ + +#define _DEFAULT_SOURCE +#include "os.h" +#include "taoserror.h" +#include "tref.h" +#include "twal.h" +#include "walInt.h" + +typedef struct { + int32_t refId; + int32_t seq; + int8_t stop; + pthread_t thread; + pthread_mutex_t mutex; +} SWalMgmt; + +static SWalMgmt tsWal = {0}; +static int32_t walCreateThread(); +static void walStopThread(); +static int32_t walInitObj(SWal *pWal); +static void walFreeObj(void *pWal); + +int32_t walInit() { + tsWal.refId = taosOpenRef(TSDB_MIN_VNODES, walFreeObj); + + int32_t code = walCreateThread(); + if (code != TSDB_CODE_SUCCESS) { + wError("failed to init wal module since %s", tstrerror(code)); + return code; + } + + wInfo("wal module is initialized, refId:%d", tsWal.refId); + return code; +} + +void walCleanUp() { + walStopThread(); + taosCloseRef(tsWal.refId); + wInfo("wal module is cleaned up"); +} + +void *walOpen(char *path, SWalCfg *pCfg) { + SWal *pWal = tcalloc(1, sizeof(SWal)); + if (pWal == NULL) { + terrno = TAOS_SYSTEM_ERROR(errno); + return NULL; + } + + pWal->vgId = pCfg->vgId; + pWal->fd = -1; + pWal->fileId = -1; + pWal->level = pCfg->walLevel; + pWal->keep = pCfg->keep; + pWal->fsyncPeriod = pCfg->fsyncPeriod; + tstrncpy(pWal->path, path, sizeof(pWal->path)); + pthread_mutex_init(&pWal->mutex, NULL); + + pWal->fsyncSeq = pCfg->fsyncPeriod / 1000; + if (pWal->fsyncSeq <= 0) pWal->fsyncSeq = 1; + + if (walInitObj(pWal) != TSDB_CODE_SUCCESS) { + walFreeObj(pWal); + return NULL; + } + + pWal->rid = taosAddRef(tsWal.refId, pWal); + if (pWal->rid < 0) { + walFreeObj(pWal); + return NULL; + } + + wDebug("vgId:%d, wal:%p is opened, level:%d fsyncPeriod:%d", pWal->vgId, pWal, pWal->level, pWal->fsyncPeriod); + + return pWal; +} + +int32_t walAlter(void *handle, SWalCfg *pCfg) { + if (handle == NULL) return TSDB_CODE_WAL_APP_ERROR; + SWal *pWal = handle; + + if (pWal->level == pCfg->walLevel && pWal->fsyncPeriod == pCfg->fsyncPeriod) { + wDebug("vgId:%d, old walLevel:%d fsync:%d, new walLevel:%d fsync:%d not change", pWal->vgId, pWal->level, + pWal->fsyncPeriod, pCfg->walLevel, pCfg->fsyncPeriod); + return TSDB_CODE_SUCCESS; + } + + wInfo("vgId:%d, change old walLevel:%d fsync:%d, new walLevel:%d fsync:%d", pWal->vgId, pWal->level, + pWal->fsyncPeriod, pCfg->walLevel, pCfg->fsyncPeriod); + + pWal->level = pCfg->walLevel; + pWal->fsyncPeriod = pCfg->fsyncPeriod; + pWal->fsyncSeq = pCfg->fsyncPeriod % 1000; + if (pWal->fsyncSeq <= 0) pWal->fsyncSeq = 1; + + return TSDB_CODE_SUCCESS; +} + +void walStop(void *handle) { + if (handle == NULL) return; + SWal *pWal = handle; + + pthread_mutex_lock(&pWal->mutex); + pWal->stop = 1; + pthread_mutex_unlock(&pWal->mutex); + wDebug("vgId:%d, stop write wal", pWal->vgId); +} + +void walClose(void *handle) { + if (handle == NULL) return; + + SWal *pWal = handle; + pthread_mutex_lock(&pWal->mutex); + taosClose(pWal->fd); + pthread_mutex_unlock(&pWal->mutex); + taosRemoveRef(tsWal.refId, pWal->rid); +} + +static int32_t walInitObj(SWal *pWal) { + if (taosMkDir(pWal->path, 0755) != 0) { + wError("vgId:%d, path:%s, failed to create directory since %s", pWal->vgId, pWal->path, strerror(errno)); + return TAOS_SYSTEM_ERROR(errno); + } + + wDebug("vgId:%d, object is initialized", pWal->vgId); + return TSDB_CODE_SUCCESS; +} + +static void walFreeObj(void *wal) { + SWal *pWal = wal; + wDebug("vgId:%d, wal:%p is freed", pWal->vgId, pWal); + + taosClose(pWal->fd); + pthread_mutex_destroy(&pWal->mutex); + tfree(pWal); +} + +static bool walNeedFsync(SWal *pWal) { + if (pWal->fsyncPeriod <= 0 || pWal->level != 
TAOS_WAL_FSYNC) { + return false; + } + + if (tsWal.seq % pWal->fsyncSeq == 0) { + return true; + } + + return false; +} + +static void walUpdateSeq() { + taosMsleep(WAL_REFRESH_MS); + if (++tsWal.seq <= 0) { + tsWal.seq = 1; + } +} + +static void walFsyncAll() { + SWal *pWal = taosIterateRef(tsWal.refId, 0); + while (pWal) { + if (walNeedFsync(pWal)) { + wTrace("vgId:%d, do fsync, level:%d seq:%d rseq:%d", pWal->vgId, pWal->level, pWal->fsyncSeq, tsWal.seq); + int32_t code = fsync(pWal->fd); + if (code != 0) { + wError("vgId:%d, file:%s, failed to fsync since %s", pWal->vgId, pWal->name, strerror(code)); + } + } + pWal = taosIterateRef(tsWal.refId, pWal->rid); + } +} + +static void *walThreadFunc(void *param) { + while (1) { + walUpdateSeq(); + walFsyncAll(); + if (tsWal.stop) break; + } + + return NULL; +} + +static int32_t walCreateThread() { + pthread_attr_t thAttr; + pthread_attr_init(&thAttr); + pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE); + + if (pthread_create(&tsWal.thread, &thAttr, walThreadFunc, NULL) != 0) { + wError("failed to create wal thread since %s", strerror(errno)); + return TAOS_SYSTEM_ERROR(errno); + } + + pthread_attr_destroy(&thAttr); + wDebug("wal thread is launched"); + + return TSDB_CODE_SUCCESS; +} + +static void walStopThread() { + tsWal.stop = 1; + if (tsWal.thread) { + pthread_join(tsWal.thread, NULL); + } + + wDebug("wal thread is stopped"); +} diff --git a/src/wal/src/walUtil.c b/src/wal/src/walUtil.c new file mode 100644 index 0000000000000000000000000000000000000000..e4d9a555b3a60cb6be1e6584652ec4a309b1c301 --- /dev/null +++ b/src/wal/src/walUtil.c @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
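+ *
+ * walUtil.c holds the directory-scan helpers declared in walInt.h:
+ * walGetNextFile, walGetOldFile and walGetNewFile parse file names of the
+ * form "wal<fileId>" under the WAL path and return, respectively, the next
+ * fileId after a given one, the oldest fileId when at least minDiff older
+ * files exist, and the newest fileId on disk.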
+ */ + +#define _DEFAULT_SOURCE +#include "os.h" +#include "walInt.h" + +int32_t walGetNextFile(SWal *pWal, int64_t *nextFileId) { + int64_t curFileId = *nextFileId; + int64_t minFileId = INT64_MAX; + + DIR *dir = opendir(pWal->path); + if (dir == NULL) { + wError("vgId:%d, path:%s, failed to open since %s", pWal->vgId, pWal->path, strerror(errno)); + return -1; + } + + struct dirent *ent; + while ((ent = readdir(dir)) != NULL) { + char *name = ent->d_name; + + if (strncmp(name, WAL_PREFIX, WAL_PREFIX_LEN) == 0) { + int64_t id = atoll(name + WAL_PREFIX_LEN); + if (id <= curFileId) continue; + + if (id < minFileId) { + minFileId = id; + } + } + } + closedir(dir); + + if (minFileId == INT64_MAX) return -1; + + *nextFileId = minFileId; + wTrace("vgId:%d, path:%s, curFileId:%" PRId64 " nextFileId:%" PRId64, pWal->vgId, pWal->path, curFileId, *nextFileId); + + return 0; +} + +int32_t walGetOldFile(SWal *pWal, int64_t curFileId, int32_t minDiff, int64_t *oldFileId) { + int64_t minFileId = INT64_MAX; + + DIR *dir = opendir(pWal->path); + if (dir == NULL) { + wError("vgId:%d, path:%s, failed to open since %s", pWal->vgId, pWal->path, strerror(errno)); + return -1; + } + + struct dirent *ent; + while ((ent = readdir(dir)) != NULL) { + char *name = ent->d_name; + + if (strncmp(name, WAL_PREFIX, WAL_PREFIX_LEN) == 0) { + int64_t id = atoll(name + WAL_PREFIX_LEN); + if (id >= curFileId) continue; + + minDiff--; + if (id < minFileId) { + minFileId = id; + } + } + } + closedir(dir); + + if (minFileId == INT64_MAX) return -1; + if (minDiff > 0) return -1; + + *oldFileId = minFileId; + wTrace("vgId:%d, path:%s, curFileId:%" PRId64 " oldFildId:%" PRId64, pWal->vgId, pWal->path, curFileId, *oldFileId); + + return 0; +} + +int32_t walGetNewFile(SWal *pWal, int64_t *newFileId) { + int64_t maxFileId = INT64_MIN; + + DIR *dir = opendir(pWal->path); + if (dir == NULL) { + wError("vgId:%d, path:%s, failed to open since %s", pWal->vgId, pWal->path, strerror(errno)); + return -1; + } + + struct dirent *ent; + while ((ent = readdir(dir)) != NULL) { + char *name = ent->d_name; + + if (strncmp(name, WAL_PREFIX, WAL_PREFIX_LEN) == 0) { + int64_t id = atoll(name + WAL_PREFIX_LEN); + if (id > maxFileId) { + maxFileId = id; + } + } + } + closedir(dir); + + if (maxFileId == INT64_MIN) { + *newFileId = 0; + } else { + *newFileId = maxFileId; + } + + wTrace("vgId:%d, path:%s, newFileId:%" PRId64, pWal->vgId, pWal->path, *newFileId); + + return 0; +} \ No newline at end of file diff --git a/src/wal/src/walWrite.c b/src/wal/src/walWrite.c new file mode 100644 index 0000000000000000000000000000000000000000..36b3dba165cbf399360e95d090278a681b10646d --- /dev/null +++ b/src/wal/src/walWrite.c @@ -0,0 +1,346 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
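+ *
+ * walWrite.c implements the per-WAL write path: walRenew creates or rotates
+ * the current wal file, walWrite appends a checksummed SWalHead plus body,
+ * walFsync flushes it, and walRestore replays every record found on disk at
+ * restart, skipping corrupted records by their signature and header checksum.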
+ */ + +#define _DEFAULT_SOURCE +#define TAOS_RANDOM_FILE_FAIL_TEST +#include "os.h" +#include "taoserror.h" +#include "tchecksum.h" +#include "twal.h" +#include "walInt.h" + +static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, char *name, int64_t fileId); + +int32_t walRenew(void *handle) { + if (handle == NULL) return 0; + + SWal * pWal = handle; + int32_t code = 0; + + if (pWal->stop) { + wDebug("vgId:%d, do not create a new wal file", pWal->vgId); + return 0; + } + + pthread_mutex_lock(&pWal->mutex); + + if (pWal->fd >= 0) { + tclose(pWal->fd); + wDebug("vgId:%d, file:%s, it is closed", pWal->vgId, pWal->name); + } + + if (pWal->keep == TAOS_WAL_KEEP) { + pWal->fileId = 0; + } else { + if (walGetNewFile(pWal, &pWal->fileId) != 0) pWal->fileId = 0; + pWal->fileId++; + } + + snprintf(pWal->name, sizeof(pWal->name), "%s/%s%" PRId64, pWal->path, WAL_PREFIX, pWal->fileId); + pWal->fd = open(pWal->name, O_WRONLY | O_CREAT, S_IRWXU | S_IRWXG | S_IRWXO); + + if (pWal->fd < 0) { + code = TAOS_SYSTEM_ERROR(errno); + wError("vgId:%d, file:%s, failed to open since %s", pWal->vgId, pWal->name, strerror(errno)); + } else { + wDebug("vgId:%d, file:%s, it is created", pWal->vgId, pWal->name); + } + + pthread_mutex_unlock(&pWal->mutex); + + return code; +} + +void walRemoveOneOldFile(void *handle) { + SWal *pWal = handle; + if (pWal == NULL) return; + if (pWal->keep == TAOS_WAL_KEEP) return; + if (pWal->fd <= 0) return; + + pthread_mutex_lock(&pWal->mutex); + + // remove the oldest wal file + int64_t oldFileId = -1; + if (walGetOldFile(pWal, pWal->fileId, WAL_FILE_NUM, &oldFileId) == 0) { + char walName[WAL_FILE_LEN] = {0}; + snprintf(walName, sizeof(walName), "%s/%s%" PRId64, pWal->path, WAL_PREFIX, oldFileId); + + if (remove(walName) < 0) { + wError("vgId:%d, file:%s, failed to remove since %s", pWal->vgId, walName, strerror(errno)); + } else { + wInfo("vgId:%d, file:%s, it is removed", pWal->vgId, walName); + } + } + + pthread_mutex_unlock(&pWal->mutex); +} + +void walRemoveAllOldFiles(void *handle) { + if (handle == NULL) return; + + SWal * pWal = handle; + int64_t fileId = -1; + + pthread_mutex_lock(&pWal->mutex); + while (walGetNextFile(pWal, &fileId) >= 0) { + snprintf(pWal->name, sizeof(pWal->name), "%s/%s%" PRId64, pWal->path, WAL_PREFIX, fileId); + + if (remove(pWal->name) < 0) { + wError("vgId:%d, wal:%p file:%s, failed to remove", pWal->vgId, pWal, pWal->name); + } else { + wInfo("vgId:%d, wal:%p file:%s, it is removed", pWal->vgId, pWal, pWal->name); + } + } + pthread_mutex_unlock(&pWal->mutex); +} + +int32_t walWrite(void *handle, SWalHead *pHead) { + if (handle == NULL) return -1; + + SWal * pWal = handle; + int32_t code = 0; + + // no wal + if (pWal->fd <= 0) return 0; + if (pWal->level == TAOS_WAL_NOLOG) return 0; + if (pHead->version <= pWal->version) return 0; + + pHead->signature = WAL_SIGNATURE; + taosCalcChecksumAppend(0, (uint8_t *)pHead, sizeof(SWalHead)); + int32_t contLen = pHead->len + sizeof(SWalHead); + + pthread_mutex_lock(&pWal->mutex); + + if (taosWrite(pWal->fd, pHead, contLen) != contLen) { + code = TAOS_SYSTEM_ERROR(errno); + wError("vgId:%d, file:%s, failed to write since %s", pWal->vgId, pWal->name, strerror(errno)); + } else { + wTrace("vgId:%d, write wal, fileId:%" PRId64 " fd:%d hver:%" PRId64 " wver:%" PRIu64 " len:%d", pWal->vgId, + pWal->fileId, pWal->fd, pHead->version, pWal->version, pHead->len); + pWal->version = pHead->version; + } + + pthread_mutex_unlock(&pWal->mutex); + + ASSERT(contLen == pHead->len + sizeof(SWalHead)); + + 
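+  // The record just written has the on-disk layout
+  //     [ SWalHead | cont (pHead->len bytes) ]
+  // where taosCalcChecksumAppend() above stamps a checksum over the fixed-size
+  // SWalHead only; walRestoreWalFile() and walSkipCorruptedRecord() later use
+  // the WAL_SIGNATURE field plus this header checksum to resynchronize after a
+  // torn or corrupted record.
+  //
+  // Rough caller-side sketch (illustrative only: bodyLen, nextVersion and body
+  // are placeholders, error handling omitted):
+  //
+  //     SWalHead *pRec = calloc(1, sizeof(SWalHead) + bodyLen);
+  //     pRec->len     = bodyLen;
+  //     pRec->version = nextVersion;   // records with version <= pWal->version are dropped
+  //     // other SWalHead fields such as msgType are set as the caller requires
+  //     memcpy(pRec->cont, body, bodyLen);
+  //     walWrite(pWal, pRec);
+  //     walFsync(pWal, false);         // fsync now only if level/period require it
+  //     free(pRec);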
return code; +} + +void walFsync(void *handle, bool forceFsync) { + SWal *pWal = handle; + if (pWal == NULL || pWal->fd < 0) return; + + if (forceFsync || (pWal->level == TAOS_WAL_FSYNC && pWal->fsyncPeriod == 0)) { + wTrace("vgId:%d, fileId:%" PRId64 ", do fsync", pWal->vgId, pWal->fileId); + if (fsync(pWal->fd) < 0) { + wError("vgId:%d, fileId:%" PRId64 ", fsync failed since %s", pWal->vgId, pWal->fileId, strerror(errno)); + } + } +} + +int32_t walRestore(void *handle, void *pVnode, FWalWrite writeFp) { + if (handle == NULL) return -1; + + SWal * pWal = handle; + int32_t count = 0; + int32_t code = 0; + int64_t fileId = -1; + + while ((code = walGetNextFile(pWal, &fileId)) >= 0) { + if (fileId == pWal->fileId) continue; + + char walName[WAL_FILE_LEN]; + snprintf(walName, sizeof(pWal->name), "%s/%s%" PRId64, pWal->path, WAL_PREFIX, fileId); + + wDebug("vgId:%d, file:%s, will be restored", pWal->vgId, walName); + int32_t code = walRestoreWalFile(pWal, pVnode, writeFp, walName, fileId); + if (code != TSDB_CODE_SUCCESS) { + wError("vgId:%d, file:%s, failed to restore since %s", pWal->vgId, walName, tstrerror(code)); + continue; + } + + wDebug("vgId:%d, file:%s, restore success", pWal->vgId, walName); + + count++; + } + + if (pWal->keep != TAOS_WAL_KEEP) return TSDB_CODE_SUCCESS; + + if (count == 0) { + wDebug("vgId:%d, wal file not exist, renew it", pWal->vgId); + return walRenew(pWal); + } else { + // open the existing WAL file in append mode + pWal->fileId = 0; + snprintf(pWal->name, sizeof(pWal->name), "%s/%s%" PRId64, pWal->path, WAL_PREFIX, pWal->fileId); + pWal->fd = open(pWal->name, O_WRONLY | O_CREAT | O_APPEND, S_IRWXU | S_IRWXG | S_IRWXO); + if (pWal->fd < 0) { + wError("vgId:%d, file:%s, failed to open since %s", pWal->vgId, pWal->name, strerror(errno)); + return TAOS_SYSTEM_ERROR(errno); + } + wDebug("vgId:%d, file:%s open success", pWal->vgId, pWal->name); + } + + return TSDB_CODE_SUCCESS; +} + +int32_t walGetWalFile(void *handle, char *fileName, int64_t *fileId) { + if (handle == NULL) return -1; + SWal *pWal = handle; + + if (*fileId == 0) *fileId = -1; + + pthread_mutex_lock(&(pWal->mutex)); + + int32_t code = walGetNextFile(pWal, fileId); + if (code >= 0) { + sprintf(fileName, "wal/%s%" PRId64, WAL_PREFIX, *fileId); + code = (*fileId == pWal->fileId) ? 
0 : 1; + } + + wTrace("vgId:%d, get wal file, code:%d curId:%" PRId64 " outId:%" PRId64, pWal->vgId, code, pWal->fileId, *fileId); + pthread_mutex_unlock(&(pWal->mutex)); + + return code; +} + +static void walFtruncate(SWal *pWal, int32_t fd, int64_t offset) { + taosFtruncate(fd, offset); + fsync(fd); +} + +static int32_t walSkipCorruptedRecord(SWal *pWal, SWalHead *pHead, int32_t fd, int64_t *offset) { + int64_t pos = *offset; + while (1) { + pos++; + + if (lseek(fd, pos, SEEK_SET) < 0) { + wError("vgId:%d, failed to seek from corrupted wal file since %s", pWal->vgId, strerror(errno)); + return TSDB_CODE_WAL_FILE_CORRUPTED; + } + + if (taosRead(fd, pHead, sizeof(SWalHead)) <= 0) { + wError("vgId:%d, read to end of corrupted wal file, offset:%" PRId64, pWal->vgId, pos); + return TSDB_CODE_WAL_FILE_CORRUPTED; + } + + if (pHead->signature != WAL_SIGNATURE) { + continue; + } + + if (taosCheckChecksumWhole((uint8_t *)pHead, sizeof(SWalHead))) { + wInfo("vgId:%d, wal head cksum check passed, offset:%" PRId64, pWal->vgId, pos); + *offset = pos; + return TSDB_CODE_SUCCESS; + } + } + + return TSDB_CODE_WAL_FILE_CORRUPTED; +} + +static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, char *name, int64_t fileId) { + int32_t size = WAL_MAX_SIZE; + void * buffer = tmalloc(size); + if (buffer == NULL) { + wError("vgId:%d, file:%s, failed to open for restore since %s", pWal->vgId, name, strerror(errno)); + return TAOS_SYSTEM_ERROR(errno); + } + + int32_t fd = open(name, O_RDWR); + if (fd < 0) { + wError("vgId:%d, file:%s, failed to open for restore since %s", pWal->vgId, name, strerror(errno)); + tfree(buffer); + return TAOS_SYSTEM_ERROR(errno); + } + + wDebug("vgId:%d, file:%s, start to restore", pWal->vgId, name); + + int32_t code = TSDB_CODE_SUCCESS; + int64_t offset = 0; + SWalHead *pHead = buffer; + + while (1) { + int32_t ret = taosRead(fd, pHead, sizeof(SWalHead)); + if (ret == 0) break; + + if (ret < 0) { + wError("vgId:%d, file:%s, failed to read wal head since %s", pWal->vgId, name, strerror(errno)); + code = TAOS_SYSTEM_ERROR(errno); + break; + } + + if (ret < sizeof(SWalHead)) { + wError("vgId:%d, file:%s, failed to read wal head, ret is %d", pWal->vgId, name, ret); + walFtruncate(pWal, fd, offset); + break; + } + + if (!taosCheckChecksumWhole((uint8_t *)pHead, sizeof(SWalHead))) { + wError("vgId:%d, file:%s, wal head cksum is messed up, hver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, name, + pHead->version, pHead->len, offset); + code = walSkipCorruptedRecord(pWal, pHead, fd, &offset); + if (code != TSDB_CODE_SUCCESS) { + walFtruncate(pWal, fd, offset); + break; + } + } + + if (pHead->len > size - sizeof(SWalHead)) { + size = sizeof(SWalHead) + pHead->len; + buffer = realloc(buffer, size); + if (buffer == NULL) { + wError("vgId:%d, file:%s, failed to open for restore since %s", pWal->vgId, name, strerror(errno)); + code = TAOS_SYSTEM_ERROR(errno); + break; + } + + pHead = buffer; + } + + ret = taosRead(fd, pHead->cont, pHead->len); + if (ret < 0) { + wError("vgId:%d, file:%s, failed to read wal body since %s", pWal->vgId, name, strerror(errno)); + code = TAOS_SYSTEM_ERROR(errno); + break; + } + + if (ret < pHead->len) { + wError("vgId:%d, file:%s, failed to read wal body, ret:%d len:%d", pWal->vgId, name, ret, pHead->len); + offset += sizeof(SWalHead); + continue; + } + + offset = offset + sizeof(SWalHead) + pHead->len; + + wTrace("vgId:%d, restore wal, fileId:%" PRId64 " hver:%" PRIu64 " wver:%" PRIu64 " len:%d", pWal->vgId, + fileId, pHead->version, 
pWal->version, pHead->len); + + pWal->version = pHead->version; + (*writeFp)(pVnode, pHead, TAOS_QTYPE_WAL, NULL); + } + + tclose(fd); + tfree(buffer); + + return code; +} + +uint64_t walGetVersion(twalh param) { + SWal *pWal = param; + if (pWal == 0) return 0; + + return pWal->version; +} diff --git a/src/wal/test/waltest.c b/src/wal/test/waltest.c index bbee1347b8f92aa6cfad448fdfb369de8f5a6301..7a473ed18c958afa8be3c5b94b04d2fd548a56fd 100644 --- a/src/wal/test/waltest.c +++ b/src/wal/test/waltest.c @@ -23,7 +23,7 @@ int64_t ver = 0; void *pWal = NULL; -int writeToQueue(void *pVnode, void *data, int type) { +int writeToQueue(void *pVnode, void *data, int type, void *pMsg) { // do nothing SWalHead *pHead = data; @@ -37,7 +37,6 @@ int writeToQueue(void *pVnode, void *data, int type) { int main(int argc, char *argv[]) { char path[128] = "/home/jhtao/test/wal"; - int max = 3; int level = 2; int total = 5; int rows = 10000; @@ -47,8 +46,6 @@ int main(int argc, char *argv[]) { for (int i=1; ijava -version +java version "1.8.0_131" +Java(TM) SE Runtime Environment (build 1.8.0_131-b11) +Java HotSpot(TM) 64-Bit Server VM (build 25.131-b11, mixed mode) +``` + + +(2)安装配置maven + +官网下载maven,下载地址:http://maven.apache.org/download.cgi + +配置环境变量MAVEN_HOME,将MAVEN_HOME/bin添加到PATH + +命令行里查看maven的版本 + +```shell +>mvn --version +Apache Maven 3.5.0 (ff8f5e7444045639af65f6095c62210b5713f426; 2017-04-04T03:39:06+08:00) +Maven home: D:\apache-maven-3.5.0\bin\.. +Java version: 1.8.0_131, vendor: Oracle Corporation +Java home: C:\Program Files\Java\jdk1.8.0_131\jre +Default locale: zh_CN, platform encoding: GBK +OS name: "windows 10", version: "10.0", arch: "amd64", family: "windows" +``` + +为了加快maven下载依赖的速度,可以为maven配置mirror,修改MAVEN_HOME\config\settings.xml文件 + +```xml + + + D:\apache-maven-localRepository + + + + + alimaven + aliyun maven + http://maven.aliyun.com/nexus/content/groups/public/ + central + + + + + + + jdk-1.8 + + true + 1.8 + + + 1.8 + 1.8 + 1.8 + + + + +``` + + + +(3)在linux服务器上安装TDengine-server + +在taosdata官网下载TDengine-server,下载地址:https://www.taosdata.com/cn/all-downloads/ + +在linux服务器上安装TDengine-server + +```shell +# tar -zxvf package/TDengine-server-2.0.1.1-Linux-x64.tar.gz +# cd TDengine-server/ +# ./install.sh +``` + +启动taosd + +```shell +# systemctl start taosd +``` + +在server上用taos连接taosd + +```shell +# taos +taos> show dnodes; + id | end_point | vnodes | cores | status | role | create_time | +================================================================================================================== + 1 | td01:6030 | 2 | 4 | ready | any | 2020-08-19 18:40:25.045 | +Query OK, 1 row(s) in set (0.005765s) +``` + +如果可以正确连接到taosd实例,并打印出databases的信息,说明TDengine的server已经正确启动。这里查看server的hostname + +```shell +# hostname -f +td01 +``` + +注意,如果安装TDengine后,使用默认的taos.cfg配置文件,taosd会使用当前server的hostname创建dnode实例。之后,在client也需要使用这个hostname来连接taosd。 + + + +(4)在windows上安装TDengine-client + +在taosdata官网下载taos客户端,下载地址: +https://www.taosdata.com/cn/all-downloads/ +下载后,双击exe安装。 + +修改client的hosts文件(C:\Windows\System32\drivers\etc\hosts),将server的hostname和ip配置到client的hosts文件中 + +``` +192.168.236.136 td01 +``` + +配置完成后,在命令行内使用taos shell连接server端 + +```shell +C:\TDengine>taos -h td01 +Welcome to the TDengine shell from Linux, Client Version:2.0.1.1 +Copyright (c) 2017 by TAOS Data, Inc. All rights reserved. 
+ +taos> show databases; + name | created_time | ntables | vgroups | replica | quorum | days | keep1,keep2,keep(D) | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | precision | status | +=================================================================================================================================================================================================================================================================== + test | 2020-08-19 18:43:50.731 | 1 | 1 | 1 | 1 | 2 | 3650,3650,3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | ms | ready | + log | 2020-08-19 18:40:28.064 | 4 | 1 | 1 | 1 | 10 | 30,30,30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | us | ready | +Query OK, 2 row(s) in set (0.068000s) +``` + +如果windows上的client能够正常连接,并打印database信息,说明client可以正常连接server了。 + + + +## 应用开发 + +(1)新建maven工程,在pom.xml中引入taos-jdbcdriver依赖。 + +```xml + + + 4.0.0 + + com.taosdata.demo + JdbcDemo + 1.0-SNAPSHOT + + + + com.taosdata.jdbc + taos-jdbcdriver + 2.0.8 + + + +``` + +(2)使用jdbc查询TDengine数据库 + +下面是示例代码: + +```java +public class JdbcDemo { + + public static void main(String[] args) throws Exception { + Connection conn = getConn(); + Statement stmt = conn.createStatement(); + // create database + stmt.executeUpdate("create database if not exists db"); + // use database + stmt.executeUpdate("use db"); + // create table + stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)"); + // insert data + int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now + 1s, 20, 9.3)"); + System.out.println("insert " + affectedRows + " rows."); + // query data + ResultSet resultSet = stmt.executeQuery("select * from tb"); + Timestamp ts = null; + int temperature = 0; + float humidity = 0; + while(resultSet.next()){ + ts = resultSet.getTimestamp(1); + temperature = resultSet.getInt(2); + humidity = resultSet.getFloat("humidity"); + System.out.printf("%s, %d, %s\n", ts, temperature, humidity); + } + } + + public static Connection getConn() throws Exception{ + Class.forName("com.taosdata.jdbc.TSDBDriver"); + String jdbcUrl = "jdbc:TAOS://td01:0/log?user=root&password=taosdata"; + Properties connProps = new Properties(); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + Connection conn = DriverManager.getConnection(jdbcUrl, connProps); + return conn; + } + +} +``` + +(3)测试jdbc访问tdengine的sever实例 + +console输出: + +``` +insert 2 rows. +2020-08-26 00:06:34.575, 23, 10.3 +2020-08-26 00:06:35.575, 20, 9.3 +``` + + + +## 指南 + +(1)如何设置主机名和hosts + +在server上查看hostname和fqdn +```shell +查看hostname +# hostname +taos-server + +查看fqdn +# hostname -f +taos-server +``` + +windows下hosts文件位于: +C:\\Windows\System32\drivers\etc\hosts +修改hosts文件,添加server的ip和hostname + +```s +192.168.56.101 node5 +``` + +(2)什么是fqdn? + + +> 什么是FQDN? 
+> +> FQDN(Full qualified domain name)全限定域名,fqdn由2部分组成:hostname+domainname。 +> +> 例如,一个邮件服务器的fqdn可能是:mymail.somecollege.edu,其中mymail是hostname(主机名),somcollege.edu是domainname(域名)。本例中,.edu是顶级域名,.somecollege是二级域名。 +> +> 当连接服务器时,必须指定fqdn,然后,dns服务器通过查看dns表,将hostname解析为相应的ip地址。如果只指定hostname(不指定domainname),应用程序可能服务解析主机名。因为如果你试图访问不在本地的远程服务器时,本地的dns服务器和可能没有远程服务器的hostname列表。 +> +> 参考:https://kb.iu.edu/d/aiuv diff --git a/tests/examples/JDBC/connectionPools/README-cn.md b/tests/examples/JDBC/connectionPools/README-cn.md new file mode 100644 index 0000000000000000000000000000000000000000..761596dfc55a3e2c9f449ed34fd72ac96c277512 --- /dev/null +++ b/tests/examples/JDBC/connectionPools/README-cn.md @@ -0,0 +1,33 @@ +这个example中,我们适配了java常见的连接池: +* c3p0 +* dbcp +* druid +* HikariCP + +### 说明 +ConnectionPoolDemo的程序逻辑: +1. 创建到host的connection连接池 +2. 创建名称为pool_test的database,创建表超级weather,创建tableSize个子表 +3. 不断向所有子表进行插入。 + +### 如何运行这个例子: +```shell script +# mvn exec:java -Dexec.mainClass="com.taosdata.demo.ConnectionPoolDemo" -Dexec.args="-host localhost" +``` +使用mvn运行ConnectionPoolDemo的main方法,可以指定参数 +```shell script +Usage: +mvn exec:java -Dexec.mainClass="com.taosdata.demo.ConnectionPoolDemo" -Dexec.args="" +-host : hostname +-poolType +-poolSize +-tableSize +-batchSize : 每条Insert SQL中values的数量 +-sleep : 每次插入任务提交后的 +``` + +### 如何停止程序: +ConnectionPoolDemo不会自己停止,会一直执行插入,需要手动Ctrl+C运行。 + +### 日志 +使用log4j,将日志和错误分别输出到了debug.log和error.log中 \ No newline at end of file diff --git a/tests/examples/JDBC/connectionPools/pom.xml b/tests/examples/JDBC/connectionPools/pom.xml new file mode 100644 index 0000000000000000000000000000000000000000..2793f0a83ddc88711796c133802c82979ae14be5 --- /dev/null +++ b/tests/examples/JDBC/connectionPools/pom.xml @@ -0,0 +1,56 @@ + + + 4.0.0 + + com.taosdata.demo + connectionPools + 1.0-SNAPSHOT + + + + com.taosdata.jdbc + taos-jdbcdriver + 2.0.11 + + + + + com.alibaba + druid + 1.1.17 + + + + com.zaxxer + HikariCP + 3.2.0 + + + + commons-pool + commons-pool + 1.5.4 + + + commons-dbcp + commons-dbcp + 1.4 + + + + com.mchange + c3p0 + 0.9.5.4 + + + + + log4j + log4j + 1.2.17 + + + + \ No newline at end of file diff --git a/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/ConnectionPoolDemo.java b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/ConnectionPoolDemo.java new file mode 100644 index 0000000000000000000000000000000000000000..79c0aacea740dcb6fca9780c7f64872c537c3225 --- /dev/null +++ b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/ConnectionPoolDemo.java @@ -0,0 +1,117 @@ +package com.taosdata.demo; + +import com.taosdata.demo.common.InsertTask; +import com.taosdata.demo.pool.C3p0Builder; +import com.taosdata.demo.pool.DbcpBuilder; +import com.taosdata.demo.pool.DruidPoolBuilder; +import com.taosdata.demo.pool.HikariCpBuilder; +import org.apache.log4j.Logger; + +import javax.sql.DataSource; +import java.sql.Connection; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +public class ConnectionPoolDemo { + + private static Logger logger = Logger.getLogger(DruidPoolBuilder.class); + private static final String dbName = "pool_test"; + + private static int batchSize = 10; + private static int sleep = 1000; + private static int poolSize = 50; + private static int tableSize = 1000; + private static int threadCount = 50; + private static String poolType = "hikari"; + + + public static void 
main(String[] args) throws InterruptedException { + String host = null; + for (int i = 0; i < args.length; i++) { + if ("-host".equalsIgnoreCase(args[i]) && i < args.length - 1) { + host = args[++i]; + } + if ("-batchSize".equalsIgnoreCase(args[i]) && i < args.length - 1) { + batchSize = Integer.parseInt(args[++i]); + } + if ("-sleep".equalsIgnoreCase(args[i]) && i < args.length - 1) { + sleep = Integer.parseInt(args[++i]); + } + if ("-poolSize".equalsIgnoreCase(args[i]) && i < args.length - 1) { + poolSize = Integer.parseInt(args[++i]); + } + if ("-tableSize".equalsIgnoreCase(args[i]) && i < args.length - 1) { + tableSize = Integer.parseInt(args[++i]); + } + if ("-poolType".equalsIgnoreCase(args[i]) && i < args.length - 1) { + poolType = args[++i]; + } + } + if (host == null) { + System.out.println("Usage: java -jar XXX.jar " + + "-host " + + "-batchSize " + + "-sleep " + + "-poolSize " + + "-tableSize " + + "-poolType "); + return; + } + + DataSource dataSource; + switch (poolType) { + case "c3p0": + dataSource = C3p0Builder.getDataSource(host, poolSize); + break; + case "dbcp": + dataSource = DbcpBuilder.getDataSource(host, poolSize); + break; + case "druid": + dataSource = DruidPoolBuilder.getDataSource(host, poolSize); + break; + case "hikari": + default: + dataSource = HikariCpBuilder.getDataSource(host, poolSize); + poolType = "hikari"; + } + + logger.info(">>>>>>>>>>>>>> connection pool Type: " + poolType); + + init(dataSource); + + ExecutorService executor = Executors.newFixedThreadPool(threadCount); + while (true) { + executor.execute(new InsertTask(dataSource, dbName, tableSize, batchSize)); + if (sleep > 0) + TimeUnit.MILLISECONDS.sleep(sleep); + } + } + + private static void init(DataSource dataSource) { + try (Connection conn = dataSource.getConnection()) { + execute(conn, "drop database if exists " + dbName + ""); + execute(conn, "create database if not exists " + dbName + ""); + execute(conn, "use " + dbName + ""); + execute(conn, "create table weather(ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int)"); + for (int tb_ind = 1; tb_ind <= tableSize; tb_ind++) { + execute(conn, "create table t_" + tb_ind + " using weather tags('beijing'," + (tb_ind + 1) + ")"); + } + logger.info(">>>>>>>>>>>>>>>>>>>>>>>>>>>> init finished."); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + private static void execute(Connection con, String sql) { + try (Statement stmt = con.createStatement()) { + stmt.executeUpdate(sql); + logger.info("SQL >>> " + sql); + } catch (SQLException e) { + e.printStackTrace(); + } + } + +} diff --git a/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/common/InsertTask.java b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/common/InsertTask.java new file mode 100644 index 0000000000000000000000000000000000000000..ed86acd6e9f8bfb8c862c1764e39f541d3f054eb --- /dev/null +++ b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/common/InsertTask.java @@ -0,0 +1,77 @@ +package com.taosdata.demo.common; + +import org.apache.log4j.Logger; + +import javax.sql.DataSource; +import java.sql.Connection; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Random; + +public class InsertTask implements Runnable { + private final Random random = new Random(System.currentTimeMillis()); + private static final Logger logger = Logger.getLogger(InsertTask.class); + + private final DataSource ds; + private final int batchSize; + private final String dbName; + 
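+    // Each run() builds, for every sub-table t_1..t_<tableSize>, one multi-row
+    // INSERT of batchSize values whose timestamps start at the current
+    // millisecond and increase by 1 ms per row, with random temperature and
+    // humidity, then logs the accumulated affected rows and the elapsed time.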
private final int tableSize; + + public InsertTask(DataSource ds, String dbName, int tableSize, int batchSize) { + this.ds = ds; + this.dbName = dbName; + this.tableSize = tableSize; + this.batchSize = batchSize; + } + + @Override + public void run() { + Connection conn = null; + Statement stmt = null; + int affectedRows = 0; + + long start = System.currentTimeMillis(); + try { + conn = ds.getConnection(); + stmt = conn.createStatement(); + + for (int tb_index = 1; tb_index <= tableSize; tb_index++) { + StringBuilder sb = new StringBuilder(); + sb.append("insert into "); + sb.append(dbName); + sb.append(".t_"); + sb.append(tb_index); + sb.append("(ts, temperature, humidity) values "); + for (int i = 0; i < batchSize; i++) { + sb.append("("); + sb.append(start + i); + sb.append(", "); + sb.append(random.nextFloat() * 30); + sb.append(", "); + sb.append(random.nextInt(70)); + sb.append(") "); + } + logger.info("SQL >>> " + sb.toString()); + affectedRows += stmt.executeUpdate(sb.toString()); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + if (stmt != null) { + try { + stmt.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + if (conn != null) { + try { + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + logger.info(">>> affectedRows:" + affectedRows + " TimeCost:" + (System.currentTimeMillis() - start) + " ms"); + } + } +} diff --git a/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/pool/C3p0Builder.java b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/pool/C3p0Builder.java new file mode 100644 index 0000000000000000000000000000000000000000..587f417410f96f43be2ced5a4820cd49cdb99a17 --- /dev/null +++ b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/pool/C3p0Builder.java @@ -0,0 +1,28 @@ +package com.taosdata.demo.pool; + +import com.mchange.v2.c3p0.ComboPooledDataSource; +import org.apache.commons.dbcp.BasicDataSource; + +import javax.sql.DataSource; +import java.beans.PropertyVetoException; + +public class C3p0Builder { + + public static DataSource getDataSource(String host, int poolSize) { + ComboPooledDataSource ds = new ComboPooledDataSource(); + + try { + ds.setDriverClass("com.taosdata.jdbc.TSDBDriver"); + } catch (PropertyVetoException e) { + e.printStackTrace(); + } + ds.setJdbcUrl("jdbc:TAOS://" + host + ":6030"); + ds.setUser("root"); + ds.setPassword("taosdata"); + + ds.setMinPoolSize(poolSize); + ds.setMaxPoolSize(poolSize); + ds.setAcquireIncrement(5); + return ds; + } +} diff --git a/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/pool/DbcpBuilder.java b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/pool/DbcpBuilder.java new file mode 100644 index 0000000000000000000000000000000000000000..3c34a32532f595bf3134942094e96e952bd09dbb --- /dev/null +++ b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/pool/DbcpBuilder.java @@ -0,0 +1,21 @@ +package com.taosdata.demo.pool; + +import org.apache.commons.dbcp.BasicDataSource; + +import javax.sql.DataSource; + +public class DbcpBuilder { + + public static DataSource getDataSource(String host, int poolSize) { + BasicDataSource ds = new BasicDataSource(); + ds.setDriverClassName("com.taosdata.jdbc.TSDBDriver"); + ds.setUrl("jdbc:TAOS://" + host + ":6030"); + ds.setUsername("root"); + ds.setPassword("taosdata"); + + ds.setMaxActive(poolSize); + ds.setMinIdle(poolSize); + ds.setInitialSize(poolSize); + return ds; + } +} diff --git 
a/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/pool/DruidPoolBuilder.java b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/pool/DruidPoolBuilder.java new file mode 100644 index 0000000000000000000000000000000000000000..e5dc14c6a5ef69c2a7059d5d78b621e25ff3d799 --- /dev/null +++ b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/pool/DruidPoolBuilder.java @@ -0,0 +1,31 @@ +package com.taosdata.demo.pool; + +import com.alibaba.druid.pool.DruidDataSource; + +import javax.sql.DataSource; + +public class DruidPoolBuilder { + + public static DataSource getDataSource(String host, int poolSize) { + final String url = "jdbc:TAOS://" + host + ":6030"; + + DruidDataSource dataSource = new DruidDataSource(); + dataSource.setUrl(url); + dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver"); + dataSource.setUsername("root"); + dataSource.setPassword("taosdata"); + + //初始连接数,默认0 + dataSource.setInitialSize(poolSize); + //最大连接数,默认8 + dataSource.setMaxActive(poolSize); + //最小闲置数 + dataSource.setMinIdle(poolSize); + //获取连接的最大等待时间,单位毫秒 + dataSource.setMaxWait(2000); + + return dataSource; + } + + +} diff --git a/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/pool/HikariCpBuilder.java b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/pool/HikariCpBuilder.java new file mode 100644 index 0000000000000000000000000000000000000000..87f1f4ad2cbba41a779f0247f2214ef2bf04a8ca --- /dev/null +++ b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/pool/HikariCpBuilder.java @@ -0,0 +1,22 @@ +package com.taosdata.demo.pool; + +import com.zaxxer.hikari.HikariConfig; +import com.zaxxer.hikari.HikariDataSource; + +import javax.sql.DataSource; + +public class HikariCpBuilder { + + public static DataSource getDataSource(String host, int poolSize) { + HikariConfig config = new HikariConfig(); + config.setDriverClassName("com.taosdata.jdbc.TSDBDriver"); + config.setJdbcUrl("jdbc:TAOS://" + host + ":6030"); + config.setUsername("root"); + config.setPassword("taosdata"); + + config.setMaximumPoolSize(poolSize); + config.setMinimumIdle(poolSize); + HikariDataSource ds = new HikariDataSource(config); + return ds; + } +} diff --git a/tests/examples/JDBC/connectionPools/src/main/resources/log4j.properties b/tests/examples/JDBC/connectionPools/src/main/resources/log4j.properties new file mode 100644 index 0000000000000000000000000000000000000000..1299357be3d2e99ca6b79227f14ca7a587718914 --- /dev/null +++ b/tests/examples/JDBC/connectionPools/src/main/resources/log4j.properties @@ -0,0 +1,21 @@ +### 设置### +log4j.rootLogger=debug,stdout,DebugLog,ErrorLog +### 输出信息到控制抬 ### +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.Target=System.out +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=[%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} method:%l%n%m%n +### 输出DEBUG 级别以上的日志到=logs/debug.log +log4j.appender.DebugLog=org.apache.log4j.DailyRollingFileAppender +log4j.appender.DebugLog.File=logs/debug.log +log4j.appender.DebugLog.Append=true +log4j.appender.DebugLog.Threshold=DEBUG +log4j.appender.DebugLog.layout=org.apache.log4j.PatternLayout +log4j.appender.DebugLog.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n +### 输出ERROR 级别以上的日志到=logs/error.log +log4j.appender.ErrorLog=org.apache.log4j.DailyRollingFileAppender +log4j.appender.ErrorLog.File=logs/error.log +log4j.appender.ErrorLog.Append=true 
+log4j.appender.ErrorLog.Threshold=ERROR +log4j.appender.ErrorLog.layout=org.apache.log4j.PatternLayout +log4j.appender.ErrorLog.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n \ No newline at end of file diff --git a/tests/examples/JDBC/mybatisplus-demo/.gitignore b/tests/examples/JDBC/mybatisplus-demo/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..b56f1dd0d04da4e03f710af3917e4fbdd9be4aa8 --- /dev/null +++ b/tests/examples/JDBC/mybatisplus-demo/.gitignore @@ -0,0 +1,33 @@ +README.md +target/ +!.mvn/wrapper/maven-wrapper.jar +!**/src/main/**/target/ +!**/src/test/**/target/ + +### STS ### +.apt_generated +.classpath +.factorypath +.project +.settings +.springBeans +.sts4-cache + +### IntelliJ IDEA ### +.idea +*.iws +*.iml +*.ipr + +### NetBeans ### +/nbproject/private/ +/nbbuild/ +/dist/ +/nbdist/ +/.nb-gradle/ +build/ +!**/src/main/**/build/ +!**/src/test/**/build/ + +### VS Code ### +.vscode/ diff --git a/tests/examples/JDBC/mybatisplus-demo/.mvn/wrapper/MavenWrapperDownloader.java b/tests/examples/JDBC/mybatisplus-demo/.mvn/wrapper/MavenWrapperDownloader.java new file mode 100644 index 0000000000000000000000000000000000000000..a45eb6ba269cd38f8965cef786729790945d9537 --- /dev/null +++ b/tests/examples/JDBC/mybatisplus-demo/.mvn/wrapper/MavenWrapperDownloader.java @@ -0,0 +1,118 @@ +/* + * Copyright 2007-present the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.net.*; +import java.io.*; +import java.nio.channels.*; +import java.util.Properties; + +public class MavenWrapperDownloader { + + private static final String WRAPPER_VERSION = "0.5.6"; + /** + * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided. + */ + private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/" + + WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar"; + + /** + * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to + * use instead of the default one. + */ + private static final String MAVEN_WRAPPER_PROPERTIES_PATH = + ".mvn/wrapper/maven-wrapper.properties"; + + /** + * Path where the maven-wrapper.jar will be saved to. + */ + private static final String MAVEN_WRAPPER_JAR_PATH = + ".mvn/wrapper/maven-wrapper.jar"; + + /** + * Name of the property which should be used to override the default download url for the wrapper. + */ + private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl"; + + public static void main(String args[]) { + System.out.println("- Downloader started"); + File baseDirectory = new File(args[0]); + System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath()); + + // If the maven-wrapper.properties exists, read it and check if it contains a custom + // wrapperUrl parameter. 
+ File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH); + String url = DEFAULT_DOWNLOAD_URL; + if (mavenWrapperPropertyFile.exists()) { + FileInputStream mavenWrapperPropertyFileInputStream = null; + try { + mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile); + Properties mavenWrapperProperties = new Properties(); + mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream); + url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url); + } catch (IOException e) { + System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'"); + } finally { + try { + if (mavenWrapperPropertyFileInputStream != null) { + mavenWrapperPropertyFileInputStream.close(); + } + } catch (IOException e) { + // Ignore ... + } + } + } + System.out.println("- Downloading from: " + url); + + File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH); + if (!outputFile.getParentFile().exists()) { + if (!outputFile.getParentFile().mkdirs()) { + System.out.println( + "- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'"); + } + } + System.out.println("- Downloading to: " + outputFile.getAbsolutePath()); + try { + downloadFileFromURL(url, outputFile); + System.out.println("Done"); + System.exit(0); + } catch (Throwable e) { + System.out.println("- Error downloading"); + e.printStackTrace(); + System.exit(1); + } + } + + private static void downloadFileFromURL(String urlString, File destination) throws Exception { + if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) { + String username = System.getenv("MVNW_USERNAME"); + char[] password = System.getenv("MVNW_PASSWORD").toCharArray(); + Authenticator.setDefault(new Authenticator() { + @Override + protected PasswordAuthentication getPasswordAuthentication() { + return new PasswordAuthentication(username, password); + } + }); + } + URL website = new URL(urlString); + ReadableByteChannel rbc; + rbc = Channels.newChannel(website.openStream()); + FileOutputStream fos = new FileOutputStream(destination); + fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE); + fos.close(); + rbc.close(); + } + +} diff --git a/tests/examples/JDBC/mybatisplus-demo/.mvn/wrapper/maven-wrapper.jar b/tests/examples/JDBC/mybatisplus-demo/.mvn/wrapper/maven-wrapper.jar new file mode 100644 index 0000000000000000000000000000000000000000..2cc7d4a55c0cd0092912bf49ae38b3a9e3fd0054 Binary files /dev/null and b/tests/examples/JDBC/mybatisplus-demo/.mvn/wrapper/maven-wrapper.jar differ diff --git a/tests/examples/JDBC/mybatisplus-demo/.mvn/wrapper/maven-wrapper.properties b/tests/examples/JDBC/mybatisplus-demo/.mvn/wrapper/maven-wrapper.properties new file mode 100644 index 0000000000000000000000000000000000000000..642d572ce90e5085986bdd9c9204b9404f028084 --- /dev/null +++ b/tests/examples/JDBC/mybatisplus-demo/.mvn/wrapper/maven-wrapper.properties @@ -0,0 +1,2 @@ +distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.3/apache-maven-3.6.3-bin.zip +wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar diff --git a/tests/examples/JDBC/mybatisplus-demo/mvnw b/tests/examples/JDBC/mybatisplus-demo/mvnw new file mode 100755 index 0000000000000000000000000000000000000000..3c8a5537314954d53ec2fb774b34fe5d5a5f253a --- /dev/null +++ b/tests/examples/JDBC/mybatisplus-demo/mvnw @@ -0,0 +1,322 @@ +#!/bin/sh +# 
---------------------------------------------------------------------------- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- + +# ---------------------------------------------------------------------------- +# Maven Start Up Batch script +# +# Required ENV vars: +# ------------------ +# JAVA_HOME - location of a JDK home dir +# +# Optional ENV vars +# ----------------- +# M2_HOME - location of maven2's installed home dir +# MAVEN_OPTS - parameters passed to the Java VM when running Maven +# e.g. to debug Maven itself, use +# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +# MAVEN_SKIP_RC - flag to disable loading of mavenrc files +# ---------------------------------------------------------------------------- + +if [ -z "$MAVEN_SKIP_RC" ]; then + + if [ -f /etc/mavenrc ]; then + . /etc/mavenrc + fi + + if [ -f "$HOME/.mavenrc" ]; then + . "$HOME/.mavenrc" + fi + +fi + +# OS specific support. $var _must_ be set to either true or false. +cygwin=false +darwin=false +mingw=false +case "$(uname)" in +CYGWIN*) cygwin=true ;; +MINGW*) mingw=true ;; +Darwin*) + darwin=true + # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home + # See https://developer.apple.com/library/mac/qa/qa1170/_index.html + if [ -z "$JAVA_HOME" ]; then + if [ -x "/usr/libexec/java_home" ]; then + export JAVA_HOME="$(/usr/libexec/java_home)" + else + export JAVA_HOME="/Library/Java/Home" + fi + fi + ;; +esac + +if [ -z "$JAVA_HOME" ]; then + if [ -r /etc/gentoo-release ]; then + JAVA_HOME=$(java-config --jre-home) + fi +fi + +if [ -z "$M2_HOME" ]; then + ## resolve links - $0 may be a link to maven's home + PRG="$0" + + # need this for relative symlinks + while [ -h "$PRG" ]; do + ls=$(ls -ld "$PRG") + link=$(expr "$ls" : '.*-> \(.*\)$') + if expr "$link" : '/.*' >/dev/null; then + PRG="$link" + else + PRG="$(dirname "$PRG")/$link" + fi + done + + saveddir=$(pwd) + + M2_HOME=$(dirname "$PRG")/.. + + # make it fully qualified + M2_HOME=$(cd "$M2_HOME" && pwd) + + cd "$saveddir" + # echo Using m2 at $M2_HOME +fi + +# For Cygwin, ensure paths are in UNIX format before anything is touched +if $cygwin; then + [ -n "$M2_HOME" ] && + M2_HOME=$(cygpath --unix "$M2_HOME") + [ -n "$JAVA_HOME" ] && + JAVA_HOME=$(cygpath --unix "$JAVA_HOME") + [ -n "$CLASSPATH" ] && + CLASSPATH=$(cygpath --path --unix "$CLASSPATH") +fi + +# For Mingw, ensure paths are in UNIX format before anything is touched +if $mingw; then + [ -n "$M2_HOME" ] && + M2_HOME="$( ( + cd "$M2_HOME" + pwd + ))" + [ -n "$JAVA_HOME" ] && + JAVA_HOME="$( ( + cd "$JAVA_HOME" + pwd + ))" +fi + +if [ -z "$JAVA_HOME" ]; then + javaExecutable="$(which javac)" + if [ -n "$javaExecutable" ] && ! 
[ "$(expr \"$javaExecutable\" : '\([^ ]*\)')" = "no" ]; then + # readlink(1) is not available as standard on Solaris 10. + readLink=$(which readlink) + if [ ! $(expr "$readLink" : '\([^ ]*\)') = "no" ]; then + if $darwin; then + javaHome="$(dirname \"$javaExecutable\")" + javaExecutable="$(cd \"$javaHome\" && pwd -P)/javac" + else + javaExecutable="$(readlink -f \"$javaExecutable\")" + fi + javaHome="$(dirname \"$javaExecutable\")" + javaHome=$(expr "$javaHome" : '\(.*\)/bin') + JAVA_HOME="$javaHome" + export JAVA_HOME + fi + fi +fi + +if [ -z "$JAVACMD" ]; then + if [ -n "$JAVA_HOME" ]; then + if [ -x "$JAVA_HOME/jre/sh/java" ]; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + else + JAVACMD="$(which java)" + fi +fi + +if [ ! -x "$JAVACMD" ]; then + echo "Error: JAVA_HOME is not defined correctly." >&2 + echo " We cannot execute $JAVACMD" >&2 + exit 1 +fi + +if [ -z "$JAVA_HOME" ]; then + echo "Warning: JAVA_HOME environment variable is not set." +fi + +CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher + +# traverses directory structure from process work directory to filesystem root +# first directory with .mvn subdirectory is considered project base directory +find_maven_basedir() { + + if [ -z "$1" ]; then + echo "Path not specified to find_maven_basedir" + return 1 + fi + + basedir="$1" + wdir="$1" + while [ "$wdir" != '/' ]; do + if [ -d "$wdir"/.mvn ]; then + basedir=$wdir + break + fi + # workaround for JBEAP-8937 (on Solaris 10/Sparc) + if [ -d "${wdir}" ]; then + wdir=$( + cd "$wdir/.." + pwd + ) + fi + # end of workaround + done + echo "${basedir}" +} + +# concatenates all lines of a file +concat_lines() { + if [ -f "$1" ]; then + echo "$(tr -s '\n' ' ' <"$1")" + fi +} + +BASE_DIR=$(find_maven_basedir "$(pwd)") +if [ -z "$BASE_DIR" ]; then + exit 1 +fi + +########################################################################################## +# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +# This allows using the maven wrapper in projects that prohibit checking in binary data. +########################################################################################## +if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found .mvn/wrapper/maven-wrapper.jar" + fi +else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..." + fi + if [ -n "$MVNW_REPOURL" ]; then + jarUrl="$MVNW_REPOURL/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" + else + jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" + fi + while IFS="=" read key value; do + case "$key" in wrapperUrl) + jarUrl="$value" + break + ;; + esac + done <"$BASE_DIR/.mvn/wrapper/maven-wrapper.properties" + if [ "$MVNW_VERBOSE" = true ]; then + echo "Downloading from: $jarUrl" + fi + wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" + if $cygwin; then + wrapperJarPath=$(cygpath --path --windows "$wrapperJarPath") + fi + + if command -v wget >/dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found wget ... 
using wget" + fi + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + wget "$jarUrl" -O "$wrapperJarPath" + else + wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD "$jarUrl" -O "$wrapperJarPath" + fi + elif command -v curl >/dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found curl ... using curl" + fi + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + curl -o "$wrapperJarPath" "$jarUrl" -f + else + curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o "$wrapperJarPath" "$jarUrl" -f + fi + + else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Falling back to using Java to download" + fi + javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java" + # For Cygwin, switch paths to Windows format before running javac + if $cygwin; then + javaClass=$(cygpath --path --windows "$javaClass") + fi + if [ -e "$javaClass" ]; then + if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Compiling MavenWrapperDownloader.java ..." + fi + # Compiling the Java class + ("$JAVA_HOME/bin/javac" "$javaClass") + fi + if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + # Running the downloader + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Running MavenWrapperDownloader.java ..." + fi + ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR") + fi + fi + fi +fi +########################################################################################## +# End of extension +########################################################################################## + +export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"} +if [ "$MVNW_VERBOSE" = true ]; then + echo $MAVEN_PROJECTBASEDIR +fi +MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" + +# For Cygwin, switch paths to Windows format before running java +if $cygwin; then + [ -n "$M2_HOME" ] && + M2_HOME=$(cygpath --path --windows "$M2_HOME") + [ -n "$JAVA_HOME" ] && + JAVA_HOME=$(cygpath --path --windows "$JAVA_HOME") + [ -n "$CLASSPATH" ] && + CLASSPATH=$(cygpath --path --windows "$CLASSPATH") + [ -n "$MAVEN_PROJECTBASEDIR" ] && + MAVEN_PROJECTBASEDIR=$(cygpath --path --windows "$MAVEN_PROJECTBASEDIR") +fi + +# Provide a "standardized" way to retrieve the CLI args that will +# work with both Windows and non-Windows executions. +MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@" +export MAVEN_CMD_LINE_ARGS + +WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +exec "$JAVACMD" \ + $MAVEN_OPTS \ + -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ + "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ + ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" diff --git a/tests/examples/JDBC/mybatisplus-demo/mvnw.cmd b/tests/examples/JDBC/mybatisplus-demo/mvnw.cmd new file mode 100644 index 0000000000000000000000000000000000000000..c8d43372c986d97911cdc21bd87e0cbe3d83bdda --- /dev/null +++ b/tests/examples/JDBC/mybatisplus-demo/mvnw.cmd @@ -0,0 +1,182 @@ +@REM ---------------------------------------------------------------------------- +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. 
You may obtain a copy of the License at +@REM +@REM https://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. See the License for the +@REM specific language governing permissions and limitations +@REM under the License. +@REM ---------------------------------------------------------------------------- + +@REM ---------------------------------------------------------------------------- +@REM Maven Start Up Batch script +@REM +@REM Required ENV vars: +@REM JAVA_HOME - location of a JDK home dir +@REM +@REM Optional ENV vars +@REM M2_HOME - location of maven2's installed home dir +@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands +@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending +@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven +@REM e.g. to debug Maven itself, use +@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files +@REM ---------------------------------------------------------------------------- + +@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' +@echo off +@REM set title of command window +title %0 +@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on' +@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% + +@REM set %HOME% to equivalent of $HOME +if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") + +@REM Execute a user defined script before this one +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre +@REM check for pre script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat" +if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd" +:skipRcPre + +@setlocal + +set ERROR_CODE=0 + +@REM To isolate internal variables from possible post scripts, we use another setlocal +@setlocal + +@REM ==== START VALIDATION ==== +if not "%JAVA_HOME%" == "" goto OkJHome + +echo. +echo Error: JAVA_HOME not found in your environment. >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +:OkJHome +if exist "%JAVA_HOME%\bin\java.exe" goto init + +echo. +echo Error: JAVA_HOME is set to an invalid directory. >&2 +echo JAVA_HOME = "%JAVA_HOME%" >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +@REM ==== END VALIDATION ==== + +:init + +@REM Find the project base dir, i.e. the directory that contains the folder ".mvn". +@REM Fallback to current working directory if not found. + +set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% +IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir + +set EXEC_DIR=%CD% +set WDIR=%EXEC_DIR% +:findBaseDir +IF EXIST "%WDIR%"\.mvn goto baseDirFound +cd .. 
+IF "%WDIR%"=="%CD%" goto baseDirNotFound +set WDIR=%CD% +goto findBaseDir + +:baseDirFound +set MAVEN_PROJECTBASEDIR=%WDIR% +cd "%EXEC_DIR%" +goto endDetectBaseDir + +:baseDirNotFound +set MAVEN_PROJECTBASEDIR=%EXEC_DIR% +cd "%EXEC_DIR%" + +:endDetectBaseDir + +IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig + +@setlocal EnableExtensions EnableDelayedExpansion +for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a +@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% + +:endReadAdditionalConfig + +SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" +set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" +set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" + +FOR /F "tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( + IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B +) + +@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +@REM This allows using the maven wrapper in projects that prohibit checking in binary data. +if exist %WRAPPER_JAR% ( + if "%MVNW_VERBOSE%" == "true" ( + echo Found %WRAPPER_JAR% + ) +) else ( + if not "%MVNW_REPOURL%" == "" ( + SET DOWNLOAD_URL="%MVNW_REPOURL%/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" + ) + if "%MVNW_VERBOSE%" == "true" ( + echo Couldn't find %WRAPPER_JAR%, downloading it ... + echo Downloading from: %DOWNLOAD_URL% + ) + + powershell -Command "&{"^ + "$webclient = new-object System.Net.WebClient;"^ + "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^ + "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^ + "}"^ + "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"^ + "}" + if "%MVNW_VERBOSE%" == "true" ( + echo Finished downloading %WRAPPER_JAR% + ) +) +@REM End of extension + +@REM Provide a "standardized" way to retrieve the CLI args that will +@REM work with both Windows and non-Windows executions. 
+set MAVEN_CMD_LINE_ARGS=%* + +%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* +if ERRORLEVEL 1 goto error +goto end + +:error +set ERROR_CODE=1 + +:end +@endlocal & set ERROR_CODE=%ERROR_CODE% + +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost +@REM check for post script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat" +if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd" +:skipRcPost + +@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' +if "%MAVEN_BATCH_PAUSE%" == "on" pause + +if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE% + +exit /B %ERROR_CODE% diff --git a/tests/examples/JDBC/mybatisplus-demo/pom.xml b/tests/examples/JDBC/mybatisplus-demo/pom.xml new file mode 100644 index 0000000000000000000000000000000000000000..8535f3b797dcf13bd47d968f735ba5e7873fad51 --- /dev/null +++ b/tests/examples/JDBC/mybatisplus-demo/pom.xml @@ -0,0 +1,101 @@ + + + 4.0.0 + + org.springframework.boot + spring-boot-starter-parent + 2.4.0 + + + com.taosdata.example + mybatisplus-demo + 0.0.1-SNAPSHOT + mybatisplus-demo + Demo project for tdengine + + + 1.8 + + + + + org.springframework.boot + spring-boot-starter + + + org.projectlombok + lombok + true + + + com.baomidou + mybatis-plus-boot-starter + 3.1.2 + + + com.h2database + h2 + runtime + + + + com.taosdata.jdbc + taos-jdbcdriver + 2.0.11 + + + + + mysql + mysql-connector-java + 5.1.47 + + + org.springframework.boot + spring-boot-starter-web + + + org.springframework.boot + spring-boot-devtools + runtime + true + + + org.springframework.boot + spring-boot-starter-test + test + + + junit + junit + 4.12 + test + + + + + + + org.springframework.boot + spring-boot-maven-plugin + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.17 + + + **/*Test.java + + + **/Abstract*.java + + + + + + + + diff --git a/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/MybatisplusDemoApplication.java b/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/MybatisplusDemoApplication.java new file mode 100644 index 0000000000000000000000000000000000000000..7aaebca0846c15c2055596c95ae76d0cee773e41 --- /dev/null +++ b/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/MybatisplusDemoApplication.java @@ -0,0 +1,15 @@ +package com.taosdata.example.mybatisplusdemo; + +import org.mybatis.spring.annotation.MapperScan; +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; + +@SpringBootApplication +@MapperScan("com.taosdata.example.mybatisplusdemo.mapper") +public class MybatisplusDemoApplication { + + public static void main(String[] args) { + SpringApplication.run(MybatisplusDemoApplication.class, args); + } + +} \ No newline at end of file diff --git a/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/config/MybatisPlusConfig.java b/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/config/MybatisPlusConfig.java new file mode 100644 index 0000000000000000000000000000000000000000..a6ac7f7fc247a361286333de4b3c03ffba306336 --- /dev/null +++ b/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/config/MybatisPlusConfig.java @@ -0,0 +1,34 @@ +package 
com.taosdata.example.mybatisplusdemo.config; + +import com.baomidou.mybatisplus.extension.plugins.PaginationInterceptor; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration +public class MybatisPlusConfig { + + + /** mybatis 3.4.1 pagination config start ***/ +// @Bean +// public MybatisPlusInterceptor mybatisPlusInterceptor() { +// MybatisPlusInterceptor interceptor = new MybatisPlusInterceptor(); +// interceptor.addInnerInterceptor(new PaginationInnerInterceptor()); +// return interceptor; +// } + +// @Bean +// public ConfigurationCustomizer configurationCustomizer() { +// return configuration -> configuration.setUseDeprecatedExecutor(false); +// } + + @Bean + public PaginationInterceptor paginationInterceptor() { +// return new PaginationInterceptor(); + PaginationInterceptor paginationInterceptor = new PaginationInterceptor(); + //TODO: mybatis-plus does not support TDengine, use postgresql Dialect + paginationInterceptor.setDialectType("postgresql"); + + return paginationInterceptor; + } + +} diff --git a/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/domain/Temperature.java b/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/domain/Temperature.java new file mode 100644 index 0000000000000000000000000000000000000000..97e50b06f6b71c26d1edd65c5ae9e7ff29a03e4d --- /dev/null +++ b/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/domain/Temperature.java @@ -0,0 +1,15 @@ +package com.taosdata.example.mybatisplusdemo.domain; + +import lombok.Data; + +import java.sql.Timestamp; + +@Data +public class Temperature { + + private Timestamp ts; + private float temperature; + private String location; + private int tbIndex; + +} diff --git a/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/domain/Weather.java b/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/domain/Weather.java new file mode 100644 index 0000000000000000000000000000000000000000..361757411a15d29e742a07a92060e20190921223 --- /dev/null +++ b/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/domain/Weather.java @@ -0,0 +1,15 @@ +package com.taosdata.example.mybatisplusdemo.domain; + +import lombok.Data; + +import java.sql.Timestamp; + +@Data +public class Weather { + + private Timestamp ts; + private float temperature; + private int humidity; + private String location; + +} diff --git a/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/TemperatureMapper.java b/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/TemperatureMapper.java new file mode 100644 index 0000000000000000000000000000000000000000..3e122524d57b5a54e08ff1cfc54101d517f32c32 --- /dev/null +++ b/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/TemperatureMapper.java @@ -0,0 +1,23 @@ +package com.taosdata.example.mybatisplusdemo.mapper; + +import com.baomidou.mybatisplus.core.mapper.BaseMapper; +import com.taosdata.example.mybatisplusdemo.domain.Temperature; +import org.apache.ibatis.annotations.Insert; +import org.apache.ibatis.annotations.Param; +import org.apache.ibatis.annotations.Update; + +public interface TemperatureMapper extends BaseMapper<Temperature> { + + @Update("CREATE TABLE if not exists temperature(ts timestamp, temperature
float) tags(location nchar(64), tbIndex int)") + int createSuperTable(); + + @Update("create table #{tbName} using temperature tags( #{location}, #{tbindex})") + int createTable(@Param("tbName") String tbName, @Param("location") String location, @Param("tbindex") int tbindex); + + @Update("drop table if exists temperature") + void dropSuperTable(); + + @Insert("insert into t${tbIndex}(ts, temperature) values(#{ts}, #{temperature})") + int insertOne(Temperature one); + +} diff --git a/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapper.java b/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapper.java new file mode 100644 index 0000000000000000000000000000000000000000..6733cbded9d1d180408eccaad9e8badad7d39a3d --- /dev/null +++ b/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapper.java @@ -0,0 +1,8 @@ +package com.taosdata.example.mybatisplusdemo.mapper; + +import com.baomidou.mybatisplus.core.mapper.BaseMapper; +import com.taosdata.example.mybatisplusdemo.domain.Weather; + +public interface WeatherMapper extends BaseMapper<Weather> { + +} diff --git a/tests/examples/JDBC/mybatisplus-demo/src/main/resources/application.yml b/tests/examples/JDBC/mybatisplus-demo/src/main/resources/application.yml new file mode 100644 index 0000000000000000000000000000000000000000..96667f28b8d45d74541609f3d44176534c609f23 --- /dev/null +++ b/tests/examples/JDBC/mybatisplus-demo/src/main/resources/application.yml @@ -0,0 +1,34 @@ +spring: + datasource: + # driver-class-name: org.h2.Driver + # schema: classpath:db/schema-mysql.sql + # data: classpath:db/data-mysql.sql + # url: jdbc:h2:mem:test + # username: root + # password: test + + # driver-class-name: com.mysql.jdbc.Driver + # url: jdbc:mysql://master:3306/test?useSSL=false + # username: root + # password: 123456 + + driver-class-name: com.taosdata.jdbc.TSDBDriver + url: jdbc:TAOS://localhost:6030/mp_test + user: root + password: taosdata + charset: UTF-8 + locale: en_US.UTF-8 + timezone: UTC-8 + +mybatis-plus: + configuration: + map-underscore-to-camel-case: false + +logging: + level: + com: + taosdata: + example: + mybatisplusdemo: + mapper: debug + diff --git a/tests/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/TemperatureMapperTest.java b/tests/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/TemperatureMapperTest.java new file mode 100644 index 0000000000000000000000000000000000000000..4331d15d3476d3428e72a186664ed77cc59aad3e --- /dev/null +++ b/tests/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/TemperatureMapperTest.java @@ -0,0 +1,140 @@ +package com.taosdata.example.mybatisplusdemo.mapper; + +import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper; +import com.baomidou.mybatisplus.core.metadata.IPage; +import com.baomidou.mybatisplus.extension.plugins.pagination.Page; +import com.taosdata.example.mybatisplusdemo.domain.Temperature; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; + +import java.sql.Timestamp; +import java.util.HashMap; +import java.util.List; +import
java.util.Map; +import java.util.Random; + +@RunWith(SpringJUnit4ClassRunner.class) +@SpringBootTest +public class TemperatureMapperTest { + + private static Random random = new Random(System.currentTimeMillis()); + private static String[] locations = {"北京", "上海", "深圳", "广州", "杭州"}; + + @Before + public void before() { + mapper.dropSuperTable(); + // create table temperature + mapper.createSuperTable(); + // create table t_X using temperature + for (int i = 0; i < 10; i++) { + mapper.createTable("t" + i, locations[random.nextInt(locations.length)], i); + } + // insert into table + int affectRows = 0; + // insert 10 tables + for (int i = 0; i < 10; i++) { + // each table insert 5 rows + for (int j = 0; j < 5; j++) { + Temperature one = new Temperature(); + one.setTs(new Timestamp(1605024000000l)); + one.setTemperature(random.nextFloat() * 50); + one.setLocation("望京"); + one.setTbIndex(i); + affectRows += mapper.insertOne(one); + } + } + Assert.assertEquals(50, affectRows); + } + + @After + public void after() { + mapper.dropSuperTable(); + } + + @Autowired + private TemperatureMapper mapper; + + /*** + * test SelectList + * **/ + @Test + public void testSelectList() { + List<Temperature> temperatureList = mapper.selectList(null); + temperatureList.forEach(System.out::println); + } + + /*** + * test InsertOne which is a custom method + * ***/ + @Test + public void testInsert() { + Temperature one = new Temperature(); + one.setTs(new Timestamp(1605024000000l)); + one.setTemperature(random.nextFloat() * 50); + one.setLocation("望京"); + int affectRows = mapper.insertOne(one); + Assert.assertEquals(1, affectRows); + } + + /*** + * test SelectOne + * **/ + @Test + public void testSelectOne() { + QueryWrapper<Temperature> wrapper = new QueryWrapper<>(); + wrapper.eq("location", "beijing"); + Temperature one = mapper.selectOne(wrapper); + System.out.println(one); + Assert.assertNotNull(one); + } + + /*** + * test select By map + * ***/ + @Test + public void testSelectByMap() { + Map<String, Object> map = new HashMap<>(); + map.put("location", "beijing"); + List<Temperature> temperatures = mapper.selectByMap(map); + Assert.assertEquals(1, temperatures.size()); + } + + /*** + * test selectObjs + * **/ + @Test + public void testSelectObjs() { + List<Object> ts = mapper.selectObjs(null); + System.out.println(ts); + } + + /** + * test selectCount + * **/ + @Test + public void testSelectCount() { + int count = mapper.selectCount(null); + Assert.assertEquals(5, count); + } + + /**** + * 分页 + */ + @Test + public void testSelectPage() { + IPage<Temperature> page = new Page<>(1, 2); + IPage<Temperature> temperatureIPage = mapper.selectPage(page, null); + System.out.println("total : " + temperatureIPage.getTotal()); + System.out.println("pages : " + temperatureIPage.getPages()); + for (Temperature temperature : temperatureIPage.getRecords()) { + System.out.println(temperature); + } + } + +} \ No newline at end of file diff --git a/tests/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapperTest.java b/tests/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapperTest.java new file mode 100644 index 0000000000000000000000000000000000000000..1699344552f89e1595d1317019c992dcd3820e77 --- /dev/null +++ b/tests/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapperTest.java @@ -0,0 +1,88 @@ +package com.taosdata.example.mybatisplusdemo.mapper; + +import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper; +import
com.baomidou.mybatisplus.core.metadata.IPage; +import com.baomidou.mybatisplus.extension.plugins.pagination.Page; +import com.taosdata.example.mybatisplusdemo.domain.Weather; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; + +import java.sql.Timestamp; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Random; + +@RunWith(SpringJUnit4ClassRunner.class) +@SpringBootTest +public class WeatherMapperTest { + + private static Random random = new Random(System.currentTimeMillis()); + + @Autowired + private WeatherMapper mapper; + + @Test + public void testSelectList() { + List<Weather> weathers = mapper.selectList(null); + weathers.forEach(System.out::println); + } + + @Test + public void testInsert() { + Weather one = new Weather(); + one.setTs(new Timestamp(1605024000000l)); + one.setTemperature(random.nextFloat() * 50); + one.setHumidity(random.nextInt(100)); + one.setLocation("望京"); + int affectRows = mapper.insert(one); + Assert.assertEquals(1, affectRows); + } + + @Test + public void testSelectOne() { + QueryWrapper<Weather> wrapper = new QueryWrapper<>(); + wrapper.eq("location", "beijing"); + Weather one = mapper.selectOne(wrapper); + System.out.println(one); + Assert.assertEquals(12.22f, one.getTemperature(), 0.00f); + Assert.assertEquals("beijing", one.getLocation()); + } + + @Test + public void testSelectByMap() { + Map<String, Object> map = new HashMap<>(); + map.put("location", "beijing"); + List<Weather> weathers = mapper.selectByMap(map); + Assert.assertEquals(1, weathers.size()); + } + + @Test + public void testSelectObjs() { + List<Object> ts = mapper.selectObjs(null); + System.out.println(ts); + } + + @Test + public void testSelectCount() { + int count = mapper.selectCount(null); +// Assert.assertEquals(5, count); + System.out.println(count); + } + + @Test + public void testSelectPage() { + IPage<Weather> page = new Page<>(1, 2); + IPage<Weather> weatherIPage = mapper.selectPage(page, null); + System.out.println("total : " + weatherIPage.getTotal()); + System.out.println("pages : " + weatherIPage.getPages()); + for (Weather weather : weatherIPage.getRecords()) { + System.out.println(weather); + } + } + +} \ No newline at end of file diff --git a/tests/examples/c/apitest.c b/tests/examples/c/apitest.c index 759e16d1de285c9490d7f1af7a94de81dbadb6e4..be60a88ad70721bd6281d9ca8f1d73263a788532 100644 --- a/tests/examples/c/apitest.c +++ b/tests/examples/c/apitest.c @@ -9,26 +9,40 @@ static void prepare_data(TAOS* taos) { - taos_query(taos, "drop database if exists test;"); + TAOS_RES *result; + result = taos_query(taos, "drop database if exists test;"); + taos_free_result(result); usleep(100000); - taos_query(taos, "create database test;"); + result = taos_query(taos, "create database test;"); + taos_free_result(result); usleep(100000); taos_select_db(taos, "test"); - taos_query(taos, "create table meters(ts timestamp, a int) tags(area int);"); + result = taos_query(taos, "create table meters(ts timestamp, a int) tags(area int);"); + taos_free_result(result); - taos_query(taos, "create table t0 using meters tags(0);"); - taos_query(taos, "create table t1 using meters tags(1);"); - taos_query(taos, "create table t2 using meters tags(2);"); - taos_query(taos, "create table t3 using meters tags(3);"); - taos_query(taos, "create table t4 using meters tags(4);"); - taos_query(taos,
"create table t5 using meters tags(5);"); - taos_query(taos, "create table t6 using meters tags(6);"); - taos_query(taos, "create table t7 using meters tags(7);"); - taos_query(taos, "create table t8 using meters tags(8);"); - taos_query(taos, "create table t9 using meters tags(9);"); + result = taos_query(taos, "create table t0 using meters tags(0);"); + taos_free_result(result); + result = taos_query(taos, "create table t1 using meters tags(1);"); + taos_free_result(result); + result = taos_query(taos, "create table t2 using meters tags(2);"); + taos_free_result(result); + result = taos_query(taos, "create table t3 using meters tags(3);"); + taos_free_result(result); + result = taos_query(taos, "create table t4 using meters tags(4);"); + taos_free_result(result); + result = taos_query(taos, "create table t5 using meters tags(5);"); + taos_free_result(result); + result = taos_query(taos, "create table t6 using meters tags(6);"); + taos_free_result(result); + result = taos_query(taos, "create table t7 using meters tags(7);"); + taos_free_result(result); + result = taos_query(taos, "create table t8 using meters tags(8);"); + taos_free_result(result); + result = taos_query(taos, "create table t9 using meters tags(9);"); + taos_free_result(result); - TAOS_RES* res = taos_query(taos, "insert into t0 values('2020-01-01 00:00:00.000', 0)" + result = taos_query(taos, "insert into t0 values('2020-01-01 00:00:00.000', 0)" " ('2020-01-01 00:01:00.000', 0)" " ('2020-01-01 00:02:00.000', 0)" " t1 values('2020-01-01 00:00:00.000', 0)" @@ -46,10 +60,11 @@ static void prepare_data(TAOS* taos) { " t7 values('2020-01-01 00:01:02.000', 0)" " t8 values('2020-01-01 00:01:02.000', 0)" " t9 values('2020-01-01 00:01:02.000', 0)"); - int affected = taos_affected_rows(res); + int affected = taos_affected_rows(result); if (affected != 18) { printf("\033[31m%d rows affected by last insert statement, but it should be 18\033[0m\n", affected); } + taos_free_result(result); // super tables subscription usleep(1000000); } @@ -135,6 +150,7 @@ static void verify_query(TAOS* taos) { res = taos_query(taos, "select * from meters"); taos_stop_query(res); + taos_free_result(res); } @@ -153,23 +169,30 @@ static void verify_subscribe(TAOS* taos) { res = taos_consume(tsub); check_row_count(__LINE__, res, 0); - taos_query(taos, "insert into t0 values('2020-01-01 00:02:00.001', 0);"); - taos_query(taos, "insert into t8 values('2020-01-01 00:01:03.000', 0);"); + TAOS_RES *result; + result = taos_query(taos, "insert into t0 values('2020-01-01 00:02:00.001', 0);"); + taos_free_result(result); + result = taos_query(taos, "insert into t8 values('2020-01-01 00:01:03.000', 0);"); + taos_free_result(result); res = taos_consume(tsub); check_row_count(__LINE__, res, 2); - taos_query(taos, "insert into t2 values('2020-01-01 00:01:02.001', 0);"); - taos_query(taos, "insert into t1 values('2020-01-01 00:03:00.001', 0);"); + result = taos_query(taos, "insert into t2 values('2020-01-01 00:01:02.001', 0);"); + taos_free_result(result); + result = taos_query(taos, "insert into t1 values('2020-01-01 00:03:00.001', 0);"); + taos_free_result(result); res = taos_consume(tsub); check_row_count(__LINE__, res, 2); - taos_query(taos, "insert into t1 values('2020-01-01 00:03:00.002', 0);"); + result = taos_query(taos, "insert into t1 values('2020-01-01 00:03:00.002', 0);"); + taos_free_result(result); res = taos_consume(tsub); check_row_count(__LINE__, res, 1); // keep progress information and restart subscription taos_unsubscribe(tsub, 1); - 
taos_query(taos, "insert into t0 values('2020-01-01 00:04:00.000', 0);"); + result = taos_query(taos, "insert into t0 values('2020-01-01 00:04:00.000', 0);"); + taos_free_result(result); tsub = taos_subscribe(taos, 1, "test", "select * from meters;", NULL, NULL, 0); res = taos_consume(tsub); check_row_count(__LINE__, res, 24); @@ -196,7 +219,8 @@ static void verify_subscribe(TAOS* taos) { res = taos_consume(tsub); check_row_count(__LINE__, res, 0); - taos_query(taos, "insert into t0 values('2020-01-01 00:04:00.001', 0);"); + result = taos_query(taos, "insert into t0 values('2020-01-01 00:04:00.001', 0);"); + taos_free_result(result); res = taos_consume(tsub); check_row_count(__LINE__, res, 1); @@ -205,7 +229,8 @@ static void verify_subscribe(TAOS* taos) { int blockFetch = 0; tsub = taos_subscribe(taos, 1, "test", "select * from meters;", subscribe_callback, &blockFetch, 1000); usleep(2000000); - taos_query(taos, "insert into t0 values('2020-01-01 00:05:00.001', 0);"); + result = taos_query(taos, "insert into t0 values('2020-01-01 00:05:00.001', 0);"); + taos_free_result(result); usleep(2000000); taos_unsubscribe(tsub, 0); } @@ -213,8 +238,9 @@ static void verify_subscribe(TAOS* taos) { void verify_prepare(TAOS* taos) { TAOS_RES* result = taos_query(taos, "drop database if exists test;"); + taos_free_result(result); usleep(100000); - taos_query(taos, "create database test;"); + result = taos_query(taos, "create database test;"); int code = taos_errno(result); if (code != 0) { @@ -429,7 +455,8 @@ void verify_stream(TAOS* taos) { NULL); printf("waiting for stream data\n"); usleep(100000); - taos_query(taos, "insert into t0 values(now, 0)(now+5s,1)(now+10s, 2);"); + TAOS_RES* result = taos_query(taos, "insert into t0 values(now, 0)(now+5s,1)(now+10s, 2);"); + taos_free_result(result); usleep(200000000); taos_close_stream(strm); } diff --git a/tests/examples/c/asyncdemo.c b/tests/examples/c/asyncdemo.c index 1e523bd7fec9ba9fd90d1a71949f40c7be71384d..c6cc89b31d6280c45ea30b33509eed5ebdf0dc08 100644 --- a/tests/examples/c/asyncdemo.c +++ b/tests/examples/c/asyncdemo.c @@ -46,6 +46,35 @@ void taos_insert_call_back(void *param, TAOS_RES *tres, int code); void taos_select_call_back(void *param, TAOS_RES *tres, int code); void taos_error(TAOS *taos); +static void queryDB(TAOS *taos, char *command) { + int i; + TAOS_RES *pSql = NULL; + int32_t code = -1; + + for (i = 0; i < 5; i++) { + if (NULL != pSql) { + taos_free_result(pSql); + pSql = NULL; + } + + pSql = taos_query(taos, command); + code = taos_errno(pSql); + if (0 == code) { + break; + } + } + + if (code != 0) { + fprintf(stderr, "Failed to run %s, reason: %s\n", command, taos_errstr(pSql)); + taos_free_result(pSql); + taos_close(taos); + taos_cleanup(); + exit(EXIT_FAILURE); + } + + taos_free_result(pSql); +} + int main(int argc, char *argv[]) { TAOS *taos; @@ -78,16 +107,14 @@ int main(int argc, char *argv[]) printf("success to connect to server\n"); - sprintf(sql, "drop database %s", db); - taos_query(taos, sql); + sprintf(sql, "drop database if exists %s", db); + queryDB(taos, sql); sprintf(sql, "create database %s", db); - if (taos_query(taos, sql) != 0) - taos_error(taos); + queryDB(taos, sql); sprintf(sql, "use %s", db); - if (taos_query(taos, sql) != 0) - taos_error(taos); + queryDB(taos, sql); strcpy(prefix, "asytbl_"); for (i = 0; i < numOfTables; ++i) { @@ -95,8 +122,7 @@ int main(int argc, char *argv[]) tableList[i].taos = taos; sprintf(tableList[i].name, "%s%d", prefix, i); sprintf(sql, "create table %s%d (ts timestamp, volume 
bigint)", prefix, i); - if (taos_query(taos, sql) != 0) - taos_error(taos); + queryDB(taos, sql); } gettimeofday(&systemTime, NULL); @@ -151,6 +177,7 @@ void taos_error(TAOS *con) { fprintf(stderr, "TDengine error: %s\n", taos_errstr(con)); taos_close(con); + taos_cleanup(); exit(1); } @@ -186,6 +213,8 @@ void taos_insert_call_back(void *param, TAOS_RES *tres, int code) printf("%lld mseconds to insert %d data points\n", (et - st) / 1000, points*numOfTables); } } + + taos_free_result(tres); } void taos_retrieve_call_back(void *param, TAOS_RES *tres, int numOfRows) @@ -197,7 +226,7 @@ void taos_retrieve_call_back(void *param, TAOS_RES *tres, int numOfRows) for (int i = 0; iname, numOfRows); - taos_free_result(tres); + //taos_free_result(tres); printf("%d rows data retrieved from %s\n", pTable->rowsRetrieved, pTable->name); tablesProcessed++; @@ -221,6 +250,8 @@ void taos_retrieve_call_back(void *param, TAOS_RES *tres, int numOfRows) printf("%lld mseconds to query %d data rows\n", (et - st) / 1000, points * numOfTables); } } + + taos_free_result(tres); } void taos_select_call_back(void *param, TAOS_RES *tres, int code) @@ -236,6 +267,10 @@ void taos_select_call_back(void *param, TAOS_RES *tres, int code) } else { printf("%s select failed, code:%d\n", pTable->name, code); + taos_free_result(tres); + taos_cleanup(); exit(1); } + + taos_free_result(tres); } diff --git a/tests/examples/c/demo.c b/tests/examples/c/demo.c index 8f8a66a32593bc25d71b554808719ca42f5b32ac..74a49288e9a1aa7081db45925bf52d6516e4801a 100644 --- a/tests/examples/c/demo.c +++ b/tests/examples/c/demo.c @@ -22,10 +22,38 @@ #include #include // TAOS header file +static void queryDB(TAOS *taos, char *command) { + int i; + TAOS_RES *pSql = NULL; + int32_t code = -1; + + for (i = 0; i < 5; i++) { + if (NULL != pSql) { + taos_free_result(pSql); + pSql = NULL; + } + + pSql = taos_query(taos, command); + code = taos_errno(pSql); + if (0 == code) { + break; + } + } + + if (code != 0) { + fprintf(stderr, "Failed to run %s, reason: %s\n", command, taos_errstr(pSql)); + taos_free_result(pSql); + taos_close(taos); + exit(EXIT_FAILURE); + } + + taos_free_result(pSql); +} + +void Test(char *qstr, const char *input, int i); + int main(int argc, char *argv[]) { - TAOS * taos; char qstr[1024]; - TAOS_RES *result; // connect to server if (argc < 2) { @@ -35,37 +63,26 @@ int main(int argc, char *argv[]) { // init TAOS taos_init(); - - taos = taos_connect(argv[1], "root", "taosdata", NULL, 0); + for (int i = 0; i < 4000000; i++) { + Test(qstr, argv[1], i); + } + taos_cleanup(); +} +void Test(char *qstr, const char *input, int index) { + TAOS *taos = taos_connect(input, "root", "taosdata", NULL, 0); + printf("==================test at %d\n================================", index); + queryDB(taos, "drop database if exists demo"); + queryDB(taos, "create database demo"); + TAOS_RES *result; if (taos == NULL) { printf("failed to connect to server, reason:%s\n", "null taos"/*taos_errstr(taos)*/); exit(1); } - printf("success to connect to server\n"); - - - taos_query(taos, "drop database demo"); - - result = taos_query(taos, "create database demo"); - if (result == NULL) { - printf("failed to create database, reason:%s\n", "null result"/*taos_errstr(taos)*/); - exit(1); - } - printf("success to create database\n"); - - taos_query(taos, "use demo"); + queryDB(taos, "use demo"); - // create table - if (taos_query(taos, "create table m1 (ts timestamp, ti tinyint, si smallint, i int, bi bigint, f float, d double, b binary(10))") == 0) { - printf("failed 
to create table, reason:%s\n", taos_errstr(result)); - exit(1); - } + queryDB(taos, "create table m1 (ts timestamp, ti tinyint, si smallint, i int, bi bigint, f float, d double, b binary(10))"); printf("success to create table\n"); - // sleep for one second to make sure table is created on data node - // taosMsleep(1000); - - // insert 10 records int i = 0; for (i = 0; i < 10; ++i) { sprintf(qstr, "insert into m1 values (%" PRId64 ", %d, %d, %d, %d, %f, %lf, '%s')", 1546300800000 + i * 1000, i, i, i, i*10000000, i*1.0, i*2.0, "hello"); @@ -80,10 +97,11 @@ int main(int argc, char *argv[]) { printf("insert row: %i\n", i); } else { printf("failed to insert row: %i, reason:%s\n", i, "null result"/*taos_errstr(result)*/); + taos_free_result(result); exit(1); } + taos_free_result(result); - //sleep(1); } printf("success to insert rows, total %d rows\n", i); @@ -91,7 +109,8 @@ int main(int argc, char *argv[]) { sprintf(qstr, "SELECT * FROM m1"); result = taos_query(taos, qstr); if (result == NULL || taos_errno(result) != 0) { - printf("failed to select, reason:%s\n", taos_errstr(result)); + printf("failed to select, reason:%s\n", taos_errstr(result)); + taos_free_result(result); exit(1); } @@ -112,5 +131,6 @@ int main(int argc, char *argv[]) { taos_free_result(result); printf("====demo end====\n\n"); - return getchar(); + taos_close(taos); } + diff --git a/tests/examples/go/taosdemo.go b/tests/examples/go/taosdemo.go index b42e1e6d703a96bb86454f177a7207577c6d4d4c..2c3a7d09b68d84feea1ae2771b90643dbbfbc063 100644 --- a/tests/examples/go/taosdemo.go +++ b/tests/examples/go/taosdemo.go @@ -87,7 +87,7 @@ func init() { func printAllArgs() { fmt.Printf("\n============= args parse result: =============\n") - fmt.Printf("dbName: %v\n", configPara.hostName) + fmt.Printf("hostName: %v\n", configPara.hostName) fmt.Printf("serverPort: %v\n", configPara.serverPort) fmt.Printf("usr: %v\n", configPara.user) fmt.Printf("password: %v\n", configPara.password) @@ -107,7 +107,7 @@ func main() { fmt.Scanln() url = "root:taosdata@/tcp(" + configPara.hostName + ":" + strconv.Itoa(configPara.serverPort) + ")/" - //url = fmt.Sprintf("%s:%s@/tcp(%s:%d)/%s?interpolateParams=true", configPara.user, configPara.password, configPara.hostName, configPara.serverPort, configPara.dbName) + //url = fmt.Sprintf("%s:%s@/tcp(%s:%d)/%s?interpolateParams=true", configPara.user, configPara.password, configPara.hostName, configPara.serverPort, configPara.dbName) // open connect to taos server //db, err := sql.Open(taosDriverName, url) //if err != nil { @@ -115,6 +115,7 @@ func main() { // os.Exit(1) //} //defer db.Close() + rand.Seed(time.Now().Unix()) createDatabase(configPara.dbName, configPara.supTblName) fmt.Printf("======== create database success! 
========\n\n") diff --git a/tests/examples/nodejs/README-win.md b/tests/examples/nodejs/README-win.md new file mode 100644 index 0000000000000000000000000000000000000000..75fec69413af2bb49498118ec7235c9947e2f89e --- /dev/null +++ b/tests/examples/nodejs/README-win.md @@ -0,0 +1,200 @@ +# 如何在windows上使用nodejs进行TDengine应用开发 + +## 环境准备 + +(1)安装nodejs-10.22.0 + +下载链接:https://nodejs.org/dist/v10.22.0/node-v10.22.0-win-x64.zip +解压安装,把node配置到环境变量里 + +cmd启动命令行,查看node的版本 + +```shell +> node.exe --version +v10.22.0 + +> npm --version +6.14.6 +``` + + + +(2)安装python2.7 + +下载链接:https://www.python.org/ftp/python/2.7.18/python-2.7.18.amd64.msi + +查看python版本 + +```shell +>python --version +Python 2.7.18 +``` + + +(3)安装TDengine-client + +下载地址:https://www.taosdata.com/cn/all-downloads/,选择一个合适的windows-client下载(client应该尽量与server端的版本保持一致) + +使用client的taos shell连接server + +```shell +>taos -h node5 + +Welcome to the TDengine shell from Linux, Client Version:2.0.6.0 +Copyright (c) 2017 by TAOS Data, Inc. All rights reserved. + +taos> show dnodes; + id | end_point | vnodes | cores | status | role | create_time | offline reason | +============================================================================================================================================ + 1 | node5:6030 | 7 | 1 | ready | any | 2020-10-26 09:45:26.308 | | +Query OK, 1 row(s) in set (0.036000s) +``` + +注意: +* 检查能否在client的机器上ping通server的fqdn +* 如果你的dns server并没有提供到server的域名解析,可以将server的hostname配置到client的hosts文件中 + + +## 应用开发 + +(1)建立nodejs项目 + +``` +npm init +``` + +(2)安装windows-build-tools +``` +npm install --global --production windows-build-tools +``` + +(3)安装td2.0-connector驱动 + +``` tdshell +npm install td2.0-connector +``` + +(4)nodejs访问tdengine的示例程序 + +```javascript +const taos = require('td2.0-connector'); + +var host = null; +var port = 6030; +for (var i = 2; i < global.process.argv.length; i++) { + var key = global.process.argv[i].split("=")[0]; + var value = global.process.argv[i].split("=")[1]; + + if ("host" == key) { + host = value; + } + if ("port" == key) { + port = value; + } +} + +if (host == null) { + console.log("Usage: node nodejsChecker.js host= port="); + process.exit(0); +} + +// establish connection +var conn = taos.connect({host: host, user: "root", password: "taosdata", port: port}); +var cursor = conn.cursor(); +// create database +executeSql("create database if not exists testnodejs", 0); +// use db +executeSql("use testnodejs", 0); +// drop table +executeSql("drop table if exists testnodejs.weather", 0); +// create table +executeSql("create table if not exists testnodejs.weather(ts timestamp, temperature float, humidity int)", 0); +// insert +executeSql("insert into testnodejs.weather (ts, temperature, humidity) values(now, 20.5, 34)", 1); +// select +executeQuery("select * from testnodejs.weather"); +// close connection +conn.close(); + +function executeQuery(sql) { + var start = new Date().getTime(); + var promise = cursor.query(sql, true); + var end = new Date().getTime(); + promise.then(function (result) { + printSql(sql, result != null, (end - start)); + result.pretty(); + }); +} + +function executeSql(sql, affectRows) { + var start = new Date().getTime(); + var promise = cursor.execute(sql); + var end = new Date().getTime(); + printSql(sql, promise == affectRows, (end - start)); +} + +function printSql(sql, succeed, cost) { + console.log("[ " + (succeed ? 
"OK" : "ERROR!") + " ] time cost: " + cost + " ms, execute statement ====> " + sql); +} +``` + +(5)测试nodejs程序 + +```shell +>node nodejsChecker.js +Usage: node nodejsChecker.js host= port= +# 提示指定host + +>node nodejsChecker.js host=node5 +Successfully connected to TDengine +Query OK, 0 row(s) affected (0.00997610s) +[ OK ] time cost: 14 ms, execute statement ====> create database if not exists testnodejs +Query OK, 0 row(s) affected (0.00235920s) +[ OK ] time cost: 4 ms, execute statement ====> use testnodejs +Query OK, 0 row(s) affected (0.06604280s) +[ OK ] time cost: 67 ms, execute statement ====> drop table if exists testnodejs.weather +Query OK, 0 row(s) affected (0.59403290s) +[ OK ] time cost: 595 ms, execute statement ====> create table if not exists testnodejs.weather(ts timestamp, temperature float, humidity int) +Query OK, 1 row(s) affected (0.01058950s) +[ OK ] time cost: 12 ms, execute statement ====> insert into testnodejs.weather (ts, temperature, humidity) values(now, 20.5, 34) +Query OK, 1 row(s) in set (0.00401490s) +[ OK ] time cost: 10 ms, execute statement ====> select * from testnodejs.weather +Connection is closed + + ts | temperature | humidity | +===================================================================== +2020-10-27 18:49:15.547 | 20.5 | 34 | +``` + +## 指南 + +### 如何设置主机名和hosts + +在server上查看hostname和fqdn +```shell +查看hostname +# hostname +taos-server + +查看fqdn +# hostname -f +taos-server +``` + +windows下hosts文件位于: +C:\\Windows\System32\drivers\etc\hosts +修改hosts文件,添加server的ip和hostname + +``` +192.168.56.101 node5 +``` + +> 什么是FQDN? +> +> FQDN(Full qualified domain name)全限定域名,fqdn由2部分组成:hostname+domainname。 +> +> 例如,一个邮件服务器的fqdn可能是:mymail.somecollege.edu,其中mymail是hostname(主机名),somcollege.edu是domainname(域名)。本例中,.edu是顶级域名,.somecollege是二级域名。 +> +> 当连接服务器时,必须指定fqdn,然后,dns服务器通过查看dns表,将hostname解析为相应的ip地址。如果只指定hostname(不指定domainname),应用程序可能服务解析主机名。因为如果你试图访问不在本地的远程服务器时,本地的dns服务器和可能没有远程服务器的hostname列表。 +> +> 参考:https://kb.iu.edu/d/aiuv diff --git a/tests/examples/nodejs/nodejsChecker.js b/tests/examples/nodejs/nodejsChecker.js index c77944f75243a50e6e2c738e659cb4e64f3e5574..f838d5cc8465dba70b5372a5d7720a8cff69544a 100644 --- a/tests/examples/nodejs/nodejsChecker.js +++ b/tests/examples/nodejs/nodejsChecker.js @@ -42,8 +42,8 @@ function executeQuery(sql){ var start = new Date().getTime(); var promise = cursor.query(sql, true); var end = new Date().getTime(); - printSql(sql, promise != null,(end - start)); promise.then(function(result){ + printSql(sql, result != null,(end - start)); result.pretty(); }); } diff --git a/tests/examples/rust/.gitignore b/tests/examples/rust/.gitignore deleted file mode 100644 index 693699042b1a8ccf697636d3cd34b200f3a8278b..0000000000000000000000000000000000000000 --- a/tests/examples/rust/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -/target -**/*.rs.bk -Cargo.lock diff --git a/tests/examples/rust/Cargo.toml b/tests/examples/rust/Cargo.toml deleted file mode 100644 index c9cff73bc0d510c534467973f9f16e7413d25b04..0000000000000000000000000000000000000000 --- a/tests/examples/rust/Cargo.toml +++ /dev/null @@ -1,7 +0,0 @@ -[package] -name = "tdengine" -version = "0.1.0" -authors = ["Chunhua Jiang "] -edition = "2018" - -[dependencies] diff --git a/tests/examples/rust/README.md b/tests/examples/rust/README.md deleted file mode 100644 index 2ef8901ad6d30c0f33740e40662f1e9860dbb21a..0000000000000000000000000000000000000000 --- a/tests/examples/rust/README.md +++ /dev/null @@ -1,20 +0,0 @@ -# TDengine driver connector for Rust - -It's a rust 
implementation for [TDengine](https://github.com/taosdata/TDengine), an open-sourced big data platform designed and optimized for the Internet of Things (IoT), Connected Cars, Industrial IoT, and IT Infrastructure and Application Monitoring. - -## Dependencies -- Rust: -``` -curl https://sh.rustup.rs -sSf | sh -``` - -## Run with Sample - -Build and run basic sample: -``` -cargo run --example demo -``` -Build and run subscribe sample: -``` -cargo run --example subscribe -``` diff --git a/tests/examples/rust/build.rs b/tests/examples/rust/build.rs deleted file mode 100644 index f7276d3ef67e0261291d116cfc595c306ebd4969..0000000000000000000000000000000000000000 --- a/tests/examples/rust/build.rs +++ /dev/null @@ -1,10 +0,0 @@ -// build.rs - -use std::env; - -fn main() { - let project_dir = env::var("CARGO_MANIFEST_DIR").unwrap(); - - println!("cargo:rustc-link-search={}", project_dir); // the "-L" flag - println!("cargo:rustc-link-lib=taos"); // the "-l" flag -} diff --git a/tests/examples/rust/examples/demo.rs b/tests/examples/rust/examples/demo.rs deleted file mode 100644 index 182e46c8db4aa5ba68827309937320ef1e9b13e6..0000000000000000000000000000000000000000 --- a/tests/examples/rust/examples/demo.rs +++ /dev/null @@ -1,19 +0,0 @@ -use std::process; -use tdengine::Tdengine; - -fn main() { - let tde = Tdengine::new("127.0.0.1", "root", "taosdata", "demo", 0) - .unwrap_or_else(|err| { - eprintln!("Can't create Tdengine: {}", err); - process::exit(1) - }); - - tde.query("drop database demo"); - tde.query("create database demo"); - tde.query("use demo"); - tde.query("create table m1 (ts timestamp, speed int)"); - - for i in 0..10 { - tde.query(format!("insert into m1 values (now+{}s, {})", i, i).as_str()); - } -} diff --git a/tests/examples/rust/examples/subscribe.rs b/tests/examples/rust/examples/subscribe.rs deleted file mode 100644 index 3255e36ee7cdc9af3d9233ef7accfd6669b912e2..0000000000000000000000000000000000000000 --- a/tests/examples/rust/examples/subscribe.rs +++ /dev/null @@ -1,19 +0,0 @@ -use std::process; -use tdengine::Subscriber; - -fn main() { - let subscriber = Subscriber::new("127.0.0.1", "root", "taosdata", "demo", "m1", 0, 1000) - .unwrap_or_else(|err| { - eprintln!("Can't create Subscriber: {}", err); - process::exit(1) - }); - - loop { - let row = subscriber.consume().unwrap_or_else(|err| { - eprintln!("consume exit: {}", err); - process::exit(1) - }); - - subscriber.print_row(&row); - } -} diff --git a/tests/examples/rust/src/bindings.rs b/tests/examples/rust/src/bindings.rs deleted file mode 100644 index fc13647130995b2a85b485236ec9a7ba30c1cc1b..0000000000000000000000000000000000000000 --- a/tests/examples/rust/src/bindings.rs +++ /dev/null @@ -1,332 +0,0 @@ -/* automatically generated by rust-bindgen */ -#![allow(unused)] -#![allow(non_camel_case_types)] - -pub const _STDINT_H: u32 = 1; -pub const _FEATURES_H: u32 = 1; -pub const _DEFAULT_SOURCE: u32 = 1; -pub const __USE_ISOC11: u32 = 1; -pub const __USE_ISOC99: u32 = 1; -pub const __USE_ISOC95: u32 = 1; -pub const __USE_POSIX_IMPLICITLY: u32 = 1; -pub const _POSIX_SOURCE: u32 = 1; -pub const _POSIX_C_SOURCE: u32 = 200809; -pub const __USE_POSIX: u32 = 1; -pub const __USE_POSIX2: u32 = 1; -pub const __USE_POSIX199309: u32 = 1; -pub const __USE_POSIX199506: u32 = 1; -pub const __USE_XOPEN2K: u32 = 1; -pub const __USE_XOPEN2K8: u32 = 1; -pub const _ATFILE_SOURCE: u32 = 1; -pub const __USE_MISC: u32 = 1; -pub const __USE_ATFILE: u32 = 1; -pub const __USE_FORTIFY_LEVEL: u32 = 0; -pub const _STDC_PREDEF_H: u32 = 1; -pub 
const __STDC_IEC_559__: u32 = 1; -pub const __STDC_IEC_559_COMPLEX__: u32 = 1; -pub const __STDC_ISO_10646__: u32 = 201505; -pub const __STDC_NO_THREADS__: u32 = 1; -pub const __GNU_LIBRARY__: u32 = 6; -pub const __GLIBC__: u32 = 2; -pub const __GLIBC_MINOR__: u32 = 23; -pub const _SYS_CDEFS_H: u32 = 1; -pub const __WORDSIZE: u32 = 64; -pub const __WORDSIZE_TIME64_COMPAT32: u32 = 1; -pub const __SYSCALL_WORDSIZE: u32 = 64; -pub const _BITS_WCHAR_H: u32 = 1; -pub const INT8_MIN: i32 = -128; -pub const INT16_MIN: i32 = -32768; -pub const INT32_MIN: i32 = -2147483648; -pub const INT8_MAX: u32 = 127; -pub const INT16_MAX: u32 = 32767; -pub const INT32_MAX: u32 = 2147483647; -pub const UINT8_MAX: u32 = 255; -pub const UINT16_MAX: u32 = 65535; -pub const UINT32_MAX: u32 = 4294967295; -pub const INT_LEAST8_MIN: i32 = -128; -pub const INT_LEAST16_MIN: i32 = -32768; -pub const INT_LEAST32_MIN: i32 = -2147483648; -pub const INT_LEAST8_MAX: u32 = 127; -pub const INT_LEAST16_MAX: u32 = 32767; -pub const INT_LEAST32_MAX: u32 = 2147483647; -pub const UINT_LEAST8_MAX: u32 = 255; -pub const UINT_LEAST16_MAX: u32 = 65535; -pub const UINT_LEAST32_MAX: u32 = 4294967295; -pub const INT_FAST8_MIN: i32 = -128; -pub const INT_FAST16_MIN: i64 = -9223372036854775808; -pub const INT_FAST32_MIN: i64 = -9223372036854775808; -pub const INT_FAST8_MAX: u32 = 127; -pub const INT_FAST16_MAX: u64 = 9223372036854775807; -pub const INT_FAST32_MAX: u64 = 9223372036854775807; -pub const UINT_FAST8_MAX: u32 = 255; -pub const UINT_FAST16_MAX: i32 = -1; -pub const UINT_FAST32_MAX: i32 = -1; -pub const INTPTR_MIN: i64 = -9223372036854775808; -pub const INTPTR_MAX: u64 = 9223372036854775807; -pub const UINTPTR_MAX: i32 = -1; -pub const PTRDIFF_MIN: i64 = -9223372036854775808; -pub const PTRDIFF_MAX: u64 = 9223372036854775807; -pub const SIG_ATOMIC_MIN: i32 = -2147483648; -pub const SIG_ATOMIC_MAX: u32 = 2147483647; -pub const SIZE_MAX: i32 = -1; -pub const WINT_MIN: u32 = 0; -pub const WINT_MAX: u32 = 4294967295; -pub const TSDB_DATA_TYPE_NULL: u32 = 0; -pub const TSDB_DATA_TYPE_BOOL: u32 = 1; -pub const TSDB_DATA_TYPE_TINYINT: u32 = 2; -pub const TSDB_DATA_TYPE_SMALLINT: u32 = 3; -pub const TSDB_DATA_TYPE_INT: u32 = 4; -pub const TSDB_DATA_TYPE_BIGINT: u32 = 5; -pub const TSDB_DATA_TYPE_FLOAT: u32 = 6; -pub const TSDB_DATA_TYPE_DOUBLE: u32 = 7; -pub const TSDB_DATA_TYPE_BINARY: u32 = 8; -pub const TSDB_DATA_TYPE_TIMESTAMP: u32 = 9; -pub const TSDB_DATA_TYPE_NCHAR: u32 = 10; -pub type int_least8_t = ::std::os::raw::c_schar; -pub type int_least16_t = ::std::os::raw::c_short; -pub type int_least32_t = ::std::os::raw::c_int; -pub type int_least64_t = ::std::os::raw::c_long; -pub type uint_least8_t = ::std::os::raw::c_uchar; -pub type uint_least16_t = ::std::os::raw::c_ushort; -pub type uint_least32_t = ::std::os::raw::c_uint; -pub type uint_least64_t = ::std::os::raw::c_ulong; -pub type int_fast8_t = ::std::os::raw::c_schar; -pub type int_fast16_t = ::std::os::raw::c_long; -pub type int_fast32_t = ::std::os::raw::c_long; -pub type int_fast64_t = ::std::os::raw::c_long; -pub type uint_fast8_t = ::std::os::raw::c_uchar; -pub type uint_fast16_t = ::std::os::raw::c_ulong; -pub type uint_fast32_t = ::std::os::raw::c_ulong; -pub type uint_fast64_t = ::std::os::raw::c_ulong; -pub type intmax_t = ::std::os::raw::c_long; -pub type uintmax_t = ::std::os::raw::c_ulong; -pub const TSDB_OPTION_TSDB_OPTION_LOCALE: TSDB_OPTION = 0; -pub const TSDB_OPTION_TSDB_OPTION_CHARSET: TSDB_OPTION = 1; -pub const TSDB_OPTION_TSDB_OPTION_TIMEZONE: TSDB_OPTION = 
2; -pub const TSDB_OPTION_TSDB_OPTION_CONFIGDIR: TSDB_OPTION = 3; -pub const TSDB_OPTION_TSDB_OPTION_SHELL_ACTIVITY_TIMER: TSDB_OPTION = 4; -pub const TSDB_OPTION_TSDB_MAX_OPTIONS: TSDB_OPTION = 5; -pub type TSDB_OPTION = u32; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct taosField { - pub name: [::std::os::raw::c_char; 64usize], - pub bytes: ::std::os::raw::c_short, - pub type_: ::std::os::raw::c_char, -} -#[test] -fn bindgen_test_layout_taosField() { - assert_eq!( - ::std::mem::size_of::(), - 68usize, - concat!("Size of: ", stringify!(taosField)) - ); - assert_eq!( - ::std::mem::align_of::(), - 2usize, - concat!("Alignment of ", stringify!(taosField)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).name as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(taosField), - "::", - stringify!(name) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).bytes as *const _ as usize }, - 64usize, - concat!( - "Offset of field: ", - stringify!(taosField), - "::", - stringify!(bytes) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).type_ as *const _ as usize }, - 66usize, - concat!( - "Offset of field: ", - stringify!(taosField), - "::", - stringify!(type_) - ) - ); -} -pub type TAOS_FIELD = taosField; -extern "C" { - pub fn taos_init(); -} -extern "C" { - pub fn taos_options( - option: TSDB_OPTION, - arg: *const ::std::os::raw::c_void, - ... - ) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn taos_connect( - ip: *mut ::std::os::raw::c_char, - user: *mut ::std::os::raw::c_char, - pass: *mut ::std::os::raw::c_char, - db: *mut ::std::os::raw::c_char, - port: ::std::os::raw::c_int, - ) -> *mut ::std::os::raw::c_void; -} -extern "C" { - pub fn taos_close(taos: *mut ::std::os::raw::c_void); -} -extern "C" { - pub fn taos_query( - taos: *mut ::std::os::raw::c_void, - sqlstr: *mut ::std::os::raw::c_char, - ) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn taos_use_result(taos: *mut ::std::os::raw::c_void) -> *mut ::std::os::raw::c_void; -} -extern "C" { - pub fn taos_fetch_row(res: *mut ::std::os::raw::c_void) -> *mut *mut ::std::os::raw::c_void; -} -extern "C" { - pub fn taos_result_precision(res: *mut ::std::os::raw::c_void) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn taos_free_result(res: *mut ::std::os::raw::c_void); -} -extern "C" { - pub fn taos_field_count(taos: *mut ::std::os::raw::c_void) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn taos_num_fields(res: *mut ::std::os::raw::c_void) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn taos_affected_rows(taos: *mut ::std::os::raw::c_void) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn taos_fetch_fields(res: *mut ::std::os::raw::c_void) -> *mut TAOS_FIELD; -} -extern "C" { - pub fn taos_select_db( - taos: *mut ::std::os::raw::c_void, - db: *mut ::std::os::raw::c_char, - ) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn taos_print_row( - str: *mut ::std::os::raw::c_char, - row: *mut *mut ::std::os::raw::c_void, - fields: *mut TAOS_FIELD, - num_fields: ::std::os::raw::c_int, - ) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn taos_stop_query(res: *mut ::std::os::raw::c_void); -} -extern "C" { - pub fn taos_fetch_block( - res: *mut ::std::os::raw::c_void, - rows: *mut *mut *mut ::std::os::raw::c_void, - ) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn taos_validate_sql( - taos: *mut ::std::os::raw::c_void, - sql: *mut ::std::os::raw::c_char, - ) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn taos_get_server_info(taos: *mut ::std::os::raw::c_void) -> 
*mut ::std::os::raw::c_char; -} -extern "C" { - pub fn taos_get_client_info() -> *mut ::std::os::raw::c_char; -} -extern "C" { - pub fn taos_errstr(taos: *mut ::std::os::raw::c_void) -> *mut ::std::os::raw::c_char; -} -extern "C" { - pub fn taos_errno(taos: *mut ::std::os::raw::c_void) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn taos_query_a( - taos: *mut ::std::os::raw::c_void, - sqlstr: *mut ::std::os::raw::c_char, - fp: ::std::option::Option< - unsafe extern "C" fn( - param: *mut ::std::os::raw::c_void, - arg1: *mut ::std::os::raw::c_void, - code: ::std::os::raw::c_int, - ), - >, - param: *mut ::std::os::raw::c_void, - ); -} -extern "C" { - pub fn taos_fetch_rows_a( - res: *mut ::std::os::raw::c_void, - fp: ::std::option::Option< - unsafe extern "C" fn( - param: *mut ::std::os::raw::c_void, - arg1: *mut ::std::os::raw::c_void, - numOfRows: ::std::os::raw::c_int, - ), - >, - param: *mut ::std::os::raw::c_void, - ); -} -extern "C" { - pub fn taos_fetch_row_a( - res: *mut ::std::os::raw::c_void, - fp: ::std::option::Option< - unsafe extern "C" fn( - param: *mut ::std::os::raw::c_void, - arg1: *mut ::std::os::raw::c_void, - row: *mut *mut ::std::os::raw::c_void, - ), - >, - param: *mut ::std::os::raw::c_void, - ); -} -extern "C" { - pub fn taos_subscribe( - host: *mut ::std::os::raw::c_char, - user: *mut ::std::os::raw::c_char, - pass: *mut ::std::os::raw::c_char, - db: *mut ::std::os::raw::c_char, - table: *mut ::std::os::raw::c_char, - time: i64, - mseconds: ::std::os::raw::c_int, - ) -> *mut ::std::os::raw::c_void; -} -extern "C" { - pub fn taos_consume(tsub: *mut ::std::os::raw::c_void) -> *mut *mut ::std::os::raw::c_void; -} -extern "C" { - pub fn taos_unsubscribe(tsub: *mut ::std::os::raw::c_void); -} -extern "C" { - pub fn taos_open_stream( - taos: *mut ::std::os::raw::c_void, - sqlstr: *mut ::std::os::raw::c_char, - fp: ::std::option::Option< - unsafe extern "C" fn( - param: *mut ::std::os::raw::c_void, - arg1: *mut ::std::os::raw::c_void, - row: *mut *mut ::std::os::raw::c_void, - ), - >, - stime: i64, - param: *mut ::std::os::raw::c_void, - callback: ::std::option::Option, - ) -> *mut ::std::os::raw::c_void; -} -extern "C" { - pub fn taos_close_stream(tstr: *mut ::std::os::raw::c_void); -} -extern "C" { - pub static mut configDir: [::std::os::raw::c_char; 0usize]; -} diff --git a/tests/examples/rust/src/lib.rs b/tests/examples/rust/src/lib.rs deleted file mode 100644 index fe7216dfd06519b50a2fe5fdb226a0b674f8850d..0000000000000000000000000000000000000000 --- a/tests/examples/rust/src/lib.rs +++ /dev/null @@ -1,10 +0,0 @@ -#![allow(unused)] -#![allow(non_camel_case_types)] - -pub mod subscriber; -pub use subscriber::*; - -pub mod tdengine; -pub use tdengine::*; - -pub mod utils; \ No newline at end of file diff --git a/tests/examples/rust/src/subscriber.rs b/tests/examples/rust/src/subscriber.rs deleted file mode 100644 index 78c6f5cd8d036be537da11f34f829d48750d2a73..0000000000000000000000000000000000000000 --- a/tests/examples/rust/src/subscriber.rs +++ /dev/null @@ -1,77 +0,0 @@ -#![allow(non_camel_case_types)] -#![allow(non_snake_case)] - -#[path = "utils.rs"] -mod utils; -use utils::*; -use utils::bindings::*; - -use std::os::raw::{c_void, c_char, c_int, c_long}; - -pub struct Subscriber { - tsub: *mut c_void, - fields: *mut taosField, - fcount: c_int, -} - -impl Subscriber { - pub fn new(host: &str, - username: &str, - passwd: &str, - db: &str, - table:&str, - time: i64, - mseconds: i32 - ) -> Result { - unsafe { - let mut tsub = taos_subscribe(str_into_raw(host), - 
str_into_raw(username), - str_into_raw(passwd), - str_into_raw(db), - str_into_raw(table), - time as c_long, - mseconds as c_int); - if tsub.is_null() { - return Err("subscribe error") - } - println!("subscribed to {} user:{}, db:{}, tb:{}, time:{}, mseconds:{}", - host, username, db, table, time, mseconds); - - let mut fields = taos_fetch_fields(tsub); - if fields.is_null() { - taos_unsubscribe(tsub); - return Err("fetch fields error") - } - - let fcount = taos_field_count(tsub); - if fcount == 0 { - taos_unsubscribe(tsub); - return Err("fields count is 0") - } - - Ok(Subscriber{tsub, fields, fcount}) - } - } - - pub fn consume(self: &Subscriber) -> Result { - unsafe { - let taosRow = taos_consume(self.tsub); - if taosRow.is_null() { - return Err("consume error") - } - let taosRow= std::slice::from_raw_parts(taosRow, self.fcount as usize); - let row = raw_into_row(self.fields, self.fcount, &taosRow); - Ok(row) - } - } - - pub fn print_row(self: &Subscriber, row: &Row) { - println!("{}", format_row(row)); - } -} - -impl Drop for Subscriber { - fn drop(&mut self) { - unsafe {taos_unsubscribe(self.tsub);} - } -} diff --git a/tests/examples/rust/src/tdengine.rs b/tests/examples/rust/src/tdengine.rs deleted file mode 100644 index 41225d52e0fc7e985a227f52cf2a2e9e2874f9b8..0000000000000000000000000000000000000000 --- a/tests/examples/rust/src/tdengine.rs +++ /dev/null @@ -1,65 +0,0 @@ -#[path = "bindings.rs"] -mod bindings; -use bindings::*; - -#[path = "utils.rs"] -mod utils; -use utils::*; - -use std::os::raw::c_void; -use std::os::raw::c_char; -use std::os::raw::c_int; -use std::os::raw::c_long; - -pub struct Tdengine { - conn: *mut c_void, -} - -/// - **TODO**: doc -impl Tdengine { - - //! - **TODO**: implement default param. - //! - //! > refer to https://stackoverflow.com/questions/24047686/default-function-arguments-in-rust - pub fn new(ip: &str, username: &str, passwd: &str, db: &str, port: i32) -> Result { - unsafe { - taos_init(); - let mut conn = taos_connect(str_into_raw(ip), - str_into_raw(username), - str_into_raw(passwd), - str_into_raw(db), - port as c_int); - if conn.is_null() { - Err("connect error") - } else { - println!("connected to {}:{} user:{}, db:{}", ip, port, username, db); - Ok(Tdengine {conn}) - } - } - } - - // - **TODO**: check error code - pub fn query(self: &Tdengine, s: &str) { - unsafe { - if taos_query(self.conn, str_into_raw(s)) == 0 { - println!("query '{}' ok", s); - } else { - println!("query '{}' error: {}", s, raw_into_str(taos_errstr(self.conn))); - } - } - } -} - -impl Drop for Tdengine { - fn drop(&mut self) { - unsafe {taos_close(self.conn);} - } -} - -#[cfg(test)] -mod tests { - #[test] - fn it_works() { - assert_eq!(2 + 2, 4); - } -} \ No newline at end of file diff --git a/tests/examples/rust/src/utils.rs b/tests/examples/rust/src/utils.rs deleted file mode 100644 index 2875507275c69cf19ca01ab0190e343196537d3e..0000000000000000000000000000000000000000 --- a/tests/examples/rust/src/utils.rs +++ /dev/null @@ -1,127 +0,0 @@ -#[path = "bindings.rs"] -pub mod bindings; -use bindings::*; - -use std::fmt; -use std::fmt::Display; -use std::os::raw::{c_void, c_char, c_int}; -use std::ffi::{CString, CStr}; - -// #[derive(Debug)] -pub enum Field { - tinyInt(i8), - smallInt(i16), - normalInt(i32), - bigInt(i64), - float(f32), - double(f64), - binary(String), - timeStamp(i64), - boolType(bool), -} - - -impl fmt::Display for Field { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match &*self { - Field::tinyInt(v) => write!(f, "{}", v), - 
Field::smallInt(v) => write!(f, "{}", v), - Field::normalInt(v) => write!(f, "{}", v), - Field::bigInt(v) => write!(f, "{}", v), - Field::float(v) => write!(f, "{}", v), - Field::double(v) => write!(f, "{}", v), - Field::binary(v) => write!(f, "{}", v), - Field::tinyInt(v) => write!(f, "{}", v), - Field::timeStamp(v) => write!(f, "{}", v), - Field::boolType(v) => write!(f, "{}", v), - } - } -} - -// pub type Fields = Vec; -pub type Row = Vec; - -pub fn format_row(row: &Row) -> String { - let mut s = String::new(); - for field in row { - s.push_str(format!("{} ", field).as_str()); - // println!("{}", field); - } - s -} - -pub fn str_into_raw(s: &str) -> *mut c_char { - if s.is_empty() { - 0 as *mut c_char - } else { - CString::new(s).unwrap().into_raw() - } -} - -pub fn raw_into_str<'a>(raw: *mut c_char) -> &'static str { - unsafe {CStr::from_ptr(raw).to_str().unwrap()} -} - - -pub fn raw_into_field(raw: *mut TAOS_FIELD, fcount: c_int) -> Vec { - let mut fields: Vec = Vec::new(); - - for i in 0..fcount as isize { - fields.push( - taosField { - name: unsafe {(*raw.offset(i as isize))}.name, - bytes: unsafe {(*raw.offset(i as isize))}.bytes, - type_: unsafe {(*raw.offset(i as isize))}.type_, - } - ); - } - - /// TODO: error[E0382]: use of moved value: `fields` - // for field in &fields { - // println!("type: {}, bytes: {}", field.type_, field.bytes); - // } - - fields -} - - pub fn raw_into_row(fields: *mut TAOS_FIELD, fcount: c_int, raw_row: &[*mut c_void]) -> Row { - let mut row: Row= Vec::new(); - let fields = raw_into_field(fields, fcount); - - for (i, field) in fields.iter().enumerate() { - // println!("index: {}, type: {}, bytes: {}", i, field.type_, field.bytes); - unsafe { - match field.type_ as u32 { - TSDB_DATA_TYPE_TINYINT => { - row.push(Field::tinyInt(*(raw_row[i] as *mut i8))); - } - TSDB_DATA_TYPE_SMALLINT => { - row.push(Field::smallInt(*(raw_row[i] as *mut i16))); - } - TSDB_DATA_TYPE_INT => { - row.push(Field::normalInt(*(raw_row[i] as *mut i32))); - } - TSDB_DATA_TYPE_BIGINT => { - row.push(Field::bigInt(*(raw_row[i] as *mut i64))); - } - TSDB_DATA_TYPE_FLOAT => { - row.push(Field::float(*(raw_row[i] as *mut f32))); - } - TSDB_DATA_TYPE_DOUBLE => { - row.push(Field::double(*(raw_row[i] as *mut f64))); - } - TSDB_DATA_TYPE_BINARY | TSDB_DATA_TYPE_NCHAR => { - // row.push(Field::binary(*(raw_row[i] as *mut f64))); - } - TSDB_DATA_TYPE_TIMESTAMP => { - row.push(Field::timeStamp(*(raw_row[i] as *mut i64))); - } - TSDB_DATA_TYPE_BOOL => { - // row.push(Field::boolType(*(raw_row[i] as *mut i8) as bool)); - } - _ => println!(""), - } - } - } - row - } \ No newline at end of file diff --git a/tests/gotest/batchtest.bat b/tests/gotest/batchtest.bat old mode 100644 new mode 100755 index abe9a58f319068d5e11017abcd721a4c54d6aca9..efd8961bb0be2eb6f20e291114b92b00469b984f --- a/tests/gotest/batchtest.bat +++ b/tests/gotest/batchtest.bat @@ -7,6 +7,9 @@ set serverPort=%2 if "%severIp%"=="" (set severIp=127.0.0.1) if "%serverPort%"=="" (set serverPort=6030) +go env -w GO111MODULE=on +go env -w GOPROXY=https://goproxy.io,direct + cd case001 case001.bat %severIp% %serverPort% diff --git a/tests/gotest/batchtest.sh b/tests/gotest/batchtest.sh old mode 100644 new mode 100755 index e8ed9ecbed9f70c98e6b5db052c3e69082c1794d..0fbbf40714b3349651beea9302e66628b31a22ac --- a/tests/gotest/batchtest.sh +++ b/tests/gotest/batchtest.sh @@ -13,6 +13,9 @@ if [ ! 
-n "$serverPort" ]; then serverPort=6030 fi +go env -w GO111MODULE=on +go env -w GOPROXY=https://goproxy.io,direct + bash ./case001/case001.sh $severIp $serverPort #bash ./case002/case002.sh $severIp $serverPort #bash ./case003/case003.sh $severIp $serverPort diff --git a/tests/perftest-scripts/perftest-query.sh b/tests/perftest-scripts/perftest-query.sh new file mode 100755 index 0000000000000000000000000000000000000000..51bb9b36c3c55802fa904de223c2ae4ea2ea7151 --- /dev/null +++ b/tests/perftest-scripts/perftest-query.sh @@ -0,0 +1,93 @@ +#!/bin/bash + +today=`date +"%Y%m%d"` +WORK_DIR=/home/ubuntu/pxiao +PERFORMANCE_TEST_REPORT=$WORK_DIR/TDengine/tests/performance-test-report-$today.log + +# Coloured Echoes # +function red_echo { echo -e "\033[31m$@\033[0m"; } # +function green_echo { echo -e "\033[32m$@\033[0m"; } # +function yellow_echo { echo -e "\033[33m$@\033[0m"; } # +function white_echo { echo -e "\033[1;37m$@\033[0m"; } # +# Coloured Printfs # +function red_printf { printf "\033[31m$@\033[0m"; } # +function green_printf { printf "\033[32m$@\033[0m"; } # +function yellow_printf { printf "\033[33m$@\033[0m"; } # +function white_printf { printf "\033[1;37m$@\033[0m"; } # +# Debugging Outputs # +function white_brackets { local args="$@"; white_printf "["; printf "${args}"; white_printf "]"; } # +function echoInfo { local args="$@"; white_brackets $(green_printf "INFO") && echo " ${args}"; } # +function echoWarn { local args="$@"; echo "$(white_brackets "$(yellow_printf "WARN")" && echo " ${args}";)" 1>&2; } # +function echoError { local args="$@"; echo "$(white_brackets "$(red_printf "ERROR")" && echo " ${args}";)" 1>&2; } # + + +function stopTaosd { + echo "Stop taosd" + systemctl stop taosd + snap stop tdengine + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + while [ -n "$PID" ] + do + pkill -TERM -x taosd + sleep 1 + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + done +} + +function buildTDengine { + echoInfo "Build TDengine" + cd $WORK_DIR/TDengine + + git remote update > /dev/null + REMOTE_COMMIT=`git rev-parse --short remotes/origin/develop` + LOCAL_COMMIT=`git rev-parse --short @` + + echo " LOCAL: $LOCAL_COMMIT" + echo "REMOTE: $REMOTE_COMMIT" + if [ "$LOCAL_COMMIT" == "$REMOTE_COMMIT" ]; then + echo "repo up-to-date" + else + echo "repo need to pull" + git pull > /dev/null + + LOCAL_COMMIT=`git rev-parse --short @` + cd debug + rm -rf * + cmake .. > /dev/null + make > /dev/null + make install + fi +} + +function runQueryPerfTest { + [ -f $PERFORMANCE_TEST_REPORT ] && rm $PERFORMANCE_TEST_REPORT + nohup $WORK_DIR/TDengine/debug/build/bin/taosd -c /etc/taosperf/ > /dev/null 2>&1 & + echoInfo "Run Performance Test" + cd $WORK_DIR/TDengine/tests/pytest + + python3 query/queryPerformance.py 0 | tee -a $PERFORMANCE_TEST_REPORT +} + + +function sendReport { + echo "send report" + receiver="pxiao@taosdata.com" + mimebody="MIME-Version: 1.0\nContent-Type: text/html; charset=utf-8\n" + + cd $TDENGINE_DIR + + sed -i 's/\x1b\[[0-9;]*m//g' $PERFORMANCE_TEST_REPORT + BODY_CONTENT=`cat $PERFORMANCE_TEST_REPORT` + echo -e "From: \nto: ${receiver}\nsubject: Query Performace Report ${today}, commit ID: ${LOCAL_COMMIT}\n\n${today}:\n${BODY_CONTENT}" | \ + (cat - && uuencode $PERFORMANCE_TEST_REPORT performance-test-report-$today.log) | \ + /usr/sbin/ssmtp "${receiver}" && echo "Report Sent!" 
+} + + +stopTaosd +buildTDengine +runQueryPerfTest + +echoInfo "Send Report" +sendReport +echoInfo "End of Test" diff --git a/tests/pytest/alter/db_update_options.py b/tests/pytest/alter/db_update_options.py new file mode 100644 index 0000000000000000000000000000000000000000..224e0f25b074deed55802b9ba847f7f845716a23 --- /dev/null +++ b/tests/pytest/alter/db_update_options.py @@ -0,0 +1,71 @@ + +# -*- coding: utf-8 -*- + +import random +import string +import subprocess +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + def run(self): + tdLog.debug("check database") + tdSql.prepare() + + # check default update value + sql = "create database if not exists db" + tdSql.execute(sql) + tdSql.query('show databases') + tdSql.checkRows(1) + tdSql.checkData(0,16,0) + + sql = "alter database db update 1" + + # check update value + tdSql.execute(sql) + tdSql.query('show databases') + tdSql.checkRows(1) + tdSql.checkData(0,16,1) + + + sql = "alter database db update 0" + tdSql.execute(sql) + tdSql.query('show databases') + tdSql.checkRows(1) + tdSql.checkData(0,16,0) + + sql = "alter database db update -1" + tdSql.error(sql) + + sql = "alter database db update 100" + tdSql.error(sql) + + tdSql.query('show databases') + tdSql.checkRows(1) + tdSql.checkData(0,16,0) + + tdSql.execute('drop database db') + tdSql.error('create database db update 100') + tdSql.error('create database db update -1') + + tdSql.execute('create database db update 1') + + tdSql.query('show databases') + tdSql.checkRows(1) + tdSql.checkData(0,16,1) + + tdSql.execute('drop database db') + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/cluster/bananceTest.py b/tests/pytest/cluster/bananceTest.py new file mode 100644 index 0000000000000000000000000000000000000000..ef25afa7d2f7ea3b5358f8ba74d6702d28d54c85 --- /dev/null +++ b/tests/pytest/cluster/bananceTest.py @@ -0,0 +1,57 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from clusterSetup import * +from util.sql import tdSql +from util.log import tdLog +import random +import time + +class ClusterTestcase: + + ## test case 32 ## + def run(self): + + nodes = Nodes() + nodes.addConfigs("maxVgroupsPerDb", "10") + nodes.addConfigs("maxTablesPerVnode", "1000") + nodes.restartAllTaosd() + + ctest = ClusterTest(nodes.node1.hostName) + ctest.connectDB() + ctest.createSTable(1) + ctest.run() + tdSql.init(ctest.conn.cursor(), False) + + tdSql.execute("use %s" % ctest.dbName) + tdSql.query("show vgroups") + dnodes = [] + for i in range(10): + dnodes.append(int(tdSql.getData(i, 4))) + + s = set(dnodes) + if len(s) < 3: + tdLog.exit("cluster is not balanced") + + tdLog.info("cluster is balanced") + + nodes.removeConfigs("maxVgroupsPerDb", "10") + nodes.removeConfigs("maxTablesPerVnode", "1000") + nodes.restartAllTaosd() + + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +ct = ClusterTestcase() +ct.run() diff --git a/tests/pytest/cluster/basicTest.py b/tests/pytest/cluster/basicTest.py new file mode 100644 index 0000000000000000000000000000000000000000..b990d7fd982a490383939707a32635d37e546b13 --- /dev/null +++ b/tests/pytest/cluster/basicTest.py @@ -0,0 +1,47 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from clusterSetup import * +from util.sql import tdSql +from util.log import tdLog +import random + +class ClusterTestcase: + + ## test case 1, 33 ## + def run(self): + + nodes = Nodes() + ctest = ClusterTest(nodes.node1.hostName) + + ctest.connectDB() + tdSql.init(ctest.conn.cursor(), False) + + ## Test case 1 ## + tdLog.info("Test case 1 repeat %d times" % ctest.repeat) + for i in range(ctest.repeat): + tdLog.info("Start Round %d" % (i + 1)) + replica = random.randint(1,3) + ctest.createSTable(replica) + ctest.run() + tdLog.sleep(10) + tdSql.query("select count(*) from %s.%s" %(ctest.dbName, ctest.stbName)) + tdSql.checkData(0, 0, ctest.numberOfRecords * ctest.numberOfTables) + tdLog.info("Round %d completed" % (i + 1)) + + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +ct = ClusterTestcase() +ct.run() \ No newline at end of file diff --git a/tests/pytest/cluster/changeReplicaTest.py b/tests/pytest/cluster/changeReplicaTest.py new file mode 100644 index 0000000000000000000000000000000000000000..7fa68edbfee2db599076befdf9bed5f4b4be3c83 --- /dev/null +++ b/tests/pytest/cluster/changeReplicaTest.py @@ -0,0 +1,51 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from clusterSetup import * +from util.sql import tdSql +from util.log import tdLog +import random + +class ClusterTestcase: + + ## test case 7, ## + def run(self): + + nodes = Nodes() + ctest = ClusterTest(nodes.node1.hostName) + ctest.connectDB() + tdSql.init(ctest.conn.cursor(), False) + + tdSql.execute("use %s" % ctest.dbName) + tdSql.query("show vgroups") + for i in range(10): + tdSql.checkData(i, 5, "master") + + tdSql.execute("alter database %s replica 2" % ctest.dbName) + tdLog.sleep(30) + tdSql.query("show vgroups") + for i in range(10): + tdSql.checkData(i, 5, "master") + tdSql.checkData(i, 7, "slave") + + tdSql.execute("alter database %s replica 3" % ctest.dbName) + tdLog.sleep(30) + tdSql.query("show vgroups") + for i in range(10): + tdSql.checkData(i, 5, "master") + tdSql.checkData(i, 7, "slave") + tdSql.checkData(i, 9, "slave") + +ct = ClusterTestcase() +ct.run() \ No newline at end of file diff --git a/tests/pytest/cluster/clusterSetup.py b/tests/pytest/cluster/clusterSetup.py new file mode 100644 index 0000000000000000000000000000000000000000..36af8ac42e56e1b8a7ab2237305a6bf286103552 --- /dev/null +++ b/tests/pytest/cluster/clusterSetup.py @@ -0,0 +1,202 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import os +import sys +sys.path.insert(0, os.getcwd()) +from fabric import Connection +from util.sql import * +from util.log import * +import taos +import random +import threading +import logging + +class Node: + def __init__(self, index, username, hostIP, hostName, password, homeDir): + self.index = index + self.username = username + self.hostIP = hostIP + self.hostName = hostName + self.homeDir = homeDir + self.conn = Connection("{}@{}".format(username, hostName), connect_kwargs={"password": "{}".format(password)}) + + def startTaosd(self): + try: + self.conn.run("sudo systemctl start taosd") + except Exception as e: + print("Start Taosd error for node %d " % self.index) + logging.exception(e) + + def stopTaosd(self): + try: + self.conn.run("sudo systemctl stop taosd") + except Exception as e: + print("Stop Taosd error for node %d " % self.index) + logging.exception(e) + + def restartTaosd(self): + try: + self.conn.run("sudo systemctl restart taosd") + except Exception as e: + print("Stop Taosd error for node %d " % self.index) + logging.exception(e) + + def removeTaosd(self): + try: + self.conn.run("rmtaos") + except Exception as e: + print("remove taosd error for node %d " % self.index) + logging.exception(e) + + def installTaosd(self, packagePath): + self.conn.put(packagePath, self.homeDir) + self.conn.cd(self.homeDir) + self.conn.run("tar -zxf $(basename '%s')" % packagePath) + with self.conn.cd("TDengine-enterprise-server"): + self.conn.run("yes|./install.sh") + + def configTaosd(self, taosConfigKey, taosConfigValue): 
+ self.conn.run("sudo echo '%s %s' >> %s" % (taosConfigKey, taosConfigValue, "/etc/taos/taos.cfg")) + + def removeTaosConfig(self, taosConfigKey, taosConfigValue): + self.conn.run("sudo sed -in-place -e '/%s %s/d' %s" % (taosConfigKey, taosConfigValue, "/etc/taos/taos.cfg")) + + def configHosts(self, ip, name): + self.conn.run("echo '%s %s' >> %s" % (ip, name, '/etc/hosts')) + + def removeData(self): + try: + self.conn.run("sudo rm -rf /var/lib/taos/*") + except Exception as e: + print("remove taosd data error for node %d " % self.index) + logging.exception(e) + + def removeLog(self): + try: + self.conn.run("sudo rm -rf /var/log/taos/*") + except Exception as e: + print("remove taosd error for node %d " % self.index) + logging.exception(e) + + def removeDataForMnode(self): + try: + self.conn.run("sudo rm -rf /var/lib/taos/*") + except Exception as e: + print("remove taosd error for node %d " % self.index) + logging.exception(e) + + def removeDataForVnode(self, id): + try: + self.conn.run("sudo rm -rf /var/lib/taos/vnode%d/*.data" % id) + except Exception as e: + print("remove taosd error for node %d " % self.index) + logging.exception(e) + +class Nodes: + def __init__(self): + self.node1 = Node(1, 'ubuntu', '192.168.1.52', 'node1', 'tbase125!', '/home/ubuntu') + self.node2 = Node(2, 'ubuntu', '192.168.1.53', 'node2', 'tbase125!', '/home/ubuntu') + self.node3 = Node(3, 'ubuntu', '192.168.1.54', 'node3', 'tbase125!', '/home/ubuntu') + + def stopAllTaosd(self): + self.node1.stopTaosd() + self.node2.stopTaosd() + self.node3.stopTaosd() + + def startAllTaosd(self): + self.node1.startTaosd() + self.node2.startTaosd() + self.node3.startTaosd() + + def restartAllTaosd(self): + self.node1.restartTaosd() + self.node2.restartTaosd() + self.node3.restartTaosd() + + def addConfigs(self, configKey, configValue): + self.node1.configTaosd(configKey, configValue) + self.node2.configTaosd(configKey, configValue) + self.node3.configTaosd(configKey, configValue) + + def removeConfigs(self, configKey, configValue): + self.node1.removeTaosConfig(configKey, configValue) + self.node2.removeTaosConfig(configKey, configValue) + self.node3.removeTaosConfig(configKey, configValue) + + def removeAllDataFiles(self): + self.node1.removeData() + self.node2.removeData() + self.node3.removeData() + +class ClusterTest: + def __init__(self, hostName): + self.host = hostName + self.user = "root" + self.password = "taosdata" + self.config = "/etc/taos" + self.dbName = "mytest" + self.stbName = "meters" + self.numberOfThreads = 20 + self.numberOfTables = 10000 + self.numberOfRecords = 1000 + self.tbPrefix = "t" + self.ts = 1538548685000 + self.repeat = 1 + + def connectDB(self): + self.conn = taos.connect( + host=self.host, + user=self.user, + password=self.password, + config=self.config) + + def createSTable(self, replica): + cursor = self.conn.cursor() + tdLog.info("drop database if exists %s" % self.dbName) + cursor.execute("drop database if exists %s" % self.dbName) + tdLog.info("create database %s replica %d" % (self.dbName, replica)) + cursor.execute("create database %s replica %d" % (self.dbName, replica)) + tdLog.info("use %s" % self.dbName) + cursor.execute("use %s" % self.dbName) + tdLog.info("drop table if exists %s" % self.stbName) + cursor.execute("drop table if exists %s" % self.stbName) + tdLog.info("create table %s(ts timestamp, current float, voltage int, phase int) tags(id int)" % self.stbName) + cursor.execute("create table %s(ts timestamp, current float, voltage int, phase int) tags(id int)" % self.stbName) 
+ cursor.close() + + def insertData(self, threadID): + print("Thread %d: starting" % threadID) + cursor = self.conn.cursor() + tablesPerThread = int(self.numberOfTables / self.numberOfThreads) + baseTableID = tablesPerThread * threadID + for i in range (tablesPerThread): + cursor.execute("create table %s%d using %s tags(%d)" % (self.tbPrefix, baseTableID + i, self.stbName, baseTableID + i)) + query = "insert into %s%d values" % (self.tbPrefix, baseTableID + i) + base = self.numberOfRecords * i + for j in range(self.numberOfRecords): + query += "(%d, %f, %d, %d)" % (self.ts + base + j, random.random(), random.randint(210, 230), random.randint(0, 10)) + cursor.execute(query) + cursor.close() + print("Thread %d: finishing" % threadID) + + def run(self): + threads = [] + tdLog.info("Inserting data") + for i in range(self.numberOfThreads): + thread = threading.Thread(target=self.insertData, args=(i,)) + threads.append(thread) + thread.start() + + for i in range(self.numberOfThreads): + threads[i].join() \ No newline at end of file diff --git a/tests/pytest/cluster/dataFileRecoveryTest.py b/tests/pytest/cluster/dataFileRecoveryTest.py new file mode 100644 index 0000000000000000000000000000000000000000..089d3fffc1499a8d9cafc87a8d94252111fcd604 --- /dev/null +++ b/tests/pytest/cluster/dataFileRecoveryTest.py @@ -0,0 +1,53 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from clusterSetup import * +from util.sql import tdSql +from util.log import tdLog +import random + +class ClusterTestcase: + + ## test case 20, 21, 22 ## + def run(self): + + nodes = Nodes() + ctest = ClusterTest(nodes.node1.hostName) + ctest.connectDB() + ctest.createSTable(3) + ctest.run() + tdSql.init(ctest.conn.cursor(), False) + + nodes.node2.stopTaosd() + tdSql.execute("use %s" % ctest.dbName) + tdSql.query("show vgroups") + vnodeID = tdSql.getData(0, 0) + nodes.node2.removeDataForVnode(vnodeID) + nodes.node2.startTaosd() + + # Wait for vnode file to recover + for i in range(10): + tdSql.query("select count(*) from t0") + + tdLog.sleep(10) + + for i in range(10): + tdSql.query("select count(*) from t0") + tdSql.checkData(0, 0, 1000) + + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +ct = ClusterTestcase() +ct.run() diff --git a/tests/pytest/cluster/fullDnodesTest.py b/tests/pytest/cluster/fullDnodesTest.py new file mode 100644 index 0000000000000000000000000000000000000000..3c4b10d97a24dfbb156122aa0afdbb5d22ce3941 --- /dev/null +++ b/tests/pytest/cluster/fullDnodesTest.py @@ -0,0 +1,47 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from clusterSetup import * +from util.sql import tdSql +from util.log import tdLog +import random + +class ClusterTestcase: + + ##Cover test case 5 ## + def run(self): + # cluster environment set up + nodes = Nodes() + nodes.addConfigs("maxVgroupsPerDb", "10") + nodes.addConfigs("maxTablesPerVnode", "1000") + nodes.restartAllTaosd() + + ctest = ClusterTest(nodes.node1.hostName) + ctest.connectDB() + ctest.createSTable(1) + ctest.run() + + tdSql.init(ctest.conn.cursor(), False) + tdSql.execute("use %s" % ctest.dbName) + tdSql.error("create table tt1 using %s tags(1)" % ctest.stbName) + + nodes.removeConfigs("maxVgroupsPerDb", "10") + nodes.removeConfigs("maxTablesPerVnode", "1000") + nodes.restartAllTaosd() + + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +ct = ClusterTestcase() +ct.run() \ No newline at end of file diff --git a/tests/pytest/cluster/killAndRestartDnodesTest.py b/tests/pytest/cluster/killAndRestartDnodesTest.py new file mode 100644 index 0000000000000000000000000000000000000000..be927e862f616c7fbe490e733a18984b6971ef1f --- /dev/null +++ b/tests/pytest/cluster/killAndRestartDnodesTest.py @@ -0,0 +1,75 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from clusterSetup import * +from util.sql import tdSql +from util.log import tdLog +import random + +class ClusterTestcase: + + ## test case 7, 10 ## + def run(self): + # cluster environment set up + tdLog.info("Test case 7, 10") + + nodes = Nodes() + ctest = ClusterTest(nodes.node1.hostName) + ctest.connectDB() + tdSql.init(ctest.conn.cursor(), False) + + nodes.node1.stopTaosd() + tdSql.query("show dnodes") + tdSql.checkRows(3) + tdSql.checkData(0, 4, "offline") + tdSql.checkData(1, 4, "ready") + tdSql.checkData(2, 4, "ready") + + nodes.node1.startTaosd() + tdSql.checkRows(3) + tdSql.checkData(0, 4, "ready") + tdSql.checkData(1, 4, "ready") + tdSql.checkData(2, 4, "ready") + + nodes.node2.stopTaosd() + tdSql.query("show dnodes") + tdSql.checkRows(3) + tdSql.checkData(0, 4, "ready") + tdSql.checkData(1, 4, "offline") + tdSql.checkData(2, 4, "ready") + + nodes.node2.startTaosd() + tdSql.checkRows(3) + tdSql.checkData(0, 4, "ready") + tdSql.checkData(1, 4, "ready") + tdSql.checkData(2, 4, "ready") + + nodes.node3.stopTaosd() + tdSql.query("show dnodes") + tdSql.checkRows(3) + tdSql.checkData(0, 4, "ready") + tdSql.checkData(1, 4, "ready") + tdSql.checkData(2, 4, "offline") + + nodes.node3.startTaosd() + tdSql.checkRows(3) + tdSql.checkData(0, 4, "ready") + tdSql.checkData(1, 4, "ready") + tdSql.checkData(2, 4, "ready") + + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +ct = ClusterTestcase() +ct.run() \ No newline at end of file diff --git a/tests/pytest/cluster/offlineThresholdTest.py 
b/tests/pytest/cluster/offlineThresholdTest.py new file mode 100644 index 0000000000000000000000000000000000000000..8373424f93c8217250907e09620c8523d63071ad --- /dev/null +++ b/tests/pytest/cluster/offlineThresholdTest.py @@ -0,0 +1,54 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from clusterSetup import * +from util.sql import tdSql +from util.log import tdLog +import random + +class ClusterTestcase: + + ## cover test case 6, 8, 9, 11 ## + def run(self): + # cluster environment set up + nodes = Nodes() + ctest = ClusterTest(nodes.node1.hostName) + ctest.connectDB() + tdSql.init(ctest.conn.cursor(), False) + + nodes.addConfigs("offlineThreshold", "10") + nodes.removeAllDataFiles() + nodes.restartAllTaosd() + nodes.node3.stopTaosd() + + tdLog.sleep(10) + tdSql.query("show dnodes") + tdSql.checkRows(3) + tdSql.checkData(2, 4, "offline") + + tdLog.sleep(60) + tdSql.checkRows(3) + tdSql.checkData(2, 4, "dropping") + + tdLog.sleep(300) + tdSql.checkRows(2) + + nodes.removeConfigs("offlineThreshold", "10") + nodes.restartAllTaosd() + + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +ct = ClusterTestcase() +ct.run() \ No newline at end of file diff --git a/tests/pytest/cluster/oneReplicaOfflineTest.py b/tests/pytest/cluster/oneReplicaOfflineTest.py new file mode 100644 index 0000000000000000000000000000000000000000..0223dfe01add9faca7987d7767f5c41a58b8edd2 --- /dev/null +++ b/tests/pytest/cluster/oneReplicaOfflineTest.py @@ -0,0 +1,65 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from clusterSetup import * +from util.sql import tdSql +from util.log import tdLog +import random + +class ClusterTestcase: + + ## test case 28, 29, 30, 31 ## + def run(self): + + nodes = Nodes() + ctest = ClusterTest(nodes.node1.hostName) + ctest.connectDB() + ctest.createSTable(3) + ctest.run() + tdSql.init(ctest.conn.cursor(), False) + + tdSql.execute("use %s" % ctest.dbName) + + nodes.node2.stopTaosd() + for i in range(100): + tdSql.execute("drop table t%d" % i) + + nodes.node2.startTaosd() + tdSql.query("show tables") + tdSql.checkRows(9900) + + nodes.node2.stopTaosd() + for i in range(10): + tdSql.execute("create table a%d using meters tags(2)" % i) + + nodes.node2.startTaosd() + tdSql.query("show tables") + tdSql.checkRows(9910) + + nodes.node2.stopTaosd() + tdSql.execute("alter table meters add col col6 int") + nodes.node2.startTaosd() + + nodes.node2.stopTaosd() + tdSql.execute("drop database %s" % ctest.dbName) + + nodes.node2.startTaosd() + tdSql.query("show databases") + tdSql.checkRows(0) + + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +ct = ClusterTestcase() +ct.run() diff --git a/tests/pytest/cluster/queryTimeTest.py b/tests/pytest/cluster/queryTimeTest.py new file mode 100644 index 0000000000000000000000000000000000000000..74a9081ccf4fd8abc175e2e0c82b0c6feedcbb26 --- /dev/null +++ b/tests/pytest/cluster/queryTimeTest.py @@ -0,0 +1,54 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from clusterSetup import * +from util.sql import tdSql +from util.log import tdLog +import random +import time + +class ClusterTestcase: + + ## test case 32 ## + def run(self): + + nodes = Nodes() + ctest = ClusterTest(nodes.node1.hostName) + ctest.connectDB() + ctest.createSTable(1) + ctest.run() + tdSql.init(ctest.conn.cursor(), False) + + tdSql.execute("use %s" % ctest.dbName) + totalTime = 0 + for i in range(10): + startTime = time.time() + tdSql.query("select * from %s" % ctest.stbName) + totalTime += time.time() - startTime + print("replica 1: avarage query time for %d records: %f seconds" % (ctest.numberOfTables * ctest.numberOfRecords,totalTime / 10)) + + tdSql.execute("alter database %s replica 3" % ctest.dbName) + tdLog.sleep(60) + totalTime = 0 + for i in range(10): + startTime = time.time() + tdSql.query("select * from %s" % ctest.stbName) + totalTime += time.time() - startTime + print("replica 3: avarage query time for %d records: %f seconds" % (ctest.numberOfTables * ctest.numberOfRecords,totalTime / 10)) + + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +ct = ClusterTestcase() +ct.run() diff --git a/tests/pytest/cluster/stopAllDnodesTest.py b/tests/pytest/cluster/stopAllDnodesTest.py new file mode 100644 index 0000000000000000000000000000000000000000..a71ae52e3d7a640bb589f3bafe16b2e4d45c7b93 --- /dev/null +++ b/tests/pytest/cluster/stopAllDnodesTest.py @@ -0,0 +1,45 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from clusterSetup import * +from util.sql import tdSql +from util.log import tdLog +import random + +class ClusterTestcase: + + ## test case 19 ## + def run(self): + + nodes = Nodes() + ctest = ClusterTest(nodes.node1.hostName) + tdSql.init(ctest.conn.cursor(), False) + + tdSql.query("show databases") + count = tdSql.queryRows; + + nodes.stopAllTaosd() + nodes.node1.startTaosd() + tdSql.error("show databases") + + nodes.node2.startTaosd() + tdSql.error("show databases") + + nodes.node3.startTaosd() + tdLog.sleep(10) + tdSql.query("show databases") + tdSql.checkRows(count) + +ct = ClusterTestcase() +ct.run() diff --git a/tests/pytest/cluster/stopTwoDnodesTest.py b/tests/pytest/cluster/stopTwoDnodesTest.py new file mode 100644 index 0000000000000000000000000000000000000000..9e9958e2d32018b6a89a3e0d08da2c1597151ff2 --- /dev/null +++ b/tests/pytest/cluster/stopTwoDnodesTest.py @@ -0,0 +1,48 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from clusterSetup import * +from util.sql import tdSql +from util.log import tdLog +import random + +class ClusterTestcase: + + ## test case 17, 18 ## + def run(self): + + nodes = Nodes() + ctest = ClusterTest(nodes.node1.hostName) + ctest.connectDB() + ctest.createSTable(1) + ctest.run() + tdSql.init(ctest.conn.cursor(), False) + + tdSql.query("show databases") + count = tdSql.queryRows; + tdSql.execute("use %s" % ctest.dbName) + tdSql.execute("alter database %s replica 3" % ctest.dbName) + nodes.node2.stopTaosd() + nodes.node3.stopTaosd() + tdSql.error("show databases") + + nodes.node2.startTaosd() + tdSql.error("show databases") + + nodes.node3.startTaosd() + tdSql.query("show databases") + tdSql.checkRows(count) + +ct = ClusterTestcase() +ct.run() diff --git a/tests/pytest/cluster/syncingTest.py b/tests/pytest/cluster/syncingTest.py new file mode 100644 index 0000000000000000000000000000000000000000..96be048d231e35f67e40fc4785d2e19337ed408b --- /dev/null +++ b/tests/pytest/cluster/syncingTest.py @@ -0,0 +1,50 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from clusterSetup import * +from util.sql import tdSql +from util.log import tdLog +import random + +class ClusterTestcase: + + ## test case 24, 25, 26, 27 ## + def run(self): + + nodes = Nodes() + ctest = ClusterTest(nodes.node1.hostName) + ctest.connectDB() + ctest.createSTable(1) + ctest.run() + tdSql.init(ctest.conn.cursor(), False) + + + tdSql.execute("use %s" % ctest.dbName) + tdSql.execute("alter database %s replica 3" % ctest.dbName) + + for i in range(100): + tdSql.execute("drop table t%d" % i) + + for i in range(100): + tdSql.execute("create table a%d using meters tags(1)" % i) + + tdSql.execute("alter table meters add col col5 int") + tdSql.execute("alter table meters drop col col5 int") + tdSql.execute("drop database %s" % ctest.dbName) + + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +ct = ClusterTestcase() +ct.run() diff --git a/tests/pytest/cluster/testcluster.sh b/tests/pytest/cluster/testcluster.sh new file mode 100644 index 0000000000000000000000000000000000000000..6e15a498c0a73db450699fe66d63d07c3b18dbe5 --- /dev/null +++ b/tests/pytest/cluster/testcluster.sh @@ -0,0 +1,12 @@ +python3 basicTest.py +python3 bananceTest.py +python3 changeReplicaTest.py +python3 dataFileRecoveryTest.py +python3 fullDnodesTest.py +python3 killAndRestartDnodesTest.py +python3 offlineThresholdTest.py +python3 oneReplicaOfflineTest.py +python3 queryTimeTest.py +python3 stopAllDnodesTest.py +python3 stopTwoDnodesTest.py +python3 syncingTest.py \ No newline at end of file diff --git a/tests/pytest/concurrent_inquiry.py b/tests/pytest/concurrent_inquiry.py index 39a4cb48fdc22060f63f443a4ac8142cd6a6903e..5d1e9a75374c7518367ce9613bf2fd43b39bb865 100644 
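The cluster cases added above all share one skeleton: build the node handles from `clusterSetup`, create and populate a super table through `ClusterTest`, bind `tdSql` to that connection, then stop and restart individual dnodes around the statements under test and verify the result set afterwards. The sketch below shows that skeleton in isolation, assuming only the helpers the files above already import (`Nodes`, `ClusterTest`, `tdSql`, `tdLog`); the table name `sketch_t` is illustrative and not part of any numbered case.

```python
# Minimal sketch of the shared cluster-test skeleton, assuming the same
# clusterSetup/util helpers the cases above import. Illustrative only.
from clusterSetup import *
from util.sql import tdSql
from util.log import tdLog


class ClusterTestcaseSketch:

    def run(self):
        nodes = Nodes()
        ctest = ClusterTest(nodes.node1.hostName)
        ctest.connectDB()
        ctest.createSTable(1)                  # one super table ("meters"), as in the cases above
        ctest.run()                            # insert the generated records
        tdSql.init(ctest.conn.cursor(), False)

        tdSql.execute("use %s" % ctest.dbName)
        tdSql.query("show tables")
        count = tdSql.queryRows                # baseline number of child tables

        nodes.node2.stopTaosd()                # take one dnode offline
        tdSql.execute("create table sketch_t using meters tags(1)")
        nodes.node2.startTaosd()               # bring it back and let it sync

        tdSql.query("show tables")
        tdSql.checkRows(count + 1)             # the new table must be visible again

        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


if __name__ == "__main__":
    ClusterTestcaseSketch().run()
```

Each real case differs only in which dnodes it stops and which DDL/DML it runs while they are down.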
--- a/tests/pytest/concurrent_inquiry.py +++ b/tests/pytest/concurrent_inquiry.py @@ -16,112 +16,202 @@ import sys import json import time import random -# query sql -query_sql = [ -# first supertable -"select count(*) from test.meters ;", -"select count(*) from test.meters where t3 > 2;", -"select count(*) from test.meters where ts <> '2020-05-13 10:00:00.002';", -"select count(*) from test.meters where t7 like 'taos_1%';", -"select count(*) from test.meters where t7 like '_____2';", -"select count(*) from test.meters where t8 like '%思%';", -"select count(*) from test.meters interval(1n) order by ts desc;", -#"select max(c0) from test.meters group by tbname", -"select first(ts) from test.meters where t5 >5000 and t5<5100;", -"select last(ts) from test.meters where t5 >5000 and t5<5100;", -"select last_row(*) from test.meters;", -"select twa(c1) from test.t1 where ts > 1500000001000 and ts < 1500000101000" , -"select avg(c1) from test.meters where t5 >5000 and t5<5100;", -"select bottom(c1, 2) from test.t1;", -"select diff(c1) from test.t1;", -"select leastsquares(c1, 1, 1) from test.t1 ;", -"select max(c1) from test.meters where t5 >5000 and t5<5100;", -"select min(c1) from test.meters where t5 >5000 and t5<5100;", -"select c1 + c2 + c1 / c5 + c4 + c2 from test.t1;", -"select percentile(c1, 50) from test.t1;", -"select spread(c1) from test.t1 ;", -"select stddev(c1) from test.t1;", -"select sum(c1) from test.meters where t5 >5000 and t5<5100;", -"select top(c1, 2) from test.meters where t5 >5000 and t5<5100;" -"select twa(c4) from test.t1 where ts > 1500000001000 and ts < 1500000101000" , -"select avg(c4) from test.meters where t5 >5000 and t5<5100;", -"select bottom(c4, 2) from test.t1 where t5 >5000 and t5<5100;", -"select diff(c4) from test.t1 where t5 >5000 and t5<5100;", -"select leastsquares(c4, 1, 1) from test.t1 ;", -"select max(c4) from test.meters where t5 >5000 and t5<5100;", -"select min(c4) from test.meters where t5 >5000 and t5<5100;", -"select c5 + c2 + c4 / c5 + c4 + c2 from test.t1 ;", -"select percentile(c5, 50) from test.t1;", -"select spread(c5) from test.t1 ;", -"select stddev(c5) from test.t1 where t5 >5000 and t5<5100;", -"select sum(c5) from test.meters where t5 >5000 and t5<5100;", -"select top(c5, 2) from test.meters where t5 >5000 and t5<5100;", -#all vnode -"select count(*) from test.meters where t5 >5000 and t5<5100", -"select max(c0),avg(c1) from test.meters where t5 >5000 and t5<5100", -"select sum(c5),avg(c1) from test.meters where t5 >5000 and t5<5100", -"select max(c0),min(c5) from test.meters where t5 >5000 and t5<5100", -"select min(c0),avg(c5) from test.meters where t5 >5000 and t5<5100", -# second supertable -"select count(*) from test.meters1 where t3 > 2;", -"select count(*) from test.meters1 where ts <> '2020-05-13 10:00:00.002';", -"select count(*) from test.meters where t7 like 'taos_1%';", -"select count(*) from test.meters where t7 like '_____2';", -"select count(*) from test.meters where t8 like '%思%';", -"select count(*) from test.meters1 interval(1n) order by ts desc;", -#"select max(c0) from test.meters1 group by tbname", -"select first(ts) from test.meters1 where t5 >5000 and t5<5100;", -"select last(ts) from test.meters1 where t5 >5000 and t5<5100;", -"select last_row(*) from test.meters1 ;", -"select twa(c1) from test.m1 where ts > 1500000001000 and ts < 1500000101000" , -"select avg(c1) from test.meters1 where t5 >5000 and t5<5100;", -"select bottom(c1, 2) from test.m1 where t5 >5000 and t5<5100;", -"select diff(c1) from test.m1 ;", 
-"select leastsquares(c1, 1, 1) from test.m1 ;", -"select max(c1) from test.meters1 where t5 >5000 and t5<5100;", -"select min(c1) from test.meters1 where t5 >5000 and t5<5100;", -"select c1 + c2 + c1 / c0 + c2 from test.m1 ;", -"select percentile(c1, 50) from test.m1;", -"select spread(c1) from test.m1 ;", -"select stddev(c1) from test.m1;", -"select sum(c1) from test.meters1 where t5 >5000 and t5<5100;", -"select top(c1, 2) from test.meters1 where t5 >5000 and t5<5100;", -"select twa(c5) from test.m1 where ts > 1500000001000 and ts < 1500000101000" , -"select avg(c5) from test.meters1 where t5 >5000 and t5<5100;", -"select bottom(c5, 2) from test.m1;", -"select diff(c5) from test.m1;", -"select leastsquares(c5, 1, 1) from test.m1 ;", -"select max(c5) from test.meters1 where t5 >5000 and t5<5100;", -"select min(c5) from test.meters1 where t5 >5000 and t5<5100;", -"select c5 + c2 + c4 / c5 + c0 from test.m1;", -"select percentile(c4, 50) from test.m1;", -"select spread(c4) from test.m1 ;", -"select stddev(c4) from test.m1;", -"select sum(c4) from test.meters1 where t5 >5100 and t5<5300;", -"select top(c4, 2) from test.meters1 where t5 >5100 and t5<5300;", -"select count(*) from test.meters1 where t5 >5100 and t5<5300", -#all vnode -"select count(*) from test.meters1 where t5 >5100 and t5<5300", -"select max(c0),avg(c1) from test.meters1 where t5 >5000 and t5<5100", -"select sum(c5),avg(c1) from test.meters1 where t5 >5000 and t5<5100", -"select max(c0),min(c5) from test.meters1 where t5 >5000 and t5<5100", -"select min(c0),avg(c5) from test.meters1 where t5 >5000 and t5<5100", -#join -# "select * from meters,meters1 where meters.ts = meters1.ts and meters.t5 = meters1.t5", -# "select * from meters,meters1 where meters.ts = meters1.ts and meters.t7 = meters1.t7", -# "select * from meters,meters1 where meters.ts = meters1.ts and meters.t8 = meters1.t8", -# "select meters.ts,meters1.c2 from meters,meters1 where meters.ts = meters1.ts and meters.t8 = meters1.t8" +import requests +from requests.auth import HTTPBasicAuth +func_list=['avg','count','twa','sum','stddev','leastsquares','min', +'max','first','last','top','bottom','percentile','apercentile', +'last_row','diff','spread'] +condition_list=[ + "where _c0 > now -10d ", + 'interval(10s)', + 'limit 10', + 'group by', + 'order by', + 'fill(null)' + ] - +where_list = ['_c0>now-10d',' <50'," like \'%a%\'"] class ConcurrentInquiry: - def initConnection(self): - self.numOfTherads = 50 + def __init__(self,n_Therads=25,r_Therads=25): + self.n_numOfTherads = n_Therads + self.r_numOfTherads = r_Therads self.ts=1500000001000 - + self.dbname='test' + self.stb_list=[] + self.subtb_list=[] + self.stb_stru_list=[] + self.subtb_stru_list=[] + self.stb_tag_list=[] + self.subtb_tag_list=[] + def SetThreadsNum(self,num): self.numOfTherads=num - def query_thread(self,threadID): - host = "10.211.55.14" + + def ret_fcol(self,cl,sql): #返回结果的第一列 + cl.execute(sql) + fcol_list=[] + for data in cl: + fcol_list.append(data[0]) + return fcol_list + + def r_stb_list(self,cl): #返回超级表列表 + sql='show '+self.dbname+'.stables' + self.stb_list=self.ret_fcol(cl,sql) + + def r_subtb_list(self,cl,stablename): #每个超级表返回2个子表 + sql='select tbname from '+self.dbname+'.'+stablename+' limit 2;' + self.subtb_list+=self.ret_fcol(cl,sql) + + def cal_struct(self,cl,tbname): #查看表结构 + tb=[] + tag=[] + sql='describe '+self.dbname+'.'+tbname+';' + cl.execute(sql) + for data in cl: + if data[3]: + tag.append(data[0]) + else: + tb.append(data[0]) + return tb,tag + + def r_stb_stru(self,cl): 
#获取所有超级表的表结构 + for i in self.stb_list: + tb,tag=self.cal_struct(cl,i) + self.stb_stru_list.append(tb) + self.stb_tag_list.append(tag) + + def r_subtb_stru(self,cl): #返回所有子表的表结构 + for i in self.subtb_list: + tb,tag=self.cal_struct(cl,i) + self.subtb_stru_list.append(tb) + self.subtb_tag_list.append(tag) + + def get_full(self): #获取所有的表、表结构 + host = "127.0.0.1" + user = "root" + password = "taosdata" + conn = taos.connect( + host, + user, + password, + ) + cl = conn.cursor() + self.r_stb_list(cl) + for i in self.stb_list: + self.r_subtb_list(cl,i) + self.r_stb_stru(cl) + self.r_subtb_stru(cl) + cl.close() + conn.close() + + #query condition + def con_where(self,tlist): + l=[] + for i in range(random.randint(0,len(tlist))): + c = random.choice(where_list) + if c == '_c0>now-10d': + l.append(c) + else: + l.append(random.choice(tlist)+c) + return 'where '+random.choice([' and ',' or ']).join(l) + + def con_interval(self,tlist): + return random.choice(['interval(10s)','interval(10d)','interval(1n)']) + + def con_limit(self,tlist): + return random.choice(['limit 10','limit 10 offset 10','slimit 10','slimit 10 offset 10','limit 10 slimit 10','limit 10 offset 5 slimit 5 soffset 10']) + + def con_fill(self,tlist): + return random.choice(['fill(null)','fill(prev)','fill(none)','fill(LINEAR)']) + + def con_group(self,tlist): + return 'group by '+random.choice(tlist) + + def con_order(self,tlist): + return 'order by '+random.choice(tlist) + + def gen_query_sql(self): #生成查询语句 + tbi=random.randint(0,len(self.subtb_list)+len(self.stb_list)) #随机决定查询哪张表 + tbname='' + col_list=[] + tag_list=[] + is_stb=0 + if tbi>len(self.stb_list) : + tbi=tbi-len(self.stb_list) + tbname=self.subtb_list[tbi-1] + col_list=self.subtb_stru_list[tbi-1] + tag_list=self.subtb_tag_list[tbi-1] + else: + tbname=self.stb_list[tbi-1] + col_list=self.stb_stru_list[tbi-1] + tag_list=self.stb_tag_list[tbi-1] + is_stb=1 + tlist=col_list+tag_list + con_rand=random.randint(0,len(condition_list)) + func_rand=random.randint(0,len(func_list)) + col_rand=random.randint(0,len(col_list)) + tag_rand=random.randint(0,len(tag_list)) + t_rand=random.randint(0,len(tlist)) + sql='select ' #select + random.shuffle(col_list) + random.shuffle(func_list) + sel_col_list=[] + col_rand=random.randint(0,len(col_list)) + for i,j in zip(col_list[0:col_rand],func_list): #决定每个被查询col的函数 + if j == 'leastsquares': + sel_col_list.append(j+'('+i+',1,1)') + elif j == 'top' or j == 'bottom' or j == 'percentile' or j == 'apercentile': + sel_col_list.append(j+'('+i+',1)') + else: + sel_col_list.append(j+'('+i+')') + sql=sql+','.join(sel_col_list)+' from '+random.choice(self.stb_list+self.subtb_list)+' ' #select col & func + con_func=[self.con_where,self.con_interval,self.con_limit,self.con_group,self.con_order,self.con_fill] + sel_con=random.sample(con_func,random.randint(0,len(con_func))) + sel_con_list=[] + for i in sel_con: + sel_con_list.append(i(tlist)) #获取对应的条件函数 + sql+=' '.join(sel_con_list) # condition + print(sql) + return sql + + def rest_query(self,sql): #rest 接口 + host = "127.0.0.1" + user = "root" + password = "taosdata" + port =6041 + url = "http://{}:{}/rest/sql".format(host, port ) + try: + r = requests.post(url, + data = 'use test', + auth = HTTPBasicAuth('root', 'taosdata')) + r = requests.post(url, + data = sql, + auth = HTTPBasicAuth('root', 'taosdata')) + except: + print("REST API Failure (TODO: more info here)") + raise + rj = r.json() + if ('status' not in rj): + raise RuntimeError("No status in REST response") + + if rj['status'] == 'error': # clearly 
reported error + if ('code' not in rj): # error without code + raise RuntimeError("REST error return without code") + errno = rj['code'] # May need to massage this in the future + # print("Raising programming error with REST return: {}".format(rj)) + raise taos.error.ProgrammingError( + rj['desc'], errno) # todo: check existance of 'desc' + + if rj['status'] != 'succ': # better be this + raise RuntimeError( + "Unexpected REST return status: {}".format( + rj['status'])) + + nRows = rj['rows'] if ('rows' in rj) else 0 + return nRows + + def query_thread_n(self,threadID): #使用原生python接口查询 + host = "127.0.0.1" user = "root" password = "taosdata" conn = taos.connect( @@ -135,35 +225,59 @@ class ConcurrentInquiry: print("Thread %d: starting" % threadID) while True: - ran_query_sql=query_sql - random.shuffle(ran_query_sql) - for i in ran_query_sql: - print("Thread %d : %s"% (threadID,i)) + try: + sql=self.gen_query_sql() + print("sql is ",sql) start = time.time() - cl.execute(i) - cl.fetchall + cl.execute(sql) + cl.fetchall() end = time.time() print("time cost :",end-start) except Exception as e: print( "Failure thread%d, sql: %s,exception: %s" % - (threadID, str(i),str(e))) - exit(-1) + (threadID, str(sql),str(e))) + #exit(-1) - print("Thread %d: finishing" % threadID) + print("Thread %d: finishing" % threadID) - + def query_thread_r(self,threadID): #使用rest接口查询 + print("Thread %d: starting" % threadID) + while True: + try: + sql=self.gen_query_sql() + print("sql is ",sql) + start = time.time() + self.rest_query(sql) + end = time.time() + print("time cost :",end-start) + except Exception as e: + print( + "Failure thread%d, sql: %s,exception: %s" % + (threadID, str(sql),str(e))) + #exit(-1) + + + print("Thread %d: finishing" % threadID) def run(self): - + print(self.n_numOfTherads,self.r_numOfTherads) threads = [] - for i in range(self.numOfTherads): - thread = threading.Thread(target=self.query_thread, args=(i,)) + for i in range(self.n_numOfTherads): + thread = threading.Thread(target=self.query_thread_n, args=(i,)) threads.append(thread) thread.start() - -q = ConcurrentInquiry() -q.initConnection() + for i in range(self.r_numOfTherads): + # for i in range(1): + thread = threading.Thread(target=self.query_thread_r, args=(i,)) + threads.append(thread) + thread.start() +if len(sys.argv)>1: + q = ConcurrentInquiry(n_Therads=sys.argv[1],r_Therads=sys.argv[2]) +else: + q = ConcurrentInquiry() +q.get_full() +#q.gen_query_sql() q.run() diff --git a/tests/pytest/cq.py b/tests/pytest/cq.py new file mode 100644 index 0000000000000000000000000000000000000000..7778969619f2d0679c2596581d8d76101d41ed9f --- /dev/null +++ b/tests/pytest/cq.py @@ -0,0 +1,169 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import threading +import taos +import sys +import json +import time +import random +# query sql +query_sql = [ +# first supertable +"select count(*) from test.meters ;", +"select count(*) from test.meters where t3 > 2;", +"select count(*) from test.meters where ts <> '2020-05-13 10:00:00.002';", +"select count(*) from test.meters where t7 like 'taos_1%';", +"select count(*) from test.meters where t7 like '_____2';", +"select count(*) from test.meters where t8 like '%思%';", +"select count(*) from test.meters interval(1n) order by ts desc;", +#"select max(c0) from test.meters group by tbname", +"select first(ts) from test.meters where t5 >5000 and t5<5100;", +"select last(ts) from test.meters where t5 >5000 and t5<5100;", +"select last_row(*) from test.meters;", +"select twa(c1) from test.t1 where ts > 1500000001000 and ts < 1500000101000" , +"select avg(c1) from test.meters where t5 >5000 and t5<5100;", +"select bottom(c1, 2) from test.t1;", +"select diff(c1) from test.t1;", +"select leastsquares(c1, 1, 1) from test.t1 ;", +"select max(c1) from test.meters where t5 >5000 and t5<5100;", +"select min(c1) from test.meters where t5 >5000 and t5<5100;", +"select c1 + c2 + c1 / c5 + c4 + c2 from test.t1;", +"select percentile(c1, 50) from test.t1;", +"select spread(c1) from test.t1 ;", +"select stddev(c1) from test.t1;", +"select sum(c1) from test.meters where t5 >5000 and t5<5100;", +"select top(c1, 2) from test.meters where t5 >5000 and t5<5100;" +"select twa(c4) from test.t1 where ts > 1500000001000 and ts < 1500000101000" , +"select avg(c4) from test.meters where t5 >5000 and t5<5100;", +"select bottom(c4, 2) from test.t1 where t5 >5000 and t5<5100;", +"select diff(c4) from test.t1 where t5 >5000 and t5<5100;", +"select leastsquares(c4, 1, 1) from test.t1 ;", +"select max(c4) from test.meters where t5 >5000 and t5<5100;", +"select min(c4) from test.meters where t5 >5000 and t5<5100;", +"select c5 + c2 + c4 / c5 + c4 + c2 from test.t1 ;", +"select percentile(c5, 50) from test.t1;", +"select spread(c5) from test.t1 ;", +"select stddev(c5) from test.t1 where t5 >5000 and t5<5100;", +"select sum(c5) from test.meters where t5 >5000 and t5<5100;", +"select top(c5, 2) from test.meters where t5 >5000 and t5<5100;", +#all vnode +"select count(*) from test.meters where t5 >5000 and t5<5100", +"select max(c0),avg(c1) from test.meters where t5 >5000 and t5<5100", +"select sum(c5),avg(c1) from test.meters where t5 >5000 and t5<5100", +"select max(c0),min(c5) from test.meters where t5 >5000 and t5<5100", +"select min(c0),avg(c5) from test.meters where t5 >5000 and t5<5100", +# second supertable +"select count(*) from test.meters1 where t3 > 2;", +"select count(*) from test.meters1 where ts <> '2020-05-13 10:00:00.002';", +"select count(*) from test.meters where t7 like 'taos_1%';", +"select count(*) from test.meters where t7 like '_____2';", +"select count(*) from test.meters where t8 like '%思%';", +"select count(*) from test.meters1 interval(1n) order by ts desc;", +#"select max(c0) from test.meters1 group by tbname", +"select first(ts) from test.meters1 where t5 >5000 and t5<5100;", +"select last(ts) from test.meters1 where t5 >5000 and t5<5100;", +"select last_row(*) from test.meters1 ;", +"select twa(c1) from 
test.m1 where ts > 1500000001000 and ts < 1500000101000" , +"select avg(c1) from test.meters1 where t5 >5000 and t5<5100;", +"select bottom(c1, 2) from test.m1 where t5 >5000 and t5<5100;", +"select diff(c1) from test.m1 ;", +"select leastsquares(c1, 1, 1) from test.m1 ;", +"select max(c1) from test.meters1 where t5 >5000 and t5<5100;", +"select min(c1) from test.meters1 where t5 >5000 and t5<5100;", +"select c1 + c2 + c1 / c0 + c2 from test.m1 ;", +"select percentile(c1, 50) from test.m1;", +"select spread(c1) from test.m1 ;", +"select stddev(c1) from test.m1;", +"select sum(c1) from test.meters1 where t5 >5000 and t5<5100;", +"select top(c1, 2) from test.meters1 where t5 >5000 and t5<5100;", +"select twa(c5) from test.m1 where ts > 1500000001000 and ts < 1500000101000" , +"select avg(c5) from test.meters1 where t5 >5000 and t5<5100;", +"select bottom(c5, 2) from test.m1;", +"select diff(c5) from test.m1;", +"select leastsquares(c5, 1, 1) from test.m1 ;", +"select max(c5) from test.meters1 where t5 >5000 and t5<5100;", +"select min(c5) from test.meters1 where t5 >5000 and t5<5100;", +"select c5 + c2 + c4 / c5 + c0 from test.m1;", +"select percentile(c4, 50) from test.m1;", +"select spread(c4) from test.m1 ;", +"select stddev(c4) from test.m1;", +"select sum(c4) from test.meters1 where t5 >5100 and t5<5300;", +"select top(c4, 2) from test.meters1 where t5 >5100 and t5<5300;", +"select count(*) from test.meters1 where t5 >5100 and t5<5300", +#all vnode +"select count(*) from test.meters1 where t5 >5100 and t5<5300", +"select max(c0),avg(c1) from test.meters1 where t5 >5000 and t5<5100", +"select sum(c5),avg(c1) from test.meters1 where t5 >5000 and t5<5100", +"select max(c0),min(c5) from test.meters1 where t5 >5000 and t5<5100", +"select min(c0),avg(c5) from test.meters1 where t5 >5000 and t5<5100", +#join +# "select * from meters,meters1 where meters.ts = meters1.ts and meters.t5 = meters1.t5", +# "select * from meters,meters1 where meters.ts = meters1.ts and meters.t7 = meters1.t7", +# "select * from meters,meters1 where meters.ts = meters1.ts and meters.t8 = meters1.t8", +# "select meters.ts,meters1.c2 from meters,meters1 where meters.ts = meters1.ts and meters.t8 = meters1.t8" +] + +class ConcurrentInquiry: + def initConnection(self): + self.numOfTherads = 50 + self.ts=1500000001000 + + def SetThreadsNum(self,num): + self.numOfTherads=num + def query_thread(self,threadID): + host = "10.211.55.14" + user = "root" + password = "taosdata" + conn = taos.connect( + host, + user, + password, + ) + cl = conn.cursor() + cl.execute("use test;") + + print("Thread %d: starting" % threadID) + + while True: + ran_query_sql=query_sql + random.shuffle(ran_query_sql) + for i in ran_query_sql: + print("Thread %d : %s"% (threadID,i)) + try: + start = time.time() + cl.execute(i) + cl.fetchall() + end = time.time() + print("time cost :",end-start) + except Exception as e: + print( + "Failure thread%d, sql: %s,exception: %s" % + (threadID, str(i),str(e))) + exit(-1) + + + print("Thread %d: finishing" % threadID) + + + + def run(self): + + threads = [] + for i in range(self.numOfTherads): + thread = threading.Thread(target=self.query_thread, args=(i,)) + threads.append(thread) + thread.start() + +q = ConcurrentInquiry() +q.initConnection() +q.run() diff --git a/tests/pytest/crash_gen.sh b/tests/pytest/crash_gen.sh index 4ffe35fc3c94edbdd194e03171696a1d681387c1..0af09634df5a5c418797ae4bd352c319fcbc74fa 100755 --- a/tests/pytest/crash_gen.sh +++ b/tests/pytest/crash_gen.sh @@ -54,6 +54,7 @@ export 
PYTHONPATH=$(pwd)/../../src/connector/python/linux/python3:$(pwd) export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$LIB_DIR # Now we are all let, and let's see if we can find a crash. Note we pass all params +CRASH_GEN_EXEC=crash_gen_bootstrap.py if [[ $1 == '--valgrind' ]]; then shift export PYTHONMALLOC=malloc @@ -66,14 +67,16 @@ if [[ $1 == '--valgrind' ]]; then --leak-check=yes \ --suppressions=crash_gen/valgrind_taos.supp \ $PYTHON_EXEC \ - ./crash_gen/crash_gen.py $@ > $VALGRIND_OUT 2> $VALGRIND_ERR + $CRASH_GEN_EXEC $@ > $VALGRIND_OUT 2> $VALGRIND_ERR elif [[ $1 == '--helgrind' ]]; then shift + HELGRIND_OUT=helgrind.out + HELGRIND_ERR=helgrind.err valgrind \ --tool=helgrind \ $PYTHON_EXEC \ - ./crash_gen/crash_gen.py $@ + $CRASH_GEN_EXEC $@ > $HELGRIND_OUT 2> $HELGRIND_ERR else - $PYTHON_EXEC ./crash_gen/crash_gen.py $@ + $PYTHON_EXEC $CRASH_GEN_EXEC $@ fi diff --git a/tests/pytest/crash_gen/README.md b/tests/pytest/crash_gen/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6788ab1a63d0a7c515558695605d1ec8ac5fb7f9 --- /dev/null +++ b/tests/pytest/crash_gen/README.md @@ -0,0 +1,130 @@ +

+# User's Guide to the Crash_Gen Tool

+ +# Introduction + +To effectively test and debug our TDengine product, we have developed a simple tool to +exercise various functions of the system in a randomized fashion, hoping to expose +the maximum number of problems without a pre-determined scenario. + +# Preparation + +To run this tool, please ensure the following preparation work is done first. + +1. Fetch a copy of the TDengine source code, and build it successfully in the `build/` + directory +1. Ensure that the system has Python 3.8 or above properly installed. We use + Ubuntu 20.04LTS as our own development environment, and suggest you also use such + an environment if possible. + +# Simple Execution + +To run the tool in the simplest way, follow the steps below: + +1. Open a terminal window, start the `taosd` service in the `build/` directory + (or however you prefer to start the `taosd` service) +1. Open another terminal window, go into the `tests/pytest/` directory, and + run `./crash_gen.sh -p -t 3 -s 10` (change the two parameters here as you wish) +1. Watch the output to the end and see if you get a `SUCCESS` or `FAILURE` + +That's it! + +# Running Clusters + +This tool also makes it easy to test/verify the clustering capabilities of TDengine. You +can start a cluster quite easily with the following command: + +``` +$ cd tests/pytest/ +$ ./crash_gen.sh -e -o 3 +``` + +The `-e` option above tells the tool to start the service and not run any tests, while +the `-o 3` option tells the tool to start 3 DNodes and join them together in a cluster. +Obviously you can adjust the number here. + +## Behind the Scenes + +When the tool runs a cluster, it uses a number of directories, each holding the information +for a single DNode, see: + +``` +$ ls build/cluster* +build/cluster_dnode_0: +cfg data log + +build/cluster_dnode_1: +cfg data log + +build/cluster_dnode_2: +cfg data log +``` + +Therefore, when something goes wrong and you want to reset everything with the cluster, simply +erase all the files: + +``` +$ rm -rf build/cluster_dnode_* +``` + +## Addresses and Ports + +The DNodes in the cluster all bind to the `127.0.0.1` IP address (for now anyway), and +use port 6030 for the first DNode, 6130 for the 2nd one, and so on. + +## Testing Against a Cluster + +In a separate terminal window, you can invoke the tool in client mode and test against +a cluster, such as: + +``` +$ ./crash_gen.sh -p -t 10 -s 100 -i 3 +``` + +Here the `-i` option tells the tool to always create tables with 3 replicas, and run +all tests against such tables. + +# Additional Features + +The exhaustive list of the tool's features is available through the `-h` option: + +``` +$ ./crash_gen.sh -h +usage: crash_gen_bootstrap.py [-h] [-a] [-b MAX_DBS] [-c CONNECTOR_TYPE] [-d] [-e] [-g IGNORE_ERRORS] [-i MAX_REPLICAS] [-l] [-n] [-o NUM_DNODES] [-p] [-r] + [-s MAX_STEPS] [-t NUM_THREADS] [-v] [-x] + +TDengine Auto Crash Generator (PLEASE NOTICE the Prerequisites Below) +--------------------------------------------------------------------- +1. You build TDengine in the top level ./build directory, as described in offical docs +2. You run the server there before this script: ./build/bin/taosd -c test/cfg + +optional arguments: + -h, --help show this help message and exit + -a, --auto-start-service + Automatically start/stop the TDengine service (default: false) + -b MAX_DBS, --max-dbs MAX_DBS + Maximum number of DBs to keep, set to disable dropping DB.
(default: 0) + -c CONNECTOR_TYPE, --connector-type CONNECTOR_TYPE + Connector type to use: native, rest, or mixed (default: 10) + -d, --debug Turn on DEBUG mode for more logging (default: false) + -e, --run-tdengine Run TDengine service in foreground (default: false) + -g IGNORE_ERRORS, --ignore-errors IGNORE_ERRORS + Ignore error codes, comma separated, 0x supported (default: None) + -i MAX_REPLICAS, --max-replicas MAX_REPLICAS + Maximum number of replicas to use, when testing against clusters. (default: 1) + -l, --larger-data Write larger amount of data during write operations (default: false) + -n, --dynamic-db-table-names + Use non-fixed names for dbs/tables, useful for multi-instance executions (default: false) + -o NUM_DNODES, --num-dnodes NUM_DNODES + Number of Dnodes to initialize, used with -e option. (default: 1) + -p, --per-thread-db-connection + Use a single shared db connection (default: false) + -r, --record-ops Use a pair of always-fsynced fils to record operations performing + performed, for power-off tests (default: false) + -s MAX_STEPS, --max-steps MAX_STEPS + Maximum number of steps to run (default: 100) + -t NUM_THREADS, --num-threads NUM_THREADS + Number of threads to run (default: 10) + -v, --verify-data Verify data written in a number of places by reading back (default: false) + -x, --continue-on-exception + Continue execution after encountering unexpected/disallowed errors/exceptions (default: false) +``` + diff --git a/tests/pytest/crash_gen/crash_gen.py b/tests/pytest/crash_gen/crash_gen_main.py similarity index 56% rename from tests/pytest/crash_gen/crash_gen.py rename to tests/pytest/crash_gen/crash_gen_main.py index 48196ab383c974b5c5d3f5ebc54773cd846353e6..8d68457ec8d07c9c00f8b8fd0a11e2f25284ce4a 100755 --- a/tests/pytest/crash_gen/crash_gen.py +++ b/tests/pytest/crash_gen/crash_gen_main.py @@ -14,42 +14,36 @@ # For type hinting before definition, ref: # https://stackoverflow.com/questions/33533148/how-do-i-specify-that-the-return-type-of-a-method-is-the-same-as-the-class-itsel from __future__ import annotations -import taos -from util.sql import * -from util.cases import * -from util.dnodes import * -from util.log import * -from queue import Queue, Empty -from typing import IO + from typing import Set from typing import Dict from typing import List -from requests.auth import HTTPBasicAuth +from typing import Optional # Type hinting, ref: https://stackoverflow.com/questions/19202633/python-3-type-hinting-for-none + import textwrap -import datetime -import logging import time +import datetime import random +import logging import threading -import requests import copy import argparse import getopt import sys import os -import io import signal import traceback import resource from guppy import hpy import gc -try: - import psutil -except: - print("Psutil module needed, please install: sudo pip3 install psutil") - sys.exit(-1) +from crash_gen.service_manager import ServiceManager, TdeInstance +from crash_gen.misc import Logging, Status, CrashGenError, Dice, Helper, Progress +from crash_gen.db import DbConn, MyTDSql, DbConnNative, DbManager + +import taos +import requests # Require Python 3 if sys.version_info[0] < 3: @@ -59,41 +53,37 @@ if sys.version_info[0] < 3: # Command-line/Environment Configurations, will set a bit later # ConfigNameSpace = argparse.Namespace -gConfig = argparse.Namespace() # Dummy value, will be replaced later -gSvcMgr = None # TODO: refactor this hack, use dep injection -logger = None # type: Logger - -def runThread(wt: WorkerThread): - 
wt.run() +gConfig: argparse.Namespace +gSvcMgr: ServiceManager # TODO: refactor this hack, use dep injection +# logger: logging.Logger +gContainer: Container -class CrashGenError(Exception): - def __init__(self, msg=None, errno=None): - self.msg = msg - self.errno = errno - - def __str__(self): - return self.msg +# def runThread(wt: WorkerThread): +# wt.run() class WorkerThread: - def __init__(self, pool: ThreadPool, tid, tc: ThreadCoordinator, - # te: TaskExecutor, - ): # note: main thread context! + def __init__(self, pool: ThreadPool, tid, tc: ThreadCoordinator): + """ + Note: this runs in the main thread context + """ # self._curStep = -1 self._pool = pool self._tid = tid self._tc = tc # type: ThreadCoordinator # self.threadIdent = threading.get_ident() - self._thread = threading.Thread(target=runThread, args=(self,)) + # self._thread = threading.Thread(target=runThread, args=(self,)) + self._thread = threading.Thread(target=self.run) self._stepGate = threading.Event() # Let us have a DB connection of our own if (gConfig.per_thread_db_connection): # type: ignore # print("connector_type = {}".format(gConfig.connector_type)) - if gConfig.connector_type == 'native': - self._dbConn = DbConn.createNative() + tInst = gContainer.defTdeInstance + if gConfig.connector_type == 'native': + self._dbConn = DbConn.createNative(tInst.getDbTarget()) elif gConfig.connector_type == 'rest': - self._dbConn = DbConn.createRest() + self._dbConn = DbConn.createRest(tInst.getDbTarget()) elif gConfig.connector_type == 'mixed': if Dice.throw(2) == 0: # 1/2 chance self._dbConn = DbConn.createNative() @@ -105,10 +95,10 @@ class WorkerThread: # self._dbInUse = False # if "use db" was executed already def logDebug(self, msg): - logger.debug(" TRD[{}] {}".format(self._tid, msg)) + Logging.debug(" TRD[{}] {}".format(self._tid, msg)) def logInfo(self, msg): - logger.info(" TRD[{}] {}".format(self._tid, msg)) + Logging.info(" TRD[{}] {}".format(self._tid, msg)) # def dbInUse(self): # return self._dbInUse @@ -127,10 +117,10 @@ class WorkerThread: def run(self): # initialization after thread starts, in the thread context # self.isSleeping = False - logger.info("Starting to run thread: {}".format(self._tid)) + Logging.info("Starting to run thread: {}".format(self._tid)) if (gConfig.per_thread_db_connection): # type: ignore - logger.debug("Worker thread openning database connection") + Logging.debug("Worker thread openning database connection") self._dbConn.open() self._doTaskLoop() @@ -140,7 +130,7 @@ class WorkerThread: if self._dbConn.isOpen: #sometimes it is not open self._dbConn.close() else: - logger.warning("Cleaning up worker thread, dbConn already closed") + Logging.warning("Cleaning up worker thread, dbConn already closed") def _doTaskLoop(self): # while self._curStep < self._pool.maxSteps: @@ -151,15 +141,15 @@ class WorkerThread: tc.crossStepBarrier() # shared barrier first, INCLUDING the last one except threading.BrokenBarrierError as err: # main thread timed out print("_bto", end="") - logger.debug("[TRD] Worker thread exiting due to main thread barrier time-out") + Logging.debug("[TRD] Worker thread exiting due to main thread barrier time-out") break - logger.debug("[TRD] Worker thread [{}] exited barrier...".format(self._tid)) + Logging.debug("[TRD] Worker thread [{}] exited barrier...".format(self._tid)) self.crossStepGate() # then per-thread gate, after being tapped - logger.debug("[TRD] Worker thread [{}] exited step gate...".format(self._tid)) + Logging.debug("[TRD] Worker thread [{}] exited step 
gate...".format(self._tid)) if not self._tc.isRunning(): print("_wts", end="") - logger.debug("[TRD] Thread Coordinator not running any more, worker thread now stopping...") + Logging.debug("[TRD] Thread Coordinator not running any more, worker thread now stopping...") break # Before we fetch the task and run it, let's ensure we properly "use" the database (not needed any more) @@ -178,15 +168,15 @@ class WorkerThread: raise # Fetch a task from the Thread Coordinator - logger.debug( "[TRD] Worker thread [{}] about to fetch task".format(self._tid)) + Logging.debug( "[TRD] Worker thread [{}] about to fetch task".format(self._tid)) task = tc.fetchTask() # Execute such a task - logger.debug("[TRD] Worker thread [{}] about to execute task: {}".format( + Logging.debug("[TRD] Worker thread [{}] about to execute task: {}".format( self._tid, task.__class__.__name__)) task.execute(self) tc.saveExecutedTask(task) - logger.debug("[TRD] Worker thread [{}] finished executing task".format(self._tid)) + Logging.debug("[TRD] Worker thread [{}] finished executing task".format(self._tid)) # self._dbInUse = False # there may be changes between steps # print("_wtd", end=None) # worker thread died @@ -209,7 +199,7 @@ class WorkerThread: self.verifyThreadSelf() # only allowed by ourselves # Wait again at the "gate", waiting to be "tapped" - logger.debug( + Logging.debug( "[TRD] Worker thread {} about to cross the step gate".format( self._tid)) self._stepGate.wait() @@ -222,7 +212,7 @@ class WorkerThread: self.verifyThreadMain() # only allowed for main thread if self._thread.is_alive(): - logger.debug("[TRD] Tapping worker thread {}".format(self._tid)) + Logging.debug("[TRD] Tapping worker thread {}".format(self._tid)) self._stepGate.set() # wake up! time.sleep(0) # let the released thread run a bit else: @@ -253,7 +243,7 @@ class WorkerThread: class ThreadCoordinator: - WORKER_THREAD_TIMEOUT = 60 # one minute + WORKER_THREAD_TIMEOUT = 120 # Normal: 120 def __init__(self, pool: ThreadPool, dbManager: DbManager): self._curStep = -1 # first step is 0 @@ -267,7 +257,7 @@ class ThreadCoordinator: self._stepBarrier = threading.Barrier( self._pool.numThreads + 1) # one barrier for all threads self._execStats = ExecutionStats() - self._runStatus = MainExec.STATUS_RUNNING + self._runStatus = Status.STATUS_RUNNING self._initDbs() def getTaskExecutor(self): @@ -280,14 +270,14 @@ class ThreadCoordinator: self._stepBarrier.wait(timeout) def requestToStop(self): - self._runStatus = MainExec.STATUS_STOPPING + self._runStatus = Status.STATUS_STOPPING self._execStats.registerFailure("User Interruption") def _runShouldEnd(self, transitionFailed, hasAbortedTask, workerTimeout): maxSteps = gConfig.max_steps # type: ignore if self._curStep >= (maxSteps - 1): # maxStep==10, last curStep should be 9 return True - if self._runStatus != MainExec.STATUS_RUNNING: + if self._runStatus != Status.STATUS_RUNNING: return True if transitionFailed: return True @@ -308,7 +298,7 @@ class ThreadCoordinator: def _releaseAllWorkerThreads(self, transitionFailed): self._curStep += 1 # we are about to get into next step. TODO: race condition here! 
# Now not all threads had time to go to sleep - logger.debug( + Logging.debug( "--\r\n\n--> Step {} starts with main thread waking up".format(self._curStep)) # A new TE for the new step @@ -316,7 +306,7 @@ class ThreadCoordinator: if not transitionFailed: # only if not failed self._te = TaskExecutor(self._curStep) - logger.debug("[TRD] Main thread waking up at step {}, tapping worker threads".format( + Logging.debug("[TRD] Main thread waking up at step {}, tapping worker threads".format( self._curStep)) # Now not all threads had time to go to sleep # Worker threads will wake up at this point, and each execute it's own task self.tapAllThreads() # release all worker thread from their "gates" @@ -325,10 +315,10 @@ class ThreadCoordinator: # Now main thread (that's us) is ready to enter a step # let other threads go past the pool barrier, but wait at the # thread gate - logger.debug("[TRD] Main thread about to cross the barrier") + Logging.debug("[TRD] Main thread about to cross the barrier") self.crossStepBarrier(timeout=self.WORKER_THREAD_TIMEOUT) self._stepBarrier.reset() # Other worker threads should now be at the "gate" - logger.debug("[TRD] Main thread finished crossing the barrier") + Logging.debug("[TRD] Main thread finished crossing the barrier") def _doTransition(self): transitionFailed = False @@ -336,11 +326,11 @@ class ThreadCoordinator: for x in self._dbs: db = x # type: Database sm = db.getStateMachine() - logger.debug("[STT] starting transitions for DB: {}".format(db.getName())) + Logging.debug("[STT] starting transitions for DB: {}".format(db.getName())) # at end of step, transiton the DB state tasksForDb = db.filterTasks(self._executedTasks) sm.transition(tasksForDb, self.getDbManager().getDbConn()) - logger.debug("[STT] transition ended for DB: {}".format(db.getName())) + Logging.debug("[STT] transition ended for DB: {}".format(db.getName())) # Due to limitation (or maybe not) of the TD Python library, # we cannot share connections across threads @@ -348,14 +338,14 @@ class ThreadCoordinator: # Moving below to task loop # if sm.hasDatabase(): # for t in self._pool.threadList: - # logger.debug("[DB] use db for all worker threads") + # Logging.debug("[DB] use db for all worker threads") # t.useDb() # t.execSql("use db") # main thread executing "use # db" on behalf of every worker thread except taos.error.ProgrammingError as err: if (err.msg == 'network unavailable'): # broken DB connection - logger.info("DB connection broken, execution failed") + Logging.info("DB connection broken, execution failed") traceback.print_stack() transitionFailed = True self._te = None # Not running any more @@ -368,7 +358,7 @@ class ThreadCoordinator: self.resetExecutedTasks() # clear the tasks after we are done # Get ready for next step - logger.debug("<-- Step {} finished, trasition failed = {}".format(self._curStep, transitionFailed)) + Logging.debug("<-- Step {} finished, trasition failed = {}".format(self._curStep, transitionFailed)) return transitionFailed def run(self): @@ -382,8 +372,9 @@ class ThreadCoordinator: hasAbortedTask = False workerTimeout = False while not self._runShouldEnd(transitionFailed, hasAbortedTask, workerTimeout): - if not gConfig.debug: # print this only if we are not in debug mode - print(".", end="", flush=True) + if not gConfig.debug: # print this only if we are not in debug mode + Progress.emit(Progress.STEP_BOUNDARY) + # print(".", end="", flush=True) # if (self._curStep % 2) == 0: # print memory usage once every 10 steps # memUsage = 
resource.getrusage(resource.RUSAGE_SELF).ru_maxrss # print("[m:{}]".format(memUsage), end="", flush=True) # print memory usage @@ -395,10 +386,11 @@ class ThreadCoordinator: try: self._syncAtBarrier() # For now just cross the barrier + Progress.emit(Progress.END_THREAD_STEP) except threading.BrokenBarrierError as err: - logger.info("Main loop aborted, caused by worker thread time-out") + Logging.info("Main loop aborted, caused by worker thread(s) time-out") self._execStats.registerFailure("Aborted due to worker thread timeout") - print("\n\nWorker Thread time-out detected, important thread info:") + print("\n\nWorker Thread time-out detected, TAOS related threads are:") ts = ThreadStacks() ts.print(filterInternal=True) workerTimeout = True @@ -409,7 +401,7 @@ class ThreadCoordinator: # threads are QUIET. hasAbortedTask = self._hasAbortedTask() # from previous step if hasAbortedTask: - logger.info("Aborted task encountered, exiting test program") + Logging.info("Aborted task encountered, exiting test program") self._execStats.registerFailure("Aborted Task Encountered") break # do transition only if tasks are error free @@ -420,29 +412,30 @@ class ThreadCoordinator: transitionFailed = True errno2 = Helper.convertErrno(err.errno) # correct error scheme errMsg = "Transition failed: errno=0x{:X}, msg: {}".format(errno2, err) - logger.info(errMsg) + Logging.info(errMsg) traceback.print_exc() self._execStats.registerFailure(errMsg) # Then we move on to the next step + Progress.emit(Progress.BEGIN_THREAD_STEP) self._releaseAllWorkerThreads(transitionFailed) if hasAbortedTask or transitionFailed : # abnormal ending, workers waiting at "gate" - logger.debug("Abnormal ending of main thraed") + Logging.debug("Abnormal ending of main thraed") elif workerTimeout: - logger.debug("Abnormal ending of main thread, due to worker timeout") + Logging.debug("Abnormal ending of main thread, due to worker timeout") else: # regular ending, workers waiting at "barrier" - logger.debug("Regular ending, main thread waiting for all worker threads to stop...") + Logging.debug("Regular ending, main thread waiting for all worker threads to stop...") self._syncAtBarrier() self._te = None # No more executor, time to end - logger.debug("Main thread tapping all threads one last time...") + Logging.debug("Main thread tapping all threads one last time...") self.tapAllThreads() # Let the threads run one last time - logger.debug("\r\n\n--> Main thread ready to finish up...") - logger.debug("Main thread joining all threads") + Logging.debug("\r\n\n--> Main thread ready to finish up...") + Logging.debug("Main thread joining all threads") self._pool.joinAll() # Get all threads to finish - logger.info("\nAll worker threads finished") + Logging.info(". . . 
All worker threads finished") # No CR/LF before self._execStats.endExec() def cleanup(self): # free resources @@ -474,7 +467,7 @@ class ThreadCoordinator: wakeSeq.append(i) else: wakeSeq.insert(0, i) - logger.debug( + Logging.debug( "[TRD] Main thread waking up worker threads: {}".format( str(wakeSeq))) # TODO: set dice seed to a deterministic value @@ -492,9 +485,11 @@ class ThreadCoordinator: dbc = self.getDbManager().getDbConn() if gConfig.max_dbs == 0: self._dbs.append(Database(0, dbc)) - else: + else: + baseDbNumber = int(datetime.datetime.now().timestamp( # Don't use Dice/random, as they are deterministic + )*333) % 888 if gConfig.dynamic_db_table_names else 0 for i in range(gConfig.max_dbs): - self._dbs.append(Database(i, dbc)) + self._dbs.append(Database(baseDbNumber + i, dbc)) def pickDatabase(self): idxDb = 0 @@ -512,7 +507,7 @@ class ThreadCoordinator: # pick a task type for current state db = self.pickDatabase() - taskType = db.getStateMachine().pickTaskType() # type: Task + taskType = db.getStateMachine().pickTaskType() # dynamic name of class return taskType(self._execStats, db) # create a task from it def resetExecutedTasks(self): @@ -522,13 +517,6 @@ class ThreadCoordinator: with self._lock: self._executedTasks.append(task) -# We define a class to run a number of threads in locking steps. - -class Helper: - @classmethod - def convertErrno(cls, errno): - return errno if (errno > 0) else 0x80000000 + errno - class ThreadPool: def __init__(self, numThreads, maxSteps): self.numThreads = numThreads @@ -546,7 +534,7 @@ class ThreadPool: def joinAll(self): for workerThread in self.threadList: - logger.debug("Joining thread...") + Logging.debug("Joining thread...") workerThread._thread.join() def cleanup(self): @@ -603,7 +591,7 @@ class LinearQueue(): def allocate(self, i): with self._lock: - # logger.debug("LQ allocating item {}".format(i)) + # Logging.debug("LQ allocating item {}".format(i)) if (i in self.inUse): raise RuntimeError( "Cannot re-use same index in queue: {}".format(i)) @@ -611,7 +599,7 @@ class LinearQueue(): def release(self, i): with self._lock: - # logger.debug("LQ releasing item {}".format(i)) + # Logging.debug("LQ releasing item {}".format(i)) self.inUse.remove(i) # KeyError possible, TODO: why? 
def size(self): @@ -633,357 +621,6 @@ class LinearQueue(): return ret -class DbConn: - TYPE_NATIVE = "native-c" - TYPE_REST = "rest-api" - TYPE_INVALID = "invalid" - - @classmethod - def create(cls, connType): - if connType == cls.TYPE_NATIVE: - return DbConnNative() - elif connType == cls.TYPE_REST: - return DbConnRest() - else: - raise RuntimeError( - "Unexpected connection type: {}".format(connType)) - - @classmethod - def createNative(cls): - return cls.create(cls.TYPE_NATIVE) - - @classmethod - def createRest(cls): - return cls.create(cls.TYPE_REST) - - def __init__(self): - self.isOpen = False - self._type = self.TYPE_INVALID - self._lastSql = None - - def getLastSql(self): - return self._lastSql - - def open(self): - if (self.isOpen): - raise RuntimeError("Cannot re-open an existing DB connection") - - # below implemented by child classes - self.openByType() - - logger.debug("[DB] data connection opened, type = {}".format(self._type)) - self.isOpen = True - - def queryScalar(self, sql) -> int: - return self._queryAny(sql) - - def queryString(self, sql) -> str: - return self._queryAny(sql) - - def _queryAny(self, sql): # actual query result as an int - if (not self.isOpen): - raise RuntimeError("Cannot query database until connection is open") - nRows = self.query(sql) - if nRows != 1: - raise taos.error.ProgrammingError( - "Unexpected result for query: {}, rows = {}".format(sql, nRows), - (0x991 if nRows==0 else 0x992) - ) - if self.getResultRows() != 1 or self.getResultCols() != 1: - raise RuntimeError("Unexpected result set for query: {}".format(sql)) - return self.getQueryResult()[0][0] - - def use(self, dbName): - self.execute("use {}".format(dbName)) - - def existsDatabase(self, dbName: str): - ''' Check if a certain database exists ''' - self.query("show databases") - dbs = [v[0] for v in self.getQueryResult()] # ref: https://stackoverflow.com/questions/643823/python-list-transformation - # ret2 = dbName in dbs - # print("dbs = {}, str = {}, ret2={}, type2={}".format(dbs, dbName,ret2, type(dbName))) - return dbName in dbs # TODO: super weird type mangling seen, once here - - def hasTables(self): - return self.query("show tables") > 0 - - def execute(self, sql): - ''' Return the number of rows affected''' - raise RuntimeError("Unexpected execution, should be overriden") - - def safeExecute(self, sql): - '''Safely execute any SQL query, returning True/False upon success/failure''' - try: - self.execute(sql) - return True # ignore num of results, return success - except taos.error.ProgrammingError as err: - return False # failed, for whatever TAOS reason - # Not possile to reach here, non-TAOS exception would have been thrown - - def query(self, sql) -> int: # return num rows returned - ''' Return the number of rows affected''' - raise RuntimeError("Unexpected execution, should be overriden") - - def openByType(self): - raise RuntimeError("Unexpected execution, should be overriden") - - def getQueryResult(self): - raise RuntimeError("Unexpected execution, should be overriden") - - def getResultRows(self): - raise RuntimeError("Unexpected execution, should be overriden") - - def getResultCols(self): - raise RuntimeError("Unexpected execution, should be overriden") - -# Sample: curl -u root:taosdata -d "show databases" localhost:6020/rest/sql - - -class DbConnRest(DbConn): - def __init__(self): - super().__init__() - self._type = self.TYPE_REST - self._url = "http://localhost:6041/rest/sql" # fixed for now - self._result = None - - def openByType(self): # Open connection - pass # 
do nothing, always open - - def close(self): - if (not self.isOpen): - raise RuntimeError("Cannot clean up database until connection is open") - # Do nothing for REST - logger.debug("[DB] REST Database connection closed") - self.isOpen = False - - def _doSql(self, sql): - self._lastSql = sql # remember this, last SQL attempted - try: - r = requests.post(self._url, - data = sql, - auth = HTTPBasicAuth('root', 'taosdata')) - except: - print("REST API Failure (TODO: more info here)") - raise - rj = r.json() - # Sanity check for the "Json Result" - if ('status' not in rj): - raise RuntimeError("No status in REST response") - - if rj['status'] == 'error': # clearly reported error - if ('code' not in rj): # error without code - raise RuntimeError("REST error return without code") - errno = rj['code'] # May need to massage this in the future - # print("Raising programming error with REST return: {}".format(rj)) - raise taos.error.ProgrammingError( - rj['desc'], errno) # todo: check existance of 'desc' - - if rj['status'] != 'succ': # better be this - raise RuntimeError( - "Unexpected REST return status: {}".format( - rj['status'])) - - nRows = rj['rows'] if ('rows' in rj) else 0 - self._result = rj - return nRows - - def execute(self, sql): - if (not self.isOpen): - raise RuntimeError( - "Cannot execute database commands until connection is open") - logger.debug("[SQL-REST] Executing SQL: {}".format(sql)) - nRows = self._doSql(sql) - logger.debug( - "[SQL-REST] Execution Result, nRows = {}, SQL = {}".format(nRows, sql)) - return nRows - - def query(self, sql): # return rows affected - return self.execute(sql) - - def getQueryResult(self): - return self._result['data'] - - def getResultRows(self): - print(self._result) - raise RuntimeError("TBD") - # return self._tdSql.queryRows - - def getResultCols(self): - print(self._result) - raise RuntimeError("TBD") - - # Duplicate code from TDMySQL, TODO: merge all this into DbConnNative - - -class MyTDSql: - # Class variables - _clsLock = threading.Lock() # class wide locking - longestQuery = None # type: str - longestQueryTime = 0.0 # seconds - lqStartTime = 0.0 - # lqEndTime = 0.0 # Not needed, as we have the two above already - - def __init__(self, hostAddr, cfgPath): - # Make the DB connection - self._conn = taos.connect(host=hostAddr, config=cfgPath) - self._cursor = self._conn.cursor() - - self.queryRows = 0 - self.queryCols = 0 - self.affectedRows = 0 - - # def init(self, cursor, log=True): - # self.cursor = cursor - # if (log): - # caller = inspect.getframeinfo(inspect.stack()[1][0]) - # self.cursor.log(caller.filename + ".sql") - - def close(self): - self._cursor.close() # can we double close? - self._conn.close() # TODO: very important, cursor close does NOT close DB connection! 
- self._cursor.close() - - def _execInternal(self, sql): - startTime = time.time() - ret = self._cursor.execute(sql) - # print("\nSQL success: {}".format(sql)) - queryTime = time.time() - startTime - # Record the query time - cls = self.__class__ - if queryTime > (cls.longestQueryTime + 0.01) : - with cls._clsLock: - cls.longestQuery = sql - cls.longestQueryTime = queryTime - cls.lqStartTime = startTime - return ret - - def query(self, sql): - self.sql = sql - try: - self._execInternal(sql) - self.queryResult = self._cursor.fetchall() - self.queryRows = len(self.queryResult) - self.queryCols = len(self._cursor.description) - except Exception as e: - # caller = inspect.getframeinfo(inspect.stack()[1][0]) - # args = (caller.filename, caller.lineno, sql, repr(e)) - # tdLog.exit("%s(%d) failed: sql:%s, %s" % args) - raise - return self.queryRows - - def execute(self, sql): - self.sql = sql - try: - self.affectedRows = self._execInternal(sql) - except Exception as e: - # caller = inspect.getframeinfo(inspect.stack()[1][0]) - # args = (caller.filename, caller.lineno, sql, repr(e)) - # tdLog.exit("%s(%d) failed: sql:%s, %s" % args) - raise - return self.affectedRows - - -class DbConnNative(DbConn): - # Class variables - _lock = threading.Lock() - _connInfoDisplayed = False - totalConnections = 0 # Not private - - def __init__(self): - super().__init__() - self._type = self.TYPE_NATIVE - self._conn = None - # self._cursor = None - - def getBuildPath(self): - selfPath = os.path.dirname(os.path.realpath(__file__)) - if ("community" in selfPath): - projPath = selfPath[:selfPath.find("communit")] - else: - projPath = selfPath[:selfPath.find("tests")] - - buildPath = None - for root, dirs, files in os.walk(projPath): - if ("taosd" in files): - rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): - buildPath = root[:len(root) - len("/build/bin")] - break - if buildPath == None: - raise RuntimeError("Failed to determine buildPath, selfPath={}, projPath={}" - .format(selfPath, projPath)) - return buildPath - - - def openByType(self): # Open connection - cfgPath = self.getBuildPath() + "/test/cfg" - hostAddr = "127.0.0.1" - - cls = self.__class__ # Get the class, to access class variables - with cls._lock: # force single threading for opening DB connections. # TODO: whaaat??!!! 
- if not cls._connInfoDisplayed: - cls._connInfoDisplayed = True # updating CLASS variable - logger.info("Initiating TAOS native connection to {}, using config at {}".format(hostAddr, cfgPath)) - # Make the connection - # self._conn = taos.connect(host=hostAddr, config=cfgPath) # TODO: make configurable - # self._cursor = self._conn.cursor() - # Record the count in the class - self._tdSql = MyTDSql(hostAddr, cfgPath) # making DB connection - cls.totalConnections += 1 - - self._tdSql.execute('reset query cache') - # self._cursor.execute('use db') # do this at the beginning of every - - # Open connection - # self._tdSql = MyTDSql() - # self._tdSql.init(self._cursor) - - def close(self): - if (not self.isOpen): - raise RuntimeError("Cannot clean up database until connection is open") - self._tdSql.close() - # Decrement the class wide counter - cls = self.__class__ # Get the class, to access class variables - with cls._lock: - cls.totalConnections -= 1 - - logger.debug("[DB] Database connection closed") - self.isOpen = False - - def execute(self, sql): - if (not self.isOpen): - raise RuntimeError("Cannot execute database commands until connection is open") - logger.debug("[SQL] Executing SQL: {}".format(sql)) - self._lastSql = sql - nRows = self._tdSql.execute(sql) - logger.debug( - "[SQL] Execution Result, nRows = {}, SQL = {}".format( - nRows, sql)) - return nRows - - def query(self, sql): # return rows affected - if (not self.isOpen): - raise RuntimeError( - "Cannot query database until connection is open") - logger.debug("[SQL] Executing SQL: {}".format(sql)) - self._lastSql = sql - nRows = self._tdSql.query(sql) - logger.debug( - "[SQL] Query Result, nRows = {}, SQL = {}".format( - nRows, sql)) - return nRows - # results are in: return self._tdSql.queryResult - - def getQueryResult(self): - return self._tdSql.queryResult - - def getResultRows(self): - return self._tdSql.queryRows - - def getResultCols(self): - return self._tdSql.queryCols - - class AnyState: STATE_INVALID = -1 STATE_EMPTY = 0 # nothing there, no even a DB @@ -1232,7 +869,7 @@ class StateMechine: def init(self, dbc: DbConn): # late initailization, don't save the dbConn self._curState = self._findCurrentState(dbc) # starting state - logger.debug("Found Starting State: {}".format(self._curState)) + Logging.debug("Found Starting State: {}".format(self._curState)) # TODO: seems no lnoger used, remove? def getCurrentState(self): @@ -1270,7 +907,7 @@ class StateMechine: raise RuntimeError( "No suitable task types found for state: {}".format( self._curState)) - logger.debug( + Logging.debug( "[OPS] Tasks found for state {}: {}".format( self._curState, typesToStrings(taskTypes))) @@ -1280,27 +917,27 @@ class StateMechine: ts = time.time() # we use this to debug how fast/slow it is to do the various queries to find the current DB state dbName =self._db.getName() if not dbc.existsDatabase(dbName): # dbc.hasDatabases(): # no database?! 
- logger.debug( "[STT] empty database found, between {} and {}".format(ts, time.time())) + Logging.debug( "[STT] empty database found, between {} and {}".format(ts, time.time())) return StateEmpty() # did not do this when openning connection, and this is NOT the worker # thread, which does this on their own dbc.use(dbName) if not dbc.hasTables(): # no tables - logger.debug("[STT] DB_ONLY found, between {} and {}".format(ts, time.time())) + Logging.debug("[STT] DB_ONLY found, between {} and {}".format(ts, time.time())) return StateDbOnly() sTable = self._db.getFixedSuperTable() if sTable.hasRegTables(dbc, dbName): # no regular tables - logger.debug("[STT] SUPER_TABLE_ONLY found, between {} and {}".format(ts, time.time())) + Logging.debug("[STT] SUPER_TABLE_ONLY found, between {} and {}".format(ts, time.time())) return StateSuperTableOnly() else: # has actual tables - logger.debug("[STT] HAS_DATA found, between {} and {}".format(ts, time.time())) + Logging.debug("[STT] HAS_DATA found, between {} and {}".format(ts, time.time())) return StateHasData() # We transition the system to a new state by examining the current state itself def transition(self, tasks, dbc: DbConn): if (len(tasks) == 0): # before 1st step, or otherwise empty - logger.debug("[STT] Starting State: {}".format(self._curState)) + Logging.debug("[STT] Starting State: {}".format(self._curState)) return # do nothing # this should show up in the server log, separating steps @@ -1336,7 +973,7 @@ class StateMechine: # Nothing for sure newState = self._findCurrentState(dbc) - logger.debug("[STT] New DB state determined: {}".format(newState)) + Logging.debug("[STT] New DB state determined: {}".format(newState)) # can old state move to new state through the tasks? self._curState.verifyTasksToState(tasks, newState) self._curState = newState @@ -1354,7 +991,7 @@ class StateMechine: # read data task, default to 10: TODO: change to a constant weights.append(10) i = self._weighted_choice_sub(weights) - # logger.debug(" (weighted random:{}/{}) ".format(i, len(taskTypes))) + # Logging.debug(" (weighted random:{}/{}) ".format(i, len(taskTypes))) return taskTypes[i] # ref: @@ -1372,6 +1009,8 @@ class Database: possibly in a cluster environment. 
For now we use it to manage state transitions in that database + + TODO: consider moving, but keep in mind it contains "StateMachine" ''' _clsLock = threading.Lock() # class wide lock _lastInt = 101 # next one is initial integer @@ -1433,17 +1072,18 @@ class Database: t3 = datetime.datetime(2012, 1, 1) # default "keep" is 10 years t4 = datetime.datetime.fromtimestamp( t3.timestamp() + elSec2) # see explanation above - logger.info("Setting up TICKS to start from: {}".format(t4)) + Logging.debug("Setting up TICKS to start from: {}".format(t4)) return t4 @classmethod def getNextTick(cls): with cls._clsLock: # prevent duplicate tick - if cls._lastLaggingTick==0: + if cls._lastLaggingTick==0 or cls._lastTick==0 : # not initialized # 10k at 1/20 chance, should be enough to avoid overlaps - cls._lastLaggingTick = cls.setupLastTick() + datetime.timedelta(0, -10000) - if cls._lastTick==0: # should be quite a bit into the future - cls._lastTick = cls.setupLastTick() + tick = cls.setupLastTick() + cls._lastTick = tick + cls._lastLaggingTick = tick + datetime.timedelta(0, -10000) + # if : # should be quite a bit into the future if Dice.throw(20) == 0: # 1 in 20 chance, return lagging tick cls._lastLaggingTick += datetime.timedelta(0, 1) # Go back in time 100 seconds @@ -1468,64 +1108,6 @@ class Database: return ret -class DbManager(): - ''' This is a wrapper around DbConn(), to make it easier to use. - - TODO: rename this to DbConnManager - ''' - def __init__(self): - self.tableNumQueue = LinearQueue() # TODO: delete? - # self.openDbServerConnection() - self._dbConn = DbConn.createNative() if ( - gConfig.connector_type == 'native') else DbConn.createRest() - try: - self._dbConn.open() # may throw taos.error.ProgrammingError: disconnected - except taos.error.ProgrammingError as err: - # print("Error type: {}, msg: {}, value: {}".format(type(err), err.msg, err)) - if (err.msg == 'client disconnected'): # cannot open DB connection - print( - "Cannot establish DB connection, please re-run script without parameter, and follow the instructions.") - sys.exit(2) - else: - print("Failed to connect to DB, errno = {}, msg: {}" - .format(Helper.convertErrno(err.errno), err.msg)) - raise - except BaseException: - print("[=] Unexpected exception") - raise - - # Do this after dbConn is in proper shape - # Moved to Database() - # self._stateMachine = StateMechine(self._dbConn) - - def getDbConn(self): - return self._dbConn - - # TODO: not used any more, to delete - def pickAndAllocateTable(self): # pick any table, and "use" it - return self.tableNumQueue.pickAndAllocate() - - # TODO: Not used any more, to delete - def addTable(self): - with self._lock: - tIndex = self.tableNumQueue.push() - return tIndex - - # Not used any more, to delete - def releaseTable(self, i): # return the table back, so others can use it - self.tableNumQueue.release(i) - - # TODO: not used any more, delete - def getTableNameToDelete(self): - tblNum = self.tableNumQueue.pop() # TODO: race condition! 
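[Editor's note] The getNextTick() change above initializes the normal and "lagging" ticks together and then, roughly once in twenty calls, hands out a slightly out-of-order timestamp. A hedged, standalone sketch of that idea (using random.randrange in place of the script's Dice helper):

import datetime
import random
import threading

class TickSource:
    """Mostly-increasing timestamps, with an occasional out-of-order ("lagging") tick."""
    _lock = threading.Lock()
    _last_tick = None
    _last_lagging_tick = None

    @classmethod
    def next_tick(cls):
        with cls._lock:  # prevent two threads from getting the same tick
            if cls._last_tick is None:  # initialize both ticks together, one time
                base = datetime.datetime(2012, 1, 1)
                cls._last_tick = base
                cls._last_lagging_tick = base - datetime.timedelta(seconds=10000)
            if random.randrange(20) == 0:  # ~1 in 20: return a lagging tick
                cls._last_lagging_tick += datetime.timedelta(seconds=1)
                return cls._last_lagging_tick
            cls._last_tick += datetime.timedelta(seconds=1)
            return cls._last_tick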
- if (not tblNum): # maybe false - return False - - return "table_{}".format(tblNum) - - def cleanUp(self): - self._dbConn.close() - class TaskExecutor(): class BoundedList: def __init__(self, size=10): @@ -1584,10 +1166,10 @@ class TaskExecutor(): self._boundedList.add(n) # def logInfo(self, msg): - # logger.info(" T[{}.x]: ".format(self._curStep) + msg) + # Logging.info(" T[{}.x]: ".format(self._curStep) + msg) # def logDebug(self, msg): - # logger.debug(" T[{}.x]: ".format(self._curStep) + msg) + # Logging.debug(" T[{}.x]: ".format(self._curStep) + msg) class Task(): @@ -1596,27 +1178,31 @@ class Task(): instead. But a task is always associated with a DB ''' taskSn = 100 + _lock = threading.Lock() + _tableLocks: Dict[str, threading.Lock] = {} @classmethod def allocTaskNum(cls): Task.taskSn += 1 # IMPORTANT: cannot use cls.taskSn, since each sub class will have a copy - # logger.debug("Allocating taskSN: {}".format(Task.taskSn)) + # Logging.debug("Allocating taskSN: {}".format(Task.taskSn)) return Task.taskSn def __init__(self, execStats: ExecutionStats, db: Database): self._workerThread = None - self._err = None # type: Exception + self._err: Optional[Exception] = None self._aborted = False self._curStep = None self._numRows = None # Number of rows affected # Assign an incremental task serial number self._taskNum = self.allocTaskNum() - # logger.debug("Creating new task {}...".format(self._taskNum)) + # Logging.debug("Creating new task {}...".format(self._taskNum)) self._execStats = execStats self._db = db # A task is always associated/for a specific DB + + def isSuccess(self): return self._err is None @@ -1645,15 +1231,23 @@ class Task(): "To be implemeted by child classes, class name: {}".format( self.__class__.__name__)) + def _isServiceStable(self): + if not gSvcMgr: + return True # we don't run service, so let's assume it's stable + return gSvcMgr.isStable() # otherwise let's examine the service + def _isErrAcceptable(self, errno, msg): if errno in [ 0x05, # TSDB_CODE_RPC_NOT_READY 0x0B, # Unable to establish connection, more details in TD-1648 - # 0x200, # invalid SQL, TODO: re-examine with TD-934 + 0x200, # invalid SQL, TODO: re-examine with TD-934 + 0x20F, # query terminated, possibly due to vnoding being dropped, see TD-1776 + 0x213, # "Disconnected from service", result of "kill connection ???" 0x217, # "db not selected", client side defined error code - 0x218, # "Table does not exist" client side defined error code - 0x360, 0x362, - 0x369, # tag already exists + # 0x218, # "Table does not exist" client side defined error code + 0x360, # Table already exists + 0x362, + # 0x369, # tag already exists 0x36A, 0x36B, 0x36D, 0x381, 0x380, # "db not selected" @@ -1662,12 +1256,17 @@ class Task(): 0x503, 0x510, # vnode not in ready state 0x14, # db not ready, errno changed - 0x600, + 0x600, # Invalid table ID, why? 1000 # REST catch-all error ]: return True # These are the ALWAYS-ACCEPTABLE ones - elif (errno in [ 0x0B ]) and gConfig.auto_start_service: - return True # We may get "network unavilable" when restarting service + # This case handled below already. 
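[Editor's note] Task now keeps a class-level dictionary of per-table locks, created lazily under a single registry lock, so that work on unrelated tables never serializes on one global lock. A small illustrative sketch of that registry pattern, not the exact class in the diff:

import threading
from typing import Dict

class TableLocks:
    """Lazily-created, process-wide lock per fully-qualified table name (sketch)."""
    _registry_lock = threading.Lock()
    _locks: Dict[str, threading.Lock] = {}

    @classmethod
    def acquire(cls, full_table_name: str):
        with cls._registry_lock:          # guard the dict itself
            lock = cls._locks.setdefault(full_table_name, threading.Lock())
        lock.acquire()                    # then block on the per-table lock

    @classmethod
    def release(cls, full_table_name: str):
        with cls._registry_lock:
            lock = cls._locks.get(full_table_name)
        if lock is None or not lock.locked():
            raise RuntimeError("release() without a matching acquire()")
        lock.release()

# Typical use around a critical table operation:
# TableLocks.acquire("db.reg_table_7")
# try:
#     ...  # create/insert/verify against db.reg_table_7
# finally:
#     TableLocks.release("db.reg_table_7")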
+ # elif (errno in [ 0x0B ]) and gConfig.auto_start_service: + # return True # We may get "network unavilable" when restarting service + elif gConfig.ignore_errors: # something is specified on command line + moreErrnos = [int(v, 0) for v in gConfig.ignore_errors.split(',')] + if errno in moreErrnos: + return True elif errno == 0x200 : # invalid SQL, we need to div in a bit more if msg.find("invalid column name") != -1: return True @@ -1675,8 +1274,8 @@ class Task(): return True elif msg.find("duplicated column names") != -1: # also alter table tag issues return True - elif (gSvcMgr!=None) and gSvcMgr.isRestarting(): - logger.info("Ignoring error when service is restarting: errno = {}, msg = {}".format(errno, msg)) + elif not self._isServiceStable(): # We are managing service, and ... + Logging.info("Ignoring error when service starting/stopping: errno = {}, msg = {}".format(errno, msg)) return True return False # Not an acceptable error @@ -1725,7 +1324,7 @@ class Task(): self._err = err self._aborted = True except Exception as e: - self.logInfo("Non-TAOS exception encountered") + Logging.info("Non-TAOS exception encountered with: {}".format(self.__class__.__name__)) self._err = e self._aborted = True traceback.print_exc() @@ -1735,10 +1334,11 @@ class Task(): self._aborted = True traceback.print_exc() except BaseException: # TODO: what is this again??!! - self.logDebug( - "[=] Unexpected exception, SQL: {}".format( - wt.getDbConn().getLastSql())) - raise + raise RuntimeError("Punt") + # self.logDebug( + # "[=] Unexpected exception, SQL: {}".format( + # wt.getDbConn().getLastSql())) + # raise self._execStats.endTaskType(self.__class__.__name__, self.isSuccess()) self.logDebug("[X] task execution completed, {}, status: {}".format( @@ -1757,6 +1357,24 @@ class Task(): def getQueryResult(self, wt: WorkerThread): # execute an SQL on the worker thread return wt.getQueryResult() + def lockTable(self, ftName): # full table name + # print(" <<" + ftName + '_', end="", flush=True) + with Task._lock: + if not ftName in Task._tableLocks: + Task._tableLocks[ftName] = threading.Lock() + + Task._tableLocks[ftName].acquire() + + def unlockTable(self, ftName): + # print('_' + ftName + ">> ", end="", flush=True) + with Task._lock: + if not ftName in self._tableLocks: + raise RuntimeError("Corrupt state, no such lock") + lock = Task._tableLocks[ftName] + if not lock.locked(): + raise RuntimeError("Corrupte state, already unlocked") + lock.release() + class ExecutionStats: def __init__(self): @@ -1817,14 +1435,14 @@ class ExecutionStats: self._failureReason = reason def printStats(self): - logger.info( + Logging.info( "----------------------------------------------------------------------") - logger.info( + Logging.info( "| Crash_Gen test {}, with the following stats:". 
format( "FAILED (reason: {})".format( self._failureReason) if self._failed else "SUCCEEDED")) - logger.info("| Task Execution Times (success/total):") - execTimesAny = 0 + Logging.info("| Task Execution Times (success/total):") + execTimesAny = 0.0 for k, n in self._execTimes.items(): execTimesAny += n[0] errStr = None @@ -1834,28 +1452,28 @@ class ExecutionStats: errStrs = ["0x{:X}:{}".format(eno, n) for (eno, n) in errors.items()] # print("error strings = {}".format(errStrs)) errStr = ", ".join(errStrs) - logger.info("| {0:<24}: {1}/{2} (Errors: {3})".format(k, n[1], n[0], errStr)) + Logging.info("| {0:<24}: {1}/{2} (Errors: {3})".format(k, n[1], n[0], errStr)) - logger.info( + Logging.info( "| Total Tasks Executed (success or not): {} ".format(execTimesAny)) - logger.info( + Logging.info( "| Total Tasks In Progress at End: {}".format( self._tasksInProgress)) - logger.info( + Logging.info( "| Total Task Busy Time (elapsed time when any task is in progress): {:.3f} seconds".format( self._accRunTime)) - logger.info( + Logging.info( "| Average Per-Task Execution Time: {:.3f} seconds".format(self._accRunTime / execTimesAny)) - logger.info( + Logging.info( "| Total Elapsed Time (from wall clock): {:.3f} seconds".format( self._elapsedTime)) - logger.info("| Top numbers written: {}".format(TaskExecutor.getBoundedList())) - logger.info("| Active DB Native Connections (now): {}".format(DbConnNative.totalConnections)) - logger.info("| Longest native query time: {:.3f} seconds, started: {}". + Logging.info("| Top numbers written: {}".format(TaskExecutor.getBoundedList())) + Logging.info("| Active DB Native Connections (now): {}".format(DbConnNative.totalConnections)) + Logging.info("| Longest native query time: {:.3f} seconds, started: {}". format(MyTDSql.longestQueryTime, time.strftime("%x %X", time.localtime(MyTDSql.lqStartTime))) ) - logger.info("| Longest native query: {}".format(MyTDSql.longestQuery)) - logger.info( + Logging.info("| Longest native query: {}".format(MyTDSql.longestQuery)) + Logging.info( "----------------------------------------------------------------------") @@ -1865,11 +1483,14 @@ class StateTransitionTask(Task): LARGE_NUMBER_OF_RECORDS = 50 SMALL_NUMBER_OF_RECORDS = 3 + _baseTableNumber = None + + _endState = None # TODO: no longter used? + @classmethod def getInfo(cls): # each sub class should supply their own information raise RuntimeError("Overriding method expected") - - _endState = None + @classmethod def getEndState(cls): # TODO: optimize by calling it fewer times raise RuntimeError("Overriding method expected") @@ -1889,7 +1510,10 @@ class StateTransitionTask(Task): @classmethod def getRegTableName(cls, i): - return "reg_table_{}".format(i) + if ( StateTransitionTask._baseTableNumber is None): # Set it one time + StateTransitionTask._baseTableNumber = Dice.throw( + 999) if gConfig.dynamic_db_table_names else 0 + return "reg_table_{}".format(StateTransitionTask._baseTableNumber + i) def execute(self, wt: WorkerThread): super().execute(wt) @@ -1909,7 +1533,8 @@ class TaskCreateDb(StateTransitionTask): # was: self.execWtSql(wt, "create database db") repStr = "" if gConfig.max_replicas != 1: - numReplica = Dice.throw(gConfig.max_replicas) + 1 # 1,2 ... N + # numReplica = Dice.throw(gConfig.max_replicas) + 1 # 1,2 ... 
N + numReplica = gConfig.max_replicas # fixed, always repStr = "replica {}".format(numReplica) self.execWtSql(wt, "create database {} {}" .format(self._db.getName(), repStr) ) @@ -1925,7 +1550,7 @@ class TaskDropDb(StateTransitionTask): def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): self.execWtSql(wt, "drop database {}".format(self._db.getName())) - logger.debug("[OPS] database dropped at {}".format(time.time())) + Logging.debug("[OPS] database dropped at {}".format(time.time())) class TaskCreateSuperTable(StateTransitionTask): @classmethod @@ -1938,13 +1563,16 @@ class TaskCreateSuperTable(StateTransitionTask): def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): if not self._db.exists(wt.getDbConn()): - logger.debug("Skipping task, no DB yet") + Logging.debug("Skipping task, no DB yet") return sTable = self._db.getFixedSuperTable() # type: TdSuperTable # wt.execSql("use db") # should always be in place + sTable.create(wt.getDbConn(), self._db.getName(), - {'ts':'timestamp', 'speed':'int'}, {'b':'binary(200)', 'f':'float'}) + {'ts':'timestamp', 'speed':'int'}, {'b':'binary(200)', 'f':'float'}, + dropIfExists = True + ) # self.execWtSql(wt,"create table db.{} (ts timestamp, speed int) tags (b binary(200), f float) ".format(tblName)) # No need to create the regular tables, INSERT will do that # automatically @@ -1957,14 +1585,41 @@ class TdSuperTable: def getName(self): return self._stName + def drop(self, dbc, dbName, skipCheck = False): + if self.exists(dbc, dbName) : # if myself exists + fullTableName = dbName + '.' + self._stName + dbc.execute("DROP TABLE {}".format(fullTableName)) + else: + if not skipCheck: + raise CrashGenError("Cannot drop non-existant super table: {}".format(self._stName)) + + def exists(self, dbc, dbName): + dbc.execute("USE " + dbName) + return dbc.existsSuperTable(self._stName) + # TODO: odd semantic, create() method is usually static? - def create(self, dbc, dbName, cols: dict, tags: dict): + def create(self, dbc, dbName, cols: dict, tags: dict, + dropIfExists = False + ): + '''Creating a super table''' - sql = "CREATE TABLE {}.{} ({}) TAGS ({})".format( - dbName, - self._stName, - ",".join(['%s %s'%(k,v) for (k,v) in cols.items()]), - ",".join(['%s %s'%(k,v) for (k,v) in tags.items()]) + dbc.execute("USE " + dbName) + fullTableName = dbName + '.' 
+ self._stName + if dbc.existsSuperTable(self._stName): + if dropIfExists: + dbc.execute("DROP TABLE {}".format(fullTableName)) + else: # error + raise CrashGenError("Cannot create super table, already exists: {}".format(self._stName)) + + # Now let's create + sql = "CREATE TABLE {} ({})".format( + fullTableName, + ",".join(['%s %s'%(k,v) for (k,v) in cols.items()])) + if tags is None : + sql += " TAGS (dummy int) " + else: + sql += " TAGS ({})".format( + ",".join(['%s %s'%(k,v) for (k,v) in tags.items()]) ) dbc.execute(sql) @@ -1973,7 +1628,7 @@ class TdSuperTable: dbc.query("select TBNAME from {}.{}".format(dbName, self._stName)) # TODO: analyze result set later except taos.error.ProgrammingError as err: errno2 = Helper.convertErrno(err.errno) - logger.debug("[=] Failed to get tables from super table: errno=0x{:X}, msg: {}".format(errno2, err)) + Logging.debug("[=] Failed to get tables from super table: errno=0x{:X}, msg: {}".format(errno2, err)) raise qr = dbc.getQueryResult() @@ -1982,14 +1637,25 @@ class TdSuperTable: def hasRegTables(self, dbc: DbConn, dbName: str): return dbc.query("SELECT * FROM {}.{}".format(dbName, self._stName)) > 0 - def ensureTable(self, dbc: DbConn, dbName: str, regTableName: str): + def ensureTable(self, task: Task, dbc: DbConn, dbName: str, regTableName: str): sql = "select tbname from {}.{} where tbname in ('{}')".format(dbName, self._stName, regTableName) if dbc.query(sql) >= 1 : # reg table exists already return - sql = "CREATE TABLE {}.{} USING {}.{} tags ({})".format( - dbName, regTableName, dbName, self._stName, self._getTagStrForSql(dbc, dbName) - ) - dbc.execute(sql) + + # acquire a lock first, so as to be able to *verify*. More details in TD-1471 + fullTableName = dbName + '.' + regTableName + if task is not None: # optional lock + task.lockTable(fullTableName) + Progress.emit(Progress.CREATE_TABLE_ATTEMPT) # ATTEMPT to create a new table + # print("(" + fullTableName[-3:] + ")", end="", flush=True) + try: + sql = "CREATE TABLE {} USING {}.{} tags ({})".format( + fullTableName, dbName, self._stName, self._getTagStrForSql(dbc, dbName) + ) + dbc.execute(sql) + finally: + if task is not None: + task.unlockTable(fullTableName) # no matter what def _getTagStrForSql(self, dbc, dbName: str) : tags = self._getTags(dbc, dbName) @@ -2045,15 +1711,39 @@ class TaskReadData(StateTransitionTask): def canBeginFrom(cls, state: AnyState): return state.canReadData() + # def _canRestartService(self): + # if not gSvcMgr: + # return True # always + # return gSvcMgr.isActive() # only if it's running TODO: race condition here + def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): sTable = self._db.getFixedSuperTable() - # 1 in 5 chance, simulate a broken connection. 
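[Editor's note] ensureTable() above wraps the "check, then create the child table" sequence in a per-table lock and releases it in a finally block, so a concurrent drop or create cannot slip in between the existence check and the CREATE. A generic, hedged sketch of that create-if-missing-under-lock shape (exists()/create() stand in for the real SQL calls, and a single lock stands in for the per-table registry):

import threading

_table_lock = threading.Lock()  # in the script this is one lock per table name

def ensure_table(exists, create, full_table_name):
    """Create the table only if it is missing, holding the lock for the whole check+create."""
    if exists(full_table_name):           # fast path, no lock needed
        return
    _table_lock.acquire()
    try:
        if not exists(full_table_name):   # re-check now that we hold the lock
            create(full_table_name)
    finally:
        _table_lock.release()             # released no matter what create() does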
- if random.randrange(5) == 0: # TODO: break connection in all situations - wt.getDbConn().close() - wt.getDbConn().open() - print("_r", end="", flush=True) - + # 1 in 5 chance, simulate a broken connection, only if service stable (not restarting) + if random.randrange(20)==0: # and self._canRestartService(): # TODO: break connection in all situations + # Logging.info("Attempting to reconnect to server") # TODO: change to DEBUG + Progress.emit(Progress.SERVICE_RECONNECT_START) + try: + wt.getDbConn().close() + wt.getDbConn().open() + except ConnectionError as err: # may fail + if not gSvcMgr: + Logging.error("Failed to reconnect in client-only mode") + raise # Not OK if we are running in client-only mode + if gSvcMgr.isRunning(): # may have race conditon, but low prob, due to + Logging.error("Failed to reconnect when managed server is running") + raise # Not OK if we are running normally + + Progress.emit(Progress.SERVICE_RECONNECT_FAILURE) + # Logging.info("Ignoring DB reconnect error") + + # print("_r", end="", flush=True) + Progress.emit(Progress.SERVICE_RECONNECT_SUCCESS) + # The above might have taken a lot of time, service might be running + # by now, causing error below to be incorrectly handled due to timing issue + return # TODO: fix server restart status race condtion + + dbc = wt.getDbConn() dbName = self._db.getName() for rTbName in sTable.getRegTables(dbc, dbName): # regular tables @@ -2088,7 +1778,7 @@ class TaskReadData(StateTransitionTask): dbc.execute("select {} from {}.{}".format(aggExpr, dbName, sTable.getName())) except taos.error.ProgrammingError as err: errno2 = Helper.convertErrno(err.errno) - logger.debug("[=] Read Failure: errno=0x{:X}, msg: {}, SQL: {}".format(errno2, err, dbc.getLastSql())) + Logging.debug("[=] Read Failure: errno=0x{:X}, msg: {}, SQL: {}".format(errno2, err, dbc.getLastSql())) raise class TaskDropSuperTable(StateTransitionTask): @@ -2119,7 +1809,7 @@ class TaskDropSuperTable(StateTransitionTask): errno2 = Helper.convertErrno(err.errno) if (errno2 in [0x362]): # mnode invalid table name isSuccess = False - logger.debug("[DB] Acceptable error when dropping a table") + Logging.debug("[DB] Acceptable error when dropping a table") continue # try to delete next regular table if (not tickOutput): @@ -2184,7 +1874,7 @@ class TaskRestartService(StateTransitionTask): with self._classLock: if self._isRunning: - print("Skipping restart task, another running already") + Logging.info("Skipping restart task, another running already") return self._isRunning = True @@ -2199,20 +1889,19 @@ class TaskAddData(StateTransitionTask): # Track which table is being actively worked on activeTable: Set[int] = set() - # We use these two files to record operations to DB, useful for power-off - # tests - fAddLogReady = None - fAddLogDone = None + # We use these two files to record operations to DB, useful for power-off tests + fAddLogReady = None # type: TextIOWrapper + fAddLogDone = None # type: TextIOWrapper @classmethod def prepToRecordOps(cls): if gConfig.record_ops: if (cls.fAddLogReady is None): - logger.info( + Logging.info( "Recording in a file operations to be performed...") cls.fAddLogReady = open("add_log_ready.txt", "w") if (cls.fAddLogDone is None): - logger.info("Recording in a file operations completed...") + Logging.info("Recording in a file operations completed...") cls.fAddLogDone = open("add_log_done.txt", "w") @classmethod @@ -2223,13 +1912,88 @@ class TaskAddData(StateTransitionTask): def canBeginFrom(cls, state: AnyState): return state.canAddData() + def 
_addDataInBatch(self, db, dbc, regTableName, te: TaskExecutor): + numRecords = self.LARGE_NUMBER_OF_RECORDS if gConfig.larger_data else self.SMALL_NUMBER_OF_RECORDS + fullTableName = db.getName() + '.' + regTableName + + sql = "insert into {} values ".format(fullTableName) + for j in range(numRecords): # number of records per table + nextInt = db.getNextInt() + nextTick = db.getNextTick() + sql += "('{}', {});".format(nextTick, nextInt) + dbc.execute(sql) + + def _addData(self, db, dbc, regTableName, te: TaskExecutor): # implied: NOT in batches + numRecords = self.LARGE_NUMBER_OF_RECORDS if gConfig.larger_data else self.SMALL_NUMBER_OF_RECORDS + + for j in range(numRecords): # number of records per table + nextInt = db.getNextInt() + nextTick = db.getNextTick() + if gConfig.record_ops: + self.prepToRecordOps() + self.fAddLogReady.write("Ready to write {} to {}\n".format(nextInt, regTableName)) + self.fAddLogReady.flush() + os.fsync(self.fAddLogReady) + + # TODO: too ugly trying to lock the table reliably, refactor... + fullTableName = db.getName() + '.' + regTableName + if gConfig.verify_data: + self.lockTable(fullTableName) + # print("_w" + str(nextInt % 100), end="", flush=True) # Trace what was written + + try: + sql = "insert into {} values ('{}', {});".format( # removed: tags ('{}', {}) + fullTableName, + # ds.getFixedSuperTableName(), + # ds.getNextBinary(), ds.getNextFloat(), + nextTick, nextInt) + dbc.execute(sql) + except: # Any exception at all + if gConfig.verify_data: + self.unlockTable(fullTableName) + raise + + # Now read it back and verify, we might encounter an error if table is dropped + if gConfig.verify_data: # only if command line asks for it + try: + readBack = dbc.queryScalar("SELECT speed from {}.{} WHERE ts='{}'". + format(db.getName(), regTableName, nextTick)) + if readBack != nextInt : + raise taos.error.ProgrammingError( + "Failed to read back same data, wrote: {}, read: {}" + .format(nextInt, readBack), 0x999) + except taos.error.ProgrammingError as err: + errno = Helper.convertErrno(err.errno) + if errno in [0x991, 0x992] : # not a single result + raise taos.error.ProgrammingError( + "Failed to read back same data for tick: {}, wrote: {}, read: {}" + .format(nextTick, nextInt, "Empty Result" if errno==0x991 else "Multiple Result"), + errno) + elif errno in [0x218, 0x362]: # table doesn't exist + # do nothing + dummy = 0 + else: + # Re-throw otherwise + raise + finally: + self.unlockTable(fullTableName) # Unlock the table no matter what + + # Successfully wrote the data into the DB, let's record it somehow + te.recordDataMark(nextInt) + + if gConfig.record_ops: + self.fAddLogDone.write("Wrote {} to {}\n".format(nextInt, regTableName)) + self.fAddLogDone.flush() + os.fsync(self.fAddLogDone) + def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): # ds = self._dbManager # Quite DANGEROUS here, may result in multi-thread client access db = self._db dbc = wt.getDbConn() - tblSeq = list(range( - self.LARGE_NUMBER_OF_TABLES if gConfig.larger_data else self.SMALL_NUMBER_OF_TABLES)) - random.shuffle(tblSeq) + numTables = self.LARGE_NUMBER_OF_TABLES if gConfig.larger_data else self.SMALL_NUMBER_OF_TABLES + numRecords = self.LARGE_NUMBER_OF_RECORDS if gConfig.larger_data else self.SMALL_NUMBER_OF_RECORDS + tblSeq = list(range(numTables )) + random.shuffle(tblSeq) # now we have random sequence for i in tblSeq: if (i in self.activeTable): # wow already active print("x", end="", flush=True) # concurrent insertion @@ -2237,541 +2001,19 @@ class 
TaskAddData(StateTransitionTask): self.activeTable.add(i) # marking it active sTable = db.getFixedSuperTable() - regTableName = self.getRegTableName(i) # "db.reg_table_{}".format(i) - sTable.ensureTable(wt.getDbConn(), db.getName(), regTableName) # Ensure the table exists + regTableName = self.getRegTableName(i) # "db.reg_table_{}".format(i) + fullTableName = db.getName() + '.' + regTableName + # self._lockTable(fullTableName) # "create table" below. Stop it if the table is "locked" + sTable.ensureTable(self, wt.getDbConn(), db.getName(), regTableName) # Ensure the table exists + # self._unlockTable(fullTableName) - for j in range(self.LARGE_NUMBER_OF_RECORDS if gConfig.larger_data else self.SMALL_NUMBER_OF_RECORDS): # number of records per table - nextInt = db.getNextInt() - nextTick = db.getNextTick() - if gConfig.record_ops: - self.prepToRecordOps() - self.fAddLogReady.write("Ready to write {} to {}\n".format(nextInt, regTableName)) - self.fAddLogReady.flush() - os.fsync(self.fAddLogReady) - sql = "insert into {}.{} values ('{}', {});".format( # removed: tags ('{}', {}) - db.getName(), - regTableName, - # ds.getFixedSuperTableName(), - # ds.getNextBinary(), ds.getNextFloat(), - nextTick, nextInt) - dbc.execute(sql) - # Successfully wrote the data into the DB, let's record it - # somehow - te.recordDataMark(nextInt) - if gConfig.record_ops: - self.fAddLogDone.write( - "Wrote {} to {}\n".format( - nextInt, regTableName)) - self.fAddLogDone.flush() - os.fsync(self.fAddLogDone) - - # Now read it back and verify, we might encounter an error if table is dropped - if gConfig.verify_data: # only if command line asks for it - try: - readBack = dbc.queryScalar("SELECT speed from {}.{} WHERE ts= '{}'". - format(db.getName(), regTableName, nextTick)) - if readBack != nextInt : - raise taos.error.ProgrammingError( - "Failed to read back same data, wrote: {}, read: {}" - .format(nextInt, readBack), 0x999) - except taos.error.ProgrammingError as err: - errno = Helper.convertErrno(err.errno) - if errno in [0x991, 0x992] : # not a single result - raise taos.error.ProgrammingError( - "Failed to read back same data for tick: {}, wrote: {}, read: {}" - .format(nextTick, nextInt, "Empty Result" if errno==0x991 else "Multiple Result"), - errno) - # Re-throw no matter what - raise - - - self.activeTable.discard(i) # not raising an error, unlike remove - - -# Deterministic random number generator -class Dice(): - seeded = False # static, uninitialized - - @classmethod - def seed(cls, s): # static - if (cls.seeded): - raise RuntimeError( - "Cannot seed the random generator more than once") - cls.verifyRNG() - random.seed(s) - cls.seeded = True # TODO: protect against multi-threading - - @classmethod - def verifyRNG(cls): # Verify that the RNG is determinstic - random.seed(0) - x1 = random.randrange(0, 1000) - x2 = random.randrange(0, 1000) - x3 = random.randrange(0, 1000) - if (x1 != 864 or x2 != 394 or x3 != 776): - raise RuntimeError("System RNG is not deterministic") - - @classmethod - def throw(cls, stop): # get 0 to stop-1 - return cls.throwRange(0, stop) - - @classmethod - def throwRange(cls, start, stop): # up to stop-1 - if (not cls.seeded): - raise RuntimeError("Cannot throw dice before seeding it") - return random.randrange(start, stop) - - @classmethod - def choice(cls, cList): - return random.choice(cList) - - -class LoggingFilter(logging.Filter): - def filter(self, record: logging.LogRecord): - if (record.levelno >= logging.INFO): - return True # info or above always log - - # Commenting out below 
to adjust... - - # if msg.startswith("[TRD]"): - # return False - return True - - -class MyLoggingAdapter(logging.LoggerAdapter): - def process(self, msg, kwargs): - return "[{}]{}".format(threading.get_ident() % 10000, msg), kwargs - # return '[%s] %s' % (self.extra['connid'], msg), kwargs - - -class SvcManager: - def __init__(self): - print("Starting TDengine Service Manager") - # signal.signal(signal.SIGTERM, self.sigIntHandler) # Moved to MainExec - # signal.signal(signal.SIGINT, self.sigIntHandler) - # signal.signal(signal.SIGUSR1, self.sigUsrHandler) # different handler! - - self.inSigHandler = False - # self._status = MainExec.STATUS_RUNNING # set inside - # _startTaosService() - self.svcMgrThread = None # type: ServiceManagerThread - self._lock = threading.Lock() - self._isRestarting = False - - def _doMenu(self): - choice = "" - while True: - print("\nInterrupting Service Program, Choose an Action: ") - print("1: Resume") - print("2: Terminate") - print("3: Restart") - # Remember to update the if range below - # print("Enter Choice: ", end="", flush=True) - while choice == "": - choice = input("Enter Choice: ") - if choice != "": - break # done with reading repeated input - if choice in ["1", "2", "3"]: - break # we are done with whole method - print("Invalid choice, please try again.") - choice = "" # reset - return choice - - def sigUsrHandler(self, signalNumber, frame): - print("Interrupting main thread execution upon SIGUSR1") - if self.inSigHandler: # already - print("Ignoring repeated SIG...") - return # do nothing if it's already not running - self.inSigHandler = True - - choice = self._doMenu() - if choice == "1": - # TODO: can the sub-process be blocked due to us not reading from - # queue? - self.sigHandlerResume() - elif choice == "2": - self.stopTaosService() - elif choice == "3": # Restart - self.restart() - else: - raise RuntimeError("Invalid menu choice: {}".format(choice)) - - self.inSigHandler = False - - def sigIntHandler(self, signalNumber, frame): - print("SvcManager: INT Signal Handler starting...") - if self.inSigHandler: - print("Ignoring repeated SIG_INT...") - return - self.inSigHandler = True - - self.stopTaosService() - print("SvcManager: INT Signal Handler returning...") - self.inSigHandler = False - - def sigHandlerResume(self): - print("Resuming TDengine service manager thread (main thread)...\n\n") - - def _checkServiceManagerThread(self): - if self.svcMgrThread: # valid svc mgr thread - if self.svcMgrThread.isStopped(): # done? - self.svcMgrThread.procIpcBatch() # one last time. TODO: appropriate? - self.svcMgrThread = None # no more - - def _procIpcAll(self): - while self.isRunning() or self.isRestarting() : # for as long as the svc mgr thread is still here - if self.isRunning(): - self.svcMgrThread.procIpcBatch() # regular processing, - self._checkServiceManagerThread() - elif self.isRetarting(): - print("Service restarting...") - time.sleep(0.5) # pause, before next round - print( - "Service Manager Thread (with subprocess) has ended, main thread now exiting...") - - def startTaosService(self): - with self._lock: - if self.svcMgrThread: - raise RuntimeError("Cannot start TAOS service when one may already be running") - - # Find if there's already a taosd service, and then kill it - for proc in psutil.process_iter(): - if proc.name() == 'taosd': - print("Killing an existing TAOSD process in 2 seconds... 
press CTRL-C to interrupe") - time.sleep(2.0) - proc.kill() - # print("Process: {}".format(proc.name())) - - - self.svcMgrThread = ServiceManagerThread() # create the object - print("Attempting to start TAOS service started, printing out output...") - self.svcMgrThread.start() - self.svcMgrThread.procIpcBatch(trimToTarget=10, forceOutput=True) # for printing 10 lines - print("TAOS service started") - - def stopTaosService(self, outputLines=20): - with self._lock: - if not self.isRunning(): - logger.warning("Cannot stop TAOS service, not running") - return - - print("Terminating Service Manager Thread (SMT) execution...") - self.svcMgrThread.stop() - if self.svcMgrThread.isStopped(): - self.svcMgrThread.procIpcBatch(outputLines) # one last time - self.svcMgrThread = None - print("End of TDengine Service Output") - print("----- TDengine Service (managed by SMT) is now terminated -----\n") + if Dice.throw(1) == 0: # 1 in 2 chance + self._addData(db, dbc, regTableName, te) else: - print("WARNING: SMT did not terminate as expected") - - def run(self): - self.startTaosService() - self._procIpcAll() # pump/process all the messages, may encounter SIG + restart - if self.isRunning(): # if sig handler hasn't destroyed it by now - self.stopTaosService() # should have started already - - def restart(self): - if self._isRestarting: - logger.warning("Cannot restart service when it's already restarting") - return - - self._isRestarting = True - if self.isRunning(): - self.stopTaosService() - else: - logger.warning("Service not running when restart requested") + self._addDataInBatch(db, dbc, regTableName, te) - self.startTaosService() - self._isRestarting = False - - def isRunning(self): - return self.svcMgrThread != None - - def isRestarting(self): - return self._isRestarting - -class ServiceManagerThread: - MAX_QUEUE_SIZE = 10000 - - def __init__(self): - self._tdeSubProcess = None # type: TdeSubProcess - self._thread = None - self._status = None - - def getStatus(self): - return self._status - - def isRunning(self): - # return self._thread and self._thread.is_alive() - return self._status == MainExec.STATUS_RUNNING - - def isStopping(self): - return self._status == MainExec.STATUS_STOPPING - - def isStopped(self): - return self._status == MainExec.STATUS_STOPPED - - # Start the thread (with sub process), and wait for the sub service - # to become fully operational - def start(self): - if self._thread: - raise RuntimeError("Unexpected _thread") - if self._tdeSubProcess: - raise RuntimeError("TDengine sub process already created/running") - - self._status = MainExec.STATUS_STARTING - - self._tdeSubProcess = TdeSubProcess() - self._tdeSubProcess.start() - - self._ipcQueue = Queue() - self._thread = threading.Thread( # First thread captures server OUTPUT - target=self.svcOutputReader, - args=(self._tdeSubProcess.getStdOut(), self._ipcQueue)) - self._thread.daemon = True # thread dies with the program - self._thread.start() - - self._thread2 = threading.Thread( # 2nd thread captures server ERRORs - target=self.svcErrorReader, - args=(self._tdeSubProcess.getStdErr(), self._ipcQueue)) - self._thread2.daemon = True # thread dies with the program - self._thread2.start() - - # wait for service to start - for i in range(0, 100): - time.sleep(1.0) - # self.procIpcBatch() # don't pump message during start up - print("_zz_", end="", flush=True) - if self._status == MainExec.STATUS_RUNNING: - logger.info("[] TDengine service READY to process requests") - return # now we've started - # TODO: handle this better? 
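[Editor's note] The start() sequence above follows a common pattern: a daemon thread drains the child process's stdout into a Queue (so the pipe never blocks the service), and the main thread polls until a "ready" marker line shows up or a timeout expires. A self-contained sketch of that pattern, with an illustrative command and marker rather than taosd's own:

import subprocess
import threading
import time
from queue import Queue, Empty

READY_MARKER = "service is ready"   # illustrative; the script looks for TDengine's own message

def pump_output(stream, queue):
    """Daemon thread body: push every stdout line into the queue, then close the pipe."""
    for raw in iter(stream.readline, b''):
        queue.put(raw.decode("utf-8", errors="replace").rstrip())
    stream.close()

def start_and_wait(cmd, timeout=100.0):
    """Launch `cmd`, stream its output, and return once the ready marker is seen."""
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    queue: Queue = Queue()
    threading.Thread(target=pump_output, args=(proc.stdout, queue), daemon=True).start()

    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            line = queue.get(timeout=1.0)
        except Empty:
            continue
        if READY_MARKER in line:
            return proc               # service reported ready
    raise RuntimeError("service did not become ready within {:.0f}s".format(timeout))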
- self.procIpcBatch(100, True) # display output before cronking out, trim to last 20 msgs, force output - raise RuntimeError("TDengine service did not start successfully") - - def stop(self): - # can be called from both main thread or signal handler - print("Terminating TDengine service running as the sub process...") - if self.isStopped(): - print("Service already stopped") - return - if self.isStopping(): - print("Service is already being stopped") - return - # Linux will send Control-C generated SIGINT to the TDengine process - # already, ref: - # https://unix.stackexchange.com/questions/176235/fork-and-how-signals-are-delivered-to-processes - if not self._tdeSubProcess: - raise RuntimeError("sub process object missing") - - self._status = MainExec.STATUS_STOPPING - retCode = self._tdeSubProcess.stop() - print("Attempted to stop sub process, got return code: {}".format(retCode)) - if (retCode==-11): # SGV - logger.error("[[--ERROR--]]: TDengine service SEGV fault (check core file!)") - - if self._tdeSubProcess.isRunning(): # still running - print("FAILED to stop sub process, it is still running... pid = {}".format( - self._tdeSubProcess.getPid())) - else: - self._tdeSubProcess = None # not running any more - self.join() # stop the thread, change the status, etc. - - def join(self): - # TODO: sanity check - if not self.isStopping(): - raise RuntimeError( - "Unexpected status when ending svc mgr thread: {}".format( - self._status)) - - if self._thread: - self._thread.join() - self._thread = None - self._status = MainExec.STATUS_STOPPED - # STD ERR thread - self._thread2.join() - self._thread2 = None - else: - print("Joining empty thread, doing nothing") - - def _trimQueue(self, targetSize): - if targetSize <= 0: - return # do nothing - q = self._ipcQueue - if (q.qsize() <= targetSize): # no need to trim - return - - logger.debug("Triming IPC queue to target size: {}".format(targetSize)) - itemsToTrim = q.qsize() - targetSize - for i in range(0, itemsToTrim): - try: - q.get_nowait() - except Empty: - break # break out of for loop, no more trimming - - TD_READY_MSG = "TDengine is initialized successfully" - - def procIpcBatch(self, trimToTarget=0, forceOutput=False): - self._trimQueue(trimToTarget) # trim if necessary - # Process all the output generated by the underlying sub process, - # managed by IO thread - print("<", end="", flush=True) - while True: - try: - line = self._ipcQueue.get_nowait() # getting output at fast speed - self._printProgress("_o") - except Empty: - # time.sleep(2.3) # wait only if there's no output - # no more output - print(".>", end="", flush=True) - return # we are done with THIS BATCH - else: # got line, printing out - if forceOutput: - logger.info(line) - else: - logger.debug(line) - print(">", end="", flush=True) - - _ProgressBars = ["--", "//", "||", "\\\\"] - - def _printProgress(self, msg): # TODO: assuming 2 chars - print(msg, end="", flush=True) - pBar = self._ProgressBars[Dice.throw(4)] - print(pBar, end="", flush=True) - print('\b\b\b\b', end="", flush=True) - - def svcOutputReader(self, out: IO, queue): - # Important Reference: https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python - # print("This is the svcOutput Reader...") - # for line in out : - for line in iter(out.readline, b''): - # print("Finished reading a line: {}".format(line)) - # print("Adding item to queue...") - try: - line = line.decode("utf-8").rstrip() - except UnicodeError: - print("\nNon-UTF8 server output: {}\n".format(line)) - - # This might 
block, and then causing "out" buffer to block - queue.put(line) - self._printProgress("_i") - - if self._status == MainExec.STATUS_STARTING: # we are starting, let's see if we have started - if line.find(self.TD_READY_MSG) != -1: # found - logger.info("Waiting for the service to become FULLY READY") - time.sleep(1.0) # wait for the server to truly start. TODO: remove this - logger.info("Service is now FULLY READY") - self._status = MainExec.STATUS_RUNNING - - # Trim the queue if necessary: TODO: try this 1 out of 10 times - self._trimQueue(self.MAX_QUEUE_SIZE * 9 // 10) # trim to 90% size - - if self.isStopping(): # TODO: use thread status instead - # WAITING for stopping sub process to finish its outptu - print("_w", end="", flush=True) - - # queue.put(line) - # meaning sub process must have died - print("\nNo more output from IO thread managing TDengine service") - out.close() - - def svcErrorReader(self, err: IO, queue): - for line in iter(err.readline, b''): - print("\nTDengine Service (taosd) ERROR (from stderr): {}".format(line)) - - -class TdeSubProcess: - def __init__(self): - self.subProcess = None - - def getStdOut(self): - return self.subProcess.stdout - - def getStdErr(self): - return self.subProcess.stderr - - def isRunning(self): - return self.subProcess is not None - - def getPid(self): - return self.subProcess.pid - - def getBuildPath(self): - selfPath = os.path.dirname(os.path.realpath(__file__)) - if ("community" in selfPath): - projPath = selfPath[:selfPath.find("communit")] - else: - projPath = selfPath[:selfPath.find("tests")] - - for root, dirs, files in os.walk(projPath): - if ("taosd" in files): - rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): - buildPath = root[:len(root) - len("/build/bin")] - break - return buildPath + self.activeTable.discard(i) # not raising an error, unlike remove - def start(self): - ON_POSIX = 'posix' in sys.builtin_module_names - - taosdPath = self.getBuildPath() + "/build/bin/taosd" - cfgPath = self.getBuildPath() + "/test/cfg" - - # Delete the log files - logPath = self.getBuildPath() + "/test/log" - # ref: https://stackoverflow.com/questions/1995373/deleting-all-files-in-a-directory-with-python/1995397 - # filelist = [ f for f in os.listdir(logPath) ] # if f.endswith(".bak") ] - # for f in filelist: - # filePath = os.path.join(logPath, f) - # print("Removing log file: {}".format(filePath)) - # os.remove(filePath) - if os.path.exists(logPath): - logPathSaved = logPath + "_" + time.strftime('%Y-%m-%d-%H-%M-%S') - logger.info("Saving old log files to: {}".format(logPathSaved)) - os.rename(logPath, logPathSaved) - # os.mkdir(logPath) # recreate, no need actually, TDengine will auto-create with proper perms - - svcCmd = [taosdPath, '-c', cfgPath] - # svcCmdSingle = "{} -c {}".format(taosdPath, cfgPath) - # svcCmd = ['vmstat', '1'] - if self.subProcess: # already there - raise RuntimeError("Corrupt process state") - - # print("Starting service: {}".format(svcCmd)) - self.subProcess = subprocess.Popen( - svcCmd, shell=False, - # svcCmdSingle, shell=True, # capture core dump? 
- stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - # bufsize=1, # not supported in binary mode - close_fds=ON_POSIX - ) # had text=True, which interferred with reading EOF - - def stop(self): - if not self.subProcess: - print("Sub process already stopped") - return -1 - - retCode = self.subProcess.poll() # contains real sub process return code - if retCode: # valid return code, process ended - self.subProcess = None - else: # process still alive, let's interrupt it - print( - "Sub process is running, sending SIG_INT and waiting for it to terminate...") - # sub process should end, then IPC queue should end, causing IO - # thread to end - self.subProcess.send_signal(signal.SIGINT) - try: - self.subProcess.wait(10) - retCode = self.subProcess.returncode - except subprocess.TimeoutExpired as err: - print("Time out waiting for TDengine service process to exit") - retCode = -3 - else: - print("TDengine service process terminated successfully from SIG_INT") - retCode = -4 - self.subProcess = None - return retCode class ThreadStacks: # stack info for all threads def __init__(self): @@ -2794,31 +2036,33 @@ class ThreadStacks: # stack info for all threads '__init__']: # the thread that extracted the stack continue # ignore # Now print - print("\n<----- Thread Info for ID: {}".format(thNid)) + print("\n<----- Thread Info for LWP/ID: {} (Execution stopped at Bottom Frame) <-----".format(thNid)) + stackFrame = 0 for frame in stack: # print(frame) - print("File {filename}, line {lineno}, in {name}".format( - filename=frame.filename, lineno=frame.lineno, name=frame.name)) + print("[{sf}] File {filename}, line {lineno}, in {name}".format( + sf=stackFrame, filename=frame.filename, lineno=frame.lineno, name=frame.name)) print(" {}".format(frame.line)) - print("-----> End of Thread Info\n") + stackFrame += 1 + print("-----> End of Thread Info ----->\n") class ClientManager: def __init__(self): - print("Starting service manager") + Logging.info("Starting service manager") # signal.signal(signal.SIGTERM, self.sigIntHandler) # signal.signal(signal.SIGINT, self.sigIntHandler) - self._status = MainExec.STATUS_RUNNING + self._status = Status.STATUS_RUNNING self.tc = None self.inSigHandler = False def sigIntHandler(self, signalNumber, frame): - if self._status != MainExec.STATUS_RUNNING: + if self._status != Status.STATUS_RUNNING: print("Repeated SIGINT received, forced exit...") # return # do nothing if it's already not running sys.exit(-1) - self._status = MainExec.STATUS_STOPPING # immediately set our status + self._status = Status.STATUS_STOPPING # immediately set our status print("ClientManager: Terminating program...") self.tc.requestToStop() @@ -2898,15 +2142,20 @@ class ClientManager: # self._printLastNumbers() global gConfig - dbManager = DbManager() # Regular function + # Prepare Tde Instance + global gContainer + tInst = gContainer.defTdeInstance = TdeInstance() # "subdir to hold the instance" + + dbManager = DbManager(gConfig.connector_type, tInst.getDbTarget()) # Regular function thPool = ThreadPool(gConfig.num_threads, gConfig.max_steps) self.tc = ThreadCoordinator(thPool, dbManager) + Logging.info("Starting client instance: {}".format(tInst)) self.tc.run() # print("exec stats: {}".format(self.tc.getExecStats())) # print("TC failed = {}".format(self.tc.isFailed())) if svcMgr: # gConfig.auto_start_service: - svcMgr.stopTaosService() + svcMgr.stopTaosServices() svcMgr = None # Print exec status, etc., AFTER showing messages from the server self.conclude() @@ -2936,18 +2185,10 @@ class ClientManager: 
# self.tc.getDbManager().cleanUp() # clean up first, so we can show ZERO db connections self.tc.printStats() - - - class MainExec: - STATUS_STARTING = 1 - STATUS_RUNNING = 2 - STATUS_STOPPING = 3 - STATUS_STOPPED = 4 - def __init__(self): self._clientMgr = None - self._svcMgr = None + self._svcMgr = None # type: ServiceManager signal.signal(signal.SIGTERM, self.sigIntHandler) signal.signal(signal.SIGINT, self.sigIntHandler) @@ -2960,219 +2201,185 @@ class MainExec: self._svcMgr.sigUsrHandler(signalNumber, frame) def sigIntHandler(self, signalNumber, frame): - if self._svcMgr: + if self._svcMgr: self._svcMgr.sigIntHandler(signalNumber, frame) - if self._clientMgr: + if self._clientMgr: self._clientMgr.sigIntHandler(signalNumber, frame) def runClient(self): global gSvcMgr if gConfig.auto_start_service: - self._svcMgr = SvcManager() - gSvcMgr = self._svcMgr # hack alert - self._svcMgr.startTaosService() # we start, don't run + gSvcMgr = self._svcMgr = ServiceManager(1) # hack alert + gSvcMgr.startTaosServices() # we start, don't run self._clientMgr = ClientManager() ret = None try: ret = self._clientMgr.run(self._svcMgr) # stop TAOS service inside except requests.exceptions.ConnectionError as err: - logger.warning("Failed to open REST connection to DB: {}".format(err.getMessage())) + Logging.warning("Failed to open REST connection to DB: {}".format(err.getMessage())) # don't raise return ret def runService(self): global gSvcMgr - self._svcMgr = SvcManager() - gSvcMgr = self._svcMgr # save it in a global variable TODO: hack alert - - self._svcMgr.run() # run to some end state - self._svcMgr = None - gSvcMgr = None - - def runTemp(self): # for debugging purposes - # # Hack to exercise reading from disk, imcreasing coverage. TODO: fix - # dbc = dbState.getDbConn() - # sTbName = dbState.getFixedSuperTableName() - # dbc.execute("create database if not exists db") - # if not dbState.getState().equals(StateEmpty()): - # dbc.execute("use db") - - # rTables = None - # try: # the super table may not exist - # sql = "select TBNAME from db.{}".format(sTbName) - # logger.info("Finding out tables in super table: {}".format(sql)) - # dbc.query(sql) # TODO: analyze result set later - # logger.info("Fetching result") - # rTables = dbc.getQueryResult() - # logger.info("Result: {}".format(rTables)) - # except taos.error.ProgrammingError as err: - # logger.info("Initial Super table OPS error: {}".format(err)) - - # # sys.exit() - # if ( not rTables == None): - # # print("rTables[0] = {}, type = {}".format(rTables[0], type(rTables[0]))) - # try: - # for rTbName in rTables : # regular tables - # ds = dbState - # logger.info("Inserting into table: {}".format(rTbName[0])) - # sql = "insert into db.{} values ('{}', {});".format( - # rTbName[0], - # ds.getNextTick(), ds.getNextInt()) - # dbc.execute(sql) - # for rTbName in rTables : # regular tables - # dbc.query("select * from db.{}".format(rTbName[0])) # TODO: check success failure - # logger.info("Initial READING operation is successful") - # except taos.error.ProgrammingError as err: - # logger.info("Initial WRITE/READ error: {}".format(err)) - - # Sandbox testing code - # dbc = dbState.getDbConn() - # while True: - # rows = dbc.query("show databases") - # print("Rows: {}, time={}".format(rows, time.time())) - return - - -def main(): - # Super cool Python argument library: - # https://docs.python.org/3/library/argparse.html - parser = argparse.ArgumentParser( - formatter_class=argparse.RawDescriptionHelpFormatter, - description=textwrap.dedent('''\ - TDengine Auto 
Crash Generator (PLEASE NOTICE the Prerequisites Below) - --------------------------------------------------------------------- - 1. You build TDengine in the top level ./build directory, as described in offical docs - 2. You run the server there before this script: ./build/bin/taosd -c test/cfg - - ''')) - - # parser.add_argument('-a', '--auto-start-service', action='store_true', - # help='Automatically start/stop the TDengine service (default: false)') - # parser.add_argument('-c', '--connector-type', action='store', default='native', type=str, - # help='Connector type to use: native, rest, or mixed (default: 10)') - # parser.add_argument('-d', '--debug', action='store_true', - # help='Turn on DEBUG mode for more logging (default: false)') - # parser.add_argument('-e', '--run-tdengine', action='store_true', - # help='Run TDengine service in foreground (default: false)') - # parser.add_argument('-l', '--larger-data', action='store_true', - # help='Write larger amount of data during write operations (default: false)') - # parser.add_argument('-p', '--per-thread-db-connection', action='store_true', - # help='Use a single shared db connection (default: false)') - # parser.add_argument('-r', '--record-ops', action='store_true', - # help='Use a pair of always-fsynced fils to record operations performing + performed, for power-off tests (default: false)') - # parser.add_argument('-s', '--max-steps', action='store', default=1000, type=int, - # help='Maximum number of steps to run (default: 100)') - # parser.add_argument('-t', '--num-threads', action='store', default=5, type=int, - # help='Number of threads to run (default: 10)') - # parser.add_argument('-x', '--continue-on-exception', action='store_true', - # help='Continue execution after encountering unexpected/disallowed errors/exceptions (default: false)') - - parser.add_argument( - '-a', - '--auto-start-service', - action='store_true', - help='Automatically start/stop the TDengine service (default: false)') - parser.add_argument( - '-b', - '--max-dbs', - action='store', - default=0, - type=int, - help='Maximum number of DBs to keep, set to disable dropping DB. (default: 0)') - parser.add_argument( - '-c', - '--connector-type', - action='store', - default='native', - type=str, - help='Connector type to use: native, rest, or mixed (default: 10)') - parser.add_argument( - '-d', - '--debug', - action='store_true', - help='Turn on DEBUG mode for more logging (default: false)') - parser.add_argument( - '-e', - '--run-tdengine', - action='store_true', - help='Run TDengine service in foreground (default: false)') - parser.add_argument( - '-i', - '--max-replicas', - action='store', - default=1, - type=int, - help='Maximum number of replicas to use, when testing against clusters. 
(default: 1)') - parser.add_argument( - '-l', - '--larger-data', - action='store_true', - help='Write larger amount of data during write operations (default: false)') - parser.add_argument( - '-p', - '--per-thread-db-connection', - action='store_true', - help='Use a single shared db connection (default: false)') - parser.add_argument( - '-r', - '--record-ops', - action='store_true', - help='Use a pair of always-fsynced fils to record operations performing + performed, for power-off tests (default: false)') - parser.add_argument( - '-s', - '--max-steps', - action='store', - default=1000, - type=int, - help='Maximum number of steps to run (default: 100)') - parser.add_argument( - '-t', - '--num-threads', - action='store', - default=5, - type=int, - help='Number of threads to run (default: 10)') - parser.add_argument( - '-v', - '--verify-data', - action='store_true', - help='Verify data written in a number of places by reading back (default: false)') - parser.add_argument( - '-x', - '--continue-on-exception', - action='store_true', - help='Continue execution after encountering unexpected/disallowed errors/exceptions (default: false)') - - global gConfig - gConfig = parser.parse_args() - - # Logging Stuff - global logger - _logger = logging.getLogger('CrashGen') # real logger - _logger.addFilter(LoggingFilter()) - ch = logging.StreamHandler() - _logger.addHandler(ch) - - # Logging adapter, to be used as a logger - logger = MyLoggingAdapter(_logger, []) - - if (gConfig.debug): - logger.setLevel(logging.DEBUG) # default seems to be INFO - else: - logger.setLevel(logging.INFO) - - Dice.seed(0) # initial seeding of dice - - # Run server or client - mExec = MainExec() - if gConfig.run_tdengine: # run server - mExec.runService() - else: - return mExec.runClient() - - -if __name__ == "__main__": - exitCode = main() - # print("Exiting with code: {}".format(exitCode)) - sys.exit(exitCode) + gSvcMgr = self._svcMgr = ServiceManager(gConfig.num_dnodes) # save it in a global variable TODO: hack alert + + gSvcMgr.run() # run to some end state + gSvcMgr = self._svcMgr = None + + def init(self): # TODO: refactor + global gContainer + gContainer = Container() # micky-mouse DI + + global gSvcMgr # TODO: refactor away + gSvcMgr = None + + # Super cool Python argument library: + # https://docs.python.org/3/library/argparse.html + parser = argparse.ArgumentParser( + formatter_class=argparse.RawDescriptionHelpFormatter, + description=textwrap.dedent('''\ + TDengine Auto Crash Generator (PLEASE NOTICE the Prerequisites Below) + --------------------------------------------------------------------- + 1. You build TDengine in the top level ./build directory, as described in offical docs + 2. You run the server there before this script: ./build/bin/taosd -c test/cfg + + ''')) + + parser.add_argument( + '-a', + '--auto-start-service', + action='store_true', + help='Automatically start/stop the TDengine service (default: false)') + parser.add_argument( + '-b', + '--max-dbs', + action='store', + default=0, + type=int, + help='Maximum number of DBs to keep, set to disable dropping DB. 
(default: 0)') + parser.add_argument( + '-c', + '--connector-type', + action='store', + default='native', + type=str, + help='Connector type to use: native, rest, or mixed (default: 10)') + parser.add_argument( + '-d', + '--debug', + action='store_true', + help='Turn on DEBUG mode for more logging (default: false)') + parser.add_argument( + '-e', + '--run-tdengine', + action='store_true', + help='Run TDengine service in foreground (default: false)') + parser.add_argument( + '-g', + '--ignore-errors', + action='store', + default=None, + type=str, + help='Ignore error codes, comma separated, 0x supported (default: None)') + parser.add_argument( + '-i', + '--max-replicas', + action='store', + default=1, + type=int, + help='Maximum number of replicas to use, when testing against clusters. (default: 1)') + parser.add_argument( + '-l', + '--larger-data', + action='store_true', + help='Write larger amount of data during write operations (default: false)') + parser.add_argument( + '-n', + '--dynamic-db-table-names', + action='store_true', + help='Use non-fixed names for dbs/tables, useful for multi-instance executions (default: false)') + parser.add_argument( + '-o', + '--num-dnodes', + action='store', + default=1, + type=int, + help='Number of Dnodes to initialize, used with -e option. (default: 1)') + parser.add_argument( + '-p', + '--per-thread-db-connection', + action='store_true', + help='Use a single shared db connection (default: false)') + parser.add_argument( + '-r', + '--record-ops', + action='store_true', + help='Use a pair of always-fsynced fils to record operations performing + performed, for power-off tests (default: false)') + parser.add_argument( + '-s', + '--max-steps', + action='store', + default=1000, + type=int, + help='Maximum number of steps to run (default: 100)') + parser.add_argument( + '-t', + '--num-threads', + action='store', + default=5, + type=int, + help='Number of threads to run (default: 10)') + parser.add_argument( + '-v', + '--verify-data', + action='store_true', + help='Verify data written in a number of places by reading back (default: false)') + parser.add_argument( + '-x', + '--continue-on-exception', + action='store_true', + help='Continue execution after encountering unexpected/disallowed errors/exceptions (default: false)') + + global gConfig + gConfig = parser.parse_args() + + Logging.clsInit(gConfig) + + Dice.seed(0) # initial seeding of dice + + def run(self): + if gConfig.run_tdengine: # run server + try: + self.runService() + return 0 # success + except ConnectionError as err: + Logging.error("Failed to make DB connection, please check DB instance manually") + return -1 # failure + else: + return self.runClient() + + +class Container(): + _propertyList = {'defTdeInstance'} + + def __init__(self): + self._cargo = {} # No cargo at the beginning + + def _verifyValidProperty(self, name): + if not name in self._propertyList: + raise CrashGenError("Invalid container property: {}".format(name)) + + # Called for an attribute, when other mechanisms fail (compare to __getattribute__) + def __getattr__(self, name): + self._verifyValidProperty(name) + return self._cargo[name] # just a simple lookup + + def __setattr__(self, name, value): + if name == '_cargo' : # reserved vars + super().__setattr__(name, value) + return + self._verifyValidProperty(name) + self._cargo[name] = value + diff --git a/tests/pytest/crash_gen/db.py b/tests/pytest/crash_gen/db.py new file mode 100644 index 0000000000000000000000000000000000000000..2a4b362f82c4516195becf78ef9771b7c62c3c41 
--- /dev/null +++ b/tests/pytest/crash_gen/db.py @@ -0,0 +1,441 @@ +from __future__ import annotations + +import sys +import time +import threading +import requests +from requests.auth import HTTPBasicAuth + +import taos +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.log import * + +from .misc import Logging, CrashGenError, Helper, Dice +import os +import datetime +# from .service_manager import TdeInstance + +class DbConn: + TYPE_NATIVE = "native-c" + TYPE_REST = "rest-api" + TYPE_INVALID = "invalid" + + @classmethod + def create(cls, connType, dbTarget): + if connType == cls.TYPE_NATIVE: + return DbConnNative(dbTarget) + elif connType == cls.TYPE_REST: + return DbConnRest(dbTarget) + else: + raise RuntimeError( + "Unexpected connection type: {}".format(connType)) + + @classmethod + def createNative(cls, dbTarget) -> DbConn: + return cls.create(cls.TYPE_NATIVE, dbTarget) + + @classmethod + def createRest(cls, dbTarget) -> DbConn: + return cls.create(cls.TYPE_REST, dbTarget) + + def __init__(self, dbTarget): + self.isOpen = False + self._type = self.TYPE_INVALID + self._lastSql = None + self._dbTarget = dbTarget + + def __repr__(self): + return "[DbConn: type={}, target={}]".format(self._type, self._dbTarget) + + def getLastSql(self): + return self._lastSql + + def open(self): + if (self.isOpen): + raise RuntimeError("Cannot re-open an existing DB connection") + + # below implemented by child classes + self.openByType() + + Logging.debug("[DB] data connection opened: {}".format(self)) + self.isOpen = True + + def close(self): + raise RuntimeError("Unexpected execution, should be overriden") + + def queryScalar(self, sql) -> int: + return self._queryAny(sql) + + def queryString(self, sql) -> str: + return self._queryAny(sql) + + def _queryAny(self, sql): # actual query result as an int + if (not self.isOpen): + raise RuntimeError("Cannot query database until connection is open") + nRows = self.query(sql) + if nRows != 1: + raise taos.error.ProgrammingError( + "Unexpected result for query: {}, rows = {}".format(sql, nRows), + (0x991 if nRows==0 else 0x992) + ) + if self.getResultRows() != 1 or self.getResultCols() != 1: + raise RuntimeError("Unexpected result set for query: {}".format(sql)) + return self.getQueryResult()[0][0] + + def use(self, dbName): + self.execute("use {}".format(dbName)) + + def existsDatabase(self, dbName: str): + ''' Check if a certain database exists ''' + self.query("show databases") + dbs = [v[0] for v in self.getQueryResult()] # ref: https://stackoverflow.com/questions/643823/python-list-transformation + # ret2 = dbName in dbs + # print("dbs = {}, str = {}, ret2={}, type2={}".format(dbs, dbName,ret2, type(dbName))) + return dbName in dbs # TODO: super weird type mangling seen, once here + + def existsSuperTable(self, stName): + self.query("show stables") + sts = [v[0] for v in self.getQueryResult()] + return stName in sts + + def hasTables(self): + return self.query("show tables") > 0 + + def execute(self, sql): + ''' Return the number of rows affected''' + raise RuntimeError("Unexpected execution, should be overriden") + + def safeExecute(self, sql): + '''Safely execute any SQL query, returning True/False upon success/failure''' + try: + self.execute(sql) + return True # ignore num of results, return success + except taos.error.ProgrammingError as err: + return False # failed, for whatever TAOS reason + # Not possile to reach here, non-TAOS exception would have been thrown + + def query(self, sql) -> int: # return num rows 
returned + ''' Return the number of rows affected''' + raise RuntimeError("Unexpected execution, should be overriden") + + def openByType(self): + raise RuntimeError("Unexpected execution, should be overriden") + + def getQueryResult(self): + raise RuntimeError("Unexpected execution, should be overriden") + + def getResultRows(self): + raise RuntimeError("Unexpected execution, should be overriden") + + def getResultCols(self): + raise RuntimeError("Unexpected execution, should be overriden") + +# Sample: curl -u root:taosdata -d "show databases" localhost:6020/rest/sql + + +class DbConnRest(DbConn): + REST_PORT_INCREMENT = 11 + + def __init__(self, dbTarget: DbTarget): + super().__init__(dbTarget) + self._type = self.TYPE_REST + restPort = dbTarget.port + 11 + self._url = "http://{}:{}/rest/sql".format( + dbTarget.hostAddr, dbTarget.port + self.REST_PORT_INCREMENT) + self._result = None + + def openByType(self): # Open connection + pass # do nothing, always open + + def close(self): + if (not self.isOpen): + raise RuntimeError("Cannot clean up database until connection is open") + # Do nothing for REST + Logging.debug("[DB] REST Database connection closed") + self.isOpen = False + + def _doSql(self, sql): + self._lastSql = sql # remember this, last SQL attempted + try: + r = requests.post(self._url, + data = sql, + auth = HTTPBasicAuth('root', 'taosdata')) + except: + print("REST API Failure (TODO: more info here)") + raise + rj = r.json() + # Sanity check for the "Json Result" + if ('status' not in rj): + raise RuntimeError("No status in REST response") + + if rj['status'] == 'error': # clearly reported error + if ('code' not in rj): # error without code + raise RuntimeError("REST error return without code") + errno = rj['code'] # May need to massage this in the future + # print("Raising programming error with REST return: {}".format(rj)) + raise taos.error.ProgrammingError( + rj['desc'], errno) # todo: check existance of 'desc' + + if rj['status'] != 'succ': # better be this + raise RuntimeError( + "Unexpected REST return status: {}".format( + rj['status'])) + + nRows = rj['rows'] if ('rows' in rj) else 0 + self._result = rj + return nRows + + def execute(self, sql): + if (not self.isOpen): + raise RuntimeError( + "Cannot execute database commands until connection is open") + Logging.debug("[SQL-REST] Executing SQL: {}".format(sql)) + nRows = self._doSql(sql) + Logging.debug( + "[SQL-REST] Execution Result, nRows = {}, SQL = {}".format(nRows, sql)) + return nRows + + def query(self, sql): # return rows affected + return self.execute(sql) + + def getQueryResult(self): + return self._result['data'] + + def getResultRows(self): + print(self._result) + raise RuntimeError("TBD") # TODO: finish here to support -v under -c rest + # return self._tdSql.queryRows + + def getResultCols(self): + print(self._result) + raise RuntimeError("TBD") + + # Duplicate code from TDMySQL, TODO: merge all this into DbConnNative + + +class MyTDSql: + # Class variables + _clsLock = threading.Lock() # class wide locking + longestQuery = None # type: str + longestQueryTime = 0.0 # seconds + lqStartTime = 0.0 + # lqEndTime = 0.0 # Not needed, as we have the two above already + + def __init__(self, hostAddr, cfgPath): + # Make the DB connection + self._conn = taos.connect(host=hostAddr, config=cfgPath) + self._cursor = self._conn.cursor() + + self.queryRows = 0 + self.queryCols = 0 + self.affectedRows = 0 + + # def init(self, cursor, log=True): + # self.cursor = cursor + # if (log): + # caller = 
inspect.getframeinfo(inspect.stack()[1][0]) + # self.cursor.log(caller.filename + ".sql") + + def close(self): + self._cursor.close() # can we double close? + self._conn.close() # TODO: very important, cursor close does NOT close DB connection! + self._cursor.close() + + def _execInternal(self, sql): + startTime = time.time() + # Logging.debug("Executing SQL: " + sql) + ret = self._cursor.execute(sql) + # print("\nSQL success: {}".format(sql)) + queryTime = time.time() - startTime + # Record the query time + cls = self.__class__ + if queryTime > (cls.longestQueryTime + 0.01) : + with cls._clsLock: + cls.longestQuery = sql + cls.longestQueryTime = queryTime + cls.lqStartTime = startTime + return ret + + def query(self, sql): + self.sql = sql + try: + self._execInternal(sql) + self.queryResult = self._cursor.fetchall() + self.queryRows = len(self.queryResult) + self.queryCols = len(self._cursor.description) + except Exception as e: + # caller = inspect.getframeinfo(inspect.stack()[1][0]) + # args = (caller.filename, caller.lineno, sql, repr(e)) + # tdLog.exit("%s(%d) failed: sql:%s, %s" % args) + raise + return self.queryRows + + def execute(self, sql): + self.sql = sql + try: + self.affectedRows = self._execInternal(sql) + except Exception as e: + # caller = inspect.getframeinfo(inspect.stack()[1][0]) + # args = (caller.filename, caller.lineno, sql, repr(e)) + # tdLog.exit("%s(%d) failed: sql:%s, %s" % args) + raise + return self.affectedRows + +class DbTarget: + def __init__(self, cfgPath, hostAddr, port): + self.cfgPath = cfgPath + self.hostAddr = hostAddr + self.port = port + + def __repr__(self): + return "[DbTarget: cfgPath={}, host={}:{}]".format( + Helper.getFriendlyPath(self.cfgPath), self.hostAddr, self.port) + + def getEp(self): + return "{}:{}".format(self.hostAddr, self.port) + +class DbConnNative(DbConn): + # Class variables + _lock = threading.Lock() + # _connInfoDisplayed = False # TODO: find another way to display this + totalConnections = 0 # Not private + + def __init__(self, dbTarget): + super().__init__(dbTarget) + self._type = self.TYPE_NATIVE + self._conn = None + # self._cursor = None + + def openByType(self): # Open connection + # global gContainer + # tInst = tInst or gContainer.defTdeInstance # set up in ClientManager, type: TdeInstance + # cfgPath = self.getBuildPath() + "/test/cfg" + # cfgPath = tInst.getCfgDir() + # hostAddr = tInst.getHostAddr() + + cls = self.__class__ # Get the class, to access class variables + with cls._lock: # force single threading for opening DB connections. # TODO: whaaat??!!! 
+ dbTarget = self._dbTarget + # if not cls._connInfoDisplayed: + # cls._connInfoDisplayed = True # updating CLASS variable + Logging.debug("Initiating TAOS native connection to {}".format(dbTarget)) + # Make the connection + # self._conn = taos.connect(host=hostAddr, config=cfgPath) # TODO: make configurable + # self._cursor = self._conn.cursor() + # Record the count in the class + self._tdSql = MyTDSql(dbTarget.hostAddr, dbTarget.cfgPath) # making DB connection + cls.totalConnections += 1 + + self._tdSql.execute('reset query cache') + # self._cursor.execute('use db') # do this at the beginning of every + + # Open connection + # self._tdSql = MyTDSql() + # self._tdSql.init(self._cursor) + + def close(self): + if (not self.isOpen): + raise RuntimeError("Cannot clean up database until connection is open") + self._tdSql.close() + # Decrement the class wide counter + cls = self.__class__ # Get the class, to access class variables + with cls._lock: + cls.totalConnections -= 1 + + Logging.debug("[DB] Database connection closed") + self.isOpen = False + + def execute(self, sql): + if (not self.isOpen): + raise RuntimeError("Cannot execute database commands until connection is open") + Logging.debug("[SQL] Executing SQL: {}".format(sql)) + self._lastSql = sql + nRows = self._tdSql.execute(sql) + Logging.debug( + "[SQL] Execution Result, nRows = {}, SQL = {}".format( + nRows, sql)) + return nRows + + def query(self, sql): # return rows affected + if (not self.isOpen): + raise RuntimeError( + "Cannot query database until connection is open") + Logging.debug("[SQL] Executing SQL: {}".format(sql)) + self._lastSql = sql + nRows = self._tdSql.query(sql) + Logging.debug( + "[SQL] Query Result, nRows = {}, SQL = {}".format( + nRows, sql)) + return nRows + # results are in: return self._tdSql.queryResult + + def getQueryResult(self): + return self._tdSql.queryResult + + def getResultRows(self): + return self._tdSql.queryRows + + def getResultCols(self): + return self._tdSql.queryCols + + +class DbManager(): + ''' This is a wrapper around DbConn(), to make it easier to use. + + TODO: rename this to DbConnManager + ''' + def __init__(self, cType, dbTarget): + # self.tableNumQueue = LinearQueue() # TODO: delete? 
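For orientation, the DbConn API defined above can also be driven directly; a hedged sketch of the lifecycle, with the config path, host and port as placeholders for a running taosd instance:

    from crash_gen.db import DbConn, DbTarget

    target = DbTarget("/path/to/test/cfg", "127.0.0.1", 6030)   # placeholder target
    dbc = DbConn.createNative(target)       # or DbConn.createRest(target)
    dbc.open()
    dbc.execute("create database if not exists db_example")
    dbc.use("db_example")
    nRows = dbc.query("show tables")        # returns the number of rows fetched
    print(nRows, dbc.getQueryResult())
    dbc.close()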
+ # self.openDbServerConnection() + self._dbConn = DbConn.createNative(dbTarget) if ( + cType == 'native') else DbConn.createRest(dbTarget) + try: + self._dbConn.open() # may throw taos.error.ProgrammingError: disconnected + except taos.error.ProgrammingError as err: + # print("Error type: {}, msg: {}, value: {}".format(type(err), err.msg, err)) + if (err.msg == 'client disconnected'): # cannot open DB connection + print( + "Cannot establish DB connection, please re-run script without parameter, and follow the instructions.") + sys.exit(2) + else: + print("Failed to connect to DB, errno = {}, msg: {}" + .format(Helper.convertErrno(err.errno), err.msg)) + raise + except BaseException: + print("[=] Unexpected exception") + raise + + # Do this after dbConn is in proper shape + # Moved to Database() + # self._stateMachine = StateMechine(self._dbConn) + + def getDbConn(self): + return self._dbConn + + # TODO: not used any more, to delete + def pickAndAllocateTable(self): # pick any table, and "use" it + return self.tableNumQueue.pickAndAllocate() + + # TODO: Not used any more, to delete + def addTable(self): + with self._lock: + tIndex = self.tableNumQueue.push() + return tIndex + + # Not used any more, to delete + def releaseTable(self, i): # return the table back, so others can use it + self.tableNumQueue.release(i) + + # TODO: not used any more, delete + def getTableNameToDelete(self): + tblNum = self.tableNumQueue.pop() # TODO: race condition! + if (not tblNum): # maybe false + return False + + return "table_{}".format(tblNum) + + def cleanUp(self): + self._dbConn.close() + diff --git a/tests/pytest/crash_gen/misc.py b/tests/pytest/crash_gen/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..2d2ce99d95582809a8940a8d777bc166b633747c --- /dev/null +++ b/tests/pytest/crash_gen/misc.py @@ -0,0 +1,186 @@ +import threading +import random +import logging +import os + + +class CrashGenError(Exception): + def __init__(self, msg=None, errno=None): + self.msg = msg + self.errno = errno + + def __str__(self): + return self.msg + + +class LoggingFilter(logging.Filter): + def filter(self, record: logging.LogRecord): + if (record.levelno >= logging.INFO): + return True # info or above always log + + # Commenting out below to adjust... 
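DbManager wraps the same connection together with the error handling shown above; a short usage sketch (connector type and target values are assumptions):

    from crash_gen.db import DbManager, DbTarget

    dbm = DbManager('native', DbTarget("/path/to/test/cfg", "127.0.0.1", 6030))
    conn = dbm.getDbConn()                  # underlying DbConn, already opened
    print(conn.existsDatabase("log"))       # e.g. probe for a database by name
    dbm.cleanUp()                           # closes the wrapped connection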
+ + # if msg.startswith("[TRD]"): + # return False + return True + + +class MyLoggingAdapter(logging.LoggerAdapter): + def process(self, msg, kwargs): + return "[{:04d}] {}".format(threading.get_ident() % 10000, msg), kwargs + # return '[%s] %s' % (self.extra['connid'], msg), kwargs + + +class Logging: + logger = None + + @classmethod + def getLogger(cls): + return logger + + @classmethod + def clsInit(cls, gConfig): # TODO: refactor away gConfig + if cls.logger: + return + + # Logging Stuff + # global misc.logger + _logger = logging.getLogger('CrashGen') # real logger + _logger.addFilter(LoggingFilter()) + ch = logging.StreamHandler() + _logger.addHandler(ch) + + # Logging adapter, to be used as a logger + # print("setting logger variable") + # global logger + cls.logger = MyLoggingAdapter(_logger, []) + + if (gConfig.debug): + cls.logger.setLevel(logging.DEBUG) # default seems to be INFO + else: + cls.logger.setLevel(logging.INFO) + + @classmethod + def info(cls, msg): + cls.logger.info(msg) + + @classmethod + def debug(cls, msg): + cls.logger.debug(msg) + + @classmethod + def warning(cls, msg): + cls.logger.warning(msg) + + @classmethod + def error(cls, msg): + cls.logger.error(msg) + +class Status: + STATUS_STARTING = 1 + STATUS_RUNNING = 2 + STATUS_STOPPING = 3 + STATUS_STOPPED = 4 + + def __init__(self, status): + self.set(status) + + def __repr__(self): + return "[Status: v={}]".format(self._status) + + def set(self, status): + self._status = status + + def get(self): + return self._status + + def isStarting(self): + return self._status == Status.STATUS_STARTING + + def isRunning(self): + # return self._thread and self._thread.is_alive() + return self._status == Status.STATUS_RUNNING + + def isStopping(self): + return self._status == Status.STATUS_STOPPING + + def isStopped(self): + return self._status == Status.STATUS_STOPPED + + def isStable(self): + return self.isRunning() or self.isStopped() + +# Deterministic random number generator +class Dice(): + seeded = False # static, uninitialized + + @classmethod + def seed(cls, s): # static + if (cls.seeded): + raise RuntimeError( + "Cannot seed the random generator more than once") + cls.verifyRNG() + random.seed(s) + cls.seeded = True # TODO: protect against multi-threading + + @classmethod + def verifyRNG(cls): # Verify that the RNG is determinstic + random.seed(0) + x1 = random.randrange(0, 1000) + x2 = random.randrange(0, 1000) + x3 = random.randrange(0, 1000) + if (x1 != 864 or x2 != 394 or x3 != 776): + raise RuntimeError("System RNG is not deterministic") + + @classmethod + def throw(cls, stop): # get 0 to stop-1 + return cls.throwRange(0, stop) + + @classmethod + def throwRange(cls, start, stop): # up to stop-1 + if (not cls.seeded): + raise RuntimeError("Cannot throw dice before seeding it") + return random.randrange(start, stop) + + @classmethod + def choice(cls, cList): + return random.choice(cList) + +class Helper: + @classmethod + def convertErrno(cls, errno): + return errno if (errno > 0) else 0x80000000 + errno + + @classmethod + def getFriendlyPath(cls, path): # returns .../xxx/yyy + ht1 = os.path.split(path) + ht2 = os.path.split(ht1[0]) + return ".../" + ht2[1] + '/' + ht1[1] + + +class Progress: + STEP_BOUNDARY = 0 + BEGIN_THREAD_STEP = 1 + END_THREAD_STEP = 2 + SERVICE_HEART_BEAT= 3 + SERVICE_RECONNECT_START = 4 + SERVICE_RECONNECT_SUCCESS = 5 + SERVICE_RECONNECT_FAILURE = 6 + SERVICE_START_NAP = 7 + CREATE_TABLE_ATTEMPT = 8 + + tokens = { + STEP_BOUNDARY: '.', + BEGIN_THREAD_STEP: '[', + END_THREAD_STEP: '] ', + 
SERVICE_HEART_BEAT: '.Y.', + SERVICE_RECONNECT_START: '', + SERVICE_RECONNECT_FAILURE: '.xr>', + SERVICE_START_NAP: '_zz', + CREATE_TABLE_ATTEMPT: '_c', + } + + @classmethod + def emit(cls, token): + print(cls.tokens[token], end="", flush=True) diff --git a/tests/pytest/crash_gen/service_manager.py b/tests/pytest/crash_gen/service_manager.py new file mode 100644 index 0000000000000000000000000000000000000000..d249abc4396462b6dbacbfcbb1f6619e48161c3b --- /dev/null +++ b/tests/pytest/crash_gen/service_manager.py @@ -0,0 +1,773 @@ +import os +import io +import sys +import threading +import signal +import logging +import time +import subprocess + +from typing import IO, List + +try: + import psutil +except: + print("Psutil module needed, please install: sudo pip3 install psutil") + sys.exit(-1) + +from queue import Queue, Empty + +from .misc import Logging, Status, CrashGenError, Dice, Helper, Progress +from .db import DbConn, DbTarget + +class TdeInstance(): + """ + A class to capture the *static* information of a TDengine instance, + including the location of the various files/directories, and basica + configuration. + """ + + @classmethod + def _getBuildPath(cls): + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("communit")] + else: + projPath = selfPath[:selfPath.find("tests")] + + buildPath = None + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + if buildPath == None: + raise RuntimeError("Failed to determine buildPath, selfPath={}, projPath={}" + .format(selfPath, projPath)) + return buildPath + + @classmethod + def prepareGcovEnv(cls, env): + # Ref: https://gcc.gnu.org/onlinedocs/gcc/Cross-profiling.html + bPath = cls._getBuildPath() # build PATH + numSegments = len(bPath.split('/')) - 1 # "/x/TDengine/build" should yield 3 + numSegments = numSegments - 1 # DEBUG only + env['GCOV_PREFIX'] = bPath + '/svc_gcov' + env['GCOV_PREFIX_STRIP'] = str(numSegments) # Strip every element, plus, ENV needs strings + Logging.info("Preparing GCOV environement to strip {} elements and use path: {}".format( + numSegments, env['GCOV_PREFIX'] )) + + def __init__(self, subdir='test', tInstNum=0, port=6030, fepPort=6030): + self._buildDir = self._getBuildPath() + self._subdir = '/' + subdir # TODO: tolerate "/" + self._port = port # TODO: support different IP address too + self._fepPort = fepPort + + self._tInstNum = tInstNum + self._smThread = ServiceManagerThread() + + def getDbTarget(self): + return DbTarget(self.getCfgDir(), self.getHostAddr(), self._port) + + def getPort(self): + return self._port + + def __repr__(self): + return "[TdeInstance: {}, subdir={}]".format( + self._buildDir, Helper.getFriendlyPath(self._subdir)) + + def generateCfgFile(self): + # print("Logger = {}".format(logger)) + # buildPath = self.getBuildPath() + # taosdPath = self._buildPath + "/build/bin/taosd" + + cfgDir = self.getCfgDir() + cfgFile = cfgDir + "/taos.cfg" # TODO: inquire if this is fixed + if os.path.exists(cfgFile): + if os.path.isfile(cfgFile): + Logging.warning("Config file exists already, skip creation: {}".format(cfgFile)) + return # cfg file already exists, nothing to do + else: + raise CrashGenError("Invalid config file: {}".format(cfgFile)) + # Now that the cfg file doesn't exist + if os.path.exists(cfgDir): + if not os.path.isdir(cfgDir): + raise 
CrashGenError("Invalid config dir: {}".format(cfgDir)) + # else: good path + else: + os.makedirs(cfgDir, exist_ok=True) # like "mkdir -p" + # Now we have a good cfg dir + cfgValues = { + 'runDir': self.getRunDir(), + 'ip': '127.0.0.1', # TODO: change to a network addressable ip + 'port': self._port, + 'fepPort': self._fepPort, + } + cfgTemplate = """ +dataDir {runDir}/data +logDir {runDir}/log + +charset UTF-8 + +firstEp {ip}:{fepPort} +fqdn {ip} +serverPort {port} + +# was all 135 below +dDebugFlag 135 +cDebugFlag 135 +rpcDebugFlag 135 +qDebugFlag 135 +# httpDebugFlag 143 +# asyncLog 0 +# tables 10 +maxtablesPerVnode 10 +rpcMaxTime 101 +# cache 2 +keep 36500 +# walLevel 2 +walLevel 1 +# +# maxConnections 100 +""" + cfgContent = cfgTemplate.format_map(cfgValues) + f = open(cfgFile, "w") + f.write(cfgContent) + f.close() + + def rotateLogs(self): + logPath = self.getLogDir() + # ref: https://stackoverflow.com/questions/1995373/deleting-all-files-in-a-directory-with-python/1995397 + if os.path.exists(logPath): + logPathSaved = logPath + "_" + time.strftime('%Y-%m-%d-%H-%M-%S') + Logging.info("Saving old log files to: {}".format(logPathSaved)) + os.rename(logPath, logPathSaved) + # os.mkdir(logPath) # recreate, no need actually, TDengine will auto-create with proper perms + + + def getExecFile(self): # .../taosd + return self._buildDir + "/build/bin/taosd" + + def getRunDir(self): # TODO: rename to "root dir" ?! + return self._buildDir + self._subdir + + def getCfgDir(self): # path, not file + return self.getRunDir() + "/cfg" + + def getLogDir(self): + return self.getRunDir() + "/log" + + def getHostAddr(self): + return "127.0.0.1" + + def getServiceCmdLine(self): # to start the instance + return [self.getExecFile(), '-c', self.getCfgDir()] # used in subproce.Popen() + + def _getDnodes(self, dbc): + dbc.query("show dnodes") + cols = dbc.getQueryResult() # id,end_point,vnodes,cores,status,role,create_time,offline reason + return {c[1]:c[4] for c in cols} # {'xxx:6030':'ready', 'xxx:6130':'ready'} + + def createDnode(self, dbt: DbTarget): + """ + With a connection to the "first" EP, let's create a dnode for someone else who + wants to join. + """ + dbc = DbConn.createNative(self.getDbTarget()) + dbc.open() + + if dbt.getEp() in self._getDnodes(dbc): + Logging.info("Skipping DNode creation for: {}".format(dbt)) + dbc.close() + return + + sql = "CREATE DNODE \"{}\"".format(dbt.getEp()) + dbc.execute(sql) + dbc.close() + + def getStatus(self): + return self._smThread.getStatus() + + def getSmThread(self): + return self._smThread + + def start(self): + if not self.getStatus().isStopped(): + raise CrashGenError("Cannot start instance from status: {}".format(self.getStatus())) + + Logging.info("Starting TDengine instance: {}".format(self)) + self.generateCfgFile() # service side generates config file, client does not + self.rotateLogs() + + self._smThread.start(self.getServiceCmdLine()) + + def stop(self): + self._smThread.stop() + + def isFirst(self): + return self._tInstNum == 0 + + +class TdeSubProcess: + """ + A class to to represent the actual sub process that is the run-time + of a TDengine instance. + + It takes a TdeInstance object as its parameter, with the rationale being + "a sub process runs an instance". 
+ """ + + # RET_ALREADY_STOPPED = -1 + # RET_TIME_OUT = -3 + # RET_SUCCESS = -4 + + def __init__(self): + self.subProcess = None + # if tInst is None: + # raise CrashGenError("Empty instance not allowed in TdeSubProcess") + # self._tInst = tInst # Default create at ServiceManagerThread + + def __repr__(self): + if self.subProcess is None: + return '[TdeSubProc: Empty]' + return '[TdeSubProc: pid = {}]'.format(self.getPid()) + + def getStdOut(self): + return self.subProcess.stdout + + def getStdErr(self): + return self.subProcess.stderr + + def isRunning(self): + return self.subProcess is not None + + def getPid(self): + return self.subProcess.pid + + def start(self, cmdLine): + ON_POSIX = 'posix' in sys.builtin_module_names + + # Sanity check + if self.subProcess: # already there + raise RuntimeError("Corrupt process state") + + # Prepare environment variables for coverage information + # Ref: https://stackoverflow.com/questions/2231227/python-subprocess-popen-with-a-modified-environment + myEnv = os.environ.copy() + TdeInstance.prepareGcovEnv(myEnv) + + # print(myEnv) + # print(myEnv.items()) + # print("Starting TDengine via Shell: {}".format(cmdLineStr)) + + useShell = True + self.subProcess = subprocess.Popen( + ' '.join(cmdLine) if useShell else cmdLine, + shell=useShell, + # svcCmdSingle, shell=True, # capture core dump? + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + # bufsize=1, # not supported in binary mode + close_fds=ON_POSIX, + env=myEnv + ) # had text=True, which interferred with reading EOF + + STOP_SIGNAL = signal.SIGKILL # What signal to use (in kill) to stop a taosd process? + + def stop(self): + """ + Stop a sub process, and try to return a meaningful return code. + + Common POSIX signal values (from man -7 signal): + SIGHUP 1 + SIGINT 2 + SIGQUIT 3 + SIGILL 4 + SIGTRAP 5 + SIGABRT 6 + SIGIOT 6 + SIGBUS 7 + SIGEMT - + SIGFPE 8 + SIGKILL 9 + SIGUSR1 10 + SIGSEGV 11 + SIGUSR2 12 + """ + if not self.subProcess: + Logging.error("Sub process already stopped") + return # -1 + + retCode = self.subProcess.poll() # ret -N means killed with signal N, otherwise it's from exit(N) + if retCode: # valid return code, process ended + retCode = -retCode # only if valid + Logging.warning("TSP.stop(): process ended itself") + self.subProcess = None + return retCode + + # process still alive, let's interrupt it + Logging.info("Terminate running process, send SIG_{} and wait...".format(self.STOP_SIGNAL)) + # sub process should end, then IPC queue should end, causing IO thread to end + topSubProc = psutil.Process(self.subProcess.pid) + for child in topSubProc.children(recursive=True): # or parent.children() for recursive=False + child.send_signal(self.STOP_SIGNAL) + time.sleep(0.2) # 200 ms + # topSubProc.send_signal(sig) # now kill the main sub process (likely the Shell) + + self.subProcess.send_signal(self.STOP_SIGNAL) # main sub process (likely the Shell) + self.subProcess.wait(20) + retCode = self.subProcess.returncode # should always be there + # May throw subprocess.TimeoutExpired exception above, therefore + # The process is guranteed to have ended by now + self.subProcess = None + if retCode != 0: # != (- signal.SIGINT): + Logging.error("TSP.stop(): Failed to stop sub proc properly w/ SIG {}, retCode={}".format( + self.STOP_SIGNAL, retCode)) + else: + Logging.info("TSP.stop(): sub proc successfully terminated with SIG {}".format(self.STOP_SIGNAL)) + return - retCode + +class ServiceManager: + PAUSE_BETWEEN_IPC_CHECK = 1.2 # seconds between checks on STDOUT of sub process + + def 
__init__(self, numDnodes): # >1 when we run a cluster + Logging.info("TDengine Service Manager (TSM) created") + self._numDnodes = numDnodes # >1 means we have a cluster + self._lock = threading.Lock() + # signal.signal(signal.SIGTERM, self.sigIntHandler) # Moved to MainExec + # signal.signal(signal.SIGINT, self.sigIntHandler) + # signal.signal(signal.SIGUSR1, self.sigUsrHandler) # different handler! + + self.inSigHandler = False + # self._status = MainExec.STATUS_RUNNING # set inside + # _startTaosService() + self._runCluster = (numDnodes > 1) + self._tInsts : List[TdeInstance] = [] + for i in range(0, numDnodes): + ti = self._createTdeInstance(i) # construct tInst + self._tInsts.append(ti) + + # self.svcMgrThreads : List[ServiceManagerThread] = [] + # for i in range(0, numDnodes): + # thread = self._createThread(i) # construct tInst + # self.svcMgrThreads.append(thread) + + def _createTdeInstance(self, dnIndex): + if not self._runCluster: # single instance + subdir = 'test' + else: # Create all threads in a cluster + subdir = 'cluster_dnode_{}'.format(dnIndex) + fepPort= 6030 # firstEP Port + port = fepPort + dnIndex * 100 + return TdeInstance(subdir, dnIndex, port, fepPort) + # return ServiceManagerThread(dnIndex, ti) + + def _doMenu(self): + choice = "" + while True: + print("\nInterrupting Service Program, Choose an Action: ") + print("1: Resume") + print("2: Terminate") + print("3: Restart") + # Remember to update the if range below + # print("Enter Choice: ", end="", flush=True) + while choice == "": + choice = input("Enter Choice: ") + if choice != "": + break # done with reading repeated input + if choice in ["1", "2", "3"]: + break # we are done with whole method + print("Invalid choice, please try again.") + choice = "" # reset + return choice + + def sigUsrHandler(self, signalNumber, frame): + print("Interrupting main thread execution upon SIGUSR1") + if self.inSigHandler: # already + print("Ignoring repeated SIG...") + return # do nothing if it's already not running + self.inSigHandler = True + + choice = self._doMenu() + if choice == "1": + self.sigHandlerResume() # TODO: can the sub-process be blocked due to us not reading from queue? + elif choice == "2": + self.stopTaosServices() + elif choice == "3": # Restart + self.restart() + else: + raise RuntimeError("Invalid menu choice: {}".format(choice)) + + self.inSigHandler = False + + def sigIntHandler(self, signalNumber, frame): + print("ServiceManager: INT Signal Handler starting...") + if self.inSigHandler: + print("Ignoring repeated SIG_INT...") + return + self.inSigHandler = True + + self.stopTaosServices() + print("ServiceManager: INT Signal Handler returning...") + self.inSigHandler = False + + def sigHandlerResume(self): + print("Resuming TDengine service manager (main thread)...\n\n") + + # def _updateThreadStatus(self): + # if self.svcMgrThread: # valid svc mgr thread + # if self.svcMgrThread.isStopped(): # done? + # self.svcMgrThread.procIpcBatch() # one last time. TODO: appropriate? + # self.svcMgrThread = None # no more + + def isActive(self): + """ + Determine if the service/cluster is active at all, i.e. at least + one thread is not "stopped". 
+ """ + for ti in self._tInsts: + if not ti.getStatus().isStopped(): + return True + return False + + def isRunning(self): + for ti in self._tInsts: + if not ti.getStatus().isRunning(): + return False + return True + + + # def isRestarting(self): + # """ + # Determine if the service/cluster is being "restarted", i.e., at least + # one thread is in "restarting" status + # """ + # for thread in self.svcMgrThreads: + # if thread.isRestarting(): + # return True + # return False + + def isStable(self): + """ + Determine if the service/cluster is "stable", i.e. all of the + threads are in "stable" status. + """ + for ti in self._tInsts: + if not ti.getStatus().isStable(): + return False + return True + + def _procIpcAll(self): + while self.isActive(): + Progress.emit(Progress.SERVICE_HEART_BEAT) + for ti in self._tInsts: # all thread objects should always be valid + # while self.isRunning() or self.isRestarting() : # for as long as the svc mgr thread is still here + status = ti.getStatus() + if status.isRunning(): + th = ti.getSmThread() + th.procIpcBatch() # regular processing, + if status.isStopped(): + th.procIpcBatch() # one last time? + # self._updateThreadStatus() + + time.sleep(self.PAUSE_BETWEEN_IPC_CHECK) # pause, before next round + # raise CrashGenError("dummy") + Logging.info("Service Manager Thread (with subprocess) ended, main thread exiting...") + + def _getFirstInstance(self): + return self._tInsts[0] + + def startTaosServices(self): + with self._lock: + if self.isActive(): + raise RuntimeError("Cannot start TAOS service(s) when one/some may already be running") + + # Find if there's already a taosd service, and then kill it + for proc in psutil.process_iter(): + if proc.name() == 'taosd': + Logging.info("Killing an existing TAOSD process in 2 seconds... press CTRL-C to interrupt") + time.sleep(2.0) + proc.kill() + # print("Process: {}".format(proc.name())) + + # self.svcMgrThread = ServiceManagerThread() # create the object + + for ti in self._tInsts: + ti.start() + if not ti.isFirst(): + tFirst = self._getFirstInstance() + tFirst.createDnode(ti.getDbTarget()) + ti.getSmThread().procIpcBatch(trimToTarget=10, forceOutput=True) # for printing 10 lines + + def stopTaosServices(self): + with self._lock: + if not self.isActive(): + Logging.warning("Cannot stop TAOS service(s), already not active") + return + + for ti in self._tInsts: + ti.stop() + + def run(self): + self.startTaosServices() + self._procIpcAll() # pump/process all the messages, may encounter SIG + restart + if self.isActive(): # if sig handler hasn't destroyed it by now + self.stopTaosServices() # should have started already + + def restart(self): + if not self.isStable(): + Logging.warning("Cannot restart service/cluster, when not stable") + return + + # self._isRestarting = True + if self.isActive(): + self.stopTaosServices() + else: + Logging.warning("Service not active when restart requested") + + self.startTaosServices() + # self._isRestarting = False + + # def isRunning(self): + # return self.svcMgrThread != None + + # def isRestarting(self): + # return self._isRestarting + +class ServiceManagerThread: + """ + A class representing a dedicated thread which manages the "sub process" + of the TDengine service, interacting with its STDOUT/ERR. 
+ + It takes a TdeInstance parameter at creation time, or create a default + """ + MAX_QUEUE_SIZE = 10000 + + def __init__(self): + # Set the sub process + self._tdeSubProcess = None # type: TdeSubProcess + + # Arrange the TDengine instance + # self._tInstNum = tInstNum # instance serial number in cluster, ZERO based + # self._tInst = tInst or TdeInstance() # Need an instance + + self._thread = None # The actual thread, # type: threading.Thread + self._status = Status(Status.STATUS_STOPPED) # The status of the underlying service, actually. + + def __repr__(self): + return "[SvcMgrThread: status={}, subProc={}]".format( + self.getStatus(), self._tdeSubProcess) + + def getStatus(self): + return self._status + + # Start the thread (with sub process), and wait for the sub service + # to become fully operational + def start(self, cmdLine): + if self._thread: + raise RuntimeError("Unexpected _thread") + if self._tdeSubProcess: + raise RuntimeError("TDengine sub process already created/running") + + Logging.info("Attempting to start TAOS service: {}".format(self)) + + self._status.set(Status.STATUS_STARTING) + self._tdeSubProcess = TdeSubProcess() + self._tdeSubProcess.start(cmdLine) + + self._ipcQueue = Queue() + self._thread = threading.Thread( # First thread captures server OUTPUT + target=self.svcOutputReader, + args=(self._tdeSubProcess.getStdOut(), self._ipcQueue)) + self._thread.daemon = True # thread dies with the program + self._thread.start() + + self._thread2 = threading.Thread( # 2nd thread captures server ERRORs + target=self.svcErrorReader, + args=(self._tdeSubProcess.getStdErr(), self._ipcQueue)) + self._thread2.daemon = True # thread dies with the program + self._thread2.start() + + # wait for service to start + for i in range(0, 100): + time.sleep(1.0) + # self.procIpcBatch() # don't pump message during start up + Progress.emit(Progress.SERVICE_START_NAP) + # print("_zz_", end="", flush=True) + if self._status.isRunning(): + Logging.info("[] TDengine service READY to process requests") + Logging.info("[] TAOS service started: {}".format(self)) + # self._verifyDnode(self._tInst) # query and ensure dnode is ready + # Logging.debug("[] TAOS Dnode verified: {}".format(self)) + return # now we've started + # TODO: handle failure-to-start better? + self.procIpcBatch(100, True) # display output before cronking out, trim to last 20 msgs, force output + raise RuntimeError("TDengine service did not start successfully: {}".format(self)) + + def _verifyDnode(self, tInst: TdeInstance): + dbc = DbConn.createNative(tInst.getDbTarget()) + dbc.open() + dbc.query("show dnodes") + # dbc.query("DESCRIBE {}.{}".format(dbName, self._stName)) + cols = dbc.getQueryResult() # id,end_point,vnodes,cores,status,role,create_time,offline reason + # ret = {row[0]:row[1] for row in stCols if row[3]=='TAG'} # name:type + isValid = False + for col in cols: + # print("col = {}".format(col)) + ep = col[1].split(':') # 10.1.30.2:6030 + print("Found ep={}".format(ep)) + if tInst.getPort() == int(ep[1]): # That's us + # print("Valid Dnode matched!") + isValid = True # now we are valid + break + if not isValid: + print("Failed to start dnode, sleep for a while") + time.sleep(600) + raise RuntimeError("Failed to start Dnode, expected port not found: {}". 
+ format(tInst.getPort())) + dbc.close() + + def stop(self): + # can be called from both main thread or signal handler + Logging.info("Terminating TDengine service running as the sub process...") + if self.getStatus().isStopped(): + Logging.info("Service already stopped") + return + if self.getStatus().isStopping(): + Logging.info("Service is already being stopped") + return + # Linux will send Control-C generated SIGINT to the TDengine process + # already, ref: + # https://unix.stackexchange.com/questions/176235/fork-and-how-signals-are-delivered-to-processes + if not self._tdeSubProcess: + raise RuntimeError("sub process object missing") + + self._status.set(Status.STATUS_STOPPING) + # retCode = self._tdeSubProcess.stop() + try: + retCode = self._tdeSubProcess.stop() + # print("Attempted to stop sub process, got return code: {}".format(retCode)) + if retCode == signal.SIGSEGV : # SGV + Logging.error("[[--ERROR--]]: TDengine service SEGV fault (check core file!)") + except subprocess.TimeoutExpired as err: + Logging.info("Time out waiting for TDengine service process to exit") + else: + if self._tdeSubProcess.isRunning(): # still running, should now never happen + Logging.error("FAILED to stop sub process, it is still running... pid = {}".format( + self._tdeSubProcess.getPid())) + else: + self._tdeSubProcess = None # not running any more + self.join() # stop the thread, change the status, etc. + + # Check if it's really stopped + outputLines = 10 # for last output + if self.getStatus().isStopped(): + self.procIpcBatch(outputLines) # one last time + Logging.debug("End of TDengine Service Output: {}".format(self)) + Logging.info("----- TDengine Service (managed by SMT) is now terminated -----\n") + else: + print("WARNING: SMT did not terminate as expected: {}".format(self)) + + def join(self): + # TODO: sanity check + if not self.getStatus().isStopping(): + raise RuntimeError( + "SMT.Join(): Unexpected status: {}".format(self._status)) + + if self._thread: + self._thread.join() + self._thread = None + self._status.set(Status.STATUS_STOPPED) + # STD ERR thread + self._thread2.join() + self._thread2 = None + else: + print("Joining empty thread, doing nothing") + + def _trimQueue(self, targetSize): + if targetSize <= 0: + return # do nothing + q = self._ipcQueue + if (q.qsize() <= targetSize): # no need to trim + return + + Logging.debug("Triming IPC queue to target size: {}".format(targetSize)) + itemsToTrim = q.qsize() - targetSize + for i in range(0, itemsToTrim): + try: + q.get_nowait() + except Empty: + break # break out of for loop, no more trimming + + TD_READY_MSG = "TDengine is initialized successfully" + + def procIpcBatch(self, trimToTarget=0, forceOutput=False): + self._trimQueue(trimToTarget) # trim if necessary + # Process all the output generated by the underlying sub process, + # managed by IO thread + print("<", end="", flush=True) + while True: + try: + line = self._ipcQueue.get_nowait() # getting output at fast speed + self._printProgress("_o") + except Empty: + # time.sleep(2.3) # wait only if there's no output + # no more output + print(".>", end="", flush=True) + return # we are done with THIS BATCH + else: # got line, printing out + if forceOutput: + Logging.info('[TAOSD] ' + line) + else: + Logging.debug('[TAOSD] ' + line) + print(">", end="", flush=True) + + _ProgressBars = ["--", "//", "||", "\\\\"] + + def _printProgress(self, msg): # TODO: assuming 2 chars + print(msg, end="", flush=True) + pBar = self._ProgressBars[Dice.throw(4)] + print(pBar, end="", flush=True) + 
print('\b\b\b\b', end="", flush=True) + + def svcOutputReader(self, out: IO, queue): + # Important Reference: https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python + # print("This is the svcOutput Reader...") + # for line in out : + for line in iter(out.readline, b''): + # print("Finished reading a line: {}".format(line)) + # print("Adding item to queue...") + try: + line = line.decode("utf-8").rstrip() + except UnicodeError: + print("\nNon-UTF8 server output: {}\n".format(line)) + + # This might block, and then causing "out" buffer to block + queue.put(line) + self._printProgress("_i") + + if self._status.isStarting(): # we are starting, let's see if we have started + if line.find(self.TD_READY_MSG) != -1: # found + Logging.info("Waiting for the service to become FULLY READY") + time.sleep(1.0) # wait for the server to truly start. TODO: remove this + Logging.info("Service is now FULLY READY") # TODO: more ID info here? + self._status.set(Status.STATUS_RUNNING) + + # Trim the queue if necessary: TODO: try this 1 out of 10 times + self._trimQueue(self.MAX_QUEUE_SIZE * 9 // 10) # trim to 90% size + + if self._status.isStopping(): # TODO: use thread status instead + # WAITING for stopping sub process to finish its outptu + print("_w", end="", flush=True) + + # queue.put(line) + # meaning sub process must have died + Logging.info("EOF for TDengine STDOUT: {}".format(self)) + out.close() + + def svcErrorReader(self, err: IO, queue): + for line in iter(err.readline, b''): + Logging.info("TDengine STDERR: {}".format(line)) + Logging.info("EOF for TDengine STDERR: {}".format(self)) + err.close() \ No newline at end of file diff --git a/tests/pytest/crash_gen_bootstrap.py b/tests/pytest/crash_gen_bootstrap.py new file mode 100644 index 0000000000000000000000000000000000000000..fd12284b9d7782ac7df89c37fcb653ca3bebe82b --- /dev/null +++ b/tests/pytest/crash_gen_bootstrap.py @@ -0,0 +1,23 @@ +# -----!/usr/bin/python3.7 +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
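svcOutputReader and svcErrorReader above follow the usual pump-a-pipe-into-a-Queue pattern so the main thread never blocks on subprocess output; a generic, self-contained sketch of the same idea (the command and timeout are illustrative):

    import subprocess
    import threading
    from queue import Queue, Empty

    def pump(stream, q):
        # Daemon thread body: block on readline here, not in the consumer
        for raw in iter(stream.readline, b''):
            q.put(raw.decode("utf-8", errors="replace").rstrip())
        stream.close()

    proc = subprocess.Popen(["echo", "hello"], stdout=subprocess.PIPE)
    q = Queue()
    reader = threading.Thread(target=pump, args=(proc.stdout, q), daemon=True)
    reader.start()

    proc.wait()
    reader.join(timeout=2.0)
    while True:
        try:
            print("[OUT]", q.get_nowait())  # drain whatever was captured
        except Empty:
            break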
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +import sys +from crash_gen.crash_gen_main import MainExec + +if __name__ == "__main__": + + mExec = MainExec() + mExec.init() + exitCode = mExec.run() + + print("Exiting with code: {}".format(exitCode)) + sys.exit(exitCode) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 39d0fa3d94d36aab88366cc9f428a08f6fd3d1dc..525fbad6c1c3a9c45d0e92ba467b255a3538a975 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -24,6 +24,7 @@ python3 ./test.py -f table/alter_wal0.py python3 ./test.py -f table/column_name.py python3 ./test.py -f table/column_num.py python3 ./test.py -f table/db_table.py +python3 ./test.py -f table/create_sensitive.py #python3 ./test.py -f table/tablename-boundary.py # tag @@ -150,15 +151,20 @@ python3 ./test.py -f query/select_last_crash.py python3 ./test.py -f query/queryNullValueTest.py python3 ./test.py -f query/queryInsertValue.py python3 ./test.py -f query/queryConnection.py +python3 ./test.py -f query/queryCountCSVData.py python3 ./test.py -f query/natualInterval.py python3 ./test.py -f query/bug1471.py +#python3 ./test.py -f query/dataLossTest.py +python3 ./test.py -f query/bug1874.py +python3 ./test.py -f query/bug1875.py +python3 ./test.py -f query/bug1876.py #stream python3 ./test.py -f stream/metric_1.py python3 ./test.py -f stream/new.py python3 ./test.py -f stream/stream1.py python3 ./test.py -f stream/stream2.py -python3 ./test.py -f stream/parser.py +#python3 ./test.py -f stream/parser.py python3 ./test.py -f stream/history.py #alter table @@ -186,7 +192,7 @@ python3 ./test.py -f functions/function_leastsquares.py -r 1 python3 ./test.py -f functions/function_max.py -r 1 python3 ./test.py -f functions/function_min.py -r 1 python3 ./test.py -f functions/function_operations.py -r 1 -python3 ./test.py -f functions/function_percentile.py +python3 ./test.py -f functions/function_percentile.py -r 1 python3 ./test.py -f functions/function_spread.py -r 1 python3 ./test.py -f functions/function_stddev.py -r 1 python3 ./test.py -f functions/function_sum.py -r 1 @@ -196,6 +202,7 @@ python3 queryCount.py python3 ./test.py -f query/queryGroupbyWithInterval.py python3 client/twoClients.py python3 test.py -f query/queryInterval.py +python3 test.py -f query/queryFillTest.py # tools python3 test.py -f tools/taosdemo.py @@ -204,3 +211,20 @@ python3 test.py -f tools/taosdemo.py python3 test.py -f subscribe/singlemeter.py #python3 test.py -f subscribe/stability.py python3 test.py -f subscribe/supertable.py + + +# update +python3 ./test.py -f update/allow_update.py +python3 ./test.py -f update/allow_update-0.py +python3 ./test.py -f update/append_commit_data.py +python3 ./test.py -f update/append_commit_last-0.py +python3 ./test.py -f update/append_commit_last.py +python3 ./test.py -f update/merge_commit_data.py +python3 ./test.py -f update/merge_commit_data-0.py +python3 ./test.py -f update/merge_commit_data2.py +python3 ./test.py -f update/merge_commit_data2_update0.py +python3 ./test.py -f update/merge_commit_last-0.py +python3 ./test.py -f update/merge_commit_last.py + +# wal +python3 ./test.py -f wal/addOldWalTest.py \ No newline at end of file diff --git a/tests/pytest/functions/function_arithmetic.py b/tests/pytest/functions/function_arithmetic.py new file mode 100644 index 
0000000000000000000000000000000000000000..a2249bab8848927e707b1f3c9378a00a7c91546e --- /dev/null +++ b/tests/pytest/functions/function_arithmetic.py @@ -0,0 +1,71 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.rowNum = 10 + self.ts = 1537146000000 + + def run(self): + tdSql.prepare() + + tdSql.execute('''create table test(ts timestamp, col1 int, col2 int) tags(loc nchar(20))''') + tdSql.execute("create table test1 using test tags('beijing')") + tdSql.execute("create table test2 using test tags('shanghai')") + for i in range(self.rowNum): + tdSql.execute("insert into test1 values(%d, %d, %d)" % (self.ts + i, i + 1, i + 1)) + tdSql.execute("insert into test2 values(%d, %d, %d)" % (self.ts + i, i + 1, i + 1)) + + # arithmetic verifacation + tdSql.query("select 0.1 + 0.1 from test") + tdSql.checkRows(self.rowNum * 2) + for i in range(self.rowNum * 2): + tdSql.checkData(0, 0, 0.20000000) + + tdSql.query("select 4 * avg(col1) from test") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 22) + + tdSql.query("select 4 * sum(col1) from test") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 440) + + tdSql.query("select 4 * avg(col1) * sum(col2) from test") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 2420) + + tdSql.query("select 4 * avg(col1) * sum(col2) from test group by loc") + tdSql.checkRows(2) + tdSql.checkData(0, 0, 1210) + tdSql.checkData(1, 0, 1210) + + tdSql.error("select avg(col1 * 2)from test group by loc") + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/functions/function_percentile.py b/tests/pytest/functions/function_percentile.py index aaeb94372e44282da0d83849c18e40857463b42c..e63d65f2e6a429015e2b4d7dcbe5e8c9884eea5e 100644 --- a/tests/pytest/functions/function_percentile.py +++ b/tests/pytest/functions/function_percentile.py @@ -130,8 +130,19 @@ class TDTestCase: tdSql.query("select percentile(col6, 100) from test") tdSql.checkData(0, 0, np.percentile(floatData, 100)) tdSql.query("select apercentile(col6, 100) from test") - print("apercentile result: %s" % tdSql.getData(0, 0)) + print("apercentile result: %s" % tdSql.getData(0, 0)) + tdSql.execute("create table meters (ts timestamp, voltage int) tags(loc nchar(20))") + tdSql.execute("create table t0 using meters tags('beijing')") + tdSql.execute("create table t1 using meters tags('shanghai')") + for i in range(self.rowNum): + tdSql.execute("insert into t0 values(%d, %d)" % (self.ts + i, i + 1)) + tdSql.execute("insert into t1 values(%d, %d)" % (self.ts + i, i + 1)) + + tdSql.error("select percentile(voltage, 20) from meters") + tdSql.query("select apercentile(voltage, 20) from meters") + print("apercentile result: %s" % tdSql.getData(0, 0)) + def stop(self): tdSql.close() 
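The constants asserted in the arithmetic test above (22, 440, 2420, 1210) follow directly from the inserted data, where col1 and col2 both run 1..10 in each of the two sub-tables; a quick sketch of the expected values:

    col = list(range(1, 11)) * 2        # the super table sees 20 rows
    avg1 = sum(col) / len(col)          # 5.5
    print(4 * avg1)                     # 22.0   -> 4 * avg(col1)
    print(4 * sum(col))                 # 440    -> 4 * sum(col1)
    print(4 * avg1 * sum(col))          # 2420.0 -> 4 * avg(col1) * sum(col2)
    print(4 * 5.5 * 55)                 # 1210.0 -> per-group value when grouped by loc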
tdLog.success("%s successfully executed" % __file__) diff --git a/tests/pytest/handle_crash_gen_val_log.sh b/tests/pytest/handle_crash_gen_val_log.sh new file mode 100755 index 0000000000000000000000000000000000000000..ce3d1c0c673e6e62fdd2e6ac83769eb3d7a8ca71 --- /dev/null +++ b/tests/pytest/handle_crash_gen_val_log.sh @@ -0,0 +1,34 @@ +#!/bin/bash +# Color setting +RED='\033[0;31m' +GREEN='\033[1;32m' +GREEN_DARK='\033[0;32m' +GREEN_UNDERLINE='\033[4;32m' +NC='\033[0m' +nohup /var/lib/jenkins/workspace/TDinternal/debug/build/bin/taosd -c /var/lib/jenkins/workspace/TDinternal/community/sim/dnode1/cfg >/dev/null & +./crash_gen.sh --valgrind -p -t 10 -s 100 -b 4 +pidof taosd|xargs kill +grep 'start to execute\|ERROR SUMMARY' valgrind.err|grep -v 'grep'|uniq|tee crash_gen_mem_err.log + +for memError in `grep 'ERROR SUMMARY' crash_gen_mem_err.log | awk '{print $4}'` +do +if [ -n "$memError" ]; then + if [ "$memError" -gt 12 ]; then + echo -e "${RED} ## Memory errors number valgrind reports is $memError.\ + More than our threshold! ## ${NC}" + fi +fi +done + +grep 'start to execute\|definitely lost:' valgrind.err|grep -v 'grep'|uniq|tee crash_gen-definitely-lost-out.log +for defiMemError in `grep 'definitely lost:' crash_gen-definitely-lost-out.log | awk '{print $7}'` +do + +if [ -n "$defiMemError" ]; then + if [ "$defiMemError" -gt 3 ]; then + echo -e "${RED} ## Memory errors number valgrind reports \ + Definitely lost is $defiMemError. More than our threshold! ## ${NC}" + exit 8 + fi +fi +done \ No newline at end of file diff --git a/tests/pytest/insert/ningsiInsert.py b/tests/pytest/insert/ningsiInsert.py new file mode 100644 index 0000000000000000000000000000000000000000..bcad2b03ed52bc01cae8b8b800edcd7c213ca375 --- /dev/null +++ b/tests/pytest/insert/ningsiInsert.py @@ -0,0 +1,88 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
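The gate in handle_crash_gen_val_log.sh parses valgrind output twice: error counts above 12 are reported, and a definitely-lost value above 3 fails the run with exit code 8. The same logic expressed as a small Python sketch, with the thresholds and field positions copied from the script's awk commands (the log path is an assumption):

    import sys

    MAX_MEM_ERRORS = 12        # reporting threshold from the script above
    MAX_DEFINITELY_LOST = 3    # failing threshold from the script above

    def check_valgrind_log(path):
        ok = True
        for line in open(path):
            fields = line.split()
            if "ERROR SUMMARY" in line and len(fields) > 3:
                if int(fields[3].replace(",", "")) > MAX_MEM_ERRORS:       # awk's $4
                    print("## valgrind reports {} errors, over threshold".format(fields[3]))
            elif "definitely lost:" in line and len(fields) > 6:
                if int(fields[6].replace(",", "")) > MAX_DEFINITELY_LOST:  # awk's $7
                    print("## definitely lost {} is over threshold".format(fields[6]))
                    ok = False
        return ok

    if __name__ == "__main__":
        sys.exit(0 if check_valgrind_log("valgrind.err") else 8)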
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import requests, json +import threading +import string +import random +import time + +class RestfulInsert: + def init(self): + self.header = {'Authorization': 'Basic cm9vdDp0YW9zZGF0YQ=='} + self.url = "http://ningsi60:6041/rest/sql" + self.ts = 1104508800000 + self.numOfThreads = 10 + self.numOfTables = 3000 + self.dbName = 'netmonitortaos' + self.stbName = 'devinfomt' + self.prefix = 'dev' + + def get_random_string(self, length): + letters = string.ascii_lowercase + result_str = ''.join(random.choice(letters) for i in range(length)) + return result_str + + def createTables(self, threadID): + print("create table: thread %d started" % threadID) + tablesPerThread = int (self.numOfTables / self.numOfThreads) + for i in range(tablesPerThread): + data = "create table '%s'.dev_%d using '%s'.'%s' tags('%s', '%s')" % (self.dbName, i + threadID * tablesPerThread, self.dbName, self.stbName, self.get_random_string(25), self.get_random_string(25)) + response = requests.post(self.url, data, headers = self.header) + if response.status_code != 200: + print(response.content) + + def insertData(self, threadID): + print("insert data: thread %d started" % threadID) + tablesPerThread = int (self.numOfTables / self.numOfThreads) + base_ts = self.ts + while True: + i = 0 + for i in range(tablesPerThread): + data = "insert into %s.dev_%d values(%d, '%s', '%s', %d, %d, %d)" % (self.dbName, i + threadID * tablesPerThread, base_ts, self.get_random_string(25), self.get_random_string(30), random.randint(1, 10000), random.randint(1, 10000), random.randint(1, 10000)) + response = requests.post(self.url, data, headers = self.header) + if response.status_code != 200: + print(response.content) + + time.sleep(30) + base_ts = base_ts + 1 + + def run(self): + data = "create database if not exists %s keep 7300" % self.dbName + requests.post(self.url, data, headers = self.header) + + data = "create table '%s'.'%s' (timeid timestamp, devdesc binary(50), devname binary(50), cpu bigint, temp bigint, ram bigint) tags(devid binary(50), modelid binary(50))" % (self.dbName, self.stbName) + requests.post(self.url, data, headers = self.header) + + threads = [] + for i in range(self.numOfThreads): + thread = threading.Thread(target=self.createTables, args=(i,)) + thread.start() + threads.append(thread) + + for i in range(self.numOfThreads): + threads[i].join() + + threads = [] + for i in range(self.numOfThreads): + thread = threading.Thread(target=self.insertData, args=(i,)) + thread.start() + threads.append(thread) + + for i in range(self.numOfThreads): + threads[i].join() + +ri = RestfulInsert() +ri.init() +ri.run() diff --git a/tests/pytest/insert/restfulInsert.py b/tests/pytest/insert/restfulInsert.py new file mode 100644 index 0000000000000000000000000000000000000000..9fa1f33a2423c4443bc4460ca99973fe56c2786e --- /dev/null +++ b/tests/pytest/insert/restfulInsert.py @@ -0,0 +1,170 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
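Both REST-based insert helpers above drive TDengine with plain HTTP POSTs, the same protocol DbConnRest uses; a minimal standalone sketch (host, port, database name and SQL are placeholders, the response fields mirror the checks in DbConnRest):

    import requests
    from requests.auth import HTTPBasicAuth

    URL = "http://127.0.0.1:6041/rest/sql"        # placeholder endpoint
    AUTH = HTTPBasicAuth("root", "taosdata")      # default credentials used above

    def run_sql(sql):
        rj = requests.post(URL, data=sql, auth=AUTH).json()
        if rj.get("status") == "error":
            raise RuntimeError("REST error {}: {}".format(rj.get("code"), rj.get("desc")))
        return rj

    run_sql("create database if not exists rest_demo")
    run_sql("create table if not exists rest_demo.t1 (ts timestamp, v int)")
    run_sql("insert into rest_demo.t1 values (now, 1)")
    print(run_sql("select * from rest_demo.t1").get("data"))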
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import requests +import threading +import random +import time +import argparse + +class RestfulInsert: + def __init__(self, host, dbname, threads, tables, records, batchSize, tbNamePerfix, outOfOrder): + self.header = {'Authorization': 'Basic cm9vdDp0YW9zZGF0YQ=='} + self.url = "http://%s:6041/rest/sql" % host + self.ts = 1500000000000 + self.dbname = dbname + self.numOfThreads = threads + self.numOfTables = tables + self.recordsPerTable = records + self.batchSize = batchSize + self.tableNamePerfix = tbNamePerfix + self.outOfOrder = outOfOrder + + def createTable(self, threadID): + tablesPerThread = int (self.numOfTables / self.numOfThreads) + print("create table %d to %d" % (tablesPerThread * threadID, tablesPerThread * (threadID + 1) - 1)) + for i in range(tablesPerThread): + tableID = threadID * tablesPerThread + name = 'beijing' if tableID % 2 == 0 else 'shanghai' + data = "create table %s.%s%d using %s.meters tags(%d, '%s')" % (self.dbname, self.tableNamePerfix, tableID + i, self.dbname, tableID + i, name) + requests.post(self.url, data, headers = self.header) + + def insertData(self, threadID): + print("thread %d started" % threadID) + tablesPerThread = int (self.numOfTables / self.numOfThreads) + for i in range(tablesPerThread): + tableID = i + threadID * tablesPerThread + start = self.ts + for j in range(int(self.recordsPerTable / self.batchSize)): + data = "insert into %s.%s%d values" % (self.dbname, self.tableNamePerfix, tableID) + values = [] + for k in range(self.batchSize): + data += "(%d, %d, %d, %d)" % (start + j * self.batchSize + k, random.randint(1, 100), random.randint(1, 100), random.randint(1, 100)) + requests.post(self.url, data, headers = self.header) + + def insertUnlimitedData(self, threadID): + print("thread %d started" % threadID) + tablesPerThread = int (self.numOfTables / self.numOfThreads) + while True: + i = 0 + start = self.ts + + for i in range(tablesPerThread): + tableID = i + threadID * tablesPerThread + + data = "insert into %s.%s%d values" % (self.dbname, self.tableNamePerfix, tableID) + values = [] + for k in range(self.batchSize): + values.append("(%d, %d, %d, %d)" % (start + j * self.batchSize + k, random.randint(1, 100), random.randint(1, 100), random.randint(1, 100))) + + if(self.outOfOrder == False): + for k in range(len(values)): + data += values[k] + else: + random.shuffle(values) + for k in range(len(values)): + data += values[k] + requests.post(self.url, data, headers = self.header) + + def run(self): + data = "drop database if exists %s" % self.dbname + requests.post(self.url, data, headers = self.header) + data = "create database %s" % self.dbname + requests.post(self.url, data, headers = self.header) + data = "create table %s.meters(ts timestamp, f1 int, f2 int, f3 int) tags(id int, loc nchar(20))" % self.dbname + requests.post(self.url, data, headers = self.header) + + threads = [] + startTime = time.time() + for i in range(self.numOfThreads): + thread = threading.Thread(target=self.createTable, args=(i,)) + thread.start() + threads.append(thread) + for i in range(self.numOfThreads): + threads[i].join() + print("createing %d tables takes %d seconds" % (self.numOfTables, (time.time() - startTime))) + + print("inserting data =======") + threads = [] + 
+        startTime = time.time()
+        for i in range(self.numOfThreads):
+            if(self.recordsPerTable != -1):
+                thread = threading.Thread(target=self.insertData, args=(i,))
+            else:
+                thread = threading.Thread(target=self.insertUnlimitedData, args=(i,))
+            thread.start()
+            threads.append(thread)
+
+        for i in range(self.numOfThreads):
+            threads[i].join()
+        print("inserting %d records takes %d seconds" % (self.numOfTables * self.recordsPerTable, (time.time() - startTime)))
+
+parser = argparse.ArgumentParser()
+parser.add_argument(
+    '-H',
+    '--host-name',
+    action='store',
+    default='127.0.0.1',
+    type=str,
+    help='host name to be connected (default: 127.0.0.1)')
+parser.add_argument(
+    '-d',
+    '--db-name',
+    action='store',
+    default='test',
+    type=str,
+    help='Database name to be created (default: test)')
+parser.add_argument(
+    '-t',
+    '--number-of-threads',
+    action='store',
+    default=10,
+    type=int,
+    help='Number of threads used to create tables and insert data (default: 10)')
+parser.add_argument(
+    '-T',
+    '--number-of-tables',
+    action='store',
+    default=1000,
+    type=int,
+    help='Number of tables to be created (default: 1000)')
+parser.add_argument(
+    '-r',
+    '--number-of-records',
+    action='store',
+    default=1000,
+    type=int,
+    help='Number of records to be created for each table (default: 1000, -1 for unlimited records)')
+parser.add_argument(
+    '-s',
+    '--batch-size',
+    action='store',
+    default=1000,
+    type=int,
+    help='Number of records inserted per request (default: 1000)')
+parser.add_argument(
+    '-p',
+    '--table-name-prefix',
+    action='store',
+    default='t',
+    type=str,
+    help='Table name prefix (default: t)')
+parser.add_argument(
+    '-o',
+    '--out-of-order',
+    action='store_true',
+    help='Insert the records of each batch with out-of-order timestamps (default: False)')
+
+args = parser.parse_args()
+ri = RestfulInsert(args.host_name, args.db_name, args.number_of_threads, args.number_of_tables, args.number_of_records, args.batch_size, args.table_name_prefix, args.out_of_order)
+ri.run()
\ No newline at end of file
diff --git a/tests/pytest/query/bug1874.py b/tests/pytest/query/bug1874.py
new file mode 100644
index 0000000000000000000000000000000000000000..717748487089f229cc578882b91000a7d488fcda
--- /dev/null
+++ b/tests/pytest/query/bug1874.py
@@ -0,0 +1,59 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + print("==========step1") + print("create table && insert data") + + tdSql.execute("create table join_mt0 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(12))") + tdSql.execute("create table join_mt1 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(12))") + stable=0 + insertRows = 1000 + tbnum = 3 + t0 = 1604298064000 + tdLog.info("insert %d rows" % (insertRows)) + for i in range(tbnum): + tdSql.execute("create table join_tb%d using join_mt%d tags(%d,'abc')" %(i,stable,i)) + for j in range(insertRows): + ret = tdSql.execute( + "insert into join_tb%d values (%d , %d,%d,%d,%d,%d,%d,%d, '%s','%s')" % + (i,t0+i,i%100,i%100,i%100,i%100,i%100,i%100,i%100,'binary'+str(i%100),'nchar'+str(i%100))) + stable=stable+1 + for i in range(tbnum): + tdSql.execute("create table join_1_tb%d using join_mt%d tags(%d,'abc')" %(i,stable,i)) + for j in range(insertRows): + ret = tdSql.execute( + "insert into join_tb%d values (%d , %d,%d,%d,%d,%d,%d,%d, '%s','%s')" % + (i,t0+i,i%100,i%100,i%100,i%100,i%100,i%100,i%100,'binary'+str(i%100),'nchar'+str(i%100))) + print("==========step2") + print("join query ") + tdLog.info("select count(join_mt0.c1), first(join_mt0.c1) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts interval(10a) group by join_mt0.t1, join_mt0.t2, join_mt1.t1 order by join_mt0.ts desc, join_mt1.ts asc limit 10;") + tdSql.error("select count(join_mt0.c1), first(join_mt0.c1) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts interval(10a) group by join_mt0.t1, join_mt0.t2, join_mt1.t1 order by join_mt0.ts desc, join_mt1.ts asc limit 10;") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/query/bug1875.py b/tests/pytest/query/bug1875.py new file mode 100644 index 0000000000000000000000000000000000000000..12e22a7a259e1612d8da4f11647a0adcde5dae02 --- /dev/null +++ b/tests/pytest/query/bug1875.py @@ -0,0 +1,60 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + print("==========step1") + print("create table && insert data") + + tdSql.execute("create table join_mt0 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(12))") + tdSql.execute("create table join_mt1 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(12))") + stable=0 + insertRows = 1000 + tbnum = 3 + t0 = 1604298064000 + tdLog.info("insert %d rows" % (insertRows)) + for i in range(tbnum): + tdSql.execute("create table join_tb%d using join_mt%d tags(%d,'abc')" %(i,stable,i)) + for j in range(insertRows): + ret = tdSql.execute( + "insert into join_tb%d values (%d , %d,%d,%d,%d,%d,%d,%d, '%s','%s')" % + (i,t0+i,i%100,i%100,i%100,i%100,i%100,i%100,i%100,'binary'+str(i%100),'nchar'+str(i%100))) + stable=stable+1 + for i in range(tbnum): + tdSql.execute("create table join_1_tb%d using join_mt%d tags(%d,'abc')" %(i,stable,i)) + for j in range(insertRows): + ret = tdSql.execute( + "insert into join_tb%d values (%d , %d,%d,%d,%d,%d,%d,%d, '%s','%s')" % + (i,t0+i,i%100,i%100,i%100,i%100,i%100,i%100,i%100,'binary'+str(i%100),'nchar'+str(i%100))) + print("==========step2") + print("join query ") + tdLog.info("select count(join_mt0.c1), first(join_mt0.c1) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts interval(10a) group by join_mt0.t1, join_mt0.t2, join_mt1.t1 order by join_mt0.ts desc, join_mt1.ts asc limit 10;") + tdSql.error("select count(join_mt0.c1), first(join_mt0.c1), first(join_mt1.c9) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts interval(10a) group by join_mt0.t1, join_mt0.t2 order by join_mt0.t1 desc") + tdSql.error("select count(join_mt0.c1), first(join_mt0.c1), first(join_mt1.c9) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts interval(10a) group by join_mt0.t1, join_mt0.t2 order by join_mt0.t1 desc limit 3;") + tdSql.error("select count(join_mt0.c1), first(join_mt0.c1), first(join_mt1.c9) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts interval(10a) group by join_mt0.t1, join_mt0.t2 order by join_mt0.t1 desc slimit 3;") + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/query/bug1876.py b/tests/pytest/query/bug1876.py new file mode 100644 index 0000000000000000000000000000000000000000..a1cc4b6eb2dc3368fdb21989d3b8df27f74cc8f9 --- /dev/null +++ b/tests/pytest/query/bug1876.py @@ -0,0 +1,58 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + print("==========step1") + print("create table && insert data") + + tdSql.execute("create table join_mt0 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(12))") + tdSql.execute("create table join_mt1 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(12))") + stable=0 + insertRows = 1000 + tbnum = 3 + t0 = 1604298064000 + tdLog.info("insert %d rows" % (insertRows)) + for i in range(tbnum): + tdSql.execute("create table join_tb%d using join_mt%d tags(%d,'abc')" %(i,stable,i)) + for j in range(insertRows): + ret = tdSql.execute( + "insert into join_tb%d values (%d , %d,%d,%d,%d,%d,%d,%d, '%s','%s')" % + (i,t0+i,i%100,i%100,i%100,i%100,i%100,i%100,i%100,'binary'+str(i%100),'nchar'+str(i%100))) + stable=stable+1 + for i in range(tbnum): + tdSql.execute("create table join_1_tb%d using join_mt%d tags(%d,'abc')" %(i,stable,i)) + for j in range(insertRows): + ret = tdSql.execute( + "insert into join_tb%d values (%d , %d,%d,%d,%d,%d,%d,%d, '%s','%s')" % + (i,t0+i,i%100,i%100,i%100,i%100,i%100,i%100,i%100,'binary'+str(i%100),'nchar'+str(i%100))) + print("==========step2") + print("join query ") + tdLog.info("select count(join_mt0.c1), first(join_mt0.c1) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts interval(10a) group by join_mt0.t1, join_mt0.t2, join_mt1.t1 order by join_mt0.ts desc, join_mt1.ts asc limit 10;") + tdSql.error("select count(join_mt0.c1), first(join_mt0.c1)-first(join_mt1.c1), first(join_mt1.c9) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts;") + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/query/dataLossTest.py b/tests/pytest/query/dataLossTest.py new file mode 100644 index 0000000000000000000000000000000000000000..b29dc1fa9f977b1f3f68b3e8f7781c6ac5a1e646 --- /dev/null +++ b/tests/pytest/query/dataLossTest.py @@ -0,0 +1,76 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import inspect + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.numberOfTables = 240 + self.numberOfRecords = 10000 + + def run(self): + tdSql.prepare() + + os.system("yes | taosdemo -t %d -n %d" % (self.numberOfTables, self.numberOfRecords)) + print("==============step1") + + tdSql.execute("use test") + sql = "select count(*) from meters" + tdSql.query(sql) + rows = tdSql.getData(0, 0) + print ("number of records: %d" % rows) + + newRows = rows + for i in range(10000): + print("kill taosd") + time.sleep(10) + os.system("sudo kill -9 $(pgrep taosd)") + tdDnodes.startWithoutSleep(1) + while True: + try: + tdSql.query(sql) + newRows = tdSql.getData(0, 0) + print("numer of records after kill taosd %d" % newRows) + time.sleep(10) + break + except Exception as e: + pass + continue + + if newRows < rows: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, newRows, rows) + tdLog.exit("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args) + break + + tdSql.query(sql) + tdSql.checkData(0, 0, rows) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/query/kill_query.py b/tests/pytest/query/kill_query.py new file mode 100644 index 0000000000000000000000000000000000000000..8975eea2685b93e551e0008fbf5013d5e16e934f --- /dev/null +++ b/tests/pytest/query/kill_query.py @@ -0,0 +1,82 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +from util.dnodes import tdDnodes +import os +import threading +import time + + +class TDTestCase: + """ + kill query + """ + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + + def query(self): + conn = taos.connect(host='127.0.0.1', user='root', password='taosdata', config='/etc/config') + cursor = conn.cursor() + while True: + cursor.execute('show queries;') + print('show queries!') + temp = cursor.fetchall() + if temp: + print(temp[0][0]) + cursor.execute('kill query %s ;' % temp[0][0]) + print('kill query success') + break + time.sleep(0.5) + cursor.close() + conn.close() + + def run(self): + tdSql.prepare() + + print("==============step1") + os.system('yes | sudo taosdemo -n 100') + print('insert into test.meters 10000000 rows') + + + t1 = threading.Thread(target=self.query) + t1.setDaemon(True) + t1.start() + + print("==============step2") + tdSql.execute('use test;') + try: + print('============begin select * from 10000000 rows') + tdSql.query('select * from test.meters;') + # print(tdSql.queryResult) + except Exception as e: + if not "ProgrammingError('Query terminated'" in str(e): + raise Exception('fail') + + print('success') + print('kill query success') + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) + diff --git a/tests/pytest/query/queryCountCSVData.py b/tests/pytest/query/queryCountCSVData.py new file mode 100644 index 0000000000000000000000000000000000000000..6c73425faec24afeed0c8a5a168f575ec182c771 --- /dev/null +++ b/tests/pytest/query/queryCountCSVData.py @@ -0,0 +1,71 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +from util.dnodes import tdDnodes + + +class TDTestCase: + """ + create table and insert data from disordered.csv which timestamp is disordered and + ordered.csv which timestamp is ordered. 
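+    both CSV files are loaded via the bulk insert-from-file statement (insert into <table> file '<path>'),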
+ then execute 'select count(*) from table xx;' + """ + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + print("==============step1") + tdSql.execute("create database if not exists demo;"); + tdSql.execute("use demo;") + tdSql.execute("CREATE TABLE IF NOT EXISTS test1 (ts TIMESTAMP, ValueID int, " + "VariantValue float, Quality int, Flags int);") + tdSql.execute("CREATE TABLE IF NOT EXISTS test2 (ts TIMESTAMP, ValueID int, " + "VariantValue float, Quality int, Flags int);") + ordered_csv = __file__.split('query')[0] + 'test_data/ordered.csv' + disordered_csv = __file__.split('query')[0] + 'test_data/disordered.csv' + + tdSql.execute(" insert into test1 file '{file}';".format(file=ordered_csv)) + tdSql.execute(" insert into test2 file '{file}';".format(file=disordered_csv)) + print("==============insert into test1 and test2 form test file") + + + print("==============step2") + tdSql.query('select * from test1;') + with open(ordered_csv) as f1: + num1 = len(f1.readlines()) + tdSql.checkRows(num1) + + + tdSql.query('select * from test2;') + with open(disordered_csv) as f2: + num2 = len(f2.readlines()) + tdSql.checkRows(num2) + print("=============execute select count(*) from xxx") + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/query/queryFillTest.py b/tests/pytest/query/queryFillTest.py new file mode 100644 index 0000000000000000000000000000000000000000..9fd898041a525f9aaba65bda1233f94b1bb922e6 --- /dev/null +++ b/tests/pytest/query/queryFillTest.py @@ -0,0 +1,68 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1537146000000 + + def run(self): + tdSql.prepare() + + print("==============step1") + tdSql.execute( + "create table if not exists stb (ts timestamp, col1 int, col2 int, col3 int) tags(loc nchar(20), id int)") + + currTs = self.ts + + for i in range(100): + sql = "create table tb%d using stb tags('city%d', 1)" % (i, i) + tdSql.execute(sql) + + sql = "insert into tb%d values" % i + for j in range(5): + val = 1 + j + sql += "(%d, %d, %d, %d)" % (currTs, val, val, val) + currTs += 1000000 + tdSql.execute(sql) + + tdSql.query("select first(col1) - avg(col1) from stb where ts > '2018-09-17 08:00:00.000' and ts < '2018-09-23 04:36:40.000' interval(1h)") + tdSql.checkRows(139) + + tdSql.query("select first(col1) - avg(col1) from stb where ts > '2018-09-17 08:00:00.000' and ts < '2018-09-23 04:36:40.000' interval(1h) fill(null)") + tdSql.checkRows(141) + tdSql.checkData(0, 1, None) + tdSql.checkData(140, 1, None) + + tdSql.query("select max(col1) - min(col1) from stb where ts > '2018-09-17 08:00:00.000' and ts < '2018-09-23 04:36:40.000' and id = 1 group by loc, id") + rows = tdSql.queryRows + + tdSql.query("select spread(col1) from stb where ts > '2018-09-17 08:00:00.000' and ts < '2018-09-23 04:36:40.000' and id = 1 group by loc, id") + tdSql.checkRows(rows) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/query/queryInterval.py b/tests/pytest/query/queryInterval.py index 99222016048f71ae66768665fb70522d466b248b..98b8c9fbef819caebe34fce96bec66da99176131 100644 --- a/tests/pytest/query/queryInterval.py +++ b/tests/pytest/query/queryInterval.py @@ -32,29 +32,44 @@ class TDTestCase: tdSql.execute("insert into t0 using st tags('beijing') values(%d, 220) (%d, 221) (%d, 225) (%d, 228) (%d, 222)" % (self.ts, self.ts + 1000000000, self.ts + 2000000000, self.ts + 3000000000, self.ts + 6000000000)) tdSql.execute("insert into t1 using st tags('shanghai') values(%d, 220) (%d, 221) (%d, 225) (%d, 228) (%d, 222)" - % (self.ts, self.ts + 2000000000, self.ts + 4000000000, self.ts + 5000000000, self.ts + 7000000000)) + % (self.ts, self.ts + 2000000000, self.ts + 4000000000, self.ts + 5000000000, self.ts + 7000000000)) + tdSql.query("select avg(voltage) from st interval(1n)") - tdSql.checkRows(3) - tdSql.checkData(0, 1, 221.4) - tdSql.checkData(1, 1, 227.0) + tdSql.checkRows(3) + tdSql.checkData(0, 0, "2020-07-01 00:00:00") + tdSql.checkData(0, 1, 221.4) + tdSql.checkData(1, 0, "2020-08-01 00:00:00") + tdSql.checkData(1, 1, 227.0) + tdSql.checkData(2, 0, "2020-09-01 00:00:00") tdSql.checkData(2, 1, 222.0) tdSql.query("select avg(voltage) from st interval(1n, 15d)") tdSql.checkRows(4) + tdSql.checkData(0, 0, "2020-06-16 00:00:00") tdSql.checkData(0, 1, 220.333333) + tdSql.checkData(1, 0, "2020-07-16 00:00:00") tdSql.checkData(1, 1, 224.666666) + tdSql.checkData(2, 0, "2020-08-16 
00:00:00") tdSql.checkData(2, 1, 225.0) + tdSql.checkData(3, 0, "2020-09-16 00:00:00") tdSql.checkData(3, 1, 222.0) tdSql.query("select avg(voltage) from st interval(1n, 15d) group by loc") tdSql.checkRows(7) + tdSql.checkData(0, 0, "2020-06-16 00:00:00") tdSql.checkData(0, 1, 220.5) + tdSql.checkData(1, 0, "2020-07-16 00:00:00") tdSql.checkData(1, 1, 226.5) + tdSql.checkData(2, 0, "2020-08-16 00:00:00") tdSql.checkData(2, 1, 222.0) + tdSql.checkData(3, 0, "2020-06-16 00:00:00") tdSql.checkData(3, 1, 220.0) + tdSql.checkData(4, 0, "2020-07-16 00:00:00") tdSql.checkData(4, 1, 221.0) + tdSql.checkData(5, 0, "2020-08-16 00:00:00") tdSql.checkData(5, 1, 226.5) + tdSql.checkData(6, 0, "2020-09-16 00:00:00") tdSql.checkData(6, 1, 222.0) def stop(self): diff --git a/tests/pytest/query/queryJoin.py b/tests/pytest/query/queryJoin.py index 17027cf498ff9e87b558866cd4d1e6a8c865afc0..5ad49a265ea52279f7934476a4a1db2fbbdf5883 100644 --- a/tests/pytest/query/queryJoin.py +++ b/tests/pytest/query/queryJoin.py @@ -95,16 +95,18 @@ class TDTestCase: tdSql.error( "select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_t.id, stb_p.dscrption, stb_p.pressure from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.id = stb_t.id group by stb_t.id") tdSql.error( - "select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_t.id, stb_p.dscrption, stb_p.pressure from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.id = stb_t.name;") - tdSql.error( - "select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_t.id, stb_p.dscrption, stb_p.pressure from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.location = stb_t.name") + "select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_t.id, stb_p.dscrption, stb_p.pressure from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.id = stb_t.name;") tdSql.execute("alter table stb_t add tag pid int") tdSql.execute("alter table tb_t1 set tag pid=2") tdSql.execute("alter table tb_t2 set tag pid=1") + tdSql.query( + "select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_t.id, stb_p.dscrption, stb_p.pressure from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.location = stb_t.name") + tdSql.checkRows(0) + tdSql.query("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_t.id, stb_p.dscrption, stb_p.pressure from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.id = stb_t.pid") - tdSql.checkRows(3) + tdSql.checkRows(6) tdSql.query("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_t.id, stb_p.dscrption, stb_p.pressure from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.id = stb_t.id") tdSql.checkRows(6) diff --git a/tests/pytest/query/queryLike.py b/tests/pytest/query/queryLike.py new file mode 100644 index 0000000000000000000000000000000000000000..3c3b030f8f718371867f323f2496fb17b6b962e1 --- /dev/null +++ b/tests/pytest/query/queryLike.py @@ -0,0 +1,45 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def run(self): + tdSql.prepare() + + tdSql.execute("create table cars(ts timestamp, c nchar(2)) tags(t1 nchar(2))") + tdSql.execute("insert into car0 using cars tags('aa') values(now, 'bb');") + tdSql.query("select count(*) from cars where t1 like '%50 90 30 04 00 00%'") + tdSql.checkRows(0) + + tdSql.execute("create table test_cars(ts timestamp, c nchar(2)) tags(t1 nchar(20))") + tdSql.execute("insert into car1 using test_cars tags('150 90 30 04 00 002') values(now, 'bb');") + tdSql.query("select * from test_cars where t1 like '%50 90 30 04 00 00%'") + tdSql.checkRows(1) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/query/queryPerformance.py b/tests/pytest/query/queryPerformance.py index a7fc08c5a39d7b9a59068463f521d83106865c2d..72af38450c862f7558df313dd53316599a23dedd 100644 --- a/tests/pytest/query/queryPerformance.py +++ b/tests/pytest/query/queryPerformance.py @@ -11,6 +11,7 @@ # -*- coding: utf-8 -*- + import sys import os import taos @@ -22,7 +23,7 @@ class taosdemoQueryPerformace: self.host = "127.0.0.1" self.user = "root" self.password = "taosdata" - self.config = "/etc/taos" + self.config = "/etc/taosperf" self.conn = taos.connect( self.host, self.user, @@ -32,17 +33,23 @@ class taosdemoQueryPerformace: def query(self): cursor = self.conn.cursor() - cursor.execute("use test") + cursor.execute("use test") totalTime = 0 - for i in range(100): - startTime = time.time() + for i in range(100): + if(sys.argv[1] == '1'): + # root permission is required + os.system("echo 3 > /proc/sys/vm/drop_caches") + startTime = time.time() cursor.execute("select count(*) from test.meters") totalTime += time.time() - startTime print("query time for: select count(*) from test.meters %f seconds" % (totalTime / 100)) totalTime = 0 for i in range(100): + if(sys.argv[1] == '1'): + # root permission is required + os.system("echo 3 > /proc/sys/vm/drop_caches") startTime = time.time() cursor.execute("select avg(f1), max(f2), min(f3) from test.meters") totalTime += time.time() - startTime @@ -50,6 +57,9 @@ class taosdemoQueryPerformace: totalTime = 0 for i in range(100): + if(sys.argv[1] == '1'): + # root permission is required + os.system("echo 3 > /proc/sys/vm/drop_caches") startTime = time.time() cursor.execute("select count(*) from test.meters where loc='beijing'") totalTime += time.time() - startTime @@ -57,6 +67,9 @@ class taosdemoQueryPerformace: totalTime = 0 for i in range(100): + if(sys.argv[1] == '1'): + # root permission is required + os.system("echo 3 > /proc/sys/vm/drop_caches") startTime = time.time() cursor.execute("select avg(f1), max(f2), min(f3) from test.meters where areaid=10") totalTime += time.time() - startTime @@ -64,6 +77,9 @@ class taosdemoQueryPerformace: totalTime = 0 for i in range(100): + if(sys.argv[1] == '1'): + # root permission is required + os.system("echo 3 > /proc/sys/vm/drop_caches") startTime = 
time.time() cursor.execute("select avg(f1), max(f2), min(f3) from test.t10 interval(10s)") totalTime += time.time() - startTime @@ -71,11 +87,34 @@ class taosdemoQueryPerformace: totalTime = 0 for i in range(100): + if(sys.argv[1] == '1'): + # root permission is required + os.system("echo 3 > /proc/sys/vm/drop_caches") startTime = time.time() cursor.execute("select last_row(*) from meters") totalTime += time.time() - startTime print("query time for: select last_row(*) from meters %f seconds" % (totalTime / 100)) + totalTime = 0 + for i in range(100): + if(sys.argv[1] == '1'): + # root permission is required + os.system("echo 3 > /proc/sys/vm/drop_caches") + startTime = time.time() + cursor.execute("select * from meters") + totalTime += time.time() - startTime + print("query time for: select * from meters %f seconds" % (totalTime / 100)) + + totalTime = 0 + for i in range(100): + if(sys.argv[1] == '1'): + # root permission is required + os.system("echo 3 > /proc/sys/vm/drop_caches") + startTime = time.time() + cursor.execute("select avg(f1), max(f2), min(f3) from meters where ts <= '2017-07-15 10:40:01.000' and ts <= '2017-07-15 14:00:40.000'") + totalTime += time.time() - startTime + print("query time for: select avg(f1), max(f2), min(f3) from meters where ts <= '2017-07-15 10:40:01.000' and ts <= '2017-07-15 14:00:40.000' %f seconds" % (totalTime / 100)) + if __name__ == '__main__': perftest = taosdemoQueryPerformace() perftest.initConnection() diff --git a/tests/pytest/query/querySort.py b/tests/pytest/query/querySort.py index e5d3c8ce1f4eb9c1d2003bd659771562c9ea14e5..649e0dc1cb3191ba08b3f2da0a5edee3afc66575 100644 --- a/tests/pytest/query/querySort.py +++ b/tests/pytest/query/querySort.py @@ -96,6 +96,12 @@ class TDTestCase: tdSql.query("select * from st order by ts desc") self.checkColumnSorted(0, "desc") + print("======= step 2: verify order for special column =========") + + tdSql.query("select tbcol1 from st order by ts desc") + + tdSql.query("select tbcol6 from st order by ts desc") + for i in range(1, 10): tdSql.error("select * from st order by tbcol%d" % i) tdSql.error("select * from st order by tbcol%d asc" % i) diff --git a/tests/pytest/query/query_performance.py b/tests/pytest/query/query_performance.py new file mode 100644 index 0000000000000000000000000000000000000000..c31569ac1382aca17d3d5b37fbef981620c0df83 --- /dev/null +++ b/tests/pytest/query/query_performance.py @@ -0,0 +1,219 @@ +import time +import taos +import csv +import numpy as np +import random +import os +import requests +import json +import sys + +""" +需要第三方库: taos,requests,numpy +当前机器已经启动taosd服务 +使用方法见底部示例 +""" + + + +class Ding: + """ + 发送消息到钉钉, + urls: 钉钉群的token组成的list,可以发多个钉钉群,需要提前加白名单或其他放行策略 + at_mobiles: 需要@的人的手机号组成的list + msg: 要发送的str + """ + def __init__(self, url_list, at_mobiles): + self.urls = url_list + self.at_mobiles = at_mobiles + + def send_message(self, msg): + data1 = { + "msgtype": "text", + "text": { + "content": msg + }, + "at": { + "atMobiles": self.at_mobiles, + "isAtAll": False + } + } + + header = {'Content-Type': 'application/json; charset=utf-8'} + + for url in self.urls: + requests.post(url=url, data=json.dumps(data1), headers=header) + + + + +class TDConn: + def __init__(self, config:dict): + self.host = config['host'] + self.user = config['user'] + self.password = config['password'] + self.config = config['config'] + self.conn = None + self.cursor = None + + def connect(self): + conn = taos.connect(host=self.host, user=self.user, password=self.password, config=self.config) + 
cursor = conn.cursor() + self.conn = conn + self.cursor = cursor + print('connect ...') + return self.cursor + + def close(self): + self.cursor.close() + self.conn.close() + print('close ... ') + + +class Tool: + """ + 可能有用 + """ + @staticmethod + def str_gen(num): + return ''.join(random.sample('abcdefghijklmnopqrstuvwxyz', num)) + + @staticmethod + def float_gen(n, m): + return random.uniform(n, m) + + @staticmethod + def int_gen(n, m): + return random.randint(n, m) + +class Demo: + def __init__(self, engine): + self.engine = engine['engine'](engine['config']) + self.cursor = self.engine.connect() + + + def date_gen(self, db, number_per_table, type_of_cols, num_of_cols_per_record, num_of_tables): + """ + :目前都是 taosdemo 的参数 + :return: + """ + sql = 'yes | sudo taosdemo -d {db} -n {number_per_table} -b {type_of_cols} -l {num_of_cols_per_record} ' \ + '-t {num_of_tables}'.format(db=db, number_per_table=number_per_table, type_of_cols=type_of_cols, + num_of_cols_per_record=num_of_cols_per_record, num_of_tables=num_of_tables) + os.system(sql) + print('insert data completed') + + + # def main(self, db, circle, csv_name, case_func, result_csv, nums, ding_flag): + def main(self, every_num_per_table, result_csv, all_result_csv, values): + db = values['db_name'] + number_per_table = every_num_per_table + type_of_cols = values['col_type'] + num_of_cols_per_record = values['col_num'] + num_of_tables = values['table_num'] + self.date_gen(db=db, number_per_table=number_per_table, type_of_cols=type_of_cols, + num_of_cols_per_record=num_of_cols_per_record, num_of_tables=num_of_tables) + + circle = values['circle'] + # print(every_num_per_table, result_csv, values) + csv_name = result_csv + case_func = values['sql_func'] + nums = num_of_tables * number_per_table + ding_flag = values['ding_flag'] + + _data = [] + f = open(csv_name,'w',encoding='utf-8') + f1 = open(all_result_csv,'a',encoding='utf-8') + csv_writer = csv.writer(f) + csv_writer1 = csv.writer(f1) + csv_writer.writerow(["number", "elapse", 'sql']) + self.cursor.execute('use {db};'.format(db=db)) + + + for i in range(circle): + self.cursor.execute('reset query cache;') + sql = case_func() + start = time.time() + self.cursor.execute(sql) + self.cursor.fetchall() + end = time.time() + _data.append(end-start) + elapse = '%.4f' %(end -start) + print(sql, i, elapse, '\n') + csv_writer.writerow([i+1, elapse, sql]) + + # time.sleep(1) + _list = [nums, np.mean(_data)] + _str = '总数据: %s 条 , table数: %s , 每个table数据数: %s , 数据类型: %s \n' % \ + (nums, num_of_tables, number_per_table, type_of_cols) + # print('avg : ', np.mean(_data), '\n') + _str += '平均值 : %.4f 秒\n' % np.mean(_data) + for each in (50, 80, 90, 95): + _list.append(np.percentile(_data,each)) + _str += ' %d 分位数 : %.4f 秒\n' % (each , np.percentile(_data,each)) + + print(_str) + if ding_flag: + ding = Ding(values['ding_config']['urls'], values['ding_config']['at_mobiles']) + ding.send_message(_str) + csv_writer1.writerow(_list) + f.close() + f1.close() + self.engine.close() + + +def run(engine, test_cases: dict, result_dir): + for each_case, values in test_cases.items(): + for every_num_per_table in values['number_per_table']: + result_csv = result_dir + '{case}_table{table_num}_{number_per_table}.csv'.\ + format(case=each_case, table_num=values['table_num'], number_per_table=every_num_per_table) + all_result_csv = result_dir + '{case_all}_result.csv'.format(case_all=each_case) + d = Demo(engine) + # print(each_case, result_csv) + d.main(every_num_per_table, result_csv, all_result_csv, values) + + + 
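+# Illustrative sketch only (not referenced by the __main__ block below): it shows
+# how one more SQL generator, in the same style as case1/case2 further down, could
+# be plugged into a test_cases entry via its 'sql_func' key. It reuses the meters
+# table and f1 column that case1/case2 already query; the range bounds are arbitrary.
+def example_range_case():
+    # pick a random closed interval on f1 and count the matching rows
+    lower = random.randint(1, 15)
+    upper = random.randint(16, 30)
+    return 'select count(*) from meters where f1 >= {a} and f1 <= {b};'.format(a=lower, b=upper)
+# To try it, set 'sql_func': example_range_case in one of the test_cases entries below.
+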
+if __name__ == '__main__':
+    """
+    Test cases are added in test_cases.
+    result_dir: directory where the reports are written; it receives the result of every run and the
+                aggregated statistics of each case. The directory must be writable by the executing user.
+    case1, case2: names of the concrete test cases
+    engine: database engine, currently only taosd; the taosd service must be running.
+    table_num: number of tables created when generating data
+    circle: number of query repetitions per case, used to compute the average
+    number_per_table: a list; each value in the list is tested in turn
+    col_num: number of columns per table
+    col_type: data type of the table columns
+    db_name: database used for the generated data, test by default
+    sql_func: function producing the SQL statement under test, defined by the user
+    ding_flag: set to a truthy value to push the summary to DingTalk
+    ding_config: only meaningful when ding_flag is truthy; may be None otherwise. urls is a list of
+                 robot tokens of the target groups (a whitelist must be configured in advance),
+                 at_mobiles is a list of the phone numbers to be @-mentioned in the group.
+    """
+    engine_dict = {
+        'taosd': {'engine': TDConn, 'config':
+            {'host': '127.0.0.1', 'user': 'root', 'password': 'taosdata', 'config':'/etc/taos'}}
+    }
+
+    def case1():
+        return 'select * from meters where f1 = {n};'.format(n=random.randint(1,30))
+
+    def case2():
+        return 'select * from meters where f1 = %.4f;' %random.uniform(1,30)
+
+
+    result_dir = '/usr/local/demo/benchmarktestdata/'
+    test_cases = {
+        'case1': {'engine':'taosd', 'table_num': 10, 'circle': 100, 'number_per_table':[10, 100], 'col_num': 5,
+                  'col_type': 'INT', 'db_name': 'test', 'sql_func': case1, 'ding_flag': True,
+                  'ding_config':
+                      {'urls': [r'https://oapi.dingtalk.com/robot/send?access_token=xxxxxxx0cd93'],
+                       'at_mobiles':[17000000000,],}},
+        'case2': {'engine':'taosd', 'table_num': 10, 'circle': 50, 'number_per_table':[10, 100], 'col_num': 5,
+                  'col_type': 'FLOAT', 'db_name': 'test', 'sql_func': case2, 'ding_flag': False,
+                  'ding_config': None
+                  }
+    }
+
+    run(engine_dict['taosd'], test_cases, result_dir)
diff --git a/tests/pytest/query/sliding.py b/tests/pytest/query/sliding.py
new file mode 100644
index 0000000000000000000000000000000000000000..810d90117a73bba81ea010f81aa605196a496dc4
--- /dev/null
+++ b/tests/pytest/query/sliding.py
@@ -0,0 +1,63 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +import random + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1500000000000 + + def run(self): + tdSql.prepare() + + tdSql.execute("create table meters(ts timestamp, col1 int) tags(id int, loc nchar(20))") + sql = "insert into t0 using meters tags(1, 'beijing') values" + for i in range(100): + sql += "(%d, %d)" % (self.ts + i * 1000, random.randint(1, 100)) + tdSql.execute(sql) + + sql = "insert into t1 using meters tags(2, 'shanghai') values" + for i in range(100): + sql += "(%d, %d)" % (self.ts + i * 1000, random.randint(1, 100)) + tdSql.execute(sql) + + tdSql.query("select count(*) from meters interval(10s) sliding(5s)") + tdSql.checkRows(21) + + tdSql.error("select count(*) from meters sliding(5s)") + + tdSql.error("select count(*) from meters sliding(5s) interval(10s)") + + tdSql.error("select * from meters sliding(5s) order by ts desc") + + tdSql.query("select count(*) from meters group by loc") + tdSql.checkRows(2) + + tdSql.error("select * from meters group by loc sliding(5s)") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/stream/new.py b/tests/pytest/stream/new.py index eac93dc0e649f5d48481079d75851a27be270567..12ec6d4507710869632eac77d719217b3b0ed7b3 100644 --- a/tests/pytest/stream/new.py +++ b/tests/pytest/stream/new.py @@ -26,7 +26,6 @@ class TDTestCase: def run(self): rowNum = 200 - totalNum = 200 tdSql.prepare() tdLog.info("=============== step1") @@ -42,7 +41,9 @@ class TDTestCase: tdSql.execute("create table st as select count(*), count(tbcol), count(tbcol2) from mt interval(10s)") tdLog.info("=============== step3") + start = time.time() tdSql.waitedQuery("select * from st", 1, 120) + delay = int(time.time() - start) + 20 v = tdSql.getData(0, 3) if v >= 51: tdLog.exit("value is %d, which is larger than 51" % v) @@ -54,11 +55,18 @@ class TDTestCase: tdSql.execute("insert into tb%d values(now + %ds, %d, %d)" % (i, j, j, j)) tdLog.info("=============== step5") - tdLog.sleep(40) - tdSql.waitedQuery("select * from st order by ts desc", 1, 120) - v = tdSql.getData(0, 3) - if v <= 51: - tdLog.exit("value is %d, which is smaller than 51" % v) + maxValue = 0 + for i in range(delay): + time.sleep(1) + tdSql.query("select * from st order by ts desc") + v = tdSql.getData(0, 3) + if v > maxValue: + maxValue = v + if v > 51: + break + + if maxValue <= 51: + tdLog.exit("value is %d, which is smaller than 51" % maxValue) def stop(self): tdSql.close() diff --git a/tests/pytest/table/create_sensitive.py b/tests/pytest/table/create_sensitive.py new file mode 100644 index 0000000000000000000000000000000000000000..1934b662c7a57c13c2a1b8e8dfd65ab6ddbe13a4 --- /dev/null +++ b/tests/pytest/table/create_sensitive.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- + +import sys +import string +import random +import subprocess +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, 
logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + tdLog.info('=============== step1') + tdLog.info('create table TestSensitiveT(ts timestamp, i int)') + tdSql.execute('create table TestSensitiveT(ts timestamp, i int)') + tdLog.info('create table TestSensitiveSt(ts timestamp,i int) tags(j int)') + tdSql.execute('create table TestSensitiveSt(ts timestamp,i int) tags(j int)') + tdLog.info('create table Abcde using TestSensitiveSt tags(1)') + tdSql.execute('create table AbcdeFgh using TestSensitiveSt tags(1)') + tdLog.info('=============== step2') + tdLog.info('test normal table ') + tdSql.error('create table testsensitivet(ts timestamp, i int)') + tdSql.error('create table testsensitivet(ts timestamp, j int)') + tdSql.error('create table testsensItivet(ts timestamp, j int)') + tdSql.error('create table TESTSENSITIVET(ts timestamp, i int)') + tdLog.info('=============== step3') + tdLog.info('test super table ') + tdSql.error('create table testsensitivest(ts timestamp,i int) tags(j int)') + tdSql.error('create table testsensitivest(ts timestamp,i int) tags(k int)') + tdSql.error('create table TESTSENSITIVEST(ts timestamp,i int) tags(j int)') + tdSql.error('create table Testsensitivest(ts timestamp,i int) tags(j int)') + tdLog.info('=============== step4') + tdLog.info('test subtable ') + tdSql.error('create table abcdefgh using TestSensitiveSt tags(1)') + tdSql.error('create table ABCDEFGH using TestSensitiveSt tags(1)') + tdSql.error('create table Abcdefgh using TestSensitiveSt tags(1)') + tdSql.error('create table abcdeFgh using TestSensitiveSt tags(1)') + tdSql.error('insert into table abcdefgh using TestSensitiveSt tags(1) values(now,1)') + tdSql.error('insert into table ABCDEFGH using TestSensitiveSt tags(1) values(now,1)') + tdSql.error('insert into table Abcdefgh using TestSensitiveSt tags(1) values(now,1)') + tdSql.error('insert into table abcdeFgH using TestSensitiveSt tags(1) values(now,1)') + tdSql.query('show tables') + tdLog.info('tdSql.checkRow(0)') + tdSql.checkRows(2) + + + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/test_data/__init__.py b/tests/pytest/test_data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1bd7af5b9db25bf22f960bfca6bf18b1518cc86f --- /dev/null +++ b/tests/pytest/test_data/__init__.py @@ -0,0 +1,15 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + + +""" +this directory contains test data files +""" \ No newline at end of file diff --git a/tests/pytest/test_data/disordered.csv b/tests/pytest/test_data/disordered.csv new file mode 100644 index 0000000000000000000000000000000000000000..0e6fd75e5b1a43da360541f57377236e0984093a --- /dev/null +++ b/tests/pytest/test_data/disordered.csv @@ -0,0 +1,500 @@ +"2020-03-01 20:01:49.493","130","7.595","128","8392704" +"2020-03-01 20:01:50.493","130","7.598","128","8392704" +"2020-03-01 20:01:51.493","130","7.602","128","8392704" +"2020-03-01 20:01:52.493","130","7.604","128","8392704" +"2020-03-01 20:01:53.493","130","7.604","128","8392704" +"2020-03-01 20:01:54.493","130","7.606","128","8392704" +"2020-03-01 20:01:55.493","130","7.607","128","8392704" +"2020-03-01 20:01:56.493","130","7.607","128","8392704" +"2020-03-01 20:01:57.493","130","7.607","128","8392704" +"2020-03-01 20:01:58.493","130","7.607","128","8392704" +"2020-03-01 20:01:59.493","130","7.606","128","8392704" +"2020-03-01 20:02:00.493","130","7.606","128","8392704" +"2020-03-01 20:02:01.493","130","7.606","128","8392704" +"2020-03-01 20:02:02.493","130","7.607","128","8392704" +"2020-03-01 20:02:03.493","130","7.608","128","8392704" +"2020-03-01 20:02:04.493","130","7.609","128","8392704" +"2020-03-01 20:02:05.493","130","7.609","128","8392704" +"2020-03-01 20:02:06.493","130","7.608","128","8392704" +"2020-03-01 20:02:07.493","130","7.606","128","8392704" +"2020-03-01 20:02:08.493","130","7.606","128","8392704" +"2020-03-01 20:02:09.493","130","7.607","128","8392704" +"2020-03-01 20:02:10.493","130","7.609","128","8392704" +"2020-03-01 20:02:11.493","130","7.61","128","8392704" +"2020-03-01 20:02:12.493","130","7.611","128","8392704" +"2020-03-01 20:02:13.493","130","7.61","128","8392704" +"2020-03-01 20:02:14.493","130","7.607","128","8392704" +"2020-03-01 20:02:15.493","130","7.605","128","8392704" +"2020-03-01 20:02:16.493","130","7.604","128","8392704" +"2020-03-01 20:02:17.493","130","7.603","128","8392704" +"2020-03-01 20:02:18.493","130","7.604","128","8392704" +"2020-03-01 20:02:19.493","130","7.604","128","8392704" +"2020-03-01 20:02:20.493","130","7.603","128","8392704" +"2020-03-01 20:02:21.493","130","7.601","128","8392704" +"2020-03-01 20:02:22.493","130","7.598","128","8392704" +"2020-03-01 20:02:23.493","130","7.595","128","8392704" +"2020-03-01 20:02:24.493","130","7.594","128","8392704" +"2020-03-01 20:02:25.493","130","7.594","128","8392704" +"2020-03-01 20:02:26.493","130","7.595","128","8392704" +"2020-03-01 20:02:27.493","130","7.596","128","8392704" +"2020-03-01 20:02:28.493","130","7.596","128","8392704" +"2020-03-01 20:02:29.493","130","7.598","128","8392704" +"2020-03-01 20:02:30.493","130","7.6","128","8392704" +"2020-03-01 20:02:31.493","130","7.6","128","8392704" +"2020-03-01 20:02:32.493","130","7.6","128","8392704" +"2020-03-01 20:02:33.493","130","7.601","128","8392704" +"2020-03-01 20:02:34.493","130","7.603","128","8392704" +"2020-03-01 20:02:35.493","130","7.604","128","8392704" +"2020-03-01 20:02:36.493","130","7.605","128","8392704" +"2020-03-01 20:02:37.493","130","7.606","128","8392704" +"2020-03-01 20:02:38.493","130","7.61","128","8392704" +"2020-03-01 20:02:39.493","130","7.611","128","8392704" +"2020-03-01 
20:02:40.493","130","7.61","128","8392704" +"2020-03-01 20:02:41.493","130","7.609","128","8392704" +"2020-03-01 20:02:42.493","130","7.611","128","8392704" +"2020-03-01 20:02:43.493","130","7.61","128","8392704" +"2020-03-01 20:02:44.493","130","7.607","128","8392704" +"2020-03-01 20:02:45.493","130","7.605","128","8392704" +"2020-03-01 20:02:46.493","130","7.606","128","8392704" +"2020-03-01 20:02:47.493","130","7.604","128","8392704" +"2020-03-01 20:02:48.493","130","7.599","128","8392704" +"2020-03-01 20:02:49.493","130","7.595","128","8392704" +"2020-03-01 06:41:17.493","130","6.742","128","8392704" +"2020-03-01 06:41:18.493","130","6.741","128","8392704" +"2020-03-01 06:41:19.493","130","6.737","128","8392704" +"2020-03-01 06:41:20.493","130","6.734","128","8392704" +"2020-03-01 06:41:21.493","130","6.734","128","8392704" +"2020-03-01 06:41:22.493","130","6.733","128","8392704" +"2020-03-01 06:41:23.493","130","6.736","128","8392704" +"2020-03-01 06:41:24.493","130","6.739","128","8392704" +"2020-03-01 06:41:25.493","130","6.738","128","8392704" +"2020-03-01 06:41:26.493","130","6.74","128","8392704" +"2020-03-01 06:41:27.493","130","6.745","128","8392704" +"2020-03-01 06:41:28.493","130","6.749","128","8392704" +"2020-03-01 06:41:29.493","130","6.753","128","8392704" +"2020-03-01 06:41:30.493","130","6.753","128","8392704" +"2020-03-01 06:41:31.493","130","6.757","128","8392704" +"2020-03-01 06:41:32.493","130","6.763","128","8392704" +"2020-03-01 06:41:33.493","130","6.765","128","8392704" +"2020-03-01 06:41:34.493","130","6.764","128","8392704" +"2020-03-01 06:41:35.493","130","6.762","128","8392704" +"2020-03-01 06:41:36.493","130","6.758","128","8392704" +"2020-03-01 06:41:37.493","130","6.756","128","8392704" +"2020-03-01 06:41:38.493","130","6.755","128","8392704" +"2020-03-01 06:41:39.493","130","6.754","128","8392704" +"2020-03-01 06:41:40.493","130","6.755","128","8392704" +"2020-03-01 06:41:41.493","130","6.756","128","8392704" +"2020-03-01 06:41:42.493","130","6.757","128","8392704" +"2020-03-01 06:41:43.493","130","6.756","128","8392704" +"2020-03-01 06:41:44.493","130","6.756","128","8392704" +"2020-03-01 06:41:45.493","130","6.756","128","8392704" +"2020-03-01 06:41:46.493","130","6.759","128","8392704" +"2020-03-01 06:41:47.493","130","6.759","128","8392704" +"2020-03-01 06:41:48.493","130","6.758","128","8392704" +"2020-03-01 06:41:49.493","130","6.758","128","8392704" +"2020-03-01 06:41:50.493","130","6.756","128","8392704" +"2020-03-01 06:41:51.493","130","6.755","128","8392704" +"2020-03-01 06:41:52.493","130","6.755","128","8392704" +"2020-03-01 06:41:53.493","130","6.754","128","8392704" +"2020-03-01 06:41:54.493","130","6.751","128","8392704" +"2020-03-01 06:41:55.493","130","6.752","128","8392704" +"2020-03-01 06:41:56.493","130","6.753","128","8392704" +"2020-03-01 06:41:57.493","130","6.753","128","8392704" +"2020-03-01 06:41:58.493","130","6.753","128","8392704" +"2020-03-01 06:41:59.493","130","6.755","128","8392704" +"2020-03-01 06:42:00.493","130","6.752","128","8392704" +"2020-03-01 06:42:01.493","130","6.75","128","8392704" +"2020-03-01 06:42:02.493","130","6.75","128","8392704" +"2020-03-01 06:42:03.493","130","6.753","128","8392704" +"2020-03-01 06:42:04.493","130","6.755","128","8392704" +"2020-03-01 06:42:05.493","130","6.754","128","8392704" +"2020-03-01 06:42:06.493","130","6.754","128","8392704" +"2020-03-01 06:42:07.493","130","6.752","128","8392704" +"2020-03-01 06:42:08.493","130","6.748","128","8392704" +"2020-03-01 
06:42:09.493","130","6.747","128","8392704" +"2020-03-01 06:42:10.493","130","6.747","128","8392704" +"2020-03-01 06:42:11.493","130","6.748","128","8392704" +"2020-03-01 06:42:12.493","130","6.748","128","8392704" +"2020-03-01 06:42:13.493","130","6.75","128","8392704" +"2020-03-01 06:42:14.493","130","6.754","128","8392704" +"2020-03-01 06:42:15.493","130","6.754","128","8392704" +"2020-03-01 06:42:16.493","130","6.756","128","8392704" +"2020-03-01 06:42:17.493","130","6.756","128","8392704" +"2020-03-01 06:42:18.493","130","6.757","128","8392704" +"2020-03-01 06:42:19.493","130","6.757","128","8392704" +"2020-03-01 06:42:20.493","130","6.76","128","8392704" +"2020-03-01 06:42:21.493","130","6.761","128","8392704" +"2020-03-01 06:42:22.493","130","6.76","128","8392704" +"2020-03-01 06:42:23.493","130","6.76","128","8392704" +"2020-03-01 06:42:24.493","130","6.76","128","8392704" +"2020-03-01 06:42:25.493","130","6.76","128","8392704" +"2020-03-01 06:42:26.493","130","6.758","128","8392704" +"2020-03-01 06:42:27.493","130","6.757","128","8392704" +"2020-03-01 06:42:28.493","130","6.752","128","8392704" +"2020-03-01 06:42:29.493","130","6.746","128","8392704" +"2020-03-01 06:42:30.493","130","6.742","128","8392704" +"2020-03-01 06:42:31.493","130","6.741","128","8392704" +"2020-03-01 06:42:32.493","130","6.739","128","8392704" +"2020-03-01 06:42:33.493","130","6.739","128","8392704" +"2020-03-01 06:42:34.493","130","6.737","128","8392704" +"2020-03-01 06:42:35.493","130","6.737","128","8392704" +"2020-03-01 06:42:36.493","130","6.738","128","8392704" +"2020-03-01 06:42:37.493","130","6.739","128","8392704" +"2020-03-01 06:42:38.493","130","6.743","128","8392704" +"2020-03-01 06:42:39.493","130","6.747","128","8392704" +"2020-03-01 06:42:40.493","130","6.748","128","8392704" +"2020-03-01 06:42:41.493","130","6.746","128","8392704" +"2020-03-01 06:42:42.493","130","6.746","128","8392704" +"2020-03-01 06:42:43.493","130","6.745","128","8392704" +"2020-03-01 06:42:44.493","130","6.742","128","8392704" +"2020-03-01 06:42:45.493","130","6.741","128","8392704" +"2020-03-01 06:42:46.493","130","6.74","128","8392704" +"2020-03-01 06:42:47.493","130","6.742","128","8392704" +"2020-03-01 06:42:48.493","130","6.743","128","8392704" +"2020-03-01 06:42:49.493","130","6.742","128","8392704" +"2020-03-01 06:42:50.493","130","6.742","128","8392704" +"2020-03-01 06:42:51.493","130","6.741","128","8392704" +"2020-03-01 06:42:52.493","130","6.741","128","8392704" +"2020-03-01 06:42:53.493","130","6.742","128","8392704" +"2020-03-01 06:42:54.493","130","6.742","128","8392704" +"2020-03-01 06:42:55.493","130","6.745","128","8392704" +"2020-03-01 06:42:56.493","130","6.747","128","8392704" +"2020-03-01 06:42:57.493","130","6.748","128","8392704" +"2020-03-01 06:42:58.493","130","6.75","128","8392704" +"2020-03-01 06:42:59.493","130","6.75","128","8392704" +"2020-03-01 06:43:00.493","130","6.748","128","8392704" +"2020-03-01 06:43:01.493","130","6.748","128","8392704" +"2020-03-01 06:43:02.493","130","6.746","128","8392704" +"2020-03-01 06:43:03.493","130","6.745","128","8392704" +"2020-03-01 06:43:04.493","130","6.745","128","8392704" +"2020-03-01 06:43:05.493","130","6.745","128","8392704" +"2020-03-01 06:43:06.493","130","6.744","128","8392704" +"2020-03-01 06:43:07.493","130","6.749","128","8392704" +"2020-03-01 06:43:08.493","130","6.756","128","8392704" +"2020-03-01 06:43:09.493","130","6.759","128","8392704" +"2020-03-01 06:43:10.493","130","6.759","128","8392704" +"2020-03-01 
06:43:11.493","130","6.758","128","8392704" +"2020-03-01 06:43:12.493","130","6.758","128","8392704" +"2020-03-01 06:43:13.493","130","6.759","128","8392704" +"2020-03-01 06:43:14.493","130","6.759","128","8392704" +"2020-03-01 06:43:15.493","130","6.753","128","8392704" +"2020-03-01 06:43:16.493","130","6.751","128","8392704" +"2020-03-01 20:02:50.493","130","7.596","128","8392704" +"2020-03-01 20:02:51.493","130","7.597","128","8392704" +"2020-03-01 20:02:52.493","130","7.599","128","8392704" +"2020-03-01 20:02:53.493","130","7.6","128","8392704" +"2020-03-01 20:02:54.493","130","7.601","128","8392704" +"2020-03-01 20:02:55.493","130","7.603","128","8392704" +"2020-03-01 20:02:56.493","130","7.602","128","8392704" +"2020-03-01 20:02:57.493","130","7.601","128","8392704" +"2020-03-01 20:02:58.493","130","7.601","128","8392704" +"2020-03-01 20:02:59.493","130","7.6","128","8392704" +"2020-03-01 20:03:00.493","130","7.599","128","8392704" +"2020-03-01 20:03:01.493","130","7.599","128","8392704" +"2020-03-01 20:03:02.493","130","7.6","128","8392704" +"2020-03-01 20:03:03.493","130","7.601","128","8392704" +"2020-03-01 20:03:04.493","130","7.601","128","8392704" +"2020-03-01 20:03:05.493","130","7.601","128","8392704" +"2020-03-01 20:03:06.493","130","7.6","128","8392704" +"2020-03-01 20:03:07.493","130","7.602","128","8392704" +"2020-03-01 20:03:08.493","130","7.606","128","8392704" +"2020-03-01 20:03:09.493","130","7.609","128","8392704" +"2020-03-01 20:03:10.493","130","7.612","128","8392704" +"2020-03-01 20:03:11.493","130","7.614","128","8392704" +"2020-03-01 20:03:12.493","130","7.615","128","8392704" +"2020-03-01 20:03:13.493","130","7.614","128","8392704" +"2020-03-01 20:03:14.493","130","7.613","128","8392704" +"2020-03-01 20:03:15.493","130","7.614","128","8392704" +"2020-03-01 20:03:16.493","130","7.612","128","8392704" +"2020-03-01 20:03:17.493","130","7.609","128","8392704" +"2020-03-01 20:03:18.493","130","7.606","128","8392704" +"2020-03-01 20:03:19.493","130","7.604","128","8392704" +"2020-03-01 20:03:20.493","130","7.604","128","8392704" +"2020-03-01 20:03:21.493","130","7.605","128","8392704" +"2020-03-01 20:03:22.493","130","7.605","128","8392704" +"2020-03-01 20:03:23.493","130","7.605","128","8392704" +"2020-03-01 20:03:24.493","130","7.605","128","8392704" +"2020-03-01 20:03:25.493","130","7.604","128","8392704" +"2020-03-01 20:03:26.493","130","7.603","128","8392704" +"2020-03-01 20:03:27.493","130","7.604","128","8392704" +"2020-03-01 20:03:28.493","130","7.605","128","8392704" +"2020-03-01 20:03:29.493","130","7.607","128","8392704" +"2020-03-01 20:03:30.493","130","7.609","128","8392704" +"2020-03-01 20:03:31.493","130","7.609","128","8392704" +"2020-03-01 20:03:32.493","130","7.607","128","8392704" +"2020-03-01 20:03:33.493","130","7.606","128","8392704" +"2020-03-01 20:03:34.493","130","7.607","128","8392704" +"2020-03-01 20:03:35.493","130","7.608","128","8392704" +"2020-03-01 20:03:36.493","130","7.609","128","8392704" +"2020-03-01 20:03:37.493","130","7.609","128","8392704" +"2020-03-01 20:03:38.493","130","7.607","128","8392704" +"2020-03-01 20:03:39.493","130","7.602","128","8392704" +"2020-03-01 20:03:40.493","130","7.599","128","8392704" +"2020-03-01 20:03:41.493","130","7.598","128","8392704" +"2020-03-01 20:03:42.493","130","7.596","128","8392704" +"2020-03-01 20:03:43.493","130","7.595","128","8392704" +"2020-03-01 20:03:44.493","130","7.594","128","8392704" +"2020-03-01 20:03:45.493","130","7.595","128","8392704" +"2020-03-01 
20:03:46.493","130","7.597","128","8392704" +"2020-03-01 20:03:47.493","130","7.596","128","8392704" +"2020-03-01 20:03:48.493","130","7.595","128","8392704" +"2020-03-01 20:03:49.493","130","7.596","128","8392704" +"2020-03-01 20:03:50.493","130","7.596","128","8392704" +"2020-03-01 20:03:51.493","130","7.595","128","8392704" +"2020-03-01 20:03:52.493","130","7.596","128","8392704" +"2020-03-01 20:03:53.493","130","7.597","128","8392704" +"2020-03-01 20:03:54.493","130","7.598","128","8392704" +"2020-03-01 20:03:55.493","130","7.596","128","8392704" +"2020-03-01 20:03:56.493","130","7.596","128","8392704" +"2020-03-01 20:03:57.493","130","7.599","128","8392704" +"2020-03-01 20:03:58.493","130","7.602","128","8392704" +"2020-03-01 20:03:59.493","130","7.603","128","8392704" +"2020-03-01 20:04:00.493","130","7.602","128","8392704" +"2020-03-01 20:04:01.493","130","7.6","128","8392704" +"2020-03-01 20:04:02.493","130","7.598","128","8392704" +"2020-03-01 20:04:03.493","130","7.595","128","8392704" +"2020-03-01 20:04:04.493","130","7.593","128","8392704" +"2020-03-01 20:04:05.493","130","7.592","128","8392704" +"2020-03-01 20:04:06.493","130","7.591","128","8392704" +"2020-03-01 20:04:07.493","130","7.591","128","8392704" +"2020-03-01 20:04:08.493","130","7.591","128","8392704" +"2020-03-01 20:04:09.493","130","7.592","128","8392704" +"2020-03-01 20:04:10.493","130","7.59","128","8392704" +"2020-03-01 20:04:11.493","130","7.587","128","8392704" +"2020-03-01 20:04:12.493","130","7.584","128","8392704" +"2020-03-01 20:04:13.493","130","7.583","128","8392704" +"2020-03-01 20:04:14.493","130","7.581","128","8392704" +"2020-03-01 20:04:15.493","130","7.578","128","8392704" +"2020-03-01 20:04:16.493","130","7.576","128","8392704" +"2020-03-01 20:04:17.493","130","7.577","128","8392704" +"2020-03-01 20:04:18.493","130","7.579","128","8392704" +"2020-03-01 20:04:19.493","130","7.583","128","8392704" +"2020-03-01 20:04:20.493","130","7.587","128","8392704" +"2020-03-01 20:04:21.493","130","7.588","128","8392704" +"2020-03-01 20:04:22.493","130","7.589","128","8392704" +"2020-03-01 20:04:23.493","130","7.59","128","8392704" +"2020-03-01 20:04:24.493","130","7.593","128","8392704" +"2020-03-01 20:04:25.493","130","7.597","128","8392704" +"2020-03-01 20:04:26.493","130","7.6","128","8392704" +"2020-03-01 20:04:27.493","130","7.603","128","8392704" +"2020-03-01 20:04:28.493","130","7.606","128","8392704" +"2020-03-01 20:04:29.493","130","7.608","128","8392704" +"2020-03-01 20:04:30.493","130","7.609","128","8392704" +"2020-03-01 20:04:31.493","130","7.607","128","8392704" +"2020-03-01 20:04:32.493","130","7.607","128","8392704" +"2020-03-01 20:04:33.493","130","7.607","128","8392704" +"2020-03-01 20:04:34.493","130","7.602","128","8392704" +"2020-03-01 20:04:35.493","130","7.599","128","8392704" +"2020-03-01 20:04:36.493","130","7.599","128","8392704" +"2020-03-01 20:04:37.493","130","7.599","128","8392704" +"2020-03-01 20:04:38.493","130","7.598","128","8392704" +"2020-03-01 20:04:39.493","130","7.596","128","8392704" +"2020-03-01 20:04:40.493","130","7.595","128","8392704" +"2020-03-01 20:04:41.493","130","7.592","128","8392704" +"2020-03-01 20:04:42.493","130","7.586","128","8392704" +"2020-03-01 20:04:43.493","130","7.582","128","8392704" +"2020-03-01 20:04:44.493","130","7.582","128","8392704" +"2020-03-01 20:04:45.493","130","7.584","128","8392704" +"2020-03-01 20:04:46.493","130","7.583","128","8392704" +"2020-03-01 20:04:47.493","130","7.582","128","8392704" +"2020-03-01 
20:04:48.493","130","7.582","128","8392704" +"2020-03-01 20:04:49.493","130","7.585","128","8392704" +"2020-03-01 06:43:17.493","130","6.751","128","8392704" +"2020-03-01 06:43:18.493","130","6.75","128","8392704" +"2020-03-01 06:43:19.493","130","6.748","128","8392704" +"2020-03-01 06:43:20.493","130","6.751","128","8392704" +"2020-03-01 06:43:21.493","130","6.752","128","8392704" +"2020-03-01 06:43:22.493","130","6.751","128","8392704" +"2020-03-01 06:43:23.493","130","6.746","128","8392704" +"2020-03-01 06:43:24.493","130","6.739","128","8392704" +"2020-03-01 06:43:25.493","130","6.737","128","8392704" +"2020-03-01 06:43:26.493","130","6.735","128","8392704" +"2020-03-01 06:43:27.493","130","6.735","128","8392704" +"2020-03-01 06:43:28.493","130","6.734","128","8392704" +"2020-03-01 06:43:29.493","130","6.731","128","8392704" +"2020-03-01 06:43:30.493","130","6.729","128","8392704" +"2020-03-01 06:43:31.493","130","6.73","128","8392704" +"2020-03-01 06:43:32.493","130","6.736","128","8392704" +"2020-03-01 06:43:33.493","130","6.74","128","8392704" +"2020-03-01 06:43:34.493","130","6.741","128","8392704" +"2020-03-01 06:43:35.493","130","6.743","128","8392704" +"2020-03-01 06:43:36.493","130","6.743","128","8392704" +"2020-03-01 06:43:37.493","130","6.745","128","8392704" +"2020-03-01 06:43:38.493","130","6.747","128","8392704" +"2020-03-01 06:43:39.493","130","6.747","128","8392704" +"2020-03-01 06:43:40.493","130","6.746","128","8392704" +"2020-03-01 06:43:41.493","130","6.745","128","8392704" +"2020-03-01 06:43:42.493","130","6.743","128","8392704" +"2020-03-01 06:43:43.493","130","6.741","128","8392704" +"2020-03-01 06:43:44.493","130","6.737","128","8392704" +"2020-03-01 06:43:45.493","130","6.737","128","8392704" +"2020-03-01 06:43:46.493","130","6.74","128","8392704" +"2020-03-01 06:43:47.493","130","6.744","128","8392704" +"2020-03-01 06:43:48.493","130","6.746","128","8392704" +"2020-03-01 06:43:49.493","130","6.745","128","8392704" +"2020-03-01 06:43:50.493","130","6.743","128","8392704" +"2020-03-01 06:43:51.493","130","6.745","128","8392704" +"2020-03-01 06:43:52.493","130","6.747","128","8392704" +"2020-03-01 06:43:53.493","130","6.748","128","8392704" +"2020-03-01 06:43:54.493","130","6.748","128","8392704" +"2020-03-01 06:43:55.493","130","6.747","128","8392704" +"2020-03-01 06:43:56.493","130","6.746","128","8392704" +"2020-03-01 06:43:57.493","130","6.744","128","8392704" +"2020-03-01 06:43:58.493","130","6.742","128","8392704" +"2020-03-01 06:43:59.493","130","6.74","128","8392704" +"2020-03-01 06:44:00.493","130","6.739","128","8392704" +"2020-03-01 06:44:01.493","130","6.739","128","8392704" +"2020-03-01 06:44:02.493","130","6.742","128","8392704" +"2020-03-01 06:44:03.493","130","6.742","128","8392704" +"2020-03-01 06:44:04.493","130","6.756","128","8392704" +"2020-03-01 06:44:05.493","130","6.757","128","8392704" +"2020-03-01 06:44:06.493","130","6.757","128","8392704" +"2020-03-01 06:44:07.493","130","6.757","128","8392704" +"2020-03-01 06:44:08.493","130","6.759","128","8392704" +"2020-03-01 06:44:09.493","130","6.759","128","8392704" +"2020-03-01 06:44:10.493","130","6.75","128","8392704" +"2020-03-01 06:44:11.493","130","6.744","128","8392704" +"2020-03-01 06:44:12.493","130","6.739","128","8392704" +"2020-03-01 06:44:13.493","130","6.739","128","8392704" +"2020-03-01 06:44:14.493","130","6.736","128","8392704" +"2020-03-01 06:44:15.493","130","6.734","128","8392704" +"2020-03-01 06:44:16.493","130","6.735","128","8392704" +"2020-03-01 
06:44:17.493","130","6.734","128","8392704" +"2020-03-01 06:44:18.493","130","6.736","128","8392704" +"2020-03-01 06:44:19.493","130","6.741","128","8392704" +"2020-03-01 06:44:20.493","130","6.744","128","8392704" +"2020-03-01 06:44:21.493","130","6.746","128","8392704" +"2020-03-01 06:44:22.493","130","6.746","128","8392704" +"2020-03-01 06:44:23.493","130","6.748","128","8392704" +"2020-03-01 06:44:24.493","130","6.751","128","8392704" +"2020-03-01 06:44:25.493","130","6.752","128","8392704" +"2020-03-01 06:44:26.493","130","6.752","128","8392704" +"2020-03-01 06:44:27.493","130","6.752","128","8392704" +"2020-03-01 06:44:28.493","130","6.753","128","8392704" +"2020-03-01 06:44:29.493","130","6.751","128","8392704" +"2020-03-01 06:44:30.493","130","6.751","128","8392704" +"2020-03-01 06:44:31.493","130","6.749","128","8392704" +"2020-03-01 06:44:32.493","130","6.747","128","8392704" +"2020-03-01 06:44:33.493","130","6.748","128","8392704" +"2020-03-01 06:44:34.493","130","6.749","128","8392704" +"2020-03-01 06:44:35.493","130","6.746","128","8392704" +"2020-03-01 06:44:36.493","130","6.742","128","8392704" +"2020-03-01 06:44:37.493","130","6.742","128","8392704" +"2020-03-01 06:44:38.493","130","6.743","128","8392704" +"2020-03-01 06:44:39.493","130","6.743","128","8392704" +"2020-03-01 06:44:40.493","130","6.743","128","8392704" +"2020-03-01 06:44:41.493","130","6.741","128","8392704" +"2020-03-01 06:44:42.493","130","6.741","128","8392704" +"2020-03-01 06:44:43.493","130","6.741","128","8392704" +"2020-03-01 06:44:44.493","130","6.74","128","8392704" +"2020-03-01 06:44:45.493","130","6.74","128","8392704" +"2020-03-01 06:44:46.493","130","6.739","128","8392704" +"2020-03-01 06:44:47.493","130","6.738","128","8392704" +"2020-03-01 06:44:48.493","130","6.738","128","8392704" +"2020-03-01 06:44:49.493","130","6.741","128","8392704" +"2020-03-01 06:44:50.493","130","6.749","128","8392704" +"2020-03-01 06:44:51.493","130","6.756","128","8392704" +"2020-03-01 06:44:52.493","130","6.763","128","8392704" +"2020-03-01 06:44:53.493","130","6.768","128","8392704" +"2020-03-01 06:44:54.493","130","6.771","128","8392704" +"2020-03-01 06:44:55.493","130","6.774","128","8392704" +"2020-03-01 06:44:56.493","130","6.774","128","8392704" +"2020-03-01 06:44:57.493","130","6.774","128","8392704" +"2020-03-01 06:44:58.493","130","6.765","128","8392704" +"2020-03-01 06:44:59.493","130","6.763","128","8392704" +"2020-03-01 06:45:00.493","130","6.761","128","8392704" +"2020-03-01 06:45:01.493","130","6.758","128","8392704" +"2020-03-01 06:45:02.493","130","6.756","128","8392704" +"2020-03-01 06:45:03.493","130","6.756","128","8392704" +"2020-03-01 06:45:04.493","130","6.756","128","8392704" +"2020-03-01 06:45:05.493","130","6.763","128","8392704" +"2020-03-01 06:45:06.493","130","6.763","128","8392704" +"2020-03-01 06:45:07.493","130","6.764","128","8392704" +"2020-03-01 06:45:08.493","130","6.762","128","8392704" +"2020-03-01 06:45:09.493","130","6.763","128","8392704" +"2020-03-01 06:45:10.493","130","6.764","128","8392704" +"2020-03-01 06:45:11.493","130","6.763","128","8392704" +"2020-03-01 06:45:12.493","130","6.76","128","8392704" +"2020-03-01 06:45:13.493","130","6.759","128","8392704" +"2020-03-01 06:45:14.493","130","6.758","128","8392704" +"2020-03-01 06:45:15.493","130","6.758","128","8392704" +"2020-03-01 06:45:16.493","130","6.755","128","8392704" +"2020-03-01 20:04:50.493","130","7.59","128","8392704" +"2020-03-01 20:04:51.493","130","7.592","128","8392704" +"2020-03-01 
20:04:52.493","130","7.592","128","8392704" +"2020-03-01 20:04:53.493","130","7.593","128","8392704" +"2020-03-01 20:04:54.493","130","7.592","128","8392704" +"2020-03-01 20:04:55.493","130","7.592","128","8392704" +"2020-03-01 20:04:56.493","130","7.593","128","8392704" +"2020-03-01 20:04:57.493","130","7.593","128","8392704" +"2020-03-01 20:04:58.493","130","7.593","128","8392704" +"2020-03-01 20:04:59.493","130","7.594","128","8392704" +"2020-03-01 20:05:00.493","130","7.595","128","8392704" +"2020-03-01 20:05:01.493","130","7.596","128","8392704" +"2020-03-01 20:05:02.493","130","7.595","128","8392704" +"2020-03-01 20:05:03.493","130","7.595","128","8392704" +"2020-03-01 20:05:04.493","130","7.594","128","8392704" +"2020-03-01 20:05:05.493","130","7.595","128","8392704" +"2020-03-01 20:05:06.493","130","7.598","128","8392704" +"2020-03-01 20:05:07.493","130","7.597","128","8392704" +"2020-03-01 20:05:08.493","130","7.595","128","8392704" +"2020-03-01 20:05:09.493","130","7.597","128","8392704" +"2020-03-01 20:05:10.493","130","7.598","128","8392704" +"2020-03-01 20:05:11.493","130","7.598","128","8392704" +"2020-03-01 20:05:12.493","130","7.597","128","8392704" +"2020-03-01 20:05:13.493","130","7.595","128","8392704" +"2020-03-01 20:05:14.493","130","7.591","128","8392704" +"2020-03-01 20:05:15.493","130","7.589","128","8392704" +"2020-03-01 20:05:16.493","130","7.588","128","8392704" +"2020-03-01 20:05:17.493","130","7.589","128","8392704" +"2020-03-01 20:05:18.493","130","7.589","128","8392704" +"2020-03-01 20:05:19.493","130","7.589","128","8392704" +"2020-03-01 20:05:20.493","130","7.587","128","8392704" +"2020-03-01 20:05:21.493","130","7.584","128","8392704" +"2020-03-01 20:05:22.493","130","7.583","128","8392704" +"2020-03-01 20:05:23.493","130","7.585","128","8392704" +"2020-03-01 20:05:24.493","130","7.586","128","8392704" +"2020-03-01 20:05:25.493","130","7.586","128","8392704" +"2020-03-01 20:05:26.493","130","7.586","128","8392704" +"2020-03-01 20:05:27.493","130","7.586","128","8392704" +"2020-03-01 20:05:28.493","130","7.587","128","8392704" +"2020-03-01 20:05:29.493","130","7.585","128","8392704" +"2020-03-01 20:05:30.493","130","7.584","128","8392704" +"2020-03-01 20:05:31.493","130","7.586","128","8392704" +"2020-03-01 20:05:32.493","130","7.589","128","8392704" +"2020-03-01 20:05:33.493","130","7.59","128","8392704" +"2020-03-01 20:05:34.493","130","7.591","128","8392704" +"2020-03-01 20:05:35.493","130","7.591","128","8392704" +"2020-03-01 20:05:36.493","130","7.594","128","8392704" +"2020-03-01 20:05:37.493","130","7.599","128","8392704" +"2020-03-01 20:05:38.493","130","7.602","128","8392704" +"2020-03-01 20:05:39.493","130","7.604","128","8392704" +"2020-03-01 20:05:40.493","130","7.605","128","8392704" +"2020-03-01 20:05:41.493","130","7.607","128","8392704" +"2020-03-01 20:05:42.493","130","7.607","128","8392704" +"2020-03-01 20:05:43.493","130","7.604","128","8392704" +"2020-03-01 20:05:44.493","130","7.597","128","8392704" +"2020-03-01 20:05:45.493","130","7.592","128","8392704" +"2020-03-01 20:05:46.493","130","7.59","128","8392704" +"2020-03-01 20:05:47.493","130","7.59","128","8392704" +"2020-03-01 20:05:48.493","130","7.591","128","8392704" +"2020-03-01 20:05:49.493","130","7.591","128","8392704" +"2020-03-01 20:05:50.493","130","7.591","128","8392704" +"2020-03-01 20:05:51.493","130","7.594","128","8392704" +"2020-03-01 20:05:52.493","130","7.599","128","8392704" +"2020-03-01 20:05:53.493","130","7.601","128","8392704" +"2020-03-01 
20:05:54.493","130","7.602","128","8392704" +"2020-03-01 20:05:55.493","130","7.602","128","8392704" +"2020-03-01 20:05:56.493","130","7.602","128","8392704" +"2020-03-01 20:05:57.493","130","7.603","128","8392704" +"2020-03-01 20:05:58.493","130","7.604","128","8392704" +"2020-03-01 20:05:59.493","130","7.604","128","8392704" +"2020-03-01 20:06:00.493","130","7.605","128","8392704" +"2020-03-01 20:06:01.493","130","7.606","128","8392704" +"2020-03-01 20:06:02.493","130","7.607","128","8392704" +"2020-03-01 20:06:03.493","130","7.605","128","8392704" +"2020-03-01 20:06:04.493","130","7.604","128","8392704" +"2020-03-01 20:06:05.493","130","7.603","128","8392704" +"2020-03-01 20:06:06.493","130","7.602","128","8392704" +"2020-03-01 20:06:07.493","130","7.603","128","8392704" +"2020-03-01 20:06:08.493","130","7.604","128","8392704" \ No newline at end of file diff --git a/tests/pytest/test_data/ordered.csv b/tests/pytest/test_data/ordered.csv new file mode 100644 index 0000000000000000000000000000000000000000..14da572d75e3c9bef32d6de7696ce65485b06d23 --- /dev/null +++ b/tests/pytest/test_data/ordered.csv @@ -0,0 +1,500 @@ +"2020-03-01 19:46:50.493","130","7.617","128","8392704" +"2020-03-01 19:46:51.493","130","7.615","128","8392704" +"2020-03-01 19:46:52.493","130","7.613","128","8392704" +"2020-03-01 19:46:53.493","130","7.612","128","8392704" +"2020-03-01 19:46:54.493","130","7.611","128","8392704" +"2020-03-01 19:46:55.493","130","7.612","128","8392704" +"2020-03-01 19:46:56.493","130","7.611","128","8392704" +"2020-03-01 19:46:57.493","130","7.61","128","8392704" +"2020-03-01 19:46:58.493","130","7.61","128","8392704" +"2020-03-01 19:46:59.493","130","7.613","128","8392704" +"2020-03-01 19:47:00.493","130","7.617","128","8392704" +"2020-03-01 19:47:01.493","130","7.618","128","8392704" +"2020-03-01 19:47:02.493","130","7.619","128","8392704" +"2020-03-01 19:47:03.493","130","7.62","128","8392704" +"2020-03-01 19:47:04.493","130","7.619","128","8392704" +"2020-03-01 19:47:05.493","130","7.62","128","8392704" +"2020-03-01 19:47:06.493","130","7.62","128","8392704" +"2020-03-01 19:47:07.493","130","7.618","128","8392704" +"2020-03-01 19:47:08.493","130","7.618","128","8392704" +"2020-03-01 19:47:09.493","130","7.616","128","8392704" +"2020-03-01 19:47:10.493","130","7.615","128","8392704" +"2020-03-01 19:47:11.493","130","7.614","128","8392704" +"2020-03-01 19:47:12.493","130","7.614","128","8392704" +"2020-03-01 19:47:13.493","130","7.615","128","8392704" +"2020-03-01 19:47:14.493","130","7.617","128","8392704" +"2020-03-01 19:47:15.493","130","7.617","128","8392704" +"2020-03-01 19:47:16.493","130","7.612","128","8392704" +"2020-03-01 19:47:17.493","130","7.609","128","8392704" +"2020-03-01 19:47:18.493","130","7.609","128","8392704" +"2020-03-01 19:47:19.493","130","7.609","128","8392704" +"2020-03-01 19:47:20.493","130","7.611","128","8392704" +"2020-03-01 19:47:21.493","130","7.613","128","8392704" +"2020-03-01 19:47:22.493","130","7.612","128","8392704" +"2020-03-01 19:47:23.493","130","7.612","128","8392704" +"2020-03-01 19:47:24.493","130","7.612","128","8392704" +"2020-03-01 19:47:25.493","130","7.613","128","8392704" +"2020-03-01 19:47:26.493","130","7.617","128","8392704" +"2020-03-01 19:47:27.493","130","7.62","128","8392704" +"2020-03-01 19:47:28.493","130","7.621","128","8392704" +"2020-03-01 19:47:29.493","130","7.621","128","8392704" +"2020-03-01 19:47:30.493","130","7.623","128","8392704" +"2020-03-01 19:47:31.493","130","7.624","128","8392704" +"2020-03-01 
19:47:32.493","130","7.621","128","8392704" +"2020-03-01 19:47:33.493","130","7.619","128","8392704" +"2020-03-01 19:47:34.493","130","7.618","128","8392704" +"2020-03-01 19:47:35.493","130","7.616","128","8392704" +"2020-03-01 19:47:36.493","130","7.618","128","8392704" +"2020-03-01 19:47:37.493","130","7.618","128","8392704" +"2020-03-01 19:47:38.493","130","7.616","128","8392704" +"2020-03-01 19:47:39.493","130","7.615","128","8392704" +"2020-03-01 19:47:40.493","130","7.615","128","8392704" +"2020-03-01 19:47:41.493","130","7.614","128","8392704" +"2020-03-01 19:47:42.493","130","7.613","128","8392704" +"2020-03-01 19:47:43.493","130","7.612","128","8392704" +"2020-03-01 19:47:44.493","130","7.611","128","8392704" +"2020-03-01 19:47:45.493","130","7.612","128","8392704" +"2020-03-01 19:47:46.493","130","7.614","128","8392704" +"2020-03-01 19:47:47.493","130","7.618","128","8392704" +"2020-03-01 19:47:48.493","130","7.62","128","8392704" +"2020-03-01 19:47:49.493","130","7.62","128","8392704" +"2020-03-01 19:47:50.493","130","7.621","128","8392704" +"2020-03-01 19:47:51.493","130","7.62","128","8392704" +"2020-03-01 19:47:52.493","130","7.619","128","8392704" +"2020-03-01 19:47:53.493","130","7.621","128","8392704" +"2020-03-01 19:47:54.493","130","7.622","128","8392704" +"2020-03-01 19:47:55.493","130","7.622","128","8392704" +"2020-03-01 19:47:56.493","130","7.62","128","8392704" +"2020-03-01 19:47:57.493","130","7.617","128","8392704" +"2020-03-01 19:47:58.493","130","7.616","128","8392704" +"2020-03-01 19:47:59.493","130","7.618","128","8392704" +"2020-03-01 19:48:00.493","130","7.62","128","8392704" +"2020-03-01 19:48:01.493","130","7.62","128","8392704" +"2020-03-01 19:48:02.493","130","7.616","128","8392704" +"2020-03-01 19:48:03.493","130","7.612","128","8392704" +"2020-03-01 19:48:04.493","130","7.609","128","8392704" +"2020-03-01 19:48:05.493","130","7.608","128","8392704" +"2020-03-01 19:48:06.493","130","7.605","128","8392704" +"2020-03-01 19:48:07.493","130","7.604","128","8392704" +"2020-03-01 19:48:08.493","130","7.605","128","8392704" +"2020-03-01 19:48:09.493","130","7.604","128","8392704" +"2020-03-01 19:48:10.493","130","7.604","128","8392704" +"2020-03-01 19:48:11.493","130","7.608","128","8392704" +"2020-03-01 19:48:12.493","130","7.611","128","8392704" +"2020-03-01 19:48:13.493","130","7.614","128","8392704" +"2020-03-01 19:48:14.493","130","7.616","128","8392704" +"2020-03-01 19:48:15.493","130","7.618","128","8392704" +"2020-03-01 19:48:16.493","130","7.62","128","8392704" +"2020-03-01 19:48:17.493","130","7.617","128","8392704" +"2020-03-01 19:48:18.493","130","7.61","128","8392704" +"2020-03-01 19:48:19.493","130","7.607","128","8392704" +"2020-03-01 19:48:20.493","130","7.604","128","8392704" +"2020-03-01 19:48:21.493","130","7.601","128","8392704" +"2020-03-01 19:48:22.493","130","7.601","128","8392704" +"2020-03-01 19:48:23.493","130","7.601","128","8392704" +"2020-03-01 19:48:24.493","130","7.598","128","8392704" +"2020-03-01 19:48:25.493","130","7.598","128","8392704" +"2020-03-01 19:48:26.493","130","7.604","128","8392704" +"2020-03-01 19:48:27.493","130","7.608","128","8392704" +"2020-03-01 19:48:28.493","130","7.609","128","8392704" +"2020-03-01 19:48:29.493","130","7.61","128","8392704" +"2020-03-01 19:48:30.493","130","7.611","128","8392704" +"2020-03-01 19:48:31.493","130","7.614","128","8392704" +"2020-03-01 19:48:32.493","130","7.614","128","8392704" +"2020-03-01 19:48:33.493","130","7.611","128","8392704" +"2020-03-01 
19:48:34.493","130","7.607","128","8392704" +"2020-03-01 19:48:35.493","130","7.601","128","8392704" +"2020-03-01 19:48:36.493","130","7.596","128","8392704" +"2020-03-01 19:48:37.493","130","7.593","128","8392704" +"2020-03-01 19:48:38.493","130","7.593","128","8392704" +"2020-03-01 19:48:39.493","130","7.593","128","8392704" +"2020-03-01 19:48:40.493","130","7.595","128","8392704" +"2020-03-01 19:48:41.493","130","7.596","128","8392704" +"2020-03-01 19:48:42.493","130","7.599","128","8392704" +"2020-03-01 19:48:43.493","130","7.603","128","8392704" +"2020-03-01 19:48:44.493","130","7.605","128","8392704" +"2020-03-01 19:48:45.493","130","7.607","128","8392704" +"2020-03-01 19:48:46.493","130","7.608","128","8392704" +"2020-03-01 19:48:47.493","130","7.609","128","8392704" +"2020-03-01 19:48:48.493","130","7.61","128","8392704" +"2020-03-01 19:48:49.493","130","7.608","128","8392704" +"2020-03-01 19:48:50.493","130","7.605","128","8392704" +"2020-03-01 19:48:51.493","130","7.605","128","8392704" +"2020-03-01 19:48:52.493","130","7.607","128","8392704" +"2020-03-01 19:48:53.493","130","7.608","128","8392704" +"2020-03-01 19:48:54.493","130","7.608","128","8392704" +"2020-03-01 19:48:55.493","130","7.608","128","8392704" +"2020-03-01 19:48:56.493","130","7.61","128","8392704" +"2020-03-01 19:48:57.493","130","7.613","128","8392704" +"2020-03-01 19:48:58.493","130","7.612","128","8392704" +"2020-03-01 19:48:59.493","130","7.61","128","8392704" +"2020-03-01 19:49:00.493","130","7.609","128","8392704" +"2020-03-01 19:49:01.493","130","7.61","128","8392704" +"2020-03-01 19:49:02.493","130","7.611","128","8392704" +"2020-03-01 19:49:03.493","130","7.61","128","8392704" +"2020-03-01 19:49:04.493","130","7.61","128","8392704" +"2020-03-01 19:49:05.493","130","7.613","128","8392704" +"2020-03-01 19:49:06.493","130","7.615","128","8392704" +"2020-03-01 19:49:07.493","130","7.614","128","8392704" +"2020-03-01 19:49:08.493","130","7.613","128","8392704" +"2020-03-01 19:49:09.493","130","7.613","128","8392704" +"2020-03-01 19:49:10.493","130","7.615","128","8392704" +"2020-03-01 19:49:11.493","130","7.619","128","8392704" +"2020-03-01 19:49:12.493","130","7.62","128","8392704" +"2020-03-01 19:49:13.493","130","7.618","128","8392704" +"2020-03-01 19:49:14.493","130","7.619","128","8392704" +"2020-03-01 19:49:15.493","130","7.618","128","8392704" +"2020-03-01 19:49:16.493","130","7.617","128","8392704" +"2020-03-01 19:49:17.493","130","7.617","128","8392704" +"2020-03-01 19:49:18.493","130","7.618","128","8392704" +"2020-03-01 19:49:19.493","130","7.617","128","8392704" +"2020-03-01 19:49:20.493","130","7.616","128","8392704" +"2020-03-01 19:49:21.493","130","7.615","128","8392704" +"2020-03-01 19:49:22.493","130","7.616","128","8392704" +"2020-03-01 19:49:23.493","130","7.617","128","8392704" +"2020-03-01 19:49:24.493","130","7.615","128","8392704" +"2020-03-01 19:49:25.493","130","7.613","128","8392704" +"2020-03-01 19:49:26.493","130","7.612","128","8392704" +"2020-03-01 19:49:27.493","130","7.613","128","8392704" +"2020-03-01 19:49:28.493","130","7.614","128","8392704" +"2020-03-01 19:49:29.493","130","7.612","128","8392704" +"2020-03-01 19:49:30.493","130","7.611","128","8392704" +"2020-03-01 19:49:31.493","130","7.611","128","8392704" +"2020-03-01 19:49:32.493","130","7.612","128","8392704" +"2020-03-01 19:49:33.493","130","7.613","128","8392704" +"2020-03-01 19:49:34.493","130","7.614","128","8392704" +"2020-03-01 19:49:35.493","130","7.612","128","8392704" +"2020-03-01 
19:49:36.493","130","7.607","128","8392704" +"2020-03-01 19:49:37.493","130","7.603","128","8392704" +"2020-03-01 19:49:38.493","130","7.599","128","8392704" +"2020-03-01 19:49:39.493","130","7.599","128","8392704" +"2020-03-01 19:49:40.493","130","7.599","128","8392704" +"2020-03-01 19:49:41.493","130","7.599","128","8392704" +"2020-03-01 19:49:42.493","130","7.601","128","8392704" +"2020-03-01 19:49:43.493","130","7.605","128","8392704" +"2020-03-01 19:49:44.493","130","7.606","128","8392704" +"2020-03-01 19:49:45.493","130","7.606","128","8392704" +"2020-03-01 19:49:46.493","130","7.606","128","8392704" +"2020-03-01 19:49:47.493","130","7.604","128","8392704" +"2020-03-01 19:49:48.493","130","7.604","128","8392704" +"2020-03-01 19:49:49.493","130","7.603","128","8392704" +"2020-03-01 19:49:50.493","130","7.604","128","8392704" +"2020-03-01 19:49:51.493","130","7.608","128","8392704" +"2020-03-01 19:49:52.493","130","7.614","128","8392704" +"2020-03-01 19:49:53.493","130","7.618","128","8392704" +"2020-03-01 19:49:54.493","130","7.621","128","8392704" +"2020-03-01 19:49:55.493","130","7.623","128","8392704" +"2020-03-01 19:49:56.493","130","7.623","128","8392704" +"2020-03-01 19:49:57.493","130","7.624","128","8392704" +"2020-03-01 19:49:58.493","130","7.626","128","8392704" +"2020-03-01 19:49:59.493","130","7.628","128","8392704" +"2020-03-01 19:50:00.493","130","7.627","128","8392704" +"2020-03-01 19:50:01.493","130","7.625","128","8392704" +"2020-03-01 19:50:02.493","130","7.627","128","8392704" +"2020-03-01 19:50:03.493","130","7.63","128","8392704" +"2020-03-01 19:50:04.493","130","7.633","128","8392704" +"2020-03-01 19:50:05.493","130","7.635","128","8392704" +"2020-03-01 19:50:06.493","130","7.634","128","8392704" +"2020-03-01 19:50:07.493","130","7.632","128","8392704" +"2020-03-01 19:50:08.493","130","7.628","128","8392704" +"2020-03-01 19:50:09.493","130","7.625","128","8392704" +"2020-03-01 19:50:10.493","130","7.625","128","8392704" +"2020-03-01 19:50:11.493","130","7.623","128","8392704" +"2020-03-01 19:50:12.493","130","7.623","128","8392704" +"2020-03-01 19:50:13.493","130","7.623","128","8392704" +"2020-03-01 19:50:14.493","130","7.622","128","8392704" +"2020-03-01 19:50:15.493","130","7.621","128","8392704" +"2020-03-01 19:50:16.493","130","7.618","128","8392704" +"2020-03-01 19:50:17.493","130","7.618","128","8392704" +"2020-03-01 19:50:18.493","130","7.617","128","8392704" +"2020-03-01 19:50:19.493","130","7.616","128","8392704" +"2020-03-01 19:50:20.493","130","7.615","128","8392704" +"2020-03-01 19:50:21.493","130","7.615","128","8392704" +"2020-03-01 19:50:22.493","130","7.616","128","8392704" +"2020-03-01 19:50:23.493","130","7.619","128","8392704" +"2020-03-01 19:50:24.493","130","7.622","128","8392704" +"2020-03-01 19:50:25.493","130","7.624","128","8392704" +"2020-03-01 19:50:26.493","130","7.627","128","8392704" +"2020-03-01 19:50:27.493","130","7.627","128","8392704" +"2020-03-01 19:50:28.493","130","7.625","128","8392704" +"2020-03-01 19:50:29.493","130","7.625","128","8392704" +"2020-03-01 19:50:30.493","130","7.625","128","8392704" +"2020-03-01 19:50:31.493","130","7.624","128","8392704" +"2020-03-01 19:50:32.493","130","7.624","128","8392704" +"2020-03-01 19:50:33.493","130","7.624","128","8392704" +"2020-03-01 19:50:34.493","130","7.626","128","8392704" +"2020-03-01 19:50:35.493","130","7.627","128","8392704" +"2020-03-01 19:50:36.493","130","7.627","128","8392704" +"2020-03-01 19:50:37.493","130","7.626","128","8392704" +"2020-03-01 
19:50:38.493","130","7.623","128","8392704" +"2020-03-01 19:50:39.493","130","7.619","128","8392704" +"2020-03-01 19:50:40.493","130","7.616","128","8392704" +"2020-03-01 19:50:41.493","130","7.616","128","8392704" +"2020-03-01 19:50:42.493","130","7.615","128","8392704" +"2020-03-01 19:50:43.493","130","7.613","128","8392704" +"2020-03-01 19:50:44.493","130","7.614","128","8392704" +"2020-03-01 19:50:45.493","130","7.614","128","8392704" +"2020-03-01 19:50:46.493","130","7.612","128","8392704" +"2020-03-01 19:50:47.493","130","7.611","128","8392704" +"2020-03-01 19:50:48.493","130","7.611","128","8392704" +"2020-03-01 19:50:49.493","130","7.611","128","8392704" +"2020-03-01 19:50:50.493","130","7.612","128","8392704" +"2020-03-01 19:50:51.493","130","7.613","128","8392704" +"2020-03-01 19:50:52.493","130","7.613","128","8392704" +"2020-03-01 19:50:53.493","130","7.615","128","8392704" +"2020-03-01 19:50:54.493","130","7.617","128","8392704" +"2020-03-01 19:50:55.493","130","7.617","128","8392704" +"2020-03-01 19:50:56.493","130","7.619","128","8392704" +"2020-03-01 19:50:57.493","130","7.622","128","8392704" +"2020-03-01 19:50:58.493","130","7.624","128","8392704" +"2020-03-01 19:50:59.493","130","7.625","128","8392704" +"2020-03-01 19:51:00.493","130","7.624","128","8392704" +"2020-03-01 19:51:01.493","130","7.624","128","8392704" +"2020-03-01 19:51:02.493","130","7.622","128","8392704" +"2020-03-01 19:51:03.493","130","7.62","128","8392704" +"2020-03-01 19:51:04.493","130","7.617","128","8392704" +"2020-03-01 19:51:05.493","130","7.617","128","8392704" +"2020-03-01 19:51:06.493","130","7.618","128","8392704" +"2020-03-01 19:51:07.493","130","7.618","128","8392704" +"2020-03-01 19:51:08.493","130","7.618","128","8392704" +"2020-03-01 19:51:09.493","130","7.62","128","8392704" +"2020-03-01 19:51:10.493","130","7.622","128","8392704" +"2020-03-01 19:51:11.493","130","7.623","128","8392704" +"2020-03-01 19:51:12.493","130","7.624","128","8392704" +"2020-03-01 19:51:13.493","130","7.625","128","8392704" +"2020-03-01 19:51:14.493","130","7.626","128","8392704" +"2020-03-01 19:51:15.493","130","7.626","128","8392704" +"2020-03-01 19:51:16.493","130","7.626","128","8392704" +"2020-03-01 19:51:17.493","130","7.627","128","8392704" +"2020-03-01 19:51:18.493","130","7.627","128","8392704" +"2020-03-01 19:51:19.493","130","7.629","128","8392704" +"2020-03-01 19:51:20.493","130","7.629","128","8392704" +"2020-03-01 19:51:21.493","130","7.626","128","8392704" +"2020-03-01 19:51:22.493","130","7.625","128","8392704" +"2020-03-01 19:51:23.493","130","7.625","128","8392704" +"2020-03-01 19:51:24.493","130","7.626","128","8392704" +"2020-03-01 19:51:25.493","130","7.626","128","8392704" +"2020-03-01 19:51:26.493","130","7.624","128","8392704" +"2020-03-01 19:51:27.493","130","7.623","128","8392704" +"2020-03-01 19:51:28.493","130","7.624","128","8392704" +"2020-03-01 19:51:29.493","130","7.624","128","8392704" +"2020-03-01 19:51:30.493","130","7.624","128","8392704" +"2020-03-01 19:51:31.493","130","7.624","128","8392704" +"2020-03-01 19:51:32.493","130","7.626","128","8392704" +"2020-03-01 19:51:33.493","130","7.626","128","8392704" +"2020-03-01 19:51:34.493","130","7.626","128","8392704" +"2020-03-01 19:51:35.493","130","7.625","128","8392704" +"2020-03-01 19:51:36.493","130","7.624","128","8392704" +"2020-03-01 19:51:37.493","130","7.623","128","8392704" +"2020-03-01 19:51:38.493","130","7.622","128","8392704" +"2020-03-01 19:51:39.493","130","7.62","128","8392704" +"2020-03-01 
19:51:40.493","130","7.62","128","8392704" +"2020-03-01 19:51:41.493","130","7.62","128","8392704" +"2020-03-01 19:51:42.493","130","7.621","128","8392704" +"2020-03-01 19:51:43.493","130","7.62","128","8392704" +"2020-03-01 19:51:44.493","130","7.619","128","8392704" +"2020-03-01 19:51:45.493","130","7.62","128","8392704" +"2020-03-01 19:51:46.493","130","7.62","128","8392704" +"2020-03-01 19:51:47.493","130","7.618","128","8392704" +"2020-03-01 19:51:48.493","130","7.619","128","8392704" +"2020-03-01 19:51:49.493","130","7.62","128","8392704" +"2020-03-01 19:51:50.493","130","7.622","128","8392704" +"2020-03-01 19:51:51.493","130","7.622","128","8392704" +"2020-03-01 19:51:52.493","130","7.62","128","8392704" +"2020-03-01 19:51:53.493","130","7.62","128","8392704" +"2020-03-01 19:51:54.493","130","7.622","128","8392704" +"2020-03-01 19:51:55.493","130","7.624","128","8392704" +"2020-03-01 19:51:56.493","130","7.622","128","8392704" +"2020-03-01 19:51:57.493","130","7.616","128","8392704" +"2020-03-01 19:51:58.493","130","7.611","128","8392704" +"2020-03-01 19:51:59.493","130","7.61","128","8392704" +"2020-03-01 19:52:00.493","130","7.608","128","8392704" +"2020-03-01 19:52:01.493","130","7.606","128","8392704" +"2020-03-01 19:52:02.493","130","7.607","128","8392704" +"2020-03-01 19:52:03.493","130","7.608","128","8392704" +"2020-03-01 19:52:04.493","130","7.61","128","8392704" +"2020-03-01 19:52:05.493","130","7.612","128","8392704" +"2020-03-01 19:52:06.493","130","7.615","128","8392704" +"2020-03-01 19:52:07.493","130","7.62","128","8392704" +"2020-03-01 19:52:08.493","130","7.623","128","8392704" +"2020-03-01 19:52:09.493","130","7.624","128","8392704" +"2020-03-01 19:52:10.493","130","7.623","128","8392704" +"2020-03-01 19:52:11.493","130","7.623","128","8392704" +"2020-03-01 19:52:12.493","130","7.624","128","8392704" +"2020-03-01 19:52:13.493","130","7.622","128","8392704" +"2020-03-01 19:52:14.493","130","7.62","128","8392704" +"2020-03-01 19:52:15.493","130","7.621","128","8392704" +"2020-03-01 19:52:16.493","130","7.62","128","8392704" +"2020-03-01 19:52:17.493","130","7.622","128","8392704" +"2020-03-01 19:52:18.493","130","7.625","128","8392704" +"2020-03-01 19:52:19.493","130","7.627","128","8392704" +"2020-03-01 19:52:20.493","130","7.625","128","8392704" +"2020-03-01 19:52:21.493","130","7.621","128","8392704" +"2020-03-01 19:52:22.493","130","7.617","128","8392704" +"2020-03-01 19:52:23.493","130","7.617","128","8392704" +"2020-03-01 19:52:24.493","130","7.617","128","8392704" +"2020-03-01 19:52:25.493","130","7.616","128","8392704" +"2020-03-01 19:52:26.493","130","7.615","128","8392704" +"2020-03-01 19:52:27.493","130","7.616","128","8392704" +"2020-03-01 19:52:28.493","130","7.619","128","8392704" +"2020-03-01 19:52:29.493","130","7.621","128","8392704" +"2020-03-01 19:52:30.493","130","7.621","128","8392704" +"2020-03-01 19:52:31.493","130","7.621","128","8392704" +"2020-03-01 19:52:32.493","130","7.621","128","8392704" +"2020-03-01 19:52:33.493","130","7.621","128","8392704" +"2020-03-01 19:52:34.493","130","7.623","128","8392704" +"2020-03-01 19:52:35.493","130","7.621","128","8392704" +"2020-03-01 19:52:36.493","130","7.617","128","8392704" +"2020-03-01 19:52:37.493","130","7.615","128","8392704" +"2020-03-01 19:52:38.493","130","7.612","128","8392704" +"2020-03-01 19:52:39.493","130","7.609","128","8392704" +"2020-03-01 19:52:40.493","130","7.606","128","8392704" +"2020-03-01 19:52:41.493","130","7.606","128","8392704" +"2020-03-01 
19:52:42.493","130","7.609","128","8392704" +"2020-03-01 19:52:43.493","130","7.612","128","8392704" +"2020-03-01 19:52:44.493","130","7.616","128","8392704" +"2020-03-01 19:52:45.493","130","7.619","128","8392704" +"2020-03-01 19:52:46.493","130","7.62","128","8392704" +"2020-03-01 19:52:47.493","130","7.622","128","8392704" +"2020-03-01 19:52:48.493","130","7.622","128","8392704" +"2020-03-01 19:52:49.493","130","7.621","128","8392704" +"2020-03-01 19:52:50.493","130","7.618","128","8392704" +"2020-03-01 19:52:51.493","130","7.616","128","8392704" +"2020-03-01 19:52:52.493","130","7.613","128","8392704" +"2020-03-01 19:52:53.493","130","7.612","128","8392704" +"2020-03-01 19:52:54.493","130","7.612","128","8392704" +"2020-03-01 19:52:55.493","130","7.611","128","8392704" +"2020-03-01 19:52:56.493","130","7.609","128","8392704" +"2020-03-01 19:52:57.493","130","7.608","128","8392704" +"2020-03-01 19:52:58.493","130","7.609","128","8392704" +"2020-03-01 19:52:59.493","130","7.611","128","8392704" +"2020-03-01 19:53:00.493","130","7.612","128","8392704" +"2020-03-01 19:53:01.493","130","7.614","128","8392704" +"2020-03-01 19:53:02.493","130","7.618","128","8392704" +"2020-03-01 19:53:03.493","130","7.62","128","8392704" +"2020-03-01 19:53:04.493","130","7.62","128","8392704" +"2020-03-01 19:53:05.493","130","7.62","128","8392704" +"2020-03-01 19:53:06.493","130","7.619","128","8392704" +"2020-03-01 19:53:07.493","130","7.617","128","8392704" +"2020-03-01 19:53:08.493","130","7.615","128","8392704" +"2020-03-01 19:53:09.493","130","7.612","128","8392704" +"2020-03-01 19:53:10.493","130","7.61","128","8392704" +"2020-03-01 19:53:11.493","130","7.609","128","8392704" +"2020-03-01 19:53:12.493","130","7.608","128","8392704" +"2020-03-01 19:53:13.493","130","7.61","128","8392704" +"2020-03-01 19:53:14.493","130","7.611","128","8392704" +"2020-03-01 19:53:15.493","130","7.609","128","8392704" +"2020-03-01 19:53:16.493","130","7.608","128","8392704" +"2020-03-01 19:53:17.493","130","7.608","128","8392704" +"2020-03-01 19:53:18.493","130","7.607","128","8392704" +"2020-03-01 19:53:19.493","130","7.607","128","8392704" +"2020-03-01 19:53:20.493","130","7.605","128","8392704" +"2020-03-01 19:53:21.493","130","7.603","128","8392704" +"2020-03-01 19:53:22.493","130","7.606","128","8392704" +"2020-03-01 19:53:23.493","130","7.611","128","8392704" +"2020-03-01 19:53:24.493","130","7.615","128","8392704" +"2020-03-01 19:53:25.493","130","7.618","128","8392704" +"2020-03-01 19:53:26.493","130","7.62","128","8392704" +"2020-03-01 19:53:27.493","130","7.622","128","8392704" +"2020-03-01 19:53:28.493","130","7.624","128","8392704" +"2020-03-01 19:53:29.493","130","7.626","128","8392704" +"2020-03-01 19:53:30.493","130","7.624","128","8392704" +"2020-03-01 19:53:31.493","130","7.617","128","8392704" +"2020-03-01 19:53:32.493","130","7.613","128","8392704" +"2020-03-01 19:53:33.493","130","7.613","128","8392704" +"2020-03-01 19:53:34.493","130","7.613","128","8392704" +"2020-03-01 19:53:35.493","130","7.61","128","8392704" +"2020-03-01 19:53:36.493","130","7.609","128","8392704" +"2020-03-01 19:53:37.493","130","7.611","128","8392704" +"2020-03-01 19:53:38.493","130","7.61","128","8392704" +"2020-03-01 19:53:39.493","130","7.609","128","8392704" +"2020-03-01 19:53:40.493","130","7.608","128","8392704" +"2020-03-01 19:53:41.493","130","7.605","128","8392704" +"2020-03-01 19:53:42.493","130","7.601","128","8392704" +"2020-03-01 19:53:43.493","130","7.6","128","8392704" +"2020-03-01 
19:53:44.493","130","7.602","128","8392704" +"2020-03-01 19:53:45.493","130","7.604","128","8392704" +"2020-03-01 19:53:46.493","130","7.605","128","8392704" +"2020-03-01 19:53:47.493","130","7.606","128","8392704" +"2020-03-01 19:53:48.493","130","7.605","128","8392704" +"2020-03-01 19:53:49.493","130","7.605","128","8392704" +"2020-03-01 19:53:50.493","130","7.606","128","8392704" +"2020-03-01 19:53:51.493","130","7.606","128","8392704" +"2020-03-01 19:53:52.493","130","7.604","128","8392704" +"2020-03-01 19:53:53.493","130","7.606","128","8392704" +"2020-03-01 19:53:54.493","130","7.61","128","8392704" +"2020-03-01 19:53:55.493","130","7.612","128","8392704" +"2020-03-01 19:53:56.493","130","7.613","128","8392704" +"2020-03-01 19:53:57.493","130","7.613","128","8392704" +"2020-03-01 19:53:58.493","130","7.613","128","8392704" +"2020-03-01 19:53:59.493","130","7.615","128","8392704" +"2020-03-01 19:54:00.493","130","7.616","128","8392704" +"2020-03-01 19:54:01.493","130","7.616","128","8392704" +"2020-03-01 19:54:02.493","130","7.618","128","8392704" +"2020-03-01 19:54:03.493","130","7.621","128","8392704" +"2020-03-01 19:54:04.493","130","7.623","128","8392704" +"2020-03-01 19:54:05.493","130","7.623","128","8392704" +"2020-03-01 19:54:06.493","130","7.624","128","8392704" +"2020-03-01 19:54:07.493","130","7.624","128","8392704" +"2020-03-01 19:54:08.493","130","7.622","128","8392704" +"2020-03-01 19:54:09.493","130","7.62","128","8392704" +"2020-03-01 19:54:10.493","130","7.621","128","8392704" +"2020-03-01 19:54:11.493","130","7.62","128","8392704" +"2020-03-01 19:54:12.493","130","7.617","128","8392704" +"2020-03-01 19:54:13.493","130","7.617","128","8392704" +"2020-03-01 19:54:14.493","130","7.618","128","8392704" +"2020-03-01 19:54:15.493","130","7.617","128","8392704" +"2020-03-01 19:54:16.493","130","7.615","128","8392704" +"2020-03-01 19:54:17.493","130","7.613","128","8392704" +"2020-03-01 19:54:18.493","130","7.612","128","8392704" +"2020-03-01 19:54:19.493","130","7.609","128","8392704" +"2020-03-01 19:54:20.493","130","7.605","128","8392704" +"2020-03-01 19:54:21.493","130","7.604","128","8392704" +"2020-03-01 19:54:22.493","130","7.605","128","8392704" +"2020-03-01 19:54:23.493","130","7.608","128","8392704" +"2020-03-01 19:54:24.493","130","7.611","128","8392704" +"2020-03-01 19:54:25.493","130","7.613","128","8392704" +"2020-03-01 19:54:26.493","130","7.613","128","8392704" +"2020-03-01 19:54:27.493","130","7.611","128","8392704" +"2020-03-01 19:54:28.493","130","7.611","128","8392704" +"2020-03-01 19:54:29.493","130","7.611","128","8392704" +"2020-03-01 19:54:30.493","130","7.612","128","8392704" +"2020-03-01 19:54:31.493","130","7.614","128","8392704" +"2020-03-01 19:54:32.493","130","7.61","128","8392704" +"2020-03-01 19:54:33.493","130","7.603","128","8392704" +"2020-03-01 19:54:34.493","130","7.598","128","8392704" +"2020-03-01 19:54:35.493","130","7.594","128","8392704" +"2020-03-01 19:54:36.493","130","7.591","128","8392704" +"2020-03-01 19:54:37.493","130","7.591","128","8392704" +"2020-03-01 19:54:38.493","130","7.59","128","8392704" +"2020-03-01 19:54:39.493","130","7.588","128","8392704" +"2020-03-01 19:54:40.493","130","7.593","128","8392704" +"2020-03-01 19:54:41.493","130","7.599","128","8392704" +"2020-03-01 19:54:42.493","130","7.602","128","8392704" +"2020-03-01 19:54:43.493","130","7.604","128","8392704" +"2020-03-01 19:54:44.493","130","7.606","128","8392704" +"2020-03-01 19:54:45.493","130","7.609","128","8392704" +"2020-03-01 
19:54:46.493","130","7.612","128","8392704" +"2020-03-01 19:54:47.493","130","7.614","128","8392704" +"2020-03-01 19:54:48.493","130","7.616","128","8392704" +"2020-03-01 19:54:49.493","130","7.617","128","8392704" +"2020-03-01 19:54:50.493","130","7.619","128","8392704" +"2020-03-01 19:54:51.493","130","7.623","128","8392704" +"2020-03-01 19:54:52.493","130","7.626","128","8392704" +"2020-03-01 19:54:53.493","130","7.626","128","8392704" +"2020-03-01 19:54:54.493","130","7.624","128","8392704" +"2020-03-01 19:54:55.493","130","7.623","128","8392704" +"2020-03-01 19:54:56.493","130","7.618","128","8392704" +"2020-03-01 19:54:57.493","130","7.613","128","8392704" +"2020-03-01 19:54:58.493","130","7.61","128","8392704" +"2020-03-01 19:54:59.493","130","7.605","128","8392704" +"2020-03-01 19:55:00.493","130","7.604","128","8392704" +"2020-03-01 19:55:01.493","130","7.603","128","8392704" +"2020-03-01 19:55:02.493","130","7.602","128","8392704" +"2020-03-01 19:55:03.493","130","7.602","128","8392704" +"2020-03-01 19:55:04.493","130","7.602","128","8392704" +"2020-03-01 19:55:05.493","130","7.603","128","8392704" +"2020-03-01 19:55:06.493","130","7.608","128","8392704" +"2020-03-01 19:55:07.493","130","7.609","128","8392704" +"2020-03-01 19:55:08.493","130","7.608","128","8392704" +"2020-03-01 19:55:09.493","130","7.609","128","8392704" \ No newline at end of file diff --git a/tests/pytest/tools/insert.json b/tests/pytest/tools/insert.json new file mode 100644 index 0000000000000000000000000000000000000000..c3fa78076b2a25f73ebc50f6a35bcc5afddb246d --- /dev/null +++ b/tests/pytest/tools/insert.json @@ -0,0 +1,50 @@ +{ + "filetype":"insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 1, + "databases": [{ + "dbinfo": { + "name": "db01", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "update": 0, + "maxtablesPerVnode": 1000 + }, + "super_tables": [{ + "name": "stb01", + "childtable_count": 100, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "data_source": "rand", + "insert_mode": "taosc", + "insert_rate": 0, + "insert_rows": 1000, + "timestamp_step": 1000, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "/home/data/sample.csv", + "tags_file": "", + "columns": [{ + "type": "SMALLINT" + }, { + "type": "BOOL" + }, { + "type": "BINARY", + "len": 6 + }], + "tags": [{ + "type": "INT" + },{ + "type": "BINARY", + "len": 4 + }] + }] + }] +} diff --git a/tests/pytest/tools/lowa.py b/tests/pytest/tools/lowa.py new file mode 100644 index 0000000000000000000000000000000000000000..523229dd463d54c5b2cd23a9a3d4d547858a3b5c --- /dev/null +++ b/tests/pytest/tools/lowa.py @@ -0,0 +1,66 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.numberOfTables = 10000 + self.numberOfRecords = 100 + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + tdSql.prepare() + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + os.system("yes | %slowa -f tools/insert.json" % binPath) + + tdSql.execute("use db01") + tdSql.query("select count(*) from stb01") + tdSql.checkData(0, 0, 100000) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemo.py b/tests/pytest/tools/taosdemo.py index 5bf8ebaf03beb052f09d80b92c6b6b6d588e4f4f..1a976aef594576ba29b928465fc38baea6dcb63e 100644 --- a/tests/pytest/tools/taosdemo.py +++ b/tests/pytest/tools/taosdemo.py @@ -55,6 +55,12 @@ class TDTestCase: tdSql.query("select count(*) from meters") tdSql.checkData(0, 0, self.numberOfTables * self.numberOfRecords) + tdSql.query("select sum(f1) from test.meters interval(1h) sliding(30m)") + tdSql.checkRows(2) + + tdSql.query("select apercentile(f1, 1) from test.meters interval(10s)") + tdSql.checkRows(11) + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/pytest/update/allow_update-0.py b/tests/pytest/update/allow_update-0.py new file mode 100644 index 0000000000000000000000000000000000000000..69e23883f347ed06a4a3c2375e1252f17336c467 --- /dev/null +++ b/tests/pytest/update/allow_update-0.py @@ -0,0 +1,170 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.numOfRecords = 10 + self.ts = 1604295582000 + + def restartTaosd(self): + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.execute("use udb") + + def run(self): + tdSql.prepare() + startTs = self.ts + + print("==============step1") + tdSql.execute("create database udb update 0") + tdSql.execute("use udb") + tdSql.execute("create table t (ts timestamp, a int)") + tdSql.execute("insert into t values (%d, 1)" % (startTs)) + tdSql.execute("insert into t values (%d, 1)" % (startTs - 3)) + tdSql.execute("insert into t values (%d, 1)" % (startTs + 3)) + + tdSql.query("select * from t") + tdSql.checkRows(3) + + tdSql.query("select a from t") + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, 1) + tdSql.checkData(2, 0, 1) + + print("==============step2") + tdSql.execute("insert into t values (%d, 2)" % (startTs)) + tdSql.execute("insert into t values (%d, 2)" % (startTs - 3)) + tdSql.execute("insert into t values (%d, 2)" % (startTs + 3)) + + tdSql.query("select * from t") + tdSql.checkRows(3) + + tdSql.query("select a from t") + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, 1) + tdSql.checkData(2, 0, 1) + + print("==============step3") + tdSql.execute("insert into t values (%d, 3)" % (startTs - 4)) + tdSql.execute("insert into t values (%d, 3)" % (startTs - 2)) + tdSql.execute("insert into t values (%d, 3)" % (startTs + 2)) + tdSql.execute("insert into t values (%d, 3)" % (startTs + 4)) + + tdSql.query("select * from t") + tdSql.checkRows(7) + + tdSql.query("select a from t") + tdSql.checkData(0, 0, 3) + tdSql.checkData(1, 0, 1) + tdSql.checkData(2, 0, 3) + tdSql.checkData(3, 0, 1) + tdSql.checkData(4, 0, 3) + tdSql.checkData(5, 0, 1) + tdSql.checkData(6, 0, 3) + + print("==============step4") + tdSql.execute("insert into t values (%d, 4)" % (startTs - 4)) + tdSql.execute("insert into t values (%d, 4)" % (startTs - 2)) + tdSql.execute("insert into t values (%d, 4)" % (startTs + 2)) + tdSql.execute("insert into t values (%d, 4)" % (startTs + 4)) + + tdSql.query("select * from t") + tdSql.checkRows(7) + + tdSql.query("select a from t") + tdSql.checkData(0, 0, 3) + tdSql.checkData(1, 0, 1) + tdSql.checkData(2, 0, 3) + tdSql.checkData(3, 0, 1) + tdSql.checkData(4, 0, 3) + tdSql.checkData(5, 0, 1) + tdSql.checkData(6, 0, 3) + + print("==============step5") + tdSql.execute("insert into t values (%d, 5)" % (startTs - 1)) + tdSql.execute("insert into t values (%d, 5)" % (startTs + 1)) + + tdSql.query("select * from t") + tdSql.checkRows(9) + + tdSql.query("select a from t") + tdSql.checkData(0, 0, 3) + tdSql.checkData(1, 0, 1) + tdSql.checkData(2, 0, 3) + tdSql.checkData(3, 0, 5) + tdSql.checkData(4, 0, 1) + tdSql.checkData(5, 0, 5) + tdSql.checkData(6, 0, 3) + tdSql.checkData(7, 0, 1) + tdSql.checkData(8, 0, 3) + + print("==============step6") + tdSql.execute("insert into t values (%d, 6)" % (startTs - 4)) + tdSql.execute("insert into t values (%d, 6)" % (startTs - 3)) + tdSql.execute("insert into t values (%d, 6)" % (startTs - 2)) + 
tdSql.execute("insert into t values (%d, 6)" % (startTs - 1)) + tdSql.execute("insert into t values (%d, 6)" % (startTs)) + tdSql.execute("insert into t values (%d, 6)" % (startTs + 1)) + tdSql.execute("insert into t values (%d, 6)" % (startTs + 2)) + tdSql.execute("insert into t values (%d, 6)" % (startTs + 3)) + tdSql.execute("insert into t values (%d, 6)" % (startTs + 4)) + + tdSql.query("select * from t") + tdSql.checkRows(9) + + tdSql.query("select a from t") + tdSql.checkData(0, 0, 3) + tdSql.checkData(1, 0, 1) + tdSql.checkData(2, 0, 3) + tdSql.checkData(3, 0, 5) + tdSql.checkData(4, 0, 1) + tdSql.checkData(5, 0, 5) + tdSql.checkData(6, 0, 3) + tdSql.checkData(7, 0, 1) + tdSql.checkData(8, 0, 3) + + # restart taosd to commit, and check + self.restartTaosd(); + + tdSql.query("select * from t") + tdSql.checkRows(9) + + tdSql.query("select a from t") + tdSql.checkData(0, 0, 3) + tdSql.checkData(1, 0, 1) + tdSql.checkData(2, 0, 3) + tdSql.checkData(3, 0, 5) + tdSql.checkData(4, 0, 1) + tdSql.checkData(5, 0, 5) + tdSql.checkData(6, 0, 3) + tdSql.checkData(7, 0, 1) + tdSql.checkData(8, 0, 3) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/update/allow_update.py b/tests/pytest/update/allow_update.py new file mode 100644 index 0000000000000000000000000000000000000000..fa122ff5cf778cca5c72525b9acb56d09c8b2314 --- /dev/null +++ b/tests/pytest/update/allow_update.py @@ -0,0 +1,266 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.numOfRecords = 10 + self.ts = 1604295582000 + + def restartTaosd(self): + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.execute("use udb") + + def run(self): + tdSql.prepare() + startTs = self.ts + + tdSql.execute("create database udb update 1") + tdSql.execute("use udb") + tdSql.execute("create table t (ts timestamp, a int)") + + print("==============step1") + tdSql.execute("insert into t values (%d, 1)" % (startTs)) + tdSql.execute("insert into t values (%d, 1)" % (startTs - 3)) + tdSql.execute("insert into t values (%d, 1)" % (startTs + 3)) + + tdSql.query("select * from t") + tdSql.checkRows(3) + + tdSql.query("select a from t") + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, 1) + tdSql.checkData(2, 0, 1) + + print("==============step2") + tdSql.execute("insert into t values (%d, 2)" % (startTs)) + tdSql.execute("insert into t values (%d, 2)" % (startTs - 3)) + tdSql.execute("insert into t values (%d, 2)" % (startTs + 3)) + + tdSql.query("select * from t") + tdSql.checkRows(3) + + tdSql.query("select a from t") + tdSql.checkData(0, 0, 2) + tdSql.checkData(1, 0, 2) + tdSql.checkData(2, 0, 2) + + print("==============step3") + tdSql.execute("insert into t values (%d, 3)" % (startTs - 4)) + 
tdSql.execute("insert into t values (%d, 3)" % (startTs - 2)) + tdSql.execute("insert into t values (%d, 3)" % (startTs + 2)) + tdSql.execute("insert into t values (%d, 3)" % (startTs + 4)) + + tdSql.query("select * from t") + tdSql.checkRows(7) + + tdSql.query("select a from t") + tdSql.checkData(0, 0, 3) + tdSql.checkData(1, 0, 2) + tdSql.checkData(2, 0, 3) + tdSql.checkData(3, 0, 2) + tdSql.checkData(4, 0, 3) + tdSql.checkData(5, 0, 2) + tdSql.checkData(6, 0, 3) + + print("==============step4") + tdSql.execute("insert into t values (%d, 4)" % (startTs - 4)) + tdSql.execute("insert into t values (%d, 4)" % (startTs - 2)) + tdSql.execute("insert into t values (%d, 4)" % (startTs + 2)) + tdSql.execute("insert into t values (%d, 4)" % (startTs + 4)) + + tdSql.query("select * from t") + tdSql.checkRows(7) + + tdSql.query("select a from t") + tdSql.checkData(0, 0, 4) + tdSql.checkData(1, 0, 2) + tdSql.checkData(2, 0, 4) + tdSql.checkData(3, 0, 2) + tdSql.checkData(4, 0, 4) + tdSql.checkData(5, 0, 2) + tdSql.checkData(6, 0, 4) + + print("==============step5") + tdSql.execute("insert into t values (%d, 5)" % (startTs - 1)) + tdSql.execute("insert into t values (%d, 5)" % (startTs + 1)) + + tdSql.query("select * from t") + tdSql.checkRows(9) + + tdSql.query("select a from t") + tdSql.checkData(0, 0, 4) + tdSql.checkData(1, 0, 2) + tdSql.checkData(2, 0, 4) + tdSql.checkData(3, 0, 5) + tdSql.checkData(4, 0, 2) + tdSql.checkData(5, 0, 5) + tdSql.checkData(6, 0, 4) + tdSql.checkData(7, 0, 2) + tdSql.checkData(8, 0, 4) + + print("==============step6") + tdSql.execute("insert into t values (%d, 6)" % (startTs - 4)) + tdSql.execute("insert into t values (%d, 6)" % (startTs - 3)) + tdSql.execute("insert into t values (%d, 6)" % (startTs - 2)) + tdSql.execute("insert into t values (%d, 6)" % (startTs - 1)) + tdSql.execute("insert into t values (%d, 6)" % (startTs)) + tdSql.execute("insert into t values (%d, 6)" % (startTs + 1)) + tdSql.execute("insert into t values (%d, 6)" % (startTs + 2)) + tdSql.execute("insert into t values (%d, 6)" % (startTs + 3)) + tdSql.execute("insert into t values (%d, 6)" % (startTs + 4)) + + tdSql.query("select * from t") + tdSql.checkRows(9) + + tdSql.query("select a from t") + tdSql.checkData(0, 0, 6) + tdSql.checkData(1, 0, 6) + tdSql.checkData(2, 0, 6) + tdSql.checkData(3, 0, 6) + tdSql.checkData(4, 0, 6) + tdSql.checkData(5, 0, 6) + tdSql.checkData(6, 0, 6) + tdSql.checkData(7, 0, 6) + tdSql.checkData(8, 0, 6) + + # restart taosd to commit, and check + self.restartTaosd(); + + tdSql.query("select * from t") + tdSql.checkRows(9) + + tdSql.query("select a from t") + tdSql.checkData(0, 0, 6) + tdSql.checkData(1, 0, 6) + tdSql.checkData(2, 0, 6) + tdSql.checkData(3, 0, 6) + tdSql.checkData(4, 0, 6) + tdSql.checkData(5, 0, 6) + tdSql.checkData(6, 0, 6) + tdSql.checkData(7, 0, 6) + tdSql.checkData(8, 0, 6) + + tdSql.execute("create table subt (ts timestamp, a int, b float, c binary(16), d bool)") + + print("==============step7") + tdSql.execute("insert into subt (ts, a, c) values (%d, 1, 'c+0')" % (startTs)) + tdSql.execute("insert into subt (ts, a, c) values (%d, 1, 'c-3')" % (startTs - 3)) + tdSql.execute("insert into subt (ts, a, c) values (%d, 1, 'c+3')" % (startTs + 3)) + + tdSql.query("select * from subt") + tdSql.checkRows(3) + + tdSql.query("select a,b,c,d from subt") + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, 1) + tdSql.checkData(2, 0, 1) + tdSql.checkData(0, 1, None) + tdSql.checkData(1, 1, None) + tdSql.checkData(2, 1, None) + tdSql.checkData(0, 2, 'c-3') + 
tdSql.checkData(1, 2, 'c+0') + tdSql.checkData(2, 2, 'c+3') + tdSql.checkData(0, 3, None) + tdSql.checkData(1, 3, None) + tdSql.checkData(2, 3, None) + + print("==============step8") + tdSql.execute("insert into subt (ts, b, d) values (%d, 2.0, true)" % (startTs)) + tdSql.execute("insert into subt (ts, b, d) values (%d, 2.0, true)" % (startTs - 3)) + tdSql.execute("insert into subt (ts, b, d) values (%d, 2.0, false)" % (startTs + 3)) + + tdSql.query("select * from subt") + tdSql.checkRows(3) + + tdSql.query("select a,b,c,d from subt") + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(0, 1, 2.0) + tdSql.checkData(1, 1, 2.0) + tdSql.checkData(2, 1, 2.0) + tdSql.checkData(0, 2, None) + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 2, None) + tdSql.checkData(0, 3, 1) + tdSql.checkData(1, 3, 1) + tdSql.checkData(2, 3, 0) + + # restart taosd to commit, and check + self.restartTaosd(); + + tdSql.query("select * from subt") + tdSql.checkRows(3) + + tdSql.query("select a,b,c,d from subt") + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(0, 1, 2.0) + tdSql.checkData(1, 1, 2.0) + tdSql.checkData(2, 1, 2.0) + tdSql.checkData(0, 2, None) + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 2, None) + tdSql.checkData(0, 3, 1) + tdSql.checkData(1, 3, 1) + tdSql.checkData(2, 3, 0) + + + + tdSql.execute("create table ct (ts timestamp, a int, b float, c binary(128))") + + print("==============step9") + insertRows = 20000 + for i in range(0, insertRows): + tdSql.execute("insert into ct values (%d , %d, %d, 'aabbccddeeffgghhiijjkkllmmoonn112233445566778899xxyyzz')" % (startTs + i, i, i)) + + tdSql.query("select * from ct") + tdSql.checkRows(insertRows) + + for i in range(0, insertRows): + tdSql.execute("insert into ct values (%d , %d, %d, 'aabbccddeeffgghhiijjkkllmmoonn112233445566778899xxyyzz')" % (startTs + i, i+insertRows, i+insertRows)) + + tdSql.query("select * from ct") + tdSql.checkRows(insertRows) + + tdSql.query("select a,b from ct limit 3") + tdSql.checkData(0, 0, insertRows+0) + tdSql.checkData(1, 0, insertRows+1) + tdSql.checkData(2, 0, insertRows+2) + + tdSql.checkData(0, 1, insertRows+0) + tdSql.checkData(1, 1, insertRows+1) + tdSql.checkData(2, 1, insertRows+2) + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/update/append_commit_data-0.py b/tests/pytest/update/append_commit_data-0.py new file mode 100644 index 0000000000000000000000000000000000000000..b844a50a086dc52d7edb5250801ee87cf68ee28f --- /dev/null +++ b/tests/pytest/update/append_commit_data-0.py @@ -0,0 +1,84 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + print("==========step1") + print("create table && insert data") + s = 'reset query cache' + tdSql.execute(s) + s = 'drop database if exists db' + tdSql.execute(s) + s = 'create database db' + tdSql.execute(s) + s = 'use db' + tdSql.execute(s) + ret = tdSql.execute('create table t1 (ts timestamp, a int)') + + insertRows = 200 + t0 = 1604298064000 + tdLog.info("insert %d rows" % (insertRows)) + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t1 values (%d , 1)' % + (t0+i)) + print("==========step2") + print("restart to commit ") + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from db.t1") + tdSql.checkRows(insertRows) + for k in range(0,100): + tdLog.info("insert %d rows" % (insertRows)) + for i in range (0,insertRows): + ret = tdSql.execute( + 'insert into db.t1 values(%d,1)' % + (t0+k*200+i) + ) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from db.t1") + tdSql.checkRows(insertRows+200*k) + print("==========step2") + print("insert into another table ") + s = 'use db' + tdSql.execute(s) + ret = tdSql.execute('create table t2 (ts timestamp, a int)') + insertRows = 20000 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t2 values (%d, 1)' % + (t0+i)) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t2") + tdSql.checkRows(insertRows) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/update/append_commit_data.py b/tests/pytest/update/append_commit_data.py new file mode 100644 index 0000000000000000000000000000000000000000..3169b748e0843c720beb54eced28a14f1ca747a6 --- /dev/null +++ b/tests/pytest/update/append_commit_data.py @@ -0,0 +1,84 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + print("==========step1") + print("create table && insert data") + s = 'reset query cache' + tdSql.execute(s) + s = 'drop database if exists db' + tdSql.execute(s) + s = 'create database db update 1' + tdSql.execute(s) + s = 'use db' + tdSql.execute(s) + ret = tdSql.execute('create table t1 (ts timestamp, a int)') + + insertRows = 200 + t0 = 1604298064000 + tdLog.info("insert %d rows" % (insertRows)) + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t1 values (%d , 1)' % + (t0+i)) + print("==========step2") + print("restart to commit ") + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from db.t1") + tdSql.checkRows(insertRows) + for k in range(0,100): + tdLog.info("insert %d rows" % (insertRows)) + for i in range (0,insertRows): + ret = tdSql.execute( + 'insert into db.t1 values(%d,1)' % + (t0+k*200+i) + ) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from db.t1") + tdSql.checkRows(insertRows+200*k) + print("==========step2") + print("insert into another table ") + s = 'use db' + tdSql.execute(s) + ret = tdSql.execute('create table t2 (ts timestamp, a int)') + insertRows = 20000 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t2 values (%d, 1)' % + (t0+i)) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t2") + tdSql.checkRows(insertRows) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/update/append_commit_last-0.py b/tests/pytest/update/append_commit_last-0.py new file mode 100644 index 0000000000000000000000000000000000000000..c884207f2bba5dd0da09cf4aae501d27caef7aab --- /dev/null +++ b/tests/pytest/update/append_commit_last-0.py @@ -0,0 +1,90 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.ts = 1604298064000 + + def restartTaosd(self): + tdDnodes.stop(1) + tdDnodes.startWithoutSleep(1) + tdSql.execute("use db") + + def run(self): + tdSql.prepare() + + print("==============step1") + tdSql.execute("create table t1 (ts timestamp, a int)") + + for i in range(10): + tdSql.execute("insert into t1 values(%d, 1)" % (self.ts + i)) + self.restartTaosd() + tdSql.query("select * from t1") + tdSql.checkRows(i + 1) + tdSql.query("select sum(a) from t1") + tdSql.checkData(0, 0, i + 1) + + print("==============step2") + tdSql.execute("create table t2 (ts timestamp, a int)") + tdSql.execute("insert into t2 values(%d, 1)" % self.ts) + self.restartTaosd() + tdSql.query("select * from t2") + tdSql.checkRows(1) + tdSql.checkData(0, 1, 1) + + for i in range(1, 151): + tdSql.execute("insert into t2 values(%d, 1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t2") + tdSql.checkRows(151) + tdSql.query("select sum(a) from t2") + tdSql.checkData(0, 0, 151) + + + print("==============step3") + tdSql.execute("create table t3 (ts timestamp, a int)") + tdSql.execute("insert into t3 values(%d, 1)" % self.ts) + self.restartTaosd() + tdSql.query("select * from t3") + tdSql.checkRows(1) + tdSql.checkData(0, 1, 1) + + for i in range(8): + for j in range(1, 11): + tdSql.execute("insert into t3 values(%d, 1)" % (self.ts + i * 10 + j)) + + self.restartTaosd() + tdSql.query("select * from t3") + tdSql.checkRows(81) + tdSql.query("select sum(a) from t3") + tdSql.checkData(0, 0, 81) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/update/append_commit_last.py b/tests/pytest/update/append_commit_last.py new file mode 100644 index 0000000000000000000000000000000000000000..013983f9402292d03d26bd998c96eaf39b26a8fd --- /dev/null +++ b/tests/pytest/update/append_commit_last.py @@ -0,0 +1,85 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.ts = 1604298064000 + + def restartTaosd(self): + tdDnodes.stop(1) + tdDnodes.startWithoutSleep(1) + tdSql.execute("use udb") + + def run(self): + tdSql.prepare() + + print("==============step1") + tdSql.execute("create database udb update 1") + tdSql.execute("use udb") + tdSql.execute("create table t1 (ts timestamp, a int)") + + for i in range(10): + tdSql.execute("insert into t1 values(%d, 1)" % (self.ts + i)) + self.restartTaosd() + tdSql.query("select * from t1") + tdSql.checkRows(i + 1) + + + print("==============step2") + tdSql.execute("create table t2 (ts timestamp, a int)") + tdSql.execute("insert into t2 values(%d, 1)" % self.ts) + self.restartTaosd() + tdSql.query("select * from t2") + tdSql.checkRows(1) + + for i in range(1, 151): + tdSql.execute("insert into t2 values(%d, 1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t2") + tdSql.checkRows(151) + + + print("==============step3") + tdSql.execute("create table t3 (ts timestamp, a int)") + tdSql.execute("insert into t3 values(%d, 1)" % self.ts) + self.restartTaosd() + tdSql.query("select * from t3") + tdSql.checkRows(1) + + for i in range(8): + for j in range(1, 11): + tdSql.execute("insert into t3 values(%d, 1)" % (self.ts + i * 10 + j)) + + self.restartTaosd() + tdSql.query("select * from t3") + tdSql.checkRows(81) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/update/merge_commit_data-0.py b/tests/pytest/update/merge_commit_data-0.py new file mode 100644 index 0000000000000000000000000000000000000000..14d435f7f20d9e04565fdb7036da043d948b1dcf --- /dev/null +++ b/tests/pytest/update/merge_commit_data-0.py @@ -0,0 +1,351 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + print("==========step1") + print("UPDATE THE WHOLE DATA BLOCK REPEATEDLY") + s = 'reset query cache' + tdSql.execute(s) + s = 'drop database if exists db' + tdSql.execute(s) + s = 'create database db days 30' + tdSql.execute(s) + s = 'use db' + tdSql.execute(s) + ret = tdSql.execute('create table t1 (ts timestamp, a int)') + + insertRows = 200 + t0 = 1603152000000 + tdLog.info("insert %d rows" % (insertRows)) + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t1 values (%d , 1)' % + (t0 + i)) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t1") + tdSql.checkRows(insertRows) + + for k in range(0,10): + for i in range (0,insertRows): + ret = tdSql.execute( + 'insert into t1 values(%d,1)' % + (t0+i) + ) + tdSql.query("select * from t1") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t1") + tdSql.checkRows(insertRows) + print("==========step2") + print("PREPEND DATA ") + ret = tdSql.execute('create table t2 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t2 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t2") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t2") + tdSql.checkRows(insertRows) + for i in range(-100,0): + ret = tdSql.execute( + 'insert into t2 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t2") + tdSql.checkRows(insertRows+100) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t2") + tdSql.checkRows(insertRows+100) + print("==========step3") + print("PREPEND MASSIVE DATA ") + ret = tdSql.execute('create table t3 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t3 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t3") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t3") + tdSql.checkRows(insertRows) + for i in range(-6000,0): + ret = tdSql.execute( + 'insert into t3 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t3") + tdSql.checkRows(insertRows+6000) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t3") + tdSql.checkRows(insertRows+6000) + print("==========step4") + print("APPEND DATA") + ret = tdSql.execute('create table t4 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t4 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t4") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t4") + tdSql.checkRows(insertRows) + for i in range(0,100): + ret = tdSql.execute( + 'insert into t4 values (%d , 1)' % + (t0+200+i)) + tdSql.query("select * from t4") + tdSql.checkRows(insertRows+100) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t4") + tdSql.checkRows(insertRows+100) + print("==========step5") + 
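+        # step5 appends 6000 rows immediately after the last existing timestamp, so t5 is expected to hold 200 + 6000 rows both before and after the restart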
print("APPEND DATA") + ret = tdSql.execute('create table t5 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t5 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t5") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t5") + tdSql.checkRows(insertRows) + for i in range(0,6000): + ret = tdSql.execute( + 'insert into t5 values (%d , 1)' % + (t0+200+i)) + tdSql.query("select * from t5") + tdSql.checkRows(insertRows+6000) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t5") + tdSql.checkRows(insertRows+6000) + print("==========step6") + print("UPDATE BLOCK IN TWO STEP") + ret = tdSql.execute('create table t6 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t6 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t6") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t6") + tdSql.checkRows(insertRows) + for i in range(0,100): + ret = tdSql.execute( + 'insert into t6 values (%d , 2)' % + (t0+i)) + tdSql.query("select * from t6") + tdSql.checkRows(insertRows) + tdSql.query("select sum(a) from t6") + tdSql.checkData(0,0,'200') + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t6") + tdSql.checkRows(insertRows) + tdSql.query("select sum(a) from t6") + tdSql.checkData(0,0,'200') + for i in range(0,200): + ret = tdSql.execute( + 'insert into t6 values (%d , 2)' % + (t0+i)) + tdSql.query("select * from t6") + tdSql.checkRows(insertRows) + tdSql.query("select sum(a) from t6") + tdSql.checkData(0,0,'200') + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t6") + tdSql.checkRows(insertRows) + tdSql.query("select sum(a) from t6") + tdSql.checkData(0,0,'200') + print("==========step7") + print("UPDATE LAST HALF AND INSERT LITTLE DATA") + ret = tdSql.execute('create table t7 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t7 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t7") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t7") + tdSql.checkRows(insertRows) + for i in range(100,300): + ret = tdSql.execute( + 'insert into t7 values (%d , 2)' % + (t0+i)) + tdSql.query("select * from t7") + tdSql.checkRows(300) + tdSql.query("select sum(a) from t7") + tdSql.checkData(0,0,'400') + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t7") + tdSql.checkRows(300) + tdSql.query("select sum(a) from t7") + tdSql.checkData(0,0,'400') + print("==========step8") + print("UPDATE LAST HALF AND INSERT MASSIVE DATA") + ret = tdSql.execute('create table t8 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t8 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t8") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t8") + tdSql.checkRows(insertRows) + for i in range(6000): + ret = tdSql.execute( + 'insert into t8 values (%d , 2)' % + (t0+i)) + tdSql.query("select * from t8") + tdSql.checkRows(6000) + tdSql.query("select sum(a) from t8") + tdSql.checkData(0,0,'11800') + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t8") + tdSql.checkRows(6000) + tdSql.query("select sum(a) from t8") + tdSql.checkData(0,0,'11800') + print("==========step9") + print("UPDATE FIRST HALF AND 
PREPEND LITTLE DATA") + ret = tdSql.execute('create table t9 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t9 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t9") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t9") + tdSql.checkRows(insertRows) + for i in range(-100,100): + ret = tdSql.execute( + 'insert into t9 values (%d , 2)' % + (t0+i)) + tdSql.query("select * from t9") + tdSql.checkRows(300) + tdSql.query("select sum(a) from t9") + tdSql.checkData(0,0,'400') + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t9") + tdSql.checkRows(300) + tdSql.query("select sum(a) from t9") + tdSql.checkData(0,0,'400') + print("==========step10") + print("UPDATE FIRST HALF AND PREPEND MASSIVE DATA") + ret = tdSql.execute('create table t10 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t10 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t10") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t10") + tdSql.checkRows(insertRows) + for i in range(-6000,100): + ret = tdSql.execute( + 'insert into t10 values (%d , 2)' % + (t0+i)) + tdSql.query("select * from t10") + tdSql.checkRows(6200) + tdSql.query("select sum(a) from t10") + tdSql.checkData(0,0,'12200') + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t10") + tdSql.checkRows(6200) + tdSql.query("select sum(a) from t10") + tdSql.checkData(0,0,'12200') + print("==========step11") + print("UPDATE FIRST HALF AND APPEND MASSIVE DATA") + ret = tdSql.execute('create table t11 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t11 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t11") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t11") + tdSql.checkRows(insertRows) + for i in range(100): + ret = tdSql.execute( + 'insert into t11 values (%d , 2)' % + (t0+i)) + for i in range(200,6000): + ret = tdSql.execute( + 'insert into t11 values (%d , 2)' % + (t0+i)) + tdSql.query("select * from t11") + tdSql.checkRows(6000) + tdSql.query("select sum(a) from t11") + tdSql.checkData(0,0,'11800') + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t11") + tdSql.checkRows(6000) + tdSql.query("select sum(a) from t11") + tdSql.checkData(0,0,'11800') + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/update/merge_commit_data.py b/tests/pytest/update/merge_commit_data.py new file mode 100644 index 0000000000000000000000000000000000000000..4fb6765361e8099acb0f1f861623a88fd6b8e466 --- /dev/null +++ b/tests/pytest/update/merge_commit_data.py @@ -0,0 +1,351 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + print("==========step1") + print("UPDATE THE WHOLE DATA BLOCK REPEATEDLY") + s = 'reset query cache' + tdSql.execute(s) + s = 'drop database if exists db' + tdSql.execute(s) + s = 'create database db update 1 days 30' + tdSql.execute(s) + s = 'use db' + tdSql.execute(s) + ret = tdSql.execute('create table t1 (ts timestamp, a int)') + + insertRows = 200 + t0 = 1603152000000 + tdLog.info("insert %d rows" % (insertRows)) + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t1 values (%d , 1)' % + (t0 + i)) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t1") + tdSql.checkRows(insertRows) + + for k in range(0,10): + for i in range (0,insertRows): + ret = tdSql.execute( + 'insert into t1 values(%d,1)' % + (t0+i) + ) + tdSql.query("select * from t1") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t1") + tdSql.checkRows(insertRows) + print("==========step2") + print("PREPEND DATA ") + ret = tdSql.execute('create table t2 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t2 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t2") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t2") + tdSql.checkRows(insertRows) + for i in range(-100,0): + ret = tdSql.execute( + 'insert into t2 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t2") + tdSql.checkRows(insertRows+100) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t2") + tdSql.checkRows(insertRows+100) + print("==========step3") + print("PREPEND MASSIVE DATA ") + ret = tdSql.execute('create table t3 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t3 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t3") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t3") + tdSql.checkRows(insertRows) + for i in range(-6000,0): + ret = tdSql.execute( + 'insert into t3 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t3") + tdSql.checkRows(insertRows+6000) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t3") + tdSql.checkRows(insertRows+6000) + print("==========step4") + print("APPEND DATA") + ret = tdSql.execute('create table t4 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t4 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t4") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t4") + tdSql.checkRows(insertRows) + for i in range(0,100): + ret = tdSql.execute( + 'insert into t4 values (%d , 1)' % + (t0+200+i)) + tdSql.query("select * from t4") + tdSql.checkRows(insertRows+100) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t4") + tdSql.checkRows(insertRows+100) + 
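+        # appending past the old end is unaffected by update mode: the 100 new rows leave t4 with 300 rows before and after the restart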
print("==========step5") + print("APPEND DATA") + ret = tdSql.execute('create table t5 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t5 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t5") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t5") + tdSql.checkRows(insertRows) + for i in range(0,6000): + ret = tdSql.execute( + 'insert into t5 values (%d , 1)' % + (t0+200+i)) + tdSql.query("select * from t5") + tdSql.checkRows(insertRows+6000) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t5") + tdSql.checkRows(insertRows+6000) + print("==========step6") + print("UPDATE BLOCK IN TWO STEP") + ret = tdSql.execute('create table t6 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t6 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t6") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t6") + tdSql.checkRows(insertRows) + for i in range(0,100): + ret = tdSql.execute( + 'insert into t6 values (%d , 2)' % + (t0+i)) + tdSql.query("select * from t6") + tdSql.checkRows(insertRows) + tdSql.query("select sum(a) from t6") + tdSql.checkData(0,0,'300') + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t6") + tdSql.checkRows(insertRows) + tdSql.query("select sum(a) from t6") + tdSql.checkData(0,0,'300') + for i in range(0,200): + ret = tdSql.execute( + 'insert into t6 values (%d , 2)' % + (t0+i)) + tdSql.query("select * from t6") + tdSql.checkRows(insertRows) + tdSql.query("select sum(a) from t6") + tdSql.checkData(0,0,'400') + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t6") + tdSql.checkRows(insertRows) + tdSql.query("select sum(a) from t6") + tdSql.checkData(0,0,'400') + print("==========step7") + print("UPDATE LAST HALF AND INSERT LITTLE DATA") + ret = tdSql.execute('create table t7 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t7 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t7") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t7") + tdSql.checkRows(insertRows) + for i in range(100,300): + ret = tdSql.execute( + 'insert into t7 values (%d , 2)' % + (t0+i)) + tdSql.query("select * from t7") + tdSql.checkRows(300) + tdSql.query("select sum(a) from t7") + tdSql.checkData(0,0,'500') + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t7") + tdSql.checkRows(300) + tdSql.query("select sum(a) from t7") + tdSql.checkData(0,0,'500') + print("==========step8") + print("UPDATE LAST HALF AND INSERT MASSIVE DATA") + ret = tdSql.execute('create table t8 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t8 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t8") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t8") + tdSql.checkRows(insertRows) + for i in range(6000): + ret = tdSql.execute( + 'insert into t8 values (%d , 2)' % + (t0+i)) + tdSql.query("select * from t8") + tdSql.checkRows(6000) + tdSql.query("select sum(a) from t8") + tdSql.checkData(0,0,'12000') + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t8") + tdSql.checkRows(6000) + tdSql.query("select sum(a) from t8") + tdSql.checkData(0,0,'12000') + print("==========step9") + 
print("UPDATE FIRST HALF AND PREPEND LITTLE DATA") + ret = tdSql.execute('create table t9 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t9 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t9") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t9") + tdSql.checkRows(insertRows) + for i in range(-100,100): + ret = tdSql.execute( + 'insert into t9 values (%d , 2)' % + (t0+i)) + tdSql.query("select * from t9") + tdSql.checkRows(300) + tdSql.query("select sum(a) from t9") + tdSql.checkData(0,0,'500') + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t9") + tdSql.checkRows(300) + tdSql.query("select sum(a) from t9") + tdSql.checkData(0,0,'500') + print("==========step10") + print("UPDATE FIRST HALF AND PREPEND MASSIVE DATA") + ret = tdSql.execute('create table t10 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t10 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t10") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t10") + tdSql.checkRows(insertRows) + for i in range(-6000,100): + ret = tdSql.execute( + 'insert into t10 values (%d , 2)' % + (t0+i)) + tdSql.query("select * from t10") + tdSql.checkRows(6200) + tdSql.query("select sum(a) from t10") + tdSql.checkData(0,0,'12300') + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t10") + tdSql.checkRows(6200) + tdSql.query("select sum(a) from t10") + tdSql.checkData(0,0,'12300') + print("==========step11") + print("UPDATE FIRST HALF AND APPEND MASSIVE DATA") + ret = tdSql.execute('create table t11 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t11 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t11") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t11") + tdSql.checkRows(insertRows) + for i in range(100): + ret = tdSql.execute( + 'insert into t11 values (%d , 2)' % + (t0+i)) + for i in range(200,6000): + ret = tdSql.execute( + 'insert into t11 values (%d , 2)' % + (t0+i)) + tdSql.query("select * from t11") + tdSql.checkRows(6000) + tdSql.query("select sum(a) from t11") + tdSql.checkData(0,0,'11900') + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t11") + tdSql.checkRows(6000) + tdSql.query("select sum(a) from t11") + tdSql.checkData(0,0,'11900') + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/update/merge_commit_data2.py b/tests/pytest/update/merge_commit_data2.py new file mode 100644 index 0000000000000000000000000000000000000000..3f0fc718ad83244353bf88da905e6ac0ff800cb5 --- /dev/null +++ b/tests/pytest/update/merge_commit_data2.py @@ -0,0 +1,352 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import time + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + + def restart_taosd(self,db): + tdDnodes.stop(1) + tdDnodes.startWithoutSleep(1) + tdSql.execute("use %s;" % db) + + def date_to_timestamp_microseconds(self, date): + datetime_obj = datetime.strptime(date, "%Y-%m-%d %H:%M:%S.%f") + obj_stamp = int(time.mktime(datetime_obj.timetuple()) * 1000.0 + datetime_obj.microsecond / 1000.0) + return obj_stamp + + def timestamp_microseconds_to_date(self, timestamp): + d = datetime.datetime.fromtimestamp(timestamp/1000) + str1 = d.strftime("%Y-%m-%d %H:%M:%S.%f") + return str1 + + + + def run(self): + print("==========step1") + print("create table && insert data") + sql = 'reset query cache' + tdSql.execute(sql) + sql = 'drop database if exists db' + tdSql.execute(sql) + sql = 'create database db update 1 days 30;' + tdSql.execute(sql) + sql = 'use db;' + tdSql.execute(sql) + tdSql.execute('create table t1 (ts timestamp, a int)') + + + print("==================================1 start") + insert_rows = 200 + t0 = 1603152000000 + tdLog.info("insert %d rows" % insert_rows) + for i in range(insert_rows): + tdSql.execute('insert into t1 values (%d , 1)' %(t0+i)) + print("==========step2") + print("restart to commit ") + self.restart_taosd('db') + + print('check query result after restart') + tdSql.query('select * from db.t1;') + for i in range(insert_rows): + tdSql.checkData(i, 1, 1) + + print("==========step3") + print('insert data') + for i in range(insert_rows): + tdSql.execute('insert into t1 values (%d , 1)' %(t0+i+5000)) + print('check query result before restart') + tdSql.query('select * from db.t1;') + for i in range(insert_rows, insert_rows*2): + tdSql.checkData(i, 1, 1) + + self.restart_taosd('db') + print('check query result after restart') + tdSql.query('select * from db.t1;') + for i in range(insert_rows, insert_rows*2): + tdSql.checkData(i, 1, 1) + + print("==========step4") + print('insert data') + for i in range(insert_rows): + tdSql.execute('insert into t1 values (%d , 2)' %(t0+i)) + for i in range(insert_rows): + tdSql.execute('insert into t1 values (%d , 1)' %(t0+i+5000)) + + print('check query result before restart') + tdSql.query('select * from db.t1;') + print(tdSql.queryResult) + for i in range(insert_rows): + tdSql.checkData(i, 1, 2) + for i in range(insert_rows, insert_rows*2): + tdSql.checkData(i, 1, 1) + + print('check query result after restart') + self.restart_taosd('db') + tdSql.query('select * from db.t1;') + # print(tdSql.queryResult) + for i in range(insert_rows): + tdSql.checkData(i, 1, 2) + for i in range(insert_rows, insert_rows*2): + tdSql.checkData(i, 1, 1) + + print("==================================2 start") + print("==========step1") + print("create table && insert data") + tdSql.execute('create table t2 (ts timestamp, a int)') + insert_rows = 200 + t0 = 1603152000000 + tdLog.info("insert %d rows" % insert_rows) + for i in range(insert_rows): + tdSql.execute('insert into t2 values (%d , 1)' %(t0+i)) + print('restart to commit') + self.restart_taosd('db') + for i in 
range(insert_rows): + tdSql.execute('insert into t2 values (%d , 1)' %(t0+i+5000)) + print('restart to commit') + self.restart_taosd('db') + + for k in range(10): + for i in range(10): + tdSql.execute('insert into t2 values (%d , 1)' %(t0 + 200 + k * 10 + i)) + print('insert into t2 values (%d , 1)' %(t0 + 200 + k * 10 + i)) + + + print("==========step2") + print('check query result before restart') + tdSql.query('select * from db.t2;') + for i in range(insert_rows*2+100): + tdSql.checkData(i, 1, 1) + # print(tdSql.queryResult) + print('restart to commit') + self.restart_taosd('db') + print('check query result after restart') + tdSql.query('select * from db.t2;') + for i in range(insert_rows*2+100): + tdSql.checkData(i, 1, 1) + + + print("==================================3 start") + print("==========step1") + print("create table && insert data") + tdSql.execute('create table t3 (ts timestamp, a int)') + insert_rows = 200 + t0 = 1603152000000 + tdLog.info("insert %d rows" % insert_rows) + for i in range(insert_rows): + tdSql.execute('insert into t3 values (%d , 1)' %(t0+i)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(insert_rows): + tdSql.execute('insert into t3 values (%d , 1)' %(t0+i+5000)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(5200): + tdSql.execute('insert into t3 values (%d , 2)' %(t0+i)) + + print("==========step2") + print('check query result before restart') + tdSql.query('select * from db.t3;') + for i in range(5200): + tdSql.checkData(i, 1, 2) + # print(tdSql.queryResult) + print('restart to commit') + self.restart_taosd('db') + print('check query result after restart') + tdSql.query('select * from db.t3;') + for i in range(5200): + tdSql.checkData(i, 1, 2) + + print("==================================4 start") + print("==========step1") + print("create table && insert data") + tdSql.execute('create table t4 (ts timestamp, a int)') + insert_rows = 200 + t0 = 1603152000000 + tdLog.info("insert %d rows" % insert_rows) + for i in range(insert_rows): + tdSql.execute('insert into t4 values (%d , 1)' %(t0+i)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(insert_rows): + tdSql.execute('insert into t4 values (%d , 1)' %(t0+i+5000)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(100): + tdSql.execute('insert into t4 values (%d , 2)' %(t0+i)) + + for i in range(200, 5000): + tdSql.execute('insert into t4 values (%d , 2)' %(t0+i)) + + for i in range(100): + tdSql.execute('insert into t4 values (%d , 1)' %(t0+i+5000)) + + print('check query result before restart') + tdSql.query('select * from db.t4;') + for i in range(100): + tdSql.checkData(i, 1, 2) + for i in range(100, 200): + tdSql.checkData(i, 1, 1) + for i in range(200, 5000): + tdSql.checkData(i, 1, 2) + for i in range(5000, 5200): + tdSql.checkData(i, 1, 1) + + print('check query result after restart') + self.restart_taosd('db') + tdSql.query('select * from db.t4;') + for i in range(100): + tdSql.checkData(i, 1, 2) + for i in range(100, 200): + tdSql.checkData(i, 1, 1) + for i in range(200, 5000): + tdSql.checkData(i, 1, 2) + for i in range(5000, 5200): + tdSql.checkData(i, 1, 1) + + print("==================================5 start") + print("==========step1") + print("create table && insert data") + tdSql.execute('create table t5 (ts timestamp, a int)') + insert_rows = 200 + t0 = 1603152000000 + tdLog.info("insert %d rows" % insert_rows) + for i in range(insert_rows): + tdSql.execute('insert into t5 values (%d 
, 1)' %(t0+i)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(insert_rows): + tdSql.execute('insert into t5 values (%d , 1)' %(t0+i+5000)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(100, 200): + tdSql.execute('insert into t5 values (%d , 2)' %(t0+i)) + + for i in range(200, 5000): + tdSql.execute('insert into t5 values (%d , 2)' %(t0+i)) + + for i in range(100, 200): + tdSql.execute('insert into t5 values (%d , 2)' %(t0+i+5000)) + + print('check query result before restart') + tdSql.query('select * from db.t5;') + for i in range(100): + tdSql.checkData(i, 1, 1) + for i in range(100, 5000): + tdSql.checkData(i, 1, 2) + for i in range(5000, 5100): + tdSql.checkData(i, 1, 1) + for i in range(5100, 5200): + tdSql.checkData(i, 1, 2) + + print('check query result after restart') + self.restart_taosd('db') + tdSql.query('select * from db.t5;') + for i in range(100): + tdSql.checkData(i, 1, 1) + for i in range(100, 5000): + tdSql.checkData(i, 1, 2) + for i in range(5000, 5100): + tdSql.checkData(i, 1, 1) + for i in range(5100, 5200): + tdSql.checkData(i, 1, 2) + + print("==================================6 start") + print("==========step1") + print("create table && insert data") + tdSql.execute('create table t6 (ts timestamp, a int)') + insert_rows = 200 + t0 = 1603152000000 + tdLog.info("insert %d rows" % insert_rows) + for i in range(insert_rows): + tdSql.execute('insert into t6 values (%d , 1)' %(t0+i)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(insert_rows): + tdSql.execute('insert into t6 values (%d , 1)' %(t0+i+5000)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(-1000, 10000): + tdSql.execute('insert into t6 values (%d , 2)' %(t0+i)) + + print('check query result before restart') + tdSql.query('select * from db.t6;') + tdSql.checkRows(11000) + for i in range(11000): + tdSql.checkData(i, 1, 2) + + print('check query result after restart') + self.restart_taosd('db') + tdSql.query('select * from db.t6;') + tdSql.checkRows(11000) + for i in range(11000): + tdSql.checkData(i, 1, 2) + + + print("==================================7 start") + print("==========step1") + print("create table && insert data") + tdSql.execute('create table t7 (ts timestamp, a int)') + insert_rows = 200 + t0 = 1603152000000 + tdLog.info("insert %d rows" % insert_rows) + for i in range(insert_rows): + tdSql.execute('insert into t7 values (%d , 1)' %(t0+i)) + + for i in range(insert_rows): + tdSql.execute('insert into t7 values (%d , 1)' %(t0+i+5000)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(-1000, 10000): + tdSql.execute('insert into t7 values (%d , 2)' %(t0+i)) + + print('check query result before restart') + tdSql.query('select * from db.t7;') + tdSql.checkRows(11000) + for i in range(11000): + tdSql.checkData(i, 1, 2) + + print('check query result after restart') + self.restart_taosd('db') + tdSql.query('select * from db.t7;') + tdSql.checkRows(11000) + for i in range(11000): + tdSql.checkData(i, 1, 2) + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/update/merge_commit_data2_update0.py b/tests/pytest/update/merge_commit_data2_update0.py new file mode 100644 index 0000000000000000000000000000000000000000..def50e04661b1752668202359eec7dd89df9b6f0 --- /dev/null +++ 
b/tests/pytest/update/merge_commit_data2_update0.py @@ -0,0 +1,384 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import time + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + + def restart_taosd(self,db): + tdDnodes.stop(1) + tdDnodes.startWithoutSleep(1) + tdSql.execute("use %s;" % db) + + def date_to_timestamp_microseconds(self, date): + datetime_obj = datetime.strptime(date, "%Y-%m-%d %H:%M:%S.%f") + obj_stamp = int(time.mktime(datetime_obj.timetuple()) * 1000.0 + datetime_obj.microsecond / 1000.0) + return obj_stamp + + def timestamp_microseconds_to_date(self, timestamp): + d = datetime.datetime.fromtimestamp(timestamp/1000) + str1 = d.strftime("%Y-%m-%d %H:%M:%S.%f") + return str1 + + + + def run(self): + print("==========step1") + print("create table && insert data") + sql = 'reset query cache' + tdSql.execute(sql) + sql = 'drop database if exists db' + tdSql.execute(sql) + sql = 'create database db update 0 days 30;' + tdSql.execute(sql) + sql = 'use db;' + tdSql.execute(sql) + tdSql.execute('create table t1 (ts timestamp, a int)') + + + print("==================================1 start") + insert_rows = 200 + t0 = 1603152000000 + tdLog.info("insert %d rows" % insert_rows) + for i in range(insert_rows): + tdSql.execute('insert into t1 values (%d , 1)' %(t0+i)) + print("==========step2") + print("restart to commit ") + self.restart_taosd('db') + + print('check query result after restart') + tdSql.query('select * from db.t1;') + for i in range(insert_rows): + tdSql.checkData(i, 1, 1) + + print("==========step3") + print('insert data') + for i in range(insert_rows): + tdSql.execute('insert into t1 values (%d , 1)' %(t0+i+5000)) + print('check query result before restart') + tdSql.query('select * from db.t1;') + for i in range(insert_rows, insert_rows*2): + tdSql.checkData(i, 1, 1) + + self.restart_taosd('db') + print('check query result after restart') + tdSql.query('select * from db.t1;') + for i in range(insert_rows, insert_rows*2): + tdSql.checkData(i, 1, 1) + + print("==========step4") + print('insert data') + for i in range(insert_rows): + tdSql.execute('insert into t1 values (%d , 2)' %(t0+i)) + for i in range(insert_rows): + tdSql.execute('insert into t1 values (%d , 1)' %(t0+i+5000)) + + print('check query result before restart') + tdSql.query('select * from db.t1;') + print(tdSql.queryResult) + for i in range(insert_rows): + tdSql.checkData(i, 1, 1) + for i in range(insert_rows, insert_rows*2): + tdSql.checkData(i, 1, 1) + + print('check query result after restart') + self.restart_taosd('db') + tdSql.query('select * from db.t1;') + # print(tdSql.queryResult) + for i in range(insert_rows): + tdSql.checkData(i, 1, 1) + for i in range(insert_rows, insert_rows*2): + tdSql.checkData(i, 1, 1) + + print("==================================2 start") + print("==========step1") + print("create table && insert data") + 
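+        # as step4 above verified, this database ("update 0") ignores writes to already existing timestamps; the remaining sections build on that behaviour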
tdSql.execute('create table t2 (ts timestamp, a int)') + insert_rows = 200 + t0 = 1603152000000 + tdLog.info("insert %d rows" % insert_rows) + for i in range(insert_rows): + tdSql.execute('insert into t2 values (%d , 1)' %(t0+i)) + print('restart to commit') + self.restart_taosd('db') + for i in range(insert_rows): + tdSql.execute('insert into t2 values (%d , 1)' %(t0+i+5000)) + print('restart to commit') + self.restart_taosd('db') + + for k in range(10): + for i in range(10): + tdSql.execute('insert into t2 values (%d , 1)' %(t0 + 200 + k * 10 + i)) + # print('insert into t2 values (%d , 1)' %(t0 + 200 + k * 10 + i)) + + + print("==========step2") + print('check query result before restart') + tdSql.query('select * from db.t2;') + for i in range(insert_rows*2+100): + tdSql.checkData(i, 1, 1) + # print(tdSql.queryResult) + print('restart to commit') + self.restart_taosd('db') + print('check query result after restart') + tdSql.query('select * from db.t2;') + for i in range(insert_rows*2+100): + tdSql.checkData(i, 1, 1) + + + print("==================================3 start") + print("==========step1") + print("create table && insert data") + tdSql.execute('create table t3 (ts timestamp, a int)') + insert_rows = 200 + t0 = 1603152000000 + tdLog.info("insert %d rows" % insert_rows) + for i in range(insert_rows): + tdSql.execute('insert into t3 values (%d , 1)' %(t0+i)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(insert_rows): + tdSql.execute('insert into t3 values (%d , 1)' %(t0+i+5000)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(5200): + tdSql.execute('insert into t3 values (%d , 2)' %(t0+i)) + + print("==========step2") + print('check query result before restart') + tdSql.query('select * from db.t3;') + for i in range(200): + tdSql.checkData(i, 1, 1) + for i in range(200, 5000): + tdSql.checkData(i, 1, 2) + for i in range(5000, 5200): + tdSql.checkData(i, 1, 1) + # print(tdSql.queryResult) + print('restart to commit') + self.restart_taosd('db') + print('check query result after restart') + tdSql.query('select * from db.t3;') + for i in range(200): + tdSql.checkData(i, 1, 1) + for i in range(200, 5000): + tdSql.checkData(i, 1, 2) + for i in range(5000, 5200): + tdSql.checkData(i, 1, 1) + + print("==================================4 start") + print("==========step1") + print("create table && insert data") + tdSql.execute('create table t4 (ts timestamp, a int)') + insert_rows = 200 + t0 = 1603152000000 + tdLog.info("insert %d rows" % insert_rows) + for i in range(insert_rows): + tdSql.execute('insert into t4 values (%d , 1)' %(t0+i)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(insert_rows): + tdSql.execute('insert into t4 values (%d , 1)' %(t0+i+5000)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(100): + tdSql.execute('insert into t4 values (%d , 2)' %(t0+i)) + + for i in range(200, 5000): + tdSql.execute('insert into t4 values (%d , 2)' %(t0+i)) + + for i in range(100): + tdSql.execute('insert into t4 values (%d , 1)' %(t0+i+5000)) + + print('check query result before restart') + tdSql.query('select * from db.t4;') + for i in range(200): + tdSql.checkData(i, 1, 1) + for i in range(200, 5000): + tdSql.checkData(i, 1, 2) + for i in range(5000, 5200): + tdSql.checkData(i, 1, 1) + + print('check query result after restart') + self.restart_taosd('db') + tdSql.query('select * from db.t4;') + for i in range(200): + tdSql.checkData(i, 1, 1) + for i in range(200, 5000): + 
tdSql.checkData(i, 1, 2) + for i in range(5000, 5200): + tdSql.checkData(i, 1, 1) + # + print("==================================5 start") + print("==========step1") + print("create table && insert data") + tdSql.execute('create table t5 (ts timestamp, a int)') + insert_rows = 200 + t0 = 1603152000000 + tdLog.info("insert %d rows" % insert_rows) + for i in range(insert_rows): + tdSql.execute('insert into t5 values (%d , 1)' %(t0+i)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(insert_rows): + tdSql.execute('insert into t5 values (%d , 1)' %(t0+i+5000)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(100, 200): + tdSql.execute('insert into t5 values (%d , 2)' %(t0+i)) + + for i in range(200, 5000): + tdSql.execute('insert into t5 values (%d , 2)' %(t0+i)) + + for i in range(100, 200): + tdSql.execute('insert into t5 values (%d , 2)' %(t0+i+5000)) + + print('check query result before restart') + tdSql.query('select * from db.t5;') + for i in range(200): + tdSql.checkData(i, 1, 1) + for i in range(200, 5000): + tdSql.checkData(i, 1, 2) + for i in range(5000, 5200): + tdSql.checkData(i, 1, 1) + + print('check query result after restart') + self.restart_taosd('db') + tdSql.query('select * from db.t5;') + for i in range(200): + tdSql.checkData(i, 1, 1) + for i in range(200, 5000): + tdSql.checkData(i, 1, 2) + for i in range(5000, 5200): + tdSql.checkData(i, 1, 1) + + print("==================================6 start") + print("==========step1") + print("create table && insert data") + tdSql.execute('create table t6 (ts timestamp, a int)') + insert_rows = 200 + t0 = 1603152000000 + tdLog.info("insert %d rows" % insert_rows) + for i in range(insert_rows): + tdSql.execute('insert into t6 values (%d , 1)' %(t0+i)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(insert_rows): + tdSql.execute('insert into t6 values (%d , 1)' %(t0+i+5000)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(-1000, 10000): + tdSql.execute('insert into t6 values (%d , 2)' %(t0+i)) + + print('check query result before restart') + tdSql.query('select * from db.t6;') + tdSql.checkRows(11000) + for i in range(1000): + tdSql.checkData(i, 1, 2) + for i in range(1000,1200): + tdSql.checkData(i, 1, 1) + for i in range(1200,6000): + tdSql.checkData(i, 1, 2) + for i in range(6000,6200): + tdSql.checkData(i, 1, 1) + for i in range(6200, 11000): + tdSql.checkData(i, 1, 2) + + print('check query result after restart') + self.restart_taosd('db') + tdSql.query('select * from db.t6;') + tdSql.checkRows(11000) + for i in range(1000): + tdSql.checkData(i, 1, 2) + for i in range(1000,1200): + tdSql.checkData(i, 1, 1) + for i in range(1200,6000): + tdSql.checkData(i, 1, 2) + for i in range(6000,6200): + tdSql.checkData(i, 1, 1) + for i in range(6200, 11000): + tdSql.checkData(i, 1, 2) + + + print("==================================7 start") + print("==========step1") + print("create table && insert data") + tdSql.execute('create table t7 (ts timestamp, a int)') + insert_rows = 200 + t0 = 1603152000000 + tdLog.info("insert %d rows" % insert_rows) + for i in range(insert_rows): + tdSql.execute('insert into t7 values (%d , 1)' %(t0+i)) + + for i in range(insert_rows): + tdSql.execute('insert into t7 values (%d , 1)' %(t0+i+5000)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(-1000, 10000): + tdSql.execute('insert into t7 values (%d , 2)' %(t0+i)) + + print('check query result before restart') + 
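+        # t7 ("update 0"): the 400 originally inserted rows keep value 1 and the 10600 newly inserted timestamps carry value 2, giving the 11000 rows checked below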
tdSql.query('select * from db.t7;') + tdSql.checkRows(11000) + for i in range(1000): + tdSql.checkData(i, 1, 2) + for i in range(1000,1200): + tdSql.checkData(i, 1, 1) + for i in range(1200,6000): + tdSql.checkData(i, 1, 2) + for i in range(6000,6200): + tdSql.checkData(i, 1, 1) + for i in range(6200, 11000): + tdSql.checkData(i, 1, 2) + + print('check query result after restart') + self.restart_taosd('db') + tdSql.query('select * from db.t7;') + tdSql.checkRows(11000) + for i in range(1000): + tdSql.checkData(i, 1, 2) + for i in range(1000,1200): + tdSql.checkData(i, 1, 1) + for i in range(1200,6000): + tdSql.checkData(i, 1, 2) + for i in range(6000,6200): + tdSql.checkData(i, 1, 1) + for i in range(6200, 11000): + tdSql.checkData(i, 1, 2) + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/update/merge_commit_last-0.py b/tests/pytest/update/merge_commit_last-0.py new file mode 100644 index 0000000000000000000000000000000000000000..8a247f38091467f69c74c57f00341adde0e15992 --- /dev/null +++ b/tests/pytest/update/merge_commit_last-0.py @@ -0,0 +1,309 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.ts = 1603152000000 + + def restartTaosd(self): + tdDnodes.stop(1) + tdDnodes.startWithoutSleep(1) + tdSql.execute("use db") + + def run(self): + tdSql.prepare() + + print("==============step 1: UPDATE THE LAST RECORD REPEATEDLY") + tdSql.execute("create table t1 (ts timestamp, a int)") + + for i in range(5): + tdSql.execute("insert into t1 values(%d, %d)" % (self.ts, i)) + self.restartTaosd() + tdSql.query("select * from t1") + tdSql.checkRows(1) + tdSql.checkData(0, 1, 0) + + print("==============step 2: UPDATE THE WHOLE LAST BLOCK") + tdSql.execute("create table t2 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t2 values(%d, 1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t2") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t2") + tdSql.checkData(0, 0, 50) + + for i in range(50): + tdSql.execute("insert into t2 values(%d, 2)" % (self.ts + i)) + tdSql.query("select * from t2") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t2") + tdSql.checkData(0, 0, 50) + + self.restartTaosd() + tdSql.query("select * from t2") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t2") + tdSql.checkData(0, 0, 50) + + print("==============step 3: UPDATE PART OF THE LAST BLOCK") + tdSql.execute("create table t3 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t3 values(%d, 1)" % (self.ts + i)) + self.restartTaosd() + tdSql.query("select * from t3") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t3") + tdSql.checkData(0, 0, 50) + + for i 
in range(25): + tdSql.execute("insert into t3 values(%d, 2)" % (self.ts + i)) + + tdSql.query("select * from t3") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t3") + tdSql.checkData(0, 0, 50) + + self.restartTaosd() + tdSql.query("select * from t3") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t3") + tdSql.checkData(0, 0, 50) + + print("==============step 4: UPDATE AND INSERT APPEND AT END OF DATA") + tdSql.execute("create table t4 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t4 values(%d, 1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t4") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t4") + tdSql.checkData(0, 0, 50) + + for i in range(25): + tdSql.execute("insert into t4 values(%d, 2)" % (self.ts + i)) + + for i in range(50, 60): + tdSql.execute("insert into t4 values(%d, 2)" % (self.ts + i)) + + tdSql.query("select * from t4") + tdSql.checkRows(60) + tdSql.query("select sum(a) from t4") + tdSql.checkData(0, 0, 70) + + self.restartTaosd() + tdSql.query("select * from t4") + tdSql.checkRows(60) + tdSql.query("select sum(a) from t4") + tdSql.checkData(0, 0, 70) + + print("==============step 5: UPDATE AND INSERT PREPEND SOME DATA") + tdSql.execute("create table t5 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t5 values(%d, 1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t5") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t5") + tdSql.checkData(0, 0, 50) + + for i in range(-10, 0): + tdSql.execute("insert into t5 values(%d, 2)" % (self.ts + i)) + + for i in range(25): + tdSql.execute("insert into t5 values(%d, 2)" % (self.ts + i)) + + tdSql.query("select * from t5") + tdSql.checkRows(60) + tdSql.query("select sum(a) from t5") + tdSql.checkData(0, 0, 70) + + self.restartTaosd() + tdSql.query("select * from t5") + tdSql.checkRows(60) + tdSql.query("select sum(a) from t5") + tdSql.checkData(0, 0, 70) + + for i in range(-10, 0): + tdSql.execute("insert into t5 values(%d, 3)" % (self.ts + i)) + + for i in range(25, 50): + tdSql.execute("insert into t5 values(%d, 3)" % (self.ts + i)) + + tdSql.query("select * from t5") + tdSql.checkRows(60) + tdSql.query("select sum(a) from t5") + tdSql.checkData(0, 0, 70) + + self.restartTaosd() + tdSql.query("select * from t5") + tdSql.checkRows(60) + tdSql.query("select sum(a) from t5") + tdSql.checkData(0, 0, 70) + + + print("==============step 6: INSERT AHEAD A LOT OF DATA") + tdSql.execute("create table t6 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t6 values(%d, 1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t6") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t6") + tdSql.checkData(0, 0, 50) + + for i in range(-1000, 0): + tdSql.execute("insert into t6 values(%d, 2)" % (self.ts + i)) + + tdSql.query("select * from t6") + tdSql.checkRows(1050) + tdSql.query("select sum(a) from t6") + tdSql.checkData(0, 0, 2050) + + self.restartTaosd() + tdSql.query("select * from t6") + tdSql.checkRows(1050) + tdSql.query("select sum(a) from t6") + tdSql.checkData(0, 0, 2050) + + print("==============step 7: INSERT AHEAD A LOT AND UPDATE") + tdSql.execute("create table t7 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t7 values(%d, 1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t7") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t7") + tdSql.checkData(0, 0, 50) + + for i in range(-1000, 
25): + tdSql.execute("insert into t7 values(%d, 2)" % (self.ts + i)) + + tdSql.query("select * from t7") + tdSql.checkRows(1050) + tdSql.query("select sum(a) from t7") + tdSql.checkData(0, 0, 2050) + + self.restartTaosd() + tdSql.query("select * from t7") + tdSql.checkRows(1050) + tdSql.query("select sum(a) from t7") + tdSql.checkData(0, 0, 2050) + + print("==============step 8: INSERT AFTER A LOT AND UPDATE") + tdSql.execute("create table t8 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t8 values(%d, 1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t8") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t8") + tdSql.checkData(0, 0, 50) + + for i in range(25, 6000): + tdSql.execute("insert into t8 values(%d, 2)" % (self.ts + i)) + + tdSql.query("select * from t8") + tdSql.checkRows(6000) + tdSql.query("select sum(a) from t8") + tdSql.checkData(0, 0, 11950) + + self.restartTaosd() + tdSql.query("select * from t8") + tdSql.checkRows(6000) + tdSql.query("select sum(a) from t8") + tdSql.checkData(0, 0, 11950) + + print("==============step 9: UPDATE ONLY MIDDLE") + tdSql.execute("create table t9 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t9 values(%d, 1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t9") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t9") + tdSql.checkData(0, 0, 50) + + for i in range(20, 30): + tdSql.execute("insert into t9 values(%d, 2)" % (self.ts + i)) + + tdSql.query("select * from t9") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t9") + tdSql.checkData(0, 0, 50) + + self.restartTaosd() + tdSql.query("select * from t9") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t9") + tdSql.checkData(0, 0, 50) + + print("==============step 10: A LOT OF DATA COVER THE WHOLE BLOCK") + tdSql.execute("create table t10 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t10 values(%d, 1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t10") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t10") + tdSql.checkData(0, 0, 50) + + for i in range(-4000, 4000): + tdSql.execute("insert into t10 values(%d, 2)" % (self.ts + i)) + + tdSql.query("select * from t10") + tdSql.checkRows(8000) + tdSql.query("select sum(a) from t10") + tdSql.checkData(0, 0, 15950) + + self.restartTaosd() + tdSql.query("select * from t10") + tdSql.checkRows(8000) + tdSql.query("select sum(a) from t10") + tdSql.checkData(0, 0, 15950) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/update/merge_commit_last.py b/tests/pytest/update/merge_commit_last.py new file mode 100644 index 0000000000000000000000000000000000000000..183cca0a1e40fd995daaed0f271bb5083838a78f --- /dev/null +++ b/tests/pytest/update/merge_commit_last.py @@ -0,0 +1,321 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.ts = 1603152000000 + + def restartTaosd(self): + tdDnodes.stop(1) + tdDnodes.startWithoutSleep(1) + tdSql.execute("use udb") + + def run(self): + tdSql.prepare() + + tdSql.execute("create database udb update 1 days 30") + tdSql.execute("use udb") + + print("==============step 1: UPDATE THE LAST RECORD REPEATEDLY") + tdSql.execute("create table t1 (ts timestamp, a int)") + + for i in range(5): + tdSql.execute("insert into t1 values(%d, %d)" % (self.ts, i)) + self.restartTaosd() + tdSql.query("select * from t1") + tdSql.checkRows(1) + tdSql.checkData(0, 1, i) + + print("==============step 2: UPDATE THE WHOLE LAST BLOCK") + tdSql.execute("create table t2 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t2 values(%d, 1)" % (self.ts + i)) + self.restartTaosd() + tdSql.query("select * from t2") + tdSql.checkRows(50) + for i in range(50): + tdSql.checkData(i, 1, 1) + + for i in range(50): + tdSql.execute("insert into t2 values(%d, 2)" % (self.ts + i)) + tdSql.query("select * from t2") + for i in range(50): + tdSql.checkData(i, 1, 2) + + self.restartTaosd() + tdSql.query("select * from t2") + tdSql.checkRows(50) + for i in range(50): + tdSql.checkData(i, 1, 2) + + print("==============step 3: UPDATE PART OF THE LAST BLOCK") + tdSql.execute("create table t3 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t3 values(%d, 1)" % (self.ts + i)) + self.restartTaosd() + tdSql.query("select * from t3") + tdSql.checkRows(50) + for i in range(50): + tdSql.checkData(i, 1, 1) + + for i in range(25): + tdSql.execute("insert into t3 values(%d, 2)" % (self.ts + i)) + + tdSql.query("select * from t3") + for i in range(25): + tdSql.checkData(i, 1, 2) + for i in range(25, 50): + tdSql.checkData(i, 1, 1) + + self.restartTaosd() + tdSql.query("select * from t3") + tdSql.checkRows(50) + for i in range(25): + tdSql.checkData(i, 1, 2) + for i in range(25, 50): + tdSql.checkData(i, 1, 1) + + print("==============step 4: UPDATE AND INSERT APPEND AT END OF DATA") + tdSql.execute("create table t4 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t4 values(%d, 1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t4") + tdSql.checkRows(50) + for i in range(50): + tdSql.checkData(i, 1, 1) + + for i in range(25): + tdSql.execute("insert into t4 values(%d, 2)" % (self.ts + i)) + + for i in range(50, 60): + tdSql.execute("insert into t4 values(%d, 2)" % (self.ts + i)) + + tdSql.query("select * from t4") + tdSql.checkRows(60) + for i in range(25): + tdSql.checkData(i, 1, 2) + for i in range(25, 50): + tdSql.checkData(i, 1, 1) + for i in range(50, 60): + tdSql.checkData(i, 1, 2) + + self.restartTaosd() + tdSql.query("select * from t4") + tdSql.checkRows(60) + for i in range(25): + tdSql.checkData(i, 1, 2) + for i in range(25, 50): + tdSql.checkData(i, 1, 1) + for i in range(50, 60): + tdSql.checkData(i, 1, 2) + + print("==============step 5: UPDATE AND INSERT PREPEND SOME DATA") + 
tdSql.execute("create table t5 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t5 values(%d, 1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t5") + tdSql.checkRows(50) + for i in range(50): + tdSql.checkData(i, 1, 1) + + for i in range(-10, 0): + tdSql.execute("insert into t5 values(%d, 2)" % (self.ts + i)) + + for i in range(25): + tdSql.execute("insert into t5 values(%d, 2)" % (self.ts + i)) + + tdSql.query("select * from t5") + tdSql.checkRows(60) + tdSql.query("select sum(a) from t5") + tdSql.checkData(0, 0, 95) + + self.restartTaosd() + tdSql.query("select * from t5") + tdSql.checkRows(60) + tdSql.query("select sum(a) from t5") + tdSql.checkData(0, 0, 95) + + for i in range(-10, 0): + tdSql.execute("insert into t5 values(%d, 3)" % (self.ts + i)) + + for i in range(25, 50): + tdSql.execute("insert into t5 values(%d, 3)" % (self.ts + i)) + + tdSql.query("select * from t5") + tdSql.checkRows(60) + tdSql.query("select sum(a) from t5") + tdSql.checkData(0, 0, 155) + + self.restartTaosd() + tdSql.query("select * from t5") + tdSql.checkRows(60) + tdSql.query("select sum(a) from t5") + tdSql.checkData(0, 0, 155) + + + print("==============step 6: INSERT AHEAD A LOT OF DATA") + tdSql.execute("create table t6 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t6 values(%d, 1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t6") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t6") + tdSql.checkData(0, 0, 50) + + for i in range(-1000, 0): + tdSql.execute("insert into t6 values(%d, 2)" % (self.ts + i)) + + tdSql.query("select * from t6") + tdSql.checkRows(1050) + tdSql.query("select sum(a) from t6") + tdSql.checkData(0, 0, 2050) + + self.restartTaosd() + tdSql.query("select * from t6") + tdSql.checkRows(1050) + tdSql.query("select sum(a) from t6") + tdSql.checkData(0, 0, 2050) + + print("==============step 7: INSERT AHEAD A LOT AND UPDATE") + tdSql.execute("create table t7 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t7 values(%d, 1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t7") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t7") + tdSql.checkData(0, 0, 50) + + for i in range(-1000, 25): + tdSql.execute("insert into t7 values(%d, 2)" % (self.ts + i)) + + tdSql.query("select * from t7") + tdSql.checkRows(1050) + tdSql.query("select sum(a) from t7") + tdSql.checkData(0, 0, 2075) + + self.restartTaosd() + tdSql.query("select * from t7") + tdSql.checkRows(1050) + tdSql.query("select sum(a) from t7") + tdSql.checkData(0, 0, 2075) + + print("==============step 8: INSERT AFTER A LOT AND UPDATE") + tdSql.execute("create table t8 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t8 values(%d, 1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t8") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t8") + tdSql.checkData(0, 0, 50) + + for i in range(25, 6000): + tdSql.execute("insert into t8 values(%d, 2)" % (self.ts + i)) + + tdSql.query("select * from t8") + tdSql.checkRows(6000) + tdSql.query("select sum(a) from t8") + tdSql.checkData(0, 0, 11975) + + self.restartTaosd() + tdSql.query("select * from t8") + tdSql.checkRows(6000) + tdSql.query("select sum(a) from t8") + tdSql.checkData(0, 0, 11975) + + print("==============step 9: UPDATE ONLY MIDDLE") + tdSql.execute("create table t9 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t9 values(%d, 
1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t9") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t9") + tdSql.checkData(0, 0, 50) + + for i in range(20, 30): + tdSql.execute("insert into t9 values(%d, 2)" % (self.ts + i)) + + tdSql.query("select * from t9") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t9") + tdSql.checkData(0, 0, 60) + + self.restartTaosd() + tdSql.query("select * from t9") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t9") + tdSql.checkData(0, 0, 60) + + print("==============step 10: A LOT OF DATA COVER THE WHOLE BLOCK") + tdSql.execute("create table t10 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t10 values(%d, 1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t10") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t10") + tdSql.checkData(0, 0, 50) + + for i in range(-4000, 4000): + tdSql.execute("insert into t10 values(%d, 2)" % (self.ts + i)) + + tdSql.query("select * from t10") + tdSql.checkRows(8000) + tdSql.query("select sum(a) from t10") + tdSql.checkData(0, 0, 16000) + + self.restartTaosd() + tdSql.query("select * from t10") + tdSql.checkRows(8000) + tdSql.query("select sum(a) from t10") + tdSql.checkData(0, 0, 16000) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index 1ac492bb3ad2733c7b2ebb46560edbb7204e8951..757399b4a262dff7b11619791d3c82686fb293e8 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -15,6 +15,7 @@ import sys import os import os.path import subprocess +from time import sleep from util.log import * @@ -210,6 +211,7 @@ class TDDnode: (self.index, self.cfgPath)) def getBuildPath(self): + buildPath = "" selfPath = os.path.dirname(os.path.realpath(__file__)) if ("community" in selfPath): @@ -256,6 +258,35 @@ class TDDnode: tdLog.debug("wait 5 seconds for the dnode:%d to start." 
% (self.index)) time.sleep(5) + + def startWithoutSleep(self): + buildPath = self.getBuildPath() + + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + + binPath = buildPath + "/build/bin/taosd" + + if self.deployed == 0: + tdLog.exit("dnode:%d is not deployed" % (self.index)) + + if self.valgrind == 0: + cmd = "nohup %s -c %s > /dev/null 2>&1 & " % ( + binPath, self.cfgDir) + else: + valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes" + + cmd = "nohup %s %s -c %s 2>&1 & " % ( + valgrindCmdline, binPath, self.cfgDir) + + print(cmd) + + if os.system(cmd) != 0: + tdLog.exit(cmd) + self.running = 1 + tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) def stop(self): if self.valgrind == 0: @@ -425,6 +456,10 @@ class TDDnodes: def start(self, index): self.check(index) self.dnodes[index - 1].start() + + def startWithoutSleep(self, index): + self.check(index) + self.dnodes[index - 1].startWithoutSleep() def stop(self, index): self.check(index) diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py index 9abec354c6d58507ff3bcc74d1c0dc03f691440c..b2ed6212fd643c158f7ed6f4cc6cb2449a512a2e 100644 --- a/tests/pytest/util/sql.py +++ b/tests/pytest/util/sql.py @@ -25,7 +25,7 @@ class TDSql: self.queryCols = 0 self.affectedRows = 0 - def init(self, cursor, log=True): + def init(self, cursor, log=False): self.cursor = cursor if (log): diff --git a/tests/pytest/wal/addOldWalTest.py b/tests/pytest/wal/addOldWalTest.py new file mode 100644 index 0000000000000000000000000000000000000000..2f4dcd5ce807cf7bbadfa480af6ed6342058a78a --- /dev/null +++ b/tests/pytest/wal/addOldWalTest.py @@ -0,0 +1,70 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import taos +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def createOldDir(self): + oldDir = tdDnodes.getDnodesRootDir() + "dnode1/data/vnode/vnode2/wal/old" + os.system("sudo mkdir -p %s" % oldDir) + + def createOldDirAndAddWal(self): + oldDir = tdDnodes.getDnodesRootDir() + "dnode1/data/vnode/vnode2/wal/old" + os.system("sudo echo 'test' >> %s/wal" % oldDir) + + + def run(self): + tdSql.prepare() + + tdSql.execute("create table t1(ts timestamp, a int)") + tdSql.execute("insert into t1 values(now, 1)") + + # create old dir only + self.createOldDir() + os.system("sudo kill -9 $(pgrep taosd)") + tdDnodes.start(1) + + tdSql.execute("use db") + tdSql.query("select * from t1") + tdSql.checkRows(1) + tdSql.checkData(0, 1, 1) + + # create old dir and add wal under old dir + self.createOldDir() + self.createOldDirAndAddWal() + os.system("sudo kill -9 $(pgrep taosd)") + tdDnodes.start(1) + + tdSql.query("select * from t1") + tdSql.checkRows(1) + tdSql.checkData(0, 1, 1) + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/script/general/db/nosuchfile.sim b/tests/script/general/db/nosuchfile.sim new file mode 100644 index 0000000000000000000000000000000000000000..98ac4ec012dc694357878a61ca0dbc11259f0a9e --- /dev/null +++ b/tests/script/general/db/nosuchfile.sim @@ -0,0 +1,66 @@ +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c wallevel -v 2 + +print ========== step1 +system sh/exec.sh -n dnode1 -s start +sql connect +sleep 3000 + +print ========== step3 +sql create database d1 +sql create table d1.t1 (t timestamp, i int) +sql insert into d1.t1 values(now+1s, 35) +sql insert into d1.t1 values(now+2s, 34) +sql insert into d1.t1 values(now+3s, 33) +sql insert into d1.t1 values(now+4s, 32) +sql insert into d1.t1 values(now+5s, 31) + +print ========== step4 +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode1 -s start +sleep 3000 + +print ========== step5 +sql select * from d1.t1 order by t desc +print $data01 $data11 $data21 $data31 $data41 +if $data01 != 31 then + return -1 +endi +if $data11 != 32 then + return -1 +endi +if $data21 != 33 then + return -1 +endi +if $data31 != 34 then + return -1 +endi +if $data41 != 35 then + return -1 +endi + +print ========== step6 +system_content rm -rf ../../../sim/dnode1/data/vnode/vnode2/tsdb/data + +print ========== step7 +sql select * from d1.t1 order by t desc +print $data01 $data11 $data21 $data31 $data41 +if $data01 != null then + return -1 +endi +if $data11 != null then + return -1 +endi +if $data21 != null then + return -1 +endi +if $data31 != null then + return -1 +endi +if $data41 != null then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/http/restful_full.sim b/tests/script/general/http/restful_full.sim index 
8d2f1a7c00304c42c91311ae703bcef97aa6ace0..94ecb59f75304d99f48ebdb644be432370f86f2a 100644 --- a/tests/script/general/http/restful_full.sim +++ b/tests/script/general/http/restful_full.sim @@ -81,7 +81,7 @@ print =============== step2 - no db #11 system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:7111/rest/sql print 11-> $system_content -if $system_content != @{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep1,keep2,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","precision","status"],"data":[],"rows":0}@ then +if $system_content != @{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep1,keep2,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","precision","update","status"],"data":[],"rows":0}@ then return -1 endi diff --git a/tests/script/general/insert/basic.sim b/tests/script/general/insert/basic.sim index ba8cff83fa8b470af1b1f7fccf74d0e1d042049e..3f0f25a95ba68c8099c02e6daab933411089a632 100644 --- a/tests/script/general/insert/basic.sim +++ b/tests/script/general/insert/basic.sim @@ -8,8 +8,8 @@ sleep 3000 sql connect $i = 0 -$dbPrefix = tb_in_db -$tbPrefix = tb_in_tb +$dbPrefix = d +$tbPrefix = t $db = $dbPrefix . $i $tb = $tbPrefix . $i @@ -22,28 +22,27 @@ sql create table $tb (ts timestamp, speed int) $x = 0 while $x < 10 - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $cc = $x * 60000 + $ms = 1601481600000 + $cc + + sql insert into $tb values ($ms , $x ) $x = $x + 1 endw print =============== step 2 -sql insert into $tb values (now - 5m , 10) -sql insert into $tb values (now - 6m , 10) -sql insert into $tb values (now - 7m , 10) -sql insert into $tb values (now - 8m , 10) +$x = 0 +while $x < 5 + $cc = $x * 60000 + $ms = 1551481600000 + $cc + + sql insert into $tb values ($ms , $x ) + $x = $x + 1 +endw sql select * from $tb print $rows points data are retrieved -if $rows != 14 then - return -1 -endi - -sql drop database $db -sleep 1000 -sql show databases -if $rows != 0 then +if $rows != 15 then return -1 endi diff --git a/tests/script/general/parser/alter.sim b/tests/script/general/parser/alter.sim index 6b28b20fb87d3983fc93a055505415ffa56ac946..5c15656f246ffa09217a991ca60e127c74353700 100644 --- a/tests/script/general/parser/alter.sim +++ b/tests/script/general/parser/alter.sim @@ -4,7 +4,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c tableMetaKeepTimer -v 3 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $dbPrefix = m_alt_db @@ -56,6 +56,7 @@ if $rows != 2 then endi print data03 = $data03 if $data03 != taos then + print expect taos, actual: $data03 return -1 endi sql drop table tb @@ -113,7 +114,7 @@ endi sql drop table tb sql drop table mt -sleep 3000 +sleep 500 ### ALTER TABLE WHILE STREAMING [TBASE271] #sql create table tb1 (ts timestamp, c1 int, c2 nchar(5), c3 int) #sql create table strm as select count(*), avg(c1), first(c2), sum(c3) from tb1 interval(2s) @@ -133,7 +134,7 @@ sleep 3000 # return -1 #endi #sql alter table tb1 drop column c3 -#sleep 6000 +#sleep 3000 #sql insert into tb1 values (now, 2, 'taos') #sleep 30000 #sql select * from strm @@ -144,9 +145,9 @@ sleep 3000 # return -1 #endi #sql alter table tb1 add column c3 int -#sleep 6000 -#sql insert into tb1 values (now, 3, 'taos', 3); #sleep 3000 +#sql insert into tb1 values 
(now, 3, 'taos', 3); +#sleep 500 #sql select * from strm #if $rows != 3 then # return -1 @@ -185,7 +186,7 @@ sql create database $db sql use $db sql create table mt (ts timestamp, c1 int, c2 nchar(7), c3 int) tags (t1 int) sql create table tb using mt tags(1) -sleep 3000 +sleep 500 sql insert into tb values ('2018-11-01 16:30:00.000', 1, 'insert', 1) sql alter table mt drop column c3 diff --git a/tests/script/general/parser/alter1.sim b/tests/script/general/parser/alter1.sim index 24cf79eae148acaa822974f1d5d681983f72405a..e013242b824a42f9100557f7a9cc267833138e16 100644 --- a/tests/script/general/parser/alter1.sim +++ b/tests/script/general/parser/alter1.sim @@ -3,7 +3,7 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect sql reset query cache @@ -87,7 +87,7 @@ if $data13 != NULL then return -1 endi -sleep 3000 +sleep 500 print ================== insert values into table sql insert into car1 values (now, 1, 1,1 ) (now +1s, 2,2,2,) car2 values (now, 1,3,3) diff --git a/tests/script/general/parser/alter_stable.sim b/tests/script/general/parser/alter_stable.sim index b64c919042296099bf0dbb17eddff67c564a745d..6b3f3a8f53548979f12707cf381d583674114616 100644 --- a/tests/script/general/parser/alter_stable.sim +++ b/tests/script/general/parser/alter_stable.sim @@ -4,7 +4,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c tableMetaKeepTimer -v 3 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect print ========== alter_stable.sim diff --git a/tests/script/general/parser/auto_create_tb.sim b/tests/script/general/parser/auto_create_tb.sim index 64fec4b56d8a6623cd37884e277e0381192410ba..6deaf92a6c8e713a8fb5281ec2f8e4ca4f2d425f 100644 --- a/tests/script/general/parser/auto_create_tb.sim +++ b/tests/script/general/parser/auto_create_tb.sim @@ -4,7 +4,7 @@ system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect print ======================== dnode1 start @@ -208,11 +208,11 @@ endi print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 5000 +sleep 3000 system sh/exec.sh -n dnode1 -s start print ================== server restart completed sql connect -sleep 3000 +sleep 500 sql use $db #### auto create multiple tables @@ -298,7 +298,7 @@ endi sql create table tu(ts timestamp, k int); sql_error create table txu using tu tags(0) values(now, 1); -#[TBASE-675] +print =================> [TBASE-675] sql insert into tu values(1565971200000, 1) (1565971200000,2) (1565971200001, 3)(1565971200001, 4) sql select * from tu if $rows != 2 then diff --git a/tests/script/general/parser/auto_create_tb_drop_tb.sim b/tests/script/general/parser/auto_create_tb_drop_tb.sim index be334bca4ad14930d363160d8e31ff63c5514637..8a429cf91abff583abd104344ec1ba300ca684bb 100644 --- a/tests/script/general/parser/auto_create_tb_drop_tb.sim +++ b/tests/script/general/parser/auto_create_tb_drop_tb.sim @@ -4,7 +4,7 @@ system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4 system sh/cfg.sh -n dnode1 -c ctime -v 30 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $dbPrefix = db @@ -49,7 +49,7 @@ while $t < $tbNum endw print ====== tables created -sleep 60000 +sleep 500 sql drop table tb2 $x = 0 @@ -59,9 
+59,7 @@ while $x < $rowNum $t1 = $t1 . ' sql insert into tb1 using $stb tags( $t1 ) values ( $ts , $x ) $x = $x + 1 -endw - -sleep 6000 +endw $ts = $ts0 + $delta $ts = $ts + 1 diff --git a/tests/script/general/parser/binary_escapeCharacter.sim b/tests/script/general/parser/binary_escapeCharacter.sim index c3c867795a30c6e2e87081e59cfbf5f558a60fb7..e9e61f35bfda688fcd735085672be95a0fcf626f 100644 --- a/tests/script/general/parser/binary_escapeCharacter.sim +++ b/tests/script/general/parser/binary_escapeCharacter.sim @@ -4,7 +4,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c tableMetaKeepTimer -v 3 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect sql drop database if exists ecdb diff --git a/tests/script/general/parser/col_arithmetic_operation.sim b/tests/script/general/parser/col_arithmetic_operation.sim index 3208df95e446f5f06494617fa33a71a5f09ab828..d5ba57e6c7dd5befcacb777a1e22f96ba8dae79d 100644 --- a/tests/script/general/parser/col_arithmetic_operation.sim +++ b/tests/script/general/parser/col_arithmetic_operation.sim @@ -3,7 +3,7 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $dbPrefix = ca_db @@ -34,7 +34,7 @@ while $i < $halfTbNum $tb1 = $tbPrefix . $tbId sql create table $tb using $stb tags( $i ) sql create table $tb1 using $stb tags( $tbId ) - + $x = 0 while $x < $rowNum $xs = $x * $delta @@ -46,8 +46,7 @@ while $i < $halfTbNum $binary = $binary . ' $nchar = 'nchar . $c $nchar = $nchar . ' - sql insert into $tb values ( $ts , $c , $c , $c , $c , $c , $c , true, $binary , $nchar ) - sql insert into $tb1 values ( $ts , NULL , $c , NULL , $c , NULL , $c , NULL, NULL , $nchar ) + sql insert into $tb values ( $ts , $c , $c , $c , $c , $c , $c , true, $binary , $nchar ) $tb1 values ( $ts , NULL , $c , NULL , $c , NULL , $c , NULL, NULL , $nchar ) $x = $x + 1 endw $i = $i + 1 diff --git a/tests/script/general/parser/columnValue.sim b/tests/script/general/parser/columnValue.sim index e905f612156eae7575c4884210f8fee7b91e7556..2c03a3552a26ff8c64640f908b15fcc5d194f61e 100644 --- a/tests/script/general/parser/columnValue.sim +++ b/tests/script/general/parser/columnValue.sim @@ -5,7 +5,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c tableMetaKeepTimer -v 3 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect print ========== columnValues.sim diff --git a/tests/script/general/parser/columnValue_bigint.sim b/tests/script/general/parser/columnValue_bigint.sim index e6839b2fb60eadf8d5c284cef99f47f470723378..3546ca15ee15eba485f1868d4bae5f4c545218f9 100644 --- a/tests/script/general/parser/columnValue_bigint.sim +++ b/tests/script/general/parser/columnValue_bigint.sim @@ -1,4 +1,4 @@ -sleep 3000 +sleep 500 sql connect sql create database if not exists db sql use db diff --git a/tests/script/general/parser/columnValue_bool.sim b/tests/script/general/parser/columnValue_bool.sim index 7f3a6c3d6affaefcc2a8d47963ac065c2dc71b25..d68f37590091ab892599d4017a73b6cbb2f28450 100644 --- a/tests/script/general/parser/columnValue_bool.sim +++ b/tests/script/general/parser/columnValue_bool.sim @@ -1,4 +1,4 @@ -sleep 3000 +sleep 500 sql connect sql create database if not exists db sql use db diff --git a/tests/script/general/parser/columnValue_double.sim b/tests/script/general/parser/columnValue_double.sim index 
733fc52285a3c90dda50e5bb34a03e51daf4b9ae..fd2da37838cbabea9c6cee2e992a6904df132c3e 100644 --- a/tests/script/general/parser/columnValue_double.sim +++ b/tests/script/general/parser/columnValue_double.sim @@ -1,5 +1,5 @@ #### -sleep 3000 +sleep 500 sql connect sql create database if not exists db sql use db diff --git a/tests/script/general/parser/columnValue_float.sim b/tests/script/general/parser/columnValue_float.sim index c71b4b40ee0d799c6faa619cd10b765959074f11..019cf176d0e0f455b5f6b61fcabb501806378c96 100644 --- a/tests/script/general/parser/columnValue_float.sim +++ b/tests/script/general/parser/columnValue_float.sim @@ -1,5 +1,5 @@ #### -sleep 3000 +sleep 500 sql connect sql create database if not exists db sql use db diff --git a/tests/script/general/parser/columnValue_int.sim b/tests/script/general/parser/columnValue_int.sim index 2a909ebf3c64dd487d6d4b1bd5341c59794e8970..1f84df5ca05ef70e95c2db03efe0a670abaff8da 100644 --- a/tests/script/general/parser/columnValue_int.sim +++ b/tests/script/general/parser/columnValue_int.sim @@ -1,4 +1,4 @@ -sleep 3000 +sleep 500 sql connect sql create database if not exists db sql use db diff --git a/tests/script/general/parser/columnValue_smallint.sim b/tests/script/general/parser/columnValue_smallint.sim index cf34a85a2a7d8efb7b1d4e4606cead89055ae3b1..af5c5818717b6c2f01e4726975ceaee4703cdbfb 100644 --- a/tests/script/general/parser/columnValue_smallint.sim +++ b/tests/script/general/parser/columnValue_smallint.sim @@ -1,4 +1,4 @@ -sleep 3000 +sleep 500 sql connect sql create database if not exists db sql use db diff --git a/tests/script/general/parser/columnValue_tinyint.sim b/tests/script/general/parser/columnValue_tinyint.sim index 707242a23ed3eecde6c23413e53b67c5daa83a82..3efe52cc91f2ecb1f22000a3f212ec491b9a16c1 100644 --- a/tests/script/general/parser/columnValue_tinyint.sim +++ b/tests/script/general/parser/columnValue_tinyint.sim @@ -1,4 +1,4 @@ -sleep 3000 +sleep 500 sql connect sql create database if not exists db sql use db diff --git a/tests/script/general/parser/commit.sim b/tests/script/general/parser/commit.sim index 4d85806b690fafdb71707133f97f8fc78942c2c2..c798bf9d7cc21f2c91c8c14e8debe22ad253b72b 100644 --- a/tests/script/general/parser/commit.sim +++ b/tests/script/general/parser/commit.sim @@ -4,7 +4,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c maxTablesperVnode -v 100 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $dbPrefix = sc_db @@ -50,8 +50,7 @@ while $i < $halfNum $binary = $binary . ' $nchar = 'nchar . $c $nchar = $nchar . 
' - sql insert into $tb values ( $ts , $c , $c , $c , $c , $c , $c , true, $binary , $nchar ) - sql insert into $tb1 values ( $ts , $c , NULL , $c , NULL , $c , $c , true, $binary , $nchar ) + sql insert into $tb values ( $ts , $c , $c , $c , $c , $c , $c , true, $binary , $nchar ) $tb1 values ( $ts , $c , NULL , $c , NULL , $c , $c , true, $binary , $nchar ) $x = $x + 1 endw @@ -83,12 +82,12 @@ endw print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 5000 -system sh/exec.sh -n dnode1 -s start sleep 3000 +system sh/exec.sh -n dnode1 -s start +sleep 500 print ================== server restart completed sql connect -sleep 3000 +sleep 500 print ====== select from table and check num of rows returned sql use $db diff --git a/tests/script/general/parser/constCol.sim b/tests/script/general/parser/constCol.sim index 7ae496f1ac60ca7c4eb2b143f4d906c0c18e3726..4a8e443281dbaa0892106a8fe91bbdd6d61c3e8a 100644 --- a/tests/script/general/parser/constCol.sim +++ b/tests/script/general/parser/constCol.sim @@ -347,6 +347,17 @@ if $rows != 3 then return -1 endi +sql select 0.1 + 0.2 from t1 +if $rows != 3 then + return -1 +endi + +print =============================> td-2036 +if $data00 != 0.300000000 then + print expect: 0.300000000, actual:$data00 + return -1 +endi + print ======================udc with normal column group by sql_error select from t1 diff --git a/tests/script/general/parser/create_db.sim b/tests/script/general/parser/create_db.sim index ed6e427fe70fe5cfe65852352ac680f645a3a21f..3b7f24b6d900dc1f130f91a138da81fd708b16fe 100644 --- a/tests/script/general/parser/create_db.sim +++ b/tests/script/general/parser/create_db.sim @@ -5,7 +5,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect print ======================== dnode1 start diff --git a/tests/script/general/parser/create_mt.sim b/tests/script/general/parser/create_mt.sim index f21a83067186dc226bd035fe3ff7ba5cc28675c0..e11f32276187d15334193af688b3c50604df2b32 100644 --- a/tests/script/general/parser/create_mt.sim +++ b/tests/script/general/parser/create_mt.sim @@ -5,7 +5,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect print ======================== dnode1 start diff --git a/tests/script/general/parser/create_tb.sim b/tests/script/general/parser/create_tb.sim index 9d1672fdbef136fccb28d38df967d45132c9d40a..609aad2adbc9611d33c595d33191e5b8a26b3359 100644 --- a/tests/script/general/parser/create_tb.sim +++ b/tests/script/general/parser/create_tb.sim @@ -5,7 +5,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect print ======================== dnode1 start diff --git a/tests/script/general/parser/dbtbnameValidate.sim b/tests/script/general/parser/dbtbnameValidate.sim index b46a22228280c73a2cfffa145c9ed71d75dd4957..fd40ecc3f758affa3ca401813132b830a241a049 100644 --- a/tests/script/general/parser/dbtbnameValidate.sim +++ b/tests/script/general/parser/dbtbnameValidate.sim @@ -3,7 +3,7 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/exec.sh -n dnode1 -s start -sleep 1000 +sleep 500 sql connect print ========== db name and table name check in create and drop, describe diff --git 
a/tests/script/general/parser/fill.sim b/tests/script/general/parser/fill.sim index f89c27d71fd8ba02105f502afd4df26a1870ffb5..9851a4e7fcfea026271729cbe7436c47e184df40 100644 --- a/tests/script/general/parser/fill.sim +++ b/tests/script/general/parser/fill.sim @@ -3,7 +3,7 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $dbPrefix = m_fl_db @@ -116,7 +116,7 @@ if $data81 != 1 then endi # avg_with_fill -print avg_witt_constant_fill +print avg_with_constant_fill sql select avg(c1), avg(c2), avg(c3), avg(c4), avg(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6) if $rows != 9 then return -1 @@ -371,12 +371,10 @@ if $data11 != 99 then endi sql select * from $tb -#print data08 = $data08 if $data08 != NCHAR then + print expect NCHAR, actual:$data08 return -1 endi -#return -1 - # fill_into_nonarithmetic_fieds sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000) @@ -853,11 +851,205 @@ endi print =====================>td-1442 sql_error select count(*) from m_fl_tb0 interval(1s) fill(prev); -print =============== clear -sql drop database $db -sql show databases -if $rows != 0 then +print =====================> aggregation + arithmetic + fill +#sql select avg(cpu_taosd) - first(cpu_taosd) from dn1 where ts<'2020-11-13 11:00:00' and ts>'2020-11-13 10:50:00' interval(10s) fill(value, 99) +#sql select count(*), first(k), avg(k), avg(k)-first(k) from tm0 where ts>'2020-1-1 1:1:1' and ts<'2020-1-1 1:02:59' interval(10s) fill(value, 99); +#sql select count(*), first(k), avg(k), avg(k)-first(k) from tm0 where ts>'2020-1-1 1:1:1' and ts<'2020-1-1 1:02:59' interval(10s) fill(NULL); + +print =====================> td-2060 +sql create table m1 (ts timestamp, k int ) tags(a int); +sql create table if not exists tm0 using m1 tags(1); +sql insert into tm0 values('2020-1-1 1:1:1', 1); +sql insert into tm0 values('2020-1-1 1:1:2', 2); +sql insert into tm0 values('2020-1-1 1:1:3', 3); +sql insert into tm0 values('2020-1-1 1:2:4', 4); +sql insert into tm0 values('2020-1-1 1:2:5', 5); +sql insert into tm0 values('2020-1-1 1:2:6', 6); +sql insert into tm0 values('2020-1-1 1:3:7', 7); +sql insert into tm0 values('2020-1-1 1:3:8', 8); +sql insert into tm0 values('2020-1-1 1:3:9', 9); +sql insert into tm0 values('2020-1-1 1:4:10', 10); + +sql select max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89,88,87,86,85); +if $rows != 8 then return -1 endi +if $data00 != @20-01-01 01:01:00.000@ then + return -1 +endi + +if $data01 != 2.000000000 then + return -1 +endi + +if $data02 != 2.000000000 then + return -1 +endi + +if $data03 != -2.000000000 then + return -1 +endi + +if $data10 != @20-01-01 01:01:10.000@ then + return -1 +endi + +if $data11 != 99.000000000 then + return -1 +endi + +if $data12 != 91.000000000 then + return -1 +endi + +if $data13 != 90.000000000 then + return -1 +endi + +if $data60 != @20-01-01 01:02:00.000@ then + return -1 +endi + +if $data61 != 2.000000000 then + return -1 +endi + +if $data62 != 2.000000000 then + return -1 +endi + +if $data63 != -2.000000000 then + return -1 +endi + +if $data70 != @20-01-01 01:02:10.000@ then + return -1 +endi + +if $data71 != 99.000000000 then + return -1 +endi + +if $data72 != 91.000000000 then + return -1 +endi + +if $data73 != 
90.000000000 then + return -1 +endi + +sql select first(k)-avg(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(NULL); +if $rows != 8 then + return -1 +endi + +if $data00 != @20-01-01 01:01:00.000@ then + return -1 +endi + +if $data01 != -1.000000000 then + return -1 +endi + +if $data02 != -2.000000000 then + return -1 +endi + +if $data10 != @20-01-01 01:01:10.000@ then + return -1 +endi + +if $data11 != NULL then + return -1 +endi + +if $data12 != NULL then + return -1 +endi + +sql select max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 4:2:15' interval(500a) fill(value, 99,91,90,89,88,87,86,85) order by ts asc; +if $rows != 21749 then + return -1 +endi + +sql select max(k)-min(k),last(k)-first(k),0-spread(k),count(1) from m1 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89,88,87,86,85) order by ts asc; +if $rows != 8 then + return -1 +endi + +if $data00 != @20-01-01 01:01:00.000@ then + return -1 +endi + +if $data00 != @20-01-01 01:01:00.000@ then + return -1 +endi +if $data1 +if $data01 != 2.000000000 then + return -1 +endi + +if $data02 != 2.000000000 then + return -1 +endi + +if $data03 != -2.000000000 then + return -1 +endi + +if $data04 != 3 then + return -1 +endi + +if $data10 != @20-01-01 01:01:10.000@ then + return -1 +endi + +if $data11 != 99.000000000 then + return -1 +endi + +if $data12 != 91.000000000 then + return -1 +endi + +if $data13 != 90.000000000 then + return -1 +endi + +if $data14 != 89 then + return -1 +endi + +print ==================> td-2115 +sql select count(*), min(c3)-max(c3) from m_fl_mt0 group by tgcol +if $rows != 10 then + return -1 +endi + +if $data00 != 5 then + return -1 +endi + +if $data01 != -4.000000000 then + return -1 +endi + +if $data02 != 0 then + return -1 +endi + +if $data12 != 1 then + return -1 +endi + + +print =============== clear +#sql drop database $db +#sql show databases +#if $rows != 0 then +# return -1 +#endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/parser/fill_stb.sim b/tests/script/general/parser/fill_stb.sim index f0cd058352803f322647aad338bc84be7fa84b5a..83eb98c465927beb70ce403491dcd100f2556c7d 100644 --- a/tests/script/general/parser/fill_stb.sim +++ b/tests/script/general/parser/fill_stb.sim @@ -3,7 +3,7 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $dbPrefix = fl1_db diff --git a/tests/script/general/parser/fill_us.sim b/tests/script/general/parser/fill_us.sim index b597d378a24f3a07d28d3bebc1a6fe403fdf3208..dc8ee8659d6c3b99e8103e3ee0a622a10e7bc7df 100644 --- a/tests/script/general/parser/fill_us.sim +++ b/tests/script/general/parser/fill_us.sim @@ -3,7 +3,7 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $dbPrefix = m_fl_db diff --git a/tests/script/general/parser/first_last.sim b/tests/script/general/parser/first_last.sim index 46431b0848c72aefc9b5aaa04f7e13f525dcfe1c..a934d3bcabf09ebe1584c296d6cab2f5c80654d2 100644 --- a/tests/script/general/parser/first_last.sim +++ b/tests/script/general/parser/first_last.sim @@ -4,7 +4,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c maxTablespervnode -v 4 system 
sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $dbPrefix = first_db @@ -76,11 +76,11 @@ run general/parser/first_last_query.sim print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 5000 +sleep 3000 system sh/exec.sh -n dnode1 -s start print ================== server restart completed sql connect -sleep 3000 +sleep 500 run general/parser/first_last_query.sim diff --git a/tests/script/general/parser/first_last_query.sim b/tests/script/general/parser/first_last_query.sim index d43cd528781cb900f239e4e7549da91ace2d0e5c..a982f10362a46e7c671e8df4674b06c5c9f60b75 100644 --- a/tests/script/general/parser/first_last_query.sim +++ b/tests/script/general/parser/first_last_query.sim @@ -1,4 +1,4 @@ -sleep 3000 +sleep 500 sql connect $dbPrefix = first_db diff --git a/tests/script/general/parser/groupby.sim b/tests/script/general/parser/groupby.sim index bd0d3c1a12c77570c19ea1ef061395912ad9f93a..19d9ae84cbc02e179c2ab060082507529b2bb608 100644 --- a/tests/script/general/parser/groupby.sim +++ b/tests/script/general/parser/groupby.sim @@ -4,7 +4,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 4 system sh/exec.sh -n dnode1 -s start -sleep 1000 +sleep 500 sql connect $dbPrefix = group_db @@ -27,18 +27,25 @@ $mt = $mtPrefix . $i $tstart = 100000 -sql drop database if exits $db -x step1 +sql drop database if exists $db -x step1 step1: sql create database if not exists $db keep 36500 sql use $db sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(12)) +$half = $tbNum / 2 + $i = 0 -while $i < $tbNum +while $i < $half $tb = $tbPrefix . $i $tg2 = ' . abc $tg2 = $tg2 . ' + + $nextSuffix = $i + $half + $tb1 = $tbPrefix . $nextSuffix + sql create table $tb using $mt tags( $i , $tg2 ) + sql create table $tb1 using $mt tags( $nextSuffix , $tg2 ) $x = 0 while $x < $rowNum @@ -55,7 +62,7 @@ while $i < $tbNum $nchar = $nchar . $c $nchar = $nchar . 
' - sql insert into $tb values ($tstart , $c , $c , $c , $c , $c , $c , $c , $binary , $nchar ) + sql insert into $tb values ($tstart , $c , $c , $x , $x , $c , $c , $c , $binary , $nchar ) $tb1 values ($tstart , $c , $c , $c , $c , $c , $c , $c , $binary , $nchar ) $tstart = $tstart + 1 $x = $x + 1 endw @@ -423,65 +430,229 @@ if $data97 != @group_tb0@ then return -1 endi +sql select count(*),first(ts),last(ts),min(c3) from group_tb1 group by c4; +if $rows != 10000 then + return -1 +endi + +if $data00 != 1 then + return -1 +endi + +if $data01 != @70-01-01 08:01:40.000@ then + return -1 +endi + +if $data02 != @70-01-01 08:01:40.000@ then + return -1 +endi + +if $data03 != 0 then + return -1 +endi + +sql select count(*),first(ts),last(ts),min(c3) from group_tb1 group by c4 limit 1; +if $rows != 1 then + return -1 +endi + +sql select count(*),first(ts),last(ts),min(c3) from group_tb1 group by c4 limit 20 offset 9990; +if $rows != 10 then + return -1 +endi + +sql select count(*),first(ts),last(ts),min(c3),max(c3),sum(c3),avg(c3),sum(c4)/count(c4) from group_tb1 group by c4; +if $rows != 10000 then + return -1 +endi + print ---------------------------------> group by binary|nchar data add cases +sql select count(*) from group_tb1 group by c8; +if $rows != 100 then + return -1 +endi +sql select count(*),sum(c4), count(c4), sum(c4)/count(c4) from group_tb1 group by c8 +if $rows != 100 then + return -1 +endi -#=========================== group by multi tags ====================== -sql create table st (ts timestamp, c int) tags (t1 int, t2 int, t3 int, t4 int); -sql create table t1 using st tags(1, 1, 1, 1); -sql create table t2 using st tags(1, 2, 2, 2); -sql insert into t1 values ('2020-03-27 04:11:16.000', 1)('2020-03-27 04:11:17.000', 2) ('2020-03-27 04:11:18.000', 3) ('2020-03-27 04:11:19.000', 4) ; -sql insert into t1 values ('2020-03-27 04:21:16.000', 1)('2020-03-27 04:31:17.000', 2) ('2020-03-27 04:51:18.000', 3) ('2020-03-27 05:10:19.000', 4) ; -sql insert into t2 values ('2020-03-27 04:11:16.000', 1)('2020-03-27 04:11:17.000', 2) ('2020-03-27 04:11:18.000', 3) ('2020-03-27 04:11:19.000', 4) ; -sql insert into t2 values ('2020-03-27 04:21:16.000', 1)('2020-03-27 04:31:17.000', 2) ('2020-03-27 04:51:18.000', 3) ('2020-03-27 05:10:19.000', 4) ; +if $data00 != 100 then + return -1 +endi -sql select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,t1,t2; -if $rows != 40 then +if $data01 != 495000 then return -1 endi -if $data01 != 1.000000000 then +if $data02 != 100 then return -1 endi -if $data02 != t1 then + +if $data03 != 4950.000000000 then + print expect 4950.000000000 , acutal $data03 return -1 endi -if $data03 != 1 then + +if $data10 != 100 then return -1 endi -if $data04 != 1 then + +if $data11 != 495100 then return -1 endi -if $data11 != 1.000000000 then +if $data13 != 4951.000000000 then return -1 endi -if $data12 != t1 then + +print ====================> group by normal column + slimit + soffset +sql select count(*), c8 from group_mt0 group by c8 limit 1 offset 0; +if $rows != 100 then return -1 endi -if $data13 != 1 then + +sql select sum(c2),c8,avg(c2), sum(c2)/count(*) from group_mt0 group by c8 slimit 2 soffset 99 +if $rows != 1 then return -1 endi -if $data14 != 1 then + +if $data00 != 79200.000000000 then return -1 endi -sql select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,t1,t2 limit 
1; -if $rows != 2 then +if $data01 != @binary99@ then return -1 endi -if $data11 != 1.000000000 then +if $data02 != 99.000000000 then return -1 endi -if $data12 != t2 then + +if $data03 != 99.000000000 then return -1 endi -if $data13 != 1 then + +print ============>td-1765 +sql select percentile(c4, 49),min(c4),max(c4),avg(c4),stddev(c4) from group_tb0 group by c8; +if $rows != 100 then + return -1 +endi + +if $data00 != 4851.000000000 then + return -1 +endi + +if $data01 != 0 then return -1 endi -if $data14 != 2 then + +if $data02 != 9900 then return -1 endi +if $data03 != 4950.000000000 then + return -1 +endi + +if $data04 != 2886.607004772 then + return -1 +endi + +if $data10 != 4852.000000000 then + return -1 +endi + +if $data11 != 1 then + return -1 +endi + +if $data12 != 9901 then + return -1 +endi + +if $data13 != 4951.000000000 then + return -1 +endi + +if $data14 != 2886.607004772 then + return -1 +endi + +print ================>td-2090 +sql select leastsquares(c2, 1, 1) from group_tb1 group by c8; +if $rows != 100 then + return -1 +endi + +if $data00 != @{slop:0.000000, intercept:0.000000}@ then + return -1 +endi + +if $data10 != @{slop:0.000000, intercept:1.000000}@ then + return -1 +endi + +if $data90 != @{slop:0.000000, intercept:9.000000}@ then + return -1 +endi + +#=========================== group by multi tags ====================== +sql create table st (ts timestamp, c int) tags (t1 int, t2 int, t3 int, t4 int); +sql create table t1 using st tags(1, 1, 1, 1); +sql create table t2 using st tags(1, 2, 2, 2); +sql insert into t1 values ('2020-03-27 04:11:16.000', 1)('2020-03-27 04:11:17.000', 2) ('2020-03-27 04:11:18.000', 3) ('2020-03-27 04:11:19.000', 4) ; +sql insert into t1 values ('2020-03-27 04:21:16.000', 1)('2020-03-27 04:31:17.000', 2) ('2020-03-27 04:51:18.000', 3) ('2020-03-27 05:10:19.000', 4) ; +sql insert into t2 values ('2020-03-27 04:11:16.000', 1)('2020-03-27 04:11:17.000', 2) ('2020-03-27 04:11:18.000', 3) ('2020-03-27 04:11:19.000', 4) ; +sql insert into t2 values ('2020-03-27 04:21:16.000', 1)('2020-03-27 04:31:17.000', 2) ('2020-03-27 04:51:18.000', 3) ('2020-03-27 05:10:19.000', 4) ; + +#sql select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,t1,t2; +#if $rows != 40 then +# return -1 +#endi +# +#if $data01 != 1.000000000 then +# return -1 +#endi +#if $data02 != t1 then +# return -1 +#endi +#if $data03 != 1 then +# return -1 +#endi +#if $data04 != 1 then +# return -1 +#endi +# +#if $data11 != 1.000000000 then +# return -1 +#endi +#if $data12 != t1 then +# return -1 +#endi +#if $data13 != 1 then +# return -1 +#endi +#if $data14 != 1 then +# return -1 +#endi +# +#sql select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,t1,t2 limit 1; +#if $rows != 2 then +# return -1 +#endi +# +#if $data11 != 1.000000000 then +# return -1 +#endi +#if $data12 != t2 then +# return -1 +#endi +#if $data13 != 1 then +# return -1 +#endi +#if $data14 != 2 then +# return -1 +#endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/import.sim b/tests/script/general/parser/import.sim index 7ab0437685869594daded7fcfd9c886339842221..6da2483738a6d5c805e3a29129183845e64c4f22 100644 --- a/tests/script/general/parser/import.sim +++ b/tests/script/general/parser/import.sim @@ -3,7 +3,7 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 system 
sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $dbPrefix = impt_db @@ -64,7 +64,7 @@ sleep 2000 system sh/exec.sh -n dnode1 -s start print ================== server restart completed sql connect -sleep 3000 +sleep 500 sql use $db sql select * from tb diff --git a/tests/script/general/parser/import_commit1.sim b/tests/script/general/parser/import_commit1.sim index 197ae5845362f2dbfc9392fe2a57601aec442e22..9c5144a630cbd0df20e8842211c1d3d3e5d5cbb0 100644 --- a/tests/script/general/parser/import_commit1.sim +++ b/tests/script/general/parser/import_commit1.sim @@ -4,7 +4,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/cfg.sh -n dnode1 -c ctime -v 30 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $dbPrefix = ic_db @@ -40,7 +40,7 @@ while $x < $rowNum endw print ====== tables created -sleep 6000 +sleep 3000 $ts = $ts0 + $delta $ts = $ts + 1 diff --git a/tests/script/general/parser/import_commit2.sim b/tests/script/general/parser/import_commit2.sim index e400d0c3cb7e347d34b3c75d9ae2b68ffeeb656f..000394386ec169bba4b907969d73878760584eb6 100644 --- a/tests/script/general/parser/import_commit2.sim +++ b/tests/script/general/parser/import_commit2.sim @@ -4,7 +4,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/cfg.sh -n dnode1 -c ctime -v 30 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $dbPrefix = ic_db @@ -39,7 +39,7 @@ while $x < $rowNum endw print ====== tables created -sleep 6000 +sleep 3000 $ts = $ts0 + $delta $ts = $ts + 1 diff --git a/tests/script/general/parser/import_commit3.sim b/tests/script/general/parser/import_commit3.sim index 7e7451e689bbe2ac0059b13ba3fa64c530445adf..997a4a22aada1d3f02becd8e02f82c35a41a6175 100644 --- a/tests/script/general/parser/import_commit3.sim +++ b/tests/script/general/parser/import_commit3.sim @@ -4,7 +4,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/cfg.sh -n dnode1 -c ctime -v 30 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $dbPrefix = ic_db @@ -39,7 +39,7 @@ while $x < $rowNum endw print ====== tables created -sleep 6000 +sleep 3000 $ts = $ts + 1 sql insert into $tb values ( $ts , -1, -1, -1, -1, -1) @@ -47,7 +47,7 @@ $ts = $ts0 + $delta $ts = $ts + 1 sql import into $tb values ( $ts , -2, -2, -2, -2, -2) -sleep 6000 +sleep 3000 sql show databases diff --git a/tests/script/general/parser/insert_multiTbl.sim b/tests/script/general/parser/insert_multiTbl.sim index b24c1b629fa371e417826bcb1da67180f3317c29..887f97a198237ff9c181c3d313525ae4fb66aec5 100644 --- a/tests/script/general/parser/insert_multiTbl.sim +++ b/tests/script/general/parser/insert_multiTbl.sim @@ -6,7 +6,7 @@ system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/exec.sh -n dnode1 -s start sleep 2000 sql connect -sleep 3000 +sleep 500 print ======================== dnode1 start sql create database mul_db diff --git a/tests/script/general/parser/insert_tb.sim b/tests/script/general/parser/insert_tb.sim index 4ba455c2446e6fd9804b8e518dc31cacb0781bfc..0a9eb9f678c12de13b33e7dd1ca438c768addf81 100644 --- a/tests/script/general/parser/insert_tb.sim +++ b/tests/script/general/parser/insert_tb.sim @@ -5,7 +5,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect print ======================== dnode1 start @@ -103,7 +103,7 @@ if 
$rows != 1 then endi sql drop database $db -sleep 1000 +sleep 500 sql create database $db sql use $db sql create table stb1 (ts timestamp, c1 int) tags(t1 int) @@ -136,7 +136,7 @@ if $data21 != 1.000000000 then endi sql drop database $db -sleep 1000 +sleep 500 sql create database $db sql use $db sql create table stb (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 nchar(10), c6 binary(20)) tags(t1 int, t2 bigint, t3 double, t4 float, t5 nchar(10)) diff --git a/tests/script/general/parser/interp.sim b/tests/script/general/parser/interp.sim index 1cd857162d9a764519d2edd6f790f9fd1c083b61..0d5c1804dd6cd4e929aaaa4de2e6775cc5b71153 100644 --- a/tests/script/general/parser/interp.sim +++ b/tests/script/general/parser/interp.sim @@ -3,7 +3,7 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $dbPrefix = intp_db @@ -47,8 +47,7 @@ while $i < $halfNum $binary = $binary . ' $nchar = 'nchar . $c $nchar = $nchar . ' - sql insert into $tb values ( $ts , $c , $c , $c , $c , $c , $c , true, $binary , $nchar ) - sql insert into $tb1 values ( $ts , $c , NULL , $c , NULL , $c , $c , true, $binary , $nchar ) + sql insert into $tb values ( $ts , $c , $c , $c , $c , $c , $c , true, $binary , $nchar ) $tb1 values ( $ts , $c , NULL , $c , NULL , $c , $c , true, $binary , $nchar ) $x = $x + 1 endw @@ -60,7 +59,7 @@ run general/parser/interp_test.sim print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 5000 +sleep 3000 system sh/exec.sh -n dnode1 -s start print ================== server restart completed diff --git a/tests/script/general/parser/interp_test.sim b/tests/script/general/parser/interp_test.sim index 8bffae4af634e9c04c4d5f4614d51e1e1b33ffba..42738a8bd134c3b7cc72569fe490285baa694e57 100644 --- a/tests/script/general/parser/interp_test.sim +++ b/tests/script/general/parser/interp_test.sim @@ -1,4 +1,4 @@ -sleep 3000 +sleep 500 sql connect $dbPrefix = intp_db @@ -303,6 +303,8 @@ $tb = $tbPrefix . 0 return -1 endi + print select interp(ts), interp(c1), interp(c2), interp(c3), interp(c4), interp(c5), interp(c6), interp(c7), interp(c8), interp(c9) from $tb where ts = $ts0 fill(linear) + sql select interp(ts), interp(c1), interp(c2), interp(c3), interp(c4), interp(c5), interp(c6), interp(c7), interp(c8), interp(c9) from $tb where ts = $ts0 fill(linear) if $rows != 1 then return -1 @@ -338,6 +340,8 @@ $tb = $tbPrefix . 0 return -1 endi # columns contain NULL values + + print select interp(ts), interp(c1), interp(c2), interp(c3), interp(c4), interp(c5), interp(c6), interp(c7), interp(c8), interp(c9) from intp_tb3 where ts = $ts0 fill(linear) sql select interp(ts), interp(c1), interp(c2), interp(c3), interp(c4), interp(c5), interp(c6), interp(c7), interp(c8), interp(c9) from intp_tb3 where ts = $ts0 fill(linear) if $rows != 1 then return -1 @@ -380,6 +384,7 @@ $tb = $tbPrefix . 0 endi $t = $tsu + 1000 + print select interp(ts), interp(c1), interp(c2), interp(c3), interp(c4), interp(c5), interp(c6), interp(c7), interp(c8), interp(c9) from $tb where ts = $t fill(linear) sql select interp(ts), interp(c1), interp(c2), interp(c3), interp(c4), interp(c5), interp(c6), interp(c7), interp(c8), interp(c9) from $tb where ts = $t fill(linear) if $rows != 0 then return -1 @@ -387,6 +392,7 @@ $tb = $tbPrefix . 
0 ## fill(value) $t = $ts0 + 1000 + print 91 sql select interp(ts), interp(c1), interp(c2), interp(c3), interp(c4), interp(c5), interp(c6), interp(c7), interp(c8), interp(c9) from $tb where ts = $t fill(value, -1, -2) if $rows != 1 then return -1 @@ -456,6 +462,7 @@ $tb = $tbPrefix . 0 if $data09 != nchar0 then return -1 endi + # table has NULL columns sql select interp(ts), interp(c1), interp(c2), interp(c3), interp(c4), interp(c5), interp(c6), interp(c7), interp(c8), interp(c9) from intp_tb3 where ts = $ts0 fill(value, -1, -2, -3) if $rows != 1 then @@ -491,11 +498,14 @@ $tb = $tbPrefix . 0 ##### select interp from stable ## interp(*) from stb + print select interp(*) from $stb where ts = $ts0 sql select interp(*) from $stb where ts = $ts0 if $rows != 1 then return -1 endi $t = $ts0 + 1000 + print 92 + sql select interp(*) from $stb where ts = $t if $rows != 0 then return -1 diff --git a/tests/script/general/parser/join.sim b/tests/script/general/parser/join.sim index 254571bda103957fbaeaf0e311e7be03b4dcfc35..3ee90cda352002961c131b7e014f8aa6d8155c05 100644 --- a/tests/script/general/parser/join.sim +++ b/tests/script/general/parser/join.sim @@ -7,7 +7,7 @@ system sh/cfg.sh -n dnode1 -c rpcDebugFlag -v 135 system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 4 system sh/exec.sh -n dnode1 -s start -sleep 1000 +sleep 500 sql connect $dbPrefix = join_db @@ -24,7 +24,7 @@ $mt = $mtPrefix . $i $tstart = 100000 -sql drop database if exits $db -x step1 +sql drop database if exists $db -x step1 step1: sql create database if not exists $db keep 36500 sql use $db @@ -360,9 +360,7 @@ endi sql select join_mt1.* from join_mt1 print $rows - -$val = 2000 -if $rows != $val then +if $rows != 2000 then return -1 endi @@ -444,6 +442,9 @@ if $rows != $val then return -1 endi +#=============================================================== +sql select first(join_tb0.c8),first(join_tb0.c9) from join_tb1 , join_tb0 where join_tb1.ts = join_tb0.ts and join_tb1.ts <= 100002 and join_tb0.c7 = true + #====================group by========================================= diff --git a/tests/script/general/parser/join_multivnode.sim b/tests/script/general/parser/join_multivnode.sim index 51f1ef11c7fc9f8cfff60ebe86ad00104266e7ad..1c901dd2e32b3950865510176d7cc4383a41fe97 100644 --- a/tests/script/general/parser/join_multivnode.sim +++ b/tests/script/general/parser/join_multivnode.sim @@ -6,7 +6,7 @@ system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 4 system sh/exec.sh -n dnode1 -s start sql connect -sleep 1000 +sleep 500 $dbPrefix = join_m_db $tbPrefix = join_tb @@ -22,7 +22,7 @@ $mt = $mtPrefix . 
$i $tstart = 100000 -sql drop database if exits $db -x step1 +sql drop database if exists $db -x step1 step1: sql create database if not exists $db keep 36500 sql use $db @@ -132,4 +132,239 @@ sql select join_mt0.ts, join_mt1.t1, join_mt0.t1, join_mt1.tbname, join_mt0.tbna sql select join_mt0.ts, join_mt1.t1, join_mt0.t1, join_mt1.tbname, join_mt0.tbname from join_mt0, join_mt1 where join_mt0.ts=join_mt1.ts and join_mt0.t1=join_mt1.t1 limit 1 +#1970-01-01 08:01:40.800 | 10 | 45.000000000 | 0 | true | false | 0 | +#1970-01-01 08:01:40.790 | 10 | 945.000000000 | 90 | true | true | 0 | +sql_error select count(join_mt0.c1), sum(join_mt1.c2), first(join_mt0.c5), last(join_mt1.c7), first(join_mt1.c7) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts interval(10a) group by join_mt0.t1 order by join_mt0.ts desc limit 20 offset 19; + +sql select count(join_mt0.c1), sum(join_mt0.c2)/count(*), avg(c2), first(join_mt0.c5), last(c7) from join_mt0 interval(10a) group by join_mt0.t1 order by join_mt0.ts desc; +if $rows != 300 then + return -1 +endi + +if $data00 != @70-01-01 08:01:40.990@ then + print expect 70-01-01 08:01:40.990, actual: $data00 + return -1 +endi + +if $data01 != 10 then + return -1 +endi + +if $data02 != 94.500000000 then + print expect 94.500000000, actual $data02 + return -1 +endi + +if $data03 != 94.500000000 then + return -1 +endi + +if $data04 != 90 then + return -1 +endi + +if $data05 != 1 then + return -1 +endi + +if $data06 != 0 then + return -1 +endi + +if $data10 != @70-01-01 08:01:40.980@ then + print expect 70-01-01 08:01:40.980, actual: $data10 + return -1 +endi + +if $data11 != 10 then + return -1 +endi + +if $data12 != 84.500000000 then + print expect 84.500000000, actual $data12 + return -1 +endi + +if $data13 != 84.500000000 then + return -1 +endi + +if $data14 != 80 then + return -1 +endi + +if $data15 != 1 then + return -1 +endi + +if $data16 != 0 then + return -1 +endi + +# this function will cause shell crash +sql_error select count(join_mt0.c1), first(join_mt0.c1) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts interval(10a) group by join_mt0.t1 order by join_mt0.ts desc; +sql_error select last(join_mt1.c7), first(join_mt1.c7) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts interval(10m) group by join_mt0.t1 order by join_mt0.ts asc; +sql_error select count(join_mt0.c1), first(join_mt0.c1)-last(join_mt1.c1), first(join_mt1.c9) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts; + +print ================================> TD-2152 +sql_error select join_mt1.c1,join_mt0.c1 from join_mt1,join_mt0 where join_mt1.ts = join_mt0.ts and join_mt1.t1 = join_mt0.t1 order by t; + +print =================================> add result check +sql select count(join_mt0.c1), first(join_mt0.c1)/count(*), first(join_mt1.c9) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts; +sql select count(join_mt0.c1), first(join_mt0.c1)-last(join_mt0.c1), first(join_mt1.c9) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts; +sql select last(join_mt0.c1) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts; + +sql create database disorder_db; +sql use disorder_db; +sql create table m1(ts timestamp, k int) tags(a int); +sql create table tm0 using m1 tags(0); +sql create table tm1 using m1 tags(1); +sql create table tm2 using m1 tags(2); +sql create table tm3 using 
m1 tags(3); +sql create table tm4 using m1 tags(4); +sql create table tm5 using m1 tags(5); +sql create table tm6 using m1 tags(6); +sql create table tm7 using m1 tags(7); + +sql show vgroups +if $rows != 2 then + print maxTablesPerVnode set to 4 is not active. + return -1 +endi + +sql insert into tm0 values('2020-1-1 1:1:1', 0); +sql insert into tm1 values('2020-1-1 1:1:1', 1); +sql insert into tm2 values('2020-1-1 1:1:1', 2); +sql insert into tm3 values('2020-1-1 1:1:1', 3); +sql insert into tm4 values('2020-1-1 1:1:1', 4); +sql insert into tm5 values('2020-1-1 1:1:1', 5); +sql insert into tm6 values('2020-1-1 1:1:1', 6); +sql insert into tm7 values('2020-1-1 1:1:1', 7); + +sql create table m2(ts timestamp, k int) tags(b int); +sql create table t0 using m2 tags(0); +sql create table t1 using m2 tags(4); +sql create table t2 using m2 tags(92); +sql create table t3 using m2 tags(93); +sql create table t4 using m2 tags(1); +sql create table t5 using m2 tags(5); +sql create table t6 using m2 tags(96); +sql create table t7 using m2 tags(97); + +sql show vgroups +if $rows != 4 then + return -1 +endi + +sql insert into t0 values('2020-1-1 1:1:1', 10); +sql insert into t1 values('2020-1-1 1:1:1', 11); +sql insert into t2 values('2020-1-1 1:1:1', 12); +sql insert into t3 values('2020-1-1 1:1:1', 13); +sql insert into t4 values('2020-1-1 1:1:1', 14); +sql insert into t5 values('2020-1-1 1:1:1', 15); +sql insert into t6 values('2020-1-1 1:1:1', 16); +sql insert into t7 values('2020-1-1 1:1:1', 17); + +sql select m1.ts,m1.tbname,m1.a, m2.ts,m2.tbname,m2.b from m1,m2 where m1.a=m2.b and m1.ts=m2.ts; +if $rows != 4 then + return -1 +endi + +if $data00 != @20-01-01 01:01:01.000@ then + print expect 20-01-01 01:01:01.000, actual:$data00 + return -1 +endi + +if $data01 != @tm0@ then + return -1 +endi + +if $data02 != 0 then + return -1 +endi + +if $data03 != @20-01-01 01:01:01.000@ then + return -1 +endi + +if $data04 != @t0@ then + return -1 +endi + +if $data05 != 0 then + return -1 +endi + +if $data10 != @20-01-01 01:01:01.000@ then + return -1 +endi + +if $data11 != @tm1@ then + return -1 +endi + +if $data12 != 1 then + return -1 +endi + +if $data13 != @20-01-01 01:01:01.000@ then + return -1 +endi + +if $data14 != @t4@ then + return -1 +endi + +if $data15 != 1 then + return -1 +endi + +if $data20 != @20-01-01 01:01:01.000@ then + return -1 +endi + +if $data21 != @tm4@ then + return -1 +endi + +if $data22 != 4 then + return -1 +endi + +if $data23 != @20-01-01 01:01:01.000@ then + return -1 +endi + +if $data24 != @t1@ then + return -1 +endi + +if $data25 != 4 then + return -1 +endi + +if $data30 != @20-01-01 01:01:01.000@ then + return -1 +endi + +if $data31 != @tm5@ then + return -1 +endi + +if $data32 != 5 then + return -1 +endi + +if $data33 != @20-01-01 01:01:01.000@ then + return -1 +endi + +if $data34 != @t5@ then + return -1 +endi + +if $data35 != 5 then + return -1 +endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/parser/lastrow.sim b/tests/script/general/parser/lastrow.sim index 6321823fe2f75854beeab091663d268876e373b9..cc71123a77e7e48c491d8c1450addb55e243bdd6 100644 --- a/tests/script/general/parser/lastrow.sim +++ b/tests/script/general/parser/lastrow.sim @@ -4,7 +4,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 4 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $dbPrefix = lr_db @@ -62,11 +62,11 @@ run 
general/parser/lastrow_query.sim print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 5000 +sleep 3000 system sh/exec.sh -n dnode1 -s start print ================== server restart completed sql connect -sleep 3000 +sleep 500 run general/parser/lastrow_query.sim diff --git a/tests/script/general/parser/lastrow_query.sim b/tests/script/general/parser/lastrow_query.sim index f81a48d5b2d61ddf829920201dd01f5dd1ee44ba..5b9c8b60c3f62b48159d13f5cb82ccffd32d0feb 100644 --- a/tests/script/general/parser/lastrow_query.sim +++ b/tests/script/general/parser/lastrow_query.sim @@ -1,4 +1,4 @@ -sleep 3000 +sleep 500 sql connect $dbPrefix = lr_db diff --git a/tests/script/general/parser/limit.sim b/tests/script/general/parser/limit.sim index d8c72349db9ebf40e038a921ea7534e6a1a87e23..fb5e704bf1ba3bab457dae2dc06a23628fde316c 100644 --- a/tests/script/general/parser/limit.sim +++ b/tests/script/general/parser/limit.sim @@ -4,7 +4,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $dbPrefix = lm_db @@ -62,11 +62,11 @@ run general/parser/limit_stb.sim print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 5000 +sleep 3000 system sh/exec.sh -n dnode1 -s start print ================== server restart completed sql connect -sleep 3000 +sleep 500 run general/parser/limit_tb.sim run general/parser/limit_stb.sim diff --git a/tests/script/general/parser/limit1.sim b/tests/script/general/parser/limit1.sim index 8b927b2a7fe599e5e51392e8577d3dffb112ea57..7236421ec2eb1617c7c16b5d659b9b57f300559a 100644 --- a/tests/script/general/parser/limit1.sim +++ b/tests/script/general/parser/limit1.sim @@ -4,7 +4,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $dbPrefix = lm1_db @@ -48,8 +48,7 @@ while $i < $halfNum $binary = $binary . ' $nchar = 'nchar . $c $nchar = $nchar . 
' - sql insert into $tb values ( $ts , $c , $c , $c , $c , $c , $c , true, $binary , $nchar ) - sql insert into $tb1 values ( $ts , $c , NULL , $c , NULL , $c , $c , true, $binary , $nchar ) + sql insert into $tb values ( $ts , $c , $c , $c , $c , $c , $c , true, $binary , $nchar ) $tb1 values ( $ts , $c , NULL , $c , NULL , $c , $c , true, $binary , $nchar ) $x = $x + 1 endw @@ -62,7 +61,7 @@ run general/parser/limit1_stb.sim print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 5000 +sleep 3000 system sh/exec.sh -n dnode1 -s start print ================== server restart completed diff --git a/tests/script/general/parser/limit1_stb.sim b/tests/script/general/parser/limit1_stb.sim index fbcd8d09656b9c1b5f515c2224fbca8c5e8f324c..d5846adc45327e6faa7b2bd7cdd10b44ea6fc094 100644 --- a/tests/script/general/parser/limit1_stb.sim +++ b/tests/script/general/parser/limit1_stb.sim @@ -1,4 +1,4 @@ -sleep 3000 +sleep 500 sql connect $dbPrefix = lm1_db diff --git a/tests/script/general/parser/limit1_tb.sim b/tests/script/general/parser/limit1_tb.sim index a9484d10dbaaad1c8c08b2ae54caf85ad186207e..1e473eb8580965b6e777c7fe09cbc607af8938a8 100644 --- a/tests/script/general/parser/limit1_tb.sim +++ b/tests/script/general/parser/limit1_tb.sim @@ -1,4 +1,4 @@ -sleep 3000 +sleep 500 sql connect $dbPrefix = lm1_db diff --git a/tests/script/general/parser/limit1_tblocks100.sim b/tests/script/general/parser/limit1_tblocks100.sim index f1702924aaee3091c7ac9e4229b36ec11c141123..9a123e645ce070f5146d2507b90e16b06d258c4d 100644 --- a/tests/script/general/parser/limit1_tblocks100.sim +++ b/tests/script/general/parser/limit1_tblocks100.sim @@ -4,7 +4,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $dbPrefix = lm1_db @@ -48,8 +48,7 @@ while $i < $halfNum $binary = $binary . ' $nchar = 'nchar . $c $nchar = $nchar . 
' - sql insert into $tb values ( $ts , $c , $c , $c , $c , $c , $c , true, $binary , $nchar ) - sql insert into $tb1 values ( $ts , $c , NULL , $c , NULL , $c , $c , true, $binary , $nchar ) + sql insert into $tb values ( $ts , $c , $c , $c , $c , $c , $c , true, $binary , $nchar ) $tb1 values ( $ts , $c , NULL , $c , NULL , $c , $c , true, $binary , $nchar ) $x = $x + 1 endw @@ -62,7 +61,7 @@ run general/parser/limit1_stb.sim print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 5000 +sleep 3000 system sh/exec.sh -n dnode1 -s start print ================== server restart completed diff --git a/tests/script/general/parser/limit2.sim b/tests/script/general/parser/limit2.sim index d22c786aa3001e6f7e275466bd5426b7e275b53c..47c3eb6d08ca1f01ded0b8e4597f91d0cd2f1933 100644 --- a/tests/script/general/parser/limit2.sim +++ b/tests/script/general/parser/limit2.sim @@ -4,7 +4,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c rowsInFileBlock -v 255 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $dbPrefix = lm2_db @@ -69,7 +69,7 @@ print ====== tables created print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 5000 +sleep 3000 system sh/exec.sh -n dnode1 -s start print ================== server restart completed diff --git a/tests/script/general/parser/limit2_query.sim b/tests/script/general/parser/limit2_query.sim index 8294247a86415bcd90c692a8765475fbb6ecdd6b..f9a1dd8e4b51107d6713d52f850f792f339b3fa4 100644 --- a/tests/script/general/parser/limit2_query.sim +++ b/tests/script/general/parser/limit2_query.sim @@ -1,4 +1,4 @@ -sleep 3000 +sleep 500 sql connect $dbPrefix = lm2_db diff --git a/tests/script/general/parser/limit2_tblocks100.sim b/tests/script/general/parser/limit2_tblocks100.sim index 64f86edd28092bc816b663719c0ab153ff1181e0..1aaa8e885a21b4db79c28645029a015d12a24fb5 100644 --- a/tests/script/general/parser/limit2_tblocks100.sim +++ b/tests/script/general/parser/limit2_tblocks100.sim @@ -4,7 +4,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c rowsInFileBlock -v 255 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $dbPrefix = lm2_db @@ -69,7 +69,7 @@ print ====== tables created print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 3000 +sleep 500 system sh/exec.sh -n dnode1 -s start print ================== server restart completed diff --git a/tests/script/general/parser/limit_stb.sim b/tests/script/general/parser/limit_stb.sim index c9df03ab2efe220dc9a5bf5707cb83f81178e4d2..b41b7b726bc24b83f64cecbec8ac0eb83afa57ea 100644 --- a/tests/script/general/parser/limit_stb.sim +++ b/tests/script/general/parser/limit_stb.sim @@ -1,4 +1,4 @@ -sleep 3000 +sleep 500 sql connect $dbPrefix = lm_db diff --git a/tests/script/general/parser/limit_tb.sim b/tests/script/general/parser/limit_tb.sim index 970c7b75c5683e8ece8ce3c7b5458097a0ec3973..b917627fdf6499fe2dacf948af7badb2b5de43d7 100644 --- a/tests/script/general/parser/limit_tb.sim +++ b/tests/script/general/parser/limit_tb.sim @@ -1,4 +1,4 @@ -sleep 3000 +sleep 500 sql connect $dbPrefix = lm_db diff --git a/tests/script/general/parser/mixed_blocks.sim b/tests/script/general/parser/mixed_blocks.sim index 41082bb1441a111602a127b3dd8e9eb6d0420e51..d3558560df95d02256e0bc6b4a04ae9fae6e5890 
100644 --- a/tests/script/general/parser/mixed_blocks.sim +++ b/tests/script/general/parser/mixed_blocks.sim @@ -5,7 +5,7 @@ system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 4 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $dbPrefix = mb_db diff --git a/tests/script/general/parser/nchar.sim b/tests/script/general/parser/nchar.sim index bdac5ace55a0c5feeb3413eea13a8a7b2c13f7e4..ab4ed2607a062ed32ebe52527d30705aa135aa86 100644 --- a/tests/script/general/parser/nchar.sim +++ b/tests/script/general/parser/nchar.sim @@ -5,7 +5,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect print ======================== dnode1 start diff --git a/tests/script/general/parser/null_char.sim b/tests/script/general/parser/null_char.sim index 2e39fc7db2e20f9528c2d62458f3a6a625181c73..7b1c81a2958c36acbe96641494a8c156dabb275d 100644 --- a/tests/script/general/parser/null_char.sim +++ b/tests/script/general/parser/null_char.sim @@ -6,7 +6,7 @@ system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c tableMetaKeepTimer -v 3 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect print ========== NULL_char.sim @@ -251,7 +251,7 @@ endi ################### nchar sql alter table st41 set tag tag_nchar = "涛思数据" sql select tag_binary, tag_nchar, tag_int, tag_bool, tag_float, tag_double from st41 -#sleep 1000 +#sleep 500 #if $data01 != 涛思数据 then # print ==== expect 涛思数据, actually $data01 # return -1 diff --git a/tests/script/general/parser/projection_limit_offset.sim b/tests/script/general/parser/projection_limit_offset.sim index fbff99d58f5b6355863b90172c2fb14c1f2ba393..bc22bd6da9194d9798f4744dea84d999beaf6a43 100644 --- a/tests/script/general/parser/projection_limit_offset.sim +++ b/tests/script/general/parser/projection_limit_offset.sim @@ -4,7 +4,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 4 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $dbPrefix = group_db @@ -21,18 +21,26 @@ $mt = $mtPrefix . $i $tstart = 100000 -sql drop database if exits $db -x step1 +sql drop database if exists $db -x step1 step1: sql create database if not exists $db keep 36500 sql use $db sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(12)) $i = 0 -while $i < $tbNum +$half = $tbNum / 2 + +while $i < $half $tb = $tbPrefix . $i $tg2 = ' . abc $tg2 = $tg2 . ' + + $tbId = $i + $half + + $tb1 = $tbPrefix . $tbId + sql create table $tb using $mt tags( $i , $tg2 ) + sql create table $tb1 using $mt tags( $i , $tg2 ) $x = 0 while $x < $rowNum @@ -49,7 +57,7 @@ while $i < $tbNum $nchar = $nchar . $c $nchar = $nchar . 
' - sql insert into $tb values ($tstart , $c , $c , $c , $c , $c , $c , $c , $binary , $nchar ) + sql insert into $tb values ($tstart , $c , $c , $c , $c , $c , $c , $c , $binary , $nchar ) $tb1 values ($tstart , $c , $c , $c , $c , $c , $c , $c , $binary , $nchar ) $tstart = $tstart + 1 $x = $x + 1 endw @@ -324,8 +332,22 @@ sql create table tm0 using m1 tags(1); sql create table tm1 using m1 tags(2); sql insert into tm0 values(10000, 1) (20000, 2)(30000, 3) (40000, NULL) (50000, 2) tm1 values(10001, 2)(20000,4)(90000,9); -sql select count(*),first(k),last(k) from m1 where tbname in ('tm0') interval(1s) order by ts desc; +#=============================tbase-1205 +sql select count(*) from tm1 where ts= now -1d interval(1h) fill(NULL); + +print ===================>TD-1834 +sql select * from tm0 where ts>11000 and ts< 20000 order by ts asc +if $rows != 0 then + return -1 +endi + +sql select * from tm0 where ts>11000 and ts< 20000 order by ts desc +if $rows != 0 then + return -1 +endi + +sql select count(*),first(k),last(k) from m1 where tbname in ('tm0') interval(1s) order by ts desc; if $row != 5 then return -1 endi @@ -386,7 +408,25 @@ sql_error select k+1,sum(k) from tm0; sql_error select k, sum(k) from tm0; sql_error select k, sum(k)+1 from tm0; +print ================== restart server to commit data into disk +system sh/exec.sh -n dnode1 -s stop -x SIGINT +sleep 3000 +system sh/exec.sh -n dnode1 -s start +print ================== server restart completed + #=============================tbase-1205 sql select count(*) from tm1 where ts= now -1d interval(1h) fill(NULL); +print ===================>TD-1834 +sql select * from tm0 where ts>11000 and ts< 20000 order by ts asc +if $rows != 0 then + return -1 +endi + +sql select * from tm0 where ts>11000 and ts< 20000 order by ts desc +if $rows != 0 then + return -1 +endi + + diff --git a/tests/script/general/parser/selectResNum.sim b/tests/script/general/parser/selectResNum.sim index 42cedc034b7f000eea4b75b156a847a4d1667c5b..464f363222b2d135b68d7e41d18ec33d71d843d2 100644 --- a/tests/script/general/parser/selectResNum.sim +++ b/tests/script/general/parser/selectResNum.sim @@ -4,7 +4,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 200 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $dbPrefix = sc_db @@ -50,8 +50,7 @@ while $i < $halfNum $binary = $binary . ' $nchar = 'nchar . $c $nchar = $nchar . 
' - sql insert into $tb values ( $ts , $c , $c , $c , $c , $c , $c , true, $binary , $nchar ) - sql insert into $tb1 values ( $ts , $c , NULL , $c , NULL , $c , $c , true, $binary , $nchar ) + sql insert into $tb values ( $ts , $c , $c , $c , $c , $c , $c , true, $binary , $nchar ) $tb1 values ( $ts , $c , NULL , $c , NULL , $c , $c , true, $binary , $nchar ) $x = $x + 1 endw @@ -119,12 +118,12 @@ endw print ====== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 6000 +sleep 3000 system sh/exec.sh -n dnode1 -s start print ====== server restart completed -sleep 3000 +sleep 500 sql connect -sleep 3000 +sleep 500 sql use $db ##### repeat test after server restart diff --git a/tests/script/general/parser/select_across_vnodes.sim b/tests/script/general/parser/select_across_vnodes.sim index ac3a8f2b2b3bd04681a20e070aaf2aba58447b26..44e5576dac11d9f8ebffb9eee8f067bab86344c1 100644 --- a/tests/script/general/parser/select_across_vnodes.sim +++ b/tests/script/general/parser/select_across_vnodes.sim @@ -4,7 +4,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 5 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $dbPrefix = sav_db diff --git a/tests/script/general/parser/select_from_cache_disk.sim b/tests/script/general/parser/select_from_cache_disk.sim index 4fdfa7b55b88372d5497f399c5c821f10743f4d5..0fa0848144c740c6ed8c7583cbca46816d3da6bc 100644 --- a/tests/script/general/parser/select_from_cache_disk.sim +++ b/tests/script/general/parser/select_from_cache_disk.sim @@ -4,7 +4,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $dbPrefix = scd_db @@ -35,11 +35,11 @@ sql insert into $tb values ('2018-09-17 09:00:00.030', 3) print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 5000 +sleep 3000 system sh/exec.sh -n dnode1 -s start print ================== server restart completed sql connect -sleep 3000 +sleep 500 sql use $db # generate some data in cache diff --git a/tests/script/general/parser/select_with_tags.sim b/tests/script/general/parser/select_with_tags.sim index 68d145a5f277464a224ccf0b3c92767a48e26fb6..dab76f60044c7b587e6c98bb256f895aeedb656e 100644 --- a/tests/script/general/parser/select_with_tags.sim +++ b/tests/script/general/parser/select_with_tags.sim @@ -4,7 +4,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 4 system sh/exec.sh -n dnode1 -s start -sleep 1000 +sleep 500 sql connect $dbPrefix = select_tags_db diff --git a/tests/script/general/parser/set_tag_vals.sim b/tests/script/general/parser/set_tag_vals.sim index 38af57c73d681fb0ed40212a967c741b0431ebad..bf29fe3902f88ae557b53068d1678a0aa1288818 100644 --- a/tests/script/general/parser/set_tag_vals.sim +++ b/tests/script/general/parser/set_tag_vals.sim @@ -4,7 +4,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $dbPrefix = db diff --git a/tests/script/general/parser/single_row_in_tb.sim b/tests/script/general/parser/single_row_in_tb.sim index 4305ae1b5d0d6fa1b308885263d1ac9947af3101..fe1edb1f738a01831ad3e926e6b1a925b0c05dcc 
100644 --- a/tests/script/general/parser/single_row_in_tb.sim +++ b/tests/script/general/parser/single_row_in_tb.sim @@ -4,7 +4,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 4 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $dbPrefix = sr_db @@ -32,7 +32,7 @@ run general/parser/single_row_in_tb_query.sim print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 5000 +sleep 3000 system sh/exec.sh -n dnode1 -s start print ================== server restart completed diff --git a/tests/script/general/parser/single_row_in_tb_query.sim b/tests/script/general/parser/single_row_in_tb_query.sim index a1ae70ec814d0babf00dbf5051da45c5cd7daf94..9e90b91220dae51bf31c9f3dde69eb1404223e88 100644 --- a/tests/script/general/parser/single_row_in_tb_query.sim +++ b/tests/script/general/parser/single_row_in_tb_query.sim @@ -1,4 +1,4 @@ -sleep 3000 +sleep 500 sql connect $dbPrefix = sr_db diff --git a/tests/script/general/parser/sliding.sim b/tests/script/general/parser/sliding.sim index f85211beb83e575e2a73518d89f4b7d989486f83..4283421169d146a19a7a46eed1c1bd819f77464d 100644 --- a/tests/script/general/parser/sliding.sim +++ b/tests/script/general/parser/sliding.sim @@ -4,8 +4,9 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/cfg.sh -n dnode1 -c debugFlag -v 135 system sh/cfg.sh -n dnode1 -c rpcDebugFlag -v 135 +system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 4 system sh/exec.sh -n dnode1 -s start -sleep 1000 +sleep 500 sql connect $dbPrefix = sliding_db @@ -26,9 +27,9 @@ $i = 0 $db = $dbPrefix . $i $mt = $mtPrefix . $i -sql drop database if exits $db -x step1 +sql drop database if exists $db -x step1 step1: -sql create database if not exists $db maxtables 4 keep 36500 +sql create database if not exists $db keep 36500 sql use $db sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(12)) @@ -169,19 +170,20 @@ if $data94 != 90 then endi sql select count(c2),last(c4) from sliding_tb0 interval(30s) sliding(10s) order by ts asc; -if $row != 30 then +if $row != 32 then return -1 endi -if $data00 != @00-01-01 00:00:00.000@ then +if $data00 != @99-12-31 23:59:40.000@ then + print expect 12-31 23:59:40.000, actual: $data00 return -1 endi -if $data01 != 1000 then +if $data01 != 334 then return -1 endi -if $data02 != 99 then +if $data02 != 33 then return -1 endi @@ -304,11 +306,11 @@ if $data13 != 9.810708435 then endi sql select count(c2),last(c4),stddev(c3) from sliding_tb0 interval(30s) sliding(20s) order by ts asc limit 100 offset 1; -if $row != 14 then +if $row != 15 then return -1 endi -if $data00 != @00-01-01 00:00:20.000@ then +if $data00 != @00-01-01 00:00:00.000@ then return -1 endi @@ -316,7 +318,7 @@ if $data01 != 1000 then return -1 endi -if $data02 != 66 then +if $data02 != 99 then return -1 endi @@ -324,7 +326,7 @@ if $data03 != 28.866070048 then return -1 endi -if $data90 != @00-01-01 00:03:20.000@ then +if $data90 != @00-01-01 00:03:00.000@ then return -1 endi @@ -332,113 +334,158 @@ if $data91 != 1000 then return -1 endi -if $data92 != 66 then +if $data92 != 99 then return -1 endi -sql select count(c2),last(c4),stddev(c3) from sliding_tb0 interval(30s) sliding(20s) order by ts asc limit 100 offset 14; -if $row != 1 then +sql select count(c2),last(c4),stddev(c3) from sliding_tb0 
interval(30s) sliding(20s) order by ts asc limit 100 offset 5; +if $row != 11 then return -1 endi -sql select count(c2),last(c4),stddev(c3),spread(c3) from sliding_tb0 where c2 = 0 interval(30s) order by ts desc; +sql select count(c2),last(c4),stddev(c3) from sliding_tb0 interval(30s) sliding(20s) order by ts asc limit 100 offset 6; if $row != 10 then return -1 endi -#00-01-01 00:04:30.000| 10| 0| 0.000000000| 0.000000000| -if $data00 != @00-01-01 00:04:30.000@ then +sql select count(c2),last(c4),stddev(c3) from sliding_tb0 interval(30s) sliding(20s) order by ts asc limit 100 offset 7; +if $row != 9 then return -1 endi -if $data01 != 10 then +sql select count(c2),last(c4),stddev(c3) from sliding_tb0 interval(30s) sliding(20s) order by ts asc limit 100 offset 8; +if $row != 8 then return -1 endi -if $data02 != 0 then +sql select count(c2),last(c4),stddev(c3) from sliding_tb0 interval(30s) sliding(20s) order by ts asc limit 100 offset 9; +if $row != 7 then return -1 endi -if $data03 != 0.000000000 then +sql select count(c2),last(c4),stddev(c3) from sliding_tb0 interval(30s) sliding(20s) order by ts asc limit 100 offset 10; +if $row != 6 then return -1 endi -sql select count(c2),last(c4),stddev(c3),spread(c3) from sliding_tb0 where c2 = 0 interval(30s) sliding(20s) order by ts desc limit 1 offset 15; +sql select count(c2),last(c4),stddev(c3) from sliding_tb0 interval(30s) sliding(20s) order by ts asc limit 100 offset 11; +if $row != 5 then + return -1 +endi + +sql select count(c2),last(c4),stddev(c3) from sliding_tb0 interval(30s) sliding(20s) order by ts asc limit 100 offset 12; +if $row != 4 then + return -1 +endi + +sql select count(c2),last(c4),stddev(c3) from sliding_tb0 interval(30s) sliding(20s) order by ts asc limit 100 offset 13; +if $row != 3 then + return -1 +endi + +sql select count(c2),last(c4),stddev(c3) from sliding_tb0 interval(30s) sliding(20s) order by ts asc limit 100 offset 14; +if $row != 2 then + return -1 +endi + +sql select count(c2),last(c4),stddev(c3) from sliding_tb0 interval(30s) sliding(20s) order by ts asc limit 100 offset 15; +if $row != 1 then + return -1 +endi + +sql select count(c2),last(c4),stddev(c3) from sliding_tb0 interval(30s) sliding(20s) order by ts asc limit 100 offset 16; if $row != 0 then return -1 endi -sql select count(*),stddev(c1),count(c1),first(c2),last(c3) from sliding_tb0 where ts>'2000-1-1 00:00:00' and ts<'2000-1-1 00:00:01.002' and c2 >= 0 interval(30s) sliding(10a) order by ts asc limit 1000; -if $row != 100 then +sql select count(c2),last(c4),stddev(c3),spread(c3) from sliding_tb0 where c2 = 0 interval(30s) order by ts desc; +if $row != 10 then return -1 endi -if $data00 != @00-01-01 00:00:00.000@ then +#00-01-01 00:04:30.000| 10| 0| 0.000000000| 0.000000000| +if $data00 != @00-01-01 00:04:30.000@ then return -1 endi -if $data02 != 9.521904571 then +if $data01 != 10 then return -1 endi -if $data05 != 33 then +if $data02 != 0 then return -1 endi -if $data10 != @00-01-01 00:00:00.010@ then +if $data03 != 0.000000000 then return -1 endi -if $data12 != 9.521904571 then +sql select count(c2),last(c4),stddev(c3),spread(c3) from sliding_tb0 where c2 = 0 interval(30s) sliding(20s) order by ts desc limit 1 offset 15; +if $row != 1 then return -1 endi -if $data15 != 33 then +sql select count(c2),last(c4),stddev(c3),spread(c3) from sliding_tb0 where c2 = 0 interval(30s) sliding(20s) order by ts desc limit 1 offset 16; +if $row != 0 then return -1 endi -if $data95 != 33 then +sql select count(c2), first(c3),stddev(c4) from sliding_tb0 
interval(10a) order by ts desc limit 10 offset 2; +if $data00 != @00-01-01 00:04:59.910@ then + return -1 +endi + +sql select count(*),stddev(c1),count(c1),first(c2),last(c3) from sliding_tb0 where ts>'2000-1-1 00:00:00' and ts<'2000-1-1 00:00:01.002' and c2 >= 0 interval(30s) sliding(10s) order by ts asc limit 1000; +if $row != 3 then return -1 endi -sql select count(*),stddev(c1),count(c1),first(c2),last(c3) from sliding_tb0 where ts>'2000-1-1 00:00:00' and ts<'2000-1-1 00:00:01.002' and c2 >= 0 interval(30s) sliding(10a) order by ts desc limit 1000; -if $row != 100 then +if $data00 != @99-12-31 23:59:40.000@ then return -1 endi -if $data00 != @00-01-01 00:00:00.990@ then +if $data02 != 9.521904571 then return -1 endi -if $data01 != 1 then +if $data05 != 33 then + return -1 +endi + +if $data10 != @99-12-31 23:59:50.000@ then + return -1 +endi + +if $data12 != 9.521904571 then return -1 endi -if $data02 != 0.000000000 then +if $data15 != 33 then return -1 endi -if $data03 != 1 then +if $data25 != 33 then return -1 endi -if $data90 != @00-01-01 00:00:00.900@ then +sql select count(*),stddev(c1),count(c1),first(c2),last(c3) from sliding_tb0 where ts>'2000-1-1 00:00:00' and ts<'2000-1-1 00:00:01.002' and c2 >= 0 interval(30s) sliding(10s) order by ts desc limit 1000; +if $row != 1 then return -1 endi -if $data91 != 4 then +if $data00 != @99-12-31 23:59:40.000@ then return -1 endi -if $data92 != 1.118033989 then +if $data01 != 33 then return -1 endi -if $data93 != 4 then +if $data02 != 9.521904571 then return -1 endi -if $data94 != 30.00000 then +if $data03 != 33 then return -1 endi @@ -457,5 +504,7 @@ sql_error select sum(c1) from sliding_tb0 interval(0) sliding(0); sql_error select sum(c1) from sliding_tb0 interval(0m) sliding(0m); sql_error select sum(c1) from sliding_tb0 interval(m) sliding(m); sql_error select sum(c1) from sliding_tb0 sliding(4m); +sql_error select count(*) from sliding_tb0 interval(1s) sliding(10s); +sql_error select count(*) from sliding_tb0 interval(10s) sliding(10a); system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/slimit.sim b/tests/script/general/parser/slimit.sim index edbf0c8cc0499d3cb40e0c7696d278ebcf9ba14a..f7a23019cf2e1bcdca7540dd4acd997f24d31e69 100644 --- a/tests/script/general/parser/slimit.sim +++ b/tests/script/general/parser/slimit.sim @@ -4,7 +4,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $dbPrefix = slm_db @@ -97,11 +97,11 @@ run general/parser/slimit_query.sim print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 5000 +sleep 3000 system sh/exec.sh -n dnode1 -s start print ================== server restart completed sql connect -sleep 3000 +sleep 500 run general/parser/slimit_query.sim diff --git a/tests/script/general/parser/slimit1.sim b/tests/script/general/parser/slimit1.sim index 9e26a2882a5b013aa804f01c1dd8e339d17d54db..7a2511eb76b2a0d1289c509876375a2a6f06419c 100644 --- a/tests/script/general/parser/slimit1.sim +++ b/tests/script/general/parser/slimit1.sim @@ -4,7 +4,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $dbPrefix = slm_alt_tg_db @@ -56,11 +56,11 @@ run general/parser/slimit1_query.sim print ================== 
restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 5000 +sleep 3000 system sh/exec.sh -n dnode1 -s start print ================== server restart completed sql connect -sleep 3000 +sleep 500 run general/parser/slimit1_query.sim diff --git a/tests/script/general/parser/slimit1_query.sim b/tests/script/general/parser/slimit1_query.sim index 617ccb3fd9a13b1392510c4287d11f69df5dde4e..c205d456892d79f7a9dd8fa1ec877c04afac49ef 100644 --- a/tests/script/general/parser/slimit1_query.sim +++ b/tests/script/general/parser/slimit1_query.sim @@ -1,4 +1,4 @@ -sleep 3000 +sleep 500 sql connect $dbPrefix = slm_alt_tg_db diff --git a/tests/script/general/parser/slimit_alter_tags.sim b/tests/script/general/parser/slimit_alter_tags.sim index eccd4a6815a5516afe5ed28dc8ec067737f18d01..1072f9ccb46c69ab1156783cbcdeb8940afb9aa4 100644 --- a/tests/script/general/parser/slimit_alter_tags.sim +++ b/tests/script/general/parser/slimit_alter_tags.sim @@ -4,7 +4,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $dbPrefix = slm_alt_tg_db @@ -171,11 +171,11 @@ endi print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 5000 +sleep 3000 system sh/exec.sh -n dnode1 -s start print ================== server restart completed sql connect -sleep 3000 +sleep 500 sql use $db ### repeat above queries diff --git a/tests/script/general/parser/slimit_query.sim b/tests/script/general/parser/slimit_query.sim index 655bc5f8beab0209428a4796ba83ba10d8b3afde..3020f8047217dfbd9f39b10b5b74b9280faa4737 100644 --- a/tests/script/general/parser/slimit_query.sim +++ b/tests/script/general/parser/slimit_query.sim @@ -1,4 +1,4 @@ -sleep 3000 +sleep 500 sql connect $dbPrefix = slm_db diff --git a/tests/script/general/parser/stream_on_sys.sim b/tests/script/general/parser/stream_on_sys.sim index 5507b4db484006fc8ae7887a47bcd83df5208c50..1c8eb82c79a537ddbb322bdb164541263d92373c 100644 --- a/tests/script/general/parser/stream_on_sys.sim +++ b/tests/script/general/parser/stream_on_sys.sim @@ -6,7 +6,7 @@ system sh/cfg.sh -n dnode1 -c monitor -v 1 system sh/cfg.sh -n dnode1 -c monitorInterval -v 1 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect print ======================== stream_on_sys.sim @@ -22,12 +22,12 @@ $i = 0 sql use $db -sql create table cpustrm as select count(*), avg(cpu_taosd), max(cpu_taosd), min(cpu_taosd), avg(cpu_system), max(cpu_cores), min(cpu_cores), last(cpu_cores) from log.dn1 interval(4s) sliding(2s) -sql create table memstrm as select count(*), avg(mem_taosd), max(mem_taosd), min(mem_taosd), avg(mem_system), first(mem_total), last(mem_total) from log.dn1 interval(4s) sliding(2s) -sql create table diskstrm as select count(*), avg(disk_used), last(disk_used), avg(disk_total), first(disk_total) from log.dn1 interval(4s) sliding(2s) -sql create table bandstrm as select count(*), avg(band_speed), last(band_speed) from log.dn1 interval(4s) sliding(2s) -sql create table reqstrm as select count(*), avg(req_http), last(req_http), avg(req_select), last(req_select), avg(req_insert), last(req_insert) from log.dn1 interval(4s) sliding(2s) -sql create table iostrm as select count(*), avg(io_read), last(io_read), avg(io_write), last(io_write) from log.dn1 interval(4s) sliding(2s) +sql create table cpustrm as select count(*), avg(cpu_taosd), max(cpu_taosd), 
min(cpu_taosd), avg(cpu_system), max(cpu_cores), min(cpu_cores), last(cpu_cores) from log.dn1 interval(4s) +sql create table memstrm as select count(*), avg(mem_taosd), max(mem_taosd), min(mem_taosd), avg(mem_system), first(mem_total), last(mem_total) from log.dn1 interval(4s) +sql create table diskstrm as select count(*), avg(disk_used), last(disk_used), avg(disk_total), first(disk_total) from log.dn1 interval(4s) +sql create table bandstrm as select count(*), avg(band_speed), last(band_speed) from log.dn1 interval(4s) +sql create table reqstrm as select count(*), avg(req_http), last(req_http), avg(req_select), last(req_select), avg(req_insert), last(req_insert) from log.dn1 interval(4s) +sql create table iostrm as select count(*), avg(io_read), last(io_read), avg(io_write), last(io_write) from log.dn1 interval(4s) sleep 120000 sql select * from cpustrm if $rows <= 0 then diff --git a/tests/script/general/parser/tags_dynamically_specifiy.sim b/tests/script/general/parser/tags_dynamically_specifiy.sim index 07bf4d8dd1cae6b0b374b9b67b1dca58f218fb3f..8303a9c86d610476d6dba164e0af3e6a9199b1da 100644 --- a/tests/script/general/parser/tags_dynamically_specifiy.sim +++ b/tests/script/general/parser/tags_dynamically_specifiy.sim @@ -3,9 +3,9 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect -sleep 3000 +sleep 500 $db = dytag_db $tbNum = 10 diff --git a/tests/script/general/parser/tags_filter.sim b/tests/script/general/parser/tags_filter.sim index 9842b4fda6304314a36eb06078fb7cb64e2c9c5c..c3d0fdfc61d2d9ec8aa19100511577652076d91d 100644 --- a/tests/script/general/parser/tags_filter.sim +++ b/tests/script/general/parser/tags_filter.sim @@ -3,7 +3,7 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $db = tf_db diff --git a/tests/script/general/parser/tbnameIn.sim b/tests/script/general/parser/tbnameIn.sim index fd5f32972ad9e0d0f89875d1f89d2734fd2cbdd8..d0f74ae53d5c696cb598b471dfae32307dfd0f73 100644 --- a/tests/script/general/parser/tbnameIn.sim +++ b/tests/script/general/parser/tbnameIn.sim @@ -3,7 +3,7 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $dbPrefix = ti_db @@ -55,8 +55,7 @@ while $i < $halfNum $binary = $binary . ' $nchar = 'nchar . $c $nchar = $nchar . 
' - sql insert into $tb values ( $ts , $c , $c , $c , $c , $c , $c , true, $binary , $nchar ) - sql insert into $tb1 values ( $ts , $c , NULL , $c , NULL , $c , $c , true, $binary , $nchar ) + sql insert into $tb values ( $ts , $c , $c , $c , $c , $c , $c , true, $binary , $nchar ) $tb1 values ( $ts , $c , NULL , $c , NULL , $c , $c , true, $binary , $nchar ) $x = $x + 1 endw @@ -68,7 +67,7 @@ run general/parser/tbnameIn_query.sim print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 5000 +sleep 3000 system sh/exec.sh -n dnode1 -s start print ================== server restart completed diff --git a/tests/script/general/parser/tbnameIn_query.sim b/tests/script/general/parser/tbnameIn_query.sim index 37cc1aa8245a22e698c1d756cfe1eac72fcf3fdb..ad7456f5578ccec76eef3262298130aaed2462c6 100644 --- a/tests/script/general/parser/tbnameIn_query.sim +++ b/tests/script/general/parser/tbnameIn_query.sim @@ -1,4 +1,4 @@ -sleep 3000 +sleep 500 sql connect $dbPrefix = ti_db diff --git a/tests/script/general/parser/testSuite.sim b/tests/script/general/parser/testSuite.sim index 3dd80b8e38c8144fee218ec372e59fe262ac7a15..8593400ce8c3d6ae5375993b0fcc1d7c36b86ca8 100644 --- a/tests/script/general/parser/testSuite.sim +++ b/tests/script/general/parser/testSuite.sim @@ -1,113 +1,112 @@ -sleep 2000 run general/parser/alter.sim -sleep 2000 +sleep 500 run general/parser/alter1.sim -sleep 2000 +sleep 500 run general/parser/alter_stable.sim -sleep 2000 +sleep 500 run general/parser/auto_create_tb.sim -sleep 2000 +sleep 500 run general/parser/auto_create_tb_drop_tb.sim -sleep 2000 +sleep 500 run general/parser/col_arithmetic_operation.sim -sleep 2000 +sleep 500 run general/parser/columnValue.sim -sleep 2000 +sleep 500 run general/parser/commit.sim -sleep 2000 +sleep 500 run general/parser/create_db.sim -sleep 2000 +sleep 500 run general/parser/create_mt.sim -sleep 2000 +sleep 500 run general/parser/create_tb.sim -sleep 2000 +sleep 500 run general/parser/dbtbnameValidate.sim -sleep 2000 +sleep 500 run general/parser/fill.sim -sleep 2000 +sleep 500 run general/parser/fill_stb.sim -sleep 2000 +sleep 500 #run general/parser/fill_us.sim # -sleep 2000 +sleep 500 run general/parser/first_last.sim -sleep 2000 +sleep 500 run general/parser/import_commit1.sim -sleep 2000 +sleep 500 run general/parser/import_commit2.sim -sleep 2000 +sleep 500 run general/parser/import_commit3.sim -sleep 2000 +sleep 500 #run general/parser/import_file.sim -sleep 2000 +sleep 500 run general/parser/insert_tb.sim -sleep 2000 +sleep 500 run general/parser/tags_dynamically_specifiy.sim -sleep 2000 +sleep 500 run general/parser/interp.sim -sleep 2000 +sleep 500 run general/parser/lastrow.sim -sleep 2000 +sleep 500 run general/parser/limit.sim -sleep 2000 +sleep 500 run general/parser/limit1.sim -sleep 2000 +sleep 500 run general/parser/limit1_tblocks100.sim -sleep 2000 +sleep 500 run general/parser/limit2.sim -sleep 2000 +sleep 500 run general/parser/mixed_blocks.sim -sleep 2000 +sleep 500 run general/parser/nchar.sim -sleep 2000 +sleep 500 run general/parser/null_char.sim -sleep 2000 +sleep 500 run general/parser/selectResNum.sim -sleep 2000 +sleep 500 run general/parser/select_across_vnodes.sim -sleep 2000 +sleep 500 run general/parser/select_from_cache_disk.sim -sleep 2000 +sleep 500 run general/parser/set_tag_vals.sim -sleep 2000 +sleep 500 run general/parser/single_row_in_tb.sim -sleep 2000 +sleep 500 run general/parser/slimit.sim -sleep 2000 +sleep 500 run general/parser/slimit1.sim 
-sleep 2000 +sleep 500 run general/parser/slimit_alter_tags.sim -sleep 2000 +sleep 500 run general/parser/tbnameIn.sim -sleep 2000 +sleep 500 run general/parser/slimit_alter_tags.sim # persistent failed -sleep 2000 +sleep 500 run general/parser/join.sim -sleep 2000 +sleep 500 run general/parser/join_multivnode.sim -sleep 2000 +sleep 500 run general/parser/projection_limit_offset.sim -sleep 2000 +sleep 500 run general/parser/select_with_tags.sim -sleep 2000 +sleep 500 run general/parser/groupby.sim -sleep 2000 +sleep 500 run general/parser/tags_filter.sim -sleep 2000 +sleep 500 run general/parser/topbot.sim -sleep 2000 +sleep 500 run general/parser/union.sim -sleep 2000 +sleep 500 run general/parser/constCol.sim -sleep 2000 +sleep 500 run general/parser/where.sim -sleep 2000 +sleep 500 run general/parser/timestamp.sim -sleep 2000 +sleep 500 run general/parser/sliding.sim -#sleep 2000 +#sleep 500 #run general/parser/repeatStream.sim -#sleep 2000 +#sleep 500 #run general/parser/stream_on_sys.sim -#sleep 2000 +#sleep 500 #run general/parser/stream.sim \ No newline at end of file diff --git a/tests/script/general/parser/timestamp.sim b/tests/script/general/parser/timestamp.sim index 67da0f08698a29582212400f8b189e26d1446ed8..72966459d0ced737e2d0f44702cd5e6956a3aced 100644 --- a/tests/script/general/parser/timestamp.sim +++ b/tests/script/general/parser/timestamp.sim @@ -5,7 +5,7 @@ system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 4 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $dbPrefix = ts_db @@ -59,10 +59,10 @@ run general/parser/timestamp_query.sim print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 3000 +sleep 500 system sh/exec.sh -n dnode1 -s start print ================== server restart completed sql connect -sleep 3000 +sleep 500 run general/parser/timestamp_query.sim diff --git a/tests/script/general/parser/timestamp_query.sim b/tests/script/general/parser/timestamp_query.sim index 783c03602b983bf0faea25db28d1ac1ce29d80b7..056e8bba6bd6587fcba2c2685d9764f3f67c42b7 100644 --- a/tests/script/general/parser/timestamp_query.sim +++ b/tests/script/general/parser/timestamp_query.sim @@ -1,4 +1,4 @@ -sleep 3000 +sleep 500 sql connect $dbPrefix = ts_db diff --git a/tests/script/general/parser/topbot.sim b/tests/script/general/parser/topbot.sim index c2b41888d739c559b24784c42ad040d54d7ae160..08e2f6ab00cdcdc3a974266420f7e66bb2e0a65b 100644 --- a/tests/script/general/parser/topbot.sim +++ b/tests/script/general/parser/topbot.sim @@ -5,7 +5,7 @@ system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 200 system sh/exec.sh -n dnode1 -s start -sleep 1000 +sleep 500 sql connect $dbPrefix = tb_db @@ -128,11 +128,11 @@ sql insert into test values(29999, 1)(70000, 2)(80000, 3) print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 5000 +sleep 3000 system sh/exec.sh -n dnode1 -s start print ================== server restart completed sql connect -sleep 1000 +sleep 500 sql select count(*) from t1.test where ts>10000 and ts<90000 interval(5000a) if $rows != 3 then @@ -169,7 +169,7 @@ endw system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode1 -s start sql connect -sleep 1000 +sleep 500 sql use db; $ts = 1000 @@ -221,7 +221,7 @@ sql insert into t2 values('2020-2-2 1:1:1', 1); system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode1 -s 
start sql connect -sleep 1000 +sleep 500 sql use db sql select count(*), first(ts), last(ts) from t2 interval(1d); diff --git a/tests/script/general/parser/union.sim b/tests/script/general/parser/union.sim index 4af482bde073000d9f2ba098b469f8ec33d7f419..cb46ac6b0d1a6d381d33974660b6584e12faff44 100644 --- a/tests/script/general/parser/union.sim +++ b/tests/script/general/parser/union.sim @@ -7,7 +7,7 @@ system sh/cfg.sh -n dnode1 -c rpcDebugFlag -v 135 system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 4 system sh/exec.sh -n dnode1 -s start -sleep 1000 +sleep 500 sql connect $dbPrefix = union_db @@ -27,7 +27,7 @@ $j = 1 $mt1 = $mtPrefix . $j -sql drop database if exits $db -x step1 +sql drop database if exists $db -x step1 step1: sql create database if not exists $db sql use $db @@ -36,9 +36,16 @@ sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 $i = 0 $t = 1578203484000 -while $i < $tbNum +$half = $tbNum / 2 + +while $i < $half $tb = $tbPrefix . $i + + $nextSuffix = $i + $half + $tb1 = $tbPrefix . $nextSuffix + sql create table $tb using $mt tags( $i ) + sql create table $tb1 using $mt tags( $nextSuffix ) $x = 0 while $x < $rowNum @@ -54,7 +61,7 @@ while $i < $tbNum $nchar = $nchar . ' $t1 = $t + $ms - sql insert into $tb values ($t1 , $c , $c , $c , $c , $c , $c , $c , $binary , $nchar ) + sql insert into $tb values ($t1 , $c , $c , $c , $c , $c , $c , $c , $binary , $nchar ) $tb1 values ($t1 , $c , $c , $c , $c , $c , $c , $c , $binary , $nchar ) $x = $x + 1 endw @@ -96,7 +103,7 @@ while $i < $tbNum endw print sleep 1sec. -sleep 1000 +sleep 500 $i = 1 $tb = $tbPrefix . $i diff --git a/tests/script/general/parser/where.sim b/tests/script/general/parser/where.sim index 5cac3f47235c1321d740e373d0f5828f076c31c7..8e17220b5b38f995c6dc7e662130f7250157aa44 100644 --- a/tests/script/general/parser/where.sim +++ b/tests/script/general/parser/where.sim @@ -5,7 +5,7 @@ system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 4 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sleep 500 sql connect $dbPrefix = wh_db @@ -20,16 +20,23 @@ $i = 0 $db = $dbPrefix . $i $mt = $mtPrefix . $i -sql drop database if exits $db -x step1 +sql drop database if exists $db -x step1 step1: sql create database if not exists $db sql use $db sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int) +$half = $tbNum / 2 + $i = 0 -while $i < $tbNum +while $i < $half $tb = $tbPrefix . $i + + $nextSuffix = $i + $half + $tb1 = $tbPrefix . $nextSuffix + sql create table $tb using $mt tags( $i ) + sql create table $tb1 using $mt tags( $nextSuffix ) $x = 0 while $x < $rowNum @@ -42,7 +49,7 @@ while $i < $tbNum $binary = $binary . ' $nchar = 'nchar . $c $nchar = $nchar . 
' - sql insert into $tb values ($ms , $c , $c , $c , $c , $c , $c , $c , $binary , $nchar ) + sql insert into $tb values ($ms , $c , $c , $c , $c , $c , $c , $c , $binary , $nchar ) $tb1 values ($ms , $c , $c , $c , $c , $c , $c , $c , $binary , $nchar ) $x = $x + 1 endw diff --git a/tests/script/general/wal/kill.sim b/tests/script/general/wal/kill.sim new file mode 100644 index 0000000000000000000000000000000000000000..7f103874a561c2bb2534996ab30d30ab0e8907a3 --- /dev/null +++ b/tests/script/general/wal/kill.sim @@ -0,0 +1,77 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 + +print ============== deploy +system sh/exec.sh -n dnode1 -s start +sleep 3001 +sql connect + +sql create database d1 +sql use d1 + +sql create table t1 (ts timestamp, i int) +sql insert into t1 values(now, 1); + +print =============== step3 +sleep 3000 +sql select * from t1; +print rows: $rows +if $rows != 1 then + return -1 +endi +system sh/exec.sh -n dnode1 -s stop -x SIGKILL +sleep 3000 + +print =============== step4 +system sh/exec.sh -n dnode1 -s start -x SIGKILL +sleep 3000 +sql select * from t1; +print rows: $rows +if $rows != 1 then + return -1 +endi +system sh/exec.sh -n dnode1 -s stop -x SIGKILL +sleep 3000 + +print =============== step5 +system sh/exec.sh -n dnode1 -s start -x SIGKILL +sleep 3000 +sql select * from t1; +print rows: $rows +if $rows != 1 then + return -1 +endi +system sh/exec.sh -n dnode1 -s stop -x SIGKILL +sleep 3000 + +print =============== step6 +system sh/exec.sh -n dnode1 -s start -x SIGKILL +sleep 3000 +sql select * from t1; +print rows: $rows +if $rows != 1 then + return -1 +endi +system sh/exec.sh -n dnode1 -s stop -x SIGKILL +sleep 3000 + +print =============== step7 +system sh/exec.sh -n dnode1 -s start -x SIGKILL +sleep 3000 +sql select * from t1; +print rows: $rows +if $rows != 1 then + return -1 +endi +system sh/exec.sh -n dnode1 -s stop -x SIGKILL +sleep 3000 + +print =============== step8 +system sh/exec.sh -n dnode1 -s start -x SIGKILL +sleep 3000 +sql select * from t1; +print rows: $rows +if $rows != 1 then + return -1 +endi +system sh/exec.sh -n dnode1 -s stop -x SIGKILL diff --git a/tests/script/general/wal/maxtables.sim b/tests/script/general/wal/maxtables.sim new file mode 100644 index 0000000000000000000000000000000000000000..e504c7e92e3447f7d29dbb2dc03456c3775ced2c --- /dev/null +++ b/tests/script/general/wal/maxtables.sim @@ -0,0 +1,46 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 100 +system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1 +system sh/cfg.sh -n dnode1 -c tableIncStepPerVnode -v 2 + + +print ============== deploy +system sh/exec.sh -n dnode1 -s start +sleep 3001 +sql connect + +sql create database d1 +sql use d1 +sql create table st (ts timestamp, tbcol int) TAGS(tgcol int) + +$i = 0 +while $i < 100 + $tb = t . 
$i + sql create table $tb using st tags( $i ) + sql insert into $tb values (now , $i ) + $i = $i + 1 +endw + +sql_error sql create table tt (ts timestamp, i int) + +print =============== step3 +sql select * from st; +if $rows != 100 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4 +sleep 3000 + +print =============== step4 +system sh/exec.sh -n dnode1 -s start +sleep 3000 + +sql select * from st; +if $rows != 100 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/wal/sync.sim b/tests/script/general/wal/sync.sim new file mode 100644 index 0000000000000000000000000000000000000000..abaf22f91921e5bcc8effbe6fc1c66766ff92a3f --- /dev/null +++ b/tests/script/general/wal/sync.sim @@ -0,0 +1,124 @@ +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/deploy.sh -n dnode3 -i 3 + +system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3 +system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3 +system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3 + +system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4 +system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 4 +system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 4 + +system sh/cfg.sh -n dnode1 -c http -v 1 +system sh/cfg.sh -n dnode2 -c http -v 1 +system sh/cfg.sh -n dnode3 -c http -v 1 + +system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 20000 +system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 20000 +system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 20000 + +system sh/cfg.sh -n dnode1 -c replica -v 3 +system sh/cfg.sh -n dnode2 -c replica -v 3 +system sh/cfg.sh -n dnode3 -c replica -v 3 + +system sh/cfg.sh -n dnode1 -c maxSQLLength -v 940032 +system sh/cfg.sh -n dnode2 -c maxSQLLength -v 940032 +system sh/cfg.sh -n dnode3 -c maxSQLLength -v 940032 + +print ============== deploy + +system sh/exec.sh -n dnode1 -s start +sleep 5001 +sql connect + +sql create dnode $hostname2 +sql create dnode $hostname3 +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start + +print =============== step1 +$x = 0 +show1: + $x = $x + 1 + sleep 2000 + if $x == 5 then + return -1 + endi +sql show mnodes -x show1 +$mnode1Role = $data2_1 +print mnode1Role $mnode1Role +$mnode2Role = $data2_2 +print mnode2Role $mnode2Role +$mnode3Role = $data2_3 +print mnode3Role $mnode3Role + +if $mnode1Role != master then + goto show1 +endi +if $mnode2Role != slave then + goto show1 +endi +if $mnode3Role != slave then + goto show1 +endi + +print =============== step2 +sql create database d1 replica 3 +sql use d1 + +sql create table table_rest (ts timestamp, i int) +print sql length is 870KB +restful d1 table_rest 1591072800 30000 +restful d1 table_rest 1591172800 30000 +restful d1 table_rest 1591272800 30000 +restful d1 table_rest 1591372800 30000 +restful d1 table_rest 1591472800 30000 +restful d1 table_rest 1591572800 30000 +restful d1 table_rest 1591672800 30000 +restful d1 table_rest 1591772800 30000 +restful d1 table_rest 1591872800 30000 +restful d1 table_rest 1591972800 30000 + +sql select * from table_rest; +print rows: $rows +if $rows != 300000 then + return -1 +endi + +print =============== step3 +system sh/exec.sh -n dnode1 -s stop -x SIGINT +sleep 5000 +sql select * from table_rest; +print rows: $rows +if $rows != 300000 then + return -1 +endi +system sh/exec.sh -n dnode1 -s start -x SIGINT +sleep 5000 + +print =============== step4 +system sh/exec.sh -n dnode2 -s stop -x SIGINT +sleep 5000 +sql 
select * from table_rest; +print rows: $rows +if $rows != 300000 then + return -1 +endi +system sh/exec.sh -n dnode2 -s start -x SIGINT +sleep 5000 + +print =============== step5 +system sh/exec.sh -n dnode3 -s stop -x SIGINT +sleep 5000 +sql select * from table_rest; +print rows: $rows +if $rows != 300000 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 4e68d1566a76468b06115a80aace70871665c9cb..64a6c871fcf0ae610025b060105766d3b0cf3105 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -64,6 +64,7 @@ cd ../../../debug; make ./test.sh -f general/db/repeat.sim ./test.sh -f general/db/tables.sim ./test.sh -f general/db/vnodes.sim +./test.sh -f general/db/nosuchfile.sim ./test.sh -f general/field/2.sim ./test.sh -f general/field/3.sim @@ -235,6 +236,10 @@ cd ../../../debug; make ./test.sh -f general/vector/table_query.sim ./test.sh -f general/vector/table_time.sim +./test.sh -f general/wal/sync.sim +./test.sh -f general/wal/kill.sim +./test.sh -f general/wal/maxtables.sim + ./test.sh -f unique/account/account_create.sim ./test.sh -f unique/account/account_delete.sim ./test.sh -f unique/account/account_len.sim @@ -272,11 +277,14 @@ cd ../../../debug; make ./test.sh -f unique/db/replica_part.sim ./test.sh -f unique/dnode/alternativeRole.sim +./test.sh -f unique/dnode/monitor.sim +./test.sh -f unique/dnode/monitor_bug.sim ./test.sh -f unique/dnode/simple.sim ./test.sh -f unique/dnode/balance1.sim ./test.sh -f unique/dnode/balance2.sim ./test.sh -f unique/dnode/balance3.sim ./test.sh -f unique/dnode/balancex.sim +./test.sh -f unique/dnode/data1.sim ./test.sh -f unique/dnode/offline1.sim ./test.sh -f unique/dnode/offline2.sim ./test.sh -f unique/dnode/reason.sim diff --git a/tests/script/unique/dnode/data1.sim b/tests/script/unique/dnode/data1.sim new file mode 100644 index 0000000000000000000000000000000000000000..61a991148b21b471aef906223a5f600f7db38f5f --- /dev/null +++ b/tests/script/unique/dnode/data1.sim @@ -0,0 +1,137 @@ +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/deploy.sh -n dnode3 -i 3 +system sh/deploy.sh -n dnode4 -i 4 + +system sh/cfg.sh -n dnode1 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode2 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode3 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode4 -c balanceInterval -v 10 + +system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4 +system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 4 +system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 4 +system sh/cfg.sh -n dnode4 -c mnodeEqualVnodeNum -v 4 + +system sh/cfg.sh -n dnode1 -c wallevel -v 2 +system sh/cfg.sh -n dnode2 -c wallevel -v 2 +system sh/cfg.sh -n dnode3 -c wallevel -v 2 +system sh/cfg.sh -n dnode4 -c wallevel -v 2 + +print ========== step1 +system sh/exec.sh -n dnode1 -s start +sql connect +sleep 3000 + +print ========== step2 +sql create dnode $hostname2 +system sh/exec.sh -n dnode2 -s start +sql create dnode $hostname3 +system sh/exec.sh -n dnode3 -s start +sql create dnode $hostname4 +system sh/exec.sh -n dnode4 -s start + +$x = 0 +show2: + $x = $x + 1 + sleep 3000 + if $x == 10 then + return -1 + endi + +sql show dnodes +print dnode1 openVnodes $data2_1 +print dnode2 openVnodes $data2_2 +print dnode3 openVnodes $data2_3 +print dnode4 openVnodes 
$data2_4 +if $data2_1 != 0 then + goto show2 +endi +if $data2_2 != 0 then + goto show2 +endi +if $data2_3 != 0 then + goto show2 +endi +if $data2_4 != 0 then + goto show2 +endi + +print ========== step3 +sql create database d1 replica 3 +sql create table d1.t1 (t timestamp, i int) +sql insert into d1.t1 values(now+1s, 35) +sql insert into d1.t1 values(now+2s, 34) +sql insert into d1.t1 values(now+3s, 33) +sql insert into d1.t1 values(now+4s, 32) +sql insert into d1.t1 values(now+5s, 31) + +$x = 0 +show3: + $x = $x + 1 + sleep 3000 + if $x == 10 then + return -1 + endi + +sql show dnodes +print dnode1 openVnodes $data2_1 +print dnode2 openVnodes $data2_2 +print dnode3 openVnodes $data2_3 +print dnode4 openVnodes $data2_4 +if $data2_1 != 0 then + goto show3 +endi +if $data2_2 != 1 then + goto show3 +endi +if $data2_3 != 1 then + goto show3 +endi +if $data2_4 != 1 then + goto show3 +endi + +print ========== step4 +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop -x SIGINT +system sh/exec.sh -n dnode4 -s stop -x SIGINT + +print ========== step5 +system_content rm -rf ../../../sim/dnode4/data/vnode/vnode2/tsdb/data + +print ========== step6 +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start +system sh/exec.sh -n dnode4 -s start +sleep 10000 + +print ========== step7 +sql select * from d1.t1 order by t desc +print $data01 $data11 $data21 $data31 $data41 +if $data01 != 31 then + return -1 +endi +if $data11 != 32 then + return -1 +endi +if $data21 != 33 then + return -1 +endi +if $data31 != 34 then + return -1 +endi +if $data41 != 35 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop -x SIGINT +system sh/exec.sh -n dnode4 -s stop -x SIGINT +system sh/exec.sh -n dnode5 -s stop -x SIGINT +system sh/exec.sh -n dnode6 -s stop -x SIGINT +system sh/exec.sh -n dnode7 -s stop -x SIGINT +system sh/exec.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/dnode/monitor.sim b/tests/script/unique/dnode/monitor.sim new file mode 100644 index 0000000000000000000000000000000000000000..1e5b0f6f56c261437ffc25c8a68139c53d7303df --- /dev/null +++ b/tests/script/unique/dnode/monitor.sim @@ -0,0 +1,92 @@ +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 + +system sh/cfg.sh -n dnode1 -c role -v 1 +system sh/cfg.sh -n dnode2 -c role -v 2 + +system sh/cfg.sh -n dnode1 -c wallevel -v 1 +system sh/cfg.sh -n dnode2 -c wallevel -v 1 + +system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3 +system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3 + +system sh/cfg.sh -n dnode1 -c minTablesPerVnode -v 10 +system sh/cfg.sh -n dnode2 -c minTablesPerVnode -v 10 +system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 10 +system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 10 + +system sh/cfg.sh -n dnode1 -c monitor -v 1 +system sh/cfg.sh -n dnode2 -c monitor -v 1 + +print ========== step1 +system sh/exec.sh -n dnode1 -s start +sql connect +sleep 5000 + +sql show dnodes +print dnode1 openVnodes $data3_1 +if $data2_1 != 0 then + return -1 +endi + +print ========== step2 +sql create dnode $hostname2 +system sh/exec.sh -n dnode2 -s start + +sleep 10000 +sql show dnodes +print dnode1 openVnodes $data2_1 +print dnode2 openVnodes $data2_2 +if $data2_1 != 0 then + return -1 +endi +if $data2_2 != 1 then + return -1 +endi + +print ========== step3 +sql show log.tables + +print $data00 +print $data10 +print $data20 
+print $data30 +print $data40 +print $data50 + +if $rows != 5 then + return -1 +endi + +print ========== step4 +sql select * from log.dn1 +print $rows +$rows1 = $rows + +sleep 3000 +sql select * from log.dn1 +print $rows +$rows2 = $rows + +if $rows2 <= $rows1 then + return -1 +endi + +print ========== step5 +sql select * from log.dn2 +print $rows +$rows1 = $rows + +sleep 3000 +sql select * from log.dn2 +print $rows +$rows2 = $rows + +if $rows2 <= $rows1 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT diff --git a/tests/script/unique/dnode/monitor_bug.sim b/tests/script/unique/dnode/monitor_bug.sim index 519aae6ca9f64b14a4488889a1e3eb6ead884467..3169c7cdba4c9f631a41365e25e6537dd823fc5a 100644 --- a/tests/script/unique/dnode/monitor_bug.sim +++ b/tests/script/unique/dnode/monitor_bug.sim @@ -18,8 +18,8 @@ sql connect sleep 5000 sql show dnodes -print dnode1 openVnodes $data3_1 -if $data3_1 != 3 then +print dnode1 openVnodes $data2_1 +if $data2_1 != 1 then return -1 endi @@ -31,22 +31,21 @@ $x = 0 show2: $x = $x + 1 sleep 2000 - if $x == 30 then + if $x == 10 then return -1 endi sql show dnodes -print dnode1 openVnodes $data3_1 -print dnode2 openVnodes $data3_2 -if $data3_1 != 4 then +print dnode1 openVnodes $data2_1 +print dnode2 openVnodes $data2_2 +if $data2_1 != 0 then goto show2 endi -if $data3_2 != 3 then +if $data2_2 != 1 then goto show2 endi print ========== step3 -sleep 3000 sql show log.tables print $data00 @@ -56,6 +55,23 @@ print $data30 print $data40 print $data50 -if $rows != 5 then +if $rows != 4 then + return -1 +endi + +print ========== step4 +sql select * from log.dn1 +print $rows +$rows1 = $rows + +sleep 3000 +sql select * from log.dn1 +print $rows +$rows2 = $rows + +if $rows2 <= $rows1 then return -1 endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/dnode/offline1.sim b/tests/script/unique/dnode/offline1.sim index 02d03dee97be0f2de62b3f8eb18194c595e7b050..beebbfda60c03d24a774347c8ecf8c2f9a4c6c9e 100644 --- a/tests/script/unique/dnode/offline1.sim +++ b/tests/script/unique/dnode/offline1.sim @@ -49,7 +49,7 @@ print dnode1 $data4_2 if $data4_1 != ready then return -1 endi -if $data4_2 != offline then +if $data4_2 == ready then return -1 endi diff --git a/tests/test-all.sh b/tests/test-all.sh index e45dd15fedc999c08037544254f13f78607ee638..ff47cbfd712650503b18fcf351b95abaae3b0b03 100755 --- a/tests/test-all.sh +++ b/tests/test-all.sh @@ -9,13 +9,17 @@ NC='\033[0m' function runSimCaseOneByOne { while read -r line; do - if [[ $line =~ ^./test.sh* ]]; then + if [[ $line =~ ^./test.sh* ]] || [[ $line =~ ^run* ]]; then case=`echo $line | grep sim$ |awk '{print $NF}'` start_time=`date +%s` ./test.sh -f $case > /dev/null 2>&1 && \ echo -e "${GREEN}$case success${NC}" | tee -a out.log || \ echo -e "${RED}$case failed${NC}" | tee -a out.log + out_log=`tail -1 out.log ` + if [[ $out_log =~ 'failed' ]];then + exit 8 + fi end_time=`date +%s` echo execution time of $case was `expr $end_time - $start_time`s. 
| tee -a out.log fi @@ -26,12 +30,21 @@ function runPyCaseOneByOne { while read -r line; do if [[ $line =~ ^python.* ]]; then if [[ $line != *sleep* ]]; then - case=`echo $line|awk '{print $NF}'` + + if [[ $line =~ '-r' ]];then + case=`echo $line|awk '{print $4}'` + else + case=`echo $line|awk '{print $NF}'` + fi start_time=`date +%s` $line > /dev/null 2>&1 && \ echo -e "${GREEN}$case success${NC}" | tee -a pytest-out.log || \ echo -e "${RED}$case failed${NC}" | tee -a pytest-out.log end_time=`date +%s` + out_log=`tail -1 pytest-out.log ` + if [[ $out_log =~ 'failed' ]];then + exit 8 + fi echo execution time of $case was `expr $end_time - $start_time`s. | tee -a pytest-out.log else $line > /dev/null 2>&1 diff --git a/tests/tsim/inc/sim.h b/tests/tsim/inc/sim.h index 58e58a442cedca564864578604faa6cea28ede68..01e5016557f2987741fdec6c6fd54facd885ea8a 100644 --- a/tests/tsim/inc/sim.h +++ b/tests/tsim/inc/sim.h @@ -100,7 +100,7 @@ typedef struct _cmd_t { int16_t cmdno; int16_t nlen; char name[MAX_SIM_CMD_NAME_LEN]; - bool (*parseCmd)(char *, struct _cmd_t *, int); + bool (*parseCmd)(char *, struct _cmd_t *, int32_t); bool (*executeCmd)(struct _script_t *script, char *option); struct _cmd_t *next; } SCommand; @@ -111,7 +111,7 @@ typedef struct { int16_t errorJump; // sql jump flag, while '-x' exist in sql cmd, this flag // will be SQL_JUMP_TRUE, otherwise is SQL_JUMP_FALSE */ int16_t lineNum; // correspodning line number in original file - int optionOffset; // relative option offset + int32_t optionOffset;// relative option offset } SCmdLine; typedef struct _var_t { @@ -121,59 +121,56 @@ typedef struct _var_t { } SVariable; typedef struct _script_t { - int type; - bool killed; - - void *taos; - char rows[12]; // number of rows data retrieved - char data[MAX_QUERY_ROW_NUM][MAX_QUERY_COL_NUM] - [MAX_QUERY_VALUE_LEN]; // query results - char system_exit_code[12]; - char system_ret_content[MAX_SYSTEM_RESULT_LEN]; - - int varLen; - int linePos; // current cmd position - int numOfLines; // number of lines in the script - int bgScriptLen; - char fileName[MAX_FILE_NAME_LEN]; // script file name - char error[MAX_ERROR_LEN]; - char *optionBuffer; + int32_t type; + bool killed; + void * taos; + char rows[12]; // number of rows data retrieved + char data[MAX_QUERY_ROW_NUM][MAX_QUERY_COL_NUM][MAX_QUERY_VALUE_LEN]; // query results + char system_exit_code[12]; + char system_ret_content[MAX_SYSTEM_RESULT_LEN]; + int32_t varLen; + int32_t linePos; // current cmd position + int32_t numOfLines; // number of lines in the script + int32_t bgScriptLen; + char fileName[MAX_FILE_NAME_LEN]; // script file name + char error[MAX_ERROR_LEN]; + char * optionBuffer; SCmdLine *lines; // command list SVariable variables[MAX_VAR_LEN]; + pthread_t bgPid; + char auth[128]; struct _script_t *bgScripts[MAX_BACKGROUND_SCRIPT_NUM]; - char auth[128]; } SScript; extern SScript *simScriptList[MAX_MAIN_SCRIPT_NUM]; extern SCommand simCmdList[]; -extern int simScriptPos; -extern int simScriptSucced; -extern int simDebugFlag; -extern char tsScriptDir[]; -extern bool simAsyncQuery; +extern int32_t simScriptPos; +extern int32_t simScriptSucced; +extern int32_t simDebugFlag; +extern char tsScriptDir[]; +extern bool simAsyncQuery; SScript *simParseScript(char *fileName); - SScript *simProcessCallOver(SScript *script); -void *simExecuteScript(void *script); -void simInitsimCmdList(); -bool simSystemInit(); -void simSystemCleanUp(); -char *simGetVariable(SScript *script, char *varName, int varLen); -bool simExecuteExpCmd(SScript *script, char 
*option); -bool simExecuteTestCmd(SScript *script, char *option); -bool simExecuteGotoCmd(SScript *script, char *option); -bool simExecuteRunCmd(SScript *script, char *option); -bool simExecuteRunBackCmd(SScript *script, char *option); -bool simExecuteSystemCmd(SScript *script, char *option); -bool simExecuteSystemContentCmd(SScript *script, char *option); -bool simExecutePrintCmd(SScript *script, char *option); -bool simExecuteSleepCmd(SScript *script, char *option); -bool simExecuteReturnCmd(SScript *script, char *option); -bool simExecuteSqlCmd(SScript *script, char *option); -bool simExecuteSqlErrorCmd(SScript *script, char *rest); -bool simExecuteSqlSlowCmd(SScript *script, char *option); -bool simExecuteRestfulCmd(SScript *script, char *rest); -void simVisuallizeOption(SScript *script, char *src, char *dst); +void * simExecuteScript(void *script); +void simInitsimCmdList(); +bool simSystemInit(); +void simSystemCleanUp(); +char * simGetVariable(SScript *script, char *varName, int32_t varLen); +bool simExecuteExpCmd(SScript *script, char *option); +bool simExecuteTestCmd(SScript *script, char *option); +bool simExecuteGotoCmd(SScript *script, char *option); +bool simExecuteRunCmd(SScript *script, char *option); +bool simExecuteRunBackCmd(SScript *script, char *option); +bool simExecuteSystemCmd(SScript *script, char *option); +bool simExecuteSystemContentCmd(SScript *script, char *option); +bool simExecutePrintCmd(SScript *script, char *option); +bool simExecuteSleepCmd(SScript *script, char *option); +bool simExecuteReturnCmd(SScript *script, char *option); +bool simExecuteSqlCmd(SScript *script, char *option); +bool simExecuteSqlErrorCmd(SScript *script, char *rest); +bool simExecuteSqlSlowCmd(SScript *script, char *option); +bool simExecuteRestfulCmd(SScript *script, char *rest); +void simVisuallizeOption(SScript *script, char *src, char *dst); #endif \ No newline at end of file diff --git a/tests/tsim/inc/simParse.h b/tests/tsim/inc/simParse.h index d3f92add71f3dc6ddb205c1810e1a3caace47e36..ef7d8e5ce72cc9bf0ae52380089d36576e84bd28 100644 --- a/tests/tsim/inc/simParse.h +++ b/tests/tsim/inc/simParse.h @@ -50,6 +50,6 @@ typedef struct { char sexpLen[MAX_NUM_BLOCK]; /*switch expression length */ } SBlock; -bool simParseExpression(char *token, int lineNum); +bool simParseExpression(char *token, int32_t lineNum); #endif \ No newline at end of file diff --git a/tests/tsim/src/simExe.c b/tests/tsim/src/simExe.c index 7f786dfaa9de5f6a93d3a181ddd53e37e8694310..2db750cdd302c63522e008e3ea324230dd87b6e5 100644 --- a/tests/tsim/src/simExe.c +++ b/tests/tsim/src/simExe.c @@ -13,6 +13,7 @@ * along with this program. If not, see . 
*/ +#define _DEFAULT_SOURCE #include "os.h" #include "sim.h" #include "taos.h" @@ -38,30 +39,28 @@ void simLogSql(char *sql, bool useSharp) { } else { fprintf(fp, "%s;\n", sql); } - + fflush(fp); } char *simParseArbitratorName(char *varName); char *simParseHostName(char *varName); -char *simGetVariable(SScript *script, char *varName, int varLen) { +char *simGetVariable(SScript *script, char *varName, int32_t varLen) { if (strncmp(varName, "hostname", 8) == 0) { return simParseHostName(varName); } if (strncmp(varName, "arbitrator", 10) == 0) { - return simParseArbitratorName(varName); + return simParseArbitratorName(varName); } if (strncmp(varName, "error", varLen) == 0) return script->error; if (strncmp(varName, "rows", varLen) == 0) return script->rows; - if (strncmp(varName, "system_exit", varLen) == 0) - return script->system_exit_code; + if (strncmp(varName, "system_exit", varLen) == 0) return script->system_exit_code; - if (strncmp(varName, "system_content", varLen) == 0) - return script->system_ret_content; + if (strncmp(varName, "system_content", varLen) == 0) return script->system_ret_content; // variable like data2_192.168.0.1 if (strncmp(varName, "data", 4) == 0) { @@ -70,16 +69,16 @@ char *simGetVariable(SScript *script, char *varName, int varLen) { } if (varName[5] == '_') { - int col = varName[4] - '0'; + int32_t col = varName[4] - '0'; if (col < 0 || col >= MAX_QUERY_COL_NUM) { return "null"; } - char *keyName; - int keyLen; + char * keyName; + int32_t keyLen; paGetToken(varName + 6, &keyName, &keyLen); - for (int i = 0; i < MAX_QUERY_ROW_NUM; ++i) { + for (int32_t i = 0; i < MAX_QUERY_ROW_NUM; ++i) { if (strncmp(keyName, script->data[i][0], keyLen) == 0) { simDebug("script:%s, keyName:%s, keyValue:%s", script->fileName, script->data[i][0], script->data[i][col]); return script->data[i][col]; @@ -87,16 +86,16 @@ char *simGetVariable(SScript *script, char *varName, int varLen) { } return "null"; } else if (varName[6] == '_') { - int col = (varName[4] - '0') * 10 + (varName[5] - '0'); + int32_t col = (varName[4] - '0') * 10 + (varName[5] - '0'); if (col < 0 || col >= MAX_QUERY_COL_NUM) { return "null"; } - char *keyName; - int keyLen; + char * keyName; + int32_t keyLen; paGetToken(varName + 7, &keyName, &keyLen); - for (int i = 0; i < MAX_QUERY_ROW_NUM; ++i) { + for (int32_t i = 0; i < MAX_QUERY_ROW_NUM; ++i) { if (strncmp(keyName, script->data[i][0], keyLen) == 0) { simTrace("script:%s, keyName:%s, keyValue:%s", script->fileName, script->data[i][0], script->data[i][col]); return script->data[i][col]; @@ -104,8 +103,8 @@ char *simGetVariable(SScript *script, char *varName, int varLen) { } return "null"; } else { - int row = varName[4] - '0'; - int col = varName[5] - '0'; + int32_t row = varName[4] - '0'; + int32_t col = varName[5] - '0'; if (row < 0 || row >= MAX_QUERY_ROW_NUM) { return "null"; } @@ -118,7 +117,7 @@ char *simGetVariable(SScript *script, char *varName, int varLen) { } } - for (int i = 0; i < script->varLen; ++i) { + for (int32_t i = 0; i < script->varLen; ++i) { SVariable *var = &script->variables[i]; if (var->varNameLen != varLen) { continue; @@ -144,11 +143,11 @@ char *simGetVariable(SScript *script, char *varName, int varLen) { return var->varValue; } -int simExecuteExpression(SScript *script, char *exp) { - char *op1, *op2, *var1, *var2, *var3, *rest; - int op1Len, op2Len, var1Len, var2Len, var3Len, val0, val1; - char t0[512], t1[512], t2[512], t3[1024]; - int result; +int32_t simExecuteExpression(SScript *script, char *exp) { + char * op1, *op2, *var1, 
*var2, *var3, *rest; + int32_t op1Len, op2Len, var1Len, var2Len, var3Len, val0, val1; + char t0[512], t1[512], t2[512], t3[1024]; + int32_t result; rest = paGetToken(exp, &var1, &var1Len); rest = paGetToken(rest, &op1, &op1Len); @@ -234,7 +233,7 @@ bool simExecuteExpCmd(SScript *script, char *option) { } bool simExecuteTestCmd(SScript *script, char *option) { - int result; + int32_t result; result = simExecuteExpression(script, option); if (result >= 0) @@ -285,13 +284,12 @@ bool simExecuteRunBackCmd(SScript *script, char *option) { sprintf(script->error, "lineNum:%d. parse file:%s error", script->lines[script->linePos].lineNum, fileName); return false; } - simInfo("script:%s, start to execute in background", newScript->fileName); newScript->type = SIM_SCRIPT_TYPE_BACKGROUND; script->bgScripts[script->bgScriptLen++] = newScript; + simInfo("script:%s, start to execute in background,", newScript->fileName); - pthread_t pid; - if (pthread_create(&pid, NULL, simExecuteScript, (void *)newScript) != 0) { + if (pthread_create(&newScript->bgPid, NULL, simExecuteScript, (void *)newScript) != 0) { sprintf(script->error, "lineNum:%d. create background thread failed", script->lines[script->linePos].lineNum); return false; } @@ -307,13 +305,13 @@ bool simExecuteSystemCmd(SScript *script, char *option) { simVisuallizeOption(script, option, buf + strlen(buf)); simLogSql(buf, true); - int code = system(buf); - int repeatTimes = 0; + int32_t code = system(buf); + int32_t repeatTimes = 0; while (code < 0) { - simError("script:%s, failed to execute %s , code %d, errno:%d %s, repeatTimes:%d", - script->fileName, buf, code, errno, strerror(errno), repeatTimes); + simError("script:%s, failed to execute %s , code %d, errno:%d %s, repeatTimes:%d", script->fileName, buf, code, + errno, strerror(errno), repeatTimes); taosMsleep(1000); -#ifdef LINUX +#ifdef LINUX signal(SIGCHLD, SIG_DFL); #endif if (repeatTimes++ >= 10) { @@ -368,8 +366,8 @@ bool simExecutePrintCmd(SScript *script, char *rest) { } bool simExecuteSleepCmd(SScript *script, char *option) { - int delta; - char buf[1024]; + int32_t delta; + char buf[1024]; simVisuallizeOption(script, option, buf); option = buf; @@ -395,7 +393,7 @@ bool simExecuteReturnCmd(SScript *script, char *option) { simVisuallizeOption(script, option, buf); option = buf; - int ret = 1; + int32_t ret = 1; if (option && option[0] != 0) ret = atoi(option); if (ret < 0) { @@ -411,8 +409,8 @@ bool simExecuteReturnCmd(SScript *script, char *option) { } void simVisuallizeOption(SScript *script, char *src, char *dst) { - char *var, *token, *value; - int dstLen, srcLen, tokenLen; + char * var, *token, *value; + int32_t dstLen, srcLen, tokenLen; dst[0] = 0, dstLen = 0; @@ -420,14 +418,14 @@ void simVisuallizeOption(SScript *script, char *src, char *dst) { var = strchr(src, '$'); if (var == NULL) break; if (var && ((var - src - 1) > 0) && *(var - 1) == '\\') { - srcLen = (int)(var - src - 1); + srcLen = (int32_t)(var - src - 1); memcpy(dst + dstLen, src, srcLen); dstLen += srcLen; src = var; break; } - srcLen = (int)(var - src); + srcLen = (int32_t)(var - src); memcpy(dst + dstLen, src, srcLen); dstLen += srcLen; @@ -435,13 +433,13 @@ void simVisuallizeOption(SScript *script, char *src, char *dst) { value = simGetVariable(script, token, tokenLen); strcpy(dst + dstLen, value); - dstLen += (int)strlen(value); + dstLen += (int32_t)strlen(value); } strcpy(dst + dstLen, src); } -void simCloseRestFulConnect(SScript *script) { +void simCloseRestFulConnect(SScript *script) { memset(script->auth, 0, 
sizeof(script->auth)); } @@ -465,7 +463,7 @@ void simCloseTaosdConnect(SScript *script) { // {"status":"succ","code":0,"desc":"/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"} // {"status":"succ","head":["affected_rows"],"data":[[1]],"rows":1} // {"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10]],"rows":10} -int simParseHttpCommandResult(SScript *script, char *command) { +int32_t simParseHttpCommandResult(SScript *script, char *command) { cJSON* root = cJSON_Parse(command); if (root == NULL) { simError("script:%s, failed to parse json, response:%s", script->fileName, command); @@ -492,14 +490,15 @@ int simParseHttpCommandResult(SScript *script, char *command) { cJSON_Delete(root); return -1; } - int retcode = (int)code->valueint; + int32_t retcode = (int32_t)code->valueint; if (retcode != 1017) { - simError("script:%s, json:status:%s not equal to succ, response:%s", script->fileName, status->valuestring, command); + simError("script:%s, json:status:%s not equal to succ, response:%s", script->fileName, status->valuestring, + command); cJSON_Delete(root); return retcode; } else { simDebug("script:%s, json:status:%s not equal to succ, but code is %d, response:%s", script->fileName, - status->valuestring, retcode, command); + status->valuestring, retcode, command); cJSON_Delete(root); return 0; } @@ -524,27 +523,27 @@ int simParseHttpCommandResult(SScript *script, char *command) { return -1; } - int rowsize = cJSON_GetArraySize(data); + int32_t rowsize = cJSON_GetArraySize(data); if (rowsize < 0) { simError("script:%s, failed to parse json:data, data size %d, response:%s", script->fileName, rowsize, command); cJSON_Delete(root); return -1; } - int rowIndex = 0; + int32_t rowIndex = 0; sprintf(script->rows, "%d", rowsize); - for (int r = 0; r < rowsize; ++r) { + for (int32_t r = 0; r < rowsize; ++r) { cJSON *row = cJSON_GetArrayItem(data, r); if (row == NULL) continue; if (rowIndex++ >= 10) break; - int colsize = cJSON_GetArraySize(row); + int32_t colsize = cJSON_GetArraySize(row); if (colsize < 0) { break; } colsize = MIN(10, colsize); - for (int c = 0; c < colsize; ++c) { + for (int32_t c = 0; c < colsize; ++c) { cJSON *col = cJSON_GetArrayItem(row, c); if (col->valuestring != NULL) { strcpy(script->data[r][c], col->valuestring); @@ -561,7 +560,7 @@ int simParseHttpCommandResult(SScript *script, char *command) { return 0; } -int simExecuteRestFulCommand(SScript *script, char *command) { +int32_t simExecuteRestFulCommand(SScript *script, char *command) { char buf[5000] = {0}; sprintf(buf, "%s 2>/dev/null", command); @@ -571,13 +570,13 @@ int simExecuteRestFulCommand(SScript *script, char *command) { return -1; } - int mallocSize = 2000; - int alreadyReadSize = 0; - char* content = malloc(mallocSize); + int32_t mallocSize = 2000; + int32_t alreadyReadSize = 0; + char * content = malloc(mallocSize); while (!feof(fp)) { - int availSize = mallocSize - alreadyReadSize; - int len = (int)fread(content + alreadyReadSize, 1, availSize, fp); + int32_t availSize = mallocSize - alreadyReadSize; + int32_t len = (int32_t)fread(content + alreadyReadSize, 1, availSize, fp); if (len >= availSize) { alreadyReadSize += len; mallocSize *= 2; @@ -595,10 +594,11 @@ bool simCreateRestFulConnect(SScript 
*script, char *user, char *pass) { sprintf(command, "curl 127.0.0.1:6041/rest/login/%s/%s", user, pass); bool success = false; - for (int attempt = 0; attempt < 10; ++attempt) { + for (int32_t attempt = 0; attempt < 10; ++attempt) { success = simExecuteRestFulCommand(script, command) == 0; if (!success) { - simDebug("script:%s, user:%s connect taosd failed:%s, attempt:%d", script->fileName, user, taos_errstr(NULL), attempt); + simDebug("script:%s, user:%s connect taosd failed:%s, attempt:%d", script->fileName, user, taos_errstr(NULL), + attempt); taosMsleep(1000); } else { simDebug("script:%s, user:%s connect taosd successed, attempt:%d", script->fileName, user, attempt); @@ -607,7 +607,8 @@ bool simCreateRestFulConnect(SScript *script, char *user, char *pass) { } if (!success) { - sprintf(script->error, "lineNum:%d. connect taosd failed:%s", script->lines[script->linePos].lineNum, taos_errstr(NULL)); + sprintf(script->error, "lineNum:%d. connect taosd failed:%s", script->lines[script->linePos].lineNum, + taos_errstr(NULL)); return false; } @@ -619,10 +620,11 @@ bool simCreateNativeConnect(SScript *script, char *user, char *pass) { simCloseTaosdConnect(script); void *taos = NULL; taosMsleep(2000); - for (int attempt = 0; attempt < 10; ++attempt) { + for (int32_t attempt = 0; attempt < 10; ++attempt) { taos = taos_connect(NULL, user, pass, NULL, tsDnodeShellPort); if (taos == NULL) { - simDebug("script:%s, user:%s connect taosd failed:%s, attempt:%d", script->fileName, user, taos_errstr(NULL), attempt); + simDebug("script:%s, user:%s connect taosd failed:%s, attempt:%d", script->fileName, user, taos_errstr(NULL), + attempt); taosMsleep(1000); } else { simDebug("script:%s, user:%s connect taosd successed, attempt:%d", script->fileName, user, attempt); @@ -631,7 +633,8 @@ bool simCreateNativeConnect(SScript *script, char *user, char *pass) { } if (taos == NULL) { - sprintf(script->error, "lineNum:%d. connect taosd failed:%s", script->lines[script->linePos].lineNum, taos_errstr(NULL)); + sprintf(script->error, "lineNum:%d. 
connect taosd failed:%s", script->lines[script->linePos].lineNum, + taos_errstr(NULL)); return false; } @@ -642,9 +645,9 @@ bool simCreateNativeConnect(SScript *script, char *user, char *pass) { } bool simCreateTaosdConnect(SScript *script, char *rest) { - char *user = TSDB_DEFAULT_USER; - char *token; - int tokenLen; + char * user = TSDB_DEFAULT_USER; + char * token; + int32_t tokenLen; rest = paGetToken(rest, &token, &tokenLen); rest = paGetToken(rest, &token, &tokenLen); if (tokenLen != 0) { @@ -659,26 +662,27 @@ bool simCreateTaosdConnect(SScript *script, char *rest) { } bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) { - char timeStr[30] = {0}; - time_t tt; + char timeStr[30] = {0}; + time_t tt; struct tm *tp; - SCmdLine *line = &script->lines[script->linePos]; - int ret = -1; + SCmdLine * line = &script->lines[script->linePos]; + int32_t ret = -1; + + TAOS_RES *pSql = NULL; - TAOS_RES* pSql = NULL; - - for (int attempt = 0; attempt < 10; ++attempt) { + for (int32_t attempt = 0; attempt < 10; ++attempt) { simLogSql(rest, false); pSql = taos_query(script->taos, rest); ret = taos_errno(pSql); - + if (ret == TSDB_CODE_MND_TABLE_ALREADY_EXIST || ret == TSDB_CODE_MND_DB_ALREADY_EXIST) { - simDebug("script:%s, taos:%p, %s success, ret:%d:%s", script->fileName, script->taos, rest, ret & 0XFFFF, tstrerror(ret)); + simDebug("script:%s, taos:%p, %s success, ret:%d:%s", script->fileName, script->taos, rest, ret & 0XFFFF, + tstrerror(ret)); ret = 0; break; } else if (ret != 0) { - simDebug("script:%s, taos:%p, %s failed, ret:%d:%s, error:%s", - script->fileName, script->taos, rest, ret & 0XFFFF, tstrerror(ret), taos_errstr(pSql)); + simDebug("script:%s, taos:%p, %s failed, ret:%d:%s, error:%s", script->fileName, script->taos, rest, ret & 0XFFFF, + tstrerror(ret), taos_errstr(pSql)); if (line->errorJump == SQL_JUMP_TRUE) { script->linePos = line->jump; @@ -689,7 +693,7 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) { } else { break; } - + taos_free_result(pSql); } @@ -698,8 +702,8 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) { return false; } - int numOfRows = 0; - int num_fields = taos_field_count(pSql); + int32_t numOfRows = 0; + int32_t num_fields = taos_field_count(pSql); if (num_fields != 0) { if (pSql == NULL) { simDebug("script:%s, taos:%p, %s failed, result is null", script->fileName, script->taos, rest); @@ -717,9 +721,9 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) { while ((row = taos_fetch_row(pSql))) { if (numOfRows < MAX_QUERY_ROW_NUM) { TAOS_FIELD *fields = taos_fetch_fields(pSql); - int* length = taos_fetch_lengths(pSql); - - for (int i = 0; i < num_fields; i++) { + int32_t * length = taos_fetch_lengths(pSql); + + for (int32_t i = 0; i < num_fields; i++) { char *value = NULL; if (i < MAX_QUERY_COL_NUM) { value = script->data[numOfRows][i]; @@ -735,8 +739,7 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) { switch (fields[i].type) { case TSDB_DATA_TYPE_BOOL: - sprintf(value, "%s", - ((((int)(*((char *)row[i]))) == 1) ? "1" : "0")); + sprintf(value, "%s", ((((int32_t)(*((char *)row[i]))) == 1) ? 
"1" : "0")); break; case TSDB_DATA_TYPE_TINYINT: sprintf(value, "%d", *((int8_t *)row[i])); @@ -779,9 +782,8 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) { tp = localtime(&tt); strftime(timeStr, 64, "%y-%m-%d %H:%M:%S", tp); - sprintf(value, "%s.%03d", timeStr, - (int)(*((int64_t *)row[i]) % 1000)); - + sprintf(value, "%s.%03d", timeStr, (int32_t)(*((int64_t *)row[i]) % 1000)); + break; default: break; @@ -814,17 +816,16 @@ bool simExecuteRestFulSqlCommand(SScript *script, char *rest) { char command[4096]; sprintf(command, "curl -H 'Authorization: Taosd %s' -d \"%s\" 127.0.0.1:6041/rest/sql", script->auth, rest); - int ret = -1; - for (int attempt = 0; attempt < 10; ++attempt) { + int32_t ret = -1; + for (int32_t attempt = 0; attempt < 10; ++attempt) { ret = simExecuteRestFulCommand(script, command); - if (ret == TSDB_CODE_MND_TABLE_ALREADY_EXIST || - ret == TSDB_CODE_MND_DB_ALREADY_EXIST) { - simDebug("script:%s, taos:%p, %s success, ret:%d:%s", script->fileName, script->taos, rest, ret & 0XFFFF, tstrerror(ret)); + if (ret == TSDB_CODE_MND_TABLE_ALREADY_EXIST || ret == TSDB_CODE_MND_DB_ALREADY_EXIST) { + simDebug("script:%s, taos:%p, %s success, ret:%d:%s", script->fileName, script->taos, rest, ret & 0XFFFF, + tstrerror(ret)); ret = 0; break; } else if (ret != 0) { - simDebug("script:%s, taos:%p, %s failed, ret:%d", - script->fileName, script->taos, rest, ret); + simDebug("script:%s, taos:%p, %s failed, ret:%d", script->fileName, script->taos, rest, ret); if (line->errorJump == SQL_JUMP_TRUE) { script->linePos = line->jump; @@ -854,8 +855,8 @@ bool simExecuteSqlImpCmd(SScript *script, char *rest, bool isSlow) { simDebug("script:%s, exec:%s", script->fileName, rest); strcpy(script->rows, "-1"); - for (int row = 0; row < MAX_QUERY_ROW_NUM; ++row) { - for (int col = 0; col < MAX_QUERY_COL_NUM; ++col) { + for (int32_t row = 0; row < MAX_QUERY_ROW_NUM; ++row) { + for (int32_t col = 0; col < MAX_QUERY_COL_NUM; ++col) { strcpy(script->data[row][col], "null"); } } @@ -903,23 +904,23 @@ bool simExecuteSqlSlowCmd(SScript *script, char *rest) { bool simExecuteRestfulCmd(SScript *script, char *rest) { FILE *fp = NULL; - char filename[256]; - sprintf(filename, "%s/tmp.sql", tsScriptDir); + char filename[256]; + sprintf(filename, "%s/tmp.sql", tsScriptDir); fp = fopen(filename, "w"); if (fp == NULL) { fprintf(stderr, "ERROR: failed to open file: %s\n", filename); return false; } - char db[64] = {0}; - char tb[64] = {0}; - char gzip[32] = {0}; + char db[64] = {0}; + char tb[64] = {0}; + char gzip[32] = {0}; int32_t ts; int32_t times; sscanf(rest, "%s %s %d %d %s", db, tb, &ts, ×, gzip); - + fprintf(fp, "insert into %s.%s values ", db, tb); - for (int i = 0; i < times; ++i) { + for (int32_t i = 0; i < times; ++i) { fprintf(fp, "(%d000, %d)", ts + i, ts); } fprintf(fp, " \n"); @@ -951,8 +952,8 @@ bool simExecuteSqlErrorCmd(SScript *script, char *rest) { simDebug("script:%s, exec:%s", script->fileName, rest); strcpy(script->rows, "-1"); - for (int row = 0; row < MAX_QUERY_ROW_NUM; ++row) { - for (int col = 0; col < MAX_QUERY_COL_NUM; ++col) { + for (int32_t row = 0; row < MAX_QUERY_ROW_NUM; ++row) { + for (int32_t col = 0; col < MAX_QUERY_COL_NUM; ++col) { strcpy(script->data[row][col], "null"); } } @@ -981,27 +982,27 @@ bool simExecuteSqlErrorCmd(SScript *script, char *rest) { return true; } - int ret; - TAOS_RES* pSql = NULL; + int32_t ret; + TAOS_RES *pSql = NULL; if (simAsyncQuery) { char command[4096]; sprintf(command, "curl -H 'Authorization: Taosd %s' -d '%s' 
127.0.0.1:6041/rest/sql", script->auth, rest); ret = simExecuteRestFulCommand(script, command); - } - else { + } else { pSql = taos_query(script->taos, rest); ret = taos_errno(pSql); taos_free_result(pSql); } if (ret != TSDB_CODE_SUCCESS) { - simDebug("script:%s, taos:%p, %s execute, expect failed, so success, ret:%d:%s", - script->fileName, script->taos, rest, ret & 0XFFFF, tstrerror(ret)); + simDebug("script:%s, taos:%p, %s execute, expect failed, so success, ret:%d:%s", script->fileName, script->taos, + rest, ret & 0XFFFF, tstrerror(ret)); script->linePos++; return true; } - - sprintf(script->error, "lineNum:%d. sql:%s expect failed, but success, ret:%d:%s", line->lineNum, rest, ret & 0XFFFF, tstrerror(ret)); + + sprintf(script->error, "lineNum:%d. sql:%s expect failed, but success, ret:%d:%s", line->lineNum, rest, ret & 0XFFFF, + tstrerror(ret)); return false; } diff --git a/tests/tsim/src/simMain.c b/tests/tsim/src/simMain.c index ef1a488f602959b740cbe863cc22654a24b2d41b..33fd24dd5823b8ec7bf818cec7c36ddac783bc24 100644 --- a/tests/tsim/src/simMain.c +++ b/tests/tsim/src/simMain.c @@ -13,6 +13,7 @@ * along with this program. If not, see . */ +#define _DEFAULT_SOURCE #include "os.h" #include "tglobal.h" #include "sim.h" @@ -20,15 +21,15 @@ bool simAsyncQuery = false; -void simHandleSignal(int signo) { +void simHandleSignal(int32_t signo) { simSystemCleanUp(); exit(1); } -int main(int argc, char *argv[]) { +int32_t main(int32_t argc, char *argv[]) { char scriptFile[MAX_FILE_NAME_LEN] = "sim_main_test.sim"; - for (int i = 1; i < argc; ++i) { + for (int32_t i = 1; i < argc; ++i) { if (strcmp(argv[i], "-c") == 0 && i < argc - 1) { tstrncpy(configDir, argv[++i], MAX_FILE_NAME_LEN); } else if (strcmp(argv[i], "-f") == 0 && i < argc - 1) { @@ -37,8 +38,7 @@ int main(int argc, char *argv[]) { simAsyncQuery = true; } else { printf("usage: %s [options] \n", argv[0]); - printf(" [-c config]: config directory, default is: %s\n", - configDir); + printf(" [-c config]: config directory, default is: %s\n", configDir); printf(" [-f script]: script filename\n"); exit(0); } diff --git a/tests/tsim/src/simParse.c b/tests/tsim/src/simParse.c index 2e6121304f18022a74b9e4f09a8043b5b4beb909..b909f5bd8fc10bea09afd65dc504ae35d6de3505 100644 --- a/tests/tsim/src/simParse.c +++ b/tests/tsim/src/simParse.c @@ -57,6 +57,7 @@ * */ +#define _DEFAULT_SOURCE #include "os.h" #include "sim.h" #include "simParse.h" @@ -64,16 +65,16 @@ #undef TAOS_MEM_CHECK static SCommand *cmdHashList[MAX_NUM_CMD]; -static SCmdLine cmdLine[MAX_CMD_LINES]; -static char parseErr[MAX_ERROR_LEN]; -static char optionBuffer[MAX_OPTION_BUFFER]; -static int numOfLines, optionOffset; -static SLabel label, dest; -static SBlock block; +static SCmdLine cmdLine[MAX_CMD_LINES]; +static char parseErr[MAX_ERROR_LEN]; +static char optionBuffer[MAX_OPTION_BUFFER]; +static int32_t numOfLines, optionOffset; +static SLabel label, dest; +static SBlock block; -int simHashCmd(char *token, int tokenLen) { - int i; - int hash = 0; +int32_t simHashCmd(char *token, int32_t tokenLen) { + int32_t i; + int32_t hash = 0; for (i = 0; i < tokenLen; ++i) hash += token[i]; @@ -82,8 +83,8 @@ int simHashCmd(char *token, int tokenLen) { return hash; } -SCommand *simCheckCmd(char *token, int tokenLen) { - int hash; +SCommand *simCheckCmd(char *token, int32_t tokenLen) { + int32_t hash; SCommand *node; hash = simHashCmd(token, tokenLen); @@ -102,10 +103,10 @@ SCommand *simCheckCmd(char *token, int tokenLen) { } void simAddCmdIntoHash(SCommand *pCmd) { - int hash; + int32_t hash; 
SCommand *node; - hash = simHashCmd(pCmd->name, (int)strlen(pCmd->name)); + hash = simHashCmd(pCmd->name, (int32_t)strlen(pCmd->name)); node = cmdHashList[hash]; pCmd->next = node; cmdHashList[hash] = pCmd; @@ -122,7 +123,7 @@ void simResetParser() { } SScript *simBuildScriptObj(char *fileName) { - int i, destPos; + int32_t i, destPos; /* process labels */ @@ -176,11 +177,11 @@ SScript *simBuildScriptObj(char *fileName) { } SScript *simParseScript(char *fileName) { - FILE *fd; - int tokenLen, lineNum = 0; - char buffer[MAX_LINE_LEN], name[128], *token, *rest; + FILE * fd; + int32_t tokenLen, lineNum = 0; + char buffer[MAX_LINE_LEN], name[128], *token, *rest; SCommand *pCmd; - SScript *script; + SScript * script; if ((fileName[0] == '.') || (fileName[0] == '/')) { strcpy(name, fileName); @@ -199,12 +200,13 @@ SScript *simParseScript(char *fileName) { if (fgets(buffer, sizeof(buffer), fd) == NULL) continue; lineNum++; - int cmdlen = (int)strlen(buffer); - if (buffer[cmdlen - 1] == '\r' || buffer[cmdlen - 1] == '\n') + int32_t cmdlen = (int32_t)strlen(buffer); + if (buffer[cmdlen - 1] == '\r' || buffer[cmdlen - 1] == '\n') { buffer[cmdlen - 1] = 0; + } rest = buffer; - for (int i = 0; i < cmdlen; ++i) { + for (int32_t i = 0; i < cmdlen; ++i) { if (buffer[i] == '\r' || buffer[i] == '\n') { buffer[i] = ' '; } @@ -249,9 +251,9 @@ SScript *simParseScript(char *fileName) { return script; } -int simCheckExpression(char *exp) { - char *op1, *op2, *op, *rest; - int op1Len, op2Len, opLen; +int32_t simCheckExpression(char *exp) { + char * op1, *op2, *op, *rest; + int32_t op1Len, op2Len, opLen; rest = paGetToken(exp, &op1, &op1Len); if (op1Len == 0) { @@ -282,8 +284,7 @@ int simCheckExpression(char *exp) { return -1; } } else if (opLen == 2) { - if (op[1] != '=' || - (op[0] != '=' && op[0] != '<' && op[0] != '>' && op[0] != '!')) { + if (op[1] != '=' || (op[0] != '=' && op[0] != '<' && op[0] != '>' && op[0] != '!')) { sprintf(parseErr, "left side of assignment must be variable"); return -1; } @@ -294,10 +295,10 @@ int simCheckExpression(char *exp) { rest = paGetToken(rest, &op, &opLen); - if (opLen == 0) return (int)(rest - exp); + if (opLen == 0) return (int32_t)(rest - exp); /* if it is key word "then" */ - if (strncmp(op, "then", 4) == 0) return (int)(op - exp); + if (strncmp(op, "then", 4) == 0) return (int32_t)(op - exp); rest = paGetToken(rest, &op2, &op2Len); if (op2Len == 0) { @@ -310,16 +311,15 @@ int simCheckExpression(char *exp) { return -1; } - if (op[0] == '+' || op[0] == '-' || op[0] == '*' || op[0] == '/' || - op[0] == '.') { - return (int)(rest - exp); + if (op[0] == '+' || op[0] == '-' || op[0] == '*' || op[0] == '/' || op[0] == '.') { + return (int32_t)(rest - exp); } return -1; } -bool simParseExpression(char *token, int lineNum) { - int expLen; +bool simParseExpression(char *token, int32_t lineNum) { + int32_t expLen; expLen = simCheckExpression(token); if (expLen <= 0) return -1; @@ -335,9 +335,9 @@ bool simParseExpression(char *token, int lineNum) { return true; } -bool simParseIfCmd(char *rest, SCommand *pCmd, int lineNum) { - char *ret; - int expLen; +bool simParseIfCmd(char *rest, SCommand *pCmd, int32_t lineNum) { + char * ret; + int32_t expLen; expLen = simCheckExpression(rest); @@ -364,8 +364,8 @@ bool simParseIfCmd(char *rest, SCommand *pCmd, int lineNum) { return true; } -bool simParseElifCmd(char *rest, SCommand *pCmd, int lineNum) { - int expLen; +bool simParseElifCmd(char *rest, SCommand *pCmd, int32_t lineNum) { + int32_t expLen; expLen = simCheckExpression(rest); @@ 
-382,8 +382,7 @@ bool simParseElifCmd(char *rest, SCommand *pCmd, int lineNum) { } cmdLine[numOfLines].cmdno = SIM_CMD_GOTO; - block.jump[block.top - 1][(uint8_t)block.numJump[block.top - 1]] = - &(cmdLine[numOfLines].jump); + block.jump[block.top - 1][(uint8_t)block.numJump[block.top - 1]] = &(cmdLine[numOfLines].jump); block.numJump[block.top - 1]++; numOfLines++; @@ -402,7 +401,7 @@ bool simParseElifCmd(char *rest, SCommand *pCmd, int lineNum) { return true; } -bool simParseElseCmd(char *rest, SCommand *pCmd, int lineNum) { +bool simParseElseCmd(char *rest, SCommand *pCmd, int32_t lineNum) { if (block.top < 1) { sprintf(parseErr, "no matching if"); return false; @@ -414,8 +413,7 @@ bool simParseElseCmd(char *rest, SCommand *pCmd, int lineNum) { } cmdLine[numOfLines].cmdno = SIM_CMD_GOTO; - block.jump[block.top - 1][(uint8_t)block.numJump[block.top - 1]] = - &(cmdLine[numOfLines].jump); + block.jump[block.top - 1][(uint8_t)block.numJump[block.top - 1]] = &(cmdLine[numOfLines].jump); block.numJump[block.top - 1]++; numOfLines++; @@ -426,8 +424,8 @@ bool simParseElseCmd(char *rest, SCommand *pCmd, int lineNum) { return true; } -bool simParseEndiCmd(char *rest, SCommand *pCmd, int lineNum) { - int i; +bool simParseEndiCmd(char *rest, SCommand *pCmd, int32_t lineNum) { + int32_t i; if (block.top < 1) { sprintf(parseErr, "no matching if"); @@ -441,8 +439,9 @@ bool simParseEndiCmd(char *rest, SCommand *pCmd, int lineNum) { if (block.pos[block.top - 1]) *(block.pos[block.top - 1]) = numOfLines; - for (i = 0; i < block.numJump[block.top - 1]; ++i) + for (i = 0; i < block.numJump[block.top - 1]; ++i) { *(block.jump[block.top - 1][i]) = numOfLines; + } block.numJump[block.top - 1] = 0; block.top--; @@ -450,8 +449,8 @@ bool simParseEndiCmd(char *rest, SCommand *pCmd, int lineNum) { return true; } -bool simParseWhileCmd(char *rest, SCommand *pCmd, int lineNum) { - int expLen; +bool simParseWhileCmd(char *rest, SCommand *pCmd, int32_t lineNum) { + int32_t expLen; expLen = simCheckExpression(rest); @@ -473,8 +472,8 @@ bool simParseWhileCmd(char *rest, SCommand *pCmd, int lineNum) { return true; } -bool simParseEndwCmd(char *rest, SCommand *pCmd, int lineNum) { - int i; +bool simParseEndwCmd(char *rest, SCommand *pCmd, int32_t lineNum) { + int32_t i; if (block.top < 1) { sprintf(parseErr, "no matching while"); @@ -493,17 +492,18 @@ bool simParseEndwCmd(char *rest, SCommand *pCmd, int lineNum) { *(block.pos[block.top - 1]) = numOfLines; - for (i = 0; i < block.numJump[block.top - 1]; ++i) + for (i = 0; i < block.numJump[block.top - 1]; ++i) { *(block.jump[block.top - 1][i]) = numOfLines; + } block.top--; return true; } -bool simParseSwitchCmd(char *rest, SCommand *pCmd, int lineNum) { - char *token; - int tokenLen; +bool simParseSwitchCmd(char *rest, SCommand *pCmd, int32_t lineNum) { + char * token; + int32_t tokenLen; rest = paGetToken(rest, &token, &tokenLen); if (tokenLen == 0) { @@ -524,9 +524,9 @@ bool simParseSwitchCmd(char *rest, SCommand *pCmd, int lineNum) { return true; } -bool simParseCaseCmd(char *rest, SCommand *pCmd, int lineNum) { - char *token; - int tokenLen; +bool simParseCaseCmd(char *rest, SCommand *pCmd, int32_t lineNum) { + char * token; + int32_t tokenLen; rest = paGetToken(rest, &token, &tokenLen); if (tokenLen == 0) { @@ -544,16 +544,16 @@ bool simParseCaseCmd(char *rest, SCommand *pCmd, int lineNum) { return false; } - if (block.pos[block.top - 1] != NULL) + if (block.pos[block.top - 1] != NULL) { *(block.pos[block.top - 1]) = numOfLines; + } block.pos[block.top - 1] = 
&(cmdLine[numOfLines].jump);
   cmdLine[numOfLines].cmdno = SIM_CMD_TEST;
   cmdLine[numOfLines].lineNum = lineNum;
   cmdLine[numOfLines].optionOffset = optionOffset;
-  memcpy(optionBuffer + optionOffset, block.sexp[block.top - 1],
-         block.sexpLen[block.top - 1]);
+  memcpy(optionBuffer + optionOffset, block.sexp[block.top - 1], block.sexpLen[block.top - 1]);
   optionOffset += block.sexpLen[block.top - 1];
   *(optionBuffer + optionOffset++) = ' ';
   *(optionBuffer + optionOffset++) = '=';
@@ -567,20 +567,18 @@ bool simParseCaseCmd(char *rest, SCommand *pCmd, int lineNum) {
   return true;
 }
 
-bool simParseBreakCmd(char *rest, SCommand *pCmd, int lineNum) {
+bool simParseBreakCmd(char *rest, SCommand *pCmd, int32_t lineNum) {
   if (block.top < 1) {
     sprintf(parseErr, "no blcok exists");
     return false;
   }
 
-  if (block.type[block.top - 1] != BLOCK_SWITCH &&
-      block.type[block.top - 1] != BLOCK_WHILE) {
+  if (block.type[block.top - 1] != BLOCK_SWITCH && block.type[block.top - 1] != BLOCK_WHILE) {
     sprintf(parseErr, "not in switch or while block");
     return false;
   }
 
-  block.jump[block.top - 1][(uint8_t)block.numJump[block.top - 1]] =
-      &(cmdLine[numOfLines].jump);
+  block.jump[block.top - 1][(uint8_t)block.numJump[block.top - 1]] = &(cmdLine[numOfLines].jump);
   block.numJump[block.top - 1]++;
 
   cmdLine[numOfLines].cmdno = SIM_CMD_GOTO;
@@ -590,7 +588,7 @@ bool simParseBreakCmd(char *rest, SCommand *pCmd, int lineNum) {
   return true;
 }
 
-bool simParseDefaultCmd(char *rest, SCommand *pCmd, int lineNum) {
+bool simParseDefaultCmd(char *rest, SCommand *pCmd, int32_t lineNum) {
   if (block.top < 1) {
     sprintf(parseErr, "no matching switch");
     return false;
@@ -601,14 +599,15 @@ bool simParseDefaultCmd(char *rest, SCommand *pCmd, int lineNum) {
     return false;
   }
 
-  if (block.pos[block.top - 1] != NULL)
+  if (block.pos[block.top - 1] != NULL) {
     *(block.pos[block.top - 1]) = numOfLines;
+  }
 
   return true;
 }
 
-bool simParseEndsCmd(char *rest, SCommand *pCmd, int lineNum) {
-  int i;
+bool simParseEndsCmd(char *rest, SCommand *pCmd, int32_t lineNum) {
+  int32_t i;
 
   if (block.top < 1) {
     sprintf(parseErr, "no matching switch");
@@ -620,8 +619,9 @@ bool simParseEndsCmd(char *rest, SCommand *pCmd, int lineNum) {
     return false;
   }
 
-  for (i = 0; i < block.numJump[block.top - 1]; ++i)
+  for (i = 0; i < block.numJump[block.top - 1]; ++i) {
     *(block.jump[block.top - 1][i]) = numOfLines;
+  }
 
   block.numJump[block.top - 1] = 0;
   block.top--;
@@ -629,7 +629,7 @@ bool simParseEndsCmd(char *rest, SCommand *pCmd, int lineNum) {
   return true;
 }
 
-bool simParseContinueCmd(char *rest, SCommand *pCmd, int lineNum) {
+bool simParseContinueCmd(char *rest, SCommand *pCmd, int32_t lineNum) {
   if (block.top < 1) {
     sprintf(parseErr, "no matching while");
     return false;
@@ -648,14 +648,14 @@ bool simParseContinueCmd(char *rest, SCommand *pCmd, int lineNum) {
   return true;
 }
 
-bool simParsePrintCmd(char *rest, SCommand *pCmd, int lineNum) {
-  int expLen;
+bool simParsePrintCmd(char *rest, SCommand *pCmd, int32_t lineNum) {
+  int32_t expLen;
 
   rest++;
   cmdLine[numOfLines].cmdno = SIM_CMD_PRINT;
   cmdLine[numOfLines].lineNum = lineNum;
   cmdLine[numOfLines].optionOffset = optionOffset;
-  expLen = (int)strlen(rest);
+  expLen = (int32_t)strlen(rest);
   memcpy(optionBuffer + optionOffset, rest, expLen);
   optionOffset += expLen + 1;
   *(optionBuffer + optionOffset - 1) = 0;
@@ -665,8 +665,8 @@ bool simParsePrintCmd(char *rest, SCommand *pCmd, int lineNum) {
 }
 
 void simCheckSqlOption(char *rest) {
-  int valueLen;
-  char *value, *xpos;
+  int32_t valueLen;
+  char *  value, *xpos;
 
   xpos = strstr(rest, " -x");  // need a blank
   if (xpos) {
@@ -682,15 +682,15 @@ void simCheckSqlOption(char *rest) {
   }
 }
 
-bool simParseSqlCmd(char *rest, SCommand *pCmd, int lineNum) {
-  int expLen;
+bool simParseSqlCmd(char *rest, SCommand *pCmd, int32_t lineNum) {
+  int32_t expLen;
 
   rest++;
   simCheckSqlOption(rest);
   cmdLine[numOfLines].cmdno = SIM_CMD_SQL;
   cmdLine[numOfLines].lineNum = lineNum;
   cmdLine[numOfLines].optionOffset = optionOffset;
-  expLen = (int)strlen(rest);
+  expLen = (int32_t)strlen(rest);
   memcpy(optionBuffer + optionOffset, rest, expLen);
   optionOffset += expLen + 1;
   *(optionBuffer + optionOffset - 1) = 0;
@@ -699,14 +699,14 @@ bool simParseSqlCmd(char *rest, SCommand *pCmd, int lineNum) {
   return true;
 }
 
-bool simParseSqlErrorCmd(char *rest, SCommand *pCmd, int lineNum) {
-  int expLen;
+bool simParseSqlErrorCmd(char *rest, SCommand *pCmd, int32_t lineNum) {
+  int32_t expLen;
 
   rest++;
   cmdLine[numOfLines].cmdno = SIM_CMD_SQL_ERROR;
   cmdLine[numOfLines].lineNum = lineNum;
   cmdLine[numOfLines].optionOffset = optionOffset;
-  expLen = (int)strlen(rest);
+  expLen = (int32_t)strlen(rest);
   memcpy(optionBuffer + optionOffset, rest, expLen);
   optionOffset += expLen + 1;
   *(optionBuffer + optionOffset - 1) = 0;
@@ -715,26 +715,26 @@ bool simParseSqlErrorCmd(char *rest, SCommand *pCmd, int lineNum) {
   return true;
 }
 
-bool simParseSqlSlowCmd(char *rest, SCommand *pCmd, int lineNum) {
+bool simParseSqlSlowCmd(char *rest, SCommand *pCmd, int32_t lineNum) {
   simParseSqlCmd(rest, pCmd, lineNum);
   cmdLine[numOfLines - 1].cmdno = SIM_CMD_SQL_SLOW;
   return true;
 }
 
-bool simParseRestfulCmd(char *rest, SCommand *pCmd, int lineNum) {
+bool simParseRestfulCmd(char *rest, SCommand *pCmd, int32_t lineNum) {
   simParseSqlCmd(rest, pCmd, lineNum);
   cmdLine[numOfLines - 1].cmdno = SIM_CMD_RESTFUL;
   return true;
 }
 
-bool simParseSystemCmd(char *rest, SCommand *pCmd, int lineNum) {
-  int expLen;
+bool simParseSystemCmd(char *rest, SCommand *pCmd, int32_t lineNum) {
+  int32_t expLen;
 
   rest++;
   cmdLine[numOfLines].cmdno = SIM_CMD_SYSTEM;
   cmdLine[numOfLines].lineNum = lineNum;
   cmdLine[numOfLines].optionOffset = optionOffset;
-  expLen = (int)strlen(rest);
+  expLen = (int32_t)strlen(rest);
   memcpy(optionBuffer + optionOffset, rest, expLen);
   optionOffset += expLen + 1;
   *(optionBuffer + optionOffset - 1) = 0;
@@ -743,15 +743,15 @@ bool simParseSystemCmd(char *rest, SCommand *pCmd, int lineNum) {
   return true;
 }
 
-bool simParseSystemContentCmd(char *rest, SCommand *pCmd, int lineNum) {
+bool simParseSystemContentCmd(char *rest, SCommand *pCmd, int32_t lineNum) {
   simParseSystemCmd(rest, pCmd, lineNum);
   cmdLine[numOfLines - 1].cmdno = SIM_CMD_SYSTEM_CONTENT;
   return true;
 }
 
-bool simParseSleepCmd(char *rest, SCommand *pCmd, int lineNum) {
-  char *token;
-  int tokenLen;
+bool simParseSleepCmd(char *rest, SCommand *pCmd, int32_t lineNum) {
+  char *  token;
+  int32_t tokenLen;
 
   cmdLine[numOfLines].cmdno = SIM_CMD_SLEEP;
   cmdLine[numOfLines].lineNum = lineNum;
@@ -768,9 +768,9 @@ bool simParseSleepCmd(char *rest, SCommand *pCmd, int lineNum) {
   return true;
 }
 
-bool simParseReturnCmd(char *rest, SCommand *pCmd, int lineNum) {
-  char *token;
-  int tokenLen;
+bool simParseReturnCmd(char *rest, SCommand *pCmd, int32_t lineNum) {
+  char *  token;
+  int32_t tokenLen;
 
   cmdLine[numOfLines].cmdno = SIM_CMD_RETURN;
   cmdLine[numOfLines].lineNum = lineNum;
@@ -787,9 +787,9 @@ bool simParseReturnCmd(char *rest, SCommand *pCmd, int lineNum) {
   return true;
 }
 
-bool simParseGotoCmd(char *rest, SCommand *pCmd, int lineNum) {
-  char *token;
-  int tokenLen;
+bool simParseGotoCmd(char *rest, SCommand *pCmd, int32_t lineNum) {
+  char *  token;
+  int32_t tokenLen;
 
   rest = paGetToken(rest, &token, &tokenLen);
 
@@ -810,9 +810,9 @@ bool simParseGotoCmd(char *rest, SCommand *pCmd, int lineNum) {
   return true;
 }
 
-bool simParseRunCmd(char *rest, SCommand *pCmd, int lineNum) {
-  char *token;
-  int tokenLen;
+bool simParseRunCmd(char *rest, SCommand *pCmd, int32_t lineNum) {
+  char *  token;
+  int32_t tokenLen;
 
   rest = paGetToken(rest, &token, &tokenLen);
 
@@ -832,14 +832,14 @@ bool simParseRunCmd(char *rest, SCommand *pCmd, int lineNum) {
   return true;
 }
 
-bool simParseRunBackCmd(char *rest, SCommand *pCmd, int lineNum) {
+bool simParseRunBackCmd(char *rest, SCommand *pCmd, int32_t lineNum) {
   simParseRunCmd(rest, pCmd, lineNum);
   cmdLine[numOfLines - 1].cmdno = SIM_CMD_RUN_BACK;
   return true;
 }
 
 void simInitsimCmdList() {
-  int cmdno;
+  int32_t cmdno;
   memset(simCmdList, 0, SIM_CMD_END * sizeof(SCommand));
 
   /* internal command */
diff --git a/tests/tsim/src/simSystem.c b/tests/tsim/src/simSystem.c
index 17df7f306a47545f7283df1848f98ce18b87db79..693ade7b35a095b499198db9dcc27335f73e14bd 100644
--- a/tests/tsim/src/simSystem.c
+++ b/tests/tsim/src/simSystem.c
@@ -13,6 +13,7 @@
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
+#define _DEFAULT_SOURCE
 #include "os.h"
 #include "sim.h"
 #include "taos.h"
@@ -24,11 +25,11 @@
 SScript *simScriptList[MAX_MAIN_SCRIPT_NUM];
 SCommand simCmdList[SIM_CMD_END];
 
-int simScriptPos = -1;
-int simScriptSucced = 0;
-int simDebugFlag = 135;
-void simCloseTaosdConnect(SScript *script);
-char simHostName[128];
+int32_t simScriptPos = -1;
+int32_t simScriptSucced = 0;
+int32_t simDebugFlag = 135;
+void    simCloseTaosdConnect(SScript *script);
+char    simHostName[128];
 
 char *simParseArbitratorName(char *varName) {
   static char hostName[140];
@@ -39,8 +40,8 @@ char *simParseHostName(char *varName) {
   static char hostName[140];
-  int index = atoi(varName + 8);
-  int port = 7100;
+  int32_t index = atoi(varName + 8);
+  int32_t port = 7100;
 
   switch (index) {
     case 1:
       port = 7100;
@@ -70,9 +71,9 @@ char *simParseHostName(char *varName) {
       port = 7900;
       break;
   }
-  
+
   sprintf(hostName, "'%s:%d'", simHostName, port);
-  //simInfo("hostName:%s", hostName);
+  // simInfo("hostName:%s", hostName);
   return hostName;
 }
@@ -88,39 +89,45 @@ void simSystemCleanUp() {}
 
 void simFreeScript(SScript *script) {
   if (script->type == SIM_SCRIPT_TYPE_MAIN) {
-    for (int i = 0; i < script->bgScriptLen; ++i) {
+    simInfo("script:%s, background script num:%d, stop them", script->fileName, script->bgScriptLen);
+
+    for (int32_t i = 0; i < script->bgScriptLen; ++i) {
       SScript *bgScript = script->bgScripts[i];
+      simInfo("script:%s, set stop flag", script->fileName);
       bgScript->killed = true;
+      if (taosCheckPthreadValid(bgScript->bgPid)) {
+        pthread_join(bgScript->bgPid, NULL);
+      }
     }
   }
 
+  simDebug("script:%s, is freed", script->fileName);
   taos_close(script->taos);
-  taosTFree(script->lines);
-  taosTFree(script->optionBuffer);
-  taosTFree(script);
+  tfree(script->lines);
+  tfree(script->optionBuffer);
+  tfree(script);
 }
 
 SScript *simProcessCallOver(SScript *script) {
   if (script->type == SIM_SCRIPT_TYPE_MAIN) {
     if (script->killed) {
-      simInfo("script:" FAILED_PREFIX "%s" FAILED_POSTFIX ", " FAILED_PREFIX
-              "failed" FAILED_POSTFIX ", error:%s",
-              script->fileName, script->error);
+      simInfo("script:" FAILED_PREFIX "%s" FAILED_POSTFIX ", " FAILED_PREFIX "failed" FAILED_POSTFIX ", error:%s",
+              script->fileName, script->error);
       exit(-1);
     } else {
-      simInfo("script:" SUCCESS_PREFIX "%s" SUCCESS_POSTFIX ", " SUCCESS_PREFIX
-              "success" SUCCESS_POSTFIX,
-              script->fileName);
+      simInfo("script:" SUCCESS_PREFIX "%s" SUCCESS_POSTFIX ", " SUCCESS_PREFIX "success" SUCCESS_POSTFIX,
+              script->fileName);
       simCloseTaosdConnect(script);
       simScriptSucced++;
       simScriptPos--;
+
+      simFreeScript(script);
 
       if (simScriptPos == -1) {
         simInfo("----------------------------------------------------------------------");
         simInfo("Simulation Test Done, " SUCCESS_PREFIX "%d" SUCCESS_POSTFIX " Passed:\n", simScriptSucced);
         exit(0);
       }
 
-      simFreeScript(script);
       return simScriptList[simScriptPos];
     }
   } else {
@@ -143,11 +150,11 @@ void *simExecuteScript(void *inputScript) {
       if (script == NULL) break;
     } else {
       SCmdLine *line = &script->lines[script->linePos];
-      char *option = script->optionBuffer + line->optionOffset;
+      char *    option = script->optionBuffer + line->optionOffset;
       simDebug("script:%s, line:%d with option \"%s\"", script->fileName, line->lineNum, option);
 
       SCommand *cmd = &simCmdList[line->cmdno];
-      int ret = (*(cmd->executeCmd))(script, option);
+      int32_t   ret = (*(cmd->executeCmd))(script, option);
       if (!ret) {
         script->killed = true;
       }